├── .gitignore ├── LICENSE ├── README.md ├── flotilla-client ├── broker │ └── client.go └── main.go └── flotilla-server ├── daemon ├── broker │ ├── activemq │ │ ├── activemq.go │ │ └── orchestrator.go │ ├── amqp │ │ ├── amqp.go │ │ └── rabbitmq │ │ │ └── orchestrator.go │ ├── beanstalkd │ │ ├── beanstalkd.go │ │ └── orchestrator.go │ ├── broker.go │ ├── kafka │ │ ├── kafka.go │ │ └── orchestrator.go │ ├── kestrel │ │ ├── kestrel.go │ │ └── orchestrator.go │ ├── nats │ │ ├── nats.go │ │ └── orchestrator.go │ ├── nsq │ │ ├── nsq.go │ │ └── orchestrator.go │ └── pubsub │ │ ├── orchestrator.go │ │ └── pubsub.go ├── daemon.go ├── publisher.go └── subscriber.go └── main.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Flotilla 2 | 3 | Flotilla is a **work-in-progress** tool for testing message queues in more realistic environments. [Many benchmarks](https://github.com/tylertreat/mq-benchmarking) only measure performance characteristics on a single machine, sometimes with producers and consumers in the *same process* even. The reality is this information is marginally useful, if at all, and often deceiving. This [blog post](http://www.bravenewgeek.com/benchmark-responsibly/) provides some more background on the motivation behind this project. 4 | 5 | Testing anything at scale can be difficult to achieve in practice. It generally takes a lot of resources and often requires ad hoc solutions. Flotilla attempts to provide automated orchestration for benchmarking message queues in scaled-up configurations. 
Simply put, we can benchmark a message broker with arbitrarily many producers and consumers distributed across arbitrarily many machines with a single command. 6 | 7 | ```shell 8 | flotilla-client \ 9 | --broker=kafka \ 10 | --host=192.168.59.100:9500 \ 11 | --peer-hosts=localhost:9500,192.168.59.101:9500,192.168.59.102:9500,192.168.59.103:9500 \ 12 | --producers=5 \ 13 | --consumers=3 \ 14 | --num-messages=1000000 \ 15 | --message-size=5000 16 | ``` 17 | 18 | In addition to simulating more realistic testing scenarios, Flotilla also tries to offer more statistically meaningful results in the benchmarking itself. It relies on [HDR Histogram](http://hdrhistogram.github.io/HdrHistogram/) (or rather a [Go variant](https://github.com/codahale/hdrhistogram) of it) which supports recording and analyzing sampled data value counts at extremely low latencies. See the "Caveats" section below for potential benchmarking issues and areas for improvement. 19 | 20 | Flotilla supports several message brokers out of the box: 21 | 22 | - [Beanstalkd](http://kr.github.io/beanstalkd/) 23 | - [NATS](http://nats.io/) 24 | - [Kafka](http://kafka.apache.org/) 25 | - [Kestrel](http://twitter.github.io/kestrel/) 26 | - [ActiveMQ](http://activemq.apache.org/) 27 | - [RabbitMQ](http://www.rabbitmq.com/) 28 | - [NSQ](http://nsq.io/) 29 | - [Google Cloud Pub/Sub](https://cloud.google.com/pubsub/docs) 30 | 31 | ## Installation 32 | 33 | Flotilla consists of two binaries: the server daemon and client. The daemon runs on any machines you wish to include in your tests. The client orchestrates and executes the tests. Note that the daemon makes use of [Docker](https://www.docker.com/) for running many of the brokers, so it must be installed on the host machine. If you're running OSX, use [boot2docker](http://boot2docker.io/). 
34 | 35 | To install the daemon, run: 36 | 37 | ```bash 38 | $ go get github.com/tylertreat/flotilla/flotilla-server 39 | ``` 40 | 41 | To install the client, run: 42 | 43 | ```bash 44 | $ go get github.com/tylertreat/flotilla/flotilla-client 45 | ``` 46 | 47 | ## Usage 48 | 49 | Ensure the daemon is running on any machines you wish Flotilla to communicate with: 50 | 51 | ```bash 52 | $ flotilla-server 53 | Flotilla daemon started on port 9500... 54 | ``` 55 | 56 | ### Local Configuration 57 | 58 | Flotilla can be run locally to perform benchmarks on a single machine. First, start the daemon with `flotilla-server`. Next, run a benchmark using the client: 59 | 60 | ```bash 61 | $ flotilla-client --broker=rabbitmq 62 | ``` 63 | 64 | Flotilla will run everything on localhost. 65 | 66 | ### Distributed Configuration 67 | 68 | With all daemons started, run a benchmark using the client and provide the peers you wish to communicate with: 69 | 70 | ```bash 71 | $ flotilla-client --broker=rabbitmq --host= --peer-hosts= 72 | ``` 73 | 74 | For full usage details, run: 75 | 76 | ```bash 77 | $ flotilla-client --help 78 | ``` 79 | 80 | ### Running on OSX 81 | 82 | Flotilla starts most brokers using a Docker container. This can be achieved on OSX using boot2docker, which runs the container in a VM. The daemon needs to know the address of the VM. This can be provided from the client using the `--docker-host` flag, which specifies the host machine (or VM, in this case) the broker will run on. 83 | 84 | ```bash 85 | $ flotilla-client --broker=rabbitmq --docker-host=$(boot2docker ip) 86 | ``` 87 | 88 | ## Caveats 89 | 90 | - *Not all brokers are created equal.* Flotilla is designed to make it easy to test drive different messaging systems, but comparing results between them can often be misguided. 91 | - Several brokers support publishing batches of messages to boost throughput (with a latency penalty). 
Some brokers don't support batching, so messages are published one at a time for these. This affects throughput significantly. 92 | - The latency of a message is measured as the time it's sent subtracted from the time it's received. This requires recording the clocks of both the sender and receiver. If you're running scaled-up, *distributed* tests, then the clocks aren't perfectly synchronized. *These benchmarks aren't perfect.* 93 | - Related to the above point, measuring *anything* requires some computational overhead, which affects results. HDR Histogram tries to minimize this problem but can't remove it altogether. 94 | - There is currently no security built in. Use this tool *at your own risk*. The daemon runs on port 9500 by default. 95 | 96 | ## TODO 97 | 98 | - Many message brokers, such as Kafka, are designed to operate in a clustered configuration for higher availability. Add support for these types of topologies. This gets us closer to what would be deployed in production. 99 | - Some broker clients provide back-pressure heuristics. For example, NATS allows us to slow down publishing if it determines the receiver is falling behind. This greatly improves throughput. 100 | - Replace use of `os/exec` with Docker REST API (how does this work with boot2docker?) 101 | - Plottable data output. 102 | - Integration with [Comcast](https://github.com/tylertreat/Comcast) for testing under different network conditions. 
103 | - Use [etcd](https://github.com/coreos/etcd) to provide shared configuration and daemon discovery 104 | - Use [usl](https://github.com/codahale/usl) to populate a [Universal Scalability Law](http://www.perfdynamics.com/Manifesto/USLscalability.html) model 105 | - Use [tinystat](https://github.com/codahale/tinystat) to compare benchmark runs and tease out statistical noise 106 | -------------------------------------------------------------------------------- /flotilla-client/broker/client.go: -------------------------------------------------------------------------------- 1 | package broker 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/go-mangos/mangos" 10 | "github.com/go-mangos/mangos/protocol/req" 11 | "github.com/go-mangos/mangos/transport/tcp" 12 | ) 13 | 14 | type operation string 15 | type daemon string 16 | 17 | const ( 18 | minNumMessages = 100 19 | minMessageSize = 9 20 | start operation = "start" 21 | stop operation = "stop" 22 | sub operation = "subscribers" 23 | pub operation = "publishers" 24 | run operation = "run" 25 | results operation = "results" 26 | teardown operation = "teardown" 27 | resultsSleep = time.Second 28 | sendRecvDeadline = 5 * time.Second 29 | ) 30 | 31 | type request struct { 32 | Operation operation `json:"operation"` 33 | Broker string `json:"broker"` 34 | Port string `json:"port"` 35 | NumMessages uint `json:"num_messages"` 36 | MessageSize uint64 `json:"message_size"` 37 | Count uint `json:"count"` 38 | Host string `json:"host"` 39 | } 40 | 41 | type response struct { 42 | Success bool `json:"success"` 43 | Message string `json:"message"` 44 | Result interface{} `json:"result"` 45 | PubResults []*Result `json:"pub_results,omitempty"` 46 | SubResults []*Result `json:"sub_results,omitempty"` 47 | } 48 | 49 | // Benchmark contains configuration settings for broker tests. 
50 | type Benchmark struct { 51 | BrokerdHost string 52 | BrokerName string 53 | BrokerHost string 54 | BrokerPort string 55 | PeerHosts []string 56 | NumMessages uint 57 | MessageSize uint64 58 | Publishers uint 59 | Subscribers uint 60 | StartupSleep uint 61 | DaemonTimeout uint 62 | } 63 | 64 | func (b *Benchmark) validate() error { 65 | if b.BrokerdHost == "" { 66 | return errors.New("Invalid broker daemon host") 67 | } 68 | 69 | if b.BrokerName == "" { 70 | return errors.New("Invalid broker name") 71 | } 72 | 73 | if b.BrokerHost == "" { 74 | return errors.New("Invalid broker host") 75 | } 76 | 77 | if b.BrokerPort == "" { 78 | return errors.New("Invalid broker port") 79 | } 80 | 81 | if len(b.PeerHosts) == 0 { 82 | return errors.New("Must provide at least one peer host") 83 | } 84 | 85 | if b.NumMessages < minNumMessages { 86 | return fmt.Errorf("Number of messages must be at least %d", minNumMessages) 87 | } 88 | 89 | if b.MessageSize < minMessageSize { 90 | return fmt.Errorf("Message size must be at least %d", minMessageSize) 91 | } 92 | 93 | if b.Publishers <= 0 { 94 | return errors.New("Number of producers must be greater than zero") 95 | } 96 | 97 | if b.Subscribers <= 0 { 98 | return errors.New("Number of consumers must be greater than zero") 99 | } 100 | 101 | return nil 102 | } 103 | 104 | // Result contains test result data for a single peer. 105 | type Result struct { 106 | Duration float32 `json:"duration,omitempty"` 107 | Throughput float32 `json:"throughput,omitempty"` 108 | Latency LatencyResults `json:"latency,omitempty"` 109 | Err string `json:"error"` 110 | } 111 | 112 | // ResultContainer contains the Results for a single node. 113 | type ResultContainer struct { 114 | Peer string 115 | PublisherResults []*Result 116 | SubscriberResults []*Result 117 | } 118 | 119 | // LatencyResults contains the latency result data for a single peer. 
120 | type LatencyResults struct { 121 | Min int64 `json:"min"` 122 | Q1 int64 `json:"q1"` 123 | Q2 int64 `json:"q2"` 124 | Q3 int64 `json:"q3"` 125 | Max int64 `json:"max"` 126 | Mean float64 `json:"mean"` 127 | StdDev float64 `json:"std_dev"` 128 | } 129 | 130 | // Client provides an API for interacting with Flotilla. 131 | type Client struct { 132 | brokerd mangos.Socket 133 | peerd map[string]mangos.Socket 134 | Benchmark *Benchmark 135 | } 136 | 137 | // NewClient creates and returns a new Client from the provided Benchmark 138 | // configuration. It returns an error if the Benchmark is not valid or it 139 | // can't communicate with any of the specified peers. 140 | func NewClient(b *Benchmark) (*Client, error) { 141 | if err := b.validate(); err != nil { 142 | return nil, err 143 | } 144 | 145 | brokerd, err := req.NewSocket() 146 | if err != nil { 147 | return nil, err 148 | } 149 | 150 | brokerd.AddTransport(tcp.NewTransport()) 151 | brokerd.SetOption(mangos.OptionSendDeadline, time.Duration(b.DaemonTimeout)*time.Second) 152 | brokerd.SetOption(mangos.OptionRecvDeadline, time.Duration(b.DaemonTimeout)*time.Second) 153 | 154 | if err := brokerd.Dial(fmt.Sprintf("tcp://%s", b.BrokerdHost)); err != nil { 155 | return nil, err 156 | } 157 | 158 | peerd := make(map[string]mangos.Socket, len(b.PeerHosts)) 159 | for _, peer := range b.PeerHosts { 160 | s, err := req.NewSocket() 161 | if err != nil { 162 | return nil, err 163 | } 164 | 165 | s.AddTransport(tcp.NewTransport()) 166 | s.SetOption(mangos.OptionSendDeadline, time.Duration(b.DaemonTimeout)*time.Second) 167 | s.SetOption(mangos.OptionRecvDeadline, time.Duration(b.DaemonTimeout)*time.Second) 168 | 169 | if err := s.Dial(fmt.Sprintf("tcp://%s", peer)); err != nil { 170 | return nil, err 171 | } 172 | 173 | peerd[peer] = s 174 | } 175 | 176 | return &Client{ 177 | brokerd: brokerd, 178 | peerd: peerd, 179 | Benchmark: b, 180 | }, nil 181 | } 182 | 183 | // Start begins the broker test. 
184 | func (c *Client) Start() ([]*ResultContainer, error) { 185 | fmt.Println("Starting broker - if the image hasn't been pulled yet, this may take a while...") 186 | if err := c.startBroker(); err != nil { 187 | return nil, fmt.Errorf("Failed to start broker: %s", err.Error()) 188 | } 189 | 190 | // Allow some time for broker startup. 191 | time.Sleep(time.Duration(c.Benchmark.StartupSleep) * time.Second) 192 | 193 | fmt.Println("Preparing producers") 194 | if err := c.startPublishers(); err != nil { 195 | return nil, fmt.Errorf("Failed to start producers: %s", err.Error()) 196 | } 197 | 198 | fmt.Println("Preparing consumers") 199 | if err := c.startSubscribers(); err != nil { 200 | return nil, fmt.Errorf("Failed to start consumers %s:", err.Error()) 201 | } 202 | 203 | fmt.Println("Running benchmark") 204 | if err := c.runBenchmark(); err != nil { 205 | return nil, fmt.Errorf("Failed to run benchmark %s:", err.Error()) 206 | } 207 | 208 | return <-c.collectResults(), nil 209 | } 210 | 211 | func (c *Client) startBroker() error { 212 | resp, err := sendRequest(c.brokerd, request{ 213 | Operation: start, 214 | Broker: c.Benchmark.BrokerName, 215 | Host: c.Benchmark.BrokerHost, 216 | Port: c.Benchmark.BrokerPort, 217 | }) 218 | 219 | if err != nil { 220 | return err 221 | } 222 | 223 | if !resp.Success { 224 | return errors.New(resp.Message) 225 | } 226 | 227 | return nil 228 | } 229 | 230 | func (c *Client) startSubscribers() error { 231 | for _, peerd := range c.peerd { 232 | resp, err := sendRequest(peerd, request{ 233 | Operation: sub, 234 | Broker: c.Benchmark.BrokerName, 235 | Host: fmt.Sprintf("%s:%s", c.Benchmark.BrokerHost, c.Benchmark.BrokerPort), 236 | Count: c.Benchmark.Subscribers, 237 | NumMessages: c.Benchmark.NumMessages, 238 | MessageSize: c.Benchmark.MessageSize, 239 | }) 240 | 241 | if err != nil { 242 | return err 243 | } 244 | 245 | if !resp.Success { 246 | return errors.New(resp.Message) 247 | } 248 | } 249 | return nil 250 | } 251 | 252 | 
func (c *Client) startPublishers() error { 253 | for _, peerd := range c.peerd { 254 | resp, err := sendRequest(peerd, request{ 255 | Operation: pub, 256 | Broker: c.Benchmark.BrokerName, 257 | Host: fmt.Sprintf("%s:%s", c.Benchmark.BrokerHost, c.Benchmark.BrokerPort), 258 | Count: c.Benchmark.Publishers, 259 | NumMessages: c.Benchmark.NumMessages, 260 | MessageSize: c.Benchmark.MessageSize, 261 | }) 262 | 263 | if err != nil { 264 | return err 265 | } 266 | 267 | if !resp.Success { 268 | return errors.New(resp.Message) 269 | } 270 | } 271 | return nil 272 | } 273 | 274 | func (c *Client) runBenchmark() error { 275 | for _, peerd := range c.peerd { 276 | resp, err := sendRequest(peerd, request{Operation: run}) 277 | if err != nil { 278 | return err 279 | } 280 | 281 | if !resp.Success { 282 | return errors.New(resp.Message) 283 | } 284 | } 285 | return nil 286 | } 287 | 288 | func (c *Client) collectResults() <-chan []*ResultContainer { 289 | resultsChan := make(chan []*ResultContainer, 1) 290 | 291 | go func(chan<- []*ResultContainer) { 292 | results := make([]*ResultContainer, 0, len(c.peerd)) 293 | subResults := make(chan *ResultContainer, len(c.peerd)) 294 | complete := 0 295 | 296 | for host, peerd := range c.peerd { 297 | go collectResultsFromPeer(host, peerd, subResults) 298 | } 299 | 300 | for { 301 | select { 302 | case subResult, ok := <-subResults: 303 | if !ok { 304 | return 305 | } 306 | results = append(results, subResult) 307 | complete++ 308 | } 309 | 310 | if complete == len(c.peerd) { 311 | resultsChan <- results 312 | return 313 | } 314 | } 315 | }(resultsChan) 316 | 317 | return resultsChan 318 | } 319 | 320 | // Teardown performs any necessary cleanup logic, including stopping the 321 | // broker and tearing down peers. 
322 | func (c *Client) Teardown() { 323 | fmt.Println("Tearing down peers") 324 | for _, peerd := range c.peerd { 325 | _, err := sendRequest(peerd, request{Operation: teardown}) 326 | if err != nil { 327 | fmt.Printf("Failed to teardown peer: %s\n", err.Error()) 328 | } 329 | } 330 | 331 | fmt.Println("Stopping broker") 332 | if err := c.stopBroker(); err != nil { 333 | fmt.Printf("Failed to stop broker: %s\n", err.Error()) 334 | } 335 | } 336 | 337 | func (c *Client) stopBroker() error { 338 | resp, err := sendRequest(c.brokerd, request{Operation: stop}) 339 | if err != nil { 340 | return err 341 | } 342 | 343 | if !resp.Success { 344 | return errors.New(resp.Message) 345 | } 346 | 347 | return nil 348 | } 349 | 350 | func sendRequest(s mangos.Socket, request request) (*response, error) { 351 | requestJSON, err := json.Marshal(request) 352 | if err != nil { 353 | // This is not recoverable. 354 | panic(err) 355 | } 356 | 357 | if err := s.Send(requestJSON); err != nil { 358 | return nil, err 359 | } 360 | 361 | rep, err := s.Recv() 362 | if err != nil { 363 | return nil, err 364 | } 365 | 366 | var resp response 367 | if err := json.Unmarshal(rep, &resp); err != nil { 368 | return nil, err 369 | } 370 | 371 | return &resp, nil 372 | } 373 | 374 | func collectResultsFromPeer(host string, peerd mangos.Socket, subResults chan *ResultContainer) { 375 | for { 376 | resp, err := sendRequest(peerd, request{Operation: results}) 377 | if err != nil { 378 | fmt.Println("Failed to collect results from peer:", err.Error()) 379 | close(subResults) 380 | return 381 | } 382 | 383 | if !resp.Success { 384 | fmt.Printf("Failed to collect results from peer: %s", resp.Message) 385 | subResults <- nil 386 | } 387 | 388 | if resp.Message == "Results not ready" { 389 | time.Sleep(resultsSleep) 390 | continue 391 | } 392 | 393 | subResults <- &ResultContainer{ 394 | Peer: host, 395 | PublisherResults: resp.PubResults, 396 | SubscriberResults: resp.SubResults, 397 | } 398 | break 399 | 
} 400 | } 401 | -------------------------------------------------------------------------------- /flotilla-client/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "strconv" 9 | "strings" 10 | "syscall" 11 | "time" 12 | 13 | "github.com/olekukonko/tablewriter" 14 | "github.com/tylertreat/Flotilla/flotilla-client/broker" 15 | ) 16 | 17 | const ( 18 | defaultDaemonPort = "9500" 19 | defaultBrokerPort = "5000" 20 | defaultNumMessages = 500000 21 | defaultMessageSize = 1000 22 | defaultNumProducers = 1 23 | defaultNumConsumers = 1 24 | defaultStartupSleep = 8 25 | defaultDaemonTimeout = 5 26 | defaultHost = "localhost" 27 | defaultDaemonHost = defaultHost + ":" + defaultDaemonPort 28 | ) 29 | 30 | var brokers = []string{ 31 | "beanstalkd", 32 | "nats", 33 | "kafka", 34 | "kestrel", 35 | "activemq", 36 | "rabbitmq", 37 | "nsq", 38 | "pubsub", 39 | } 40 | 41 | func main() { 42 | var ( 43 | brokerName = flag.String("broker", brokers[0], brokerList()) 44 | brokerPort = flag.String("broker-port", defaultBrokerPort, "host machine broker port") 45 | dockerHost = flag.String("docker-host", defaultHost, "host machine (or VM) running Docker") 46 | brokerdHost = flag.String("host", defaultDaemonHost, "machine running broker daemon") 47 | peerHosts = flag.String("peer-hosts", defaultDaemonHost, "comma-separated list of machines to run peers") 48 | producers = flag.Uint("producers", defaultNumProducers, "number of producers per host") 49 | consumers = flag.Uint("consumers", defaultNumConsumers, "number of consumers per host") 50 | numMessages = flag.Uint("num-messages", defaultNumMessages, "number of messages to send from each producer") 51 | messageSize = flag.Uint64("message-size", defaultMessageSize, "size of each message in bytes") 52 | startupSleep = flag.Uint("startup-sleep", defaultStartupSleep, "seconds to wait after broker start before benchmarking") 
53 | daemonTimeout = flag.Uint("daemon-timeout", defaultDaemonTimeout, "seconds to wait for daemon before timing out") 54 | ) 55 | flag.Parse() 56 | 57 | peers := strings.Split(*peerHosts, ",") 58 | 59 | client, err := broker.NewClient(&broker.Benchmark{ 60 | BrokerdHost: *brokerdHost, 61 | BrokerName: *brokerName, 62 | BrokerHost: *dockerHost, 63 | BrokerPort: *brokerPort, 64 | PeerHosts: peers, 65 | NumMessages: *numMessages, 66 | MessageSize: *messageSize, 67 | Publishers: *producers, 68 | Subscribers: *consumers, 69 | StartupSleep: *startupSleep, 70 | DaemonTimeout: *daemonTimeout, 71 | }) 72 | if err != nil { 73 | fmt.Println("Failed to connect to flotilla:", err) 74 | os.Exit(1) 75 | } 76 | 77 | start := time.Now() 78 | results, err := runBenchmark(client) 79 | if err != nil { 80 | fmt.Println(err) 81 | os.Exit(1) 82 | } 83 | elapsed := time.Since(start) 84 | 85 | printSummary(client.Benchmark, elapsed) 86 | printResults(results) 87 | } 88 | 89 | func runBenchmark(client *broker.Client) ([]*broker.ResultContainer, error) { 90 | defer client.Teardown() 91 | sig := make(chan os.Signal, 2) 92 | signal.Notify(sig, os.Interrupt, syscall.SIGTERM) 93 | go func() { 94 | <-sig 95 | fmt.Println("\nShutting down...") 96 | client.Teardown() 97 | os.Exit(1) 98 | }() 99 | 100 | return client.Start() 101 | } 102 | 103 | func printSummary(benchmark *broker.Benchmark, elapsed time.Duration) { 104 | brokerHost := strings.Split(benchmark.BrokerdHost, ":")[0] + ":" + benchmark.BrokerPort 105 | msgSent := int(benchmark.NumMessages) * len(benchmark.PeerHosts) * int(benchmark.Publishers) 106 | msgRecv := int(benchmark.NumMessages) * len(benchmark.PeerHosts) * int(benchmark.Subscribers) 107 | dataSentKB := (msgSent * int(benchmark.MessageSize)) / 1000 108 | dataRecvKB := (msgRecv * int(benchmark.MessageSize)) / 1000 109 | fmt.Println("\nTEST SUMMARY\n") 110 | fmt.Printf("Time Elapsed: %s\n", elapsed.String()) 111 | fmt.Printf("Broker: %s (%s)\n", benchmark.BrokerName, brokerHost) 
112 | fmt.Printf("Nodes: %s\n", benchmark.PeerHosts) 113 | fmt.Printf("Producers per node: %d\n", benchmark.Publishers) 114 | fmt.Printf("Consumers per node: %d\n", benchmark.Subscribers) 115 | fmt.Printf("Messages produced: %d\n", msgSent) 116 | fmt.Printf("Messages consumed: %d\n", msgRecv) 117 | fmt.Printf("Bytes per message: %d\n", benchmark.MessageSize) 118 | fmt.Printf("Data produced (KB): %d\n", dataSentKB) 119 | fmt.Printf("Data consumed (KB): %d\n", dataRecvKB) 120 | fmt.Println("") 121 | } 122 | 123 | func printResults(results []*broker.ResultContainer) { 124 | var ( 125 | producerData = [][]string{} 126 | pubDurations = float32(0) 127 | pubThroughputs = float32(0) 128 | i = 1 129 | ) 130 | for _, peerResults := range results { 131 | for _, result := range peerResults.PublisherResults { 132 | pubDurations += result.Duration 133 | pubThroughputs += result.Throughput 134 | producerData = append(producerData, []string{ 135 | strconv.Itoa(i), 136 | peerResults.Peer, 137 | strconv.FormatBool(result.Err != ""), 138 | strconv.FormatFloat(float64(result.Duration), 'f', 3, 32), 139 | strconv.FormatFloat(float64(result.Throughput), 'f', 3, 32), 140 | }) 141 | i++ 142 | } 143 | } 144 | avgPubDuration := pubDurations / (float32(i) - 1) 145 | avgPubThroughput := pubThroughputs / (float32(i) - 1) 146 | producerData = append(producerData, []string{ 147 | "AVG", 148 | "", 149 | "", 150 | strconv.FormatFloat(float64(avgPubDuration), 'f', 3, 32), 151 | strconv.FormatFloat(float64(avgPubThroughput), 'f', 3, 32), 152 | }) 153 | printTable([]string{ 154 | "Producer", 155 | "Node", 156 | "Error", 157 | "Duration", 158 | "Throughput (msg/sec)", 159 | }, producerData) 160 | 161 | consumerData := [][]string{} 162 | i = 1 163 | var ( 164 | subDurations = float32(0) 165 | subThroughputs = float32(0) 166 | subMins = int64(0) 167 | subQ1s = int64(0) 168 | subQ2s = int64(0) 169 | subQ3s = int64(0) 170 | subMaxes = int64(0) 171 | subMeans = float64(0) 172 | subIQRs = int64(0) 173 | 
subStdDevs = float64(0) 174 | ) 175 | for _, peerResults := range results { 176 | for _, result := range peerResults.SubscriberResults { 177 | subDurations += result.Duration 178 | subThroughputs += result.Throughput 179 | subMins += result.Latency.Min 180 | subQ1s += result.Latency.Q1 181 | subQ2s += result.Latency.Q2 182 | subQ3s += result.Latency.Q3 183 | subMaxes += result.Latency.Max 184 | subMeans += result.Latency.Mean 185 | subIQRs += result.Latency.Q3 - result.Latency.Q1 186 | subStdDevs += result.Latency.StdDev 187 | consumerData = append(consumerData, []string{ 188 | strconv.Itoa(i), 189 | peerResults.Peer, 190 | strconv.FormatBool(result.Err != ""), 191 | strconv.FormatFloat(float64(result.Duration), 'f', 3, 32), 192 | strconv.FormatFloat(float64(result.Throughput), 'f', 3, 32), 193 | strconv.FormatInt(result.Latency.Min, 10), 194 | strconv.FormatInt(result.Latency.Q1, 10), 195 | strconv.FormatInt(result.Latency.Q2, 10), 196 | strconv.FormatInt(result.Latency.Q3, 10), 197 | strconv.FormatInt(result.Latency.Max, 10), 198 | strconv.FormatFloat(result.Latency.Mean, 'f', 3, 64), 199 | strconv.FormatInt(result.Latency.Q3-result.Latency.Q1, 10), 200 | strconv.FormatFloat(result.Latency.StdDev, 'f', 3, 64), 201 | }) 202 | i++ 203 | } 204 | } 205 | var ( 206 | avgSubDuration = subDurations / (float32(i) - 1) 207 | avgSubThroughput = subThroughputs / (float32(i) - 1) 208 | avgSubMin = subMins / (int64(i) - 1) 209 | avgSubQ1 = subQ1s / (int64(i) - 1) 210 | avgSubQ2 = subQ2s / (int64(i) - 1) 211 | avgSubQ3 = subQ3s / (int64(i) - 1) 212 | avgSubMaxes = subMaxes / (int64(i) - 1) 213 | avgSubMeans = subMeans / (float64(i) - 1) 214 | avgSubIQRs = subIQRs / (int64(i) - 1) 215 | avgSubStdDevs = subStdDevs / (float64(i) - 1) 216 | ) 217 | consumerData = append(consumerData, []string{ 218 | "AVG", 219 | "", 220 | "", 221 | strconv.FormatFloat(float64(avgSubDuration), 'f', 3, 32), 222 | strconv.FormatFloat(float64(avgSubThroughput), 'f', 3, 32), 223 | 
strconv.FormatFloat(float64(avgSubMin), 'f', 3, 32), 224 | strconv.FormatFloat(float64(avgSubQ1), 'f', 3, 32), 225 | strconv.FormatFloat(float64(avgSubQ2), 'f', 3, 32), 226 | strconv.FormatFloat(float64(avgSubQ3), 'f', 3, 32), 227 | strconv.FormatFloat(float64(avgSubMaxes), 'f', 3, 32), 228 | strconv.FormatFloat(float64(avgSubMeans), 'f', 3, 32), 229 | strconv.FormatFloat(float64(avgSubIQRs), 'f', 3, 32), 230 | strconv.FormatFloat(float64(avgSubStdDevs), 'f', 3, 32), 231 | }) 232 | printTable([]string{ 233 | "Consumer", 234 | "Node", 235 | "Error", 236 | "Duration", 237 | "Throughput (msg/sec)", 238 | "Min", 239 | "Q1", 240 | "Q2", 241 | "Q3", 242 | "Max", 243 | "Mean", 244 | "IQR", 245 | "Std Dev", 246 | }, consumerData) 247 | fmt.Println("All units ms unless noted otherwise") 248 | } 249 | 250 | func printTable(headers []string, data [][]string) { 251 | table := tablewriter.NewWriter(os.Stdout) 252 | table.SetHeader(headers) 253 | for _, row := range data { 254 | table.Append(row) 255 | } 256 | table.SetAlignment(tablewriter.ALIGN_LEFT) 257 | table.Render() 258 | } 259 | 260 | func brokerList() string { 261 | brokerList := "[" 262 | for i, broker := range brokers { 263 | brokerList = brokerList + broker 264 | if i != len(brokers)-1 { 265 | brokerList = brokerList + "|" 266 | } 267 | } 268 | brokerList = brokerList + "]" 269 | return brokerList 270 | } 271 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/activemq/activemq.go: -------------------------------------------------------------------------------- 1 | package activemq 2 | 3 | import "gopkg.in/stomp.v1" 4 | 5 | const queue = "test" 6 | 7 | // Peer implements the peer interface for ActiveMQ. 8 | type Peer struct { 9 | conn *stomp.Conn 10 | sub *stomp.Subscription 11 | send chan []byte 12 | errors chan error 13 | done chan bool 14 | } 15 | 16 | // NewPeer creates and returns a new Peer for communicating with ActiveMQ. 
17 | func NewPeer(host string) (*Peer, error) { 18 | conn, err := stomp.Dial("tcp", host, stomp.Options{}) 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | return &Peer{ 24 | conn: conn, 25 | send: make(chan []byte), 26 | errors: make(chan error, 1), 27 | done: make(chan bool), 28 | }, nil 29 | } 30 | 31 | // Subscribe prepares the peer to consume messages. 32 | func (a *Peer) Subscribe() error { 33 | sub, err := a.conn.Subscribe(queue, stomp.AckAuto) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | a.sub = sub 39 | return nil 40 | } 41 | 42 | // Recv returns a single message consumed by the peer. Subscribe must be called 43 | // before this. It returns an error if the receive failed. 44 | func (a *Peer) Recv() ([]byte, error) { 45 | message := <-a.sub.C 46 | return message.Body, message.Err 47 | } 48 | 49 | // Send returns a channel on which messages can be sent for publishing. 50 | func (a *Peer) Send() chan<- []byte { 51 | return a.send 52 | } 53 | 54 | // Errors returns the channel on which the peer sends publish errors. 55 | func (a *Peer) Errors() <-chan error { 56 | return a.errors 57 | } 58 | 59 | // Done signals to the peer that message publishing has completed. 60 | func (a *Peer) Done() { 61 | a.done <- true 62 | } 63 | 64 | // Setup prepares the peer for testing. 65 | func (a *Peer) Setup() { 66 | go func() { 67 | for { 68 | select { 69 | case msg := <-a.send: 70 | if err := a.conn.Send(queue, "", msg, nil); err != nil { 71 | a.errors <- err 72 | } 73 | case <-a.done: 74 | return 75 | } 76 | } 77 | }() 78 | } 79 | 80 | // Teardown performs any cleanup logic that needs to be performed after the 81 | // test is complete. 
82 | func (a *Peer) Teardown() { 83 | if a.sub != nil { 84 | a.sub.Unsubscribe() 85 | } 86 | a.conn.Disconnect() 87 | } 88 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/activemq/orchestrator.go: -------------------------------------------------------------------------------- 1 | package activemq 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os/exec" 7 | ) 8 | 9 | const ( 10 | activeMQ = "rmohr/activemq" 11 | internalPort = "61613" 12 | ) 13 | 14 | // Broker implements the broker interface for ActiveMQ. 15 | type Broker struct { 16 | containerID string 17 | } 18 | 19 | // Start will start the message broker and prepare it for testing. 20 | func (a *Broker) Start(host, port string) (interface{}, error) { 21 | containerID, err := exec.Command("/bin/sh", "-c", 22 | fmt.Sprintf("docker run -d -p %s:%s %s", port, internalPort, activeMQ)).Output() 23 | if err != nil { 24 | log.Printf("Failed to start container %s: %s", activeMQ, err.Error()) 25 | return "", err 26 | } 27 | 28 | log.Printf("Started container %s: %s", activeMQ, containerID) 29 | a.containerID = string(containerID) 30 | return string(containerID), nil 31 | } 32 | 33 | // Stop will stop the message broker. 
34 | func (a *Broker) Stop() (interface{}, error) { 35 | containerID, err := exec.Command("/bin/sh", "-c", 36 | fmt.Sprintf("docker kill %s", a.containerID)).Output() 37 | if err != nil { 38 | log.Printf("Failed to stop container %s: %s", activeMQ, err.Error()) 39 | return "", err 40 | } 41 | 42 | log.Printf("Stopped container %s: %s", activeMQ, a.containerID) 43 | return string(containerID), nil 44 | } 45 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/amqp/amqp.go: -------------------------------------------------------------------------------- 1 | package amqp 2 | 3 | import ( 4 | "github.com/streadway/amqp" 5 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker" 6 | ) 7 | 8 | const ( 9 | exchange = "test" 10 | ) 11 | 12 | // Peer implements the peer interface for AMQP brokers. 13 | type Peer struct { 14 | conn *amqp.Connection 15 | queue amqp.Queue 16 | channel *amqp.Channel 17 | inbound <-chan amqp.Delivery 18 | send chan []byte 19 | errors chan error 20 | done chan bool 21 | } 22 | 23 | // NewPeer creates and returns a new Peer for communicating with AMQP brokers. 
24 | func NewPeer(host string) (*Peer, error) { 25 | conn, err := amqp.Dial("amqp://" + host) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | channel, err := conn.Channel() 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | queue, err := channel.QueueDeclare( 36 | broker.GenerateName(), // name 37 | false, // not durable 38 | false, // delete when unused 39 | true, // exclusive 40 | false, // no wait 41 | nil, // arguments 42 | ) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | err = channel.ExchangeDeclare( 48 | exchange, // name 49 | "fanout", // type 50 | false, // not durable 51 | false, // auto-deleted 52 | false, // internal 53 | false, // no wait 54 | nil, // arguments 55 | ) 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | return &Peer{ 61 | conn: conn, 62 | queue: queue, 63 | channel: channel, 64 | send: make(chan []byte), 65 | errors: make(chan error, 1), 66 | done: make(chan bool), 67 | }, nil 68 | } 69 | 70 | // Subscribe prepares the peer to consume messages. 71 | func (a *Peer) Subscribe() error { 72 | err := a.channel.QueueBind( 73 | a.queue.Name, 74 | a.queue.Name, 75 | exchange, 76 | false, 77 | nil, 78 | ) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | a.inbound, err = a.channel.Consume( 84 | a.queue.Name, // queue 85 | "", // consumer 86 | true, // auto ack 87 | false, // exclusive 88 | true, // no local 89 | false, // no wait 90 | nil, // args 91 | ) 92 | if err != nil { 93 | return err 94 | } 95 | 96 | return nil 97 | } 98 | 99 | // Recv returns a single message consumed by the peer. Subscribe must be called 100 | // before this. It returns an error if the receive failed. 101 | func (a *Peer) Recv() ([]byte, error) { 102 | message := <-a.inbound 103 | return message.Body, nil 104 | } 105 | 106 | // Send returns a channel on which messages can be sent for publishing. 
107 | func (a *Peer) Send() chan<- []byte { 108 | return a.send 109 | } 110 | 111 | // Errors returns the channel on which the peer sends publish errors. 112 | func (a *Peer) Errors() <-chan error { 113 | return a.errors 114 | } 115 | 116 | // Done signals to the peer that message publishing has completed. 117 | func (a *Peer) Done() { 118 | a.done <- true 119 | } 120 | 121 | // Setup prepares the peer for testing. 122 | func (a *Peer) Setup() { 123 | go func() { 124 | for { 125 | select { 126 | case msg := <-a.send: 127 | if err := a.channel.Publish( 128 | exchange, // exchange 129 | "", // routing key 130 | false, // mandatory 131 | false, // immediate 132 | amqp.Publishing{Body: msg}, 133 | ); err != nil { 134 | a.errors <- err 135 | } 136 | case <-a.done: 137 | return 138 | } 139 | } 140 | }() 141 | } 142 | 143 | // Teardown performs any cleanup logic that needs to be performed after the 144 | // test is complete. 145 | func (a *Peer) Teardown() { 146 | a.channel.Close() 147 | a.conn.Close() 148 | } 149 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/amqp/rabbitmq/orchestrator.go: -------------------------------------------------------------------------------- 1 | package rabbitmq 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os/exec" 7 | ) 8 | 9 | const ( 10 | rabbitMQ = "dockerfile/rabbitmq" 11 | internalPort = "5672" 12 | ) 13 | 14 | // Broker implements the Broker interface for RabbitMQ. 15 | type Broker struct { 16 | containerID string 17 | } 18 | 19 | // Start will start the message broker and prepare it for testing. 
20 | func (r *Broker) Start(host, port string) (interface{}, error) { 21 | containerID, err := exec.Command("/bin/sh", "-c", 22 | fmt.Sprintf("docker run -d -p %s:%s %s", port, internalPort, rabbitMQ)).Output() 23 | if err != nil { 24 | log.Printf("Failed to start container %s: %s", rabbitMQ, err.Error()) 25 | return "", err 26 | } 27 | 28 | log.Printf("Started container %s: %s", rabbitMQ, containerID) 29 | r.containerID = string(containerID) 30 | return string(containerID), nil 31 | } 32 | 33 | // Stop will stop the message broker. 34 | func (r *Broker) Stop() (interface{}, error) { 35 | containerID, err := exec.Command("/bin/sh", "-c", 36 | fmt.Sprintf("docker kill %s", r.containerID)).Output() 37 | if err != nil { 38 | log.Printf("Failed to stop container %s: %s", rabbitMQ, err.Error()) 39 | return "", err 40 | } 41 | 42 | log.Printf("Stopped container %s: %s", rabbitMQ, r.containerID) 43 | return string(containerID), nil 44 | } 45 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/beanstalkd/beanstalkd.go: -------------------------------------------------------------------------------- 1 | package beanstalkd 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/kr/beanstalk" 7 | ) 8 | 9 | // Peer implements the peer interface for Beanstalkd. 10 | type Peer struct { 11 | conn *beanstalk.Conn 12 | messages chan []byte 13 | send chan []byte 14 | errors chan error 15 | done chan bool 16 | } 17 | 18 | // NewPeer creates and returns a new Peer for communicating with Beanstalkd. 19 | func NewPeer(host string) (*Peer, error) { 20 | conn, err := beanstalk.Dial("tcp", host) 21 | if err != nil { 22 | return nil, err 23 | } 24 | 25 | return &Peer{ 26 | conn: conn, 27 | messages: make(chan []byte, 10000), 28 | send: make(chan []byte), 29 | errors: make(chan error, 1), 30 | done: make(chan bool), 31 | }, nil 32 | } 33 | 34 | // Subscribe prepares the peer to consume messages. 
35 | func (b *Peer) Subscribe() error { 36 | go func() { 37 | for { 38 | id, message, err := b.conn.Reserve(5 * time.Second) 39 | if err != nil { 40 | // Broker shutdown. 41 | return 42 | } 43 | 44 | b.conn.Delete(id) 45 | b.messages <- message 46 | } 47 | }() 48 | return nil 49 | } 50 | 51 | // Recv returns a single message consumed by the peer. Subscribe must be called 52 | // before this. It returns an error if the receive failed. 53 | func (b *Peer) Recv() ([]byte, error) { 54 | return <-b.messages, nil 55 | } 56 | 57 | // Send returns a channel on which messages can be sent for publishing. 58 | func (b *Peer) Send() chan<- []byte { 59 | return b.send 60 | } 61 | 62 | // Errors returns the channel on which the peer sends publish errors. 63 | func (b *Peer) Errors() <-chan error { 64 | return b.errors 65 | } 66 | 67 | // Done signals to the peer that message publishing has completed. 68 | func (b *Peer) Done() { 69 | b.done <- true 70 | } 71 | 72 | // Setup prepares the peer for testing. 73 | func (b *Peer) Setup() { 74 | go func() { 75 | for { 76 | select { 77 | case msg := <-b.send: 78 | if _, err := b.conn.Put(msg, 1, 0, 0); err != nil { 79 | b.errors <- err 80 | } 81 | case <-b.done: 82 | return 83 | } 84 | } 85 | }() 86 | } 87 | 88 | // Teardown performs any cleanup logic that needs to be performed after the 89 | // test is complete. 90 | func (b *Peer) Teardown() { 91 | b.conn.Close() 92 | } 93 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/beanstalkd/orchestrator.go: -------------------------------------------------------------------------------- 1 | package beanstalkd 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os/exec" 7 | ) 8 | 9 | const ( 10 | beanstalkd = "m0ikz/beanstalkd" 11 | internalPort = "11300" 12 | ) 13 | 14 | // Broker implements the broker interface for Beanstalkd. 
15 | type Broker struct { 16 | containerID string 17 | } 18 | 19 | // Start will start the message broker and prepare it for testing. 20 | func (b *Broker) Start(host, port string) (interface{}, error) { 21 | containerID, err := exec.Command("/bin/sh", "-c", 22 | fmt.Sprintf("docker run -d -p %s:%s %s", port, internalPort, beanstalkd)).Output() 23 | if err != nil { 24 | log.Printf("Failed to start container %s: %s", beanstalkd, err.Error()) 25 | return "", err 26 | } 27 | 28 | log.Printf("Started container %s: %s", beanstalkd, containerID) 29 | b.containerID = string(containerID) 30 | return string(containerID), nil 31 | } 32 | 33 | // Stop will stop the message broker. 34 | func (b *Broker) Stop() (interface{}, error) { 35 | containerID, err := exec.Command("/bin/sh", "-c", 36 | fmt.Sprintf("docker kill %s", b.containerID)).Output() 37 | if err != nil { 38 | log.Printf("Failed to stop container %s: %s", beanstalkd, err.Error()) 39 | return "", err 40 | } 41 | 42 | log.Printf("Stopped container %s: %s", beanstalkd, b.containerID) 43 | return string(containerID), nil 44 | } 45 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/broker.go: -------------------------------------------------------------------------------- 1 | package broker 2 | 3 | import "crypto/rand" 4 | 5 | // GenerateName returns a randomly generated, 32-byte alphanumeric name. This 6 | // is useful for cases where multiple clients which need to subscribe to a 7 | // broker topic. 
8 | func GenerateName() string { 9 | alphanum := "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" 10 | bytes := make([]byte, 32) 11 | rand.Read(bytes) 12 | for i, b := range bytes { 13 | bytes[i] = alphanum[b%byte(len(alphanum))] 14 | } 15 | return string(bytes) 16 | } 17 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/kafka/kafka.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/Shopify/sarama" 7 | ) 8 | 9 | const topic = "test" 10 | 11 | // Peer implements the peer interface for Kafka. 12 | type Peer struct { 13 | client sarama.Client 14 | producer sarama.AsyncProducer 15 | consumer sarama.PartitionConsumer 16 | send chan []byte 17 | errors chan error 18 | done chan bool 19 | } 20 | 21 | // NewPeer creates and returns a new Peer for communicating with Kafka. 22 | func NewPeer(host string) (*Peer, error) { 23 | host = strings.Split(host, ":")[0] + ":9092" 24 | config := sarama.NewConfig() 25 | client, err := sarama.NewClient([]string{host}, config) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | producer, err := sarama.NewAsyncProducer([]string{host}, config) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | consumer, err := sarama.NewConsumer([]string{host}, config) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest) 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | return &Peer{ 46 | client: client, 47 | producer: producer, 48 | consumer: partitionConsumer, 49 | send: make(chan []byte), 50 | errors: make(chan error, 1), 51 | done: make(chan bool), 52 | }, nil 53 | } 54 | 55 | // Subscribe prepares the peer to consume messages. 56 | func (k *Peer) Subscribe() error { 57 | return nil 58 | } 59 | 60 | // Recv returns a single message consumed by the peer. 
Subscribe must be called 61 | // before this. It returns an error if the receive failed. 62 | func (k *Peer) Recv() ([]byte, error) { 63 | msg := <-k.consumer.Messages() 64 | return msg.Value, nil 65 | } 66 | 67 | // Send returns a channel on which messages can be sent for publishing. 68 | func (k *Peer) Send() chan<- []byte { 69 | return k.send 70 | } 71 | 72 | // Errors returns the channel on which the peer sends publish errors. 73 | func (k *Peer) Errors() <-chan error { 74 | return k.errors 75 | } 76 | 77 | // Done signals to the peer that message publishing has completed. 78 | func (k *Peer) Done() { 79 | k.done <- true 80 | } 81 | 82 | // Setup prepares the peer for testing. 83 | func (k *Peer) Setup() { 84 | go func() { 85 | for { 86 | select { 87 | case msg := <-k.send: 88 | if err := k.sendMessage(msg); err != nil { 89 | k.errors <- err 90 | } 91 | case <-k.done: 92 | return 93 | } 94 | } 95 | }() 96 | } 97 | 98 | func (k *Peer) sendMessage(message []byte) error { 99 | select { 100 | case k.producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.ByteEncoder(message)}: 101 | return nil 102 | case err := <-k.producer.Errors(): 103 | return err.Err 104 | } 105 | } 106 | 107 | // Teardown performs any cleanup logic that needs to be performed after the 108 | // test is complete. 
109 | func (k *Peer) Teardown() { 110 | k.producer.Close() 111 | if k.consumer != nil { 112 | k.consumer.Close() 113 | } 114 | k.client.Close() 115 | } 116 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/kafka/orchestrator.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os/exec" 7 | "time" 8 | ) 9 | 10 | const ( 11 | zookeeper = "jplock/zookeeper:3.4.6" 12 | zookeeperCmd = "docker run -d -p %s:%s %s" 13 | zookeeperPort = "2181" 14 | kafka = "ches/kafka" 15 | kafkaPort = "9092" 16 | jmxPort = "7203" 17 | // TODO: Use --link. 18 | kafkaCmd = `docker run -d \ 19 | -h %s \ 20 | -p %s:%s -p %s:%s \ 21 | -e EXPOSED_HOST=%s \ 22 | -e ZOOKEEPER_IP=%s %s` 23 | ) 24 | 25 | // Broker implements the broker interface for Kafka. 26 | type Broker struct { 27 | kafkaContainerID string 28 | zookeeperContainerID string 29 | } 30 | 31 | // Start will start the message broker and prepare it for testing. 
32 | func (k *Broker) Start(host, port string) (interface{}, error) { 33 | if port == zookeeperPort || port == jmxPort { 34 | return nil, fmt.Errorf("Port %s is reserved", port) 35 | } 36 | 37 | cmd := fmt.Sprintf(zookeeperCmd, zookeeperPort, zookeeperPort, zookeeper) 38 | zkContainerID, err := exec.Command("/bin/sh", "-c", cmd).Output() 39 | if err != nil { 40 | log.Printf("Failed to start container %s: %s", zookeeper, err.Error()) 41 | return "", err 42 | } 43 | log.Printf("Started container %s: %s", zookeeper, zkContainerID) 44 | 45 | cmd = fmt.Sprintf(kafkaCmd, host, kafkaPort, kafkaPort, jmxPort, jmxPort, host, host, kafka) 46 | kafkaContainerID, err := exec.Command("/bin/sh", "-c", cmd).Output() 47 | if err != nil { 48 | log.Printf("Failed to start container %s: %s", kafka, err.Error()) 49 | k.Stop() 50 | return "", err 51 | } 52 | 53 | log.Printf("Started container %s: %s", kafka, kafkaContainerID) 54 | k.kafkaContainerID = string(kafkaContainerID) 55 | k.zookeeperContainerID = string(zkContainerID) 56 | 57 | // NOTE: Leader election can take a while. For now, just sleep to try to 58 | // ensure the cluster is ready. Is there a way to avoid this or make it 59 | // better? 60 | time.Sleep(time.Minute) 61 | 62 | return string(kafkaContainerID), nil 63 | } 64 | 65 | // Stop will stop the message broker. 
66 | func (k *Broker) Stop() (interface{}, error) { 67 | _, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("docker kill %s", k.zookeeperContainerID)).Output() 68 | if err != nil { 69 | log.Printf("Failed to stop container %s: %s", zookeeper, err.Error()) 70 | } else { 71 | log.Printf("Stopped container %s: %s", zookeeper, k.zookeeperContainerID) 72 | } 73 | 74 | kafkaContainerID, e := exec.Command("/bin/sh", "-c", fmt.Sprintf("docker kill %s", k.kafkaContainerID)).Output() 75 | if e != nil { 76 | log.Printf("Failed to stop container %s: %s", kafka, err.Error()) 77 | err = e 78 | } else { 79 | log.Printf("Stopped container %s: %s", kafka, k.kafkaContainerID) 80 | } 81 | 82 | return string(kafkaContainerID), err 83 | } 84 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/kestrel/kestrel.go: -------------------------------------------------------------------------------- 1 | package kestrel 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/alindeman/go-kestrel" 9 | ) 10 | 11 | const ( 12 | queue = "test" 13 | 14 | // bufferSize is the number of messages we try to publish and consume at a 15 | // time to increase throughput. TODO: this might need tweaking. 16 | bufferSize = 100 17 | ) 18 | 19 | // Peer implements the peer interface for Kestrel. 20 | type Peer struct { 21 | client *kestrel.Client 22 | messages chan []byte 23 | send chan []byte 24 | errors chan error 25 | done chan bool 26 | flush chan bool 27 | subscriber bool 28 | } 29 | 30 | // NewPeer creates and returns a new Peer for communicating with Kestrel. 
31 | func NewPeer(host string) (*Peer, error) { 32 | addrAndPort := strings.Split(host, ":") 33 | if len(addrAndPort) < 2 { 34 | return nil, fmt.Errorf("Invalid host: %s", host) 35 | } 36 | 37 | port, err := strconv.Atoi(addrAndPort[1]) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | client := kestrel.NewClient(addrAndPort[0], port) 43 | if err := client.FlushAllQueues(); err != nil { 44 | client.Close() 45 | return nil, err 46 | } 47 | 48 | return &Peer{ 49 | client: client, 50 | messages: make(chan []byte, 10000), 51 | send: make(chan []byte), 52 | errors: make(chan error, 1), 53 | done: make(chan bool), 54 | flush: make(chan bool), 55 | }, nil 56 | } 57 | 58 | // Subscribe prepares the peer to consume messages. 59 | func (k *Peer) Subscribe() error { 60 | k.subscriber = true 61 | go func() { 62 | for { 63 | items, err := k.client.Get(queue, bufferSize, 0, 0) 64 | if err != nil { 65 | // Broker shutdown. 66 | return 67 | } 68 | for _, item := range items { 69 | k.messages <- item.Data 70 | } 71 | } 72 | }() 73 | return nil 74 | } 75 | 76 | // Recv returns a single message consumed by the peer. Subscribe must be called 77 | // before this. It returns an error if the receive failed. 78 | func (k *Peer) Recv() ([]byte, error) { 79 | return <-k.messages, nil 80 | } 81 | 82 | // Send returns a channel on which messages can be sent for publishing. 83 | func (k *Peer) Send() chan<- []byte { 84 | return k.send 85 | } 86 | 87 | // Errors returns the channel on which the peer sends publish errors. 88 | func (k *Peer) Errors() <-chan error { 89 | return k.errors 90 | } 91 | 92 | // Done signals to the peer that message publishing has completed. 93 | func (k *Peer) Done() { 94 | k.done <- true 95 | <-k.flush 96 | } 97 | 98 | // Setup prepares the peer for testing. 
99 | func (k *Peer) Setup() { 100 | buffer := make([][]byte, bufferSize) 101 | go func() { 102 | i := 0 103 | for { 104 | select { 105 | case msg := <-k.send: 106 | buffer[i] = msg 107 | i++ 108 | if i == bufferSize { 109 | if _, err := k.client.Put(queue, buffer); err != nil { 110 | k.errors <- err 111 | } 112 | i = 0 113 | } 114 | case <-k.done: 115 | if i > 0 { 116 | if _, err := k.client.Put(queue, buffer[0:i]); err != nil { 117 | k.errors <- err 118 | } 119 | } 120 | k.flush <- true 121 | return 122 | } 123 | } 124 | }() 125 | } 126 | 127 | // Teardown performs any cleanup logic that needs to be performed after the 128 | // test is complete. 129 | func (k *Peer) Teardown() { 130 | k.client.Close() 131 | } 132 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/kestrel/orchestrator.go: -------------------------------------------------------------------------------- 1 | package kestrel 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os/exec" 7 | ) 8 | 9 | const ( 10 | kestrelImage = "thefactory/kestrel" 11 | internalPort = "2229" 12 | ) 13 | 14 | // Broker implements the broker interface for Kestrel. 15 | type Broker struct { 16 | containerID string 17 | } 18 | 19 | // Start will start the message broker and prepare it for testing. 20 | func (k *Broker) Start(host, port string) (interface{}, error) { 21 | containerID, err := exec.Command("/bin/sh", "-c", 22 | fmt.Sprintf("docker run -d -p %s:%s %s", port, internalPort, kestrelImage)).Output() 23 | if err != nil { 24 | log.Printf("Failed to start container %s: %s", kestrelImage, err.Error()) 25 | return "", err 26 | } 27 | 28 | log.Printf("Started container %s: %s", kestrelImage, containerID) 29 | k.containerID = string(containerID) 30 | return string(containerID), nil 31 | } 32 | 33 | // Stop will stop the message broker. 
34 | func (k *Broker) Stop() (interface{}, error) { 35 | containerID, err := exec.Command("/bin/sh", "-c", 36 | fmt.Sprintf("docker kill %s", k.containerID)).Output() 37 | if err != nil { 38 | log.Printf("Failed to stop container %s: %s", kestrelImage, err.Error()) 39 | return "", err 40 | } 41 | 42 | log.Printf("Stopped container %s: %s", kestrelImage, k.containerID) 43 | return string(containerID), nil 44 | } 45 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/nats/nats.go: -------------------------------------------------------------------------------- 1 | package nats 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/nats-io/nats" 8 | ) 9 | 10 | const ( 11 | subject = "test" 12 | 13 | // Maximum bytes we will get behind before we start slowing down publishing. 14 | maxBytesBehind = 1024 * 1024 // 1MB 15 | 16 | // Maximum msgs we will get behind before we start slowing down publishing. 17 | maxMsgsBehind = 65536 // 64k 18 | 19 | // Time to delay publishing when we are behind. 20 | delay = 1 * time.Millisecond 21 | ) 22 | 23 | // Peer implements the peer interface for NATS. 24 | type Peer struct { 25 | conn *nats.Conn 26 | messages chan []byte 27 | send chan []byte 28 | errors chan error 29 | done chan bool 30 | } 31 | 32 | // NewPeer creates and returns a new Peer for communicating with NATS. 33 | func NewPeer(host string) (*Peer, error) { 34 | conn, err := nats.Connect(fmt.Sprintf("nats://%s", host)) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | // We want to be alerted if we get disconnected, this will be due to Slow 40 | // Consumer. 41 | conn.Opts.AllowReconnect = false 42 | 43 | return &Peer{ 44 | conn: conn, 45 | messages: make(chan []byte, 10000), 46 | send: make(chan []byte), 47 | errors: make(chan error, 1), 48 | done: make(chan bool), 49 | }, nil 50 | } 51 | 52 | // Subscribe prepares the peer to consume messages. 
53 | func (n *Peer) Subscribe() error { 54 | n.conn.Subscribe(subject, func(message *nats.Msg) { 55 | n.messages <- message.Data 56 | }) 57 | return nil 58 | } 59 | 60 | // Recv returns a single message consumed by the peer. Subscribe must be called 61 | // before this. It returns an error if the receive failed. 62 | func (n *Peer) Recv() ([]byte, error) { 63 | return <-n.messages, nil 64 | } 65 | 66 | // Send returns a channel on which messages can be sent for publishing. 67 | func (n *Peer) Send() chan<- []byte { 68 | return n.send 69 | } 70 | 71 | // Errors returns the channel on which the peer sends publish errors. 72 | func (n *Peer) Errors() <-chan error { 73 | return n.errors 74 | } 75 | 76 | // Done signals to the peer that message publishing has completed. 77 | func (n *Peer) Done() { 78 | n.done <- true 79 | } 80 | 81 | // Setup prepares the peer for testing. 82 | func (n *Peer) Setup() { 83 | go func() { 84 | for { 85 | select { 86 | case msg := <-n.send: 87 | if err := n.sendMessage(msg); err != nil { 88 | n.errors <- err 89 | } 90 | case <-n.done: 91 | return 92 | } 93 | } 94 | }() 95 | } 96 | 97 | func (n *Peer) sendMessage(message []byte) error { 98 | // Check if we are behind by >= 1MB bytes. 99 | bytesDeltaOver := n.conn.OutBytes-n.conn.InBytes >= maxBytesBehind 100 | 101 | // Check if we are behind by >= 65k msgs. 102 | msgsDeltaOver := n.conn.OutMsgs-n.conn.InMsgs >= maxMsgsBehind 103 | 104 | // If we are behind on either condition, sleep a bit to catch up receiver. 105 | if bytesDeltaOver || msgsDeltaOver { 106 | time.Sleep(delay) 107 | } 108 | 109 | return n.conn.Publish(subject, message) 110 | } 111 | 112 | // Teardown performs any cleanup logic that needs to be performed after the 113 | // test is complete. 
114 | func (n *Peer) Teardown() { 115 | n.conn.Close() 116 | } 117 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/nats/orchestrator.go: -------------------------------------------------------------------------------- 1 | package nats 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os/exec" 7 | ) 8 | 9 | const ( 10 | gnatsd = "nats" 11 | internalPort = "4222" 12 | ) 13 | 14 | // Broker implements the broker interface for NATS. 15 | type Broker struct { 16 | containerID string 17 | } 18 | 19 | // Start will start the message broker and prepare it for testing. 20 | func (n *Broker) Start(host, port string) (interface{}, error) { 21 | containerID, err := exec.Command("/bin/sh", "-c", 22 | fmt.Sprintf("docker run -d -p %s:%s %s", port, internalPort, gnatsd)).Output() 23 | if err != nil { 24 | log.Printf("Failed to start container %s: %s", gnatsd, err.Error()) 25 | return "", err 26 | } 27 | 28 | log.Printf("Started container %s: %s", gnatsd, containerID) 29 | n.containerID = string(containerID) 30 | return string(containerID), nil 31 | } 32 | 33 | // Stop will stop the message broker. 
34 | func (n *Broker) Stop() (interface{}, error) { 35 | containerID, err := exec.Command("/bin/sh", "-c", 36 | fmt.Sprintf("docker kill %s", n.containerID)).Output() 37 | if err != nil { 38 | log.Printf("Failed to stop container %s: %s", gnatsd, err.Error()) 39 | return "", err 40 | } 41 | 42 | log.Printf("Stopped container %s: %s", gnatsd, n.containerID) 43 | n.containerID = "" 44 | return string(containerID), nil 45 | } 46 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/nsq/nsq.go: -------------------------------------------------------------------------------- 1 | package nsq 2 | 3 | import ( 4 | "github.com/bitly/go-nsq" 5 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker" 6 | ) 7 | 8 | const ( 9 | topic = "test" 10 | 11 | // bufferSize is the number of messages we try to publish at a time to 12 | // increase throughput. TODO: this might need tweaking. 13 | bufferSize = 50 14 | ) 15 | 16 | // Peer implements the peer interface for NSQ. 17 | type Peer struct { 18 | producer *nsq.Producer 19 | consumer *nsq.Consumer 20 | host string 21 | messages chan []byte 22 | send chan []byte 23 | errors chan error 24 | done chan bool 25 | flush chan bool 26 | } 27 | 28 | // NewPeer creates and returns a new Peer for communicating with NSQ. 29 | func NewPeer(host string) (*Peer, error) { 30 | producer, err := nsq.NewProducer(host, nsq.NewConfig()) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return &Peer{ 36 | host: host, 37 | producer: producer, 38 | messages: make(chan []byte, 10000), 39 | send: make(chan []byte), 40 | errors: make(chan error, 1), 41 | done: make(chan bool), 42 | flush: make(chan bool), 43 | }, nil 44 | } 45 | 46 | // Subscribe prepares the peer to consume messages. 
47 | func (n *Peer) Subscribe() error { 48 | consumer, err := nsq.NewConsumer(topic, broker.GenerateName(), nsq.NewConfig()) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { 54 | n.messages <- message.Body 55 | return nil 56 | })) 57 | 58 | if err := consumer.ConnectToNSQD(n.host); err != nil { 59 | return err 60 | } 61 | 62 | n.consumer = consumer 63 | return nil 64 | } 65 | 66 | // Recv returns a single message consumed by the peer. Subscribe must be called 67 | // before this. It returns an error if the receive failed. 68 | func (n *Peer) Recv() ([]byte, error) { 69 | return <-n.messages, nil 70 | } 71 | 72 | // Send returns a channel on which messages can be sent for publishing. 73 | func (n *Peer) Send() chan<- []byte { 74 | return n.send 75 | } 76 | 77 | // Errors returns the channel on which the peer sends publish errors. 78 | func (n *Peer) Errors() <-chan error { 79 | return n.errors 80 | } 81 | 82 | // Done signals to the peer that message publishing has completed. 83 | func (n *Peer) Done() { 84 | n.done <- true 85 | <-n.flush 86 | } 87 | 88 | // Setup prepares the peer for testing. 89 | func (n *Peer) Setup() { 90 | buffer := make([][]byte, bufferSize) 91 | go func() { 92 | i := 0 93 | for { 94 | select { 95 | case msg := <-n.send: 96 | buffer[i] = msg 97 | i++ 98 | if i == bufferSize { 99 | if err := n.producer.MultiPublishAsync(topic, buffer, nil, nil); err != nil { 100 | n.errors <- err 101 | } 102 | i = 0 103 | } 104 | case <-n.done: 105 | if i > 0 { 106 | if err := n.producer.MultiPublishAsync(topic, buffer[0:i], nil, nil); err != nil { 107 | n.errors <- err 108 | } 109 | } 110 | n.flush <- true 111 | return 112 | } 113 | } 114 | }() 115 | 116 | } 117 | 118 | // Teardown performs any cleanup logic that needs to be performed after the 119 | // test is complete. 
func (n *Peer) Teardown() {
	// Stop the publishing side first.
	n.producer.Stop()
	// The consumer is only set when Subscribe succeeded.
	if n.consumer != nil {
		// Stop is asynchronous; block on StopChan so the consumer has fully
		// shut down before returning.
		n.consumer.Stop()
		<-n.consumer.StopChan
	}
}
30 | func (n *Broker) Start(host, port string) (interface{}, error) { 31 | if port == nsqlookupdPort1 || port == nsqlookupdPort2 || port == nsqdPort { 32 | return nil, fmt.Errorf("Port %s is reserved", port) 33 | } 34 | 35 | cmd := fmt.Sprintf(nsqlookupdCmd, nsqlookupdPort1, nsqlookupdPort1, nsqlookupdPort2, 36 | nsqlookupdPort2, nsqlookupd) 37 | nsqlookupdContainerID, err := exec.Command("/bin/sh", "-c", cmd).Output() 38 | if err != nil { 39 | log.Printf("Failed to start container %s: %s", nsqlookupd, err.Error()) 40 | return "", err 41 | } 42 | log.Printf("Started container %s: %s", nsqlookupd, nsqlookupdContainerID) 43 | 44 | cmd = fmt.Sprintf(nsqdCmd, port, internalPort, nsqdPort, nsqdPort, nsqd, host, 45 | host, nsqlookupdPort1) 46 | nsqdContainerID, err := exec.Command("/bin/sh", "-c", cmd).Output() 47 | if err != nil { 48 | log.Printf("Failed to start container %s: %s", nsqd, err.Error()) 49 | return "", err 50 | } 51 | 52 | log.Printf("Started container %s: %s", nsqd, nsqdContainerID) 53 | n.nsqlookupdContainerID = string(nsqlookupdContainerID) 54 | n.nsqdContainerID = string(nsqdContainerID) 55 | return string(nsqdContainerID), nil 56 | } 57 | 58 | // Stop will stop the message broker. 
59 | func (n *Broker) Stop() (interface{}, error) { 60 | _, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("docker kill %s", n.nsqlookupdContainerID)).Output() 61 | if err != nil { 62 | log.Printf("Failed to stop container %s: %s", nsqlookupd, err.Error()) 63 | } else { 64 | log.Printf("Stopped container %s: %s", nsqlookupd, n.nsqlookupdContainerID) 65 | } 66 | 67 | nsqdContainerID, e := exec.Command("/bin/sh", "-c", fmt.Sprintf("docker kill %s", n.nsqdContainerID)).Output() 68 | if e != nil { 69 | log.Printf("Failed to stop container %s: %s", nsqd, err.Error()) 70 | err = e 71 | } else { 72 | log.Printf("Stopped container %s: %s", nsqd, n.nsqdContainerID) 73 | } 74 | 75 | return string(nsqdContainerID), err 76 | } 77 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/pubsub/orchestrator.go: -------------------------------------------------------------------------------- 1 | package pubsub 2 | 3 | import ( 4 | "errors" 5 | "io/ioutil" 6 | "log" 7 | 8 | "golang.org/x/net/context" 9 | "golang.org/x/oauth2" 10 | "golang.org/x/oauth2/google" 11 | "google.golang.org/cloud" 12 | "google.golang.org/cloud/pubsub" 13 | ) 14 | 15 | const topic = "test" 16 | 17 | // Broker implements the broker interface for Google Cloud Pub/Sub. 18 | type Broker struct { 19 | ProjectID string 20 | JSONKey string 21 | } 22 | 23 | // Start will start the message broker and prepare it for testing. 
24 | func (c *Broker) Start(host, port string) (interface{}, error) { 25 | ctx, err := newContext(c.ProjectID, c.JSONKey) 26 | if err != nil { 27 | return "", err 28 | } 29 | 30 | exists, err := pubsub.TopicExists(ctx, topic) 31 | if err != nil { 32 | log.Printf("Failed to check Cloud Pub/Sub topic: %s", err.Error()) 33 | return "", err 34 | } 35 | 36 | if exists { 37 | if err := pubsub.DeleteTopic(ctx, topic); err != nil { 38 | log.Printf("Failed to delete Cloud Pub/Sub topic: %s", err.Error()) 39 | return "", err 40 | } 41 | } 42 | 43 | if err := pubsub.CreateTopic(ctx, topic); err != nil { 44 | log.Printf("Failed to create Cloud Pub/Sub topic: %s", err.Error()) 45 | return "", err 46 | } 47 | 48 | log.Println("Created Cloud Pub/Sub topic") 49 | 50 | return "", nil 51 | } 52 | 53 | // Stop will stop the message broker. 54 | func (c *Broker) Stop() (interface{}, error) { 55 | ctx, err := newContext(c.ProjectID, c.JSONKey) 56 | if err != nil { 57 | return "", err 58 | } 59 | 60 | if err := pubsub.DeleteTopic(ctx, topic); err != nil { 61 | log.Printf("Failed to delete Cloud Pub/Sub topic: %s", err.Error()) 62 | return "", err 63 | } 64 | 65 | log.Println("Deleted Cloud Pub/Sub topic") 66 | return "", err 67 | } 68 | 69 | func newContext(projectID, jsonKey string) (context.Context, error) { 70 | if projectID == "" { 71 | return nil, errors.New("project id not provided") 72 | } 73 | 74 | if jsonKey == "" { 75 | return nil, errors.New("JSON key not provided") 76 | } 77 | 78 | key, err := ioutil.ReadFile(jsonKey) 79 | if err != nil { 80 | return nil, err 81 | } 82 | 83 | conf, err := google.JWTConfigFromJSON( 84 | key, 85 | pubsub.ScopeCloudPlatform, 86 | pubsub.ScopePubSub, 87 | ) 88 | if err != nil { 89 | return nil, err 90 | } 91 | 92 | ctx := cloud.NewContext(projectID, conf.Client(oauth2.NoContext)) 93 | return ctx, nil 94 | } 95 | -------------------------------------------------------------------------------- /flotilla-server/daemon/broker/pubsub/pubsub.go: 
-------------------------------------------------------------------------------- 1 | package pubsub 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strings" 7 | "sync/atomic" 8 | 9 | "golang.org/x/net/context" 10 | "google.golang.org/cloud/pubsub" 11 | 12 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker" 13 | ) 14 | 15 | const ( 16 | stopped = 1 17 | 18 | // bufferSize is the number of messages we try to publish and consume at a 19 | // time to increase throughput. TODO: this might need tweaking. 20 | bufferSize = 100 21 | ) 22 | 23 | // Peer implements the peer interface for Google Cloud Pub/Sub. 24 | type Peer struct { 25 | context context.Context 26 | subscription string 27 | messages chan []byte 28 | stopped int32 29 | acks chan []string 30 | ackDone chan bool 31 | send chan []byte 32 | errors chan error 33 | done chan bool 34 | flush chan bool 35 | } 36 | 37 | // NewPeer creates and returns a new Peer for communicating with Google Cloud 38 | // Pub/Sub. 39 | func NewPeer(projectID, jsonKey string) (*Peer, error) { 40 | ctx, err := newContext(projectID, jsonKey) 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | return &Peer{ 46 | context: ctx, 47 | messages: make(chan []byte, 10000), 48 | acks: make(chan []string, 100), 49 | ackDone: make(chan bool, 1), 50 | send: make(chan []byte), 51 | errors: make(chan error, 1), 52 | done: make(chan bool), 53 | flush: make(chan bool), 54 | }, nil 55 | } 56 | 57 | // Subscribe prepares the peer to consume messages. 58 | func (c *Peer) Subscribe() error { 59 | // Subscription names must start with a lowercase letter, end with a 60 | // lowercase letter or number, and contain only lowercase letters, numbers, 61 | // dashes, underscores or periods. 
// Done signals to the peer that message publishing has completed.
func (c *Peer) Done() {
	c.done <- true
	// Block until the publishing goroutine (started in Setup) has flushed any
	// partially filled buffer, so no messages are lost.
	<-c.flush
}
121 | func (c *Peer) Setup() { 122 | buffer := make([]*pubsub.Message, bufferSize) 123 | go func() { 124 | i := 0 125 | for { 126 | select { 127 | case msg := <-c.send: 128 | buffer[i] = &pubsub.Message{Data: msg} 129 | i++ 130 | if i == bufferSize { 131 | if _, err := pubsub.Publish(c.context, topic, buffer...); err != nil { 132 | c.errors <- err 133 | } 134 | i = 0 135 | } 136 | case <-c.done: 137 | if i > 0 { 138 | if _, err := pubsub.Publish(c.context, topic, buffer[0:i]...); err != nil { 139 | c.errors <- err 140 | } 141 | } 142 | c.flush <- true 143 | return 144 | } 145 | } 146 | }() 147 | } 148 | 149 | // Teardown performs any cleanup logic that needs to be performed after the 150 | // test is complete. 151 | func (c *Peer) Teardown() { 152 | atomic.StoreInt32(&c.stopped, stopped) 153 | c.ackDone <- true 154 | pubsub.DeleteSub(c.context, c.subscription) 155 | } 156 | 157 | func (c *Peer) ack() { 158 | for { 159 | select { 160 | case ids := <-c.acks: 161 | if len(ids) > 0 { 162 | if err := pubsub.Ack(c.context, c.subscription, ids...); err != nil { 163 | log.Println("Failed to ack messages") 164 | } 165 | } 166 | case <-c.ackDone: 167 | return 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /flotilla-server/daemon/daemon.go: -------------------------------------------------------------------------------- 1 | package daemon 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "log" 8 | 9 | "github.com/go-mangos/mangos" 10 | "github.com/go-mangos/mangos/protocol/rep" 11 | "github.com/go-mangos/mangos/transport/tcp" 12 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/activemq" 13 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/amqp" 14 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/amqp/rabbitmq" 15 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/beanstalkd" 16 | 
"github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/kafka" 17 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/kestrel" 18 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/nats" 19 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/nsq" 20 | "github.com/tylertreat/Flotilla/flotilla-server/daemon/broker/pubsub" 21 | ) 22 | 23 | type daemon string 24 | type operation string 25 | 26 | const ( 27 | start operation = "start" 28 | stop operation = "stop" 29 | run operation = "run" 30 | sub operation = "subscribers" 31 | pub operation = "publishers" 32 | results operation = "results" 33 | teardown operation = "teardown" 34 | ) 35 | 36 | // These are supported message brokers. 37 | const ( 38 | NATS = "nats" 39 | Beanstalkd = "beanstalkd" 40 | Kafka = "kafka" 41 | Kestrel = "kestrel" 42 | ActiveMQ = "activemq" 43 | RabbitMQ = "rabbitmq" 44 | NSQ = "nsq" 45 | CloudPubSub = "pubsub" 46 | ) 47 | 48 | type request struct { 49 | Operation operation `json:"operation"` 50 | Broker string `json:"broker"` 51 | Port string `json:"port"` 52 | NumMessages int `json:"num_messages"` 53 | MessageSize int64 `json:"message_size"` 54 | Count int `json:"count"` 55 | Host string `json:"host"` 56 | } 57 | 58 | type response struct { 59 | Success bool `json:"success"` 60 | Message string `json:"message"` 61 | Result interface{} `json:"result"` 62 | PubResults []*result `json:"pub_results,omitempty"` 63 | SubResults []*result `json:"sub_results,omitempty"` 64 | } 65 | 66 | type result struct { 67 | Duration float32 `json:"duration,omitempty"` 68 | Throughput float32 `json:"throughput,omitempty"` 69 | Latency *latencyResults `json:"latency,omitempty"` 70 | Err string `json:"error,omitempty"` 71 | } 72 | 73 | // broker handles configuring the message broker for testing. 74 | type broker interface { 75 | // Start will start the message broker and prepare it for testing. 
// NewDaemon creates and returns a new Daemon from the provided Config. An
// error is returned if the Daemon cannot be created.
func NewDaemon(config *Config) (*Daemon, error) {
	rep, err := rep.NewSocket()
	if err != nil {
		return nil, err
	}
	// The daemon serves requests over the mangos rep socket via TCP.
	rep.AddTransport(tcp.NewTransport())
	// No broker and no peers yet; these are populated by client requests.
	return &Daemon{rep, nil, []*publisher{}, []*subscriber{}, config}, nil
}
This is a blocking 136 | // call. 137 | func (d *Daemon) Start(port int) error { 138 | if err := d.Listen(fmt.Sprintf("tcp://:%d", port)); err != nil { 139 | return err 140 | } 141 | return d.loop() 142 | } 143 | 144 | func (d *Daemon) loop() error { 145 | for { 146 | msg, err := d.Recv() 147 | if err != nil { 148 | log.Println(err) 149 | continue 150 | } 151 | 152 | var req request 153 | if err := json.Unmarshal(msg, &req); err != nil { 154 | log.Println("Invalid peer request:", err) 155 | d.sendResponse(response{ 156 | Success: false, 157 | Message: fmt.Sprintf("Invalid request: %s", err.Error()), 158 | }) 159 | continue 160 | } 161 | 162 | resp := d.processRequest(req) 163 | d.sendResponse(resp) 164 | } 165 | } 166 | 167 | func (d *Daemon) sendResponse(rep response) { 168 | repJSON, err := json.Marshal(rep) 169 | if err != nil { 170 | // This is not recoverable. 171 | panic(err) 172 | } 173 | 174 | if err := d.Send(repJSON); err != nil { 175 | log.Println(err) 176 | } 177 | } 178 | 179 | func (d *Daemon) processRequest(req request) response { 180 | var ( 181 | response response 182 | err error 183 | ) 184 | switch req.Operation { 185 | case start: 186 | response.Result, err = d.processBrokerStart(req.Broker, req.Host, req.Port) 187 | case stop: 188 | response.Result, err = d.processBrokerStop() 189 | case pub: 190 | err = d.processPub(req) 191 | case sub: 192 | err = d.processSub(req) 193 | case run: 194 | err = d.processPublisherStart() 195 | case results: 196 | response.PubResults, response.SubResults, err = d.processResults() 197 | if err != nil { 198 | response.Message = err.Error() 199 | err = nil 200 | } 201 | case teardown: 202 | d.processTeardown() 203 | default: 204 | err = fmt.Errorf("Invalid operation %s", req.Operation) 205 | } 206 | 207 | if err != nil { 208 | response.Message = err.Error() 209 | } else { 210 | response.Success = true 211 | } 212 | 213 | return response 214 | } 215 | func (d *Daemon) processBrokerStart(broker, host, port string) 
(interface{}, error) { 216 | if d.broker != nil { 217 | return "", errors.New("Broker already running") 218 | } 219 | 220 | switch broker { 221 | case NATS: 222 | d.broker = &nats.Broker{} 223 | case Beanstalkd: 224 | d.broker = &beanstalkd.Broker{} 225 | case Kafka: 226 | d.broker = &kafka.Broker{} 227 | case Kestrel: 228 | d.broker = &kestrel.Broker{} 229 | case ActiveMQ: 230 | d.broker = &activemq.Broker{} 231 | case RabbitMQ: 232 | d.broker = &rabbitmq.Broker{} 233 | case NSQ: 234 | d.broker = &nsq.Broker{} 235 | case CloudPubSub: 236 | d.broker = &pubsub.Broker{ 237 | ProjectID: d.config.GoogleCloudProjectID, 238 | JSONKey: d.config.GoogleCloudJSONKey, 239 | } 240 | default: 241 | return "", fmt.Errorf("Invalid broker %s", broker) 242 | } 243 | 244 | result, err := d.broker.Start(host, port) 245 | if err != nil { 246 | d.broker = nil 247 | } 248 | return result, err 249 | } 250 | 251 | func (d *Daemon) processBrokerStop() (interface{}, error) { 252 | if d.broker == nil { 253 | return "", errors.New("No broker running") 254 | } 255 | 256 | result, err := d.broker.Stop() 257 | if err == nil { 258 | d.broker = nil 259 | } 260 | return result, err 261 | } 262 | 263 | func (d *Daemon) processPub(req request) error { 264 | for i := 0; i < req.Count; i++ { 265 | sender, err := d.newPeer(req.Broker, req.Host) 266 | if err != nil { 267 | return err 268 | } 269 | 270 | d.publishers = append(d.publishers, &publisher{ 271 | peer: sender, 272 | id: i, 273 | numMessages: req.NumMessages, 274 | messageSize: req.MessageSize, 275 | }) 276 | } 277 | 278 | return nil 279 | } 280 | 281 | func (d *Daemon) processSub(req request) error { 282 | for i := 0; i < req.Count; i++ { 283 | receiver, err := d.newPeer(req.Broker, req.Host) 284 | if err != nil { 285 | return err 286 | } 287 | 288 | if err := receiver.Subscribe(); err != nil { 289 | return err 290 | } 291 | 292 | subscriber := &subscriber{ 293 | peer: receiver, 294 | id: i, 295 | numMessages: req.NumMessages, 296 | 
messageSize: req.MessageSize, 297 | } 298 | d.subscribers = append(d.subscribers, subscriber) 299 | go subscriber.start() 300 | } 301 | 302 | return nil 303 | } 304 | 305 | func (d *Daemon) processPublisherStart() error { 306 | for _, publisher := range d.publishers { 307 | go publisher.start() 308 | } 309 | 310 | return nil 311 | } 312 | 313 | func (d *Daemon) processResults() ([]*result, []*result, error) { 314 | subResults := make([]*result, 0, len(d.subscribers)) 315 | for _, subscriber := range d.subscribers { 316 | result, err := subscriber.getResults() 317 | if err != nil { 318 | return nil, nil, err 319 | } 320 | subResults = append(subResults, result) 321 | } 322 | 323 | pubResults := make([]*result, 0, len(d.publishers)) 324 | for _, publisher := range d.publishers { 325 | result, err := publisher.getResults() 326 | if err != nil { 327 | return nil, nil, err 328 | } 329 | pubResults = append(pubResults, result) 330 | } 331 | 332 | log.Println("Benchmark completed") 333 | return pubResults, subResults, nil 334 | } 335 | 336 | func (d *Daemon) processTeardown() { 337 | for _, subscriber := range d.subscribers { 338 | subscriber.Teardown() 339 | } 340 | d.subscribers = d.subscribers[:0] 341 | 342 | for _, publisher := range d.publishers { 343 | publisher.Teardown() 344 | } 345 | d.publishers = d.publishers[:0] 346 | } 347 | 348 | func (d *Daemon) newPeer(broker, host string) (peer, error) { 349 | switch broker { 350 | case NATS: 351 | return nats.NewPeer(host) 352 | case Beanstalkd: 353 | return beanstalkd.NewPeer(host) 354 | case Kafka: 355 | return kafka.NewPeer(host) 356 | case Kestrel: 357 | return kestrel.NewPeer(host) 358 | case ActiveMQ: 359 | return activemq.NewPeer(host) 360 | case RabbitMQ: 361 | return amqp.NewPeer(host) 362 | case NSQ: 363 | return nsq.NewPeer(host) 364 | case CloudPubSub: 365 | return pubsub.NewPeer( 366 | d.config.GoogleCloudProjectID, 367 | d.config.GoogleCloudJSONKey, 368 | ) 369 | default: 370 | return nil, 
fmt.Errorf("Invalid broker: %s", broker) 371 | } 372 | } 373 | -------------------------------------------------------------------------------- /flotilla-server/daemon/publisher.go: -------------------------------------------------------------------------------- 1 | package daemon 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "log" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | type publisher struct { 12 | peer 13 | id int 14 | numMessages int 15 | messageSize int64 16 | results *result 17 | mu sync.Mutex 18 | } 19 | 20 | func (p *publisher) start() { 21 | p.Setup() 22 | defer p.Done() 23 | 24 | var ( 25 | send = p.Send() 26 | errors = p.Errors() 27 | message = make([]byte, p.messageSize) 28 | start = time.Now().UnixNano() 29 | ) 30 | 31 | for i := 0; i < p.numMessages; i++ { 32 | binary.PutVarint(message, time.Now().UnixNano()) 33 | select { 34 | case send <- message: 35 | continue 36 | case err := <-errors: 37 | // TODO: If a publish fails, a subscriber will probably deadlock. 38 | // The best option is probably to signal back to the client that 39 | // a publisher failed so it can orchestrate a shutdown. 
40 | log.Printf("Failed to send message: %s", err.Error()) 41 | p.mu.Lock() 42 | p.results = &result{Err: err.Error()} 43 | p.mu.Unlock() 44 | return 45 | } 46 | } 47 | stop := time.Now().UnixNano() 48 | ms := float32(stop-start) / 1000000 49 | p.mu.Lock() 50 | p.results = &result{ 51 | Duration: ms, 52 | Throughput: 1000 * float32(p.numMessages) / ms, 53 | } 54 | p.mu.Unlock() 55 | log.Println("Publisher completed") 56 | } 57 | 58 | func (p *publisher) getResults() (*result, error) { 59 | p.mu.Lock() 60 | r := p.results 61 | p.mu.Unlock() 62 | if r == nil { 63 | return nil, errors.New("Results not ready") 64 | } 65 | return r, nil 66 | } 67 | -------------------------------------------------------------------------------- /flotilla-server/daemon/subscriber.go: -------------------------------------------------------------------------------- 1 | package daemon 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "log" 7 | "sync" 8 | "time" 9 | 10 | "github.com/codahale/hdrhistogram" 11 | ) 12 | 13 | const ( 14 | maxRecordableLatencyMS = 300000 15 | sigFigs = 5 16 | ) 17 | 18 | type subscriber struct { 19 | peer 20 | id int 21 | numMessages int 22 | messageSize int64 23 | hasStarted bool 24 | started int64 25 | stopped int64 26 | counter int 27 | results *result 28 | mu sync.Mutex 29 | } 30 | 31 | type latencyResults struct { 32 | Min int64 `json:"min"` 33 | Q1 int64 `json:"q1"` 34 | Q2 int64 `json:"q2"` 35 | Q3 int64 `json:"q3"` 36 | Max int64 `json:"max"` 37 | Mean float64 `json:"mean"` 38 | StdDev float64 `json:"std_dev"` 39 | } 40 | 41 | func (s *subscriber) start() { 42 | latencies := hdrhistogram.New(0, maxRecordableLatencyMS, sigFigs) 43 | for { 44 | message, err := s.Recv() 45 | now := time.Now().UnixNano() 46 | if err != nil { 47 | log.Printf("Subscriber error: %s", err.Error()) 48 | s.mu.Lock() 49 | s.results = &result{Err: err.Error()} 50 | s.mu.Unlock() 51 | return 52 | } 53 | 54 | then, _ := binary.Varint(message) 55 | latencies.RecordValue((now - 
then) / 1000000) 56 | 57 | if !s.hasStarted { 58 | s.hasStarted = true 59 | s.started = time.Now().UnixNano() 60 | } 61 | 62 | s.counter++ 63 | if s.counter == s.numMessages { 64 | s.stopped = time.Now().UnixNano() 65 | durationMS := float32(s.stopped-s.started) / 1000000.0 66 | s.mu.Lock() 67 | s.results = &result{ 68 | Duration: durationMS, 69 | Throughput: 1000 * float32(s.numMessages) / durationMS, 70 | Latency: &latencyResults{ 71 | Min: latencies.Min(), 72 | Q1: latencies.ValueAtQuantile(25), 73 | Q2: latencies.ValueAtQuantile(50), 74 | Q3: latencies.ValueAtQuantile(75), 75 | Max: latencies.Max(), 76 | Mean: latencies.Mean(), 77 | StdDev: latencies.StdDev(), 78 | }, 79 | } 80 | s.mu.Unlock() 81 | log.Println("Subscriber completed") 82 | return 83 | } 84 | } 85 | } 86 | 87 | func (s *subscriber) getResults() (*result, error) { 88 | s.mu.Lock() 89 | r := s.results 90 | s.mu.Unlock() 91 | if r == nil { 92 | return nil, errors.New("Results not ready") 93 | } 94 | return r, nil 95 | } 96 | -------------------------------------------------------------------------------- /flotilla-server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "runtime" 7 | 8 | "github.com/tylertreat/Flotilla/flotilla-server/daemon" 9 | ) 10 | 11 | const defaultPort = 9500 12 | 13 | func main() { 14 | var ( 15 | port = flag.Int("port", defaultPort, "daemon port") 16 | gCloudProjectID = flag.String("gcloud-project-id", "", 17 | "Google Cloud project id (needed for Cloud Pub/Sub)") 18 | gCloudJSONKey = flag.String("gcloud-json-key", "", 19 | "Google Cloud project JSON key file (needed for Cloud Pub/Sub)") 20 | ) 21 | flag.Parse() 22 | runtime.GOMAXPROCS(runtime.NumCPU()) 23 | 24 | config := &daemon.Config{ 25 | GoogleCloudProjectID: *gCloudProjectID, 26 | GoogleCloudJSONKey: *gCloudJSONKey, 27 | } 28 | 29 | d, err := daemon.NewDaemon(config) 30 | if err != nil { 31 | panic(err) 32 | } 33 | 34 | 
fmt.Printf("Flotilla daemon started on port %d...\n", *port) 35 | if err := d.Start(*port); err != nil { 36 | panic(err) 37 | } 38 | } 39 | --------------------------------------------------------------------------------