├── .gitignore ├── README.md ├── benchmark ├── mq │ ├── activemq.go │ ├── beanstalkd.go │ ├── gnatsd.go │ ├── inproc.go │ ├── iris.go │ ├── kafka.go │ ├── kestrel.go │ ├── nanomsg.go │ ├── nsq.go │ ├── rabbitmq.go │ ├── redis.go │ ├── surgemq.go │ └── zeromq.go ├── receiver.go ├── sender.go └── tester.go └── main.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | 25 | *.log 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | mq-benchmarking 2 | ========================== 3 | 4 | **Deprecated:** Use [Flotilla](https://github.com/tylertreat/Flotilla) for running benchmarks. See related [blog post](http://www.bravenewgeek.com/benchmark-responsibly/) for more information. 
5 | 6 | Results: http://www.bravenewgeek.com/dissecting-message-queues/ 7 | ___ 8 | 9 | **Usage:** `go run main.go subject [test_latency] [num_messages] [message_size]` 10 | 11 | **subject:** inproc, zeromq, nanomsg, kestrel, kafka, rabbitmq, nsq, redis, activemq, nats, beanstalkd, iris 12 | 13 | **test_latency:** `true` will test latency, `false` will test throughput 14 | 15 | **num_messages:** number of messages to send in the test 16 | 17 | **message_size:** size of each message in bytes 18 | -------------------------------------------------------------------------------- /benchmark/mq/activemq.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "github.com/tylertreat/mq-benchmarking/benchmark" 5 | "gopkg.in/stomp.v1" 6 | ) 7 | 8 | type Activemq struct { 9 | handler benchmark.MessageHandler 10 | pub *stomp.Conn 11 | subConn *stomp.Conn 12 | sub *stomp.Subscription 13 | queue string 14 | } 15 | 16 | func activemqReceive(a *Activemq) { 17 | for { 18 | message := <-a.sub.C 19 | if a.handler.ReceiveMessage(message.Body) { 20 | break 21 | } 22 | } 23 | } 24 | 25 | func NewActivemq(numberOfMessages int, testLatency bool) *Activemq { 26 | queue := "test" 27 | pub, _ := stomp.Dial("tcp", "localhost:61613", stomp.Options{}) 28 | subConn, _ := stomp.Dial("tcp", "localhost:61613", stomp.Options{}) 29 | sub, _ := subConn.Subscribe(queue, stomp.AckAuto) 30 | 31 | var handler benchmark.MessageHandler 32 | if testLatency { 33 | handler = &benchmark.LatencyMessageHandler{ 34 | NumberOfMessages: numberOfMessages, 35 | Latencies: []float32{}, 36 | } 37 | } else { 38 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 39 | } 40 | 41 | return &Activemq{ 42 | handler: handler, 43 | queue: queue, 44 | pub: pub, 45 | subConn: subConn, 46 | sub: sub, 47 | } 48 | } 49 | 50 | func (a *Activemq) Setup() { 51 | go activemqReceive(a) 52 | } 53 | 54 | func (a *Activemq) Teardown() { 55 | 
a.pub.Disconnect() 56 | a.subConn.Disconnect() 57 | } 58 | 59 | func (a *Activemq) Send(message []byte) { 60 | a.pub.Send(a.queue, "", message, nil) 61 | } 62 | 63 | func (a *Activemq) MessageHandler() *benchmark.MessageHandler { 64 | return &a.handler 65 | } 66 | -------------------------------------------------------------------------------- /benchmark/mq/beanstalkd.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/kr/beanstalk" 8 | "github.com/tylertreat/mq-benchmarking/benchmark" 9 | ) 10 | 11 | type Beanstalkd struct { 12 | handler benchmark.MessageHandler 13 | pub *beanstalk.Conn 14 | sub *beanstalk.Conn 15 | } 16 | 17 | func beanstalkdReceive(b *Beanstalkd) { 18 | for { 19 | id, message, err := b.sub.Reserve(5 * time.Second) 20 | if err != nil { 21 | panic(fmt.Sprintf("beanstalkd: Received an error! %v\n", err)) 22 | } 23 | 24 | b.sub.Delete(id) 25 | if b.handler.ReceiveMessage(message) { 26 | break 27 | } 28 | } 29 | } 30 | 31 | func NewBeanstalkd(numberOfMessages int, testLatency bool) *Beanstalkd { 32 | pub, _ := beanstalk.Dial("tcp", "localhost:11300") 33 | sub, _ := beanstalk.Dial("tcp", "localhost:11300") 34 | 35 | var handler benchmark.MessageHandler 36 | if testLatency { 37 | handler = &benchmark.LatencyMessageHandler{ 38 | NumberOfMessages: numberOfMessages, 39 | Latencies: []float32{}, 40 | } 41 | } else { 42 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 43 | } 44 | 45 | return &Beanstalkd{ 46 | handler: handler, 47 | pub: pub, 48 | sub: sub, 49 | } 50 | } 51 | 52 | func (b *Beanstalkd) Setup() { 53 | go beanstalkdReceive(b) 54 | } 55 | 56 | func (b *Beanstalkd) Teardown() { 57 | b.pub.Close() 58 | b.sub.Close() 59 | } 60 | 61 | func (b *Beanstalkd) Send(message []byte) { 62 | b.pub.Put(message, 1, 0, 0) 63 | } 64 | 65 | func (b *Beanstalkd) MessageHandler() *benchmark.MessageHandler { 66 | return 
&b.handler 67 | } 68 | -------------------------------------------------------------------------------- /benchmark/mq/gnatsd.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/nats-io/nats" 8 | "github.com/tylertreat/mq-benchmarking/benchmark" 9 | ) 10 | 11 | type Gnatsd struct { 12 | handler benchmark.MessageHandler 13 | conn *nats.Conn 14 | subject string 15 | testLatency bool 16 | } 17 | 18 | func NewGnatsd(numberOfMessages int, testLatency bool) *Gnatsd { 19 | conn, _ := nats.Connect(nats.DefaultURL) 20 | 21 | // We want to be alerted if we get disconnected, this will 22 | // be due to Slow Consumer. 23 | conn.Opts.AllowReconnect = false 24 | 25 | // Report async errors. 26 | conn.Opts.AsyncErrorCB = func(nc *nats.Conn, sub *nats.Subscription, err error) { 27 | panic(fmt.Sprintf("NATS: Received an async error! %v\n", err)) 28 | } 29 | 30 | // Report a disconnect scenario. 31 | conn.Opts.DisconnectedCB = func(nc *nats.Conn) { 32 | fmt.Printf("Getting behind! %d\n", nc.OutMsgs-nc.InMsgs) 33 | panic("NATS: Got disconnected!") 34 | } 35 | 36 | var handler benchmark.MessageHandler 37 | if testLatency { 38 | handler = &benchmark.LatencyMessageHandler{ 39 | NumberOfMessages: numberOfMessages, 40 | Latencies: []float32{}, 41 | } 42 | } else { 43 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 44 | } 45 | 46 | return &Gnatsd{ 47 | handler: handler, 48 | subject: "test", 49 | conn: conn, 50 | testLatency: testLatency, 51 | } 52 | } 53 | 54 | func (g *Gnatsd) Setup() { 55 | g.conn.Subscribe(g.subject, func(message *nats.Msg) { 56 | g.handler.ReceiveMessage(message.Data) 57 | }) 58 | } 59 | 60 | func (g *Gnatsd) Teardown() { 61 | g.conn.Close() 62 | } 63 | 64 | const ( 65 | // Maximum bytes we will get behind before we start slowing down publishing. 
66 | maxBytesBehind = 1024 * 1024 // 1MB 67 | 68 | // Maximum msgs we will get behind before we start slowing down publishing. 69 | maxMsgsBehind = 65536 // 64k 70 | 71 | // Maximum msgs we will get behind when testing latency 72 | maxLatencyMsgsBehind = 10 // 10 73 | 74 | // Time to delay publishing when we are behind. 75 | delay = 1 * time.Millisecond 76 | ) 77 | 78 | func (g *Gnatsd) Send(message []byte) { 79 | // Check if we are behind by >= 1MB bytes 80 | bytesDeltaOver := g.conn.OutBytes-g.conn.InBytes >= maxBytesBehind 81 | // Check if we are behind by >= 65k msgs 82 | msgsDeltaOver := g.conn.OutMsgs-g.conn.InMsgs >= maxMsgsBehind 83 | // Override for latency test. 84 | if g.testLatency { 85 | msgsDeltaOver = g.conn.OutMsgs-g.conn.InMsgs >= maxLatencyMsgsBehind 86 | } 87 | 88 | // If we are behind on either condition, sleep a bit to catch up receiver. 89 | if bytesDeltaOver || msgsDeltaOver { 90 | time.Sleep(delay) 91 | } 92 | 93 | g.conn.Publish(g.subject, message) 94 | } 95 | 96 | func (g *Gnatsd) MessageHandler() *benchmark.MessageHandler { 97 | return &g.handler 98 | } 99 | -------------------------------------------------------------------------------- /benchmark/mq/inproc.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import "github.com/tylertreat/mq-benchmarking/benchmark" 4 | 5 | type Inproc struct { 6 | handler benchmark.MessageHandler 7 | } 8 | 9 | func NewInproc(numberOfMessages int, testLatency bool) *Inproc { 10 | var handler benchmark.MessageHandler 11 | if testLatency { 12 | handler = &benchmark.LatencyMessageHandler{ 13 | NumberOfMessages: numberOfMessages, 14 | Latencies: []float32{}, 15 | } 16 | } else { 17 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 18 | } 19 | 20 | return &Inproc{handler: handler} 21 | } 22 | 23 | func (inproc *Inproc) Send(message []byte) { 24 | inproc.handler.ReceiveMessage(message) 25 | } 26 | 27 | func (inproc *Inproc) 
MessageHandler() *benchmark.MessageHandler { 28 | return &inproc.handler 29 | } 30 | 31 | func (inproc *Inproc) Setup() {} 32 | 33 | func (inproc *Inproc) Teardown() {} 34 | -------------------------------------------------------------------------------- /benchmark/mq/iris.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "runtime" 5 | 6 | "github.com/tylertreat/mq-benchmarking/benchmark" 7 | "gopkg.in/project-iris/iris-go.v1" 8 | ) 9 | 10 | type Iris struct { 11 | handler benchmark.MessageHandler 12 | topic string 13 | pub *iris.Connection 14 | sub *iris.Connection 15 | } 16 | 17 | type EventHandler struct { 18 | handler benchmark.MessageHandler 19 | } 20 | 21 | func (t *EventHandler) HandleEvent(event []byte) { 22 | t.handler.ReceiveMessage(event) 23 | } 24 | 25 | func NewIris(numberOfMessages int, testLatency bool) *Iris { 26 | topic := "test" 27 | pub, _ := iris.Connect(55555) 28 | sub, _ := iris.Connect(55555) 29 | 30 | var handler benchmark.MessageHandler 31 | if testLatency { 32 | handler = &benchmark.LatencyMessageHandler{ 33 | NumberOfMessages: numberOfMessages, 34 | Latencies: []float32{}, 35 | } 36 | } else { 37 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 38 | } 39 | 40 | sub.Subscribe(topic, &EventHandler{handler}, &iris.TopicLimits{ 41 | EventThreads: runtime.NumCPU(), 42 | EventMemory: 1024 * 1024, 43 | }) 44 | 45 | return &Iris{ 46 | handler: handler, 47 | topic: topic, 48 | pub: pub, 49 | sub: sub, 50 | } 51 | } 52 | 53 | func (i *Iris) Setup() {} 54 | 55 | func (i *Iris) Teardown() { 56 | i.sub.Unsubscribe(i.topic) 57 | i.pub.Close() 58 | i.sub.Close() 59 | } 60 | 61 | func (i *Iris) Send(message []byte) { 62 | i.pub.Publish(i.topic, message) 63 | } 64 | 65 | func (i *Iris) MessageHandler() *benchmark.MessageHandler { 66 | return &i.handler 67 | } 68 | -------------------------------------------------------------------------------- 
/benchmark/mq/kafka.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "github.com/Shopify/sarama" 5 | "github.com/tylertreat/mq-benchmarking/benchmark" 6 | ) 7 | 8 | type Kafka struct { 9 | handler benchmark.MessageHandler 10 | client sarama.Client 11 | pub sarama.AsyncProducer 12 | sub sarama.PartitionConsumer 13 | topic string 14 | } 15 | 16 | func kafkaReceive(k *Kafka) { 17 | for { 18 | msg := <-k.sub.Messages() 19 | if k.handler.ReceiveMessage(msg.Value) { 20 | break 21 | } 22 | } 23 | } 24 | 25 | func kafkaAsyncErrors(k *Kafka) { 26 | for _ = range k.pub.Errors() { 27 | } 28 | } 29 | 30 | func NewKafka(numberOfMessages int, testLatency bool) *Kafka { 31 | config := sarama.NewConfig() 32 | client, _ := sarama.NewClient([]string{"localhost:9092"}, config) 33 | 34 | topic := "test" 35 | pub, _ := sarama.NewAsyncProducer([]string{"localhost:9092"}, config) 36 | consumer, _ := sarama.NewConsumerFromClient(client) 37 | sub, _ := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest) 38 | 39 | var handler benchmark.MessageHandler 40 | if testLatency { 41 | handler = &benchmark.LatencyMessageHandler{ 42 | NumberOfMessages: numberOfMessages, 43 | Latencies: []float32{}, 44 | } 45 | } else { 46 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 47 | } 48 | 49 | return &Kafka{ 50 | handler: handler, 51 | client: client, 52 | pub: pub, 53 | sub: sub, 54 | topic: topic, 55 | } 56 | } 57 | 58 | func (k *Kafka) Setup() { 59 | go kafkaReceive(k) 60 | go kafkaAsyncErrors(k) 61 | } 62 | 63 | func (k *Kafka) Teardown() { 64 | k.pub.Close() 65 | k.sub.Close() 66 | k.client.Close() 67 | } 68 | 69 | func (k *Kafka) Send(message []byte) { 70 | k.pub.Input() <- &sarama.ProducerMessage{ 71 | Topic: k.topic, 72 | Key: nil, 73 | Value: sarama.ByteEncoder(message), 74 | } 75 | } 76 | 77 | func (k *Kafka) MessageHandler() *benchmark.MessageHandler { 78 | return &k.handler 79 | } 80 | 
-------------------------------------------------------------------------------- /benchmark/mq/kestrel.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "github.com/alindeman/go-kestrel" 5 | "github.com/tylertreat/mq-benchmarking/benchmark" 6 | ) 7 | 8 | type Kestrel struct { 9 | handler benchmark.MessageHandler 10 | queue string 11 | pub *kestrel.Client 12 | sub *kestrel.Client 13 | } 14 | 15 | func kestrelReceive(k *Kestrel) { 16 | for { 17 | message, _ := k.sub.Get(k.queue, 1, 0, 0) 18 | if len(message) > 0 { 19 | if k.handler.ReceiveMessage(message[0].Data) { 20 | break 21 | } 22 | } 23 | } 24 | } 25 | 26 | func NewKestrel(numberOfMessages int, testLatency bool) *Kestrel { 27 | pub := kestrel.NewClient("localhost", 2229) 28 | sub := kestrel.NewClient("localhost", 2229) 29 | 30 | var handler benchmark.MessageHandler 31 | if testLatency { 32 | handler = &benchmark.LatencyMessageHandler{ 33 | NumberOfMessages: numberOfMessages, 34 | Latencies: []float32{}, 35 | } 36 | } else { 37 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 38 | } 39 | 40 | return &Kestrel{ 41 | handler: handler, 42 | queue: "transient_events", 43 | pub: pub, 44 | sub: sub, 45 | } 46 | } 47 | 48 | func (k *Kestrel) Setup() { 49 | k.pub.FlushAllQueues() 50 | go kestrelReceive(k) 51 | } 52 | 53 | func (k *Kestrel) Teardown() { 54 | k.pub.Close() 55 | k.sub.Close() 56 | } 57 | 58 | func (k *Kestrel) Send(message []byte) { 59 | k.pub.Put(k.queue, [][]byte{message}) 60 | } 61 | 62 | func (k *Kestrel) MessageHandler() *benchmark.MessageHandler { 63 | return &k.handler 64 | } 65 | -------------------------------------------------------------------------------- /benchmark/mq/nanomsg.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/op/go-nanomsg" 7 | "github.com/tylertreat/mq-benchmarking/benchmark" 8 
| ) 9 | 10 | type Nanomsg struct { 11 | handler benchmark.MessageHandler 12 | sender *nanomsg.PubSocket 13 | receiver *nanomsg.SubSocket 14 | } 15 | 16 | func nanoReceive(nano *Nanomsg) { 17 | for { 18 | // TODO: Some messages come back empty. Is this a slow-consumer problem? 19 | // Should DontWait be used? 20 | message, _ := nano.receiver.Recv(nanomsg.DontWait) 21 | if nano.handler.ReceiveMessage(message) { 22 | break 23 | } 24 | } 25 | } 26 | 27 | func NewNanomsg(numberOfMessages int, testLatency bool) *Nanomsg { 28 | pub, _ := nanomsg.NewPubSocket() 29 | pub.Bind("tcp://*:5555") 30 | sub, _ := nanomsg.NewSubSocket() 31 | sub.Subscribe("") 32 | sub.Connect("tcp://localhost:5555") 33 | 34 | var handler benchmark.MessageHandler 35 | if testLatency { 36 | handler = &benchmark.LatencyMessageHandler{ 37 | NumberOfMessages: numberOfMessages, 38 | Latencies: []float32{}, 39 | } 40 | } else { 41 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 42 | } 43 | 44 | return &Nanomsg{ 45 | handler: handler, 46 | sender: pub, 47 | receiver: sub, 48 | } 49 | } 50 | 51 | func (nano *Nanomsg) Setup() { 52 | // Sleep is needed to avoid race condition with receiving initial messages. 53 | time.Sleep(3 * time.Second) 54 | go nanoReceive(nano) 55 | } 56 | 57 | func (nano *Nanomsg) Teardown() { 58 | nano.sender.Close() 59 | nano.receiver.Close() 60 | } 61 | 62 | func (nano *Nanomsg) Send(message []byte) { 63 | // TODO: Should DontWait be used? Possibly overloading consumer. 
64 | nano.sender.Send(message, nanomsg.DontWait) 65 | } 66 | 67 | func (nano *Nanomsg) MessageHandler() *benchmark.MessageHandler { 68 | return &nano.handler 69 | } 70 | -------------------------------------------------------------------------------- /benchmark/mq/nsq.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "github.com/bitly/go-nsq" 5 | "github.com/tylertreat/mq-benchmarking/benchmark" 6 | ) 7 | 8 | type Nsq struct { 9 | handler benchmark.MessageHandler 10 | pub *nsq.Producer 11 | sub *nsq.Consumer 12 | topic string 13 | channel string 14 | } 15 | 16 | func NewNsq(numberOfMessages int, testLatency bool) *Nsq { 17 | topic := "test" 18 | channel := "test" 19 | pub, _ := nsq.NewProducer("localhost:4150", nsq.NewConfig()) 20 | sub, _ := nsq.NewConsumer(topic, channel, nsq.NewConfig()) 21 | 22 | var handler benchmark.MessageHandler 23 | if testLatency { 24 | handler = &benchmark.LatencyMessageHandler{ 25 | NumberOfMessages: numberOfMessages, 26 | Latencies: []float32{}, 27 | } 28 | } else { 29 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 30 | } 31 | 32 | return &Nsq{ 33 | handler: handler, 34 | pub: pub, 35 | sub: sub, 36 | topic: topic, 37 | channel: channel, 38 | } 39 | } 40 | 41 | func (n *Nsq) Setup() { 42 | n.sub.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { 43 | n.handler.ReceiveMessage(message.Body) 44 | return nil 45 | })) 46 | n.sub.ConnectToNSQD("localhost:4150") 47 | } 48 | 49 | func (n *Nsq) Teardown() { 50 | n.sub.Stop() 51 | n.pub.Stop() 52 | } 53 | 54 | func (n *Nsq) Send(message []byte) { 55 | n.pub.PublishAsync(n.topic, message, nil) 56 | } 57 | 58 | func (n *Nsq) MessageHandler() *benchmark.MessageHandler { 59 | return &n.handler 60 | } 61 | -------------------------------------------------------------------------------- /benchmark/mq/rabbitmq.go: 
-------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/streadway/amqp" 7 | "github.com/tylertreat/mq-benchmarking/benchmark" 8 | ) 9 | 10 | type consumer struct { 11 | conn *amqp.Connection 12 | channel *amqp.Channel 13 | tag string 14 | done chan error 15 | } 16 | 17 | type producer struct { 18 | conn *amqp.Connection 19 | channel *amqp.Channel 20 | } 21 | 22 | type Rabbitmq struct { 23 | handler benchmark.MessageHandler 24 | pub *producer 25 | sub *consumer 26 | queue string 27 | exchange string 28 | key string 29 | } 30 | 31 | func newConsumer(amqpUri, exchange, exchangeType, queueName, key, ctag string) (*consumer, error) { 32 | c := &consumer{ 33 | conn: nil, 34 | channel: nil, 35 | tag: ctag, 36 | done: make(chan error), 37 | } 38 | 39 | var err error 40 | 41 | c.conn, err = amqp.Dial(amqpUri) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | go func() { 47 | fmt.Printf("closing: %s", <-c.conn.NotifyClose(make(chan *amqp.Error))) 48 | }() 49 | 50 | c.channel, err = c.conn.Channel() 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | if err = c.channel.ExchangeDeclare( 56 | exchange, 57 | exchangeType, 58 | false, // not durable 59 | false, 60 | false, 61 | false, 62 | nil, 63 | ); err != nil { 64 | return nil, err 65 | } 66 | 67 | queue, err := c.channel.QueueDeclare( 68 | queueName, 69 | false, // not durable 70 | false, 71 | false, 72 | false, 73 | nil, 74 | ) 75 | if err != nil { 76 | return nil, err 77 | } 78 | 79 | if err = c.channel.QueueBind( 80 | queue.Name, 81 | key, 82 | exchange, 83 | false, 84 | nil, 85 | ); err != nil { 86 | return nil, err 87 | } 88 | 89 | return c, nil 90 | } 91 | 92 | func rabbitmqReceive(r *Rabbitmq) { 93 | loop: 94 | for { 95 | deliveries, _ := r.sub.channel.Consume( 96 | r.queue, 97 | r.sub.tag, 98 | false, 99 | false, 100 | false, 101 | false, 102 | nil, 103 | ) 104 | 105 | for d := range deliveries { 106 | if 
r.handler.ReceiveMessage(d.Body) { 107 | break loop 108 | } 109 | } 110 | } 111 | } 112 | 113 | func NewRabbitmq(numberOfMessages int, testLatency bool) *Rabbitmq { 114 | exchange := "test" 115 | exchangeType := "direct" 116 | uri := "amqp://guest:guest@localhost:5672" 117 | queue := "test" 118 | key := "test-key" 119 | ctag := "tag" 120 | 121 | pubConn, _ := amqp.Dial(uri) 122 | pubChan, _ := pubConn.Channel() 123 | pubChan.ExchangeDeclare( 124 | exchange, 125 | exchangeType, 126 | false, // not durable 127 | false, 128 | false, 129 | false, 130 | nil, 131 | ) 132 | pub := &producer{conn: pubConn, channel: pubChan} 133 | sub, _ := newConsumer(uri, exchange, exchangeType, queue, key, ctag) 134 | 135 | var handler benchmark.MessageHandler 136 | if testLatency { 137 | handler = &benchmark.LatencyMessageHandler{ 138 | NumberOfMessages: numberOfMessages, 139 | Latencies: []float32{}, 140 | } 141 | } else { 142 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 143 | } 144 | 145 | return &Rabbitmq{ 146 | handler: handler, 147 | pub: pub, 148 | sub: sub, 149 | queue: queue, 150 | exchange: exchange, 151 | key: key, 152 | } 153 | } 154 | 155 | func (r *Rabbitmq) Setup() { 156 | go rabbitmqReceive(r) 157 | } 158 | 159 | func (r *Rabbitmq) Teardown() { 160 | r.pub.conn.Close() 161 | r.sub.conn.Close() 162 | } 163 | 164 | func (r *Rabbitmq) Send(message []byte) { 165 | r.pub.channel.Publish( 166 | r.exchange, 167 | r.key, 168 | false, 169 | false, 170 | amqp.Publishing{ 171 | Headers: amqp.Table{}, 172 | ContentType: "text/plain", 173 | ContentEncoding: "", 174 | Body: message, 175 | DeliveryMode: amqp.Transient, 176 | Priority: 0, 177 | }, 178 | ) 179 | } 180 | 181 | func (r *Rabbitmq) MessageHandler() *benchmark.MessageHandler { 182 | return &r.handler 183 | } 184 | -------------------------------------------------------------------------------- /benchmark/mq/redis.go: 
-------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "github.com/garyburd/redigo/redis" 5 | "github.com/tylertreat/mq-benchmarking/benchmark" 6 | ) 7 | 8 | type Redis struct { 9 | handler benchmark.MessageHandler 10 | pub redis.Conn 11 | sub redis.PubSubConn 12 | channel string 13 | } 14 | 15 | func redisReceive(r *Redis) { 16 | for { 17 | switch v := r.sub.Receive().(type) { 18 | case redis.Message: 19 | if r.handler.ReceiveMessage(v.Data) { 20 | break 21 | } 22 | } 23 | } 24 | } 25 | 26 | func NewRedis(numberOfMessages int, testLatency bool) *Redis { 27 | channel := "test" 28 | pub, _ := redis.Dial("tcp", ":6379") 29 | subConn, _ := redis.Dial("tcp", ":6379") 30 | sub := redis.PubSubConn{subConn} 31 | sub.Subscribe(channel) 32 | 33 | var handler benchmark.MessageHandler 34 | if testLatency { 35 | handler = &benchmark.LatencyMessageHandler{ 36 | NumberOfMessages: numberOfMessages, 37 | Latencies: []float32{}, 38 | } 39 | } else { 40 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 41 | } 42 | 43 | return &Redis{ 44 | handler: handler, 45 | pub: pub, 46 | sub: sub, 47 | channel: channel, 48 | } 49 | } 50 | 51 | func (r *Redis) Setup() { 52 | go redisReceive(r) 53 | } 54 | 55 | func (r *Redis) Teardown() { 56 | r.pub.Close() 57 | r.sub.Close() 58 | } 59 | 60 | func (r *Redis) Send(message []byte) { 61 | r.pub.Send("PUBLISH", r.channel, message) 62 | r.pub.Flush() 63 | } 64 | 65 | func (r *Redis) MessageHandler() *benchmark.MessageHandler { 66 | return &r.handler 67 | } 68 | -------------------------------------------------------------------------------- /benchmark/mq/surgemq.go: -------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "github.com/surge/surgemq/service" 5 | 6 | "github.com/surgemq/message" 7 | "github.com/tylertreat/mq-benchmarking/benchmark" 8 | ) 9 | 10 | type SurgeMQ struct { 11 | handler 
benchmark.MessageHandler 12 | client *service.Client 13 | subject string 14 | testLatency bool 15 | } 16 | 17 | func NewSurgeMQ(numberOfMessages int, testLatency bool) *SurgeMQ { 18 | uri := "tcp://127.0.0.1:1883" 19 | client := &service.Client{} 20 | 21 | msg := message.NewConnectMessage() 22 | msg.SetWillQos(1) 23 | msg.SetVersion(4) 24 | msg.SetCleanSession(true) 25 | msg.SetClientId([]byte("surgemq")) 26 | msg.SetKeepAlive(10) 27 | msg.SetWillTopic([]byte("will")) 28 | msg.SetWillMessage([]byte("send me home")) 29 | msg.SetUsername([]byte("surgemq")) 30 | msg.SetPassword([]byte("verysecret")) 31 | 32 | err := client.Connect(uri, msg) 33 | 34 | if err != nil { 35 | panic(err) 36 | } 37 | 38 | var handler benchmark.MessageHandler 39 | if testLatency { 40 | handler = &benchmark.LatencyMessageHandler{ 41 | NumberOfMessages: numberOfMessages, 42 | Latencies: []float32{}, 43 | } 44 | } else { 45 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 46 | } 47 | 48 | return &SurgeMQ{ 49 | handler: handler, 50 | subject: "test", 51 | client: client, 52 | testLatency: testLatency, 53 | } 54 | } 55 | 56 | func (smq *SurgeMQ) Setup() { 57 | msg := message.NewSubscribeMessage() 58 | msg.SetPacketId(1) 59 | msg.AddTopic([]byte(smq.subject), 0) 60 | 61 | smq.client.Subscribe(msg, nil, func(msg *message.PublishMessage) error { 62 | smq.handler.ReceiveMessage(msg.Payload()) 63 | return nil 64 | }) 65 | } 66 | 67 | func (smq *SurgeMQ) Teardown() { 68 | smq.client.Disconnect() 69 | } 70 | 71 | func (smq *SurgeMQ) Send(msgbytes []byte) { 72 | msg := message.NewPublishMessage() 73 | msg.SetTopic([]byte(smq.subject)) 74 | msg.SetPayload(msgbytes) 75 | msg.SetQoS(0) 76 | 77 | smq.client.Publish(msg, nil) 78 | } 79 | 80 | func (smq *SurgeMQ) MessageHandler() *benchmark.MessageHandler { 81 | return &smq.handler 82 | } 83 | -------------------------------------------------------------------------------- /benchmark/mq/zeromq.go: 
-------------------------------------------------------------------------------- 1 | package mq 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/pebbe/zmq4" 7 | "github.com/tylertreat/mq-benchmarking/benchmark" 8 | ) 9 | 10 | type Zeromq struct { 11 | handler benchmark.MessageHandler 12 | sender *zmq4.Socket 13 | receiver *zmq4.Socket 14 | } 15 | 16 | func zeromqReceive(zeromq *Zeromq) { 17 | for { 18 | // TODO: Some messages come back empty. Is this a slow-consumer problem? 19 | // Should DONTWAIT be used? 20 | message, _ := zeromq.receiver.RecvBytes(zmq4.DONTWAIT) 21 | if zeromq.handler.ReceiveMessage(message) { 22 | break 23 | } 24 | } 25 | } 26 | 27 | func NewZeromq(numberOfMessages int, testLatency bool) *Zeromq { 28 | ctx, _ := zmq4.NewContext() 29 | pub, _ := ctx.NewSocket(zmq4.PUB) 30 | pub.Bind("tcp://*:5555") 31 | sub, _ := ctx.NewSocket(zmq4.SUB) 32 | sub.SetSubscribe("") 33 | sub.Connect("tcp://localhost:5555") 34 | 35 | var handler benchmark.MessageHandler 36 | if testLatency { 37 | handler = &benchmark.LatencyMessageHandler{ 38 | NumberOfMessages: numberOfMessages, 39 | Latencies: []float32{}, 40 | } 41 | } else { 42 | handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages} 43 | } 44 | 45 | return &Zeromq{ 46 | handler: handler, 47 | sender: pub, 48 | receiver: sub, 49 | } 50 | } 51 | 52 | func (zeromq *Zeromq) Setup() { 53 | // Sleep is needed to avoid race condition with receiving initial messages. 54 | time.Sleep(3 * time.Second) 55 | go zeromqReceive(zeromq) 56 | } 57 | 58 | func (zeromq *Zeromq) Teardown() { 59 | zeromq.sender.Close() 60 | zeromq.receiver.Close() 61 | } 62 | 63 | func (zeromq *Zeromq) Send(message []byte) { 64 | // TODO: Should DONTWAIT be used? Possibly overloading consumer. 
65 | zeromq.sender.SendBytes(message, zmq4.DONTWAIT) 66 | } 67 | 68 | func (zeromq *Zeromq) MessageHandler() *benchmark.MessageHandler { 69 | return &zeromq.handler 70 | } 71 | -------------------------------------------------------------------------------- /benchmark/receiver.go: -------------------------------------------------------------------------------- 1 | package benchmark 2 | 3 | import ( 4 | "encoding/binary" 5 | "log" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | type MessageReceiver interface { 11 | MessageHandler() *MessageHandler 12 | Setup() 13 | Teardown() 14 | } 15 | 16 | type ReceiveEndpoint struct { 17 | MessageReceiver MessageReceiver 18 | NumberOfMessages int 19 | Handler *MessageHandler 20 | } 21 | 22 | func NewReceiveEndpoint(receiver MessageReceiver, numberOfMessages int) *ReceiveEndpoint { 23 | return &ReceiveEndpoint{ 24 | MessageReceiver: receiver, 25 | NumberOfMessages: numberOfMessages, 26 | Handler: receiver.MessageHandler(), 27 | } 28 | } 29 | 30 | type MessageHandler interface { 31 | // Process a received message. Return true if it's the last message, otherwise 32 | // return false. 33 | ReceiveMessage([]byte) bool 34 | 35 | // Indicate whether the handler has been marked complete, meaning all messages 36 | // have been received. 37 | HasCompleted() bool 38 | } 39 | 40 | type ThroughputMessageHandler struct { 41 | hasStarted bool 42 | hasCompleted bool 43 | messageCounter int 44 | NumberOfMessages int 45 | started int64 46 | stopped int64 47 | completionLock sync.Mutex 48 | } 49 | 50 | type LatencyMessageHandler struct { 51 | NumberOfMessages int 52 | Latencies []float32 53 | messageCounter int 54 | hasCompleted bool 55 | completionLock sync.Mutex 56 | } 57 | 58 | func (handler *ThroughputMessageHandler) HasCompleted() bool { 59 | handler.completionLock.Lock() 60 | defer handler.completionLock.Unlock() 61 | return handler.hasCompleted 62 | } 63 | 64 | // Increment a message counter. If this is the first message, set the started timestamp. 
65 | // If it's the last message, set the stopped timestamp and compute the total runtime 66 | // and print it out. Return true if it's the last message, otherwise return false. 67 | func (handler *ThroughputMessageHandler) ReceiveMessage(message []byte) bool { 68 | if !handler.hasStarted { 69 | handler.hasStarted = true 70 | handler.started = time.Now().UnixNano() 71 | } 72 | 73 | handler.messageCounter++ 74 | 75 | if handler.messageCounter == handler.NumberOfMessages { 76 | handler.stopped = time.Now().UnixNano() 77 | ms := float32(handler.stopped-handler.started) / 1000000.0 78 | log.Printf("Received %d messages in %f ms\n", handler.NumberOfMessages, ms) 79 | log.Printf("Received %f per second\n", 1000*float32(handler.NumberOfMessages)/ms) 80 | 81 | handler.completionLock.Lock() 82 | handler.hasCompleted = true 83 | handler.completionLock.Unlock() 84 | 85 | return true 86 | } 87 | 88 | return false 89 | } 90 | 91 | func (handler *LatencyMessageHandler) HasCompleted() bool { 92 | handler.completionLock.Lock() 93 | defer handler.completionLock.Unlock() 94 | return handler.hasCompleted 95 | } 96 | 97 | // Record each message's latency. The message contains the timestamp when it was sent. 98 | // If it's the last message, compute the average latency and print it out. Return true 99 | // if the message is the last one, otherwise return false. 100 | func (handler *LatencyMessageHandler) ReceiveMessage(message []byte) bool { 101 | now := time.Now().UnixNano() 102 | then, _ := binary.Varint(message) 103 | 104 | // TODO: Figure out why nanomsg and ZeroMQ sometimes receive empty messages. 
105 | if then != 0 { 106 | handler.Latencies = append(handler.Latencies, (float32(now-then))/1000/1000) 107 | } 108 | 109 | handler.messageCounter++ 110 | if handler.messageCounter == handler.NumberOfMessages { 111 | sum := float32(0) 112 | for _, latency := range handler.Latencies { 113 | sum += latency 114 | } 115 | avgLatency := float32(sum) / float32(len(handler.Latencies)) 116 | log.Printf("Mean latency for %d messages: %f ms\n", handler.NumberOfMessages, 117 | avgLatency) 118 | 119 | handler.completionLock.Lock() 120 | handler.hasCompleted = true 121 | handler.completionLock.Unlock() 122 | 123 | return true 124 | } 125 | 126 | return false 127 | } 128 | 129 | func (endpoint ReceiveEndpoint) WaitForCompletion() { 130 | for { 131 | if (*endpoint.Handler).HasCompleted() { 132 | break 133 | } else { 134 | time.Sleep(10 * time.Millisecond) 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /benchmark/sender.go: -------------------------------------------------------------------------------- 1 | package benchmark 2 | 3 | import ( 4 | "encoding/binary" 5 | "log" 6 | "time" 7 | ) 8 | 9 | type MessageSender interface { 10 | Send([]byte) 11 | } 12 | 13 | type SendEndpoint struct { 14 | MessageSender MessageSender 15 | } 16 | 17 | func (endpoint SendEndpoint) TestThroughput(messageSize int, numberToSend int) { 18 | message := make([]byte, messageSize) 19 | start := time.Now().UnixNano() 20 | for i := 0; i < numberToSend; i++ { 21 | endpoint.MessageSender.Send(message) 22 | } 23 | 24 | stop := time.Now().UnixNano() 25 | ms := float32(stop-start) / 1000000 26 | log.Printf("Sent %d messages in %f ms\n", numberToSend, ms) 27 | log.Printf("Sent %f per second\n", 1000*float32(numberToSend)/ms) 28 | } 29 | 30 | func (endpoint SendEndpoint) TestLatency(messageSize int, numberToSend int) { 31 | start := time.Now().UnixNano() 32 | b := make([]byte, 9) 33 | for i := 0; i < numberToSend; i++ { 34 | binary.PutVarint(b, 
// NOTE(review): TestLatency never reads its messageSize parameter — the payload
// is always the varint-encoded timestamp — yet tester.go dutifully passes
// tester.MessageSize into it. The message_size CLI argument is therefore
// silently ignored in latency mode; either pad the payload to messageSize or
// document that latency messages are fixed-size.
// NOTE(review): Tester embeds MessageSender/MessageReceiver and is copied by
// value into SendEndpoint and NewReceiveEndpoint; fine while the embedded
// values are pointers (as the mq.New* constructors return) — confirm.
time.Now().UnixNano()) 35 | endpoint.MessageSender.Send(b) 36 | } 37 | 38 | stop := time.Now().UnixNano() 39 | ms := float32(stop-start) / 1000000 40 | log.Printf("Sent %d messages in %f ms\n", numberToSend, ms) 41 | log.Printf("Sent %f per second\n", 1000*float32(numberToSend)/ms) 42 | } 43 | -------------------------------------------------------------------------------- /benchmark/tester.go: -------------------------------------------------------------------------------- 1 | package benchmark 2 | 3 | import "log" 4 | 5 | type Tester struct { 6 | Name string 7 | MessageSize int 8 | MessageCount int 9 | TestLatency bool 10 | MessageSender 11 | MessageReceiver 12 | } 13 | 14 | func (tester Tester) Test() { 15 | log.Printf("Begin %s test", tester.Name) 16 | tester.Setup() 17 | defer tester.Teardown() 18 | 19 | if tester.TestLatency { 20 | tester.testLatency() 21 | } else { 22 | tester.testThroughput() 23 | } 24 | 25 | log.Printf("End %s test", tester.Name) 26 | } 27 | 28 | func (tester Tester) testThroughput() { 29 | receiver := NewReceiveEndpoint(tester, tester.MessageCount) 30 | sender := &SendEndpoint{MessageSender: tester} 31 | sender.TestThroughput(tester.MessageSize, tester.MessageCount) 32 | receiver.WaitForCompletion() 33 | } 34 | 35 | func (tester Tester) testLatency() { 36 | receiver := NewReceiveEndpoint(tester, tester.MessageCount) 37 | sender := &SendEndpoint{MessageSender: tester} 38 | sender.TestLatency(tester.MessageSize, tester.MessageCount) 39 | receiver.WaitForCompletion() 40 | } 41 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "strconv" 8 | 9 | "github.com/tylertreat/mq-benchmarking/benchmark" 10 | "github.com/tylertreat/mq-benchmarking/benchmark/mq" 11 | ) 12 | 13 | func newTester(subject string, testLatency bool, msgCount, msgSize int) *benchmark.Tester
// NOTE(review): every case in this switch is the identical three-line pattern;
// a map[string]func(int, bool) (benchmark.MessageSender, benchmark.MessageReceiver)
// (or returning the concrete type, since each mq.New* value serves as both
// sender and receiver) would collapse ~60 lines and make adding subjects a
// one-liner. Behavior-neutral refactor only — not done here.
// NOTE(review): the &benchmark.Tester{...} return below uses an UNKEYED
// composite literal (flagged by `go vet`); keyed fields would survive any
// reordering of Tester's fields in tester.go.
// NOTE(review): "surge" is accepted here but is missing from the usage string
// built in main() and from the README's subject list — add it there.
{ 14 | var messageSender benchmark.MessageSender 15 | var messageReceiver benchmark.MessageReceiver 16 | 17 | switch subject { 18 | case "inproc": 19 | inproc := mq.NewInproc(msgCount, testLatency) 20 | messageSender = inproc 21 | messageReceiver = inproc 22 | case "zeromq": 23 | zeromq := mq.NewZeromq(msgCount, testLatency) 24 | messageSender = zeromq 25 | messageReceiver = zeromq 26 | case "nanomsg": 27 | nanomsg := mq.NewNanomsg(msgCount, testLatency) 28 | messageSender = nanomsg 29 | messageReceiver = nanomsg 30 | case "kestrel": 31 | kestrel := mq.NewKestrel(msgCount, testLatency) 32 | messageSender = kestrel 33 | messageReceiver = kestrel 34 | case "kafka": 35 | kafka := mq.NewKafka(msgCount, testLatency) 36 | messageSender = kafka 37 | messageReceiver = kafka 38 | case "rabbitmq": 39 | rabbitmq := mq.NewRabbitmq(msgCount, testLatency) 40 | messageSender = rabbitmq 41 | messageReceiver = rabbitmq 42 | case "nsq": 43 | nsq := mq.NewNsq(msgCount, testLatency) 44 | messageSender = nsq 45 | messageReceiver = nsq 46 | case "redis": 47 | redis := mq.NewRedis(msgCount, testLatency) 48 | messageSender = redis 49 | messageReceiver = redis 50 | case "activemq": 51 | activemq := mq.NewActivemq(msgCount, testLatency) 52 | messageSender = activemq 53 | messageReceiver = activemq 54 | case "nats": 55 | gnatsd := mq.NewGnatsd(msgCount, testLatency) 56 | messageSender = gnatsd 57 | messageReceiver = gnatsd 58 | case "beanstalkd": 59 | beanstalkd := mq.NewBeanstalkd(msgCount, testLatency) 60 | messageSender = beanstalkd 61 | messageReceiver = beanstalkd 62 | case "iris": 63 | iris := mq.NewIris(msgCount, testLatency) 64 | messageSender = iris 65 | messageReceiver = iris 66 | case "surge": 67 | surge := mq.NewSurgeMQ(msgCount, testLatency) 68 | messageSender = surge 69 | messageReceiver = surge 70 | default: 71 | return nil 72 | } 73 | 74 | return &benchmark.Tester{ 75 | subject, 76 | msgSize, 77 | msgCount, 78 | testLatency, 79 | messageSender, 80 | messageReceiver, 81 | } 82
// NOTE(review): the usage string below omits the "surge" subject that
// newTester's switch accepts, so the supported SurgeMQ benchmark is
// undiscoverable from --help-style output (the README omits it too).
// NOTE(review): parse failures in parseArgs print usage via log.Print while
// the unknown-subject path in main uses log.Println — harmless but
// inconsistent; pick one.
// NOTE(review): parseArgs silently treats missing optional args as defaults
// (latency=false, count=1000000, size=1000) — matches the README contract.
| } 83 | 84 | func parseArgs(usage string) (string, bool, int, int) { 85 | 86 | if len(os.Args) < 2 { 87 | log.Print(usage) 88 | os.Exit(1) 89 | } 90 | 91 | test := os.Args[1] 92 | messageCount := 1000000 93 | messageSize := 1000 94 | testLatency := false 95 | 96 | if len(os.Args) > 2 { 97 | latency, err := strconv.ParseBool(os.Args[2]) 98 | if err != nil { 99 | log.Print(usage) 100 | os.Exit(1) 101 | } 102 | testLatency = latency 103 | } 104 | 105 | if len(os.Args) > 3 { 106 | count, err := strconv.Atoi(os.Args[3]) 107 | if err != nil { 108 | log.Print(usage) 109 | os.Exit(1) 110 | } 111 | messageCount = count 112 | } 113 | 114 | if len(os.Args) > 4 { 115 | size, err := strconv.Atoi(os.Args[4]) 116 | if err != nil { 117 | log.Print(usage) 118 | os.Exit(1) 119 | } 120 | messageSize = size 121 | } 122 | 123 | return test, testLatency, messageCount, messageSize 124 | } 125 | 126 | func main() { 127 | usage := fmt.Sprintf( 128 | "usage: %s "+ 129 | "{"+ 130 | "inproc|"+ 131 | "zeromq|"+ 132 | "nanomsg|"+ 133 | "kestrel|"+ 134 | "kafka|"+ 135 | "rabbitmq|"+ 136 | "nsq|"+ 137 | "redis|"+ 138 | "activemq|"+ 139 | "nats|"+ 140 | "beanstalkd|"+ 141 | "iris"+ 142 | "} "+ 143 | "[test_latency] [num_messages] [message_size]", 144 | os.Args[0]) 145 | 146 | tester := newTester(parseArgs(usage)) 147 | if tester == nil { 148 | log.Println(usage) 149 | os.Exit(1) 150 | } 151 | 152 | tester.Test() 153 | } 154 | --------------------------------------------------------------------------------