├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── loadtest
└── main.go
├── many
└── main.go
├── mqtt.go
├── mqttsrv
└── main.go
├── pingtest
└── main.go
├── pub
└── main.go
├── smqttsrv
└── main.go
├── sub
└── main.go
├── ticktock
└── main.go
└── wild_test.go
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | loadtest/loadtest
3 | pingtest/pingtest
4 | pub/pub
5 | sub/sub
6 | smqttsrv/server.crt
7 | smqttsrv/server.key
8 | smqttsrv/smqttsrv
9 | mqttsrv/mqttsrv
10 | vbridge/vbridge
11 | ticktock/ticktock
12 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 |
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013 Jeff R. Allen. All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are
5 | met:
6 |
7 | * Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 | * Redistributions in binary form must reproduce the above
10 | copyright notice, this list of conditions and the following disclaimer
11 | in the documentation and/or other materials provided with the
12 | distribution.
13 | * The names of the contributors may not be used to
14 | endorse or promote products derived from this software without
15 | specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | mqtt
2 | ====
3 |
4 | MQTT Clients, Servers and Load Testers in Go
5 |
6 | For docs, see: http://godoc.org/github.com/jeffallen/mqtt
7 |
8 | For a little discussion of this code see: http://blog.nella.org/mqtt-code-golf
9 |
10 | Limitations
11 | -----------
12 |
13 | At this time, the following limitations apply:
14 | * QoS level 0 only; messages are only stored in RAM
15 | * Retain works, but at QoS level 0. Retained messages are lost on server restart.
16 | * Last will messages are not implemented.
17 | * Keepalive and timeouts are not implemented.
18 |
19 | Servers
20 | -------
21 |
22 | The example MQTT servers are in directories mqttsrv and smqttsrv (secured with TLS).
23 |
24 | Benchmarking Tools
25 | ------------------
26 |
27 | To use the benchmarking tools, cd into pingtest, loadtest, or many and type "go build". The tools have reasonable defaults, but you'll also want to use the -help flag to find out what can be tuned.
28 |
29 | All benchmarks suck, and these three suck in different ways.
30 |
31 | pingtest simulates a number of pairs of clients who are bouncing messages between them as fast as possible. It aims to measure latency of messages through the system when under load.
32 |
33 | loadtest simulates a number of pairs of clients where one is sending as fast as possible to the other. Realistically, this ends up testing the ability of the system to decode and queue messages, because any slight imbalance in scheduling of readers causes a pile up of messages from the writer slamming them down the throat of the server.
34 |
35 | many simulates a large number of clients who send a low transaction rate. The goal is to eventually use this to achieve 1 million (and more?) concurrent, active MQTT sessions in one server. So far, mqttsrv has survived a load of 40k concurrent connections from many.
36 |
--------------------------------------------------------------------------------
/loadtest/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "log"
7 | "net"
8 | "os"
9 | "sync"
10 | "time"
11 |
12 | proto "github.com/huin/mqtt"
13 | "github.com/jeffallen/mqtt"
14 | )
15 |
// Command-line knobs for the load test.
var conns = flag.Int("conns", 100, "how many connections")
var messages = flag.Int("messages", 100, "how many messages")
var host = flag.String("host", "localhost:1883", "hostname of broker")
var dump = flag.Bool("dump", false, "dump messages?")
var user = flag.String("user", "", "username")
var pass = flag.String("pass", "", "password")

// cwg counts connection attempts (one Done per call to connect) so the
// goroutine in main can report how long establishing them all took.
var cwg sync.WaitGroup
24 |
25 | func main() {
26 | log.SetFlags(log.Lmicroseconds)
27 | flag.Parse()
28 |
29 | if *conns&1 != 0 {
30 | log.Fatal("Number of connections should be even.")
31 | }
32 |
33 | timeStart := time.Now()
34 | timeConnStart := time.Now()
35 |
36 | // a system to check how long connection establishment takes
37 | cwg.Add(*conns)
38 | go func() {
39 | cwg.Wait()
40 | log.Print("all connections made")
41 | elapsed := time.Now().Sub(timeConnStart)
42 | log.Print("Time to establish connections: ", elapsed)
43 | }()
44 |
45 | var wg sync.WaitGroup
46 | publishers := *conns / 2
47 | for i := 0; i < publishers; i++ {
48 | wg.Add(2)
49 | go func(i int) {
50 | sub(i, &wg)
51 | pub(i)
52 | wg.Done()
53 | }(i)
54 | }
55 | log.Print("all started")
56 | wg.Wait()
57 | log.Print("all finished")
58 |
59 | timeEnd := time.Now()
60 |
61 | rc := 0
62 | loop:
63 | for {
64 | select {
65 | case i := <-bad:
66 | println("subscriber missed messages: ", i)
67 | rc = 1
68 | default:
69 | // nothing to read on bad, done.
70 | break loop
71 | }
72 | }
73 |
74 | elapsed := timeEnd.Sub(timeStart)
75 | totmsg := float64(*messages * publishers)
76 | nspermsg := float64(elapsed) / totmsg
77 | msgpersec := int(1 / nspermsg * 1e9)
78 |
79 | log.Print("elapsed time: ", elapsed)
80 | log.Print("messages : ", totmsg)
81 | log.Print("messages/sec: ", msgpersec)
82 |
83 | os.Exit(rc)
84 | }
85 |
86 | func pub(i int) {
87 | topic := fmt.Sprintf("loadtest/%v", i)
88 |
89 | var cc *mqtt.ClientConn
90 | if cc = connect(fmt.Sprintf("pub%v", i)); cc == nil {
91 | return
92 | }
93 |
94 | for i := 0; i < *messages; i++ {
95 | cc.Publish(&proto.Publish{
96 | Header: proto.Header{QosLevel: proto.QosAtMostOnce},
97 | TopicName: topic,
98 | Payload: proto.BytesPayload([]byte("loadtest payload")),
99 | })
100 | }
101 |
102 | cc.Disconnect()
103 | }
104 |
// A mechanism to limit parallelism during connects.
const numTokens = 100

var tokens = make(chan struct{}, numTokens)

func init() {
	// Pre-fill the bucket so up to numTokens connects may run at once.
	for i := 0; i < numTokens; i++ {
		tokens <- struct{}{}
	}
}
116 |
117 | func connect(who string) *mqtt.ClientConn {
118 | tok := <-tokens
119 | defer func() { tokens <- tok }()
120 |
121 | conn, err := net.Dial("tcp", *host)
122 | if err != nil {
123 | fmt.Fprintf(os.Stderr, "dial: %v\n", err)
124 | os.Exit(2)
125 | }
126 | cc := mqtt.NewClientConn(conn)
127 | cc.Dump = *dump
128 | cc.ClientId = who
129 |
130 | err = cc.Connect(*user, *pass)
131 | if err != nil {
132 | fmt.Println(err)
133 | return nil
134 | }
135 |
136 | cwg.Done()
137 | return cc
138 | }
139 |
// bad reports subscribers that did not receive every expected message.
//
// The channel must be buffered: subscribers send to it *before*
// calling wg.Done(), while main only drains it after wg.Wait()
// returns. With an unbuffered channel a failing subscriber blocked
// forever on the send and deadlocked the whole test. The capacity
// bounds how many failures can be reported and must be at least the
// subscriber count (*conns/2).
var bad = make(chan int, 8192)
142 |
143 | func sub(i int, wg *sync.WaitGroup) {
144 | topic := fmt.Sprintf("loadtest/%v", i)
145 |
146 | var cc *mqtt.ClientConn
147 | if cc = connect(fmt.Sprintf("sub%v", i)); cc == nil {
148 | return
149 | }
150 |
151 | ack := cc.Subscribe([]proto.TopicQos{
152 | {topic, proto.QosAtLeastOnce},
153 | })
154 | if *dump {
155 | fmt.Printf("suback: %#v\n", ack)
156 | }
157 |
158 | go func() {
159 | ok := false
160 | count := 0
161 | for range cc.Incoming {
162 | count++
163 | if count == *messages {
164 | cc.Disconnect()
165 | ok = true
166 | }
167 | }
168 |
169 | if !ok {
170 | bad <- i
171 | }
172 |
173 | wg.Done()
174 | }()
175 | }
176 |
--------------------------------------------------------------------------------
/many/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "log"
6 | "math/rand"
7 | "net"
8 | "os"
9 | "time"
10 |
11 | proto "github.com/huin/mqtt"
12 | "github.com/jeffallen/mqtt"
13 | )
14 |
// Command-line knobs.
var conns = flag.Int("conns", 10, "how many conns (0 means infinite)")
var host = flag.String("host", "localhost:1883", "hostname of broker")
var user = flag.String("user", "", "username")
var pass = flag.String("pass", "", "password")
var dump = flag.Bool("dump", false, "dump messages?")
var wait = flag.Int("wait", 10, "ms to wait between client connects")
var pace = flag.Int("pace", 60, "send a message on average once every pace seconds")

// Message parameters shared by every client; set once in main before
// any client goroutine starts.
var payload proto.Payload
var topic string
25 |
26 | func main() {
27 | flag.Parse()
28 |
29 | if flag.NArg() != 2 {
30 | topic = "many"
31 | payload = proto.BytesPayload([]byte("hello"))
32 | } else {
33 | topic = flag.Arg(0)
34 | payload = proto.BytesPayload([]byte(flag.Arg(1)))
35 | }
36 |
37 | if *conns == 0 {
38 | *conns = -1
39 | }
40 |
41 | i := 0
42 | for {
43 | go client(i)
44 | i++
45 |
46 | *conns--
47 | if *conns == 0 {
48 | break
49 | }
50 | time.Sleep(time.Duration(*wait) * time.Millisecond)
51 | }
52 |
53 | // sleep forever
54 | <-make(chan struct{})
55 | }
56 |
57 | func client(i int) {
58 | log.Print("starting client ", i)
59 | conn, err := net.Dial("tcp", *host)
60 | if err != nil {
61 | log.Fatal("dial: ", err)
62 | }
63 | cc := mqtt.NewClientConn(conn)
64 | cc.Dump = *dump
65 |
66 | if err := cc.Connect(*user, *pass); err != nil {
67 | log.Fatalf("connect: %v\n", err)
68 | os.Exit(1)
69 | }
70 |
71 | half := int32(*pace / 2)
72 |
73 | for {
74 | cc.Publish(&proto.Publish{
75 | Header: proto.Header{},
76 | TopicName: topic,
77 | Payload: payload,
78 | })
79 | sltime := rand.Int31n(half) - (half / 2) + int32(*pace)
80 | time.Sleep(time.Duration(sltime) * time.Second)
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/mqtt.go:
--------------------------------------------------------------------------------
1 | // Package mqtt implements MQTT clients and servers.
2 | package mqtt
3 |
4 | import (
5 | crand "crypto/rand"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "log"
10 | "math/rand"
11 | "net"
12 | "runtime"
13 | "strings"
14 | "sync"
15 | "sync/atomic"
16 | "time"
17 |
18 | proto "github.com/huin/mqtt"
19 | )
20 |
// A random number generator ready to make client-id's, if
// they do not provide them to us.
var cliRand *rand.Rand

func init() {
	// Seed math/rand from both the wall clock and crypto/rand so two
	// processes started at the same instant still diverge.
	var seed int64
	var sb [4]byte
	// Best-effort: if crand.Read fails, sb stays zero and the clock
	// alone supplies the seed.
	crand.Read(sb[:])
	// High 32 bits from the clock's nanoseconds, low 32 bits from the
	// four random bytes.
	seed = int64(time.Now().Nanosecond())<<32 |
		int64(sb[0])<<24 | int64(sb[1])<<16 |
		int64(sb[2])<<8 | int64(sb[3])
	cliRand = rand.New(rand.NewSource(seed))
}
34 |
// stats tracks broker-wide counters. The counter fields are updated
// via sync/atomic because they are bumped from many goroutines.
type stats struct {
	recv       int64 // messages received from clients
	sent       int64 // messages sent to clients
	clients    int64 // currently connected clients
	clientsMax int64 // high-water mark of connected clients
	lastmsgs   int64 // recv+sent at the previous interval; touched only by publish
}

func (s *stats) messageRecv()      { atomic.AddInt64(&s.recv, 1) }
func (s *stats) messageSend()      { atomic.AddInt64(&s.sent, 1) }
func (s *stats) clientConnect()    { atomic.AddInt64(&s.clients, 1) }
func (s *stats) clientDisconnect() { atomic.AddInt64(&s.clients, -1) }
47 |
48 | func statsMessage(topic string, stat int64) *proto.Publish {
49 | return &proto.Publish{
50 | Header: header(dupFalse, proto.QosAtMostOnce, retainTrue),
51 | TopicName: topic,
52 | Payload: newIntPayload(stat),
53 | }
54 | }
55 |
56 | func (s *stats) publish(sub *subscriptions, interval time.Duration) {
57 | clients := atomic.LoadInt64(&s.clients)
58 | clientsMax := atomic.LoadInt64(&s.clientsMax)
59 | if clients > clientsMax {
60 | clientsMax = clients
61 | atomic.StoreInt64(&s.clientsMax, clientsMax)
62 | }
63 | sub.submit(nil, statsMessage("$SYS/broker/clients/active", clients))
64 | sub.submit(nil, statsMessage("$SYS/broker/clients/maximum", clientsMax))
65 | sub.submit(nil, statsMessage("$SYS/broker/messages/received",
66 | atomic.LoadInt64(&s.recv)))
67 | sub.submit(nil, statsMessage("$SYS/broker/messages/sent",
68 | atomic.LoadInt64(&s.sent)))
69 |
70 | msgs := atomic.LoadInt64(&s.recv) + atomic.LoadInt64(&s.sent)
71 | msgpersec := (msgs - s.lastmsgs) / int64(interval/time.Second)
72 | // no need for atomic because we are the only reader/writer of it
73 | s.lastmsgs = msgs
74 |
75 | sub.submit(nil, statsMessage("$SYS/broker/messages/per-sec", msgpersec))
76 | }
77 |
// An intPayload implements proto.Payload, and is an int64 that
// formats itself and then prints itself into the payload.
type intPayload string

// newIntPayload renders i in decimal form.
func newIntPayload(i int64) intPayload {
	return intPayload(fmt.Sprint(i))
}

// ReadPayload is a no-op: stats payloads are write-only.
func (ip intPayload) ReadPayload(r io.Reader) error {
	// not implemented
	return nil
}

// WritePayload emits the formatted number onto w.
func (ip intPayload) WritePayload(w io.Writer) error {
	_, err := io.WriteString(w, string(ip))
	return err
}

// Size reports the payload length in bytes.
func (ip intPayload) Size() int {
	return len(ip)
}
96 |
// A retain holds information necessary to correctly manage retained
// messages.
//
// This needs to hold copies of the proto.Publish, not pointers to
// it, or else we can send out one with the wrong retain flag.
type retain struct {
	m proto.Publish // copy of the retained message; Retain set true by run
	// NOTE(review): wild is never populated by the visible code
	// (run stores retain{m: msg} only) — confirm before relying on it.
	wild wild
}
106 |
// subscriptions routes published messages to subscribed connections.
// Fan-out is decoupled from the per-connection reader goroutines by a
// pool of workers consuming from the posts channel.
type subscriptions struct {
	workers int       // number of worker goroutines started by newSubscriptions
	posts   chan post // queue of incoming PUBLISHes awaiting fan-out

	mu        sync.Mutex // guards access to fields below
	subs      map[string][]*incomingConn
	wildcards []wild
	retain    map[string]retain
	// NOTE(review): stats is never assigned or read in the visible
	// code — confirm whether it is still needed.
	stats *stats
}

// The length of the queue that subscription processing
// workers are taking from.
const postQueue = 100
121 |
122 | func newSubscriptions(workers int) *subscriptions {
123 | s := &subscriptions{
124 | subs: make(map[string][]*incomingConn),
125 | retain: make(map[string]retain),
126 | posts: make(chan post, postQueue),
127 | workers: workers,
128 | }
129 | for i := 0; i < s.workers; i++ {
130 | go s.run(i)
131 | }
132 | return s
133 | }
134 |
135 | func (s *subscriptions) sendRetain(topic string, c *incomingConn) {
136 | s.mu.Lock()
137 | var tlist []string
138 | if isWildcard(topic) {
139 |
140 | // TODO: select matching topics from the retain map
141 | } else {
142 | tlist = []string{topic}
143 | }
144 | for _, t := range tlist {
145 | if r, ok := s.retain[t]; ok {
146 | c.submit(&r.m)
147 | }
148 | }
149 | s.mu.Unlock()
150 | }
151 |
152 | func (s *subscriptions) add(topic string, c *incomingConn) {
153 | s.mu.Lock()
154 | defer s.mu.Unlock()
155 | if isWildcard(topic) {
156 | w := newWild(topic, c)
157 | if w.valid() {
158 | s.wildcards = append(s.wildcards, w)
159 | }
160 | } else {
161 | s.subs[topic] = append(s.subs[topic], c)
162 | }
163 | }
164 |
// A wild is a parsed wildcard subscription: the topic pattern split on
// "/" plus the connection that subscribed with it.
type wild struct {
	wild []string      // pattern parts; may contain "+" and "#"
	c    *incomingConn // subscriber to notify on a match
}

// newWild parses topic into a wild for connection c. Callers should
// check valid() before using the result.
func newWild(topic string, c *incomingConn) wild {
	return wild{wild: strings.Split(topic, "/"), c: c}
}
173 |
// matches reports whether this wildcard pattern matches a topic that
// has already been split on "/". "+" matches exactly one part; "#"
// matches all remaining parts (including none).
func (w wild) matches(parts []string) bool {
	i := 0
	for i < len(parts) {
		// topic is longer, no match
		if i >= len(w.wild) {
			return false
		}
		// matched up to here, and now the wildcard says "all others will match"
		if w.wild[i] == "#" {
			return true
		}
		// text does not match, and there wasn't a + to excuse it
		if parts[i] != w.wild[i] && w.wild[i] != "+" {
			return false
		}
		i++
	}

	// make finance/stock/ibm/# match finance/stock/ibm
	if i == len(w.wild)-1 && w.wild[len(w.wild)-1] == "#" {
		return true
	}

	// Pattern and topic were consumed in lockstep: exact match.
	if i == len(w.wild) {
		return true
	}
	// Pattern still has unmatched parts left over.
	return false
}
202 |
203 | // Find all connections that are subscribed to this topic.
204 | func (s *subscriptions) subscribers(topic string) []*incomingConn {
205 | s.mu.Lock()
206 | defer s.mu.Unlock()
207 |
208 | // non-wildcard subscribers
209 | res := s.subs[topic]
210 |
211 | // process wildcards
212 | parts := strings.Split(topic, "/")
213 | for _, w := range s.wildcards {
214 | if w.matches(parts) {
215 | res = append(res, w.c)
216 | }
217 | }
218 |
219 | return res
220 | }
221 |
222 | // Remove all subscriptions that refer to a connection.
223 | func (s *subscriptions) unsubAll(c *incomingConn) {
224 | s.mu.Lock()
225 | for _, v := range s.subs {
226 | for i := range v {
227 | if v[i] == c {
228 | v[i] = nil
229 | }
230 | }
231 | }
232 |
233 | // remove any associated entries in the wildcard list
234 | var wildNew []wild
235 | for i := 0; i < len(s.wildcards); i++ {
236 | if s.wildcards[i].c != c {
237 | wildNew = append(wildNew, s.wildcards[i])
238 | }
239 | }
240 | s.wildcards = wildNew
241 |
242 | s.mu.Unlock()
243 | }
244 |
245 | // Remove the subscription to topic for a given connection.
246 | func (s *subscriptions) unsub(topic string, c *incomingConn) {
247 | s.mu.Lock()
248 | if subs, ok := s.subs[topic]; ok {
249 | nils := 0
250 |
251 | // Search the list, removing references to our connection.
252 | // At the same time, count the nils to see if this list is now empty.
253 | for i := 0; i < len(subs); i++ {
254 | if subs[i] == c {
255 | subs[i] = nil
256 | }
257 | if subs[i] == nil {
258 | nils++
259 | }
260 | }
261 |
262 | if nils == len(subs) {
263 | delete(s.subs, topic)
264 | }
265 | }
266 | s.mu.Unlock()
267 | }
268 |
269 | // The subscription processing worker.
270 | func (s *subscriptions) run(id int) {
271 | tag := fmt.Sprintf("worker %d ", id)
272 | log.Print(tag, "started")
273 | for post := range s.posts {
274 | // Remember the original retain setting, but send out immediate
275 | // copies without retain: "When a server sends a PUBLISH to a client
276 | // as a result of a subscription that already existed when the
277 | // original PUBLISH arrived, the Retain flag should not be set,
278 | // regardless of the Retain flag of the original PUBLISH.
279 | isRetain := post.m.Header.Retain
280 | post.m.Header.Retain = false
281 |
282 | // Handle "retain with payload size zero = delete retain".
283 | // Once the delete is done, return instead of continuing.
284 | if isRetain && post.m.Payload.Size() == 0 {
285 | s.mu.Lock()
286 | delete(s.retain, post.m.TopicName)
287 | s.mu.Unlock()
288 | return
289 | }
290 |
291 | // Find all the connections that should be notified of this message.
292 | conns := s.subscribers(post.m.TopicName)
293 |
294 | // Queue the outgoing messages
295 | for _, c := range conns {
296 | // Do not echo messages back to where they came from.
297 | if c == post.c {
298 | continue
299 | }
300 |
301 | if c != nil {
302 | c.submit(post.m)
303 | }
304 | }
305 |
306 | if isRetain {
307 | s.mu.Lock()
308 | // Save a copy of it, and set that copy's Retain to true, so that
309 | // when we send it out later we notify new subscribers that this
310 | // is an old message.
311 | msg := *post.m
312 | msg.Header.Retain = true
313 | s.retain[post.m.TopicName] = retain{m: msg}
314 | s.mu.Unlock()
315 | }
316 | }
317 | }
318 |
// submit queues m for fan-out to subscribers. c identifies the origin
// connection (nil for server-generated messages) so run can avoid
// echoing the message back to it. Blocks when the post queue is full.
func (s *subscriptions) submit(c *incomingConn, m *proto.Publish) {
	s.posts <- post{c: c, m: m}
}

// A post is a unit of work for the subscription processing workers.
type post struct {
	c *incomingConn  // originating connection, or nil
	m *proto.Publish // the message to deliver
}
328 |
// A Server holds all the state associated with an MQTT server.
type Server struct {
	l     net.Listener
	subs  *subscriptions
	stats *stats
	Done  chan struct{}
	// StatsInterval controls how often $SYS statistics are published.
	// Defaults to 10 seconds.
	// NOTE(review): the original comment said it "must be set using
	// sync/atomic.StoreInt64()", but the stats goroutine reads it with
	// a plain (non-atomic) load; set it before Start to be safe.
	StatsInterval time.Duration
	Dump          bool // When true, dump the messages in and out.
	// NOTE(review): rand is never assigned or used in the visible
	// code — confirm whether it is still needed.
	rand *rand.Rand
}
339 |
// NewServer creates a new MQTT server, which accepts connections from
// the given listener. When the server is stopped (for instance by
// another goroutine closing the net.Listener), channel Done will become
// readable.
func NewServer(l net.Listener) *Server {
	svr := &Server{
		l:             l,
		stats:         &stats{},
		Done:          make(chan struct{}),
		StatsInterval: time.Second * 10,
		// One fan-out worker per available CPU.
		subs: newSubscriptions(runtime.GOMAXPROCS(0)),
	}

	// start the stats reporting goroutine
	go func() {
		for {
			svr.stats.publish(svr.subs, svr.StatsInterval)
			select {
			case <-svr.Done:
				return
			default:
				// keep going
			}
			// Plain sleep rather than a ticker: this goroutine may
			// linger up to one interval after Done closes.
			time.Sleep(svr.StatsInterval)
		}
	}()

	return svr
}
369 |
370 | // Start makes the Server start accepting and handling connections.
371 | func (s *Server) Start() {
372 | go func() {
373 | for {
374 | conn, err := s.l.Accept()
375 | if err != nil {
376 | log.Print("Accept: ", err)
377 | break
378 | }
379 |
380 | cli := s.newIncomingConn(conn)
381 | s.stats.clientConnect()
382 | cli.start()
383 | }
384 | close(s.Done)
385 | }()
386 | }
387 |
// An IncomingConn represents a connection into a Server.
type incomingConn struct {
	svr      *Server
	conn     net.Conn
	jobs     chan job // outbound messages, drained by the writer goroutine
	clientid string   // client identifier from the CONNECT message
	// NOTE(review): Done is created but never closed in the visible
	// code — confirm whether callers can actually wait on it.
	Done chan struct{}
}

// clients maps client-ids to live connections so a reconnect can
// displace an existing session; guarded by clientsMu.
var clients = make(map[string]*incomingConn)
var clientsMu sync.Mutex

// Capacity of each connection's outbound job queue.
const sendingQueueLength = 10000
401 |
402 | // newIncomingConn creates a new incomingConn associated with this
403 | // server. The connection becomes the property of the incomingConn
404 | // and should not be touched again by the caller until the Done
405 | // channel becomes readable.
406 | func (s *Server) newIncomingConn(conn net.Conn) *incomingConn {
407 | return &incomingConn{
408 | svr: s,
409 | conn: conn,
410 | jobs: make(chan job, sendingQueueLength),
411 | Done: make(chan struct{}),
412 | }
413 | }
414 |
// A receipt is closed by the writer goroutine once the associated
// message has been encoded onto the connection.
type receipt chan struct{}

// Wait for the receipt to indicate that the job is done.
func (r receipt) wait() {
	// TODO: timeout
	<-r
}

// A job pairs an outbound message with an optional receipt to close
// after the message has been written.
type job struct {
	m proto.Message
	r receipt // nil when the sender does not care about completion
}
427 |
// Start reading and writing on this connection.
// The reader closes c.jobs on exit, which in turn stops the writer.
func (c *incomingConn) start() {
	go c.reader()
	go c.writer()
}
433 |
434 | // Add this connection to the map, or find out that an existing connection
435 | // already exists for the same client-id.
436 | func (c *incomingConn) add() *incomingConn {
437 | clientsMu.Lock()
438 | defer clientsMu.Unlock()
439 |
440 | existing, ok := clients[c.clientid]
441 | if ok {
442 | // this client id already exists, return it
443 | return existing
444 | }
445 |
446 | clients[c.clientid] = c
447 | return nil
448 | }
449 |
450 | // Delete a connection; the connection must be closed by the caller first.
451 | func (c *incomingConn) del() {
452 | clientsMu.Lock()
453 | delete(clients, c.clientid)
454 | clientsMu.Unlock()
455 | return
456 | }
457 |
458 | // Queue a message; no notification of sending is done.
459 | func (c *incomingConn) submit(m proto.Message) {
460 | j := job{m: m}
461 | select {
462 | case c.jobs <- j:
463 | default:
464 | log.Print(c, ": failed to submit message")
465 | }
466 | return
467 | }
468 |
// String renders the connection as its client-id; fmt and log use it
// whenever an incomingConn is printed.
func (c *incomingConn) String() string {
	return fmt.Sprintf("{IncomingConn: %v}", c.clientid)
}
472 |
// Queue a message, returns a channel that will be readable
// when the message is sent.
// Unlike submit, this blocks when the outbound queue is full.
func (c *incomingConn) submitSync(m proto.Message) receipt {
	j := job{m: m, r: make(receipt)}
	c.jobs <- j
	return j.r
}
480 |
// reader is the per-connection receive loop: it decodes MQTT messages
// from the network and dispatches on message type, implementing the
// server side of the protocol (CONNECT, PUBLISH, SUBSCRIBE, ...).
// Any protocol error terminates the loop and thereby the connection.
func (c *incomingConn) reader() {
	// On exit, close the connection and arrange for the writer to exit
	// by closing the output channel.
	defer func() {
		c.conn.Close()
		c.svr.stats.clientDisconnect()
		close(c.jobs)
	}()

	for {
		// TODO: timeout (first message and/or keepalives)
		m, err := proto.DecodeOneMessage(c.conn, nil)
		if err != nil {
			if err == io.EOF {
				return
			}
			// String match: on old Go versions this error is not a
			// typed value we can compare against.
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			log.Print("reader: ", err)
			return
		}
		c.svr.stats.messageRecv()

		if c.svr.Dump {
			log.Printf("dump in: %T", m)
		}

		switch m := m.(type) {
		case *proto.Connect:
			rc := proto.RetCodeAccepted

			if m.ProtocolName != "MQIsdp" ||
				m.ProtocolVersion != 3 {
				log.Print("reader: reject connection from ", m.ProtocolName, " version ", m.ProtocolVersion)
				rc = proto.RetCodeUnacceptableProtocolVersion
			}

			// Check client id.
			if len(m.ClientId) < 1 || len(m.ClientId) > 23 {
				rc = proto.RetCodeIdentifierRejected
			}
			c.clientid = m.ClientId

			// Disconnect existing connections.
			if existing := c.add(); existing != nil {
				disconnect := &proto.Disconnect{}
				r := existing.submitSync(disconnect)
				r.wait()
				// NOTE(review): the result of this second add() is
				// ignored; if the old entry has not been removed from
				// the map yet, this connection may stay unregistered —
				// confirm.
				c.add()
			}

			// TODO: Last will

			connack := &proto.ConnAck{
				ReturnCode: rc,
			}
			c.submit(connack)

			// close connection if it was a bad connect
			if rc != proto.RetCodeAccepted {
				log.Printf("Connection refused for %v: %v", c.conn.RemoteAddr(), ConnectionErrors[rc])
				return
			}

			// Log in mosquitto format.
			clean := 0
			if m.CleanSession {
				clean = 1
			}
			log.Printf("New client connected from %v as %v (c%v, k%v).", c.conn.RemoteAddr(), c.clientid, clean, m.KeepAliveTimer)

		case *proto.Publish:
			// TODO: Proper QoS support
			if m.Header.QosLevel != proto.QosAtMostOnce {
				log.Printf("reader: no support for QoS %v yet", m.Header.QosLevel)
				return
			}
			// NOTE(review): dead code — the check above already
			// returned for any QoS other than QosAtMostOnce, so this
			// condition can never be true.
			if m.Header.QosLevel != proto.QosAtMostOnce && m.MessageId == 0 {
				// Invalid message ID. See MQTT-2.3.1-1.
				log.Printf("reader: invalid MessageId in PUBLISH.")
				return
			}
			if isWildcard(m.TopicName) {
				log.Print("reader: ignoring PUBLISH with wildcard topic ", m.TopicName)
			} else {
				c.svr.subs.submit(c, m)
			}
			// NOTE(review): a PUBACK is sent even for QoS 0 publishes,
			// where MessageId is zero — confirm that this is intended.
			c.submit(&proto.PubAck{MessageId: m.MessageId})

		case *proto.PingReq:
			c.submit(&proto.PingResp{})

		case *proto.Subscribe:
			// SUBSCRIBE is required to be sent at QoS 1.
			if m.Header.QosLevel != proto.QosAtLeastOnce {
				// protocol error, disconnect
				return
			}
			if m.MessageId == 0 {
				// Invalid message ID. See MQTT-2.3.1-1.
				log.Printf("reader: invalid MessageId in SUBSCRIBE.")
				return
			}
			suback := &proto.SubAck{
				MessageId: m.MessageId,
				TopicsQos: make([]proto.QosLevel, len(m.Topics)),
			}
			for i, tq := range m.Topics {
				// TODO: Handle varying QoS correctly
				c.svr.subs.add(tq.Topic, c)
				// Only QoS 0 is granted, whatever was requested.
				suback.TopicsQos[i] = proto.QosAtMostOnce
			}
			c.submit(suback)

			// Process retained messages.
			for _, tq := range m.Topics {
				c.svr.subs.sendRetain(tq.Topic, c)
			}

		case *proto.Unsubscribe:
			if m.Header.QosLevel != proto.QosAtMostOnce && m.MessageId == 0 {
				// Invalid message ID. See MQTT-2.3.1-1.
				log.Printf("reader: invalid MessageId in UNSUBSCRIBE.")
				return
			}
			for _, t := range m.Topics {
				c.svr.subs.unsub(t, c)
			}
			ack := &proto.UnsubAck{MessageId: m.MessageId}
			c.submit(ack)

		case *proto.Disconnect:
			return

		default:
			log.Printf("reader: unknown msg type %T", m)
			return
		}
	}
}
621 |
// writer drains c.jobs, encoding each message onto the connection and
// closing the job's receipt (if any) once the write has been attempted.
// It exits when c.jobs is closed by the reader, when a write fails, or
// after sending a Disconnect.
func (c *incomingConn) writer() {

	// Close connection on exit in order to cause reader to exit.
	defer func() {
		c.conn.Close()
		c.del()
		c.svr.subs.unsubAll(c)
	}()

	for job := range c.jobs {
		if c.svr.Dump {
			log.Printf("dump out: %T", job.m)
		}

		// TODO: write timeout
		err := job.m.Encode(c.conn)
		// The receipt is closed whether or not the write succeeded;
		// it signals "attempted", not "delivered".
		if job.r != nil {
			// notifiy the sender that this message is sent
			close(job.r)
		}
		if err != nil {
			// This one is not interesting; it happens when clients
			// disappear before we send their acks.
			oe, isoe := err.(*net.OpError)
			if isoe && oe.Err.Error() == "use of closed network connection" {
				return
			}
			// In Go < 1.5, the error is not an OpError.
			if err.Error() == "use of closed network connection" {
				return
			}

			log.Print("writer: ", err)
			return
		}
		c.svr.stats.messageSend()

		// A server-initiated Disconnect (e.g. a client-id takeover)
		// ends this session once it has been written.
		if _, ok := job.m.(*proto.Disconnect); ok {
			log.Print("writer: sent disconnect message")
			return
		}
	}
}
665 |
666 | // header is used to initialize a proto.Header when the zero value
667 | // is not correct. The zero value of proto.Header is
668 | // the equivalent of header(dupFalse, proto.QosAtMostOnce, retainFalse)
669 | // and is correct for most messages.
670 | func header(d dupFlag, q proto.QosLevel, r retainFlag) proto.Header {
671 | return proto.Header{
672 | DupFlag: bool(d), QosLevel: q, Retain: bool(r),
673 | }
674 | }
675 |
// retainFlag and dupFlag are named bool types so calls to header read
// as header(dupFalse, qos, retainTrue) rather than bare booleans.
type retainFlag bool
type dupFlag bool

const (
	// Every constant carries an explicit type. In the original block
	// retainTrue and dupTrue were untyped bools that only converted
	// to the flag types at each use site.
	retainFalse retainFlag = false
	retainTrue  retainFlag = true
	dupFalse    dupFlag    = false
	dupTrue     dupFlag    = true
)
685 |
// isWildcard reports whether topic contains either MQTT wildcard
// character, "#" or "+".
func isWildcard(topic string) bool {
	return strings.ContainsAny(topic, "#+")
}
692 |
693 | func (w wild) valid() bool {
694 | for i, part := range w.wild {
695 | // catch things like finance#
696 | if isWildcard(part) && len(part) != 1 {
697 | return false
698 | }
699 | // # can only occur as the last part
700 | if part == "#" && i != len(w.wild)-1 {
701 | return false
702 | }
703 | }
704 | return true
705 | }
706 |
// Capacity of a client's outbound and incoming message queues.
const clientQueueLength = 100

// A ClientConn holds all the state associated with a connection
// to an MQTT server. It should be allocated via NewClientConn.
// Concurrent access to a ClientConn is NOT safe.
type ClientConn struct {
	ClientId string              // May be set before the call to Connect.
	Dump     bool                // When true, dump the messages in and out.
	Incoming chan *proto.Publish // Incoming messages arrive on this channel.
	id       uint16              // next MessageId
	out      chan job            // outbound messages, drained by the writer goroutine
	conn     net.Conn
	done     chan struct{}       // This channel will be readable once a Disconnect has been successfully sent and the connection is closed.
	connack  chan *proto.ConnAck // handed the CONNACK by the reader goroutine
	suback   chan *proto.SubAck  // handed each SUBACK by the reader goroutine
}
723 |
724 | // NewClientConn allocates a new ClientConn.
725 | func NewClientConn(c net.Conn) *ClientConn {
726 | cc := &ClientConn{
727 | conn: c,
728 | id: 1,
729 | out: make(chan job, clientQueueLength),
730 | Incoming: make(chan *proto.Publish, clientQueueLength),
731 | done: make(chan struct{}),
732 | connack: make(chan *proto.ConnAck),
733 | suback: make(chan *proto.SubAck),
734 | }
735 | go cc.reader()
736 | go cc.writer()
737 | return cc
738 | }
739 |
// reader is the client-side receive loop: it decodes messages from the
// server and routes each one by type — PUBLISHes to Incoming, CONNACK
// and SUBACK to the channels Connect/Subscribe wait on.
func (c *ClientConn) reader() {
	defer func() {
		// Cause the writer to exit.
		close(c.out)
		// Cause any goroutines waiting on messages to arrive to exit.
		close(c.Incoming)
		c.conn.Close()
	}()

	for {
		// TODO: timeout (first message and/or keepalives)
		m, err := proto.DecodeOneMessage(c.conn, nil)
		if err != nil {
			if err == io.EOF {
				return
			}
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			log.Print("cli reader: ", err)
			return
		}

		if c.Dump {
			log.Printf("dump in: %T", m)
		}

		switch m := m.(type) {
		case *proto.Publish:
			c.Incoming <- m
		case *proto.PubAck:
			// ignore these
			continue
		case *proto.ConnAck:
			// Unbuffered: blocks until Connect receives it.
			c.connack <- m
		case *proto.SubAck:
			// Unbuffered: blocks until Subscribe receives it.
			c.suback <- m
		case *proto.Disconnect:
			return
		default:
			log.Printf("cli reader: got msg type %T", m)
		}
	}
}
784 |
785 | func (c *ClientConn) writer() {
786 | // Close connection on exit in order to cause reader to exit.
787 | defer func() {
788 | // Signal to Disconnect() that the message is on its way, or
789 | // that the connection is closing one way or the other...
790 | close(c.done)
791 | }()
792 |
793 | for job := range c.out {
794 | if c.Dump {
795 | log.Printf("dump out: %T", job.m)
796 | }
797 |
798 | // TODO: write timeout
799 | err := job.m.Encode(c.conn)
800 | if job.r != nil {
801 | close(job.r)
802 | }
803 |
804 | if err != nil {
805 | log.Print("cli writer: ", err)
806 | return
807 | }
808 |
809 | if _, ok := job.m.(*proto.Disconnect); ok {
810 | return
811 | }
812 | }
813 | }
814 |
815 | // Connect sends the CONNECT message to the server. If the ClientId is not already
816 | // set, use a default (a 63-bit decimal random number). The "clean session"
817 | // bit is always set.
818 | func (c *ClientConn) Connect(user, pass string) error {
819 | // TODO: Keepalive timer
820 | if c.ClientId == "" {
821 | c.ClientId = fmt.Sprint(cliRand.Int63())
822 | }
823 | req := &proto.Connect{
824 | ProtocolName: "MQIsdp",
825 | ProtocolVersion: 3,
826 | ClientId: c.ClientId,
827 | CleanSession: true,
828 | }
829 | if user != "" {
830 | req.UsernameFlag = true
831 | req.PasswordFlag = true
832 | req.Username = user
833 | req.Password = pass
834 | }
835 |
836 | c.sync(req)
837 | ack := <-c.connack
838 | return ConnectionErrors[ack.ReturnCode]
839 | }
840 |
// ConnectionErrors is an array of errors corresponding to the
// Connect return codes specified in the specification. It is indexed
// by the CONNACK return code; entry 0 is nil because code 0 means
// the connection was accepted.
var ConnectionErrors = [6]error{
	nil, // Connection Accepted (not an error)
	errors.New("Connection Refused: unacceptable protocol version"),
	errors.New("Connection Refused: identifier rejected"),
	errors.New("Connection Refused: server unavailable"),
	errors.New("Connection Refused: bad user name or password"),
	errors.New("Connection Refused: not authorized"),
}
851 |
// Disconnect sends a DISCONNECT message to the server. This function
// blocks until the disconnect message is actually sent, and the connection
// is closed.
func (c *ClientConn) Disconnect() {
	c.sync(&proto.Disconnect{})
	// The writer closes done once the DISCONNECT is on the wire (or the
	// connection has failed).
	<-c.done
}
859 |
860 | func (c *ClientConn) nextid() uint16 {
861 | id := c.id
862 | c.id++
863 | return id
864 | }
865 |
866 | // Subscribe subscribes this connection to a list of topics. Messages
867 | // will be delivered on the Incoming channel.
868 | func (c *ClientConn) Subscribe(tqs []proto.TopicQos) *proto.SubAck {
869 | c.sync(&proto.Subscribe{
870 | Header: header(dupFalse, proto.QosAtLeastOnce, retainFalse),
871 | MessageId: c.nextid(),
872 | Topics: tqs,
873 | })
874 | ack := <-c.suback
875 | return ack
876 | }
877 |
// Publish publishes the given message to the MQTT server.
// The QosLevel of the message must be QosAtMostOnce for now
// (fire-and-forget); any other level panics because acknowledged
// delivery is not yet implemented.
func (c *ClientConn) Publish(m *proto.Publish) {
	if m.QosLevel != proto.QosAtMostOnce {
		panic("unsupported QoS level")
	}
	m.MessageId = c.nextid()
	c.out <- job{m: m}
}
887 |
888 | // sync sends a message and blocks until it was actually sent.
889 | func (c *ClientConn) sync(m proto.Message) {
890 | j := job{m: m, r: make(receipt)}
891 | c.out <- j
892 | <-j.r
893 | return
894 | }
895 |
--------------------------------------------------------------------------------
/mqttsrv/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "log"
6 | "net"
7 |
8 | "github.com/jeffallen/mqtt"
9 | )
10 |
11 | var addr = flag.String("addr", "localhost:1883", "listen address of broker")
12 |
13 | func main() {
14 | flag.Parse()
15 |
16 | l, err := net.Listen("tcp", *addr)
17 | if err != nil {
18 | log.Print("listen: ", err)
19 | return
20 | }
21 | svr := mqtt.NewServer(l)
22 | svr.Start()
23 | <-svr.Done
24 | }
25 |
--------------------------------------------------------------------------------
/pingtest/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "flag"
6 | "fmt"
7 | "log"
8 | "net"
9 | "os"
10 | "sync"
11 | "time"
12 |
13 | "github.com/eclesh/welford"
14 | proto "github.com/huin/mqtt"
15 | "github.com/jeffallen/mqtt"
16 | )
17 |
var pairs = flag.Int("pairs", 100, "how many ping/reply pairs")
var wsubs = flag.Int("wsubs", 20, "how many wildcard subscribers")
var messages = flag.Int("messages", 1000, "how many messages")
var host = flag.String("host", "localhost:1883", "hostname of broker")
var dump = flag.Bool("dump", false, "dump messages?")
var id = flag.String("id", "", "client id (default: use a random one)")
var user = flag.String("user", "", "username")
var pass = flag.String("pass", "", "password")

// cwg counts connections still being established; connect() calls Done.
var cwg sync.WaitGroup
28 |
// sd is a mutex-guarded Welford accumulator for latency samples.
type sd struct {
	mu sync.Mutex
	w  welford.Stats
}
33 |
// add records one round-trip latency sample.
func (s *sd) add(sample time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.w.Add(float64(sample))
}
40 |
41 | func (s *sd) calculate() (min, max, mean, stddev time.Duration) {
42 | s.mu.Lock()
43 | defer s.mu.Unlock()
44 |
45 | min = time.Duration(s.w.Min())
46 | max = time.Duration(s.w.Max())
47 | mean = time.Duration(s.w.Mean())
48 | stddev = time.Duration(s.w.Stddev())
49 |
50 | return
51 | }
52 |
53 | var stddev sd
54 |
55 | func main() {
56 | log.SetFlags(log.Lmicroseconds)
57 | flag.Parse()
58 |
59 | timeStart := time.Now()
60 | timeConnStart := time.Now()
61 |
62 | // a system to check how long connection establishment takes
63 | cwg.Add(2**pairs + *wsubs)
64 | go func() {
65 | cwg.Wait()
66 | log.Print("all connections made")
67 | log.Print("Time to establish connections: ", time.Now().Sub(timeConnStart))
68 | }()
69 |
70 | // start the wildcard subscribers
71 | wsubExit.Add(*wsubs)
72 | wsubReady.Add(*wsubs)
73 | for i := 0; i < *wsubs; i++ {
74 | go wsub()
75 | }
76 |
77 | var wg sync.WaitGroup
78 | for i := 0; i < *pairs; i++ {
79 | wg.Add(1)
80 | go func(i int) {
81 | ping(i)
82 | wg.Done()
83 | }(i)
84 | }
85 | log.Print("all started")
86 | wg.Wait()
87 | log.Print("all finished")
88 |
89 | timeEnd := time.Now()
90 |
91 | log.Print("checking wildcard subscribers:")
92 | wsubExit.Wait()
93 | log.Print("ok")
94 |
95 | elapsed := timeEnd.Sub(timeStart)
96 | totmsg := float64(*messages * 2 * *pairs)
97 | msgpersec := totmsg / (float64(elapsed) / float64(time.Second))
98 |
99 | log.Print("elapsed time: ", elapsed)
100 | log.Print("messages : ", totmsg)
101 | log.Print("messages/sec: ", msgpersec)
102 |
103 | log.Print("round-trip latency")
104 | min, max, mean, stddev := stddev.calculate()
105 | log.Print("min : ", min)
106 | log.Print("max : ", max)
107 | log.Print("mean : ", mean)
108 | log.Print("stddev: ", stddev)
109 | }
110 |
111 | func ping(i int) {
112 | wsubReady.Wait()
113 |
114 | topic := fmt.Sprintf("pingtest/%v/request", i)
115 | topic2 := fmt.Sprintf("pingtest/%v/reply", i)
116 |
117 | start := make(chan struct{})
118 | stop := make(chan struct{})
119 |
120 | // the goroutine to reply to pings for this pair
121 | go func() {
122 | payload := []byte("ok")
123 | cc := connect()
124 | ack := cc.Subscribe([]proto.TopicQos{
125 | {topic, proto.QosAtLeastOnce},
126 | })
127 | if *dump {
128 | fmt.Printf("suback: %#v\n", ack)
129 | }
130 |
131 | close(start)
132 | for {
133 | select {
134 | // for each incoming message, send it back unchanged
135 | case in := <-cc.Incoming:
136 | if *dump {
137 | fmt.Printf("request: %#v\n", in)
138 | }
139 | in.TopicName = topic2
140 | in.Payload = proto.BytesPayload(payload)
141 | cc.Publish(in)
142 | case _ = <-stop:
143 | cc.Disconnect()
144 | return
145 | }
146 | }
147 | }()
148 |
149 | _ = <-start
150 |
151 | cc := connect()
152 |
153 | ack := cc.Subscribe([]proto.TopicQos{
154 | {topic2, proto.QosAtLeastOnce},
155 | })
156 | if *dump {
157 | fmt.Printf("suback: %#v\n", ack)
158 | }
159 |
160 | for i := 0; i < *messages; i++ {
161 | payload := []byte(fmt.Sprintf("ping %v", i))
162 |
163 | timeStart := time.Now()
164 | cc.Publish(&proto.Publish{
165 | Header: proto.Header{QosLevel: proto.QosAtMostOnce},
166 | TopicName: topic,
167 | Payload: proto.BytesPayload(payload),
168 | })
169 |
170 | in := <-cc.Incoming
171 | if in == nil {
172 | break
173 | }
174 |
175 | if *dump {
176 | fmt.Printf("reply: %#v\n", in)
177 | }
178 |
179 | elapsed := time.Now().Sub(timeStart)
180 | stddev.add(elapsed)
181 |
182 | buf := &bytes.Buffer{}
183 | err := in.Payload.WritePayload(buf)
184 | if err != nil {
185 | log.Fatalln("payload data:", err)
186 | }
187 |
188 | if !bytes.Equal(buf.Bytes(), []byte("ok")) {
189 | log.Println("unexpected reply: ", string(buf.Bytes()))
190 | break
191 | }
192 | }
193 |
194 | cc.Disconnect()
195 | close(stop)
196 | }
197 |
198 | func connect() *mqtt.ClientConn {
199 | conn, err := net.Dial("tcp", *host)
200 | if err != nil {
201 | fmt.Fprintf(os.Stderr, "dial: %v\n", err)
202 | os.Exit(2)
203 | }
204 | cc := mqtt.NewClientConn(conn)
205 | cc.Dump = *dump
206 | cc.ClientId = *id
207 |
208 | err = cc.Connect(*user, *pass)
209 | if err != nil {
210 | fmt.Println(err)
211 | os.Exit(3)
212 | }
213 |
214 | cwg.Done()
215 | return cc
216 | }
217 |
// wsubExit is signalled by each wildcard subscriber once it has seen
// all of pair 0's traffic; wsubReady gates the start of the ping pairs
// until every wildcard subscription is in place.
var wsubExit sync.WaitGroup
var wsubReady sync.WaitGroup
220 |
// wsub is a wildcard subscriber: it subscribes to pair 0's topics via
// "pingtest/0/#" and exits after seeing both directions of all of pair
// 0's messages (2 * *messages publishes).
func wsub() {
	topic := "pingtest/0/#"

	cc := connect()
	defer cc.Disconnect()

	ack := cc.Subscribe([]proto.TopicQos{
		{topic, proto.QosAtLeastOnce},
	})
	if *dump {
		fmt.Printf("suback: %#v\n", ack)
	}

	// Unblock the ping pairs now that this subscription is active.
	wsubReady.Done()

	count := 0
	for range cc.Incoming {
		count++
		if count == (2 * *messages) {
			wsubExit.Done()
			return
		}
	}
}
245 |
--------------------------------------------------------------------------------
/pub/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "net"
7 | "os"
8 |
9 | proto "github.com/huin/mqtt"
10 | "github.com/jeffallen/mqtt"
11 | )
12 |
// Command-line configuration for the one-shot publisher.
var host = flag.String("host", "localhost:1883", "hostname of broker")
var user = flag.String("user", "", "username")
var pass = flag.String("pass", "", "password")
var dump = flag.Bool("dump", false, "dump messages?")
var retain = flag.Bool("retain", false, "retain message?")
var wait = flag.Bool("wait", false, "stay connected after publishing?")
19 |
20 | func main() {
21 | flag.Parse()
22 |
23 | if flag.NArg() != 2 {
24 | fmt.Fprintln(os.Stderr, "usage: pub topic message")
25 | return
26 | }
27 |
28 | conn, err := net.Dial("tcp", *host)
29 | if err != nil {
30 | fmt.Fprint(os.Stderr, "dial: ", err)
31 | return
32 | }
33 | cc := mqtt.NewClientConn(conn)
34 | cc.Dump = *dump
35 |
36 | if err := cc.Connect(*user, *pass); err != nil {
37 | fmt.Fprintf(os.Stderr, "connect: %v\n", err)
38 | os.Exit(1)
39 | }
40 | fmt.Println("Connected with client id", cc.ClientId)
41 |
42 | cc.Publish(&proto.Publish{
43 | Header: proto.Header{Retain: *retain},
44 | TopicName: flag.Arg(0),
45 | Payload: proto.BytesPayload([]byte(flag.Arg(1))),
46 | })
47 |
48 | if *wait {
49 | <-make(chan bool)
50 | }
51 |
52 | cc.Disconnect()
53 | }
54 |
--------------------------------------------------------------------------------
/smqttsrv/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "crypto/tls"
5 | "github.com/jeffallen/mqtt"
6 | "log"
7 | )
8 |
9 | // See http://mosquitto.org/man/mosquitto-tls-7.html for how to make the
10 | // server.{crt,key} files. Then use mosquitto like this to talk to it:
11 | //
12 | // mosquitto_sub --cafile ca.crt --tls-version tlsv1 -p 8883
13 | //
14 | // tls-version is required, because Go's TLS is limited to TLS 1.0, but
15 | // OpenSSL will try to ask for TLS 1.2 by default.
16 |
// readCert loads the server's TLS key pair from server.crt/server.key
// in the current directory, panicking if either file is unusable.
func readCert() []tls.Certificate {
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		panic(err)
	}
	return []tls.Certificate{cert}
}
24 |
25 | func main() {
26 | cfg := &tls.Config{
27 | Certificates: readCert(),
28 | NextProtos: []string{"mqtt"},
29 | }
30 | l, err := tls.Listen("tcp", ":8883", cfg)
31 | if err != nil {
32 | log.Print("listen: ", err)
33 | return
34 | }
35 | svr := mqtt.NewServer(l)
36 | svr.Start()
37 | <-svr.Done
38 | }
39 |
--------------------------------------------------------------------------------
/sub/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "net"
7 | "os"
8 |
9 | proto "github.com/huin/mqtt"
10 | "github.com/jeffallen/mqtt"
11 | )
12 |
// Command-line configuration for the subscriber.
var host = flag.String("host", "localhost:1883", "hostname of broker")
var id = flag.String("id", "", "client id")
var user = flag.String("user", "", "username")
var pass = flag.String("pass", "", "password")
var dump = flag.Bool("dump", false, "dump messages?")
18 |
19 | func main() {
20 | flag.Parse()
21 |
22 | if flag.NArg() < 1 {
23 | fmt.Fprintln(os.Stderr, "usage: sub topic [topic topic...]")
24 | return
25 | }
26 |
27 | conn, err := net.Dial("tcp", *host)
28 | if err != nil {
29 | fmt.Fprint(os.Stderr, "dial: ", err)
30 | return
31 | }
32 | cc := mqtt.NewClientConn(conn)
33 | cc.Dump = *dump
34 | cc.ClientId = *id
35 |
36 | tq := make([]proto.TopicQos, flag.NArg())
37 | for i := 0; i < flag.NArg(); i++ {
38 | tq[i].Topic = flag.Arg(i)
39 | tq[i].Qos = proto.QosAtMostOnce
40 | }
41 |
42 | if err := cc.Connect(*user, *pass); err != nil {
43 | fmt.Fprintf(os.Stderr, "connect: %v\n", err)
44 | os.Exit(1)
45 | }
46 | fmt.Println("Connected with client id", cc.ClientId)
47 | cc.Subscribe(tq)
48 |
49 | for m := range cc.Incoming {
50 | fmt.Print(m.TopicName, "\t")
51 | m.Payload.WritePayload(os.Stdout)
52 | fmt.Println("\tr: ", m.Header.Retain)
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/ticktock/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "net"
7 | "os"
8 | "time"
9 |
10 | proto "github.com/huin/mqtt"
11 | "github.com/jeffallen/mqtt"
12 | )
13 |
// Command-line configuration for the tick publisher/subscriber demo.
var host = flag.String("host", "localhost:1883", "hostname of broker")
var id = flag.String("id", "", "client id")
var user = flag.String("user", "", "username")
var pass = flag.String("pass", "", "password")
var dump = flag.Bool("dump", false, "dump messages?")
var delay = flag.Duration("delay", time.Second, "delay between messages")
var who = flag.String("who", "bonnie", "who is this? (to make two instance of ticktock distinct)")
21 |
22 | func main() {
23 | flag.Parse()
24 |
25 | conn, err := net.Dial("tcp", *host)
26 | if err != nil {
27 | fmt.Fprint(os.Stderr, "dial: ", err)
28 | return
29 | }
30 | cc := mqtt.NewClientConn(conn)
31 | cc.Dump = *dump
32 | cc.ClientId = *id
33 |
34 | tq := []proto.TopicQos{
35 | {Topic: "tick", Qos: proto.QosAtMostOnce},
36 | }
37 |
38 | if err := cc.Connect(*user, *pass); err != nil {
39 | fmt.Fprintf(os.Stderr, "connect: %v\n", err)
40 | os.Exit(1)
41 | }
42 |
43 | cc.Subscribe(tq)
44 |
45 | // Sender
46 | go func() {
47 | for {
48 | now := time.Now()
49 | what := fmt.Sprintf("%v at %v", *who, now)
50 |
51 | cc.Publish(&proto.Publish{
52 | Header: proto.Header{Retain: false},
53 | TopicName: "tick",
54 | Payload: proto.BytesPayload([]byte(what)),
55 | })
56 |
57 | time.Sleep(*delay)
58 | }
59 | }()
60 |
61 | // Receiver
62 | for m := range cc.Incoming {
63 | fmt.Print(m.TopicName, "\t")
64 | m.Payload.WritePayload(os.Stdout)
65 | fmt.Println("\tr: ", m.Header.Retain)
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/wild_test.go:
--------------------------------------------------------------------------------
1 | package mqtt
2 |
3 | import (
4 | "strings"
5 | "testing"
6 | )
7 |
// TestWild exercises both wildcard-pattern validation (wild.valid) and
// topic matching (wild.matches) over a table of cases derived from the
// MQTT topic-matching rules.
func TestWild(t *testing.T) {
	var tests = []struct {
		wild, topic string // subscription pattern; published topic
		valid, want bool   // expected validity; expected match result
	}{
		{"finance#", "", false, false},
		{"finance+", "", false, false},
		{"a/#/b", "", false, false},
		{"finance/stock/ibm/#", "finance/stock", true, false},
		{"finance/stock/ibm/#", "", true, false},
		{"finance/stock/ibm/#", "finance/stock/ibm", true, true},
		{"finance/stock/ibm/#", "", true, false},
		{"#", "anything", true, true},
		{"#", "anything/no/matter/how/deep", true, true},
		{"", "", true, true},
		{"+/#", "one", true, true},
		{"+/#", "", true, true},
		{"finance/stock/+/close", "finance/stock", true, false},
		{"finance/stock/+/close", "finance/stock/ibm", true, false},
		{"finance/stock/+/close", "finance/stock/ibm/close", true, true},
		{"finance/stock/+/close", "finance/stock/ibm/open", true, false},
		{"+/+/+", "", true, false},
		{"+/+/+", "a/b", true, false},
		{"+/+/+", "a/b/c", true, true},
		{"+/+/+", "a/b/c/d", true, false},
	}

	for _, x := range tests {
		w := newWild(x.wild, nil)
		if w.valid() != x.valid {
			// A wrong validity verdict invalidates the match checks too.
			t.Fatal("Validation error: ", x.wild)
		}
		got := w.matches(strings.Split(x.topic, "/"))
		if x.want != got {
			t.Error("Fail:", x.wild, x.topic, "got", got)
		}
	}
}
46 |
--------------------------------------------------------------------------------