├── .gitignore ├── Makefile ├── go.mod ├── LICENSE ├── README.md ├── go.sum └── main.go /.gitignore: -------------------------------------------------------------------------------- 1 | data*/ 2 | ticketd -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | go build -o ticketd *.go -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tidwall/ticketd 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/gomodule/redigo v2.0.0+incompatible 7 | github.com/hashicorp/raft v1.1.1 8 | github.com/tidwall/raft-jss v0.0.0-20200101220329-7b83f95eebdb 9 | github.com/tidwall/raft-wal v0.0.0-20200101222402-f79abb78a4f6 10 | github.com/tidwall/redcon v1.0.1 11 | github.com/tidwall/redlog v0.0.0-20191219133640-9febe8549085 12 | github.com/tidwall/wal v0.0.0-20200101222051-d8500e1f916a // indirect 13 | golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect 14 | ) 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Joshua J Baker 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ticketd 2 | 3 | A distributed service for monotonically increasing tickets. 4 | 5 | ## Features 6 | 7 | - Generates unique tickets 8 | - Simple API, uses the [Redis Protocol](https://redis.io/topics/protocol) 9 | - Fault-tolerant, uses [Raft Consensus Algorithm](https://raft.github.io) 10 | 11 | ## Building 12 | 13 | [Go](https://golang.com) is required. 14 | 15 | ``` 16 | make 17 | ``` 18 | 19 | ## Running 20 | 21 | It's ideal to have three, five, or seven nodes in your cluster. 22 | 23 | Let's create the first node. 
24 | 25 | ``` 26 | ticketd --id 1 --saddr :11001 --raddr :12001 27 | ``` 28 | 29 | This will create a node named `1` and bind the client service address to 30 | `:11001` and the Raft network transport to `:12001`. 31 | 32 | Now let's create two more nodes and add them to the cluster. 33 | 34 | ``` 35 | ticketd --id 2 --saddr :11002 --raddr :12002 --join :11001 36 | ticketd --id 3 --saddr :11003 --raddr :12003 --join :11001 37 | ``` 38 | 39 | Now we have a fault-tolerant three-node cluster up and running. 40 | 41 | ## Using 42 | 43 | You can use any Redis-compatible client, such as `redis-cli`, telnet, or netcat. 44 | 45 | I'll use the `redis-cli` in the example below. 46 | 47 | 48 | Connect to the leader. This will probably be the first node you created. 49 | 50 | ``` 51 | redis-cli -p 11001 52 | ``` 53 | 54 | Send the server a `TICKET` command and receive the first ticket. 55 | 56 | ``` 57 | redis> TICKET 58 | 1 59 | ``` 60 | 61 | From here on, every `TICKET` command is guaranteed to generate a value larger 62 | than the previous `TICKET` command. 63 | 64 | ``` 65 | redis> TICKET 66 | 2 67 | redis> TICKET 68 | 3 69 | redis> TICKET 70 | 4 71 | redis> TICKET 72 | 5 73 | ``` 74 | 75 | ## Pipelining and Client Multiplexing 76 | 77 | ticketd supports pipelining commands, which means you can put multiple TICKET 78 | commands into a single network packet. This is a performance enhancement and 79 | will multiply the speed of applying new tickets by the number of tickets per 80 | packet. A minimal Go client sketch showing both single and pipelined requests appears at the end of this README. 81 | 82 | Client multiplexing is also supported, which means the server will read as many 83 | TICKET commands from all connected clients as possible and apply them as a 84 | group. This can be a big performance enhancement on a multi-core server with 85 | lots of concurrently connected clients. 86 | 87 | ## Other Commands 88 | 89 | ``` 90 | PING 91 | QUIT 92 | RAFT.ADDVOTER id addr 93 | RAFT.CONFIGURATION 94 | RAFT.LASTCONTACT 95 | RAFT.LEADER 96 | RAFT.REMOVESERVER id addr 97 | RAFT.SNAPSHOT 98 | RAFT.STATS 99 | ``` 100 | 101 | ## Durability 102 | 103 | By default ticketd is highly durable, ensuring every command is applied to 104 | disk and fsynced. You can optionally choose one of three durability levels 105 | (high, medium, low) when the server starts up. For example: 106 | 107 | ``` 108 | ticketd --id 1 --saddr :11001 --raddr :12001 --durability low 109 | ``` 110 | 111 | 112 | ## Contact 113 | Josh Baker [@tidwall](http://twitter.com/tidwall) 114 | 115 | ## License 116 | ticketd source code is available under the MIT [License](/LICENSE). 
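## Example Go client

The sketch below ties the Using and Pipelining sections together. It is not part of ticketd; it is a minimal example that uses the [redigo](https://github.com/gomodule/redigo) client (already a ticketd dependency) to request a single ticket and then pipeline several `TICKET` commands in one round trip. The address assumes the first node from the Running section, and the batch size of 5 is arbitrary.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	// Connect to a ticketd node (adjust the address for your cluster).
	c, err := redis.Dial("tcp", "127.0.0.1:11001")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// A single TICKET request. Replies are simple strings so very large
	// uint64 values survive the round trip.
	t, err := redis.String(c.Do("TICKET"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ticket:", t)

	// Pipeline several TICKET commands into one network packet. ticketd
	// notices that the packet contains only TICKET commands and applies
	// the whole batch as a single Raft log entry.
	const n = 5
	for i := 0; i < n; i++ {
		if err := c.Send("TICKET"); err != nil {
			log.Fatal(err)
		}
	}
	if err := c.Flush(); err != nil {
		log.Fatal(err)
	}
	for i := 0; i < n; i++ {
		t, err := redis.String(c.Receive())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ticket:", t)
	}
}
```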
117 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 2 | github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= 3 | github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= 4 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 5 | github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= 6 | github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= 7 | github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= 8 | github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= 9 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 10 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 12 | github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= 13 | github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= 14 | github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 15 | github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= 16 | github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= 17 | github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= 18 | github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 19 | github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= 20 | github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 21 | github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= 22 | github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 23 | github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= 24 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 25 | github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs= 26 | github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= 27 | github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= 28 | github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= 29 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 30 | github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 31 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 32 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 33 | github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= 34 | 
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 35 | github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 36 | github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 37 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 38 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 39 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 40 | github.com/tidwall/raft-jss v0.0.0-20200101220329-7b83f95eebdb h1:pq6NO53VtL5xergySdVUng6iTW0ASfwiFZt4edhGmIs= 41 | github.com/tidwall/raft-jss v0.0.0-20200101220329-7b83f95eebdb/go.mod h1:dAAbGmg4caB+C5ogIBMxVTwIW8rG6kONv91AGKTRZgU= 42 | github.com/tidwall/raft-wal v0.0.0-20200101005921-09d0cde31951 h1:iiZKWAcLTM3KoG4iw4VyfzU24uTZNNWVyJIQMhCUrwk= 43 | github.com/tidwall/raft-wal v0.0.0-20200101005921-09d0cde31951/go.mod h1:ux3wRx/EPiT4bH5BXRUd5B0JzwaRfh/lvXyZGkNCviA= 44 | github.com/tidwall/raft-wal v0.0.0-20200101222402-f79abb78a4f6 h1:cp55LbXYt5yh++T9dx/b28d3NfFrJlOs4yk1Cal2NgA= 45 | github.com/tidwall/raft-wal v0.0.0-20200101222402-f79abb78a4f6/go.mod h1:ux3wRx/EPiT4bH5BXRUd5B0JzwaRfh/lvXyZGkNCviA= 46 | github.com/tidwall/redcon v1.0.1 h1:CbIzXnU5R0v3xQYLEdhB/nfPSHHQvRWJZ4F6KLHWSeA= 47 | github.com/tidwall/redcon v1.0.1/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas= 48 | github.com/tidwall/redlog v0.0.0-20191219133640-9febe8549085 h1:ZBgpTC9k+HhBvlL2mENgzHO89eFI7l8AgvhWPDzyNuY= 49 | github.com/tidwall/redlog v0.0.0-20191219133640-9febe8549085/go.mod h1:NssoNA+Uwqd5WHKkVwAzO7AT6VuG3wiC8r5nBqds3Ao= 50 | github.com/tidwall/wal v0.0.0-20191231235127-b55521ca5b04 h1:HPyL0XEIf9Q98xNF3rukkaIA/z1K/PMU8jN16yd7xBg= 51 | github.com/tidwall/wal v0.0.0-20191231235127-b55521ca5b04/go.mod h1:bo/wBA1457lN345+EXAKn7vj4hzK7bE5yWBAkCdfyL4= 52 | github.com/tidwall/wal v0.0.0-20200101222051-d8500e1f916a h1:GI6uSySR+twVDRPd5YWF5rE6bb9GRnuJ7pHzVzTGym4= 53 | github.com/tidwall/wal v0.0.0-20200101222051-d8500e1f916a/go.mod h1:bo/wBA1457lN345+EXAKn7vj4hzK7bE5yWBAkCdfyL4= 54 | github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= 55 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 56 | golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= 57 | golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 58 | golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 59 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 60 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 61 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 62 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 63 | golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= 64 | golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 65 | golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 66 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "io" 9 | "io/ioutil" 10 | "net" 11 | "os" 12 | "path/filepath" 13 | "sort" 14 | "strconv" 15 | "strings" 16 | "time" 17 | 18 | "github.com/gomodule/redigo/redis" 19 | "github.com/hashicorp/raft" 20 | raftjss "github.com/tidwall/raft-jss" 21 | raftwal "github.com/tidwall/raft-wal" 22 | "github.com/tidwall/redcon" 23 | "github.com/tidwall/redlog" 24 | ) 25 | 26 | const deadline = 10 * time.Second // network deadline 27 | var log *redlog.Logger 28 | 29 | func main() { 30 | nodeID := "1" // node id 31 | sbindAddr := ":11001" // resp service bind addr 32 | rbindAddr := ":12001" // raft transport bind addr 33 | joinAddr := "" // address of existing node in cluster 34 | dir := "data" // data directory 35 | dur := "high" // log durability 36 | 37 | flag.StringVar(&nodeID, "id", nodeID, "Node id") 38 | flag.StringVar(&sbindAddr, "saddr", sbindAddr, "Service bind address") 39 | flag.StringVar(&rbindAddr, "raddr", rbindAddr, "Raft bind address") 40 | flag.StringVar(&joinAddr, "join", joinAddr, "Join existing Raft cluster.") 41 | flag.StringVar(&dir, "dir", dir, "Data directory") 42 | flag.StringVar(&dur, "durability", dur, "Durability (high,medium,low)") 43 | flag.Parse() 44 | 45 | advertise := rbindAddr 46 | logOutput := os.Stderr // output for logger 47 | 48 | // Create a unified logger and filter the hashicorp raft logs 49 | log = redlog.New(logOutput) 50 | log.SetFilter(redlog.HashicorpRaftFilter) 51 | log.SetIgnoreDups(true) 52 | log.SetLevel(0) 53 | 54 | durability := raftwal.High 55 | switch dur { 56 | default: 57 | log.Fatalf("Invalid durability: %s", dur) 58 | case "low": 59 | durability = raftwal.Low 60 | case "medium": 61 | durability = raftwal.Medium 62 | case "high": 63 | durability = raftwal.High 64 | } 65 | 66 | // Create shared data directory 67 | if err := os.MkdirAll(filepath.Join(dir, nodeID), 0777); err != nil { 68 | log.Fatal(err) 69 | } 70 | 71 | // Open the Raft log storage, which is a write-ahead log that is used to 72 | // keep the data in the cluster consistent. 73 | logs, err := raftwal.Open(filepath.Join(dir, nodeID, "log"), durability) 74 | if err != nil { 75 | log.Fatal(err) 76 | } 77 | if durability < raftwal.High { 78 | // Performs fsyncs in the background every 200 milliseconds when the 79 | // durability is not set to High. This ensures that log data will not 80 | // stay in memory for too long. 81 | go func() { 82 | for range time.NewTicker(time.Second / 5).C { 83 | logs.Sync() 84 | } 85 | }() 86 | } 87 | 88 | // Open the Raft stable storage, which is used to maintain the state of 89 | // the Raft cluster. 90 | stable, err := raftjss.Open(filepath.Join(dir, nodeID, "stable.json")) 91 | if err != nil { 92 | log.Fatal(err) 93 | } 94 | // Open the Raft snapshot storage, which is used to keep the size of the 95 | // log small by hanging onto a snapshot of the data at a specific point. 96 | snaps, err := raft.NewFileSnapshotStore(filepath.Join(dir, nodeID), 1, log) 97 | if err != nil { 98 | log.Fatal(err) 99 | } 100 | 101 | // Create the Raft network transport for communicating with other nodes in 102 | // the cluster. 
103 | taddr, err := net.ResolveTCPAddr("tcp", advertise) 104 | if err != nil { 105 | log.Fatal(err) 106 | } 107 | trans, err := raft.NewTCPTransport(rbindAddr, taddr, 8, deadline, log) 108 | if err != nil { 109 | log.Fatal(err) 110 | } 111 | 112 | // Create a new finite state machine object, which handles the ticket data 113 | // and applies that data to the Raft cluster. 114 | m := new(machine) 115 | 116 | // Start the Raft machine and bind it to the transport, FSM, and storage. 117 | conf := raft.DefaultConfig() 118 | conf.LocalID = raft.ServerID(nodeID) 119 | conf.LogOutput = log 120 | ra, err := raft.NewRaft(conf, m, logs, stable, snaps, trans) 121 | if err != nil { 122 | log.Fatal(err) 123 | } 124 | 125 | // Get the current Raft cluster configuration for determining whether this 126 | // server needs to bootstrap a new cluster, or join/re-join an existing 127 | // cluster. 128 | cfg := ra.GetConfiguration() 129 | if err := cfg.Error(); err != nil { 130 | log.Fatalf("Could not get Raft configuration: %v", err) 131 | } 132 | servers := cfg.Configuration().Servers 133 | if len(servers) == 0 { 134 | // Empty configuration. Either bootstrap or join an existing cluster. 135 | if joinAddr == "" { 136 | // No '-join' flag provided. 137 | // Bootstrap new cluster. 138 | log.Noticef("Bootstrapping new cluster") 139 | var configuration raft.Configuration 140 | configuration.Servers = []raft.Server{ 141 | raft.Server{ 142 | ID: raft.ServerID(nodeID), 143 | Address: raft.ServerAddress(advertise), 144 | }, 145 | } 146 | err = ra.BootstrapCluster(configuration).Error() 147 | if err != nil && err != raft.ErrCantBootstrap { 148 | log.Fatal(err) 149 | } 150 | } else { 151 | // Joining an existing cluster 152 | log.Noticef("Joining existing cluster at %v", joinAddr) 153 | conn, err := redis.Dial("tcp", joinAddr) 154 | if err != nil { 155 | log.Fatal(err) 156 | } 157 | defer conn.Close() 158 | res, err := redis.String(conn.Do("raft.addvoter", 159 | nodeID, advertise)) 160 | if err != nil { 161 | log.Fatal(err) 162 | } 163 | if res != "OK" { 164 | log.Fatalf("Expected 'OK', got '%s'", res) 165 | } 166 | } 167 | } else if joinAddr != "" { 168 | log.Debugf( 169 | "Ignoring Join request. Server already belongs to a cluster.") 170 | } 171 | 172 | // Start the Resp service 173 | startService(ra, sbindAddr) 174 | } 175 | 176 | // machine represents a Raft finite-state machine. 177 | type machine struct { 178 | ticket uint64 // monotonically growing ticket 179 | } 180 | 181 | // Apply a log to the machine. There's only one possible thing that can be 182 | // done, which is to update the ticket by a delta value. 183 | func (m *machine) Apply(l *raft.Log) interface{} { 184 | delta, n := binary.Uvarint(l.Data) 185 | if n != len(l.Data) { 186 | return errors.New("invalid data") 187 | } 188 | m.ticket += delta 189 | return m.ticket 190 | } 191 | 192 | // Restore from a snapshot. The only thing stored in a snapshot is the 193 | // last known ticket in plain-text. 194 | func (m *machine) Restore(rc io.ReadCloser) error { 195 | data, err := ioutil.ReadAll(rc) 196 | if err != nil { 197 | return err 198 | } 199 | ticket, err := strconv.ParseUint(string(data), 10, 64) 200 | if err != nil { 201 | return err 202 | } 203 | m.ticket = ticket 204 | return nil 205 | } 206 | 207 | // Snapshot returns a snapshot with the ticket. 208 | func (m *machine) Snapshot() (raft.FSMSnapshot, error) { 209 | return &snapshot{m.ticket}, nil 210 | } 211 | 212 | // snapshot represents a Raft snapshot. 
213 | type snapshot struct { 214 | ticket uint64 215 | } 216 | 217 | // Persist the snapshot to the Raft snapshot storage. This is simply the ticket 218 | // in plain-text. 219 | func (s *snapshot) Persist(sink raft.SnapshotSink) error { 220 | _, err := sink.Write([]byte(strconv.FormatUint(s.ticket, 10))) 221 | if err != nil { 222 | if err2 := sink.Cancel(); err2 != nil { 223 | return fmt.Errorf( 224 | "Sink cancel failed: %v, sink write failed: %v", err2, err) 225 | } 226 | return err 227 | } 228 | return sink.Close() 229 | } 230 | func (s *snapshot) Release() { 231 | // snapshot has no resources, such as file handles, to release. 232 | } 233 | 234 | // cmds is the RESP command table 235 | var cmds = map[string]func(s *service, conn redcon.Conn, cmd redcon.Command){ 236 | // Standard commands 237 | "ping": cmdPING, 238 | "quit": cmdQUIT, 239 | 240 | // Ticket commands 241 | "ticket": cmdTICKET, 242 | 243 | // Various raft commands 244 | "raft.addvoter": cmdRAFTADDVOTER, 245 | "raft.configuration": cmdRAFTCONFIGURATION, 246 | "raft.lastcontact": cmdRAFTLASTCONTACT, 247 | "raft.leader": cmdRAFTLEADER, 248 | "raft.removeserver": cmdRAFTREMOVESERVER, 249 | "raft.snapshot": cmdRAFTSNAPSHOT, 250 | "raft.stats": cmdRAFTSTATS, 251 | } 252 | 253 | // service is an application-wide state object representing everything needed 254 | // to manage the server. 255 | type service struct { 256 | raft *raft.Raft 257 | ticketC chan *ticketR 258 | } 259 | 260 | // ticketR represents a client ticket request. It may request one or more 261 | // tickets and the wait channel receives the starting ticket or an error. 262 | type ticketR struct { 263 | count uint64 // number of needed tickets 264 | wait chan interface{} // wait for starting ticket 265 | } 266 | 267 | func startService(ra *raft.Raft, addr string) { 268 | // We are up and running. Now create a server context that'll manage the 269 | // clients applying commands. 270 | s := &service{ 271 | raft: ra, 272 | ticketC: make(chan *ticketR), 273 | } 274 | 275 | // Start the ticket applier background routine. 276 | go ticketApplier(s) 277 | 278 | // Start client resp server 279 | handler := func(conn redcon.Conn, cmd redcon.Command) { 280 | fn := cmds[strings.ToLower(string(cmd.Args[0]))] 281 | if fn == nil { 282 | conn.WriteError("ERR unknown command '" + string(cmd.Args[0]) + "'") 283 | } else { 284 | fn(s, conn, cmd) 285 | } 286 | } 287 | saddr, err := net.ResolveTCPAddr("tcp", addr) 288 | if err != nil { 289 | log.Fatal(err) 290 | } 291 | rsvr := redcon.NewServer(saddr.String(), handler, nil, nil) 292 | sig := make(chan error) 293 | go func() { 294 | if <-sig == nil { 295 | log.Printf("Service started %v", rsvr.Addr()) 296 | } 297 | }() 298 | if err := rsvr.ListenServeAndSignal(sig); err != nil { 299 | log.Fatal(err) 300 | } 301 | } 302 | 303 | // ticketApplier is a background routine that is responsible for gathering and 304 | // applying new ticket requests. 305 | func ticketApplier(s *service) { 306 | var data [10]byte // stores uvarints 307 | var reqs []*ticketR // incoming ticket requests. 
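// Each pass of the loop below blocks for one incoming request, then drains any other pending requests without blocking, applies their combined count to the Raft log as a single uvarint-encoded entry, and finally hands each caller the starting ticket of its allocated range.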
308 | for { 309 | var totalCount uint64 310 | v := <-s.ticketC 311 | totalCount += v.count 312 | reqs = append(reqs[:0], v) 313 | var done bool 314 | for !done { 315 | select { 316 | case v := <-s.ticketC: 317 | totalCount += v.count 318 | reqs = append(reqs, v) 319 | default: 320 | done = true 321 | } 322 | } 323 | n := binary.PutUvarint(data[:], totalCount) 324 | f := s.raft.Apply(data[:n], deadline) 325 | err := f.Error() 326 | if err != nil { 327 | // Something bad happened. Notify all the callers. 328 | for _, req := range reqs { 329 | req.wait <- err 330 | } 331 | } else { 332 | // Ticket update applied. 333 | ticket := f.Response().(uint64) 334 | for _, req := range reqs { 335 | ticket -= req.count 336 | req.wait <- ticket + 1 337 | } 338 | } 339 | } 340 | } 341 | 342 | //////////////////////////////////////////////////////////////////////////////// 343 | // Below are the service commands. 344 | // 345 | // Since this service uses the Redis protocol, also known as RESP, most any 346 | // Redis-compatible library can be used, including the `redis-cli`. 347 | //////////////////////////////////////////////////////////////////////////////// 348 | 349 | // TICKET 350 | func cmdTICKET(s *service, conn redcon.Conn, cmd redcon.Command) { 351 | if len(cmd.Args) != 1 { 352 | conn.WriteError("ERR wrong number of arguments") 353 | return 354 | } 355 | var req *ticketR 356 | v := conn.Context() 357 | if v == nil { 358 | req = &ticketR{wait: make(chan interface{})} 359 | conn.SetContext(req) 360 | } else { 361 | req = v.(*ticketR) 362 | } 363 | req.count = 1 364 | pl := conn.PeekPipeline() 365 | if len(pl) > 0 { 366 | // The request belongs to a packet that has multiple commands, 367 | // also known as a pipelined packet. This is good because it means 368 | // that we can bulk Apply() to the log. But all commands must 369 | // be a valid TICKET command in order to do so. If not, then we 370 | // fall back to one at a time. 371 | valid := true 372 | for _, cmd := range pl { 373 | if len(cmd.Args) != 1 || 374 | strings.ToLower(string(cmd.Args[0])) != "ticket" { 375 | valid = false 376 | break 377 | } 378 | } 379 | if valid { 380 | // Read the commands from the pipeline and increment the 381 | // request count. 382 | conn.ReadPipeline() 383 | req.count += uint64(len(pl)) 384 | } 385 | } 386 | // We'll send each request to the central receiver which will 387 | // attempt to gather multiple tickets and apply them all at once. 388 | s.ticketC <- req 389 | switch v := (<-req.wait).(type) { 390 | case uint64: 391 | // received a new ticket 392 | for i := uint64(0); i < req.count; i++ { 393 | // Write as a string for compatibility with large uint64 values. 
394 | conn.WriteString(strconv.FormatUint(v+i, 10)) 395 | } 396 | case error: 397 | // received an error 398 | for i := uint64(0); i < req.count; i++ { 399 | conn.WriteError("ERR " + v.Error()) 400 | } 401 | } 402 | } 403 | 404 | // PING 405 | func cmdPING(s *service, conn redcon.Conn, cmd redcon.Command) { 406 | conn.WriteString("PONG") 407 | } 408 | 409 | // QUIT 410 | func cmdQUIT(s *service, conn redcon.Conn, cmd redcon.Command) { 411 | conn.WriteString("OK") 412 | conn.Close() 413 | } 414 | 415 | // RAFT.REMOVESERVER node-id addr [prev-index] 416 | func cmdRAFTREMOVESERVER(s *service, conn redcon.Conn, cmd redcon.Command) { 417 | var prevIndex uint64 418 | switch len(cmd.Args) { 419 | default: 420 | conn.WriteError("ERR wrong number of arguments") 421 | return 422 | case 3: 423 | case 4: 424 | n, err := strconv.ParseUint(string(cmd.Args[3]), 10, 64) 425 | if err != nil { 426 | conn.WriteError("ERR syntax error") 427 | return 428 | } 429 | prevIndex = n 430 | } 431 | err := s.raft.RemoveServer( 432 | raft.ServerID(string(cmd.Args[1])), 433 | prevIndex, 0, 434 | ).Error() 435 | if err != nil { 436 | conn.WriteError("ERR " + err.Error()) 437 | return 438 | } 439 | conn.WriteString("OK") 440 | } 441 | 442 | // RAFT.ADDVOTER node-id server-addr [prev-index] 443 | func cmdRAFTADDVOTER(s *service, conn redcon.Conn, cmd redcon.Command) { 444 | var prevIndex uint64 445 | switch len(cmd.Args) { 446 | default: 447 | conn.WriteError("ERR wrong number of arguments") 448 | return 449 | case 3: 450 | case 4: 451 | n, err := strconv.ParseUint(string(cmd.Args[3]), 10, 64) 452 | if err != nil { 453 | conn.WriteError("ERR syntax error") 454 | return 455 | } 456 | prevIndex = n 457 | } 458 | err := s.raft.AddVoter( 459 | raft.ServerID(string(cmd.Args[1])), 460 | raft.ServerAddress(string(cmd.Args[2])), 461 | prevIndex, 0, 462 | ).Error() 463 | if err != nil { 464 | conn.WriteError("ERR " + err.Error()) 465 | return 466 | } 467 | conn.WriteString("OK") 468 | } 469 | 470 | // RAFT.LEADER 471 | // Returns the current leader 472 | func cmdRAFTLEADER(s *service, conn redcon.Conn, cmd redcon.Command) { 473 | conn.WriteString(string(s.raft.Leader())) 474 | } 475 | 476 | // RAFT.STATS 477 | func cmdRAFTSTATS(s *service, conn redcon.Conn, cmd redcon.Command) { 478 | m := s.raft.Stats() 479 | keys := make([]string, 0, len(m)) 480 | for k := range m { 481 | keys = append(keys, k) 482 | } 483 | sort.Strings(keys) 484 | conn.WriteArray(len(keys) * 2) 485 | for _, k := range keys { 486 | conn.WriteBulkString(k) 487 | conn.WriteBulkString(m[k]) 488 | } 489 | } 490 | 491 | // RAFT.LASTCONTACT 492 | func cmdRAFTLASTCONTACT(s *service, conn redcon.Conn, cmd redcon.Command) { 493 | conn.WriteBulkString(s.raft.LastContact().String()) 494 | } 495 | 496 | // RAFT.SNAPSHOT 497 | func cmdRAFTSNAPSHOT(s *service, conn redcon.Conn, cmd redcon.Command) { 498 | err := s.raft.Snapshot().Error() 499 | if err != nil { 500 | conn.WriteError("ERR " + err.Error()) 501 | return 502 | } 503 | conn.WriteString("OK") 504 | } 505 | 506 | // RAFT.CONFIGURATION 507 | func cmdRAFTCONFIGURATION(s *service, conn redcon.Conn, cmd redcon.Command) { 508 | fcfg := s.raft.GetConfiguration() 509 | if err := fcfg.Error(); err != nil { 510 | conn.WriteError("ERR " + err.Error()) 511 | return 512 | } 513 | cfg := fcfg.Configuration() 514 | conn.WriteArray(len(cfg.Servers)) 515 | for _, svr := range cfg.Servers { 516 | conn.WriteArray(6) 517 | conn.WriteBulkString("id") 518 | conn.WriteBulkString(string(svr.ID)) 519 | conn.WriteBulkString("address") 520 | 
conn.WriteBulkString(string(svr.Address)) 521 | conn.WriteBulkString("suffrage") 522 | conn.WriteBulkString(fmt.Sprint(svr.Suffrage)) 523 | } 524 | } 525 | --------------------------------------------------------------------------------