├── .circleci └── config.yml ├── .gitignore ├── CLUSTERING.md ├── LICENSE ├── README.md ├── appveyor.yml ├── go.mod ├── go.sum ├── http ├── service.go └── service_test.go ├── main.go └── store ├── store.go └── store_test.go /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | working_directory: ~/hraftd/src/github.com/otoolep/hraftd 5 | docker: 6 | - image: cimg/go:1.22.0 7 | resource_class: large 8 | 9 | steps: 10 | - checkout 11 | - run: go version 12 | - run: go get -t -d -v ./... 13 | - run: go vet . 14 | - run: go test -timeout 60s -v ./... 15 | - run: 16 | command: go test -race -timeout 120s -v ./... 17 | environment: 18 | GORACE: "halt_on_error=1" 19 | 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | # executables 6 | 7 | hraftd 8 | **/hraftd 9 | 10 | # Folders 11 | _obj 12 | _test 13 | 14 | # Architecture specific extensions/prefixes 15 | *.[568vq] 16 | [568vq].out 17 | 18 | *.cgo1.go 19 | *.cgo2.c 20 | _cgo_defun.c 21 | _cgo_gotypes.go 22 | _cgo_export.* 23 | 24 | _testmain.go 25 | 26 | *.exe 27 | *.test 28 | *.prof 29 | -------------------------------------------------------------------------------- /CLUSTERING.md: -------------------------------------------------------------------------------- 1 | # Multi-node Clustering 2 | What follows is a detailed example of running a multi-node hraftd cluster. 3 | 4 | Imagine you have 3 machines, with the IP addresses 192.168.0.1, 192.168.0.2, and 192.168.0.3 respectively. Let's also assume that each machine can reach the other two machines using these addresses. 
5 | 6 | ## Walkthrough 7 | You should start the first node like so: 8 | ``` 9 | $GOPATH/bin/hraftd -id node1 -haddr 192.168.0.1:11000 -raddr 192.168.0.1:12000 ~/node 10 | ``` 11 | This way the node is listening on an address reachable from the other nodes. This node will start up and become leader of a single-node cluster. 12 | 13 | Next, start the second node as follows: 14 | ``` 15 | $GOPATH/bin/hraftd -id node2 -haddr 192.168.0.2:11000 -raddr 192.168.0.2:12000 -join 192.168.0.1:11000 ~/node 16 | ``` 17 | 18 | Finally, start the third node as follows: 19 | ``` 20 | $GOPATH/bin/hraftd -id node3 -haddr 192.168.0.3:11000 -raddr 192.168.0.3:12000 -join 192.168.0.1:11000 ~/node 21 | ``` 22 | 23 | _Specifically using ports 11000 and 12000 is not required. You can use other ports if you wish._ 24 | 25 | Note how each node listens on its own address, but joins to the address of the leader node. The second and third nodes will start, join with the leader at `192.168.0.1:11000`, and a 3-node cluster will be formed. 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015-2025 Philip O'Toole 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | hraftd 2 | ====== 3 | [![Circle CI](https://circleci.com/gh/otoolep/hraftd/tree/master.svg?style=svg)](https://circleci.com/gh/otoolep/hraftd/tree/master) 4 | [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/otoolep/hraftd?branch=master&svg=true)](https://ci.appveyor.com/project/otoolep/hraftd) 5 | [![Go Reference](https://pkg.go.dev/badge/github.com/otoolep/hraftd.svg)](https://pkg.go.dev/github.com/otoolep/hraftd) 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/otoolep/hraftd)](https://goreportcard.com/report/github.com/otoolep/hraftd) 7 | 8 | _For background on this project check out this [blog post](http://www.philipotoole.com/building-a-distributed-key-value-store-using-raft/)._ 9 | 10 | _You should also check out the GopherCon2023 talk "Build Your Own Distributed System Using Go" ([video](https://www.youtube.com/watch?v=8XbxQ1Epi5w), [slides](https://www.philipotoole.com/gophercon2023)), which explains step-by-step how to use the Hashicorp Raft library._ 11 | 12 | ## What is hraftd? 13 | hraftd is a reference example use of the [Hashicorp Raft implementation](https://github.com/hashicorp/raft). 
[Raft](https://raft.github.io/) is a _distributed consensus protocol_, meaning its purpose is to ensure that a set of nodes -- a cluster -- agree on the state of some arbitrary state machine, even when nodes are vulnerable to failure and network partitions. Distributed consensus is a fundamental concept when it comes to building fault-tolerant systems. 14 | 15 | A simple example system like hraftd makes it easy to study the Raft consensus protocol in general, and Hashicorp's Raft implementation in particular. It can be run on Linux, macOS, and Windows. 16 | 17 | ## Reading and writing keys 18 | The reference implementation is a very simple in-memory key-value store. You can set a key by sending a request to the HTTP bind address (which defaults to `localhost:11000`): 19 | ```bash 20 | curl -XPOST localhost:11000/key -d '{"foo": "bar"}' 21 | ``` 22 | 23 | You can read the value for a key like so: 24 | ```bash 25 | curl -XGET localhost:11000/key/foo 26 | ``` 27 | 28 | ## Running hraftd 29 | *Building hraftd requires Go 1.20 or later. [gvm](https://github.com/moovweb/gvm) is a great tool for installing and managing your versions of Go.* 30 | 31 | Starting and running a hraftd cluster is easy. 
Download and build hraftd like so: 32 | ```bash 33 | mkdir work # or any directory you like 34 | cd work 35 | export GOPATH=$PWD 36 | mkdir -p src/github.com/otoolep 37 | cd src/github.com/otoolep/ 38 | git clone git@github.com:otoolep/hraftd.git 39 | cd hraftd 40 | go install 41 | ``` 42 | 43 | Run your first hraftd node like so: 44 | ```bash 45 | $GOPATH/bin/hraftd -id node0 ~/node0 46 | ``` 47 | 48 | You can now set a key and read its value back: 49 | ```bash 50 | curl -XPOST localhost:11000/key -d '{"user1": "batman"}' 51 | curl -XGET localhost:11000/key/user1 52 | ``` 53 | 54 | ### Bring up a cluster 55 | _A walkthrough of setting up a more realistic cluster is [here](https://github.com/otoolep/hraftd/blob/master/CLUSTERING.md)._ 56 | 57 | Let's bring up 2 more nodes, so we have a 3-node cluster. That way we can tolerate the failure of 1 node: 58 | ```bash 59 | $GOPATH/bin/hraftd -id node1 -haddr localhost:11001 -raddr localhost:12001 -join :11000 ~/node1 60 | $GOPATH/bin/hraftd -id node2 -haddr localhost:11002 -raddr localhost:12002 -join :11000 ~/node2 61 | ``` 62 | _This example shows each hraftd node running on the same host, so each node must listen on different ports. This would not be necessary if each node ran on a different host._ 63 | 64 | This tells each new node to join the existing node. Once joined, each node now knows about the key: 65 | ```bash 66 | curl -XGET localhost:11000/key/user1 67 | curl -XGET localhost:11001/key/user1 68 | curl -XGET localhost:11002/key/user1 69 | ``` 70 | 71 | Furthermore you can add a second key: 72 | ```bash 73 | curl -XPOST localhost:11000/key -d '{"user2": "robin"}' 74 | ``` 75 | 76 | Confirm that the new key has been set like so: 77 | ```bash 78 | curl -XGET localhost:11000/key/user2 79 | curl -XGET localhost:11001/key/user2 80 | curl -XGET localhost:11002/key/user2 81 | ``` 82 | 83 | #### Stale reads 84 | Because any node will answer a GET request, and nodes may "fall behind" updates, stale reads are possible. 
Again, hraftd is a simple program, for the purpose of demonstrating a distributed key-value store. If you are particularly interested in learning more about this issue, you should check out [rqlite](https://rqlite.io/). rqlite allows the client to control [read consistency](https://rqlite.io/docs/api/read-consistency/), allowing the client to trade off read-responsiveness and correctness. 85 | 86 | Read-consistency support could be ported to hraftd if necessary. 87 | 88 | ### Tolerating failure 89 | Kill the leader process and watch one of the other nodes be elected leader. The keys are still available for query on the other nodes, and you can set keys on the new leader. Furthermore, when the first node is restarted, it will rejoin the cluster and learn about any updates that occurred while it was down. 90 | 91 | A 3-node cluster can tolerate the failure of a single node, but a 5-node cluster can tolerate the failure of two nodes. But 5-node clusters require that the leader contact a larger number of nodes before any change, e.g. setting a key's value, can be considered committed. 92 | 93 | ### Leader-forwarding 94 | Automatically forwarding requests to set keys to the current leader is not implemented. The client must always send requests to change a key to the leader or an error will be returned. 95 | 96 | ## Production use of Raft 97 | For a production-grade example of using Hashicorp's Raft implementation, to replicate a SQLite database, check out [rqlite](https://github.com/rqlite/rqlite). 
98 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | os: Visual Studio 2019 2 | 3 | stack: go 1.22 4 | 5 | version: 1.{build} 6 | 7 | platform: x64 8 | 9 | clone_folder: c:\gopath\src\github.com\otoolep\hraftd 10 | 11 | environment: 12 | GOPATH: c:\gopath 13 | 14 | install: 15 | - set BUILD_ENV=gnu 16 | - if %BUILD_ENV%==gnu set PATH=C:\msys64\mingw64\bin;C:\msys64\usr\bin\;%PATH% 17 | - if %BUILD_ENV%==gnu set MSYSTEM=MINGW64 18 | - if %BUILD_ENV%==gnu set MSYS=winsymlinks=lnk 19 | 20 | build_script: 21 | - go get ./... 22 | - go test -v ./... 23 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/otoolep/hraftd 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/hashicorp/raft v1.7.0 7 | github.com/hashicorp/raft-boltdb/v2 v2.3.0 8 | ) 9 | 10 | require ( 11 | github.com/armon/go-metrics v0.4.1 // indirect 12 | github.com/boltdb/bolt v1.3.1 // indirect 13 | github.com/fatih/color v1.17.0 // indirect 14 | github.com/hashicorp/go-hclog v1.6.3 // indirect 15 | github.com/hashicorp/go-immutable-radix v1.3.1 // indirect 16 | github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect 17 | github.com/hashicorp/golang-lru v1.0.2 // indirect 18 | github.com/mattn/go-colorable v0.1.13 // indirect 19 | github.com/mattn/go-isatty v0.0.20 // indirect 20 | go.etcd.io/bbolt v1.3.10 // indirect 21 | golang.org/x/sys v0.21.0 // indirect 22 | ) 23 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 2 | github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 3 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 4 | github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 5 | github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 6 | github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= 7 | github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= 8 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 9 | github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 10 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 11 | github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= 12 | github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= 13 | github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 14 | github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= 15 | github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= 16 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 17 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 18 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 19 | github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 20 | github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= 21 | github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= 22 | github.com/go-kit/kit v0.8.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 23 | github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 24 | github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 25 | github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 26 | github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 27 | github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 28 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 29 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 30 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 31 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 32 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 33 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 34 | github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 35 | github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= 36 | github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 37 | github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 38 | github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= 39 | github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 40 | github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= 41 | github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= 42 | github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= 43 | 
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= 44 | github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= 45 | github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 46 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 47 | github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= 48 | github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 49 | github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= 50 | github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= 51 | github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= 52 | github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA= 53 | github.com/hashicorp/raft-boltdb/v2 v2.3.0/go.mod h1:YHukhB04ChJsLHLJEUD6vjFyLX2L3dsX3wPBZcX4tmc= 54 | github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 55 | github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 56 | github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 57 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 58 | github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 59 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 60 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 61 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 62 | github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 63 | github.com/mattn/go-colorable 
v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= 64 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 65 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 66 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 67 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 68 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 69 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 70 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 71 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 72 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 73 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 74 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 75 | github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 76 | github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 77 | github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= 78 | github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 79 | github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 80 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 81 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 82 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 83 | 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 84 | github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= 85 | github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= 86 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 87 | github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 88 | github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 89 | github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 90 | github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= 91 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 92 | github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 93 | github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 94 | github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 95 | github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 96 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 97 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 98 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 99 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 100 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 101 | github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 102 | github.com/stretchr/testify v1.8.4 
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 103 | github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= 104 | go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= 105 | go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= 106 | golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 107 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 108 | golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 109 | golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 110 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 111 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 112 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 113 | golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= 114 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 115 | golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 116 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 117 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 118 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 119 | golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 120 | golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 121 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 122 | golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 123 | golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 124 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 125 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 126 | golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= 127 | golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 128 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 129 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 130 | gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 131 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 132 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 133 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 134 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 135 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 136 | gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 137 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 138 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 139 | -------------------------------------------------------------------------------- /http/service.go: -------------------------------------------------------------------------------- 1 | // Package httpd provides 
the HTTP server for accessing the distributed key-value store. 2 | // It also provides the endpoint for other nodes to join an existing cluster. 3 | package httpd 4 | 5 | import ( 6 | "encoding/json" 7 | "io" 8 | "log" 9 | "net" 10 | "net/http" 11 | "strings" 12 | 13 | store "github.com/otoolep/hraftd/store" 14 | ) 15 | 16 | // Store is the interface Raft-backed key-value stores must implement. 17 | type Store interface { 18 | // Get returns the value for the given key. 19 | Get(key string) (string, error) 20 | 21 | // Set sets the value for the given key, via distributed consensus. 22 | Set(key, value string) error 23 | 24 | // Delete removes the given key, via distributed consensus. 25 | Delete(key string) error 26 | 27 | // Join joins the node, identified by nodeID and reachable at addr, to the cluster. 28 | Join(nodeID string, addr string) error 29 | 30 | // Status returns this node, the current leader, and the followers. 31 | Status() (store.StoreStatus, error) 32 | } 33 | 34 | // Service provides HTTP service over a Store. 35 | type Service struct { 36 | addr string // bind address for the HTTP listener 37 | ln net.Listener // set by Start; nil until then 38 | 39 | store Store // backing Raft-based key-value store 40 | } 41 | 42 | // New returns an uninitialized HTTP service. Call Start to begin serving. 43 | func New(addr string, store Store) *Service { 44 | return &Service{ 45 | addr: addr, 46 | store: store, 47 | } 48 | } 49 | 50 | // Start starts the service: it opens the listener and serves requests on a background goroutine. 51 | func (s *Service) Start() error { 52 | server := http.Server{ 53 | Handler: s, 54 | } 55 | 56 | ln, err := net.Listen("tcp", s.addr) 57 | if err != nil { 58 | return err 59 | } 60 | s.ln = ln 61 | 62 | http.Handle("/", s) // NOTE(review): also registers on the global DefaultServeMux, which the server above never uses; a second Start in one process would panic here — confirm before removing. 63 | 64 | go func() { 65 | err := server.Serve(s.ln) 66 | if err != nil { 67 | log.Fatalf("HTTP serve: %s", err) // NOTE(review): Serve returns a non-nil error once the listener is closed, so Close() appears to terminate the whole process via this Fatalf — confirm intended. 68 | } 69 | }() 70 | 71 | return nil 72 | } 73 | 74 | // Close closes the service by closing its listener, which stops the serve goroutine. 75 | func (s *Service) Close() { 76 | s.ln.Close() 77 | return 78 | } 79 | 80 | // ServeHTTP allows Service to serve HTTP requests. 
81 | func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) { 82 | if strings.HasPrefix(r.URL.Path, "/key") { 83 | s.handleKeyRequest(w, r) 84 | } else if r.URL.Path == "/join" { 85 | s.handleJoin(w, r) 86 | } else if r.URL.Path == "/status" { 87 | s.handleStatus(w, r) 88 | } else { 89 | w.WriteHeader(http.StatusNotFound) 90 | } 91 | } 92 | // handleJoin adds the node identified by the request body ("id", "addr") to the cluster. 93 | func (s *Service) handleJoin(w http.ResponseWriter, r *http.Request) { 94 | m := map[string]string{} 95 | if err := json.NewDecoder(r.Body).Decode(&m); err != nil { 96 | w.WriteHeader(http.StatusBadRequest) 97 | return 98 | } 99 | 100 | if len(m) != 2 { 101 | w.WriteHeader(http.StatusBadRequest) 102 | return 103 | } 104 | 105 | remoteAddr, ok := m["addr"] 106 | if !ok { 107 | w.WriteHeader(http.StatusBadRequest) 108 | return 109 | } 110 | 111 | nodeID, ok := m["id"] 112 | if !ok { 113 | w.WriteHeader(http.StatusBadRequest) 114 | return 115 | } 116 | 117 | if err := s.store.Join(nodeID, remoteAddr); err != nil { 118 | w.WriteHeader(http.StatusInternalServerError) 119 | return 120 | } 121 | } 122 | // handleStatus reports this node, the leader, and the followers as JSON. GET only. 123 | func (s *Service) handleStatus(w http.ResponseWriter, r *http.Request) { 124 | if r.Method != "GET" { 125 | w.WriteHeader(http.StatusMethodNotAllowed) 126 | return // reject non-GET methods outright; previously this fell through and served the status body anyway 127 | } 128 | status, err := s.store.Status() 129 | if err != nil { 130 | http.Error(w, err.Error(), http.StatusInternalServerError) 131 | return 132 | } 133 | // Set the Content-Type header to application/json 134 | w.Header().Set("Content-Type", "application/json") 135 | 136 | // Encode the response struct to JSON 137 | statusJson, err := json.Marshal(status) 138 | if err != nil { 139 | http.Error(w, err.Error(), http.StatusInternalServerError) 140 | return 141 | } 142 | 143 | // write it to the response writer 144 | _, err = w.Write(statusJson) 145 | if err != nil { 146 | http.Error(w, err.Error(), http.StatusInternalServerError) 147 | return 148 | } 149 | } 150 | // handleKeyRequest dispatches GET/POST/DELETE operations on /key paths. 151 | func (s *Service) handleKeyRequest(w http.ResponseWriter, r *http.Request) { 152 | 
getKey := func() string { // returns the <key> segment of /key/<key>, or "" if the path is malformed 153 | parts := strings.Split(r.URL.Path, "/") 154 | if len(parts) != 3 { 155 | return "" 156 | } 157 | return parts[2] 158 | } 159 | 160 | switch r.Method { 161 | case "GET": 162 | k := getKey() 163 | if k == "" { 164 | w.WriteHeader(http.StatusBadRequest) 165 | return // previously missing: a malformed path fell through to the store lookup and wrote a second response (DELETE below already returns here) 166 | } 167 | v, err := s.store.Get(k) 168 | if err != nil { 169 | w.WriteHeader(http.StatusInternalServerError) 170 | return 171 | } 172 | b, err := json.Marshal(map[string]string{k: v}) 173 | if err != nil { 174 | w.WriteHeader(http.StatusInternalServerError) 175 | return 176 | } 177 | io.WriteString(w, string(b)) 178 | 179 | case "POST": 180 | // Read the value from the POST body. 181 | m := map[string]string{} 182 | if err := json.NewDecoder(r.Body).Decode(&m); err != nil { 183 | w.WriteHeader(http.StatusBadRequest) 184 | return 185 | } 186 | for k, v := range m { 187 | if err := s.store.Set(k, v); err != nil { 188 | w.WriteHeader(http.StatusInternalServerError) 189 | return 190 | } 191 | } 192 | 193 | case "DELETE": 194 | k := getKey() 195 | if k == "" { 196 | w.WriteHeader(http.StatusBadRequest) 197 | return 198 | } 199 | if err := s.store.Delete(k); err != nil { 200 | w.WriteHeader(http.StatusInternalServerError) 201 | return 202 | } 203 | 204 | default: 205 | w.WriteHeader(http.StatusMethodNotAllowed) 206 | } 207 | return 208 | } 209 | 210 | // Addr returns the address on which the Service is listening 211 | func (s *Service) Addr() net.Addr { 212 | return s.ln.Addr() 213 | } 214 | -------------------------------------------------------------------------------- /http/service_test.go: -------------------------------------------------------------------------------- 1 | package httpd 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "net/url" 10 | "strings" 11 | "testing" 12 | 13 | store "github.com/otoolep/hraftd/store" 14 | ) 15 | 16 | // Test_NewServer tests that a server can perform all basic operations. 
17 | func Test_NewServer(t *testing.T) { 18 | store := newTestStore() 19 | s := &testServer{New(":0", store)} 20 | if s == nil { // NOTE(review): s is the address of a composite literal and can never be nil; this check is dead code. 21 | t.Fatal("failed to create HTTP service") 22 | } 23 | 24 | if err := s.Start(); err != nil { 25 | t.Fatalf("failed to start HTTP service: %s", err) 26 | } 27 | 28 | b := doGet(t, s.URL(), "k1") 29 | if string(b) != `{"k1":""}` { 30 | t.Fatalf("wrong value received for key k1: %s (expected empty string)", string(b)) 31 | } 32 | 33 | doPost(t, s.URL(), "k1", "v1") 34 | 35 | b = doGet(t, s.URL(), "k1") 36 | if string(b) != `{"k1":"v1"}` { 37 | t.Fatalf(`wrong value received for key k1: %s (expected "v1")`, string(b)) 38 | } 39 | 40 | store.m["k2"] = "v2" // write directly to the stub's map to simulate state set outside the HTTP layer 41 | b = doGet(t, s.URL(), "k2") 42 | if string(b) != `{"k2":"v2"}` { 43 | t.Fatalf(`wrong value received for key k2: %s (expected "v2")`, string(b)) 44 | } 45 | 46 | doDelete(t, s.URL(), "k2") 47 | b = doGet(t, s.URL(), "k2") 48 | if string(b) != `{"k2":""}` { 49 | t.Fatalf(`wrong value received for key k2: %s (expected empty string)`, string(b)) 50 | } 51 | 52 | doStatus(t, s.URL()) 53 | } 54 | // testServer wraps Service to expose a base URL for building request paths. 55 | type testServer struct { 56 | *Service 57 | } 58 | // URL returns a 127.0.0.1 base URL for the server's listening port. 59 | func (t *testServer) URL() string { 60 | port := strings.TrimLeft(t.Addr().String(), "[::]:") // NOTE(review): TrimLeft's second argument is a cutset, not a prefix; this works for "[::]:<port>" only because port digits are not in the set. 61 | return fmt.Sprintf("http://127.0.0.1:%s", port) 62 | } 63 | // testStore is an in-memory Store stub used to exercise the HTTP layer without Raft. 64 | type testStore struct { 65 | m map[string]string 66 | } 67 | 68 | func newTestStore() *testStore { 69 | return &testStore{ 70 | m: make(map[string]string), 71 | } 72 | } 73 | 74 | func (t *testStore) Get(key string) (string, error) { 75 | return t.m[key], nil 76 | } 77 | 78 | func (t *testStore) Set(key, value string) error { 79 | t.m[key] = value 80 | return nil 81 | } 82 | 83 | func (t *testStore) Delete(key string) error { 84 | delete(t.m, key) 85 | return nil 86 | } 87 | 88 | func (t *testStore) Join(nodeID, addr string) error { 89 | return nil 90 | } 91 | // Status returns a fixed single-node status so /status responses are predictable. 92 | func (t *testStore) Status() (store.StoreStatus, error) { 93 | return store.StoreStatus{ 94 | Me: store.Node{ 
95 | ID: "01", 96 | Address: "127.0.0.1:1210", 97 | }, 98 | Leader: store.Node{ 99 | ID: "01", 100 | Address: "127.0.0.1:1210", 101 | }, 102 | Followers: []store.Node{}, 103 | }, nil 104 | } 105 | 106 | func doGet(t *testing.T, url, key string) string { 107 | resp, err := http.Get(fmt.Sprintf("%s/key/%s", url, key)) 108 | if err != nil { 109 | t.Fatalf("failed to GET key: %s", err) 110 | } 111 | defer resp.Body.Close() 112 | body, err := ioutil.ReadAll(resp.Body) 113 | if err != nil { 114 | t.Fatalf("failed to read response: %s", err) 115 | } 116 | return string(body) 117 | } 118 | 119 | func doPost(t *testing.T, url, key, value string) { 120 | b, err := json.Marshal(map[string]string{key: value}) 121 | if err != nil { 122 | t.Fatalf("failed to encode key and value for POST: %s", err) 123 | } 124 | resp, err := http.Post(fmt.Sprintf("%s/key", url), "application-type/json", bytes.NewReader(b)) 125 | if err != nil { 126 | t.Fatalf("POST request failed: %s", err) 127 | } 128 | defer resp.Body.Close() 129 | } 130 | 131 | func doDelete(t *testing.T, u, key string) { 132 | ru, err := url.Parse(fmt.Sprintf("%s/key/%s", u, key)) 133 | if err != nil { 134 | t.Fatalf("failed to parse URL for delete: %s", err) 135 | } 136 | req := &http.Request{ 137 | Method: "DELETE", 138 | URL: ru, 139 | } 140 | 141 | client := http.Client{} 142 | resp, err := client.Do(req) 143 | if err != nil { 144 | t.Fatalf("failed to GET key: %s", err) 145 | } 146 | defer resp.Body.Close() 147 | } 148 | 149 | func doStatus(t *testing.T, url string) { 150 | resp, err := http.Get(fmt.Sprintf("%s/status", url)) 151 | if err != nil { 152 | t.Fatalf("failed to fetch status: %s", err) 153 | } 154 | defer resp.Body.Close() 155 | body, err := ioutil.ReadAll(resp.Body) 156 | if err != nil { 157 | t.Fatalf("failed to read response: %s", err) 158 | } 159 | 160 | var status store.StoreStatus 161 | err = json.Unmarshal(body, &status) 162 | if err != nil { 163 | t.Fatalf("status is not a valid status json") 164 | } 
165 | } 166 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "net/http" 10 | "os" 11 | "os/signal" 12 | 13 | httpd "github.com/otoolep/hraftd/http" 14 | "github.com/otoolep/hraftd/store" 15 | ) 16 | 17 | // Command line defaults 18 | const ( 19 | DefaultHTTPAddr = "localhost:11000" 20 | DefaultRaftAddr = "localhost:12000" 21 | ) 22 | 23 | // Command line parameters 24 | var inmem bool 25 | var httpAddr string 26 | var raftAddr string 27 | var joinAddr string 28 | var nodeID string 29 | 30 | func init() { 31 | flag.BoolVar(&inmem, "inmem", false, "Use in-memory storage for Raft") 32 | flag.StringVar(&httpAddr, "haddr", DefaultHTTPAddr, "Set the HTTP bind address") 33 | flag.StringVar(&raftAddr, "raddr", DefaultRaftAddr, "Set Raft bind address") 34 | flag.StringVar(&joinAddr, "join", "", "Set join address, if any") 35 | flag.StringVar(&nodeID, "id", "", "Node ID. If not set, same as Raft bind address") 36 | flag.Usage = func() { 37 | fmt.Fprintf(os.Stderr, "Usage: %s [options] \n", os.Args[0]) 38 | flag.PrintDefaults() 39 | } 40 | } 41 | 42 | func main() { 43 | flag.Parse() 44 | if flag.NArg() == 0 { 45 | fmt.Fprintf(os.Stderr, "No Raft storage directory specified\n") 46 | os.Exit(1) 47 | } 48 | 49 | if nodeID == "" { 50 | nodeID = raftAddr 51 | } 52 | 53 | // Ensure Raft storage exists. 
54 | raftDir := flag.Arg(0) 55 | if raftDir == "" { 56 | log.Fatalln("No Raft storage directory specified") 57 | } 58 | if err := os.MkdirAll(raftDir, 0700); err != nil { 59 | log.Fatalf("failed to create path for Raft storage: %s", err.Error()) 60 | } 61 | 62 | s := store.New(inmem) 63 | s.RaftDir = raftDir 64 | s.RaftBind = raftAddr 65 | if err := s.Open(joinAddr == "", nodeID); err != nil { 66 | log.Fatalf("failed to open store: %s", err.Error()) 67 | } 68 | 69 | h := httpd.New(httpAddr, s) 70 | if err := h.Start(); err != nil { 71 | log.Fatalf("failed to start HTTP service: %s", err.Error()) 72 | } 73 | 74 | // If join was specified, make the join request. 75 | if joinAddr != "" { 76 | if err := join(joinAddr, raftAddr, nodeID); err != nil { 77 | log.Fatalf("failed to join node at %s: %s", joinAddr, err.Error()) 78 | } 79 | } 80 | 81 | // We're up and running! 82 | log.Printf("hraftd started successfully, listening on http://%s", httpAddr) 83 | 84 | terminate := make(chan os.Signal, 1) 85 | signal.Notify(terminate, os.Interrupt) 86 | <-terminate 87 | log.Println("hraftd exiting") 88 | } 89 | 90 | func join(joinAddr, raftAddr, nodeID string) error { 91 | b, err := json.Marshal(map[string]string{"addr": raftAddr, "id": nodeID}) 92 | if err != nil { 93 | return err 94 | } 95 | resp, err := http.Post(fmt.Sprintf("http://%s/join", joinAddr), "application-type/json", bytes.NewReader(b)) 96 | if err != nil { 97 | return err 98 | } 99 | defer resp.Body.Close() 100 | return nil 101 | } 102 | -------------------------------------------------------------------------------- /store/store.go: -------------------------------------------------------------------------------- 1 | // Package store provides a simple distributed key-value store. The keys and 2 | // associated values are changed via distributed consensus, meaning that the 3 | // values are changed only when a majority of nodes in the cluster agree on 4 | // the new value. 
5 | // 6 | // Distributed consensus is provided via the Raft algorithm, specifically the 7 | // Hashicorp implementation. 8 | package store 9 | 10 | import ( 11 | "encoding/json" 12 | "fmt" 13 | "io" 14 | "log" 15 | "net" 16 | "os" 17 | "path/filepath" 18 | "sync" 19 | "time" 20 | 21 | "github.com/hashicorp/raft" 22 | raftboltdb "github.com/hashicorp/raft-boltdb/v2" 23 | ) 24 | 25 | const ( 26 | retainSnapshotCount = 2 27 | raftTimeout = 10 * time.Second 28 | ) 29 | 30 | type command struct { 31 | Op string `json:"op,omitempty"` 32 | Key string `json:"key,omitempty"` 33 | Value string `json:"value,omitempty"` 34 | } 35 | 36 | // Node represents a node in the cluster. 37 | type Node struct { 38 | ID string `json:"id"` 39 | Address string `json:"address"` 40 | } 41 | 42 | // StoreStatus is the Status a Store returns. 43 | type StoreStatus struct { 44 | Me Node `json:"me"` 45 | Leader Node `json:"leader"` 46 | Followers []Node `json:"followers"` 47 | } 48 | 49 | // Store is a simple key-value store, where all changes are made via Raft consensus. 50 | type Store struct { 51 | RaftDir string 52 | RaftBind string 53 | inmem bool 54 | 55 | mu sync.Mutex 56 | m map[string]string // The key-value store for the system. 57 | 58 | raft *raft.Raft // The consensus mechanism 59 | 60 | logger *log.Logger 61 | } 62 | 63 | // New returns a new Store. 64 | func New(inmem bool) *Store { 65 | return &Store{ 66 | m: make(map[string]string), 67 | inmem: inmem, 68 | logger: log.New(os.Stderr, "[store] ", log.LstdFlags), 69 | } 70 | } 71 | 72 | // Open opens the store. If enableSingle is set, and there are no existing peers, 73 | // then this node becomes the first node, and therefore leader, of the cluster. 74 | // localID should be the server identifier for this node. 75 | func (s *Store) Open(enableSingle bool, localID string) error { 76 | // Setup Raft configuration. 77 | config := raft.DefaultConfig() 78 | config.LocalID = raft.ServerID(localID) 79 | 80 | // Setup Raft communication. 
81 | addr, err := net.ResolveTCPAddr("tcp", s.RaftBind) 82 | if err != nil { 83 | return err 84 | } 85 | transport, err := raft.NewTCPTransport(s.RaftBind, addr, 3, 10*time.Second, os.Stderr) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | // Create the snapshot store. This allows the Raft to truncate the log. 91 | snapshots, err := raft.NewFileSnapshotStore(s.RaftDir, retainSnapshotCount, os.Stderr) 92 | if err != nil { 93 | return fmt.Errorf("file snapshot store: %s", err) 94 | } 95 | 96 | // Create the log store and stable store. 97 | var logStore raft.LogStore 98 | var stableStore raft.StableStore 99 | if s.inmem { 100 | logStore = raft.NewInmemStore() 101 | stableStore = raft.NewInmemStore() 102 | } else { 103 | boltDB, err := raftboltdb.New(raftboltdb.Options{ 104 | Path: filepath.Join(s.RaftDir, "raft.db"), 105 | }) 106 | if err != nil { 107 | return fmt.Errorf("new bbolt store: %s", err) 108 | } 109 | logStore = boltDB 110 | stableStore = boltDB 111 | } 112 | 113 | // Instantiate the Raft systems. 114 | ra, err := raft.NewRaft(config, (*fsm)(s), logStore, stableStore, snapshots, transport) 115 | if err != nil { 116 | return fmt.Errorf("new raft: %s", err) 117 | } 118 | s.raft = ra 119 | 120 | if enableSingle { 121 | configuration := raft.Configuration{ 122 | Servers: []raft.Server{ 123 | { 124 | ID: config.LocalID, 125 | Address: transport.LocalAddr(), 126 | }, 127 | }, 128 | } 129 | ra.BootstrapCluster(configuration) 130 | } 131 | 132 | return nil 133 | } 134 | 135 | // Get returns the value for the given key. 136 | func (s *Store) Get(key string) (string, error) { 137 | s.mu.Lock() 138 | defer s.mu.Unlock() 139 | return s.m[key], nil 140 | } 141 | 142 | // Set sets the value for the given key. 
143 | func (s *Store) Set(key, value string) error { 144 | if s.raft.State() != raft.Leader { 145 | return fmt.Errorf("not leader") 146 | } 147 | 148 | c := &command{ 149 | Op: "set", 150 | Key: key, 151 | Value: value, 152 | } 153 | b, err := json.Marshal(c) 154 | if err != nil { 155 | return err 156 | } 157 | 158 | f := s.raft.Apply(b, raftTimeout) 159 | return f.Error() 160 | } 161 | 162 | // Delete deletes the given key. 163 | func (s *Store) Delete(key string) error { 164 | if s.raft.State() != raft.Leader { 165 | return fmt.Errorf("not leader") 166 | } 167 | 168 | c := &command{ 169 | Op: "delete", 170 | Key: key, 171 | } 172 | b, err := json.Marshal(c) 173 | if err != nil { 174 | return err 175 | } 176 | 177 | f := s.raft.Apply(b, raftTimeout) 178 | return f.Error() 179 | } 180 | 181 | // Join joins a node, identified by nodeID and located at addr, to this store. 182 | // The node must be ready to respond to Raft communications at that address. 183 | func (s *Store) Join(nodeID, addr string) error { 184 | s.logger.Printf("received join request for remote node %s at %s", nodeID, addr) 185 | 186 | configFuture := s.raft.GetConfiguration() 187 | if err := configFuture.Error(); err != nil { 188 | s.logger.Printf("failed to get raft configuration: %v", err) 189 | return err 190 | } 191 | 192 | for _, srv := range configFuture.Configuration().Servers { 193 | // If a node already exists with either the joining node's ID or address, 194 | // that node may need to be removed from the config first. 195 | if srv.ID == raft.ServerID(nodeID) || srv.Address == raft.ServerAddress(addr) { 196 | // However if *both* the ID and the address are the same, then nothing -- not even 197 | // a join operation -- is needed. 
198 | if srv.Address == raft.ServerAddress(addr) && srv.ID == raft.ServerID(nodeID) { 199 | s.logger.Printf("node %s at %s already member of cluster, ignoring join request", nodeID, addr) 200 | return nil 201 | } 202 | 203 | future := s.raft.RemoveServer(srv.ID, 0, 0) 204 | if err := future.Error(); err != nil { 205 | return fmt.Errorf("error removing existing node %s at %s: %s", nodeID, addr, err) 206 | } 207 | } 208 | } 209 | 210 | f := s.raft.AddVoter(raft.ServerID(nodeID), raft.ServerAddress(addr), 0, 0) 211 | if f.Error() != nil { 212 | return f.Error() 213 | } 214 | s.logger.Printf("node %s at %s joined successfully", nodeID, addr) 215 | return nil 216 | } 217 | 218 | // Status returns information about the Store. 219 | func (s *Store) Status() (StoreStatus, error) { 220 | leaderServerAddr, leaderId := s.raft.LeaderWithID() 221 | leader := Node{ 222 | ID: string(leaderId), 223 | Address: string(leaderServerAddr), 224 | } 225 | 226 | servers := s.raft.GetConfiguration().Configuration().Servers 227 | followers := []Node{} 228 | me := Node{ 229 | Address: s.RaftBind, 230 | } 231 | for _, server := range servers { 232 | if server.ID != leaderId { 233 | followers = append(followers, Node{ 234 | ID: string(server.ID), 235 | Address: string(server.Address), 236 | }) 237 | } 238 | 239 | if string(server.Address) == s.RaftBind { 240 | me = Node{ 241 | ID: string(server.ID), 242 | Address: string(server.Address), 243 | } 244 | } 245 | } 246 | 247 | status := StoreStatus{ 248 | Me: me, 249 | Leader: leader, 250 | Followers: followers, 251 | } 252 | 253 | return status, nil 254 | } 255 | 256 | type fsm Store 257 | 258 | // Apply applies a Raft log entry to the key-value store. 
259 | func (f *fsm) Apply(l *raft.Log) interface{} { 260 | var c command 261 | if err := json.Unmarshal(l.Data, &c); err != nil { 262 | panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error())) 263 | } 264 | 265 | switch c.Op { 266 | case "set": 267 | return f.applySet(c.Key, c.Value) 268 | case "delete": 269 | return f.applyDelete(c.Key) 270 | default: 271 | panic(fmt.Sprintf("unrecognized command op: %s", c.Op)) 272 | } 273 | } 274 | 275 | // Snapshot returns a snapshot of the key-value store. 276 | func (f *fsm) Snapshot() (raft.FSMSnapshot, error) { 277 | f.mu.Lock() 278 | defer f.mu.Unlock() 279 | 280 | // Clone the map. 281 | o := make(map[string]string) 282 | for k, v := range f.m { 283 | o[k] = v 284 | } 285 | return &fsmSnapshot{store: o}, nil 286 | } 287 | 288 | // Restore stores the key-value store to a previous state. 289 | func (f *fsm) Restore(rc io.ReadCloser) error { 290 | o := make(map[string]string) 291 | if err := json.NewDecoder(rc).Decode(&o); err != nil { 292 | return err 293 | } 294 | 295 | // Set the state from the snapshot, no lock required according to 296 | // Hashicorp docs. 297 | f.m = o 298 | return nil 299 | } 300 | 301 | func (f *fsm) applySet(key, value string) interface{} { 302 | f.mu.Lock() 303 | defer f.mu.Unlock() 304 | f.m[key] = value 305 | return nil 306 | } 307 | 308 | func (f *fsm) applyDelete(key string) interface{} { 309 | f.mu.Lock() 310 | defer f.mu.Unlock() 311 | delete(f.m, key) 312 | return nil 313 | } 314 | 315 | type fsmSnapshot struct { 316 | store map[string]string 317 | } 318 | 319 | func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error { 320 | err := func() error { 321 | // Encode data. 322 | b, err := json.Marshal(f.store) 323 | if err != nil { 324 | return err 325 | } 326 | 327 | // Write data to sink. 328 | if _, err := sink.Write(b); err != nil { 329 | return err 330 | } 331 | 332 | // Close the sink. 
333 | return sink.Close() 334 | }() 335 | 336 | if err != nil { 337 | sink.Cancel() 338 | } 339 | 340 | return err 341 | } 342 | 343 | func (f *fsmSnapshot) Release() {} 344 | -------------------------------------------------------------------------------- /store/store_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | // Test_StoreOpen tests that the store can be opened. 11 | func Test_StoreOpen(t *testing.T) { 12 | s := New(false) 13 | tmpDir, _ := ioutil.TempDir("", "store_test") 14 | defer os.RemoveAll(tmpDir) 15 | 16 | s.RaftBind = "127.0.0.1:0" 17 | s.RaftDir = tmpDir 18 | if s == nil { 19 | t.Fatalf("failed to create store") 20 | } 21 | 22 | if err := s.Open(false, "node0"); err != nil { 23 | t.Fatalf("failed to open store: %s", err) 24 | } 25 | } 26 | 27 | // Test_StoreOpenSingleNode tests that a command can be applied to the log 28 | func Test_StoreOpenSingleNode(t *testing.T) { 29 | s := New(false) 30 | tmpDir, _ := ioutil.TempDir("", "store_test") 31 | defer os.RemoveAll(tmpDir) 32 | 33 | s.RaftBind = "127.0.0.1:0" 34 | s.RaftDir = tmpDir 35 | if s == nil { 36 | t.Fatalf("failed to create store") 37 | } 38 | 39 | if err := s.Open(true, "node0"); err != nil { 40 | t.Fatalf("failed to open store: %s", err) 41 | } 42 | 43 | // Simple way to ensure there is a leader. 44 | time.Sleep(3 * time.Second) 45 | 46 | if err := s.Set("foo", "bar"); err != nil { 47 | t.Fatalf("failed to set key: %s", err.Error()) 48 | } 49 | 50 | // Wait for committed log entry to be applied. 
51 | time.Sleep(500 * time.Millisecond) 52 | value, err := s.Get("foo") 53 | if err != nil { 54 | t.Fatalf("failed to get key: %s", err.Error()) 55 | } 56 | if value != "bar" { 57 | t.Fatalf("key has wrong value: %s", value) 58 | } 59 | 60 | if err := s.Delete("foo"); err != nil { 61 | t.Fatalf("failed to delete key: %s", err.Error()) 62 | } 63 | 64 | // Wait for committed log entry to be applied. 65 | time.Sleep(500 * time.Millisecond) 66 | value, err = s.Get("foo") 67 | if err != nil { 68 | t.Fatalf("failed to get key: %s", err.Error()) 69 | } 70 | if value != "" { 71 | t.Fatalf("key has wrong value: %s", value) 72 | } 73 | } 74 | 75 | // Test_StoreInMemOpenSingleNode tests that a command can be applied to the log 76 | // stored in RAM. 77 | func Test_StoreInMemOpenSingleNode(t *testing.T) { 78 | s := New(true) 79 | tmpDir, _ := ioutil.TempDir("", "store_test") 80 | defer os.RemoveAll(tmpDir) 81 | 82 | s.RaftBind = "127.0.0.1:0" 83 | s.RaftDir = tmpDir 84 | if s == nil { 85 | t.Fatalf("failed to create store") 86 | } 87 | 88 | if err := s.Open(true, "node0"); err != nil { 89 | t.Fatalf("failed to open store: %s", err) 90 | } 91 | 92 | // Simple way to ensure there is a leader. 93 | time.Sleep(3 * time.Second) 94 | 95 | if err := s.Set("foo", "bar"); err != nil { 96 | t.Fatalf("failed to set key: %s", err.Error()) 97 | } 98 | 99 | // Wait for committed log entry to be applied. 100 | time.Sleep(500 * time.Millisecond) 101 | value, err := s.Get("foo") 102 | if err != nil { 103 | t.Fatalf("failed to get key: %s", err.Error()) 104 | } 105 | if value != "bar" { 106 | t.Fatalf("key has wrong value: %s", value) 107 | } 108 | 109 | if err := s.Delete("foo"); err != nil { 110 | t.Fatalf("failed to delete key: %s", err.Error()) 111 | } 112 | 113 | // Wait for committed log entry to be applied. 
114 | time.Sleep(500 * time.Millisecond) 115 | value, err = s.Get("foo") 116 | if err != nil { 117 | t.Fatalf("failed to get key: %s", err.Error()) 118 | } 119 | if value != "" { 120 | t.Fatalf("key has wrong value: %s", value) 121 | } 122 | } 123 | 124 | func Test_StoreStatus(t *testing.T) { 125 | s := New(false) 126 | tmpDir, _ := ioutil.TempDir("", "store_test") 127 | defer os.RemoveAll(tmpDir) 128 | 129 | s.RaftBind = "127.0.0.1:0" 130 | s.RaftDir = tmpDir 131 | if s == nil { 132 | t.Fatalf("failed to create store") 133 | } 134 | 135 | if err := s.Open(true, "node0"); err != nil { 136 | t.Fatalf("failed to open store: %s", err) 137 | } 138 | 139 | // assuming 3 seconds enough for raft to initialized 140 | time.Sleep(3 * time.Second) 141 | 142 | status, err := s.Status() 143 | if err != nil { 144 | t.Errorf("failed to get store status: %s", err) 145 | } 146 | 147 | if status.Me.ID != "node0" { 148 | t.Errorf("status `me.id` has invalid value") 149 | } 150 | if status.Me.Address != s.RaftBind { 151 | t.Errorf("status `me.address` has invalid value") 152 | } 153 | 154 | for _, follower := range status.Followers { 155 | if (follower.ID == status.Leader.ID) || (follower.Address == status.Leader.Address) { 156 | t.Fatalf("a node cannot be leader and follow at the same time") 157 | } 158 | } 159 | 160 | isMeInFollowersOrLeader := false 161 | for _, node := range append(status.Followers, status.Leader) { 162 | if node.ID == status.Me.ID && node.Address == status.Me.Address { 163 | isMeInFollowersOrLeader = true 164 | break 165 | } 166 | } 167 | if !isMeInFollowersOrLeader { 168 | t.Errorf("me must be exist exclusively as a leader or as a follower") 169 | } 170 | } 171 | --------------------------------------------------------------------------------