├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── buf.gen.yaml
├── buf.work.yaml
├── cmd
│   ├── popcli
│   │   ├── app
│   │   │   ├── app.go
│   │   │   ├── cmd.go
│   │   │   └── interface.go
│   │   └── client.go
│   ├── root.go
│   └── server
│       └── server.go
├── githooks
│   └── pre-commit
├── go.mod
├── go.sum
├── internal
│   ├── conf
│   │   └── conf.go
│   ├── constants
│   │   └── constants.go
│   ├── proto
│   │   └── raft
│   │       └── v1
│   │           ├── raft.pb.go
│   │           └── raft_grpc.pb.go
│   ├── raft
│   │   ├── consensus.go
│   │   ├── fortest.go
│   │   ├── raft_test.go
│   │   ├── server.go
│   │   └── storage.go
│   └── statemachine
│       ├── db.go
│       └── interface.go
├── logo.png
├── main.go
├── openapi
│   └── raft
│       └── v1
│           └── raft.swagger.json
├── proto
│   ├── buf.lock
│   ├── buf.yaml
│   └── raft
│       └── v1
│           └── raft.proto
└── scripts
    ├── grpc.sh
    └── init.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | # IDEs and editors
2 | /.idea
3 | .project
4 | .classpath
5 | .c9/
6 | *.launch
7 | .settings/
8 | *.sublime-workspace
9 |
10 | # IDE - VSCode
11 | .vscode/*
12 |
13 | # System Files
14 | .DS_Store
15 | Thumbs.db
16 | /.run
17 |
18 | # Binary file
19 | bin
20 | vendor
21 |
22 | # Tmp file
23 | gl-code-quality-report.json
24 |
25 | # Sonarqube file
26 | .scannerwork
27 |
28 | # fsm data
29 | /data
30 |
31 | # raft log
32 | /rlog
33 |
34 | # a toml config file
35 | /config.toml
36 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 hoorayman
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | OS = Linux
2 | VERSION = 0.0.1
3 | ROOT_PACKAGE=github.com/hoorayman/popple
4 |
5 | CURDIR = $(shell pwd)
6 | SOURCEDIR = $(CURDIR)
7 | COVER = $($3)
8 |
9 | ECHO = echo
10 | RM = rm -rf
11 | MKDIR = mkdir
12 |
13 | CLIENT = github.com/hoorayman/popple/cmd/popcli
14 |
15 | .PHONY: test grpc
16 |
17 | default: test lint vet
18 |
19 | test:
20 | go test -v ./...
21 |
22 | race:
23 | 	go test -cover=true -race ./...
24 |
25 | # http://golang.org/cmd/go/#hdr-Run_gofmt_on_package_sources
26 | fmt:
27 | go fmt ./...
28 |
29 | # https://godoc.org/golang.org/x/tools/cmd/goimports
30 | imports:
31 | goimports -e -d -w -local $(ROOT_PACKAGE) ./
32 |
33 | # https://github.com/golang/lint
34 | # go get github.com/golang/lint/golint
35 | lint:
36 | golint ./...
37 |
38 | # http://godoc.org/code.google.com/p/go.tools/cmd/vet
39 | # go get code.google.com/p/go.tools/cmd/vet
40 | vet:
41 | go vet ./...
42 |
43 | tidy:
44 | go mod tidy
45 |
46 | all: test
47 |
48 | grpc:
49 | sh ./scripts/grpc.sh
50 |
51 | #grpc-go:
52 | # bash ./scripts/grpc-go.sh
53 | #
54 |
55 | init:
56 | bash ./scripts/init.sh
57 |
58 | BUILD_PATH = $(shell if [ "$(CI_DEST_DIR)" != "" ]; then echo "$(CI_DEST_DIR)" ; else echo "$(PWD)"; fi)
59 |
60 | build:
61 | @$(ECHO) "Will build on "$(BUILD_PATH)
62 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -ldflags "-w -s" -v -o $(BUILD_PATH)/bin/${MODULE} $(ROOT_PACKAGE)
63 |
64 | client:
65 | @$(ECHO) "Will build on "$(BUILD_PATH)
66 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -ldflags "-w -s" -v -o $(BUILD_PATH)/bin/${MODULE} ${CLIENT}
67 |
68 | help:
69 | @$(ECHO) "Targets:"
70 | @$(ECHO) "all - test"
71 | @$(ECHO) "test - run all unit tests"
72 | @$(ECHO) "race - run all unit tests with the race detector"
73 | @$(ECHO) "fmt - run go fmt command to format code"
74 | @$(ECHO) "lint - run golint to check code style"
75 | @$(ECHO) "vet - run go vet command to check code errors"
76 | @$(ECHO) "build - build the binary; output goes to CI_DEST_DIR when set"
77 | @$(ECHO) "grpc - generate gRPC code via scripts/grpc.sh"
78 | @$(ECHO) "init - init the project"
79 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Popple
4 |
5 |
6 | [MIT License](https://github.com/hoorayman/popple/blob/main/LICENSE)
7 |
8 | Popple is a distributed, highly available, general-purpose key/value database.
9 |
10 | Features
11 | ========
12 |
13 | - In-memory database using a B-tree for fast reads and writes
14 | - Embeddable, with a simple HTTP API
15 | - ACID semantics with locking transactions that support rollbacks
16 | - A flexible key/value store for dynamic data, similar to Redis SDS
17 | - High availability and consistency via the Raft protocol
18 | - Simple command line with an optional configuration file
19 | - gRPC and HTTP served on the same port
20 |
21 | Quick Start
22 | ===============
23 |
24 | ## Installing
25 |
26 | To start using Popple, install Go and run `make build` in the root source folder.
27 | This will generate an executable named `popple` in the `./bin` folder.
28 | You can move it anywhere in your `PATH`.
29 |
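A minimal end-to-end sketch (assuming a working Go toolchain; the install destination below is just an example):

```sh
# build the popple server binary into ./bin
git clone https://github.com/hoorayman/popple.git
cd popple
make build

# optionally put it on your PATH and start a single-node dev server
cp ./bin/popple /usr/local/bin/
popple server --dev
```
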
30 | ## Usage
31 |
32 | Run `popple --help` or `popple server --help` to view the usage. For example:
33 |
34 | ```sh
35 | Start a Popple server
36 |
37 | Usage:
38 | popple server [flags]
39 |
40 | Flags:
41 | --cluster string Define cluster servers. The format is comma-separated sid=address pairs. For example, "0=127.0.0.1:8876,1=192.168.0.2:8889,2=192.168.0.56:8080"
42 | -c, --config string Path to a configuration file.
43 | --data-dir string Path to raft log and database file dir. (default "./")
44 | --dev Enable development mode. In this mode, Popple runs in-memory and does not persist Raft state to disk. As the name implies, do not run "dev" mode in production. The default is false.
45 | -h, --help help for server
46 | --sid int Current Server ID. The default is 0.
47 | ```
48 |
49 | The command-line arguments are simple and straightforward.
50 | For example, to run a single-node dev-mode server, just run `popple server --dev`. The server will pick a random port to serve on; you can read it from the console output:
51 |
52 | ```sh
53 | 2023/02/21 11:17:55 DevMode: true
54 | 2023/02/21 11:17:55 server[0] listening at [::]:36967
55 |
56 | ...
57 | ```
58 |
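To run a multi-node cluster, start one process per server ID with the same `--cluster` map. The addresses below are purely illustrative (borrowed from the flag help), assuming each node listens on the address mapped to its own `--sid`:

```sh
CLUSTER="0=127.0.0.1:8876,1=127.0.0.1:8877,2=127.0.0.1:8878"

popple server --sid 0 --data-dir ./node0 --cluster "$CLUSTER" &
popple server --sid 1 --data-dir ./node1 --cluster "$CLUSTER" &
popple server --sid 2 --data-dir ./node2 --cluster "$CLUSTER" &
```

There is also a small HTTP client, `popcli` (built from `cmd/popcli` via `make client`); its `get`, `set` and `del` subcommands take a comma-separated `--servers` list, e.g. `popcli set foo bar -s "127.0.0.1:8876,127.0.0.1:8877,127.0.0.1:8878"`.
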
59 | ## Testing
60 |
61 | Run `make test` in the root source folder to see how the cluster stays highly available under conditions like leader failure, network partitions, and so forth.
62 |
63 | How to contact?
64 | ===============
65 |
66 | - Mail: hoorayman@126.com
67 | - Toutiao (今日头条): 浩仔浩仔
68 |
69 | Contribute
70 | ===============
71 |
72 | If you have any questions about Popple, feel free to open an issue, or start a Pull Request directly. Thank you for your interest in contributing!
73 |
--------------------------------------------------------------------------------
/buf.gen.yaml:
--------------------------------------------------------------------------------
1 | version: v1
2 | plugins:
3 | - name: go
4 | out: internal/proto
5 | opt:
6 | - paths=source_relative
7 | - name: go-grpc
8 | out: internal/proto
9 | opt:
10 | - paths=source_relative
11 | - require_unimplemented_servers=false
12 | - name: grpc-gateway
13 | out: internal/proto
14 | opt:
15 | - paths=source_relative
16 | - name: openapiv2
17 | out: openapi
18 | opt:
19 | - json_names_for_fields=false
20 |
--------------------------------------------------------------------------------
/buf.work.yaml:
--------------------------------------------------------------------------------
1 | version: v1
2 | directories:
3 | - proto
4 |
--------------------------------------------------------------------------------
/cmd/popcli/app/app.go:
--------------------------------------------------------------------------------
1 | package app
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/spf13/cobra"
8 | )
9 |
10 | var rootCmd = &cobra.Command{
11 | Use: "popcli",
12 | }
13 |
14 | func Execute() {
15 | if err := rootCmd.Execute(); err != nil {
16 | fmt.Println(err)
17 | os.Exit(1)
18 | }
19 | }
20 |
21 | func init() {
22 | rootCmd.AddCommand(getCmd, setCmd, delCmd)
23 | }
24 |
--------------------------------------------------------------------------------
/cmd/popcli/app/cmd.go:
--------------------------------------------------------------------------------
1 | package app
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "math/rand"
7 | "strings"
8 | "time"
9 |
10 | "github.com/go-resty/resty/v2"
11 | "github.com/hoorayman/popple/internal/statemachine"
12 | "github.com/spf13/cobra"
13 | )
14 |
15 | const (
16 | opPath = "/submit"
17 | fetchPath = "/fetch/%s"
18 | defaultScheme = "http://"
19 | tmpfile = "/tmp/.popleader"
20 | )
21 |
22 | func init() {
23 | getCmd.Flags().StringVarP(&servers, "servers", "s", "", `Remote server addresses. The format is comma-separated addresses. For example, "127.0.0.1:8876,192.168.0.2:8889,192.168.0.56:8080"`)
24 | setCmd.Flags().StringVarP(&servers, "servers", "s", "", `Remote server addresses. The format is comma-separated addresses. For example, "127.0.0.1:8876,192.168.0.2:8889,192.168.0.56:8080"`)
25 | delCmd.Flags().StringVarP(&servers, "servers", "s", "", `Remote server addresses. The format is comma-separated addresses. For example, "127.0.0.1:8876,192.168.0.2:8889,192.168.0.56:8080"`)
26 | getCmd.MarkFlagRequired("servers")
27 | setCmd.MarkFlagRequired("servers")
28 | delCmd.MarkFlagRequired("servers")
29 | }
30 |
31 | var servers string
32 | var getCmd = &cobra.Command{
33 | Use: "get",
34 | Short: "Get a key from Popple cluster",
35 | Args: cobra.ExactArgs(1),
36 | RunE: func(cmd *cobra.Command, args []string) error {
37 | cli := NewClient(servers)
38 | val, err := cli.FetchKey(args[0])
39 | if err != nil {
40 | return err
41 | }
42 | fmt.Println(val)
43 |
44 | return nil
45 | },
46 | }
47 |
48 | var setCmd = &cobra.Command{
49 | Use: "set",
50 | Short: "Set a key to Popple cluster",
51 | Args: cobra.ExactArgs(2),
52 | RunE: func(cmd *cobra.Command, args []string) error {
53 | cli := NewClient(servers)
54 | res, err := cli.SetKey(args[0], args[1])
55 | if err != nil {
56 | return err
57 | }
58 | fmt.Println(res)
59 |
60 | return nil
61 | },
62 | }
63 |
64 | var delCmd = &cobra.Command{
65 | Use: "del",
66 | Short: "Delete a key from Popple cluster",
67 | Args: cobra.ExactArgs(1),
68 | RunE: func(cmd *cobra.Command, args []string) error {
69 | cli := NewClient(servers)
70 | res, err := cli.DelKey(args[0])
71 | if err != nil {
72 | return err
73 | }
74 | fmt.Println(res)
75 |
76 | return nil
77 | },
78 | }
79 |
80 | type client struct {
81 | servers string
82 | }
83 |
84 | func NewClient(servers string) Client {
85 | return &client{
86 | servers: servers,
87 | }
88 | }
89 |
90 | func (cli *client) FetchKey(key string) (string, error) {
91 | svrs := strings.Split(cli.servers, ",")
92 | remote := svrs[rand.Intn(len(svrs))]
93 |
94 | rclient := resty.New()
95 | resp, err := rclient.R().
96 | Get(defaultScheme + remote + fmt.Sprintf(fetchPath, key))
97 | if err != nil {
98 | return "", err
99 | }
100 |
101 | return string(resp.Body()), nil
102 | }
103 |
104 | type opResult struct {
105 | Code int `json:"code"`
106 | Msg string `json:"msg"`
107 | Err error `json:"-"`
108 | StatusCode int `json:"-"`
109 | Result string `json:"-"`
110 | Remote string `json:"-"`
111 | }
112 |
113 | func (cli *client) SetKey(key, val string) (string, error) {
114 | return cli.op("set", key, val)
115 | }
116 |
117 | func (cli *client) DelKey(key string) (string, error) {
118 | return cli.op("del", key, "")
119 | }
120 |
121 | func (cli *client) op(op, key, val string) (string, error) {
122 | svrs := strings.Split(cli.servers, ",")
123 | content, _ := ioutil.ReadFile(tmpfile)
124 | remote := string(content)
125 | rclient := resty.New()
126 | request := statemachine.Cmd{
127 | Op: op,
128 | Key: key,
129 | Value: val,
130 | }
131 |
132 | inCluster := false
133 | for _, svr := range svrs {
134 | if remote == svr {
135 | inCluster = true
136 | break
137 | }
138 | }
139 | if inCluster {
140 | firstTry := opResult{}
141 | resp, err := rclient.R().
142 | SetHeader("Accept", "application/json").
143 | SetBody(request).
144 | SetResult(&firstTry).
145 | Post(defaultScheme + remote + opPath)
146 | if err == nil && resp.StatusCode() == 200 && firstTry.Code == 0 {
147 | return string(resp.Body()), nil
148 | }
149 | }
150 |
151 | signals := []chan interface{}{}
152 | for _, svr := range svrs {
153 | signals = append(signals, func() chan interface{} {
154 | result := make(chan interface{})
155 |
156 | go func(remote string) {
157 | defer close(result)
158 |
159 | try := opResult{}
160 | resp, err := rclient.R().
161 | SetHeader("Accept", "application/json").
162 | SetBody(request).
163 | SetResult(&try).
164 | Post(defaultScheme + remote + opPath)
165 | try.Err = err
166 | try.Remote = remote
167 | if resp != nil {
168 | try.StatusCode = resp.StatusCode()
169 | try.Result = string(resp.Body())
170 | }
171 |
172 | result <- try
173 | }(svr)
174 |
175 | return result
176 | }())
177 | }
178 |
179 | signal := orChannel(signals...)
180 | timeout := time.After(10 * time.Second)
181 | Loop:
182 | for {
183 | select {
184 | case sig, ok := <-signal:
185 | if !ok {
186 | break Loop
187 | }
188 | res := sig.(opResult)
189 | if res.Err == nil && res.StatusCode == 200 && res.Code == 0 {
190 | ioutil.WriteFile(tmpfile, []byte(res.Remote), 0644)
191 | return res.Result, nil
192 | }
193 | case <-timeout:
194 | break Loop
195 | }
196 | }
197 |
198 | return "", fmt.Errorf("operation not accepted by any server")
199 | }
200 |
201 | func orChannel(in ...chan interface{}) chan interface{} {
202 | if len(in) == 0 {
203 | return nil
204 | }
205 | if len(in) == 1 {
206 | return in[0]
207 | }
208 |
209 | result := make(chan interface{})
210 | go func() {
211 | defer close(result)
212 | c1, c2 := in[0], orChannel(in[1:]...)
213 |
214 | for {
215 | select {
216 | case v, ok := <-c1:
217 | if !ok {
218 | c1 = nil
219 | } else {
220 | result <- v
221 | }
222 | case v, ok := <-c2:
223 | if !ok {
224 | c2 = nil
225 | } else {
226 | result <- v
227 | }
228 | }
229 | if c1 == nil && c2 == nil {
230 | break
231 | }
232 | }
233 | }()
234 |
235 | return result
236 | }
237 |
--------------------------------------------------------------------------------
/cmd/popcli/app/interface.go:
--------------------------------------------------------------------------------
1 | package app
2 |
3 | type Client interface {
4 | FetchKey(key string) (string, error)
5 | SetKey(key, val string) (string, error)
6 | DelKey(key string) (string, error)
7 | }
8 |
--------------------------------------------------------------------------------
/cmd/popcli/client.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "math/rand"
5 | "time"
6 |
7 | "github.com/hoorayman/popple/cmd/popcli/app"
8 | )
9 |
10 | func main() {
11 | rand.Seed(time.Now().UnixNano())
12 | app.Execute()
13 | }
14 |
--------------------------------------------------------------------------------
/cmd/root.go:
--------------------------------------------------------------------------------
1 | // Package cmd is the package for all commands
2 | package cmd
3 |
4 | import (
5 | "fmt"
6 | "os"
7 |
8 | "github.com/spf13/cobra"
9 |
10 | "github.com/hoorayman/popple/cmd/server"
11 | )
12 |
13 | var rootCmd = &cobra.Command{
14 | Use: "popple",
15 | }
16 |
17 | func Execute() {
18 | if err := rootCmd.Execute(); err != nil {
19 | fmt.Println(err)
20 | os.Exit(1)
21 | }
22 | }
23 |
24 | func init() {
25 | rootCmd.AddCommand(server.ServerCmd)
26 | }
27 |
--------------------------------------------------------------------------------
/cmd/server/server.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "log"
5 |
6 | "github.com/spf13/cobra"
7 |
8 | "github.com/hoorayman/popple/internal/conf"
9 | "github.com/hoorayman/popple/internal/raft"
10 | )
11 |
12 | func init() {
13 | // make flags
14 | ServerCmd.Flags().Bool("dev", false, `Enable development mode. In this mode, Popple runs in-memory and does not persist Raft state to disk. As the name implies, do not run "dev" mode in production. The default is false.`)
15 | ServerCmd.Flags().StringP("config", "c", "", "Path to a configuration file.")
16 | ServerCmd.Flags().StringP("data-dir", "", "./", "Path to raft log and database file dir.")
17 | ServerCmd.Flags().Int64P("sid", "", 0, "Current Server ID. The default is 0.")
18 | ServerCmd.Flags().StringP("cluster", "", "", `Define cluster servers. The format is comma-separated sid=address pairs. For example, "0=127.0.0.1:8876,1=192.168.0.2:8889,2=192.168.0.56:8080"`)
19 | // make config
20 | conf.BindPFlag("dev", ServerCmd.Flags().Lookup("dev"))
21 | conf.BindPFlag("config", ServerCmd.Flags().Lookup("config"))
22 | conf.BindPFlag("data-dir", ServerCmd.Flags().Lookup("data-dir"))
23 | conf.BindPFlag("sid", ServerCmd.Flags().Lookup("sid"))
24 | conf.BindPFlag("cluster", ServerCmd.Flags().Lookup("cluster"))
25 | }
26 |
27 | var ServerCmd = &cobra.Command{
28 | Use: "server",
29 | Short: "Start a Popple server",
30 | RunE: func(cmd *cobra.Command, args []string) error {
31 | conf.InitConfig(conf.SetEnv(), conf.SetConfigFile(conf.GetString("config")))
32 | if conf.GetBool("dev") {
33 | log.Print("DevMode: ", conf.GetBool("dev"), conf.GetString("config"))
34 | }
35 | ready := make(chan struct{})
36 | server := raft.NewServer(conf.GetInt64("sid"), conf.GetString("cluster"), ready)
37 | go func() {
38 | server.WaitConnectToPeers()
39 | close(ready)
40 | }()
41 | server.Serve()
42 |
43 | return nil
44 | },
45 | }
46 |
--------------------------------------------------------------------------------
/githooks/pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | go mod tidy
4 | buf lint
5 | buf breaking --against .git
6 | make fmt
7 | make imports
8 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/hoorayman/popple
2 |
3 | go 1.16
4 |
5 | require (
6 | github.com/fxamacker/cbor/v2 v2.4.0
7 | github.com/gin-gonic/gin v1.9.0
8 | github.com/go-playground/validator/v10 v10.11.2
9 | github.com/go-resty/resty/v2 v2.7.0
10 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0
11 | github.com/soheilhy/cmux v0.1.5
12 | github.com/spf13/cobra v1.6.1
13 | github.com/spf13/pflag v1.0.5
14 | github.com/spf13/viper v1.15.0
15 | github.com/tidwall/buntdb v1.2.10
16 | golang.org/x/sync v0.1.0
17 | golang.org/x/sys v0.5.0
18 | google.golang.org/grpc v1.52.3
19 | google.golang.org/protobuf v1.28.1
20 | )
21 |
--------------------------------------------------------------------------------
/internal/conf/conf.go:
--------------------------------------------------------------------------------
1 | // Package conf provide config
2 | package conf
3 |
4 | import (
5 | "log"
6 |
7 | "time"
8 |
9 | "github.com/go-playground/validator/v10"
10 |
11 | "github.com/spf13/pflag"
12 | "github.com/spf13/viper"
13 | )
14 |
15 | var c *Conf
16 |
17 | func init() {
18 | c = NewDefault()
19 | }
20 |
21 | type Option func(*Conf)
22 |
23 | func SetConfigFile(file string) Option {
24 | return func(c *Conf) {
25 | if file != "" {
26 | c.file = file
27 | c.SetConfigFile(c.file)
28 | err := c.ReadInConfig()
29 | if err != nil {
30 | log.Fatal(err)
31 | }
32 | }
33 | }
34 | }
35 |
36 | func SetEnv() Option {
37 | return func(c *Conf) {
38 | c.AutomaticEnv()
39 | }
40 | }
41 |
42 | type Conf struct {
43 | *viper.Viper
44 | file string
45 | validate *validator.Validate
46 | }
47 |
48 | func NewDefault() *Conf {
49 | return New(viper.GetViper())
50 | }
51 |
52 | func New(v *viper.Viper, opts ...Option) *Conf {
53 | c := &Conf{
54 | Viper: v,
55 | validate: validator.New(),
56 | }
57 |
58 | for _, o := range opts {
59 | o(c)
60 | }
61 |
62 | return c
63 | }
64 |
65 | func GetRaftPort() int {
66 | return 0
67 | }
68 |
69 | func SetDefault(key string, value interface{}) { c.SetDefault(key, value) }
70 |
71 | func Set(key string, value interface{}) { c.Set(key, value) }
72 |
73 | func Get(key string) interface{} { return c.Get(key) }
74 |
75 | func GetString(key string) string { return c.GetString(key) }
76 |
77 | func GetBool(key string) bool { return c.GetBool(key) }
78 |
79 | func GetInt(key string) int { return c.GetInt(key) }
80 |
81 | func GetUint32(key string) uint32 { return c.GetUint32(key) }
82 |
83 | func GetInt32(key string) int32 { return c.GetInt32(key) }
84 |
85 | func GetInt64(key string) int64 { return c.GetInt64(key) }
86 |
87 | func GetFloat64(key string) float64 { return c.GetFloat64(key) }
88 |
89 | func GetDuration(key string) time.Duration {
90 | return c.GetDuration(key)
91 | }
92 |
93 | // BindPFlag bind pflag
94 | func BindPFlag(key string, flag *pflag.Flag) error { return c.BindPFlag(key, flag) }
95 |
96 | // InitConfig set options to config
97 | func InitConfig(opts ...Option) {
98 | for _, opt := range opts {
99 | opt(c)
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/internal/constants/constants.go:
--------------------------------------------------------------------------------
1 | package constants
2 |
--------------------------------------------------------------------------------
/internal/proto/raft/v1/raft.pb.go:
--------------------------------------------------------------------------------
1 | // Code generated by protoc-gen-go. DO NOT EDIT.
2 | // versions:
3 | // protoc-gen-go v1.28.0
4 | // protoc (unknown)
5 | // source: raft/v1/raft.proto
6 |
7 | package v1
8 |
9 | import (
10 | reflect "reflect"
11 | sync "sync"
12 |
13 | _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
14 | protoreflect "google.golang.org/protobuf/reflect/protoreflect"
15 | protoimpl "google.golang.org/protobuf/runtime/protoimpl"
16 | )
17 |
18 | const (
19 | // Verify that this generated code is sufficiently up-to-date.
20 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
21 | // Verify that runtime/protoimpl is sufficiently up-to-date.
22 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
23 | )
24 |
25 | // The request payload for RequestVote
26 | type RequestVoteRequest struct {
27 | state protoimpl.MessageState
28 | sizeCache protoimpl.SizeCache
29 | unknownFields protoimpl.UnknownFields
30 |
31 | // candidate’s term
32 | Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"`
33 | // candidate requesting vote
34 | CandidateId int64 `protobuf:"varint,2,opt,name=candidate_id,json=candidateId,proto3" json:"candidate_id,omitempty"`
35 | // index of candidate’s last log entry
36 | LastLogIndex int64 `protobuf:"varint,3,opt,name=last_log_index,json=lastLogIndex,proto3" json:"last_log_index,omitempty"`
37 | // term of candidate’s last log entry
38 | LastLogTerm int64 `protobuf:"varint,4,opt,name=last_log_term,json=lastLogTerm,proto3" json:"last_log_term,omitempty"`
39 | }
40 |
41 | func (x *RequestVoteRequest) Reset() {
42 | *x = RequestVoteRequest{}
43 | if protoimpl.UnsafeEnabled {
44 | mi := &file_raft_v1_raft_proto_msgTypes[0]
45 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
46 | ms.StoreMessageInfo(mi)
47 | }
48 | }
49 |
50 | func (x *RequestVoteRequest) String() string {
51 | return protoimpl.X.MessageStringOf(x)
52 | }
53 |
54 | func (*RequestVoteRequest) ProtoMessage() {}
55 |
56 | func (x *RequestVoteRequest) ProtoReflect() protoreflect.Message {
57 | mi := &file_raft_v1_raft_proto_msgTypes[0]
58 | if protoimpl.UnsafeEnabled && x != nil {
59 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
60 | if ms.LoadMessageInfo() == nil {
61 | ms.StoreMessageInfo(mi)
62 | }
63 | return ms
64 | }
65 | return mi.MessageOf(x)
66 | }
67 |
68 | // Deprecated: Use RequestVoteRequest.ProtoReflect.Descriptor instead.
69 | func (*RequestVoteRequest) Descriptor() ([]byte, []int) {
70 | return file_raft_v1_raft_proto_rawDescGZIP(), []int{0}
71 | }
72 |
73 | func (x *RequestVoteRequest) GetTerm() int64 {
74 | if x != nil {
75 | return x.Term
76 | }
77 | return 0
78 | }
79 |
80 | func (x *RequestVoteRequest) GetCandidateId() int64 {
81 | if x != nil {
82 | return x.CandidateId
83 | }
84 | return 0
85 | }
86 |
87 | func (x *RequestVoteRequest) GetLastLogIndex() int64 {
88 | if x != nil {
89 | return x.LastLogIndex
90 | }
91 | return 0
92 | }
93 |
94 | func (x *RequestVoteRequest) GetLastLogTerm() int64 {
95 | if x != nil {
96 | return x.LastLogTerm
97 | }
98 | return 0
99 | }
100 |
101 | // The response body for RequestVote
102 | type RequestVoteResponse struct {
103 | state protoimpl.MessageState
104 | sizeCache protoimpl.SizeCache
105 | unknownFields protoimpl.UnknownFields
106 |
107 | // currentTerm, for candidate to update itself
108 | Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"`
109 | // true means candidate received vote
110 | VoteGranted bool `protobuf:"varint,2,opt,name=vote_granted,json=voteGranted,proto3" json:"vote_granted,omitempty"`
111 | }
112 |
113 | func (x *RequestVoteResponse) Reset() {
114 | *x = RequestVoteResponse{}
115 | if protoimpl.UnsafeEnabled {
116 | mi := &file_raft_v1_raft_proto_msgTypes[1]
117 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
118 | ms.StoreMessageInfo(mi)
119 | }
120 | }
121 |
122 | func (x *RequestVoteResponse) String() string {
123 | return protoimpl.X.MessageStringOf(x)
124 | }
125 |
126 | func (*RequestVoteResponse) ProtoMessage() {}
127 |
128 | func (x *RequestVoteResponse) ProtoReflect() protoreflect.Message {
129 | mi := &file_raft_v1_raft_proto_msgTypes[1]
130 | if protoimpl.UnsafeEnabled && x != nil {
131 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
132 | if ms.LoadMessageInfo() == nil {
133 | ms.StoreMessageInfo(mi)
134 | }
135 | return ms
136 | }
137 | return mi.MessageOf(x)
138 | }
139 |
140 | // Deprecated: Use RequestVoteResponse.ProtoReflect.Descriptor instead.
141 | func (*RequestVoteResponse) Descriptor() ([]byte, []int) {
142 | return file_raft_v1_raft_proto_rawDescGZIP(), []int{1}
143 | }
144 |
145 | func (x *RequestVoteResponse) GetTerm() int64 {
146 | if x != nil {
147 | return x.Term
148 | }
149 | return 0
150 | }
151 |
152 | func (x *RequestVoteResponse) GetVoteGranted() bool {
153 | if x != nil {
154 | return x.VoteGranted
155 | }
156 | return false
157 | }
158 |
159 | // The request payload for AppendEntries
160 | type AppendEntriesRequest struct {
161 | state protoimpl.MessageState
162 | sizeCache protoimpl.SizeCache
163 | unknownFields protoimpl.UnknownFields
164 |
165 | // leader’s term
166 | Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"`
167 | // so follower can redirect clients
168 | LeaderId int64 `protobuf:"varint,2,opt,name=leader_id,json=leaderId,proto3" json:"leader_id,omitempty"`
169 | // index of log entry immediately preceding new ones
170 | PrevLogIndex int64 `protobuf:"varint,3,opt,name=prev_log_index,json=prevLogIndex,proto3" json:"prev_log_index,omitempty"`
171 | // term of prevLogIndex entry
172 | PrevLogTerm int64 `protobuf:"varint,4,opt,name=prev_log_term,json=prevLogTerm,proto3" json:"prev_log_term,omitempty"`
173 | // log entries to store (empty for heartbeat; may send more than one for efficiency)
174 | Entries []*LogEntry `protobuf:"bytes,5,rep,name=entries,proto3" json:"entries,omitempty"`
175 | // leader’s commitIndex
176 | LeaderCommit int64 `protobuf:"varint,6,opt,name=leader_commit,json=leaderCommit,proto3" json:"leader_commit,omitempty"`
177 | }
178 |
179 | func (x *AppendEntriesRequest) Reset() {
180 | *x = AppendEntriesRequest{}
181 | if protoimpl.UnsafeEnabled {
182 | mi := &file_raft_v1_raft_proto_msgTypes[2]
183 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
184 | ms.StoreMessageInfo(mi)
185 | }
186 | }
187 |
188 | func (x *AppendEntriesRequest) String() string {
189 | return protoimpl.X.MessageStringOf(x)
190 | }
191 |
192 | func (*AppendEntriesRequest) ProtoMessage() {}
193 |
194 | func (x *AppendEntriesRequest) ProtoReflect() protoreflect.Message {
195 | mi := &file_raft_v1_raft_proto_msgTypes[2]
196 | if protoimpl.UnsafeEnabled && x != nil {
197 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
198 | if ms.LoadMessageInfo() == nil {
199 | ms.StoreMessageInfo(mi)
200 | }
201 | return ms
202 | }
203 | return mi.MessageOf(x)
204 | }
205 |
206 | // Deprecated: Use AppendEntriesRequest.ProtoReflect.Descriptor instead.
207 | func (*AppendEntriesRequest) Descriptor() ([]byte, []int) {
208 | return file_raft_v1_raft_proto_rawDescGZIP(), []int{2}
209 | }
210 |
211 | func (x *AppendEntriesRequest) GetTerm() int64 {
212 | if x != nil {
213 | return x.Term
214 | }
215 | return 0
216 | }
217 |
218 | func (x *AppendEntriesRequest) GetLeaderId() int64 {
219 | if x != nil {
220 | return x.LeaderId
221 | }
222 | return 0
223 | }
224 |
225 | func (x *AppendEntriesRequest) GetPrevLogIndex() int64 {
226 | if x != nil {
227 | return x.PrevLogIndex
228 | }
229 | return 0
230 | }
231 |
232 | func (x *AppendEntriesRequest) GetPrevLogTerm() int64 {
233 | if x != nil {
234 | return x.PrevLogTerm
235 | }
236 | return 0
237 | }
238 |
239 | func (x *AppendEntriesRequest) GetEntries() []*LogEntry {
240 | if x != nil {
241 | return x.Entries
242 | }
243 | return nil
244 | }
245 |
246 | func (x *AppendEntriesRequest) GetLeaderCommit() int64 {
247 | if x != nil {
248 | return x.LeaderCommit
249 | }
250 | return 0
251 | }
252 |
253 | // LogEntry
254 | type LogEntry struct {
255 | state protoimpl.MessageState
256 | sizeCache protoimpl.SizeCache
257 | unknownFields protoimpl.UnknownFields
258 |
259 | // command
260 | Command []byte `protobuf:"bytes,1,opt,name=command,proto3" json:"command,omitempty"`
261 | // term
262 | Term int64 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"`
263 | }
264 |
265 | func (x *LogEntry) Reset() {
266 | *x = LogEntry{}
267 | if protoimpl.UnsafeEnabled {
268 | mi := &file_raft_v1_raft_proto_msgTypes[3]
269 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
270 | ms.StoreMessageInfo(mi)
271 | }
272 | }
273 |
274 | func (x *LogEntry) String() string {
275 | return protoimpl.X.MessageStringOf(x)
276 | }
277 |
278 | func (*LogEntry) ProtoMessage() {}
279 |
280 | func (x *LogEntry) ProtoReflect() protoreflect.Message {
281 | mi := &file_raft_v1_raft_proto_msgTypes[3]
282 | if protoimpl.UnsafeEnabled && x != nil {
283 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
284 | if ms.LoadMessageInfo() == nil {
285 | ms.StoreMessageInfo(mi)
286 | }
287 | return ms
288 | }
289 | return mi.MessageOf(x)
290 | }
291 |
292 | // Deprecated: Use LogEntry.ProtoReflect.Descriptor instead.
293 | func (*LogEntry) Descriptor() ([]byte, []int) {
294 | return file_raft_v1_raft_proto_rawDescGZIP(), []int{3}
295 | }
296 |
297 | func (x *LogEntry) GetCommand() []byte {
298 | if x != nil {
299 | return x.Command
300 | }
301 | return nil
302 | }
303 |
304 | func (x *LogEntry) GetTerm() int64 {
305 | if x != nil {
306 | return x.Term
307 | }
308 | return 0
309 | }
310 |
311 | // The response body for AppendEntries
312 | type AppendEntriesResponse struct {
313 | state protoimpl.MessageState
314 | sizeCache protoimpl.SizeCache
315 | unknownFields protoimpl.UnknownFields
316 |
317 | // currentTerm, for leader to update itself
318 | Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"`
319 | // true if follower contained entry matching prevLogIndex and prevLogTerm
320 | Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"`
321 | }
322 |
323 | func (x *AppendEntriesResponse) Reset() {
324 | *x = AppendEntriesResponse{}
325 | if protoimpl.UnsafeEnabled {
326 | mi := &file_raft_v1_raft_proto_msgTypes[4]
327 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
328 | ms.StoreMessageInfo(mi)
329 | }
330 | }
331 |
332 | func (x *AppendEntriesResponse) String() string {
333 | return protoimpl.X.MessageStringOf(x)
334 | }
335 |
336 | func (*AppendEntriesResponse) ProtoMessage() {}
337 |
338 | func (x *AppendEntriesResponse) ProtoReflect() protoreflect.Message {
339 | mi := &file_raft_v1_raft_proto_msgTypes[4]
340 | if protoimpl.UnsafeEnabled && x != nil {
341 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
342 | if ms.LoadMessageInfo() == nil {
343 | ms.StoreMessageInfo(mi)
344 | }
345 | return ms
346 | }
347 | return mi.MessageOf(x)
348 | }
349 |
350 | // Deprecated: Use AppendEntriesResponse.ProtoReflect.Descriptor instead.
351 | func (*AppendEntriesResponse) Descriptor() ([]byte, []int) {
352 | return file_raft_v1_raft_proto_rawDescGZIP(), []int{4}
353 | }
354 |
355 | func (x *AppendEntriesResponse) GetTerm() int64 {
356 | if x != nil {
357 | return x.Term
358 | }
359 | return 0
360 | }
361 |
362 | func (x *AppendEntriesResponse) GetSuccess() bool {
363 | if x != nil {
364 | return x.Success
365 | }
366 | return false
367 | }
368 |
369 | var File_raft_v1_raft_proto protoreflect.FileDescriptor
370 |
371 | var file_raft_v1_raft_proto_rawDesc = []byte{
372 | 0x0a, 0x12, 0x72, 0x61, 0x66, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x70,
373 | 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x2e, 0x70,
374 | 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
375 | 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
376 | 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe3, 0x01,
377 | 0x0a, 0x12, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71,
378 | 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01,
379 | 0x28, 0x03, 0x42, 0x10, 0x92, 0x41, 0x0d, 0x32, 0x0b, 0x74, 0x65, 0x72, 0x6d, 0x20, 0x6e, 0x75,
380 | 0x6d, 0x62, 0x65, 0x72, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x34, 0x0a, 0x0c, 0x63, 0x61,
381 | 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
382 | 0x42, 0x11, 0x92, 0x41, 0x0e, 0x32, 0x0c, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65,
383 | 0x20, 0x69, 0x64, 0x52, 0x0b, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x64,
384 | 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64,
385 | 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x13, 0x92, 0x41, 0x10, 0x32, 0x0e, 0x6c,
386 | 0x61, 0x73, 0x74, 0x20, 0x6c, 0x6f, 0x67, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0c, 0x6c,
387 | 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x36, 0x0a, 0x0d, 0x6c,
388 | 0x61, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x04, 0x20, 0x01,
389 | 0x28, 0x03, 0x42, 0x12, 0x92, 0x41, 0x0f, 0x32, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x6c, 0x6f,
390 | 0x67, 0x20, 0x74, 0x65, 0x72, 0x6d, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x54,
391 | 0x65, 0x72, 0x6d, 0x22, 0x71, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f,
392 | 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x65,
393 | 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x42, 0x10, 0x92, 0x41, 0x0d, 0x32, 0x0b, 0x74,
394 | 0x65, 0x72, 0x6d, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d,
395 | 0x12, 0x34, 0x0a, 0x0c, 0x76, 0x6f, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64,
396 | 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, 0x92, 0x41, 0x0e, 0x32, 0x0c, 0x76, 0x6f, 0x74,
397 | 0x65, 0x20, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x0b, 0x76, 0x6f, 0x74, 0x65, 0x47,
398 | 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x22, 0xd4, 0x02, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e,
399 | 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
400 | 0x24, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x42, 0x10, 0x92,
401 | 0x41, 0x0d, 0x32, 0x0b, 0x74, 0x65, 0x72, 0x6d, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52,
402 | 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f,
403 | 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x0e, 0x92, 0x41, 0x0b, 0x32, 0x09, 0x6c,
404 | 0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x69, 0x64, 0x52, 0x08, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72,
405 | 0x49, 0x64, 0x12, 0x39, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69,
406 | 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x13, 0x92, 0x41, 0x10, 0x32,
407 | 0x0e, 0x70, 0x72, 0x65, 0x76, 0x20, 0x6c, 0x6f, 0x67, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52,
408 | 0x0c, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x36, 0x0a,
409 | 0x0d, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x04,
410 | 0x20, 0x01, 0x28, 0x03, 0x42, 0x12, 0x92, 0x41, 0x0f, 0x32, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x20,
411 | 0x6c, 0x6f, 0x67, 0x20, 0x74, 0x65, 0x72, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f,
412 | 0x67, 0x54, 0x65, 0x72, 0x6d, 0x12, 0x3d, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73,
413 | 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x76, 0x31,
414 | 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x10, 0x92, 0x41, 0x0d, 0x32, 0x0b,
415 | 0x6c, 0x6f, 0x67, 0x20, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x07, 0x65, 0x6e, 0x74,
416 | 0x72, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x63,
417 | 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x12, 0x92, 0x41, 0x0f,
418 | 0x32, 0x0d, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52,
419 | 0x0c, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x22, 0x58, 0x0a,
420 | 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x07, 0x63, 0x6f, 0x6d,
421 | 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0c, 0x92, 0x41, 0x09, 0x32,
422 | 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e,
423 | 0x64, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x42,
424 | 0x10, 0x92, 0x41, 0x0d, 0x32, 0x0b, 0x74, 0x65, 0x72, 0x6d, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65,
425 | 0x72, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x22, 0x65, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, 0x6e,
426 | 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
427 | 0x12, 0x24, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x42, 0x10,
428 | 0x92, 0x41, 0x0d, 0x32, 0x0b, 0x74, 0x65, 0x72, 0x6d, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
429 | 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x26, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
430 | 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0c, 0x92, 0x41, 0x09, 0x32, 0x07, 0x73, 0x75,
431 | 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x32, 0xa7,
432 | 0x01, 0x0a, 0x0b, 0x52, 0x61, 0x66, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48,
433 | 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x12, 0x1b, 0x2e,
434 | 0x72, 0x61, 0x66, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56,
435 | 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x61, 0x66,
436 | 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65,
437 | 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x65,
438 | 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1d, 0x2e, 0x72, 0x61, 0x66, 0x74,
439 | 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65,
440 | 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e,
441 | 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73,
442 | 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68,
443 | 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x6f, 0x6f, 0x72, 0x61, 0x79, 0x6d, 0x61, 0x6e,
444 | 0x2f, 0x70, 0x6f, 0x70, 0x70, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x61,
445 | 0x66, 0x74, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
446 | }
447 |
448 | var (
449 | file_raft_v1_raft_proto_rawDescOnce sync.Once
450 | file_raft_v1_raft_proto_rawDescData = file_raft_v1_raft_proto_rawDesc
451 | )
452 |
453 | func file_raft_v1_raft_proto_rawDescGZIP() []byte {
454 | file_raft_v1_raft_proto_rawDescOnce.Do(func() {
455 | file_raft_v1_raft_proto_rawDescData = protoimpl.X.CompressGZIP(file_raft_v1_raft_proto_rawDescData)
456 | })
457 | return file_raft_v1_raft_proto_rawDescData
458 | }
459 |
460 | var file_raft_v1_raft_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
461 | var file_raft_v1_raft_proto_goTypes = []interface{}{
462 | (*RequestVoteRequest)(nil), // 0: raft.v1.RequestVoteRequest
463 | (*RequestVoteResponse)(nil), // 1: raft.v1.RequestVoteResponse
464 | (*AppendEntriesRequest)(nil), // 2: raft.v1.AppendEntriesRequest
465 | (*LogEntry)(nil), // 3: raft.v1.LogEntry
466 | (*AppendEntriesResponse)(nil), // 4: raft.v1.AppendEntriesResponse
467 | }
468 | var file_raft_v1_raft_proto_depIdxs = []int32{
469 | 3, // 0: raft.v1.AppendEntriesRequest.entries:type_name -> raft.v1.LogEntry
470 | 0, // 1: raft.v1.RaftService.RequestVote:input_type -> raft.v1.RequestVoteRequest
471 | 2, // 2: raft.v1.RaftService.AppendEntries:input_type -> raft.v1.AppendEntriesRequest
472 | 1, // 3: raft.v1.RaftService.RequestVote:output_type -> raft.v1.RequestVoteResponse
473 | 4, // 4: raft.v1.RaftService.AppendEntries:output_type -> raft.v1.AppendEntriesResponse
474 | 3, // [3:5] is the sub-list for method output_type
475 | 1, // [1:3] is the sub-list for method input_type
476 | 1, // [1:1] is the sub-list for extension type_name
477 | 1, // [1:1] is the sub-list for extension extendee
478 | 0, // [0:1] is the sub-list for field type_name
479 | }
480 |
481 | func init() { file_raft_v1_raft_proto_init() }
482 | func file_raft_v1_raft_proto_init() {
483 | if File_raft_v1_raft_proto != nil {
484 | return
485 | }
486 | if !protoimpl.UnsafeEnabled {
487 | file_raft_v1_raft_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
488 | switch v := v.(*RequestVoteRequest); i {
489 | case 0:
490 | return &v.state
491 | case 1:
492 | return &v.sizeCache
493 | case 2:
494 | return &v.unknownFields
495 | default:
496 | return nil
497 | }
498 | }
499 | file_raft_v1_raft_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
500 | switch v := v.(*RequestVoteResponse); i {
501 | case 0:
502 | return &v.state
503 | case 1:
504 | return &v.sizeCache
505 | case 2:
506 | return &v.unknownFields
507 | default:
508 | return nil
509 | }
510 | }
511 | file_raft_v1_raft_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
512 | switch v := v.(*AppendEntriesRequest); i {
513 | case 0:
514 | return &v.state
515 | case 1:
516 | return &v.sizeCache
517 | case 2:
518 | return &v.unknownFields
519 | default:
520 | return nil
521 | }
522 | }
523 | file_raft_v1_raft_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
524 | switch v := v.(*LogEntry); i {
525 | case 0:
526 | return &v.state
527 | case 1:
528 | return &v.sizeCache
529 | case 2:
530 | return &v.unknownFields
531 | default:
532 | return nil
533 | }
534 | }
535 | file_raft_v1_raft_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
536 | switch v := v.(*AppendEntriesResponse); i {
537 | case 0:
538 | return &v.state
539 | case 1:
540 | return &v.sizeCache
541 | case 2:
542 | return &v.unknownFields
543 | default:
544 | return nil
545 | }
546 | }
547 | }
548 | type x struct{}
549 | out := protoimpl.TypeBuilder{
550 | File: protoimpl.DescBuilder{
551 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
552 | RawDescriptor: file_raft_v1_raft_proto_rawDesc,
553 | NumEnums: 0,
554 | NumMessages: 5,
555 | NumExtensions: 0,
556 | NumServices: 1,
557 | },
558 | GoTypes: file_raft_v1_raft_proto_goTypes,
559 | DependencyIndexes: file_raft_v1_raft_proto_depIdxs,
560 | MessageInfos: file_raft_v1_raft_proto_msgTypes,
561 | }.Build()
562 | File_raft_v1_raft_proto = out.File
563 | file_raft_v1_raft_proto_rawDesc = nil
564 | file_raft_v1_raft_proto_goTypes = nil
565 | file_raft_v1_raft_proto_depIdxs = nil
566 | }
567 |
--------------------------------------------------------------------------------
/internal/proto/raft/v1/raft_grpc.pb.go:
--------------------------------------------------------------------------------
1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
2 | // versions:
3 | // - protoc-gen-go-grpc v1.2.0
4 | // - protoc (unknown)
5 | // source: raft/v1/raft.proto
6 |
7 | package v1
8 |
9 | import (
10 | context "context"
11 |
12 | grpc "google.golang.org/grpc"
13 | codes "google.golang.org/grpc/codes"
14 | status "google.golang.org/grpc/status"
15 | )
16 |
17 | // This is a compile-time assertion to ensure that this generated file
18 | // is compatible with the grpc package it is being compiled against.
19 | // Requires gRPC-Go v1.32.0 or later.
20 | const _ = grpc.SupportPackageIsVersion7
21 |
22 | // RaftServiceClient is the client API for RaftService service.
23 | //
24 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
25 | type RaftServiceClient interface {
26 | // Invoked by candidates to gather votes
27 | RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error)
28 | // Invoked by leader to replicate log entries; also used as heartbeat
29 | AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error)
30 | }
31 |
32 | type raftServiceClient struct {
33 | cc grpc.ClientConnInterface
34 | }
35 |
36 | func NewRaftServiceClient(cc grpc.ClientConnInterface) RaftServiceClient {
37 | return &raftServiceClient{cc}
38 | }
39 |
40 | func (c *raftServiceClient) RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) {
41 | out := new(RequestVoteResponse)
42 | err := c.cc.Invoke(ctx, "/raft.v1.RaftService/RequestVote", in, out, opts...)
43 | if err != nil {
44 | return nil, err
45 | }
46 | return out, nil
47 | }
48 |
49 | func (c *raftServiceClient) AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) {
50 | out := new(AppendEntriesResponse)
51 | err := c.cc.Invoke(ctx, "/raft.v1.RaftService/AppendEntries", in, out, opts...)
52 | if err != nil {
53 | return nil, err
54 | }
55 | return out, nil
56 | }
57 |
58 | // RaftServiceServer is the server API for RaftService service.
59 | // All implementations should embed UnimplementedRaftServiceServer
60 | // for forward compatibility
61 | type RaftServiceServer interface {
62 | // Invoked by candidates to gather votes
63 | RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error)
64 | // Invoked by leader to replicate log entries; also used as heartbeat
65 | AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error)
66 | }
67 |
68 | // UnimplementedRaftServiceServer should be embedded to have forward compatible implementations.
69 | type UnimplementedRaftServiceServer struct {
70 | }
71 |
72 | func (UnimplementedRaftServiceServer) RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) {
73 | return nil, status.Errorf(codes.Unimplemented, "method RequestVote not implemented")
74 | }
75 | func (UnimplementedRaftServiceServer) AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) {
76 | return nil, status.Errorf(codes.Unimplemented, "method AppendEntries not implemented")
77 | }
78 |
79 | // UnsafeRaftServiceServer may be embedded to opt out of forward compatibility for this service.
80 | // Use of this interface is not recommended, as added methods to RaftServiceServer will
81 | // result in compilation errors.
82 | type UnsafeRaftServiceServer interface {
83 | mustEmbedUnimplementedRaftServiceServer()
84 | }
85 |
86 | func RegisterRaftServiceServer(s grpc.ServiceRegistrar, srv RaftServiceServer) {
87 | s.RegisterService(&RaftService_ServiceDesc, srv)
88 | }
89 |
90 | func _RaftService_RequestVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
91 | in := new(RequestVoteRequest)
92 | if err := dec(in); err != nil {
93 | return nil, err
94 | }
95 | if interceptor == nil {
96 | return srv.(RaftServiceServer).RequestVote(ctx, in)
97 | }
98 | info := &grpc.UnaryServerInfo{
99 | Server: srv,
100 | FullMethod: "/raft.v1.RaftService/RequestVote",
101 | }
102 | handler := func(ctx context.Context, req interface{}) (interface{}, error) {
103 | return srv.(RaftServiceServer).RequestVote(ctx, req.(*RequestVoteRequest))
104 | }
105 | return interceptor(ctx, in, info, handler)
106 | }
107 |
108 | func _RaftService_AppendEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
109 | in := new(AppendEntriesRequest)
110 | if err := dec(in); err != nil {
111 | return nil, err
112 | }
113 | if interceptor == nil {
114 | return srv.(RaftServiceServer).AppendEntries(ctx, in)
115 | }
116 | info := &grpc.UnaryServerInfo{
117 | Server: srv,
118 | FullMethod: "/raft.v1.RaftService/AppendEntries",
119 | }
120 | handler := func(ctx context.Context, req interface{}) (interface{}, error) {
121 | return srv.(RaftServiceServer).AppendEntries(ctx, req.(*AppendEntriesRequest))
122 | }
123 | return interceptor(ctx, in, info, handler)
124 | }
125 |
126 | // RaftService_ServiceDesc is the grpc.ServiceDesc for RaftService service.
127 | // It's only intended for direct use with grpc.RegisterService,
128 | // and not to be introspected or modified (even as a copy)
129 | var RaftService_ServiceDesc = grpc.ServiceDesc{
130 | ServiceName: "raft.v1.RaftService",
131 | HandlerType: (*RaftServiceServer)(nil),
132 | Methods: []grpc.MethodDesc{
133 | {
134 | MethodName: "RequestVote",
135 | Handler: _RaftService_RequestVote_Handler,
136 | },
137 | {
138 | MethodName: "AppendEntries",
139 | Handler: _RaftService_AppendEntries_Handler,
140 | },
141 | },
142 | Streams: []grpc.StreamDesc{},
143 | Metadata: "raft/v1/raft.proto",
144 | }
145 |
--------------------------------------------------------------------------------
/internal/raft/consensus.go:
--------------------------------------------------------------------------------
1 | // Package raft implements the Raft consensus module.
2 | package raft
3 |
4 | import (
5 | "context"
6 | "fmt"
7 | "log"
8 | "math/rand"
9 | "strconv"
10 | "sync"
11 | "time"
12 |
13 | "github.com/tidwall/buntdb"
14 |
15 | "github.com/hoorayman/popple/internal/conf"
16 | raftv1 "github.com/hoorayman/popple/internal/proto/raft/v1"
17 | "github.com/hoorayman/popple/internal/statemachine"
18 | )
19 |
20 | type LogEntry struct {
21 | Command []byte
22 | Term int64
23 | }
24 |
25 | type CMState int
26 |
27 | const (
28 | Follower CMState = iota
29 | Candidate
30 | Leader
31 | Dead
32 | )
33 |
34 | func (s CMState) String() string {
35 | switch s {
36 | case Follower:
37 | return "Follower"
38 | case Candidate:
39 | return "Candidate"
40 | case Leader:
41 | return "Leader"
42 | case Dead:
43 | return "Dead"
44 | default:
45 | panic("undefined consensus module state")
46 | }
47 | }
48 |
49 | type ConsensusModule struct {
50 | mu sync.Mutex
51 | id int64
52 | peerIds []int64
53 | server *Server
54 |
55 | newCommitReadyChan chan struct{}
56 |
57 | storage *RaftPersistent
58 | stateMachine statemachine.IKVDB
59 |
60 | currentTerm int64
61 | votedFor int64
62 | log []LogEntry
63 |
64 | commitIndex int
65 | lastApplied int
66 | state CMState
67 | electionResetEvent time.Time
68 | nextIndex map[int64]int
69 | matchIndex map[int64]int
70 | }
71 |
72 | func NewConsensusModule(id int64, peerIds []int64, server *Server, ready <-chan struct{}) *ConsensusModule {
73 | cm := new(ConsensusModule)
74 | cm.id = id
75 | cm.peerIds = peerIds
76 | cm.server = server
77 | cm.state = Follower
78 | cm.votedFor = -1
79 | cm.newCommitReadyChan = make(chan struct{}, 32)
80 | cm.commitIndex = -1
81 | cm.lastApplied = -1
82 | cm.nextIndex = make(map[int64]int)
83 | cm.matchIndex = make(map[int64]int)
84 | cm.storage = &RaftPersistent{}
85 | fsm, err := statemachine.NewKVDB()
86 | if err != nil {
87 | log.Fatalf("server[%v] failed to make fsm: %s", id, err)
88 | }
89 | cm.stateMachine = fsm
90 |
91 | if !conf.GetBool("dev") {
92 | cm.restoreFromStorage()
93 | cm.restoreLastApplied()
94 | }
95 |
96 | go func() {
97 | <-ready
98 | cm.mu.Lock()
99 | cm.electionResetEvent = time.Now()
100 | cm.mu.Unlock()
101 | cm.runElectionTimer()
102 | }()
103 |
104 | go cm.commitChanSender()
105 | return cm
106 | }
107 |
108 | func (cm *ConsensusModule) restoreFromStorage() {
109 | err := cm.storage.InitAndLoadLog(&cm.log)
110 | if err != nil {
111 | log.Fatal("InitAndLoadLog fail: ", err)
112 | }
113 | cm.currentTerm = cm.storage.GetTerm()
114 | cm.votedFor = cm.storage.GetVotedFor()
115 | }
116 |
117 | func (cm *ConsensusModule) persistToStorage(rollback, entries []LogEntry) {
118 | err := cm.storage.SetTerm(cm.currentTerm)
119 | if err != nil {
120 | log.Fatal("SetTerm fail: ", err)
121 | }
122 | err = cm.storage.SetVotedFor(cm.votedFor)
123 | if err != nil {
124 | log.Fatal("SetVotedFor fail: ", err)
125 | }
126 | err = cm.storage.AppendLog(rollback, entries)
127 | if err != nil {
128 | log.Fatal("AppendLog fail: ", err)
129 | }
130 | }
131 |
132 | func (cm *ConsensusModule) restoreLastApplied() {
133 | val, err := cm.stateMachine.Get(statemachine.LastAppliedKey)
134 | if err != nil && err != buntdb.ErrNotFound {
135 | log.Fatal("restoreLastApplied fail: ", err)
136 | }
137 | if err == buntdb.ErrNotFound {
138 | cm.lastApplied = -1
139 | return
140 | }
141 | cm.lastApplied, err = strconv.Atoi(val)
142 | if err != nil {
143 | log.Fatal("restoreLastApplied parse fail: ", err)
144 | }
145 | }
146 |
147 | func (cm *ConsensusModule) commitChanSender() {
148 | for range cm.newCommitReadyChan {
149 | // Find which entries we have to apply.
150 | cm.mu.Lock()
151 | savedLastApplied := cm.lastApplied
152 | var entries []LogEntry
153 | if cm.commitIndex > cm.lastApplied {
154 | entries = cm.log[cm.lastApplied+1 : cm.commitIndex+1]
155 | for _, e := range entries {
156 | err := cm.stateMachine.Call(e.Command)
157 | if err != nil && err != buntdb.ErrNotFound {
158 | log.Fatal("fsm run command fail: ", err)
159 | }
160 | }
161 | err := cm.stateMachine.Set(statemachine.LastAppliedKey, strconv.Itoa(cm.commitIndex))
162 | if err != nil {
163 | log.Fatal("fsm save lastApplied fail: ", err)
164 | }
165 | cm.lastApplied = cm.commitIndex
166 | }
167 | cm.mu.Unlock()
168 | cm.dlog("commitChanSender entries=%v, savedLastApplied=%d", entries, savedLastApplied)
169 | }
170 |
171 | cm.dlog("commitChanSender done")
172 | }
173 |
174 | func (cm *ConsensusModule) Report() (id int64, term int64, isLeader bool) {
175 | cm.mu.Lock()
176 | defer cm.mu.Unlock()
177 | return cm.id, cm.currentTerm, cm.state == Leader
178 | }
179 |
180 | func (cm *ConsensusModule) Stop() {
181 | cm.mu.Lock()
182 | defer cm.mu.Unlock()
183 | cm.state = Dead
184 | cm.dlog("becomes Dead")
185 | close(cm.newCommitReadyChan)
186 | }
187 |
188 | func (cm *ConsensusModule) Submit(command []byte) bool {
189 | cm.mu.Lock()
190 | defer cm.mu.Unlock()
191 |
192 | cm.dlog("Submit received by %v: %v", cm.state, command)
193 | if cm.state == Leader && cm.stateMachine.CommandCheck(command) {
194 | cm.log = append(cm.log, LogEntry{Command: command, Term: cm.currentTerm})
195 | if !conf.GetBool("dev") {
196 | cm.persistToStorage(nil, []LogEntry{{Command: command, Term: cm.currentTerm}})
197 | }
198 | cm.dlog("... log=%v", cm.log)
199 | return true
200 | }
201 |
202 | return false
203 | }
204 |
205 | // dlog logs a debug message in dev mode, prefixed with the server id.
206 | func (cm *ConsensusModule) dlog(format string, args ...interface{}) {
207 | if conf.GetBool("dev") {
208 | format = fmt.Sprintf("server[%d] ", cm.id) + format
209 | log.Printf(format, args...)
210 | }
211 | }
212 |
213 | func (cm *ConsensusModule) becomeFollower(term int64) {
214 | cm.dlog("becomes Follower with term=%d; log=%v", term, cm.log)
215 | cm.state = Follower
216 | cm.currentTerm = term
217 | cm.votedFor = -1
218 | cm.electionResetEvent = time.Now()
219 |
220 | go cm.runElectionTimer()
221 | }
222 |
223 | func (cm *ConsensusModule) RequestVote(ctx context.Context, request *raftv1.RequestVoteRequest) (*raftv1.RequestVoteResponse, error) {
224 | cm.mu.Lock()
225 | defer cm.mu.Unlock()
226 | if cm.state == Dead {
227 | return nil, nil
228 | }
229 |
230 | lastLogIndex, lastLogTerm := cm.lastLogIndexAndTerm()
231 | cm.dlog("RequestVote: %+v [currentTerm=%d, votedFor=%d, log index/term=(%d, %d)]", request, cm.currentTerm, cm.votedFor, lastLogIndex, lastLogTerm)
232 | if request.Term > cm.currentTerm {
233 | cm.dlog("... term out of date in RequestVote")
234 | cm.becomeFollower(request.Term)
235 | }
236 |
237 | reply := &raftv1.RequestVoteResponse{}
238 | if request.Term == cm.currentTerm &&
239 | (cm.votedFor == -1 || cm.votedFor == request.CandidateId) &&
240 | (request.LastLogTerm > lastLogTerm ||
241 | (request.LastLogTerm == lastLogTerm && int(request.LastLogIndex) >= lastLogIndex)) {
242 | reply.VoteGranted = true
243 | cm.votedFor = request.CandidateId
244 | cm.electionResetEvent = time.Now()
245 | } else {
246 | reply.VoteGranted = false
247 | }
248 | reply.Term = cm.currentTerm
249 | if !conf.GetBool("dev") {
250 | cm.persistToStorage(nil, nil)
251 | }
252 | cm.dlog("... RequestVote reply: %+v", reply)
253 |
254 | return reply, nil
255 | }
256 |
257 | func (cm *ConsensusModule) AppendEntries(ctx context.Context, request *raftv1.AppendEntriesRequest) (*raftv1.AppendEntriesResponse, error) {
258 | cm.mu.Lock()
259 | defer cm.mu.Unlock()
260 | if cm.state == Dead {
261 | return nil, nil
262 | }
263 | cm.dlog("AppendEntries: %+v", request)
264 |
265 | if request.Term > cm.currentTerm {
266 | cm.dlog("... term out of date in AppendEntries")
267 | cm.becomeFollower(request.Term)
268 | }
269 |
270 | reply := &raftv1.AppendEntriesResponse{}
271 | reply.Success = false
272 | if request.Term == cm.currentTerm {
273 | if cm.state != Follower {
274 | cm.becomeFollower(request.Term)
275 | }
276 | cm.electionResetEvent = time.Now()
277 |
278 | if request.PrevLogIndex == -1 ||
279 | (request.PrevLogIndex < int64(len(cm.log)) && request.PrevLogTerm == cm.log[request.PrevLogIndex].Term) {
280 | reply.Success = true
281 |
282 | logInsertIndex := int(request.PrevLogIndex + 1)
283 | newEntriesIndex := 0
284 |
285 | for {
286 | if logInsertIndex >= len(cm.log) || newEntriesIndex >= len(request.Entries) {
287 | break
288 | }
289 | if cm.log[logInsertIndex].Term != request.Entries[newEntriesIndex].Term {
290 | break
291 | }
292 | logInsertIndex++
293 | newEntriesIndex++
294 | }
295 |
296 | if newEntriesIndex < len(request.Entries) {
297 | cm.dlog("... rollback entries %v", cm.log[logInsertIndex:])
298 | newEntries := []LogEntry{}
299 | for _, e := range request.Entries[newEntriesIndex:] {
300 | newEntries = append(newEntries, LogEntry{Command: e.Command, Term: e.Term})
301 | }
302 | if !conf.GetBool("dev") {
303 | cm.persistToStorage(cm.log[logInsertIndex:], newEntries)
304 | }
305 | cm.dlog("... inserting entries %v from index %d", newEntries, logInsertIndex)
306 | cm.log = append(cm.log[:logInsertIndex], newEntries...)
307 | cm.dlog("... log is now: %v", cm.log)
308 | }
309 | // Set commit index.
310 | if int(request.LeaderCommit) > cm.commitIndex {
311 | cm.commitIndex = intMin(int(request.LeaderCommit), len(cm.log)-1)
312 | cm.dlog("... setting commitIndex=%d", cm.commitIndex)
313 | cm.newCommitReadyChan <- struct{}{}
314 | }
315 | }
316 | }
317 |
318 | reply.Term = cm.currentTerm
319 | if !conf.GetBool("dev") {
320 | cm.persistToStorage(nil, nil)
321 | }
322 | cm.dlog("AppendEntries reply: %+v", reply)
323 |
324 | return reply, nil
325 | }
326 |
327 | func (cm *ConsensusModule) lastLogIndexAndTerm() (int, int64) {
328 | if len(cm.log) > 0 {
329 | lastIndex := len(cm.log) - 1
330 | return lastIndex, cm.log[lastIndex].Term
331 | } else {
332 | return -1, -1
333 | }
334 | }
335 |
336 | // electionTimeout returns a pseudo-random election timeout in the 150-300ms range recommended by the Raft paper.
337 | func (cm *ConsensusModule) electionTimeout() time.Duration {
338 | return time.Duration(150+rand.Intn(150)) * time.Millisecond // follow the raft paper
339 | }
340 |
341 | func (cm *ConsensusModule) runElectionTimer() {
342 | timeoutDuration := cm.electionTimeout()
343 | cm.mu.Lock()
344 | termStarted := cm.currentTerm
345 | cm.mu.Unlock()
346 |
347 | cm.dlog("election timer started (%v), term=%d", timeoutDuration, termStarted)
348 | ticker := time.NewTicker(10 * time.Millisecond)
349 | defer ticker.Stop()
350 | for {
351 | <-ticker.C
352 |
353 | cm.mu.Lock()
354 | if cm.state != Candidate && cm.state != Follower {
355 | cm.dlog("in election timer state=%s, bailing out", cm.state)
356 | cm.mu.Unlock()
357 | return
358 | }
359 |
360 | if termStarted != cm.currentTerm {
361 | cm.dlog("in election timer term changed from %d to %d, bailing out", termStarted, cm.currentTerm)
362 | cm.mu.Unlock()
363 | return
364 | }
365 |
366 | if elapsed := time.Since(cm.electionResetEvent); elapsed >= timeoutDuration {
367 | cm.startElection()
368 | cm.mu.Unlock()
369 | return
370 | }
371 | cm.mu.Unlock()
372 | }
373 | }
374 |
375 | func (cm *ConsensusModule) startElection() {
376 | cm.state = Candidate
377 | cm.currentTerm += 1
378 | savedCurrentTerm := cm.currentTerm
379 | cm.electionResetEvent = time.Now()
380 | cm.votedFor = cm.id
381 | cm.dlog("becomes Candidate (currentTerm=%d); log=%v", savedCurrentTerm, cm.log)
382 |
383 | votesReceived := 1 // vote for itself
384 |
385 | if len(cm.peerIds) == 0 {
386 | cm.dlog("wins election with %d votes", votesReceived)
387 | cm.startLeader()
388 | return
389 | }
390 |
391 | for _, peerId := range cm.peerIds {
392 | go func(peerId int64) {
393 | cm.mu.Lock()
394 | savedLastLogIndex, savedLastLogTerm := cm.lastLogIndexAndTerm()
395 | cm.mu.Unlock()
396 |
397 | request := &raftv1.RequestVoteRequest{
398 | Term: savedCurrentTerm,
399 | CandidateId: cm.id,
400 | LastLogIndex: int64(savedLastLogIndex),
401 | LastLogTerm: savedLastLogTerm,
402 | }
403 |
404 | cm.dlog("sending RequestVote to %d: %+v", peerId, request)
405 | cli := cm.server.peerClients[peerId]
406 | if cli == nil {
407 | return
408 | }
409 | if reply, err := cli.RequestVote(context.Background(), request); err == nil {
410 | cm.mu.Lock()
411 | defer cm.mu.Unlock()
412 | cm.dlog("received RequestVoteReply %+v", reply)
413 |
414 | if cm.state != Candidate {
415 | cm.dlog("while waiting for reply, state = %v", cm.state)
416 | return
417 | }
418 |
419 | if reply.Term > savedCurrentTerm {
420 | cm.dlog("term out of date in RequestVoteReply")
421 | cm.becomeFollower(reply.Term)
422 | return
423 | } else if reply.Term == savedCurrentTerm {
424 | if reply.VoteGranted {
425 | votesReceived += 1
426 | if votesReceived<<1 > len(cm.peerIds)+1 {
427 | cm.dlog("wins election with %d votes", votesReceived)
428 | cm.startLeader()
429 | return
430 | }
431 | }
432 | }
433 | }
434 | }(peerId)
435 | }
436 |
437 | go cm.runElectionTimer()
438 | }
439 |
440 | func (cm *ConsensusModule) startLeader() {
441 | cm.state = Leader
442 |
443 | for _, peerId := range cm.peerIds {
444 | cm.nextIndex[peerId] = len(cm.log)
445 | cm.matchIndex[peerId] = -1
446 | }
447 | cm.dlog("becomes Leader; term=%d, nextIndex=%v, matchIndex=%v; log=%v", cm.currentTerm, cm.nextIndex, cm.matchIndex, cm.log)
448 |
449 | go func() {
450 | ticker := time.NewTicker(20 * time.Millisecond)
451 | defer ticker.Stop()
452 |
453 | for {
454 | cm.leaderSendHeartbeats()
455 | <-ticker.C
456 |
457 | cm.mu.Lock()
458 | if cm.state != Leader {
459 | cm.mu.Unlock()
460 | return
461 | }
462 | cm.mu.Unlock()
463 | }
464 | }()
465 | }
466 |
467 | func (cm *ConsensusModule) leaderSendHeartbeats() {
468 | cm.mu.Lock()
469 | if cm.state != Leader {
470 | cm.mu.Unlock()
471 | return
472 | }
473 | savedCurrentTerm := cm.currentTerm
474 | cm.mu.Unlock()
475 |
476 | if len(cm.peerIds) == 0 {
477 | cm.mu.Lock()
478 | if cm.state == Leader {
479 | cm.commitIndex = len(cm.log) - 1
480 | cm.dlog("leader sets commitIndex := %d", cm.commitIndex)
481 | cm.newCommitReadyChan <- struct{}{}
482 | }
483 | cm.mu.Unlock()
484 | }
485 | for _, peerId := range cm.peerIds {
486 | go func(peerId int64) {
487 | cm.mu.Lock()
488 | ni := cm.nextIndex[peerId]
489 | prevLogIndex := ni - 1
490 | var prevLogTerm int64 = -1
491 | if prevLogIndex >= 0 {
492 | prevLogTerm = cm.log[prevLogIndex].Term
493 | }
494 | entries := cm.log[ni:]
495 |
496 | sendEntries := []*raftv1.LogEntry{}
497 | for _, e := range entries {
498 | sendEntries = append(sendEntries, &raftv1.LogEntry{Command: e.Command, Term: e.Term})
499 | }
500 | request := &raftv1.AppendEntriesRequest{
501 | Term: savedCurrentTerm,
502 | LeaderId: cm.id,
503 | PrevLogIndex: int64(prevLogIndex),
504 | PrevLogTerm: prevLogTerm,
505 | Entries: sendEntries,
506 | LeaderCommit: int64(cm.commitIndex),
507 | }
508 | cm.mu.Unlock()
509 | cm.dlog("sending AppendEntries to %v: ni=%d, args=%+v", peerId, ni, request)
510 |
511 | cli := cm.server.peerClients[peerId]
512 | if cli == nil {
513 | return
514 | }
515 | if reply, err := cli.AppendEntries(context.Background(), request); err == nil {
516 | cm.mu.Lock()
517 | defer cm.mu.Unlock()
518 |
519 | if reply.Term > savedCurrentTerm {
520 | cm.dlog("term out of date in heartbeat reply")
521 | cm.becomeFollower(reply.Term)
522 | return
523 | }
524 |
525 | if cm.state == Leader && savedCurrentTerm == reply.Term {
526 | if reply.Success {
527 | cm.nextIndex[peerId] = ni + len(entries)
528 | cm.matchIndex[peerId] = cm.nextIndex[peerId] - 1
529 | cm.dlog("AppendEntries reply from %d success: nextIndex := %v, matchIndex := %v", peerId, cm.nextIndex, cm.matchIndex)
530 |
531 | savedCommitIndex := cm.commitIndex
532 | for i := cm.commitIndex + 1; i < len(cm.log); i++ {
533 | if cm.log[i].Term == cm.currentTerm {
534 | matchCount := 1
535 | for _, peerId := range cm.peerIds {
536 | if cm.matchIndex[peerId] >= i {
537 | matchCount++
538 | }
539 | }
540 | if matchCount<<1 > len(cm.peerIds)+1 {
541 | cm.commitIndex = i
542 | }
543 | }
544 | }
545 | if cm.commitIndex != savedCommitIndex {
546 | cm.dlog("leader sets commitIndex := %d", cm.commitIndex)
547 | cm.newCommitReadyChan <- struct{}{}
548 | }
549 | } else {
550 | cm.nextIndex[peerId] = ni - 1
551 | cm.dlog("AppendEntries reply from %d !success: nextIndex := %d", peerId, ni-1)
552 | }
553 | }
554 | }
555 | }(peerId)
556 | }
557 | }
558 |
559 | func intMin(a, b int) int {
560 | if a < b {
561 | return a
562 | }
563 |
564 | return b
565 | }
566 |
--------------------------------------------------------------------------------
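
Both the vote count in startElection and the commit-index rule in leaderSendHeartbeats above use the same quorum test, votes<<1 > len(cm.peerIds)+1. A minimal standalone sketch of that arithmetic (hasMajority and the numbers below are introduced here purely for illustration, they are not part of the repo):

package main

import "fmt"

// hasMajority mirrors the bit-shift quorum test used in consensus.go:
// votes<<1 > clusterSize is simply 2*votes > clusterSize, i.e. a strict majority.
// clusterSize corresponds to len(cm.peerIds)+1 (peers plus the node itself).
func hasMajority(votes, clusterSize int) bool {
	return votes<<1 > clusterSize
}

func main() {
	fmt.Println(hasMajority(1, 1)) // true: a single node is its own quorum
	fmt.Println(hasMajority(2, 3)) // true
	fmt.Println(hasMajority(2, 4)) // false: exactly half is not a majority
	fmt.Println(hasMajority(3, 5)) // true
}
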
/internal/raft/fortest.go:
--------------------------------------------------------------------------------
1 | package raft
2 |
3 | import (
4 | "strconv"
5 | "strings"
6 | "time"
7 |
8 | "github.com/hoorayman/popple/internal/conf"
9 | )
10 |
11 | func MakeCluster(n int) []*Server {
12 | conf.Set("dev", true) // set to dev mode
13 |
14 | sl := []string{}
15 | for i := 0; i < n; i++ {
16 | sl = append(sl, strconv.Itoa(i))
17 | }
18 | cluster := strings.Join(sl, ",")
19 |
20 | ready := make(chan struct{})
21 | svcs := make([]*Server, n)
22 | for i := 0; i < n; i++ {
23 | svcs[i] = NewServer(int64(i), cluster, ready)
24 | go svcs[i].Serve()
25 | }
26 | sidAddr := WaitSettingAddr(svcs)
27 | for i := 0; i < n; i++ {
28 | svcs[i].SetServerAddr(sidAddr)
29 | }
30 | for i := 0; i < n; i++ {
31 | svcs[i].WaitConnectToPeers()
32 | }
33 | close(ready)
34 |
35 | return svcs
36 | }
37 |
38 | func WaitSettingAddr(svcs []*Server) map[int64]string {
39 | ticker := time.NewTicker(1 * time.Second)
40 | defer ticker.Stop()
41 |
42 | sidAddr := make(map[int64]string)
43 | for {
44 | <-ticker.C
45 |
46 | for sid, server := range svcs {
47 | sidAddr[int64(sid)] = server.GetListenAddr()
48 | }
49 |
50 | ready := true
51 | for sid := range svcs {
52 | if sidAddr[int64(sid)] == "" {
53 | ready = false
54 | }
55 | }
56 | if ready {
57 | return sidAddr
58 | }
59 | }
60 | }
61 |
62 | // CheckLeader returns the number of servers that currently report themselves as leader.
63 | func CheckLeader(svcs []*Server) int {
64 | n := 0
65 | for _, server := range svcs {
66 | _, _, leader := server.cm.Report()
67 | if leader {
68 | n++
69 | }
70 | }
71 |
72 | return n
73 | }
74 |
75 | // GetLeader returns the index of the current leader, or -1 if no server reports leadership.
76 | func GetLeader(svcs []*Server) int {
77 | for i, server := range svcs {
78 | _, _, leader := server.cm.Report()
79 | if leader {
80 | return i
81 | }
82 | }
83 |
84 | return -1
85 | }
86 |
--------------------------------------------------------------------------------
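
MakeCluster above builds its cluster string from bare server ids ("0,1,2") and fills in the listen addresses afterwards via SetServerAddr; a real deployment would instead pass explicit id=address pairs, which GetClusterIdAddr (defined in server.go below) also accepts. A small sketch of the two forms, assuming it is compiled inside the popple module (the raft package is internal) and using made-up addresses:

package main

import (
	"fmt"

	"github.com/hoorayman/popple/internal/raft"
)

func main() {
	// Bare ids, as MakeCluster passes: every address is empty until SetServerAddr runs.
	dev, _ := raft.GetClusterIdAddr("0,1,2")
	fmt.Println(dev) // map[0: 1: 2:]

	// Explicit id=address pairs, as a --cluster flag would typically provide.
	prod, _ := raft.GetClusterIdAddr("0=127.0.0.1:9001,1=127.0.0.1:9002,2=127.0.0.1:9003")
	fmt.Println(prod) // map[0:127.0.0.1:9001 1:127.0.0.1:9002 2:127.0.0.1:9003]
}
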
/internal/raft/raft_test.go:
--------------------------------------------------------------------------------
1 | package raft
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "testing"
7 | "time"
8 |
9 | "github.com/hoorayman/popple/internal/statemachine"
10 | )
11 |
12 | func TestLeader(t *testing.T) {
13 | tests := []struct {
14 | name string
15 | args int
16 | want int
17 | }{
18 | {
19 | name: "1 node",
20 | args: 1,
21 | want: 1,
22 | },
23 | {
24 | name: "3 nodes",
25 | args: 3,
26 | want: 1,
27 | },
28 | {
29 | name: "5 nodes",
30 | args: 5,
31 | want: 1,
32 | },
33 | }
34 |
35 | for _, tt := range tests {
36 | t.Run(tt.name, func(t *testing.T) {
37 | svcs := MakeCluster(tt.args)
38 | time.Sleep(3 * time.Second)
39 | n := CheckLeader(svcs)
40 | fmt.Println("leader count: ", n)
41 | if n != tt.want {
42 | t.Errorf("expected exactly one leader, got %d", n)
43 | }
44 | })
45 | }
46 | }
47 |
48 | func TestSubmit(t *testing.T) {
49 | tests := []struct {
50 | name string
51 | nodes int
52 | args []statemachine.Cmd
53 | want struct{ Key, Val string }
54 | }{
55 | {
56 | name: "1 node",
57 | nodes: 1,
58 | args: []statemachine.Cmd{
59 | {
60 | Op: "set",
61 | Key: "mykey",
62 | Value: "",
63 | },
64 | {
65 | Op: "set",
66 | Key: "mykey",
67 | Value: "val",
68 | },
69 | {
70 | Op: "set",
71 | Key: "mykey",
72 | Value: "value",
73 | },
74 | },
75 | want: struct{ Key, Val string }{Key: "mykey", Val: "value"},
76 | },
77 | {
78 | name: "3 nodes",
79 | nodes: 3,
80 | args: []statemachine.Cmd{
81 | {
82 | Op: "set",
83 | Key: "mykey",
84 | Value: "",
85 | },
86 | {
87 | Op: "set",
88 | Key: "mykey",
89 | Value: "val",
90 | },
91 | {
92 | Op: "set",
93 | Key: "mykey",
94 | Value: "value",
95 | },
96 | },
97 | want: struct{ Key, Val string }{Key: "mykey", Val: "value"},
98 | },
99 | {
100 | name: "5 nodes",
101 | nodes: 5,
102 | args: []statemachine.Cmd{
103 | {
104 | Op: "set",
105 | Key: "mykey",
106 | Value: "",
107 | },
108 | {
109 | Op: "set",
110 | Key: "mykey",
111 | Value: "val",
112 | },
113 | {
114 | Op: "set",
115 | Key: "mykey",
116 | Value: "value",
117 | },
118 | },
119 | want: struct{ Key, Val string }{Key: "mykey", Val: "value"},
120 | },
121 | }
122 |
123 | for _, tt := range tests {
124 | t.Run(tt.name, func(t *testing.T) {
125 | svcs := MakeCluster(tt.nodes)
126 | time.Sleep(3 * time.Second)
127 | leaderID := GetLeader(svcs)
128 | for _, cmd := range tt.args {
129 | payload, _ := json.Marshal(cmd)
130 | svcs[leaderID].cm.Submit(payload)
131 | }
132 | time.Sleep(3 * time.Second)
133 | for i, server := range svcs {
134 | val, _ := server.cm.stateMachine.Get(tt.want.Key)
135 | if val != tt.want.Val {
136 | t.Errorf("server[%d] fsm state error: key %s has value %q, want %q", i, tt.want.Key, val, tt.want.Val)
137 | }
138 | }
139 | })
140 | }
141 | }
142 |
143 | func TestLeaderDown(t *testing.T) {
144 | tests := []struct {
145 | name string
146 | args int
147 | want int
148 | }{
149 | {
150 | name: "1 node",
151 | args: 1,
152 | want: 0,
153 | },
154 | {
155 | name: "3 nodes",
156 | args: 3,
157 | want: 1,
158 | },
159 | {
160 | name: "5 nodes",
161 | args: 5,
162 | want: 1,
163 | },
164 | }
165 |
166 | for _, tt := range tests {
167 | t.Run(tt.name, func(t *testing.T) {
168 | svcs := MakeCluster(tt.args)
169 | time.Sleep(3 * time.Second)
170 | leaderID := GetLeader(svcs)
171 | _, term, _ := svcs[leaderID].cm.Report()
172 | svcs[leaderID].Shutdown()
173 | for _, server := range svcs {
174 | server.DisconnectPeer(int64(leaderID))
175 | }
176 | time.Sleep(3 * time.Second)
177 | newLeaderID := GetLeader(svcs)
178 | var newTerm int64 = -1
179 | if tt.args > 1 {
180 | _, newTerm, _ = svcs[newLeaderID].cm.Report()
181 | }
182 | if (newLeaderID == -1 || newLeaderID == leaderID || newTerm <= term) && tt.want != 0 {
183 | t.Errorf("no new leader elected after the old leader went down!")
184 | }
185 | })
186 | }
187 | }
188 |
189 | func TestNetworkPartition(t *testing.T) {
190 | tests := []struct {
191 | name string
192 | args int
193 | partition int
194 | }{
195 | {
196 | name: "3 nodes",
197 | args: 3,
198 | partition: 1,
199 | },
200 | {
201 | name: "5 nodes",
202 | args: 5,
203 | partition: 2,
204 | },
205 | }
206 |
207 | for _, tt := range tests {
208 | t.Run(tt.name, func(t *testing.T) {
209 | svcs := MakeCluster(tt.args)
210 | time.Sleep(3 * time.Second)
211 | leaderID := GetLeader(svcs)
212 | _, term, _ := svcs[leaderID].cm.Report()
213 |
214 | count := 0
215 | for _, id := range svcs[leaderID].peerIds {
216 | if count < tt.partition {
217 | svcs[leaderID].DisconnectPeer(id)
218 | }
219 | count++
220 | }
221 | time.Sleep(3 * time.Second)
222 | newLeaderID := GetLeader(svcs)
223 | _, newTerm, _ := svcs[newLeaderID].cm.Report()
224 | if newLeaderID == leaderID || newTerm == term {
225 | t.Errorf("leadership did not change after the network partition!")
226 | }
227 | })
228 | }
229 | }
230 |
231 | func TestFollowerDown(t *testing.T) {
232 | tests := []struct {
233 | name string
234 | args int
235 | down int
236 | }{
237 | {
238 | name: "3 nodes",
239 | args: 3,
240 | down: 1,
241 | },
242 | {
243 | name: "5 nodes",
244 | args: 5,
245 | down: 2,
246 | },
247 | }
248 |
249 | for _, tt := range tests {
250 | t.Run(tt.name, func(t *testing.T) {
251 | svcs := MakeCluster(tt.args)
252 | time.Sleep(3 * time.Second)
253 | leaderID := GetLeader(svcs)
254 | _, term, _ := svcs[leaderID].cm.Report()
255 |
256 | count := 0
257 | for _, id := range svcs[leaderID].peerIds {
258 | if count < tt.down {
259 | svcs[id].Shutdown()
260 | svcs[leaderID].DisconnectPeer(id)
261 | }
262 | count++
263 | }
264 | time.Sleep(3 * time.Second)
265 | newLeaderID := GetLeader(svcs)
266 | _, newTerm, _ := svcs[newLeaderID].cm.Report()
267 | if newLeaderID != leaderID || newTerm != term {
268 | t.Errorf("leader or term changed after followers went down!")
269 | }
270 | })
271 | }
272 | }
273 |
--------------------------------------------------------------------------------
/internal/raft/server.go:
--------------------------------------------------------------------------------
1 | package raft
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "log"
8 | "math/rand"
9 | "net"
10 | "net/http"
11 | "os"
12 | "os/signal"
13 | "strconv"
14 | "strings"
15 | "sync"
16 | "syscall"
17 | "time"
18 |
19 | "google.golang.org/grpc"
20 | "google.golang.org/grpc/credentials/insecure"
21 |
22 | "github.com/gin-gonic/gin"
23 | "github.com/soheilhy/cmux"
24 | "golang.org/x/sync/errgroup"
25 |
26 | "github.com/hoorayman/popple/internal/conf"
27 | raftv1 "github.com/hoorayman/popple/internal/proto/raft/v1"
28 | "github.com/hoorayman/popple/internal/statemachine"
29 | )
30 |
31 | type Server struct {
32 | mu sync.Mutex
33 |
34 | serverId int64
35 | peerIds []int64
36 |
37 | cm *ConsensusModule
38 | rpcProxy *RPCProxy
39 |
40 | rpcServer *grpc.Server
41 | listener net.Listener
42 |
43 | peerClients map[int64]raftv1.RaftServiceClient
44 |
45 | sidAddr map[int64]string
46 |
47 | ready <-chan struct{}
48 | quit chan struct{}
49 | }
50 |
51 | func NewServer(sid int64, cluster string, ready <-chan struct{}) *Server {
52 | s := new(Server)
53 | s.serverId = sid
54 | s.peerClients = make(map[int64]raftv1.RaftServiceClient)
55 | s.ready = ready
56 | s.quit = make(chan struct{})
57 | sidAddr, err := GetClusterIdAddr(cluster)
58 | if err != nil {
59 | log.Fatal("Parse --cluster fail: ", err)
60 | }
61 | s.sidAddr = sidAddr
62 | for sid := range s.sidAddr {
63 | if sid != s.serverId {
64 | s.peerIds = append(s.peerIds, sid)
65 | }
66 | }
67 |
68 | return s
69 | }
70 |
71 | func (s *Server) Serve() {
72 | s.mu.Lock()
73 | s.cm = NewConsensusModule(s.serverId, s.peerIds, s, s.ready)
74 |
75 | s.rpcServer = grpc.NewServer()
76 | s.rpcProxy = &RPCProxy{s.cm}
77 | raftv1.RegisterRaftServiceServer(s.rpcServer, s.rpcProxy)
78 |
79 | var err error
80 | s.listener, err = net.Listen("tcp", s.sidAddr[s.serverId])
81 | if err != nil {
82 | log.Fatalf("[%v] failed to listen: %s", s.serverId, err)
83 | }
84 | log.Printf("server[%v] listening at %s", s.serverId, s.listener.Addr())
85 | s.mu.Unlock()
86 |
87 | c := make(chan os.Signal, 1) // signal.Notify requires a buffered channel
88 | signal.Notify(c, os.Interrupt, syscall.SIGTERM)
89 | go func() {
90 | select {
91 | case <-s.quit:
92 | log.Printf("server[%v] quit", s.serverId)
93 | case <-c:
94 | s.listener.Close()
95 | os.Exit(1)
96 | }
97 | }()
98 |
99 | m := cmux.New(s.listener)
100 | grpcListener := m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
101 | httpListener := m.Match(cmux.Any())
102 |
103 | // http server
104 | if conf.GetBool("dev") {
105 | gin.SetMode(gin.DebugMode)
106 | } else {
107 | gin.SetMode(gin.ReleaseMode)
108 | }
109 | r := gin.New()
110 | r.Use(gin.Recovery())
111 | r.POST("/submit", func(c *gin.Context) {
112 | var req statemachine.Cmd
113 | if err := c.ShouldBindJSON(&req); err != nil {
114 | c.JSON(http.StatusBadRequest, gin.H{"code": -1, "msg": err.Error()})
115 | return
116 | }
117 |
118 | data, err := json.Marshal(req)
119 | if err != nil {
120 | c.JSON(http.StatusBadRequest, gin.H{"code": -1, "msg": err.Error()})
121 | return
122 | }
123 | if s.cm.Submit(data) {
124 | c.JSON(http.StatusOK, gin.H{"code": 0, "msg": "success"})
125 | return
126 | } else {
127 | c.JSON(http.StatusInternalServerError, gin.H{"code": -2, "msg": "not accepted"})
128 | return
129 | }
130 | })
131 | r.GET("/fetch/:key", func(c *gin.Context) {
132 | key := c.Param("key")
133 | val, err := s.cm.stateMachine.Get(key)
134 | if err != nil {
135 | c.JSON(http.StatusInternalServerError, gin.H{"code": -2, "msg": err.Error()})
136 | return
137 | }
138 |
139 | c.JSON(http.StatusOK, gin.H{"code": 0, "data": val})
140 | })
141 |
142 | g := errgroup.Group{}
143 | g.Go(func() error {
144 | return s.rpcServer.Serve(grpcListener)
145 | })
146 | g.Go(func() error {
147 | return http.Serve(httpListener, r)
148 | })
149 | g.Go(func() error {
150 | return m.Serve()
151 | })
152 |
153 | err = g.Wait()
154 | if err != nil {
155 | log.Fatalf("failed to serve: %s", err)
156 | }
157 | }
158 |
159 | func (s *Server) DisconnectAll() {
160 | s.mu.Lock()
161 | defer s.mu.Unlock()
162 | for id := range s.peerClients {
163 | if s.peerClients[id] != nil {
164 | s.peerClients[id] = nil
165 | }
166 | }
167 | }
168 |
169 | func (s *Server) Shutdown() {
170 | s.cm.Stop()
171 | close(s.quit)
172 | }
173 |
174 | func (s *Server) GetListenAddr() string {
175 | s.mu.Lock()
176 | defer s.mu.Unlock()
177 | addr := s.listener.Addr()
178 | if addr != nil {
179 | return addr.String()
180 | }
181 |
182 | return ""
183 | }
184 |
185 | func (s *Server) SetServerAddr(sidAddr map[int64]string) {
186 | s.mu.Lock()
187 | defer s.mu.Unlock()
188 | s.sidAddr = sidAddr
189 | }
190 |
191 | func (s *Server) ConnectToPeer(peerId int64, addr string) error {
192 | s.mu.Lock()
193 | defer s.mu.Unlock()
194 | if s.peerClients[peerId] == nil {
195 | conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
196 | if err != nil {
197 | return err
198 | }
199 | s.peerClients[peerId] = raftv1.NewRaftServiceClient(conn)
200 | }
201 |
202 | return nil
203 | }
204 |
205 | func (s *Server) DisconnectPeer(peerId int64) error {
206 | s.mu.Lock()
207 | defer s.mu.Unlock()
208 | if s.peerClients[peerId] != nil {
209 | s.peerClients[peerId] = nil
210 | }
211 |
212 | return nil
213 | }
214 |
215 | func (s *Server) WaitConnectToPeers() {
216 | ticker := time.NewTicker(1 * time.Second)
217 | defer ticker.Stop()
218 |
219 | for {
220 | <-ticker.C
221 |
222 | for _, sid := range s.peerIds {
223 | if s.peerClients[sid] == nil {
224 | s.ConnectToPeer(sid, s.sidAddr[sid])
225 | }
226 | }
227 |
228 | ready := true
229 | for _, sid := range s.peerIds {
230 | if s.peerClients[sid] == nil {
231 | ready = false
232 | }
233 | }
234 | if ready {
235 | return
236 | }
237 | }
238 | }
239 |
240 | type RPCProxy struct {
241 | cm *ConsensusModule
242 | }
243 |
244 | func (rpp *RPCProxy) RequestVote(ctx context.Context, request *raftv1.RequestVoteRequest) (*raftv1.RequestVoteResponse, error) {
245 | if len(os.Getenv("RAFT_UNRELIABLE_RPC")) > 0 {
246 | dice := rand.Intn(10)
247 | if dice == 9 {
248 | rpp.cm.dlog("drop RequestVote")
249 | return nil, fmt.Errorf("RPC failed")
250 | } else if dice == 8 {
251 | rpp.cm.dlog("delay RequestVote")
252 | time.Sleep(75 * time.Millisecond)
253 | }
254 | }
255 |
256 | return rpp.cm.RequestVote(ctx, request)
257 | }
258 |
259 | func (rpp *RPCProxy) AppendEntries(ctx context.Context, request *raftv1.AppendEntriesRequest) (*raftv1.AppendEntriesResponse, error) {
260 | if len(os.Getenv("RAFT_UNRELIABLE_RPC")) > 0 {
261 | dice := rand.Intn(10)
262 | if dice == 9 {
263 | rpp.cm.dlog("drop AppendEntries")
264 | return nil, fmt.Errorf("RPC failed")
265 | } else if dice == 8 {
266 | rpp.cm.dlog("delay AppendEntries")
267 | time.Sleep(75 * time.Millisecond)
268 | }
269 | }
270 |
271 | return rpp.cm.AppendEntries(ctx, request)
272 | }
273 |
274 | func GetClusterIdAddr(cluster string) (map[int64]string, error) {
275 | result := make(map[int64]string)
276 |
277 | servers := strings.Split(cluster, ",")
278 | for _, sv := range servers {
279 | idAddr := strings.Split(sv, "=")
280 | if idAddr[0] == "" {
281 | continue
282 | }
283 | i, err := strconv.Atoi(idAddr[0])
284 | if err != nil {
285 | return nil, err
286 | }
287 | if len(idAddr) >= 2 {
288 | result[int64(i)] = idAddr[1]
289 | } else {
290 | result[int64(i)] = ""
291 | }
292 | }
293 |
294 | return result, nil
295 | }
296 |
--------------------------------------------------------------------------------
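
Because Serve multiplexes gRPC and the gin HTTP API on one cmux listener, a plain JSON client is enough to exercise the whole write path: POST /submit on the leader goes through ConsensusModule.Submit, gets replicated, and is applied to every node's state machine, after which GET /fetch/:key reads it back. A rough end-to-end sketch using the dev-mode helpers from fortest.go; it has to live inside the popple module, and the sleeps are arbitrary settle times rather than part of the API:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/hoorayman/popple/internal/raft"
)

func main() {
	// Start a 3-node in-memory cluster and wait for a leader.
	svcs := raft.MakeCluster(3)
	time.Sleep(3 * time.Second)

	leader := raft.GetLeader(svcs)
	if leader < 0 {
		panic("no leader elected")
	}
	addr := svcs[leader].GetListenAddr()

	// Write through the leader's HTTP endpoint.
	body := bytes.NewBufferString(`{"op":"set","key":"greeting","value":"hello"}`)
	resp, err := http.Post("http://"+addr+"/submit", "application/json", body)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("submit:", resp.Status)

	// Give the entry time to commit and apply, then read it back from the same node.
	time.Sleep(time.Second)
	resp, err = http.Get("http://" + addr + "/fetch/greeting")
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println("fetch:", string(out))
}
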
/internal/raft/storage.go:
--------------------------------------------------------------------------------
1 | package raft
2 |
3 | import (
4 | "bytes"
5 | "io"
6 | "log"
7 | "os"
8 | "path/filepath"
9 | "unsafe"
10 |
11 | "github.com/fxamacker/cbor/v2"
12 | "golang.org/x/sys/unix"
13 |
14 | "github.com/hoorayman/popple/internal/conf"
15 | )
16 |
17 | const (
18 | raftLogFileName = "rlog"
19 | defaultAllocSize = 1024 * 1024 // 1M
20 | )
21 |
22 | type RaftPersistent struct {
23 | fd int
24 | file *os.File
25 | data []byte
26 | used *int64
27 | term *int64
28 | votedfor *int64
29 | }
30 |
31 | func (rp *RaftPersistent) InitAndLoadLog(dest *[]LogEntry) error {
32 | file, err := os.OpenFile(filepath.Join(conf.GetString("data-dir"), raftLogFileName), os.O_CREATE|os.O_RDWR, 0644)
33 | if err != nil {
34 | return err
35 | }
36 | rp.file = file
37 | rp.fd = int(file.Fd())
38 |
39 | info, err := file.Stat()
40 | if err != nil {
41 | return err
42 | }
43 | size := info.Size()
44 | newFile := false
45 | if size == 0 {
46 | newFile = true
47 | err := file.Truncate(defaultAllocSize)
48 | if err != nil {
49 | return err
50 | }
51 | size = defaultAllocSize
52 | }
53 |
54 | data, err := unix.Mmap(int(file.Fd()), 0, int(size), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
55 | if err != nil {
56 | return err
57 | }
58 | rp.data = data
59 | rp.used = (*int64)(unsafe.Pointer(&data[0]))
60 | rp.term = (*int64)(unsafe.Pointer(&data[8]))
61 | rp.votedfor = (*int64)(unsafe.Pointer(&data[16]))
62 | if newFile {
63 | *rp.used = 24
64 | *rp.votedfor = -1
65 | }
66 |
67 | buf := bytes.NewReader(data[24:(*rp.used)])
68 | g := cbor.NewDecoder(buf)
69 | for {
70 | var x LogEntry
71 | err = g.Decode(&x)
72 | if err != nil {
73 | break
74 | }
75 | *dest = append(*dest, LogEntry{Command: x.Command, Term: x.Term})
76 | }
77 | if err != io.EOF {
78 | log.Fatal(err)
79 | }
80 |
81 | return nil
82 | }
83 |
84 | func (rp *RaftPersistent) AppendLog(rollback, entries []LogEntry) error {
85 | rbuf := bytes.NewBuffer(nil)
86 | rolle := cbor.NewEncoder(rbuf)
87 | for _, entry := range rollback {
88 | err := rolle.Encode(entry)
89 | if err != nil {
90 | return err
91 | }
92 | }
93 | *rp.used -= int64(rbuf.Len())
94 |
95 | if *rp.used < 24 {
96 | *rp.used = 24
97 | }
98 | currentFileSize := *rp.used
99 | buf := bytes.NewBuffer(nil)
100 | g := cbor.NewEncoder(buf)
101 | for _, entry := range entries {
102 | err := g.Encode(entry)
103 | if err != nil {
104 | return err
105 | }
106 | }
107 |
108 | if currentFileSize+int64(buf.Len()) > int64(len(rp.data)) {
109 | newSize := currentFileSize + int64(buf.Len()) + defaultAllocSize
110 | err := rp.file.Truncate(newSize)
111 | if err != nil {
112 | return err
113 | }
114 |
115 | unix.Munmap(rp.data)
116 | data, err := unix.Mmap(rp.fd, 0, int(newSize), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
117 | if err != nil {
118 | return err
119 | }
120 | rp.data = data
121 | rp.used = (*int64)(unsafe.Pointer(&data[0]))
122 | rp.term = (*int64)(unsafe.Pointer(&data[8]))
123 | rp.votedfor = (*int64)(unsafe.Pointer(&data[16]))
124 | }
125 |
126 | copy(rp.data[currentFileSize:], buf.Bytes())
127 | newUsedSize := currentFileSize + int64(buf.Len())
128 | *rp.used = newUsedSize
129 |
130 | // err := unix.Msync(rp.data, unix.MS_SYNC)
131 | // if err != nil {
132 | // return err
133 | // }
134 |
135 | return nil
136 | }
137 |
138 | func (rp *RaftPersistent) LogLen() int64 {
139 | return *rp.used
140 | }
141 |
142 | func (rp *RaftPersistent) SetTerm(term int64) error {
143 | *rp.term = term
144 | // return unix.Msync(rp.data, unix.MS_SYNC)
145 | return nil
146 | }
147 |
148 | func (rp *RaftPersistent) GetTerm() int64 {
149 | return *rp.term
150 | }
151 |
152 | func (rp *RaftPersistent) SetVotedFor(votefor int64) error {
153 | *rp.votedfor = votefor
154 | // return unix.Msync(rp.data, unix.MS_SYNC)
155 | return nil
156 | }
157 |
158 | func (rp *RaftPersistent) GetVotedFor() int64 {
159 | return *rp.votedfor
160 | }
161 |
--------------------------------------------------------------------------------
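
RaftPersistent keeps everything in one mmap'ed file: a 24-byte header holding three int64 fields (used, term, votedFor) at offsets 0, 8 and 16, followed by CBOR-encoded LogEntry records packed back to back up to offset used. A minimal read-only sketch of that layout; it assumes a little-endian host, since the pointer casts above store the integers in native byte order, and the file path is an assumption for illustration:

package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

func main() {
	// Path assumed for illustration; the repo writes the file under conf's data-dir.
	f, err := os.Open("rlog")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var hdr [24]byte
	if _, err := f.ReadAt(hdr[:], 0); err != nil {
		panic(err)
	}
	used := int64(binary.LittleEndian.Uint64(hdr[0:8]))
	term := int64(binary.LittleEndian.Uint64(hdr[8:16]))
	votedFor := int64(binary.LittleEndian.Uint64(hdr[16:24]))
	fmt.Printf("used=%d term=%d votedFor=%d\n", used, term, votedFor)
	// Bytes [24, used) hold the CBOR-encoded LogEntry records.
}
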
/internal/statemachine/db.go:
--------------------------------------------------------------------------------
1 | package statemachine
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "path/filepath"
7 |
8 | "github.com/tidwall/buntdb"
9 |
10 | "github.com/hoorayman/popple/internal/conf"
11 | )
12 |
13 | const (
14 | defaultKVDBFileName = "data"
15 | LastAppliedKey = "kvdbLastApplied"
16 | )
17 |
18 | type KVDB struct {
19 | db *buntdb.DB
20 | }
21 |
22 | func NewKVDB() (IKVDB, error) {
23 | var db *buntdb.DB
24 |
25 | if conf.GetBool("dev") {
26 | b, err := buntdb.Open(":memory:")
27 | if err != nil {
28 | return nil, err
29 | }
30 | db = b
31 | } else {
32 | b, err := buntdb.Open(filepath.Join(conf.GetString("data-dir"), defaultKVDBFileName))
33 | if err != nil {
34 | return nil, err
35 | }
36 | db = b
37 | }
38 |
39 | return &KVDB{db: db}, nil
40 | }
41 |
42 | func (k *KVDB) Set(key, value string) error {
43 | return k.db.Update(func(tx *buntdb.Tx) error {
44 | _, _, err := tx.Set(key, value, nil)
45 | return err
46 | })
47 | }
48 |
49 | func (k *KVDB) Get(key string) (string, error) {
50 | result := ""
51 | err := k.db.View(func(tx *buntdb.Tx) error {
52 | val, err := tx.Get(key)
53 | if err != nil {
54 | return err
55 | }
56 | result = val
57 |
58 | return nil
59 | })
60 |
61 | return result, err
62 | }
63 |
64 | func (k *KVDB) Delete(key string) error {
65 | return k.db.Update(func(tx *buntdb.Tx) error {
66 | _, err := tx.Delete(key)
67 | return err
68 | })
69 | }
70 |
71 | type Cmd struct {
72 | Op string `json:"op" binding:"required"`
73 | Key string `json:"key"`
74 | Value string `json:"value"`
75 | }
76 |
77 | var SupportOps map[string]struct{} = map[string]struct{}{"set": {}, "del": {}}
78 |
79 | func (k *KVDB) Call(command []byte) error {
80 | cmd := Cmd{}
81 | err := json.Unmarshal(command, &cmd)
82 | if err != nil {
83 | return err
84 | }
85 |
86 | if _, ok := SupportOps[cmd.Op]; !ok {
87 | return fmt.Errorf("unsupported operation")
88 | }
89 | if cmd.Op == "set" {
90 | return k.Set(cmd.Key, cmd.Value)
91 | }
92 | if cmd.Op == "del" {
93 | return k.Delete(cmd.Key)
94 | }
95 |
96 | return nil
97 | }
98 |
99 | func (k *KVDB) CommandCheck(command []byte) bool {
100 | cmd := Cmd{}
101 | err := json.Unmarshal(command, &cmd)
102 | if err != nil {
103 | return false
104 | }
105 |
106 | if _, ok := SupportOps[cmd.Op]; !ok {
107 | return false
108 | }
109 |
110 | return true
111 | }
112 |
--------------------------------------------------------------------------------
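
Commands reach the KVDB as JSON-encoded Cmd values: CommandCheck gate-keeps them before the leader appends to its log, and Call executes them once committed. A small sketch of that contract against an in-memory instance, forcing dev mode the same way fortest.go does:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hoorayman/popple/internal/conf"
	"github.com/hoorayman/popple/internal/statemachine"
)

func main() {
	conf.Set("dev", true) // dev mode makes NewKVDB open an in-memory buntdb

	db, err := statemachine.NewKVDB()
	if err != nil {
		panic(err)
	}

	set, _ := json.Marshal(statemachine.Cmd{Op: "set", Key: "greeting", Value: "hello"})
	fmt.Println(db.CommandCheck(set)) // true: "set" is a supported op

	if err := db.Call(set); err != nil {
		panic(err)
	}
	val, _ := db.Get("greeting")
	fmt.Println(val) // hello

	bad, _ := json.Marshal(statemachine.Cmd{Op: "incr", Key: "counter"})
	fmt.Println(db.CommandCheck(bad)) // false: only "set" and "del" are supported
}
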
/internal/statemachine/interface.go:
--------------------------------------------------------------------------------
1 | package statemachine
2 |
3 | type IKVDB interface {
4 | Set(key, value string) error
5 | Get(key string) (string, error)
6 | Delete(key string) error
7 | Call(command []byte) error
8 | CommandCheck(command []byte) bool
9 | }
10 |
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hoorayman/popple/8e51596a375cb3284defbe6e6468dc18022d4886/logo.png
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "math/rand"
5 | "time"
6 |
7 | "github.com/hoorayman/popple/cmd"
8 | )
9 |
10 | func main() {
11 | rand.Seed(time.Now().UnixNano())
12 | cmd.Execute()
13 | }
14 |
--------------------------------------------------------------------------------
/openapi/raft/v1/raft.swagger.json:
--------------------------------------------------------------------------------
1 | {
2 | "swagger": "2.0",
3 | "info": {
4 | "title": "raft/v1/raft.proto",
5 | "version": "version not set"
6 | },
7 | "tags": [
8 | {
9 | "name": "RaftService"
10 | }
11 | ],
12 | "consumes": [
13 | "application/json"
14 | ],
15 | "produces": [
16 | "application/json"
17 | ],
18 | "paths": {},
19 | "definitions": {
20 | "protobufAny": {
21 | "type": "object",
22 | "properties": {
23 | "type_url": {
24 | "type": "string"
25 | },
26 | "value": {
27 | "type": "string",
28 | "format": "byte"
29 | }
30 | }
31 | },
32 | "rpcStatus": {
33 | "type": "object",
34 | "properties": {
35 | "code": {
36 | "type": "integer",
37 | "format": "int32"
38 | },
39 | "message": {
40 | "type": "string"
41 | },
42 | "details": {
43 | "type": "array",
44 | "items": {
45 | "$ref": "#/definitions/protobufAny"
46 | }
47 | }
48 | }
49 | },
50 | "v1AppendEntriesResponse": {
51 | "type": "object",
52 | "properties": {
53 | "term": {
54 | "type": "string",
55 | "format": "int64",
56 | "description": "term number",
57 | "title": "currentTerm, for leader to update itself"
58 | },
59 | "success": {
60 | "type": "boolean",
61 | "description": "success",
62 | "title": "true if follower contained entry matching prevLogIndex and prevLogTerm"
63 | }
64 | },
65 | "title": "The response body for AppendEntries"
66 | },
67 | "v1LogEntry": {
68 | "type": "object",
69 | "properties": {
70 | "command": {
71 | "type": "string",
72 | "format": "byte",
73 | "description": "command",
74 | "title": "command"
75 | },
76 | "term": {
77 | "type": "string",
78 | "format": "int64",
79 | "description": "term number",
80 | "title": "term"
81 | }
82 | },
83 | "title": "LogEntry"
84 | },
85 | "v1RequestVoteResponse": {
86 | "type": "object",
87 | "properties": {
88 | "term": {
89 | "type": "string",
90 | "format": "int64",
91 | "description": "term number",
92 | "title": "currentTerm, for candidate to update itself"
93 | },
94 | "vote_granted": {
95 | "type": "boolean",
96 | "description": "vote granted",
97 | "title": "true means candidate received vote"
98 | }
99 | },
100 | "title": "The response body for RequestVote"
101 | }
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/proto/buf.lock:
--------------------------------------------------------------------------------
1 | # Generated by buf. DO NOT EDIT.
2 | version: v1
3 | deps:
4 | - remote: buf.build
5 | owner: googleapis
6 | repository: googleapis
7 | commit: 75b4300737fb4efca0831636be94e517
8 | - remote: buf.build
9 | owner: grpc-ecosystem
10 | repository: grpc-gateway
11 | commit: a1ecdc58eccd49aa8bea2a7a9022dc27
12 |
--------------------------------------------------------------------------------
/proto/buf.yaml:
--------------------------------------------------------------------------------
1 | version: v1
2 | name: buf.build/hoorayman/grpc-proto
3 | deps:
4 | - buf.build/googleapis/googleapis
5 | - buf.build/grpc-ecosystem/grpc-gateway
6 | breaking:
7 | use:
8 | - FILE
9 | lint:
10 | use:
11 | - DEFAULT
12 | - COMMENTS
13 | rpc_allow_google_protobuf_empty_requests: true
14 | rpc_allow_google_protobuf_empty_responses: true
15 | allow_comment_ignores: true
16 |
--------------------------------------------------------------------------------
/proto/raft/v1/raft.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package raft.v1;
4 | option go_package = "github.com/hoorayman/popple/proto/raft/v1";
5 |
6 | import "protoc-gen-openapiv2/options/annotations.proto";
7 |
8 | // RaftService APIs
9 | service RaftService {
10 | // Invoked by candidates to gather votes
11 | rpc RequestVote(RequestVoteRequest) returns (RequestVoteResponse);
12 | // Invoked by leader to replicate log entries; also used as heartbeat
13 | rpc AppendEntries(AppendEntriesRequest) returns (AppendEntriesResponse);
14 | }
15 |
16 | // The request payload for RequestVote
17 | message RequestVoteRequest {
18 | // candidate’s term
19 | int64 term = 1 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
20 | description: "term number",
21 | }];
22 | // candidate requesting vote
23 | int64 candidate_id = 2 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
24 | description: "candidate id",
25 | }];
26 | // index of candidate’s last log entry
27 | int64 last_log_index = 3 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
28 | description: "last log index",
29 | }];
30 | // term of candidate’s last log entry
31 | int64 last_log_term = 4 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
32 | description: "last log term",
33 | }];
34 | }
35 |
36 | // The response body for RequestVote
37 | message RequestVoteResponse {
38 | // currentTerm, for candidate to update itself
39 | int64 term = 1 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
40 | description: "term number",
41 | }];
42 | // true means candidate received vote
43 | bool vote_granted = 2 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
44 | description: "vote granted",
45 | }];
46 | }
47 |
48 | // The request payload for AppendEntries
49 | message AppendEntriesRequest{
50 | // leader’s term
51 | int64 term = 1 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
52 | description: "term number",
53 | }];
54 | // so follower can redirect clients
55 | int64 leader_id = 2 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
56 | description: "leader id",
57 | }];
58 | // index of log entry immediately preceding new ones
59 | int64 prev_log_index = 3 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
60 | description: "prev log index",
61 | }];
62 | // term of prevLogIndex entry
63 | int64 prev_log_term = 4 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
64 | description: "prev log term",
65 | }];
66 | // log entries to store (empty for heartbeat; may send more than one for efficiency)
67 | repeated LogEntry entries = 5 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
68 | description: "log entries",
69 | }];
70 | // leader’s commitIndex
71 | int64 leader_commit = 6 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
72 | description: "leader commit",
73 | }];
74 | }
75 |
76 | // LogEntry
77 | message LogEntry {
78 | // command
79 | bytes command = 1 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
80 | description: "command",
81 | }];
82 | // term
83 | int64 term = 2 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
84 | description: "term number",
85 | }];
86 | }
87 |
88 | // The response body for AppendEntries
89 | message AppendEntriesResponse {
90 | // currentTerm, for leader to update itself
91 | int64 term = 1 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
92 | description: "term number",
93 | }];
94 | // true if follower contained entry matching prevLogIndex and prevLogTerm
95 | bool success = 2 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
96 | description: "success",
97 | }];
98 | }
99 |
--------------------------------------------------------------------------------
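
The client generated from this proto is the same one the servers use to talk to their peers, so a standalone gRPC call is straightforward. A sketch of a heartbeat-style AppendEntries against a scratch node; the address and field values are made up for illustration, and pointing this at a live cluster would interfere with its terms:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	raftv1 "github.com/hoorayman/popple/internal/proto/raft/v1"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:9001", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := raftv1.NewRaftServiceClient(conn)
	// An empty Entries slice makes this a pure heartbeat.
	reply, err := client.AppendEntries(context.Background(), &raftv1.AppendEntriesRequest{
		Term:         1,
		LeaderId:     0,
		PrevLogIndex: -1,
		PrevLogTerm:  -1,
		LeaderCommit: -1,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("term=%d success=%v\n", reply.Term, reply.Success)
}
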
/scripts/grpc.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | #buf mod update
4 | buf lint
5 | buf breaking --against .git
6 | buf generate
7 |
--------------------------------------------------------------------------------
/scripts/init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | git init
4 | cp ./githooks/pre-commit .git/hooks/pre-commit
5 | chmod 777 .git/hooks/pre-commit
6 | echo 'git hooks init done.'
7 | exit 0
--------------------------------------------------------------------------------