├── .gitignore ├── LICENSE ├── README.md ├── client ├── alpha.go ├── feedback_server.go └── user_client.go ├── proto ├── client │ ├── client.pb.go │ └── client.proto └── server │ ├── server.pb.go │ └── server.proto ├── sctripts └── refresh_proto.sh ├── server ├── alpha.go ├── bftraft.go ├── clients.go ├── config.go ├── consensus.go ├── conshash.go ├── group.go ├── hosts.go ├── log_entries.go ├── membership.go ├── observer.go ├── peers.go ├── server_test.go ├── store.go ├── time_wheel.go └── vote.go ├── test ├── server1.json ├── server2.json ├── server3.json ├── server4.json ├── server5.json ├── server6.json ├── server7.json └── testserver.go └── utils ├── alpha.go ├── conns.go ├── consensus.go ├── encoding.go ├── rpcs.go ├── shares.go ├── signature.go └── utils_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | 7 | # Test binary, build with `go test -c` 8 | *.test 9 | 10 | # Output of the go coverage tool, specifically when used with LiteIDE 11 | *.out 12 | 13 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 14 | .glide/ 15 | 16 | .idea 17 | BFTRaft.go.iml 18 | server/test_data 19 | test/test_data 20 | test/test -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Hao Shi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The 
above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BFTRaft.go 2 | Byzantine fault tolerance raft state machine 3 | -------------------------------------------------------------------------------- /client/alpha.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | spb "github.com/PomeloCloud/BFTRaft4go/proto/server" 5 | "github.com/PomeloCloud/BFTRaft4go/utils" 6 | "log" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | type AlphaRPCsCache struct { 12 | lock sync.Mutex 13 | bootstraps []string 14 | rpcs []*spb.BFTRaftClient 15 | lastCheck time.Time 16 | } 17 | 18 | func (arc *AlphaRPCsCache) ResetBootstrap(addrs []string) { 19 | arc.bootstraps = addrs 20 | } 21 | 22 | func (arc *AlphaRPCsCache) Get() []*spb.BFTRaftClient { 23 | arc.lock.Lock() 24 | defer arc.lock.Unlock() 25 | if len(arc.rpcs) < 1 || time.Now().After(arc.lastCheck.Add(5*time.Minute)) { 26 | nodes := utils.AlphaNodes(arc.bootstraps) 27 | arc.rpcs = []*spb.BFTRaftClient{} 28 | bootstraps := []string{} 29 | if len(nodes) == 0 { 30 | log.Println("thre is no alpha nodes found") 31 | } 32 | for _, node := range nodes { 33 | if c, err := utils.GetClusterRPC(node.ServerAddr); err == nil { 34 | 
arc.rpcs = append(arc.rpcs, &c) 35 | bootstraps = append(bootstraps, node.ServerAddr) 36 | } else { 37 | log.Println("cannot get cluster rpc for getting alpha nodes:", err) 38 | } 39 | } 40 | if len(bootstraps) > 0 { 41 | arc.ResetBootstrap(bootstraps) 42 | } 43 | arc.lastCheck = time.Now() 44 | log.Println("alpha nodes refreshed:", len(arc.rpcs)) 45 | } 46 | return arc.rpcs 47 | } 48 | 49 | func NewAlphaRPCsCache(bootstraps []string) AlphaRPCsCache { 50 | return AlphaRPCsCache{ 51 | lock: sync.Mutex{}, 52 | bootstraps: bootstraps, 53 | rpcs: []*spb.BFTRaftClient{}, 54 | lastCheck: time.Now(), 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /client/feedback_server.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | pb "github.com/PomeloCloud/BFTRaft4go/proto/client" 6 | "log" 7 | ) 8 | 9 | type FeedbackServer struct { 10 | ClientIns *BFTRaftClient 11 | } 12 | 13 | func (fs *FeedbackServer) ResponseCommand(ctx context.Context, cmd *pb.CommandResult) (*pb.Nothing, error) { 14 | // TODO: Verify signature 15 | // signData := server.CommandSignData(cmd.Group, cmd.NodeId, cmd.RequestId, cmd.Result) 16 | log.Println("command response from:", cmd.NodeId, "for group:", cmd.Group, "reqId:", cmd.RequestId) 17 | go func() { 18 | fs.ClientIns.CmdResChan[cmd.Group][cmd.RequestId] <- cmd.Result 19 | }() 20 | return &pb.Nothing{}, nil 21 | } 22 | -------------------------------------------------------------------------------- /client/user_client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "crypto/rsa" 6 | "errors" 7 | "strconv" 8 | "sync" 9 | "sync/atomic" 10 | "time" 11 | 12 | spb "github.com/PomeloCloud/BFTRaft4go/proto/server" 13 | "github.com/PomeloCloud/BFTRaft4go/utils" 14 | "github.com/patrickmn/go-cache" 15 | "log" 16 | ) 17 | 18 | type 
BFTRaftClient struct { 19 | Id uint64 20 | PrivateKey *rsa.PrivateKey 21 | AlphaRPCs AlphaRPCsCache 22 | GroupHosts *cache.Cache 23 | GroupLeader *cache.Cache 24 | CmdResChan map[uint64]map[uint64]chan []byte 25 | Counter int64 26 | Lock sync.RWMutex 27 | } 28 | 29 | type ClientOptions struct { 30 | PrivateKey []byte 31 | } 32 | 33 | // bootstraps is a list of server address believed to be the member of the network 34 | // the list does not need to contain alpha nodes since all of the nodes on the network will get informed 35 | func NewClient(bootstraps []string, opts ClientOptions) (*BFTRaftClient, error) { 36 | privateKey, err := utils.ParsePrivateKey(opts.PrivateKey) 37 | if err != nil { 38 | return nil, err 39 | } 40 | publicKey := utils.PublicKeyFromPrivate(privateKey) 41 | bftclient := &BFTRaftClient{ 42 | Id: utils.HashPublicKey(publicKey), 43 | PrivateKey: privateKey, 44 | Lock: sync.RWMutex{}, 45 | AlphaRPCs: NewAlphaRPCsCache(bootstraps), 46 | GroupHosts: cache.New(1*time.Minute, 1*time.Minute), 47 | GroupLeader: cache.New(1*time.Minute, 1*time.Minute), 48 | CmdResChan: map[uint64]map[uint64]chan []byte{}, 49 | Counter: 0, 50 | } 51 | return bftclient, nil 52 | } 53 | 54 | func (brc *BFTRaftClient) GetGroupHosts(groupId uint64) *[]*spb.Host { 55 | cacheKey := strconv.Itoa(int(groupId)) 56 | if cached, found := brc.GroupHosts.Get(cacheKey); found { 57 | return cached.(*[]*spb.Host) 58 | } 59 | res := utils.MajorityResponse(brc.AlphaRPCs.Get(), func(client spb.BFTRaftClient) (interface{}, []byte) { 60 | if res, err := client.GroupHosts( 61 | context.Background(), &spb.GroupId{GroupId: groupId}, 62 | ); err == nil { 63 | return &res.Nodes, utils.NodesSignData(res.Nodes) 64 | } else { 65 | log.Println("error on getting group host:", err) 66 | return nil, []byte{} 67 | } 68 | }) 69 | var hosts *[]*spb.Host = nil 70 | if res != nil { 71 | hosts = res.(*[]*spb.Host) 72 | } 73 | if hosts != nil { 74 | brc.GroupHosts.Set(cacheKey, hosts, cache.DefaultExpiration) 
75 | } 76 | return hosts 77 | } 78 | 79 | func (brc *BFTRaftClient) GetGroupLeader(groupId uint64) spb.BFTRaftClient { 80 | cacheKey := strconv.Itoa(int(groupId)) 81 | if cached, found := brc.GroupLeader.Get(cacheKey); found { 82 | return cached.(spb.BFTRaftClient) 83 | } 84 | res := utils.MajorityResponse(brc.AlphaRPCs.Get(), func(client spb.BFTRaftClient) (interface{}, []byte) { 85 | if res, err := client.GetGroupLeader( 86 | context.Background(), &spb.GroupId{GroupId: groupId}, 87 | ); err == nil { 88 | // TODO: verify signature 89 | if res.Node == nil { 90 | log.Println("nil response for get leader for group:", groupId) 91 | } 92 | return res.Node, []byte(res.Node.ServerAddr) 93 | } else { 94 | log.Println("cannot get group leader on alpha peer:", err) 95 | return nil, []byte{} 96 | } 97 | }) 98 | var leaderHost *spb.Host = nil 99 | if res != nil { 100 | leaderHost = res.(*spb.Host) 101 | } 102 | if leaderHost != nil { 103 | if leader, err := utils.GetClusterRPC(leaderHost.ServerAddr); err == nil { 104 | brc.GroupLeader.Set(cacheKey, leader, cache.DefaultExpiration) 105 | return leader 106 | } 107 | } else { 108 | log.Println(brc.Id, ", group", groupId, "has no leader") 109 | } 110 | return nil 111 | } 112 | 113 | func (brc *BFTRaftClient) GroupExists(groupId uint64) bool { 114 | res := utils.MajorityResponse(brc.AlphaRPCs.Get(), func(client spb.BFTRaftClient) (interface{}, []byte) { 115 | if _, err := client.GetGroupContent( 116 | context.Background(), &spb.GroupId{GroupId: groupId}, 117 | ); err == nil { 118 | // TODO: verify signature 119 | return true, []byte{1} 120 | } else { 121 | return false, []byte{0} 122 | } 123 | }) 124 | return res.(bool) 125 | } 126 | 127 | func (brc *BFTRaftClient) ExecCommand(groupId uint64, funcId uint64, arg []byte) (*[]byte, error) { 128 | leader := brc.GetGroupLeader(groupId) 129 | if leader == nil { 130 | return nil, errors.New("cannot found leader") 131 | } 132 | reqId := uint64(atomic.AddInt64(&brc.Counter, 1)) 133 | 
cmdReq := &spb.CommandRequest{ 134 | Group: groupId, 135 | ClientId: brc.Id, 136 | RequestId: reqId, 137 | FuncId: funcId, 138 | Arg: arg, 139 | } 140 | signData := utils.ExecCommandSignData(cmdReq) 141 | cmdReq.Signature = utils.Sign(brc.PrivateKey, signData) 142 | if _, found := brc.CmdResChan[groupId]; !found { 143 | brc.CmdResChan[groupId] = map[uint64]chan []byte{} 144 | } 145 | brc.CmdResChan[groupId][reqId] = make(chan []byte) 146 | defer func() { 147 | close(brc.CmdResChan[groupId][reqId]) 148 | delete(brc.CmdResChan[groupId], reqId) 149 | }() 150 | go func() { 151 | if cmdRes, err := leader.ExecCommand(context.Background(), cmdReq); err == nil { 152 | // TODO: verify signature 153 | // TODO: update leader if needed 154 | // TODO: verify response matches request 155 | brc.CmdResChan[groupId][reqId] <- cmdRes.Result 156 | 157 | } else { 158 | log.Println("cannot exec on leader:", err) 159 | } 160 | }() 161 | hosts := brc.GetGroupHosts(groupId) 162 | if hosts == nil { 163 | return nil, errors.New("cannot get group hosts") 164 | } 165 | expectedResponse := utils.ExpectedPlayers(len(*hosts)) 166 | responseReceived := map[uint64][]byte{} 167 | responseHashes := []uint64{} 168 | replicationCompleted := make(chan bool, 1) 169 | wg := sync.WaitGroup{} 170 | wg.Add(expectedResponse) 171 | go func() { 172 | for i := 0; i < expectedResponse; i++ { 173 | res := <-brc.CmdResChan[groupId][reqId] 174 | hash := utils.HashData(res) 175 | responseReceived[hash] = res 176 | responseHashes = append(responseHashes, hash) 177 | wg.Done() 178 | } 179 | }() 180 | go func() { 181 | wg.Wait() 182 | replicationCompleted <- true 183 | }() 184 | select { 185 | case <-replicationCompleted: 186 | majorityHash := utils.PickMajority(responseHashes) 187 | majorityData := responseReceived[majorityHash] 188 | return &majorityData, nil 189 | case <-time.After(10 * time.Second): 190 | return nil, errors.New("does not receive enough response") 191 | } 192 | } 193 | 
-------------------------------------------------------------------------------- /proto/client/client.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: proto/client/client.proto 3 | 4 | /* 5 | Package client is a generated protocol buffer package. 6 | 7 | It is generated from these files: 8 | proto/client/client.proto 9 | 10 | It has these top-level messages: 11 | CommandResult 12 | Nothing 13 | */ 14 | package client 15 | 16 | import proto "github.com/golang/protobuf/proto" 17 | import fmt "fmt" 18 | import math "math" 19 | 20 | import ( 21 | context "golang.org/x/net/context" 22 | grpc "google.golang.org/grpc" 23 | ) 24 | 25 | // Reference imports to suppress errors if they are not otherwise used. 26 | var _ = proto.Marshal 27 | var _ = fmt.Errorf 28 | var _ = math.Inf 29 | 30 | // This is a compile-time assertion to ensure that this generated file 31 | // is compatible with the proto package it is being compiled against. 32 | // A compilation error at this line likely means your copy of the 33 | // proto package needs to be updated. 
34 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 35 | 36 | type CommandResult struct { 37 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 38 | NodeId uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` 39 | RequestId uint64 `protobuf:"varint,3,opt,name=request_id,json=requestId" json:"request_id,omitempty"` 40 | Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` 41 | Result []byte `protobuf:"bytes,5,opt,name=result,proto3" json:"result,omitempty"` 42 | } 43 | 44 | func (m *CommandResult) Reset() { *m = CommandResult{} } 45 | func (m *CommandResult) String() string { return proto.CompactTextString(m) } 46 | func (*CommandResult) ProtoMessage() {} 47 | func (*CommandResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 48 | 49 | func (m *CommandResult) GetGroup() uint64 { 50 | if m != nil { 51 | return m.Group 52 | } 53 | return 0 54 | } 55 | 56 | func (m *CommandResult) GetNodeId() uint64 { 57 | if m != nil { 58 | return m.NodeId 59 | } 60 | return 0 61 | } 62 | 63 | func (m *CommandResult) GetRequestId() uint64 { 64 | if m != nil { 65 | return m.RequestId 66 | } 67 | return 0 68 | } 69 | 70 | func (m *CommandResult) GetSignature() []byte { 71 | if m != nil { 72 | return m.Signature 73 | } 74 | return nil 75 | } 76 | 77 | func (m *CommandResult) GetResult() []byte { 78 | if m != nil { 79 | return m.Result 80 | } 81 | return nil 82 | } 83 | 84 | type Nothing struct { 85 | } 86 | 87 | func (m *Nothing) Reset() { *m = Nothing{} } 88 | func (m *Nothing) String() string { return proto.CompactTextString(m) } 89 | func (*Nothing) ProtoMessage() {} 90 | func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } 91 | 92 | func init() { 93 | proto.RegisterType((*CommandResult)(nil), "client.CommandResult") 94 | proto.RegisterType((*Nothing)(nil), "client.Nothing") 95 | } 96 | 97 | // Reference imports to 
suppress errors if they are not otherwise used. 98 | var _ context.Context 99 | var _ grpc.ClientConn 100 | 101 | // This is a compile-time assertion to ensure that this generated file 102 | // is compatible with the grpc package it is being compiled against. 103 | const _ = grpc.SupportPackageIsVersion4 104 | 105 | // Client API for BFTRaftClient service 106 | 107 | type BFTRaftClientClient interface { 108 | ResponseCommand(ctx context.Context, in *CommandResult, opts ...grpc.CallOption) (*Nothing, error) 109 | } 110 | 111 | type bFTRaftClientClient struct { 112 | cc *grpc.ClientConn 113 | } 114 | 115 | func NewBFTRaftClientClient(cc *grpc.ClientConn) BFTRaftClientClient { 116 | return &bFTRaftClientClient{cc} 117 | } 118 | 119 | func (c *bFTRaftClientClient) ResponseCommand(ctx context.Context, in *CommandResult, opts ...grpc.CallOption) (*Nothing, error) { 120 | out := new(Nothing) 121 | err := grpc.Invoke(ctx, "/client.BFTRaftClient/ResponseCommand", in, out, c.cc, opts...) 122 | if err != nil { 123 | return nil, err 124 | } 125 | return out, nil 126 | } 127 | 128 | // Server API for BFTRaftClient service 129 | 130 | type BFTRaftClientServer interface { 131 | ResponseCommand(context.Context, *CommandResult) (*Nothing, error) 132 | } 133 | 134 | func RegisterBFTRaftClientServer(s *grpc.Server, srv BFTRaftClientServer) { 135 | s.RegisterService(&_BFTRaftClient_serviceDesc, srv) 136 | } 137 | 138 | func _BFTRaftClient_ResponseCommand_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 139 | in := new(CommandResult) 140 | if err := dec(in); err != nil { 141 | return nil, err 142 | } 143 | if interceptor == nil { 144 | return srv.(BFTRaftClientServer).ResponseCommand(ctx, in) 145 | } 146 | info := &grpc.UnaryServerInfo{ 147 | Server: srv, 148 | FullMethod: "/client.BFTRaftClient/ResponseCommand", 149 | } 150 | handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { 151 | return srv.(BFTRaftClientServer).ResponseCommand(ctx, req.(*CommandResult)) 152 | } 153 | return interceptor(ctx, in, info, handler) 154 | } 155 | 156 | var _BFTRaftClient_serviceDesc = grpc.ServiceDesc{ 157 | ServiceName: "client.BFTRaftClient", 158 | HandlerType: (*BFTRaftClientServer)(nil), 159 | Methods: []grpc.MethodDesc{ 160 | { 161 | MethodName: "ResponseCommand", 162 | Handler: _BFTRaftClient_ResponseCommand_Handler, 163 | }, 164 | }, 165 | Streams: []grpc.StreamDesc{}, 166 | Metadata: "proto/client/client.proto", 167 | } 168 | 169 | func init() { proto.RegisterFile("proto/client/client.proto", fileDescriptor0) } 170 | 171 | var fileDescriptor0 = []byte{ 172 | // 216 bytes of a gzipped FileDescriptorProto 173 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0xcd, 0x4a, 0xc4, 0x30, 174 | 0x14, 0x85, 0x8d, 0xce, 0x64, 0xe8, 0xc5, 0x61, 0xe0, 0xe2, 0x4f, 0x14, 0x85, 0xa1, 0xab, 0xae, 175 | 0x2a, 0xe8, 0xd2, 0x9d, 0x05, 0xa1, 0x20, 0x2e, 0x82, 0x7b, 0xa9, 0xe6, 0x5a, 0x03, 0x6d, 0x52, 176 | 0xf3, 0xf3, 0x24, 0xbe, 0xb0, 0x34, 0x8d, 0x88, 0xab, 0xf0, 0x7d, 0x07, 0x72, 0xcf, 0x81, 0x8b, 177 | 0xc9, 0xd9, 0x60, 0x6f, 0xde, 0x07, 0x4d, 0x26, 0xe4, 0xa7, 0x4e, 0x0e, 0xf9, 0x42, 0xe5, 0x37, 178 | 0x83, 0x6d, 0x63, 0xc7, 0xb1, 0x33, 0x4a, 0x92, 0x8f, 0x43, 0xc0, 0x13, 0x58, 0xf7, 0xce, 0xc6, 179 | 0x49, 0xb0, 0x3d, 0xab, 0x56, 0x72, 0x01, 0x3c, 0x87, 0x8d, 0xb1, 0x8a, 0x5e, 0xb5, 0x12, 0x87, 180 | 0xc9, 0xf3, 0x19, 0x5b, 0x85, 0xd7, 0x00, 0x8e, 0xbe, 0x22, 0xf9, 0x30, 0x67, 0x47, 0x29, 0x2b, 181 | 0xb2, 0x69, 0x15, 0x5e, 0x41, 0xe1, 0x75, 0x6f, 0xba, 0x10, 0x1d, 0x89, 0xd5, 0x9e, 0x55, 0xc7, 182 | 0xf2, 0x4f, 0xe0, 0x19, 0x70, 0x97, 0xae, 0x8a, 0x75, 0x8a, 0x32, 0x95, 0x05, 0x6c, 0x9e, 0x6d, 183 | 0xf8, 0xd4, 0xa6, 0xbf, 0x7d, 0x82, 0xed, 0xc3, 0xe3, 0x8b, 0xec, 0x3e, 0x42, 0x93, 0x1a, 0xe3, 184 | 0x3d, 0xec, 0x24, 0xf9, 0xc9, 0x1a, 0x4f, 0xb9, 0x38, 0x9e, 0xd6, 0x79, 0xdb, 0xbf, 0x25, 0x97, 185 | 0xbb, 0x5f, 0x9d, 0xff, 0x2a, 
0x0f, 0xde, 0x78, 0x5a, 0x7f, 0xf7, 0x13, 0x00, 0x00, 0xff, 0xff, 186 | 0xc0, 0x70, 0x00, 0xe2, 0x1a, 0x01, 0x00, 0x00, 187 | } 188 | -------------------------------------------------------------------------------- /proto/client/client.proto: -------------------------------------------------------------------------------- 1 | // This file is client server protocal for BFT raft implementation 2 | // Because each peer need to send response right to client for result voting, 3 | // client server is required in this system 4 | 5 | syntax = "proto3"; 6 | package client; 7 | 8 | service BFTRaftClient { 9 | rpc ResponseCommand(CommandResult) returns (Nothing) {} 10 | } 11 | 12 | message CommandResult { 13 | uint64 group = 1; 14 | uint64 node_id = 2; 15 | uint64 request_id = 3; 16 | bytes signature = 4; 17 | bytes result = 5; 18 | } 19 | 20 | message Nothing {} -------------------------------------------------------------------------------- /proto/server/server.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: proto/server/server.proto 3 | 4 | /* 5 | Package server is a generated protocol buffer package. 
6 | 7 | It is generated from these files: 8 | proto/server/server.proto 9 | 10 | It has these top-level messages: 11 | CommandRequest 12 | CommandResponse 13 | LogEntry 14 | RequestVoteRequest 15 | RequestVoteResponse 16 | AppendEntriesRequest 17 | AppendEntriesResponse 18 | Peer 19 | Host 20 | RaftGroup 21 | ServerConfig 22 | ApproveAppendResponse 23 | GroupId 24 | GroupNodesResponse 25 | GroupMember 26 | GroupMembersResponse 27 | PullGroupLogsResuest 28 | LogEntries 29 | NodeJoinGroupEntry 30 | GroupLeader 31 | GroupInvitation 32 | RaftGroupNodes 33 | Nothing 34 | */ 35 | package server 36 | 37 | import proto "github.com/golang/protobuf/proto" 38 | import fmt "fmt" 39 | import math "math" 40 | 41 | import ( 42 | context "golang.org/x/net/context" 43 | grpc "google.golang.org/grpc" 44 | ) 45 | 46 | // Reference imports to suppress errors if they are not otherwise used. 47 | var _ = proto.Marshal 48 | var _ = fmt.Errorf 49 | var _ = math.Inf 50 | 51 | // This is a compile-time assertion to ensure that this generated file 52 | // is compatible with the proto package it is being compiled against. 53 | // A compilation error at this line likely means your copy of the 54 | // proto package needs to be updated. 
55 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 56 | 57 | type CommandRequest struct { 58 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 59 | ClientId uint64 `protobuf:"varint,2,opt,name=client_id,json=clientId" json:"client_id,omitempty"` 60 | RequestId uint64 `protobuf:"varint,3,opt,name=request_id,json=requestId" json:"request_id,omitempty"` 61 | FuncId uint64 `protobuf:"varint,4,opt,name=func_id,json=funcId" json:"func_id,omitempty"` 62 | Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` 63 | Arg []byte `protobuf:"bytes,6,opt,name=arg,proto3" json:"arg,omitempty"` 64 | } 65 | 66 | func (m *CommandRequest) Reset() { *m = CommandRequest{} } 67 | func (m *CommandRequest) String() string { return proto.CompactTextString(m) } 68 | func (*CommandRequest) ProtoMessage() {} 69 | func (*CommandRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 70 | 71 | func (m *CommandRequest) GetGroup() uint64 { 72 | if m != nil { 73 | return m.Group 74 | } 75 | return 0 76 | } 77 | 78 | func (m *CommandRequest) GetClientId() uint64 { 79 | if m != nil { 80 | return m.ClientId 81 | } 82 | return 0 83 | } 84 | 85 | func (m *CommandRequest) GetRequestId() uint64 { 86 | if m != nil { 87 | return m.RequestId 88 | } 89 | return 0 90 | } 91 | 92 | func (m *CommandRequest) GetFuncId() uint64 { 93 | if m != nil { 94 | return m.FuncId 95 | } 96 | return 0 97 | } 98 | 99 | func (m *CommandRequest) GetSignature() []byte { 100 | if m != nil { 101 | return m.Signature 102 | } 103 | return nil 104 | } 105 | 106 | func (m *CommandRequest) GetArg() []byte { 107 | if m != nil { 108 | return m.Arg 109 | } 110 | return nil 111 | } 112 | 113 | type CommandResponse struct { 114 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 115 | LeaderId uint64 `protobuf:"varint,2,opt,name=leader_id,json=leaderId" json:"leader_id,omitempty"` 116 | NodeId uint64 
`protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` 117 | RequestId uint64 `protobuf:"varint,4,opt,name=request_id,json=requestId" json:"request_id,omitempty"` 118 | Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` 119 | Result []byte `protobuf:"bytes,6,opt,name=result,proto3" json:"result,omitempty"` 120 | } 121 | 122 | func (m *CommandResponse) Reset() { *m = CommandResponse{} } 123 | func (m *CommandResponse) String() string { return proto.CompactTextString(m) } 124 | func (*CommandResponse) ProtoMessage() {} 125 | func (*CommandResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } 126 | 127 | func (m *CommandResponse) GetGroup() uint64 { 128 | if m != nil { 129 | return m.Group 130 | } 131 | return 0 132 | } 133 | 134 | func (m *CommandResponse) GetLeaderId() uint64 { 135 | if m != nil { 136 | return m.LeaderId 137 | } 138 | return 0 139 | } 140 | 141 | func (m *CommandResponse) GetNodeId() uint64 { 142 | if m != nil { 143 | return m.NodeId 144 | } 145 | return 0 146 | } 147 | 148 | func (m *CommandResponse) GetRequestId() uint64 { 149 | if m != nil { 150 | return m.RequestId 151 | } 152 | return 0 153 | } 154 | 155 | func (m *CommandResponse) GetSignature() []byte { 156 | if m != nil { 157 | return m.Signature 158 | } 159 | return nil 160 | } 161 | 162 | func (m *CommandResponse) GetResult() []byte { 163 | if m != nil { 164 | return m.Result 165 | } 166 | return nil 167 | } 168 | 169 | type LogEntry struct { 170 | Term uint64 `protobuf:"varint,1,opt,name=term" json:"term,omitempty"` 171 | Index uint64 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"` 172 | Hash []byte `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` 173 | Command *CommandRequest `protobuf:"bytes,4,opt,name=command" json:"command,omitempty"` 174 | } 175 | 176 | func (m *LogEntry) Reset() { *m = LogEntry{} } 177 | func (m *LogEntry) String() string { return proto.CompactTextString(m) 
} 178 | func (*LogEntry) ProtoMessage() {} 179 | func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } 180 | 181 | func (m *LogEntry) GetTerm() uint64 { 182 | if m != nil { 183 | return m.Term 184 | } 185 | return 0 186 | } 187 | 188 | func (m *LogEntry) GetIndex() uint64 { 189 | if m != nil { 190 | return m.Index 191 | } 192 | return 0 193 | } 194 | 195 | func (m *LogEntry) GetHash() []byte { 196 | if m != nil { 197 | return m.Hash 198 | } 199 | return nil 200 | } 201 | 202 | func (m *LogEntry) GetCommand() *CommandRequest { 203 | if m != nil { 204 | return m.Command 205 | } 206 | return nil 207 | } 208 | 209 | type RequestVoteRequest struct { 210 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 211 | Term uint64 `protobuf:"varint,2,opt,name=term" json:"term,omitempty"` 212 | LogIndex uint64 `protobuf:"varint,3,opt,name=log_index,json=logIndex" json:"log_index,omitempty"` 213 | LogTerm uint64 `protobuf:"varint,4,opt,name=log_term,json=logTerm" json:"log_term,omitempty"` 214 | CandidateId uint64 `protobuf:"varint,5,opt,name=candidate_id,json=candidateId" json:"candidate_id,omitempty"` 215 | Signature []byte `protobuf:"bytes,6,opt,name=signature,proto3" json:"signature,omitempty"` 216 | } 217 | 218 | func (m *RequestVoteRequest) Reset() { *m = RequestVoteRequest{} } 219 | func (m *RequestVoteRequest) String() string { return proto.CompactTextString(m) } 220 | func (*RequestVoteRequest) ProtoMessage() {} 221 | func (*RequestVoteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } 222 | 223 | func (m *RequestVoteRequest) GetGroup() uint64 { 224 | if m != nil { 225 | return m.Group 226 | } 227 | return 0 228 | } 229 | 230 | func (m *RequestVoteRequest) GetTerm() uint64 { 231 | if m != nil { 232 | return m.Term 233 | } 234 | return 0 235 | } 236 | 237 | func (m *RequestVoteRequest) GetLogIndex() uint64 { 238 | if m != nil { 239 | return m.LogIndex 240 | } 241 | return 0 242 | } 243 | 244 | func 
(m *RequestVoteRequest) GetLogTerm() uint64 { 245 | if m != nil { 246 | return m.LogTerm 247 | } 248 | return 0 249 | } 250 | 251 | func (m *RequestVoteRequest) GetCandidateId() uint64 { 252 | if m != nil { 253 | return m.CandidateId 254 | } 255 | return 0 256 | } 257 | 258 | func (m *RequestVoteRequest) GetSignature() []byte { 259 | if m != nil { 260 | return m.Signature 261 | } 262 | return nil 263 | } 264 | 265 | type RequestVoteResponse struct { 266 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 267 | Term uint64 `protobuf:"varint,2,opt,name=term" json:"term,omitempty"` 268 | LogIndex uint64 `protobuf:"varint,3,opt,name=log_index,json=logIndex" json:"log_index,omitempty"` 269 | CandidateId uint64 `protobuf:"varint,5,opt,name=candidate_id,json=candidateId" json:"candidate_id,omitempty"` 270 | Voter uint64 `protobuf:"varint,6,opt,name=voter" json:"voter,omitempty"` 271 | Granted bool `protobuf:"varint,7,opt,name=granted" json:"granted,omitempty"` 272 | Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"` 273 | } 274 | 275 | func (m *RequestVoteResponse) Reset() { *m = RequestVoteResponse{} } 276 | func (m *RequestVoteResponse) String() string { return proto.CompactTextString(m) } 277 | func (*RequestVoteResponse) ProtoMessage() {} 278 | func (*RequestVoteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } 279 | 280 | func (m *RequestVoteResponse) GetGroup() uint64 { 281 | if m != nil { 282 | return m.Group 283 | } 284 | return 0 285 | } 286 | 287 | func (m *RequestVoteResponse) GetTerm() uint64 { 288 | if m != nil { 289 | return m.Term 290 | } 291 | return 0 292 | } 293 | 294 | func (m *RequestVoteResponse) GetLogIndex() uint64 { 295 | if m != nil { 296 | return m.LogIndex 297 | } 298 | return 0 299 | } 300 | 301 | func (m *RequestVoteResponse) GetCandidateId() uint64 { 302 | if m != nil { 303 | return m.CandidateId 304 | } 305 | return 0 306 | } 307 | 308 | func (m 
*RequestVoteResponse) GetVoter() uint64 { 309 | if m != nil { 310 | return m.Voter 311 | } 312 | return 0 313 | } 314 | 315 | func (m *RequestVoteResponse) GetGranted() bool { 316 | if m != nil { 317 | return m.Granted 318 | } 319 | return false 320 | } 321 | 322 | func (m *RequestVoteResponse) GetSignature() []byte { 323 | if m != nil { 324 | return m.Signature 325 | } 326 | return nil 327 | } 328 | 329 | type AppendEntriesRequest struct { 330 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 331 | Term uint64 `protobuf:"varint,2,opt,name=term" json:"term,omitempty"` 332 | LeaderId uint64 `protobuf:"varint,3,opt,name=leader_id,json=leaderId" json:"leader_id,omitempty"` 333 | PrevLogIndex uint64 `protobuf:"varint,4,opt,name=prev_log_index,json=prevLogIndex" json:"prev_log_index,omitempty"` 334 | PrevLogTerm uint64 `protobuf:"varint,5,opt,name=prev_log_term,json=prevLogTerm" json:"prev_log_term,omitempty"` 335 | Signature []byte `protobuf:"bytes,6,opt,name=signature,proto3" json:"signature,omitempty"` 336 | QuorumVotes []*RequestVoteResponse `protobuf:"bytes,7,rep,name=quorum_votes,json=quorumVotes" json:"quorum_votes,omitempty"` 337 | Entries []*LogEntry `protobuf:"bytes,8,rep,name=entries" json:"entries,omitempty"` 338 | } 339 | 340 | func (m *AppendEntriesRequest) Reset() { *m = AppendEntriesRequest{} } 341 | func (m *AppendEntriesRequest) String() string { return proto.CompactTextString(m) } 342 | func (*AppendEntriesRequest) ProtoMessage() {} 343 | func (*AppendEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } 344 | 345 | func (m *AppendEntriesRequest) GetGroup() uint64 { 346 | if m != nil { 347 | return m.Group 348 | } 349 | return 0 350 | } 351 | 352 | func (m *AppendEntriesRequest) GetTerm() uint64 { 353 | if m != nil { 354 | return m.Term 355 | } 356 | return 0 357 | } 358 | 359 | func (m *AppendEntriesRequest) GetLeaderId() uint64 { 360 | if m != nil { 361 | return m.LeaderId 362 | } 363 | return 0 
364 | } 365 | 366 | func (m *AppendEntriesRequest) GetPrevLogIndex() uint64 { 367 | if m != nil { 368 | return m.PrevLogIndex 369 | } 370 | return 0 371 | } 372 | 373 | func (m *AppendEntriesRequest) GetPrevLogTerm() uint64 { 374 | if m != nil { 375 | return m.PrevLogTerm 376 | } 377 | return 0 378 | } 379 | 380 | func (m *AppendEntriesRequest) GetSignature() []byte { 381 | if m != nil { 382 | return m.Signature 383 | } 384 | return nil 385 | } 386 | 387 | func (m *AppendEntriesRequest) GetQuorumVotes() []*RequestVoteResponse { 388 | if m != nil { 389 | return m.QuorumVotes 390 | } 391 | return nil 392 | } 393 | 394 | func (m *AppendEntriesRequest) GetEntries() []*LogEntry { 395 | if m != nil { 396 | return m.Entries 397 | } 398 | return nil 399 | } 400 | 401 | type AppendEntriesResponse struct { 402 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 403 | Term uint64 `protobuf:"varint,2,opt,name=term" json:"term,omitempty"` 404 | Index uint64 `protobuf:"varint,3,opt,name=index" json:"index,omitempty"` 405 | Peer uint64 `protobuf:"varint,4,opt,name=peer" json:"peer,omitempty"` 406 | Successed bool `protobuf:"varint,5,opt,name=successed" json:"successed,omitempty"` 407 | Convinced bool `protobuf:"varint,6,opt,name=convinced" json:"convinced,omitempty"` 408 | Hash []byte `protobuf:"bytes,7,opt,name=hash,proto3" json:"hash,omitempty"` 409 | Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"` 410 | } 411 | 412 | func (m *AppendEntriesResponse) Reset() { *m = AppendEntriesResponse{} } 413 | func (m *AppendEntriesResponse) String() string { return proto.CompactTextString(m) } 414 | func (*AppendEntriesResponse) ProtoMessage() {} 415 | func (*AppendEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } 416 | 417 | func (m *AppendEntriesResponse) GetGroup() uint64 { 418 | if m != nil { 419 | return m.Group 420 | } 421 | return 0 422 | } 423 | 424 | func (m *AppendEntriesResponse) 
GetTerm() uint64 { 425 | if m != nil { 426 | return m.Term 427 | } 428 | return 0 429 | } 430 | 431 | func (m *AppendEntriesResponse) GetIndex() uint64 { 432 | if m != nil { 433 | return m.Index 434 | } 435 | return 0 436 | } 437 | 438 | func (m *AppendEntriesResponse) GetPeer() uint64 { 439 | if m != nil { 440 | return m.Peer 441 | } 442 | return 0 443 | } 444 | 445 | func (m *AppendEntriesResponse) GetSuccessed() bool { 446 | if m != nil { 447 | return m.Successed 448 | } 449 | return false 450 | } 451 | 452 | func (m *AppendEntriesResponse) GetConvinced() bool { 453 | if m != nil { 454 | return m.Convinced 455 | } 456 | return false 457 | } 458 | 459 | func (m *AppendEntriesResponse) GetHash() []byte { 460 | if m != nil { 461 | return m.Hash 462 | } 463 | return nil 464 | } 465 | 466 | func (m *AppendEntriesResponse) GetSignature() []byte { 467 | if m != nil { 468 | return m.Signature 469 | } 470 | return nil 471 | } 472 | 473 | type Peer struct { 474 | Id uint64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` 475 | Group uint64 `protobuf:"varint,2,opt,name=group" json:"group,omitempty"` 476 | NextIndex uint64 `protobuf:"varint,4,opt,name=next_index,json=nextIndex" json:"next_index,omitempty"` 477 | MatchIndex uint64 `protobuf:"varint,5,opt,name=match_index,json=matchIndex" json:"match_index,omitempty"` 478 | } 479 | 480 | func (m *Peer) Reset() { *m = Peer{} } 481 | func (m *Peer) String() string { return proto.CompactTextString(m) } 482 | func (*Peer) ProtoMessage() {} 483 | func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } 484 | 485 | func (m *Peer) GetId() uint64 { 486 | if m != nil { 487 | return m.Id 488 | } 489 | return 0 490 | } 491 | 492 | func (m *Peer) GetGroup() uint64 { 493 | if m != nil { 494 | return m.Group 495 | } 496 | return 0 497 | } 498 | 499 | func (m *Peer) GetNextIndex() uint64 { 500 | if m != nil { 501 | return m.NextIndex 502 | } 503 | return 0 504 | } 505 | 506 | func (m *Peer) GetMatchIndex() uint64 
{ 507 | if m != nil { 508 | return m.MatchIndex 509 | } 510 | return 0 511 | } 512 | 513 | type Host struct { 514 | Id uint64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` 515 | LastSeen uint64 `protobuf:"varint,2,opt,name=last_seen,json=lastSeen" json:"last_seen,omitempty"` 516 | Online bool `protobuf:"varint,3,opt,name=online" json:"online,omitempty"` 517 | ServerAddr string `protobuf:"bytes,4,opt,name=server_addr,json=serverAddr" json:"server_addr,omitempty"` 518 | PublicKey []byte `protobuf:"bytes,5,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` 519 | } 520 | 521 | func (m *Host) Reset() { *m = Host{} } 522 | func (m *Host) String() string { return proto.CompactTextString(m) } 523 | func (*Host) ProtoMessage() {} 524 | func (*Host) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } 525 | 526 | func (m *Host) GetId() uint64 { 527 | if m != nil { 528 | return m.Id 529 | } 530 | return 0 531 | } 532 | 533 | func (m *Host) GetLastSeen() uint64 { 534 | if m != nil { 535 | return m.LastSeen 536 | } 537 | return 0 538 | } 539 | 540 | func (m *Host) GetOnline() bool { 541 | if m != nil { 542 | return m.Online 543 | } 544 | return false 545 | } 546 | 547 | func (m *Host) GetServerAddr() string { 548 | if m != nil { 549 | return m.ServerAddr 550 | } 551 | return "" 552 | } 553 | 554 | func (m *Host) GetPublicKey() []byte { 555 | if m != nil { 556 | return m.PublicKey 557 | } 558 | return nil 559 | } 560 | 561 | type RaftGroup struct { 562 | Replications uint32 `protobuf:"varint,1,opt,name=replications" json:"replications,omitempty"` 563 | Id uint64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` 564 | Term uint64 `protobuf:"varint,4,opt,name=term" json:"term,omitempty"` 565 | } 566 | 567 | func (m *RaftGroup) Reset() { *m = RaftGroup{} } 568 | func (m *RaftGroup) String() string { return proto.CompactTextString(m) } 569 | func (*RaftGroup) ProtoMessage() {} 570 | func (*RaftGroup) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{9} } 571 | 572 | func (m *RaftGroup) GetReplications() uint32 { 573 | if m != nil { 574 | return m.Replications 575 | } 576 | return 0 577 | } 578 | 579 | func (m *RaftGroup) GetId() uint64 { 580 | if m != nil { 581 | return m.Id 582 | } 583 | return 0 584 | } 585 | 586 | func (m *RaftGroup) GetTerm() uint64 { 587 | if m != nil { 588 | return m.Term 589 | } 590 | return 0 591 | } 592 | 593 | type ServerConfig struct { 594 | PrivateKey []byte `protobuf:"bytes,1,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` 595 | } 596 | 597 | func (m *ServerConfig) Reset() { *m = ServerConfig{} } 598 | func (m *ServerConfig) String() string { return proto.CompactTextString(m) } 599 | func (*ServerConfig) ProtoMessage() {} 600 | func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } 601 | 602 | func (m *ServerConfig) GetPrivateKey() []byte { 603 | if m != nil { 604 | return m.PrivateKey 605 | } 606 | return nil 607 | } 608 | 609 | type ApproveAppendResponse struct { 610 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 611 | Peer uint64 `protobuf:"varint,2,opt,name=peer" json:"peer,omitempty"` 612 | Index uint64 `protobuf:"varint,3,opt,name=index" json:"index,omitempty"` 613 | Appended bool `protobuf:"varint,4,opt,name=appended" json:"appended,omitempty"` 614 | Delayed bool `protobuf:"varint,5,opt,name=delayed" json:"delayed,omitempty"` 615 | Failed bool `protobuf:"varint,6,opt,name=failed" json:"failed,omitempty"` 616 | Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"` 617 | } 618 | 619 | func (m *ApproveAppendResponse) Reset() { *m = ApproveAppendResponse{} } 620 | func (m *ApproveAppendResponse) String() string { return proto.CompactTextString(m) } 621 | func (*ApproveAppendResponse) ProtoMessage() {} 622 | func (*ApproveAppendResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } 623 | 624 | func (m 
*ApproveAppendResponse) GetGroup() uint64 { 625 | if m != nil { 626 | return m.Group 627 | } 628 | return 0 629 | } 630 | 631 | func (m *ApproveAppendResponse) GetPeer() uint64 { 632 | if m != nil { 633 | return m.Peer 634 | } 635 | return 0 636 | } 637 | 638 | func (m *ApproveAppendResponse) GetIndex() uint64 { 639 | if m != nil { 640 | return m.Index 641 | } 642 | return 0 643 | } 644 | 645 | func (m *ApproveAppendResponse) GetAppended() bool { 646 | if m != nil { 647 | return m.Appended 648 | } 649 | return false 650 | } 651 | 652 | func (m *ApproveAppendResponse) GetDelayed() bool { 653 | if m != nil { 654 | return m.Delayed 655 | } 656 | return false 657 | } 658 | 659 | func (m *ApproveAppendResponse) GetFailed() bool { 660 | if m != nil { 661 | return m.Failed 662 | } 663 | return false 664 | } 665 | 666 | func (m *ApproveAppendResponse) GetSignature() []byte { 667 | if m != nil { 668 | return m.Signature 669 | } 670 | return nil 671 | } 672 | 673 | type GroupId struct { 674 | GroupId uint64 `protobuf:"varint,1,opt,name=group_id,json=groupId" json:"group_id,omitempty"` 675 | } 676 | 677 | func (m *GroupId) Reset() { *m = GroupId{} } 678 | func (m *GroupId) String() string { return proto.CompactTextString(m) } 679 | func (*GroupId) ProtoMessage() {} 680 | func (*GroupId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } 681 | 682 | func (m *GroupId) GetGroupId() uint64 { 683 | if m != nil { 684 | return m.GroupId 685 | } 686 | return 0 687 | } 688 | 689 | type GroupNodesResponse struct { 690 | Nodes []*Host `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` 691 | Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` 692 | } 693 | 694 | func (m *GroupNodesResponse) Reset() { *m = GroupNodesResponse{} } 695 | func (m *GroupNodesResponse) String() string { return proto.CompactTextString(m) } 696 | func (*GroupNodesResponse) ProtoMessage() {} 697 | func (*GroupNodesResponse) Descriptor() ([]byte, []int) 
{ return fileDescriptor0, []int{13} } 698 | 699 | func (m *GroupNodesResponse) GetNodes() []*Host { 700 | if m != nil { 701 | return m.Nodes 702 | } 703 | return nil 704 | } 705 | 706 | func (m *GroupNodesResponse) GetSignature() []byte { 707 | if m != nil { 708 | return m.Signature 709 | } 710 | return nil 711 | } 712 | 713 | type GroupMember struct { 714 | Host *Host `protobuf:"bytes,1,opt,name=host" json:"host,omitempty"` 715 | Peer *Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"` 716 | } 717 | 718 | func (m *GroupMember) Reset() { *m = GroupMember{} } 719 | func (m *GroupMember) String() string { return proto.CompactTextString(m) } 720 | func (*GroupMember) ProtoMessage() {} 721 | func (*GroupMember) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } 722 | 723 | func (m *GroupMember) GetHost() *Host { 724 | if m != nil { 725 | return m.Host 726 | } 727 | return nil 728 | } 729 | 730 | func (m *GroupMember) GetPeer() *Peer { 731 | if m != nil { 732 | return m.Peer 733 | } 734 | return nil 735 | } 736 | 737 | type GroupMembersResponse struct { 738 | LastEntry *LogEntry `protobuf:"bytes,2,opt,name=last_entry,json=lastEntry" json:"last_entry,omitempty"` 739 | Members []*GroupMember `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` 740 | Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` 741 | } 742 | 743 | func (m *GroupMembersResponse) Reset() { *m = GroupMembersResponse{} } 744 | func (m *GroupMembersResponse) String() string { return proto.CompactTextString(m) } 745 | func (*GroupMembersResponse) ProtoMessage() {} 746 | func (*GroupMembersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } 747 | 748 | func (m *GroupMembersResponse) GetLastEntry() *LogEntry { 749 | if m != nil { 750 | return m.LastEntry 751 | } 752 | return nil 753 | } 754 | 755 | func (m *GroupMembersResponse) GetMembers() []*GroupMember { 756 | if m != nil { 757 | return m.Members 758 | } 
759 | return nil 760 | } 761 | 762 | func (m *GroupMembersResponse) GetSignature() []byte { 763 | if m != nil { 764 | return m.Signature 765 | } 766 | return nil 767 | } 768 | 769 | type PullGroupLogsResuest struct { 770 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 771 | Index uint64 `protobuf:"varint,2,opt,name=index" json:"index,omitempty"` 772 | } 773 | 774 | func (m *PullGroupLogsResuest) Reset() { *m = PullGroupLogsResuest{} } 775 | func (m *PullGroupLogsResuest) String() string { return proto.CompactTextString(m) } 776 | func (*PullGroupLogsResuest) ProtoMessage() {} 777 | func (*PullGroupLogsResuest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } 778 | 779 | func (m *PullGroupLogsResuest) GetGroup() uint64 { 780 | if m != nil { 781 | return m.Group 782 | } 783 | return 0 784 | } 785 | 786 | func (m *PullGroupLogsResuest) GetIndex() uint64 { 787 | if m != nil { 788 | return m.Index 789 | } 790 | return 0 791 | } 792 | 793 | type LogEntries struct { 794 | Entries []*LogEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` 795 | Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` 796 | } 797 | 798 | func (m *LogEntries) Reset() { *m = LogEntries{} } 799 | func (m *LogEntries) String() string { return proto.CompactTextString(m) } 800 | func (*LogEntries) ProtoMessage() {} 801 | func (*LogEntries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } 802 | 803 | func (m *LogEntries) GetEntries() []*LogEntry { 804 | if m != nil { 805 | return m.Entries 806 | } 807 | return nil 808 | } 809 | 810 | func (m *LogEntries) GetSignature() []byte { 811 | if m != nil { 812 | return m.Signature 813 | } 814 | return nil 815 | } 816 | 817 | type NodeJoinGroupEntry struct { 818 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 819 | } 820 | 821 | func (m *NodeJoinGroupEntry) Reset() { *m = NodeJoinGroupEntry{} } 822 | func (m 
*NodeJoinGroupEntry) String() string { return proto.CompactTextString(m) } 823 | func (*NodeJoinGroupEntry) ProtoMessage() {} 824 | func (*NodeJoinGroupEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } 825 | 826 | func (m *NodeJoinGroupEntry) GetGroup() uint64 { 827 | if m != nil { 828 | return m.Group 829 | } 830 | return 0 831 | } 832 | 833 | type GroupLeader struct { 834 | Node *Host `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` 835 | Accuate bool `protobuf:"varint,2,opt,name=Accuate" json:"Accuate,omitempty"` 836 | Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` 837 | } 838 | 839 | func (m *GroupLeader) Reset() { *m = GroupLeader{} } 840 | func (m *GroupLeader) String() string { return proto.CompactTextString(m) } 841 | func (*GroupLeader) ProtoMessage() {} 842 | func (*GroupLeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } 843 | 844 | func (m *GroupLeader) GetNode() *Host { 845 | if m != nil { 846 | return m.Node 847 | } 848 | return nil 849 | } 850 | 851 | func (m *GroupLeader) GetAccuate() bool { 852 | if m != nil { 853 | return m.Accuate 854 | } 855 | return false 856 | } 857 | 858 | func (m *GroupLeader) GetSignature() []byte { 859 | if m != nil { 860 | return m.Signature 861 | } 862 | return nil 863 | } 864 | 865 | type GroupInvitation struct { 866 | Group uint64 `protobuf:"varint,1,opt,name=group" json:"group,omitempty"` 867 | Leader uint64 `protobuf:"varint,2,opt,name=leader" json:"leader,omitempty"` 868 | Node uint64 `protobuf:"varint,3,opt,name=node" json:"node,omitempty"` 869 | Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` 870 | } 871 | 872 | func (m *GroupInvitation) Reset() { *m = GroupInvitation{} } 873 | func (m *GroupInvitation) String() string { return proto.CompactTextString(m) } 874 | func (*GroupInvitation) ProtoMessage() {} 875 | func (*GroupInvitation) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{20} } 876 | 877 | func (m *GroupInvitation) GetGroup() uint64 { 878 | if m != nil { 879 | return m.Group 880 | } 881 | return 0 882 | } 883 | 884 | func (m *GroupInvitation) GetLeader() uint64 { 885 | if m != nil { 886 | return m.Leader 887 | } 888 | return 0 889 | } 890 | 891 | func (m *GroupInvitation) GetNode() uint64 { 892 | if m != nil { 893 | return m.Node 894 | } 895 | return 0 896 | } 897 | 898 | func (m *GroupInvitation) GetSignature() []byte { 899 | if m != nil { 900 | return m.Signature 901 | } 902 | return nil 903 | } 904 | 905 | type RaftGroupNodes struct { 906 | Nodes []*Host `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` 907 | } 908 | 909 | func (m *RaftGroupNodes) Reset() { *m = RaftGroupNodes{} } 910 | func (m *RaftGroupNodes) String() string { return proto.CompactTextString(m) } 911 | func (*RaftGroupNodes) ProtoMessage() {} 912 | func (*RaftGroupNodes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } 913 | 914 | func (m *RaftGroupNodes) GetNodes() []*Host { 915 | if m != nil { 916 | return m.Nodes 917 | } 918 | return nil 919 | } 920 | 921 | type Nothing struct { 922 | } 923 | 924 | func (m *Nothing) Reset() { *m = Nothing{} } 925 | func (m *Nothing) String() string { return proto.CompactTextString(m) } 926 | func (*Nothing) ProtoMessage() {} 927 | func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } 928 | 929 | func init() { 930 | proto.RegisterType((*CommandRequest)(nil), "server.CommandRequest") 931 | proto.RegisterType((*CommandResponse)(nil), "server.CommandResponse") 932 | proto.RegisterType((*LogEntry)(nil), "server.LogEntry") 933 | proto.RegisterType((*RequestVoteRequest)(nil), "server.RequestVoteRequest") 934 | proto.RegisterType((*RequestVoteResponse)(nil), "server.RequestVoteResponse") 935 | proto.RegisterType((*AppendEntriesRequest)(nil), "server.AppendEntriesRequest") 936 | proto.RegisterType((*AppendEntriesResponse)(nil), 
"server.AppendEntriesResponse") 937 | proto.RegisterType((*Peer)(nil), "server.Peer") 938 | proto.RegisterType((*Host)(nil), "server.Host") 939 | proto.RegisterType((*RaftGroup)(nil), "server.RaftGroup") 940 | proto.RegisterType((*ServerConfig)(nil), "server.ServerConfig") 941 | proto.RegisterType((*ApproveAppendResponse)(nil), "server.ApproveAppendResponse") 942 | proto.RegisterType((*GroupId)(nil), "server.GroupId") 943 | proto.RegisterType((*GroupNodesResponse)(nil), "server.GroupNodesResponse") 944 | proto.RegisterType((*GroupMember)(nil), "server.GroupMember") 945 | proto.RegisterType((*GroupMembersResponse)(nil), "server.GroupMembersResponse") 946 | proto.RegisterType((*PullGroupLogsResuest)(nil), "server.PullGroupLogsResuest") 947 | proto.RegisterType((*LogEntries)(nil), "server.LogEntries") 948 | proto.RegisterType((*NodeJoinGroupEntry)(nil), "server.NodeJoinGroupEntry") 949 | proto.RegisterType((*GroupLeader)(nil), "server.GroupLeader") 950 | proto.RegisterType((*GroupInvitation)(nil), "server.GroupInvitation") 951 | proto.RegisterType((*RaftGroupNodes)(nil), "server.RaftGroupNodes") 952 | proto.RegisterType((*Nothing)(nil), "server.Nothing") 953 | } 954 | 955 | // Reference imports to suppress errors if they are not otherwise used. 956 | var _ context.Context 957 | var _ grpc.ClientConn 958 | 959 | // This is a compile-time assertion to ensure that this generated file 960 | // is compatible with the grpc package it is being compiled against. 
961 | const _ = grpc.SupportPackageIsVersion4 962 | 963 | // Client API for BFTRaft service 964 | 965 | type BFTRaftClient interface { 966 | ExecCommand(ctx context.Context, in *CommandRequest, opts ...grpc.CallOption) (*CommandResponse, error) 967 | RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) 968 | AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) 969 | ApproveAppend(ctx context.Context, in *AppendEntriesResponse, opts ...grpc.CallOption) (*ApproveAppendResponse, error) 970 | GroupHosts(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*GroupNodesResponse, error) 971 | GroupMembers(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*GroupMembersResponse, error) 972 | GetGroupLeader(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*GroupLeader, error) 973 | GetGroupContent(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*RaftGroup, error) 974 | PullGroupLogs(ctx context.Context, in *PullGroupLogsResuest, opts ...grpc.CallOption) (*LogEntries, error) 975 | SendGroupInvitation(ctx context.Context, in *GroupInvitation, opts ...grpc.CallOption) (*Nothing, error) 976 | } 977 | 978 | type bFTRaftClient struct { 979 | cc *grpc.ClientConn 980 | } 981 | 982 | func NewBFTRaftClient(cc *grpc.ClientConn) BFTRaftClient { 983 | return &bFTRaftClient{cc} 984 | } 985 | 986 | func (c *bFTRaftClient) ExecCommand(ctx context.Context, in *CommandRequest, opts ...grpc.CallOption) (*CommandResponse, error) { 987 | out := new(CommandResponse) 988 | err := grpc.Invoke(ctx, "/server.BFTRaft/ExecCommand", in, out, c.cc, opts...) 
989 | if err != nil { 990 | return nil, err 991 | } 992 | return out, nil 993 | } 994 | 995 | func (c *bFTRaftClient) RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) { 996 | out := new(RequestVoteResponse) 997 | err := grpc.Invoke(ctx, "/server.BFTRaft/RequestVote", in, out, c.cc, opts...) 998 | if err != nil { 999 | return nil, err 1000 | } 1001 | return out, nil 1002 | } 1003 | 1004 | func (c *bFTRaftClient) AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) { 1005 | out := new(AppendEntriesResponse) 1006 | err := grpc.Invoke(ctx, "/server.BFTRaft/AppendEntries", in, out, c.cc, opts...) 1007 | if err != nil { 1008 | return nil, err 1009 | } 1010 | return out, nil 1011 | } 1012 | 1013 | func (c *bFTRaftClient) ApproveAppend(ctx context.Context, in *AppendEntriesResponse, opts ...grpc.CallOption) (*ApproveAppendResponse, error) { 1014 | out := new(ApproveAppendResponse) 1015 | err := grpc.Invoke(ctx, "/server.BFTRaft/ApproveAppend", in, out, c.cc, opts...) 1016 | if err != nil { 1017 | return nil, err 1018 | } 1019 | return out, nil 1020 | } 1021 | 1022 | func (c *bFTRaftClient) GroupHosts(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*GroupNodesResponse, error) { 1023 | out := new(GroupNodesResponse) 1024 | err := grpc.Invoke(ctx, "/server.BFTRaft/GroupHosts", in, out, c.cc, opts...) 1025 | if err != nil { 1026 | return nil, err 1027 | } 1028 | return out, nil 1029 | } 1030 | 1031 | func (c *bFTRaftClient) GroupMembers(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*GroupMembersResponse, error) { 1032 | out := new(GroupMembersResponse) 1033 | err := grpc.Invoke(ctx, "/server.BFTRaft/GroupMembers", in, out, c.cc, opts...) 
1034 | if err != nil { 1035 | return nil, err 1036 | } 1037 | return out, nil 1038 | } 1039 | 1040 | func (c *bFTRaftClient) GetGroupLeader(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*GroupLeader, error) { 1041 | out := new(GroupLeader) 1042 | err := grpc.Invoke(ctx, "/server.BFTRaft/GetGroupLeader", in, out, c.cc, opts...) 1043 | if err != nil { 1044 | return nil, err 1045 | } 1046 | return out, nil 1047 | } 1048 | 1049 | func (c *bFTRaftClient) GetGroupContent(ctx context.Context, in *GroupId, opts ...grpc.CallOption) (*RaftGroup, error) { 1050 | out := new(RaftGroup) 1051 | err := grpc.Invoke(ctx, "/server.BFTRaft/GetGroupContent", in, out, c.cc, opts...) 1052 | if err != nil { 1053 | return nil, err 1054 | } 1055 | return out, nil 1056 | } 1057 | 1058 | func (c *bFTRaftClient) PullGroupLogs(ctx context.Context, in *PullGroupLogsResuest, opts ...grpc.CallOption) (*LogEntries, error) { 1059 | out := new(LogEntries) 1060 | err := grpc.Invoke(ctx, "/server.BFTRaft/PullGroupLogs", in, out, c.cc, opts...) 1061 | if err != nil { 1062 | return nil, err 1063 | } 1064 | return out, nil 1065 | } 1066 | 1067 | func (c *bFTRaftClient) SendGroupInvitation(ctx context.Context, in *GroupInvitation, opts ...grpc.CallOption) (*Nothing, error) { 1068 | out := new(Nothing) 1069 | err := grpc.Invoke(ctx, "/server.BFTRaft/SendGroupInvitation", in, out, c.cc, opts...) 
1070 | if err != nil { 1071 | return nil, err 1072 | } 1073 | return out, nil 1074 | } 1075 | 1076 | // Server API for BFTRaft service 1077 | 1078 | type BFTRaftServer interface { 1079 | ExecCommand(context.Context, *CommandRequest) (*CommandResponse, error) 1080 | RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) 1081 | AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) 1082 | ApproveAppend(context.Context, *AppendEntriesResponse) (*ApproveAppendResponse, error) 1083 | GroupHosts(context.Context, *GroupId) (*GroupNodesResponse, error) 1084 | GroupMembers(context.Context, *GroupId) (*GroupMembersResponse, error) 1085 | GetGroupLeader(context.Context, *GroupId) (*GroupLeader, error) 1086 | GetGroupContent(context.Context, *GroupId) (*RaftGroup, error) 1087 | PullGroupLogs(context.Context, *PullGroupLogsResuest) (*LogEntries, error) 1088 | SendGroupInvitation(context.Context, *GroupInvitation) (*Nothing, error) 1089 | } 1090 | 1091 | func RegisterBFTRaftServer(s *grpc.Server, srv BFTRaftServer) { 1092 | s.RegisterService(&_BFTRaft_serviceDesc, srv) 1093 | } 1094 | 1095 | func _BFTRaft_ExecCommand_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1096 | in := new(CommandRequest) 1097 | if err := dec(in); err != nil { 1098 | return nil, err 1099 | } 1100 | if interceptor == nil { 1101 | return srv.(BFTRaftServer).ExecCommand(ctx, in) 1102 | } 1103 | info := &grpc.UnaryServerInfo{ 1104 | Server: srv, 1105 | FullMethod: "/server.BFTRaft/ExecCommand", 1106 | } 1107 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1108 | return srv.(BFTRaftServer).ExecCommand(ctx, req.(*CommandRequest)) 1109 | } 1110 | return interceptor(ctx, in, info, handler) 1111 | } 1112 | 1113 | func _BFTRaft_RequestVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { 1114 | in := new(RequestVoteRequest) 1115 | if err := dec(in); err != nil { 1116 | return nil, err 1117 | } 1118 | if interceptor == nil { 1119 | return srv.(BFTRaftServer).RequestVote(ctx, in) 1120 | } 1121 | info := &grpc.UnaryServerInfo{ 1122 | Server: srv, 1123 | FullMethod: "/server.BFTRaft/RequestVote", 1124 | } 1125 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1126 | return srv.(BFTRaftServer).RequestVote(ctx, req.(*RequestVoteRequest)) 1127 | } 1128 | return interceptor(ctx, in, info, handler) 1129 | } 1130 | 1131 | func _BFTRaft_AppendEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1132 | in := new(AppendEntriesRequest) 1133 | if err := dec(in); err != nil { 1134 | return nil, err 1135 | } 1136 | if interceptor == nil { 1137 | return srv.(BFTRaftServer).AppendEntries(ctx, in) 1138 | } 1139 | info := &grpc.UnaryServerInfo{ 1140 | Server: srv, 1141 | FullMethod: "/server.BFTRaft/AppendEntries", 1142 | } 1143 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1144 | return srv.(BFTRaftServer).AppendEntries(ctx, req.(*AppendEntriesRequest)) 1145 | } 1146 | return interceptor(ctx, in, info, handler) 1147 | } 1148 | 1149 | func _BFTRaft_ApproveAppend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1150 | in := new(AppendEntriesResponse) 1151 | if err := dec(in); err != nil { 1152 | return nil, err 1153 | } 1154 | if interceptor == nil { 1155 | return srv.(BFTRaftServer).ApproveAppend(ctx, in) 1156 | } 1157 | info := &grpc.UnaryServerInfo{ 1158 | Server: srv, 1159 | FullMethod: "/server.BFTRaft/ApproveAppend", 1160 | } 1161 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1162 | return srv.(BFTRaftServer).ApproveAppend(ctx, 
req.(*AppendEntriesResponse)) 1163 | } 1164 | return interceptor(ctx, in, info, handler) 1165 | } 1166 | 1167 | func _BFTRaft_GroupHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1168 | in := new(GroupId) 1169 | if err := dec(in); err != nil { 1170 | return nil, err 1171 | } 1172 | if interceptor == nil { 1173 | return srv.(BFTRaftServer).GroupHosts(ctx, in) 1174 | } 1175 | info := &grpc.UnaryServerInfo{ 1176 | Server: srv, 1177 | FullMethod: "/server.BFTRaft/GroupHosts", 1178 | } 1179 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1180 | return srv.(BFTRaftServer).GroupHosts(ctx, req.(*GroupId)) 1181 | } 1182 | return interceptor(ctx, in, info, handler) 1183 | } 1184 | 1185 | func _BFTRaft_GroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1186 | in := new(GroupId) 1187 | if err := dec(in); err != nil { 1188 | return nil, err 1189 | } 1190 | if interceptor == nil { 1191 | return srv.(BFTRaftServer).GroupMembers(ctx, in) 1192 | } 1193 | info := &grpc.UnaryServerInfo{ 1194 | Server: srv, 1195 | FullMethod: "/server.BFTRaft/GroupMembers", 1196 | } 1197 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1198 | return srv.(BFTRaftServer).GroupMembers(ctx, req.(*GroupId)) 1199 | } 1200 | return interceptor(ctx, in, info, handler) 1201 | } 1202 | 1203 | func _BFTRaft_GetGroupLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1204 | in := new(GroupId) 1205 | if err := dec(in); err != nil { 1206 | return nil, err 1207 | } 1208 | if interceptor == nil { 1209 | return srv.(BFTRaftServer).GetGroupLeader(ctx, in) 1210 | } 1211 | info := &grpc.UnaryServerInfo{ 1212 | Server: srv, 1213 | FullMethod: "/server.BFTRaft/GetGroupLeader", 
1214 | } 1215 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1216 | return srv.(BFTRaftServer).GetGroupLeader(ctx, req.(*GroupId)) 1217 | } 1218 | return interceptor(ctx, in, info, handler) 1219 | } 1220 | 1221 | func _BFTRaft_GetGroupContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1222 | in := new(GroupId) 1223 | if err := dec(in); err != nil { 1224 | return nil, err 1225 | } 1226 | if interceptor == nil { 1227 | return srv.(BFTRaftServer).GetGroupContent(ctx, in) 1228 | } 1229 | info := &grpc.UnaryServerInfo{ 1230 | Server: srv, 1231 | FullMethod: "/server.BFTRaft/GetGroupContent", 1232 | } 1233 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1234 | return srv.(BFTRaftServer).GetGroupContent(ctx, req.(*GroupId)) 1235 | } 1236 | return interceptor(ctx, in, info, handler) 1237 | } 1238 | 1239 | func _BFTRaft_PullGroupLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1240 | in := new(PullGroupLogsResuest) 1241 | if err := dec(in); err != nil { 1242 | return nil, err 1243 | } 1244 | if interceptor == nil { 1245 | return srv.(BFTRaftServer).PullGroupLogs(ctx, in) 1246 | } 1247 | info := &grpc.UnaryServerInfo{ 1248 | Server: srv, 1249 | FullMethod: "/server.BFTRaft/PullGroupLogs", 1250 | } 1251 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1252 | return srv.(BFTRaftServer).PullGroupLogs(ctx, req.(*PullGroupLogsResuest)) 1253 | } 1254 | return interceptor(ctx, in, info, handler) 1255 | } 1256 | 1257 | func _BFTRaft_SendGroupInvitation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 1258 | in := new(GroupInvitation) 1259 | if err := dec(in); err != nil { 1260 | return nil, err 1261 | } 1262 | if 
interceptor == nil { 1263 | return srv.(BFTRaftServer).SendGroupInvitation(ctx, in) 1264 | } 1265 | info := &grpc.UnaryServerInfo{ 1266 | Server: srv, 1267 | FullMethod: "/server.BFTRaft/SendGroupInvitation", 1268 | } 1269 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 1270 | return srv.(BFTRaftServer).SendGroupInvitation(ctx, req.(*GroupInvitation)) 1271 | } 1272 | return interceptor(ctx, in, info, handler) 1273 | } 1274 | 1275 | var _BFTRaft_serviceDesc = grpc.ServiceDesc{ 1276 | ServiceName: "server.BFTRaft", 1277 | HandlerType: (*BFTRaftServer)(nil), 1278 | Methods: []grpc.MethodDesc{ 1279 | { 1280 | MethodName: "ExecCommand", 1281 | Handler: _BFTRaft_ExecCommand_Handler, 1282 | }, 1283 | { 1284 | MethodName: "RequestVote", 1285 | Handler: _BFTRaft_RequestVote_Handler, 1286 | }, 1287 | { 1288 | MethodName: "AppendEntries", 1289 | Handler: _BFTRaft_AppendEntries_Handler, 1290 | }, 1291 | { 1292 | MethodName: "ApproveAppend", 1293 | Handler: _BFTRaft_ApproveAppend_Handler, 1294 | }, 1295 | { 1296 | MethodName: "GroupHosts", 1297 | Handler: _BFTRaft_GroupHosts_Handler, 1298 | }, 1299 | { 1300 | MethodName: "GroupMembers", 1301 | Handler: _BFTRaft_GroupMembers_Handler, 1302 | }, 1303 | { 1304 | MethodName: "GetGroupLeader", 1305 | Handler: _BFTRaft_GetGroupLeader_Handler, 1306 | }, 1307 | { 1308 | MethodName: "GetGroupContent", 1309 | Handler: _BFTRaft_GetGroupContent_Handler, 1310 | }, 1311 | { 1312 | MethodName: "PullGroupLogs", 1313 | Handler: _BFTRaft_PullGroupLogs_Handler, 1314 | }, 1315 | { 1316 | MethodName: "SendGroupInvitation", 1317 | Handler: _BFTRaft_SendGroupInvitation_Handler, 1318 | }, 1319 | }, 1320 | Streams: []grpc.StreamDesc{}, 1321 | Metadata: "proto/server/server.proto", 1322 | } 1323 | 1324 | func init() { proto.RegisterFile("proto/server/server.proto", fileDescriptor0) } 1325 | 1326 | var fileDescriptor0 = []byte{ 1327 | // 1245 bytes of a gzipped FileDescriptorProto 1328 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x5f, 0x6f, 0x1b, 0x45, 1329 | 0x10, 0xf7, 0xf9, 0xdf, 0xd9, 0x63, 0x27, 0x29, 0xdb, 0xd0, 0xba, 0x6e, 0x2b, 0xc2, 0xaa, 0x0f, 1330 | 0x51, 0x25, 0x1a, 0x14, 0x90, 0x40, 0x42, 0x02, 0xd2, 0xa8, 0xb4, 0x86, 0x10, 0xca, 0xa5, 0xca, 1331 | 0xab, 0x75, 0xb9, 0xdd, 0x5c, 0x4e, 0x9c, 0x77, 0x9d, 0xbb, 0xb5, 0x95, 0xf0, 0x15, 0x78, 0xe5, 1332 | 0x5b, 0xf0, 0x80, 0xe0, 0x85, 0x0f, 0xc0, 0x33, 0x1f, 0x80, 0x6f, 0x83, 0x76, 0x76, 0xef, 0x7c, 1333 | 0x77, 0x76, 0x9c, 0x96, 0xa7, 0xdc, 0xfc, 0xd9, 0xd9, 0xdf, 0xcc, 0xfc, 0x66, 0xd6, 0x81, 0x07, 1334 | 0xd3, 0x44, 0x2a, 0xb9, 0x97, 0xf2, 0x64, 0xce, 0x13, 0xfb, 0xe7, 0x19, 0xea, 0x48, 0xdb, 0x48, 1335 | 0xf4, 0x37, 0x07, 0x36, 0x0f, 0xe5, 0x64, 0xe2, 0x0b, 0xe6, 0xf1, 0xcb, 0x19, 0x4f, 0x15, 0xd9, 1336 | 0x86, 0x56, 0x98, 0xc8, 0xd9, 0x74, 0xe0, 0xec, 0x38, 0xbb, 0x4d, 0xcf, 0x08, 0xe4, 0x21, 0x74, 1337 | 0x83, 0x38, 0xe2, 0x42, 0x8d, 0x23, 0x36, 0xa8, 0xa3, 0xa5, 0x63, 0x14, 0x23, 0x46, 0x1e, 0x03, 1338 | 0x24, 0xe6, 0xb4, 0xb6, 0x36, 0xd0, 0xda, 0xb5, 0x9a, 0x11, 0x23, 0xf7, 0xc1, 0x3d, 0x9f, 0x89, 1339 | 0x40, 0xdb, 0x9a, 0x68, 0x6b, 0x6b, 0x71, 0xc4, 0xc8, 0x23, 0xe8, 0xa6, 0x51, 0x28, 0x7c, 0x35, 1340 | 0x4b, 0xf8, 0xa0, 0xb5, 0xe3, 0xec, 0xf6, 0xbd, 0x85, 0x82, 0xdc, 0x81, 0x86, 0x9f, 0x84, 0x83, 1341 | 0x36, 0xea, 0xf5, 0x27, 0xfd, 0xd3, 0x81, 0xad, 0x1c, 0x6d, 0x3a, 0x95, 0x22, 0xe5, 0x37, 0xc3, 1342 | 0x8d, 0xb9, 0xcf, 0x78, 0x52, 0x80, 0x6b, 0x14, 0x06, 0x8f, 0x90, 0x8c, 0x2f, 0xb0, 0xb6, 0xb5, 1343 | 0xb8, 0x94, 0x47, 0xb3, 0x9a, 0xc7, 0x7a, 0xb8, 0xf7, 0xa0, 0x9d, 0xf0, 0x74, 0x16, 0x2b, 0x8b, 1344 | 0xd8, 0x4a, 0xf4, 0x67, 0xe8, 0x1c, 0xc9, 0xf0, 0x85, 0x50, 0xc9, 0x35, 0x21, 0xd0, 0x54, 0x3c, 1345 | 0x99, 0x58, 0xac, 0xf8, 0xad, 0x13, 0x88, 0x04, 0xe3, 0x57, 0x16, 0xa6, 0x11, 0xb4, 0xe7, 0x85, 1346 | 0x9f, 0x5e, 0x20, 0xc0, 0xbe, 0x87, 0xdf, 0xe4, 0x63, 0x70, 0x03, 0x93, 0x3d, 0x62, 0xeb, 0xed, 1347 | 0xdf, 0x7b, 0x66, 0x9b, 0x5a, 0x6e, 0xa1, 0x97, 0xb9, 0xd1, 0xbf, 0x1c, 0x20, 
0x56, 0x79, 0x2a, 1348 | 0x15, 0x5f, 0xdf, 0xe2, 0x0c, 0x5c, 0xbd, 0x00, 0x4e, 0xd7, 0x51, 0x86, 0x63, 0x03, 0xb0, 0x61, 1349 | 0xeb, 0x28, 0xc3, 0x11, 0x62, 0x7c, 0x00, 0xfa, 0x7b, 0x8c, 0x87, 0x4c, 0xb1, 0xdc, 0x58, 0x86, 1350 | 0x6f, 0xf4, 0xb9, 0x0f, 0xa1, 0x1f, 0xf8, 0x82, 0x45, 0xcc, 0x57, 0x58, 0xe7, 0x16, 0x9a, 0x7b, 1351 | 0xb9, 0xae, 0x5a, 0xcd, 0x76, 0xa5, 0x9a, 0xf4, 0x1f, 0x07, 0xee, 0x96, 0x90, 0xaf, 0x6d, 0xf7, 1352 | 0x3b, 0x43, 0x7f, 0x0b, 0x7c, 0xdb, 0xd0, 0x9a, 0x4b, 0xc5, 0x13, 0xc4, 0xd6, 0xf4, 0x8c, 0x40, 1353 | 0x06, 0xe0, 0x86, 0x89, 0x2f, 0x14, 0x67, 0x03, 0x77, 0xc7, 0xd9, 0xed, 0x78, 0x99, 0x58, 0xce, 1354 | 0xa7, 0x53, 0xcd, 0xe7, 0x8f, 0x3a, 0x6c, 0x1f, 0x4c, 0xa7, 0x5c, 0x30, 0xcd, 0x84, 0x88, 0xa7, 1355 | 0xff, 0xaf, 0x17, 0x39, 0xa7, 0x1b, 0x15, 0x4e, 0x3f, 0x81, 0xcd, 0x69, 0xc2, 0xe7, 0xe3, 0x45, 1356 | 0xca, 0xa6, 0x23, 0x7d, 0xad, 0x3d, 0xca, 0xd2, 0xa6, 0xb0, 0x91, 0x7b, 0x61, 0x7c, 0x9b, 0xb7, 1357 | 0x75, 0xc2, 0xd6, 0xad, 0xed, 0x0b, 0xf9, 0x12, 0xfa, 0x97, 0x33, 0x99, 0xcc, 0x26, 0x63, 0x5d, 1358 | 0x8f, 0x74, 0xe0, 0xee, 0x34, 0x76, 0x7b, 0xfb, 0x0f, 0x33, 0x22, 0xae, 0x68, 0x99, 0xd7, 0x33, 1359 | 0x07, 0xb4, 0x2e, 0x25, 0x4f, 0xc1, 0xe5, 0xa6, 0x00, 0x83, 0x0e, 0x1e, 0xbd, 0x93, 0x1d, 0xcd, 1360 | 0x86, 0xc4, 0xcb, 0x1c, 0xe8, 0xbf, 0x0e, 0xbc, 0x5f, 0xa9, 0xd9, 0x3b, 0xb3, 0x20, 0x9f, 0xae, 1361 | 0x46, 0x65, 0xba, 0xa6, 0x9c, 0x27, 0xb6, 0x46, 0xf8, 0x8d, 0x79, 0xcf, 0x82, 0x80, 0xa7, 0x29, 1362 | 0x37, 0x7c, 0xe8, 0x78, 0x0b, 0x85, 0xb6, 0x06, 0x52, 0xcc, 0x23, 0x11, 0x70, 0x86, 0x55, 0xe9, 1363 | 0x78, 0x0b, 0x45, 0x3e, 0xad, 0x6e, 0x61, 0x5a, 0xd7, 0xf3, 0x21, 0x86, 0xe6, 0x6b, 0x7d, 0xeb, 1364 | 0x26, 0xd4, 0x23, 0x66, 0xd3, 0xa8, 0x47, 0x6c, 0x91, 0x59, 0xbd, 0x98, 0xd9, 0x63, 0x00, 0xc1, 1365 | 0xaf, 0x54, 0xa9, 0xb3, 0x5d, 0xad, 0x31, 0x6d, 0xfd, 0x00, 0x7a, 0x13, 0x5f, 0x05, 0x17, 0xd6, 1366 | 0x6e, 0x9a, 0x0a, 0xa8, 0x42, 0x07, 0xfa, 0x8b, 0x03, 0xcd, 0x57, 0x32, 0x55, 0x4b, 0xd7, 0x69, 1367 | 0x4e, 0xf9, 0xa9, 
0x1a, 0xa7, 0x9c, 0x8b, 0x7c, 0x4f, 0xfa, 0xa9, 0x3a, 0xe1, 0x5c, 0xe8, 0x8d, 1368 | 0x26, 0x45, 0x1c, 0x09, 0x8e, 0xc5, 0xeb, 0x78, 0x56, 0xd2, 0xd7, 0x99, 0x9e, 0x8d, 0x7d, 0xc6, 1369 | 0x4c, 0x11, 0xbb, 0x1e, 0x18, 0xd5, 0x01, 0x63, 0x89, 0x86, 0x3b, 0x9d, 0x9d, 0xc5, 0x51, 0x30, 1370 | 0xfe, 0x89, 0x5f, 0x67, 0x9b, 0xd2, 0x68, 0xbe, 0xe3, 0xd7, 0xf4, 0x04, 0xba, 0x9e, 0x7f, 0xae, 1371 | 0x5e, 0x62, 0x6a, 0x14, 0xfa, 0x09, 0x9f, 0xc6, 0x51, 0xe0, 0xab, 0x48, 0x8a, 0x14, 0xb1, 0x6d, 1372 | 0x78, 0x25, 0x9d, 0x45, 0x5d, 0xcf, 0x51, 0x67, 0x8d, 0x6e, 0x2e, 0x1a, 0x4d, 0xf7, 0xa0, 0x7f, 1373 | 0x82, 0x08, 0x0e, 0xa5, 0x38, 0x8f, 0x42, 0x0d, 0x72, 0x9a, 0x44, 0x73, 0x3d, 0xdf, 0x1a, 0x84, 1374 | 0x83, 0x20, 0xc0, 0xaa, 0x34, 0x8a, 0xbf, 0x0d, 0xbb, 0x12, 0x39, 0xe7, 0x86, 0x64, 0xb7, 0xb3, 1375 | 0x0b, 0x39, 0x53, 0x2f, 0x70, 0x66, 0x35, 0xbb, 0x86, 0xd0, 0xf1, 0x31, 0x22, 0x37, 0x8b, 0xba, 1376 | 0xe3, 0xe5, 0xb2, 0xde, 0x1f, 0x8c, 0xc7, 0xfe, 0x75, 0xce, 0xb1, 0x4c, 0xd4, 0xd5, 0x3e, 0xf7, 1377 | 0xa3, 0x38, 0xa7, 0x97, 0x95, 0xca, 0x3c, 0x72, 0xab, 0x3c, 0x7a, 0x02, 0x2e, 0xd6, 0x71, 0xc4, 1378 | 0xf4, 0x3a, 0x46, 0xa4, 0xe3, 0xbc, 0xc3, 0x6e, 0x68, 0x4c, 0xf4, 0x14, 0x08, 0x7a, 0x1d, 0x4b, 1379 | 0x56, 0x98, 0x22, 0x0a, 0x2d, 0xfd, 0xf0, 0xe9, 0x9a, 0xeb, 0x49, 0xec, 0x67, 0x93, 0xa8, 0x99, 1380 | 0xe2, 0x19, 0x53, 0xf9, 0xf6, 0x7a, 0xf5, 0xf6, 0x1f, 0xa1, 0x87, 0x71, 0xbf, 0xe7, 0x93, 0x33, 1381 | 0x9e, 0x90, 0x1d, 0x68, 0x5e, 0xc8, 0x54, 0xe1, 0xed, 0xd5, 0x78, 0x68, 0xd1, 0x1e, 0x79, 0x11, 1382 | 0x0b, 0x1e, 0x7a, 0x14, 0x4c, 0x49, 0xe9, 0xaf, 0x0e, 0x6c, 0x17, 0x62, 0x2e, 0xd0, 0xee, 0x01, 1383 | 0x20, 0x55, 0xf5, 0x76, 0xb8, 0xb6, 0x01, 0x96, 0x97, 0x07, 0xd2, 0xd9, 0x3c, 0xb6, 0x1f, 0x81, 1384 | 0x3b, 0x31, 0x31, 0x06, 0x0d, 0x4c, 0xf0, 0x6e, 0xe6, 0x5d, 0x88, 0xef, 0x65, 0x3e, 0xe5, 0x4c, 1385 | 0x9b, 0xd5, 0x4c, 0x9f, 0xc3, 0xf6, 0xeb, 0x59, 0x1c, 0xe3, 0xc9, 0x23, 0x19, 0x6a, 0x58, 0x6b, 1386 | 0xd6, 0xf7, 0xca, 0x37, 0x9d, 0x9e, 0x02, 0x58, 0x9c, 0x51, 
0x79, 0x13, 0x3a, 0xb7, 0x6c, 0xc2, 1387 | 0x5b, 0xba, 0xf0, 0x14, 0x88, 0x6e, 0xec, 0xb7, 0x32, 0x12, 0x88, 0xcf, 0xa4, 0xbf, 0x12, 0x19, 1388 | 0x0d, 0x6d, 0xc7, 0x8e, 0xf0, 0xe1, 0xd0, 0xfd, 0xd0, 0x7d, 0x5e, 0xdd, 0x31, 0x6d, 0xd1, 0x84, 1389 | 0x3d, 0x08, 0x82, 0x99, 0xaf, 0xcc, 0xc5, 0x1d, 0x2f, 0x13, 0xcb, 0xa0, 0x1a, 0x55, 0x50, 0x97, 1390 | 0xb0, 0x65, 0x88, 0x29, 0xe6, 0x91, 0xc2, 0x39, 0xbe, 0xa1, 0x56, 0xf7, 0xa0, 0x6d, 0x5e, 0x31, 1391 | 0x5b, 0x2c, 0x2b, 0xe9, 0x79, 0x43, 0x68, 0x66, 0xb4, 0x0c, 0x98, 0xf5, 0x3d, 0xfa, 0x14, 0x36, 1392 | 0xf3, 0xbd, 0x82, 0x4c, 0x7f, 0x1b, 0x86, 0xd3, 0x2e, 0xb8, 0xc7, 0x52, 0x5d, 0x44, 0x22, 0xdc, 1393 | 0xff, 0xbd, 0x05, 0xee, 0xf3, 0x6f, 0xde, 0xe8, 0x20, 0xe4, 0x6b, 0xe8, 0xbd, 0xb8, 0xe2, 0x81, 1394 | 0xfd, 0x65, 0x45, 0x6e, 0xf8, 0xa9, 0x35, 0xbc, 0xbf, 0xa4, 0x37, 0x74, 0xa5, 0x35, 0xf2, 0x0a, 1395 | 0x7a, 0x85, 0xe7, 0x90, 0x0c, 0x57, 0xbe, 0x91, 0x26, 0xca, 0xba, 0xf7, 0x93, 0xd6, 0xc8, 0x31, 1396 | 0x6c, 0x94, 0xde, 0x41, 0xf2, 0x28, 0xf3, 0x5f, 0xf5, 0x93, 0x62, 0xf8, 0xf8, 0x06, 0x6b, 0x1e, 1397 | 0xef, 0x07, 0x8c, 0xb7, 0xd8, 0x7c, 0x64, 0xfd, 0x89, 0x52, 0xc0, 0xe5, 0x7d, 0x49, 0x6b, 0xe4, 1398 | 0x0b, 0x00, 0xac, 0xba, 0xae, 0x6b, 0x4a, 0xb6, 0x4a, 0x73, 0x36, 0x62, 0xc3, 0x61, 0x49, 0x51, 1399 | 0x5a, 0x42, 0xb4, 0x46, 0xbe, 0x82, 0x7e, 0x71, 0xe0, 0x97, 0x8f, 0x3f, 0x5a, 0x31, 0xb7, 0xc5, 1400 | 0x00, 0x9f, 0xc3, 0xe6, 0x4b, 0xae, 0x8a, 0xb4, 0x5e, 0x0a, 0x51, 0x1e, 0x7d, 0xe3, 0x45, 0x6b, 1401 | 0xe4, 0x33, 0xd8, 0xca, 0x4e, 0x1e, 0x4a, 0xa1, 0xb8, 0x50, 0xcb, 0x47, 0xdf, 0xcb, 0x7b, 0x93, 1402 | 0x71, 0x8b, 0xd6, 0xc8, 0x21, 0x6c, 0x94, 0xd6, 0xc1, 0xa2, 0x23, 0xab, 0xb6, 0xc4, 0x90, 0x54, 1403 | 0x46, 0x5b, 0xff, 0xba, 0xa9, 0x91, 0x03, 0xb8, 0x7b, 0xc2, 0x05, 0xab, 0x8e, 0xc9, 0xfd, 0x32, 1404 | 0x82, 0xdc, 0x30, 0xcc, 0xa1, 0x59, 0xbe, 0xd2, 0xda, 0x59, 0x1b, 0xff, 0x9d, 0xfb, 0xe4, 0xbf, 1405 | 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x85, 0x52, 0xd7, 0xeb, 0x0d, 0x00, 0x00, 1406 | } 1407 | 
-------------------------------------------------------------------------------- /proto/server/server.proto: -------------------------------------------------------------------------------- 1 | // This file defines a fundamental protocol for BFT raft implementation 2 | // For details of this protocol please refer: 3 | // http://www.scs.stanford.edu/14au-cs244b/labs/projects/copeland_zhong.pdf 4 | 5 | syntax = "proto3"; 6 | package server; 7 | 8 | service BFTRaft { 9 | rpc ExecCommand (CommandRequest) returns (CommandResponse) {} 10 | rpc RequestVote (RequestVoteRequest) returns (RequestVoteResponse) {} 11 | rpc AppendEntries (AppendEntriesRequest) returns (AppendEntriesResponse) {} 12 | rpc ApproveAppend (AppendEntriesResponse) returns (ApproveAppendResponse) {} 13 | rpc GroupHosts (GroupId) returns (GroupNodesResponse) {} 14 | rpc GroupMembers (GroupId) returns (GroupMembersResponse) {} 15 | rpc GetGroupLeader (GroupId) returns (GroupLeader) {} 16 | rpc GetGroupContent (GroupId) returns (RaftGroup) {} 17 | rpc PullGroupLogs (PullGroupLogsResuest) returns (LogEntries) {} 18 | rpc SendGroupInvitation (GroupInvitation) returns (Nothing) {} 19 | } 20 | 21 | message CommandRequest { 22 | uint64 group = 1; 23 | uint64 client_id = 2; 24 | uint64 request_id = 3; 25 | uint64 func_id = 4; 26 | bytes signature = 5; 27 | bytes arg = 6; 28 | } 29 | 30 | message CommandResponse { 31 | uint64 group = 1; 32 | uint64 leader_id = 2; 33 | uint64 node_id = 3; 34 | uint64 request_id = 4; 35 | bytes signature = 5; 36 | bytes result = 6; 37 | } 38 | 39 | message LogEntry { 40 | uint64 term = 1; 41 | uint64 index = 2; 42 | bytes hash = 3; 43 | CommandRequest command = 4; 44 | } 45 | 46 | message RequestVoteRequest { 47 | uint64 group = 1; 48 | uint64 term = 2; 49 | uint64 log_index = 3; 50 | uint64 log_term = 4; 51 | uint64 candidate_id = 5; 52 | bytes signature = 6; 53 | } 54 | 55 | message RequestVoteResponse { 56 | uint64 group = 1; 57 | uint64 term = 2; 58 | uint64 log_index = 3; 59 
| uint64 candidate_id = 5; 60 | uint64 voter = 6; 61 | bool granted = 7; 62 | bytes signature = 8; 63 | } 64 | 65 | message AppendEntriesRequest { 66 | uint64 group = 1; 67 | uint64 term = 2; 68 | uint64 leader_id = 3; 69 | uint64 prev_log_index = 4; 70 | uint64 prev_log_term = 5; 71 | bytes signature = 6; 72 | repeated RequestVoteResponse quorum_votes = 7; 73 | repeated LogEntry entries = 8; 74 | } 75 | 76 | message AppendEntriesResponse { 77 | uint64 group = 1; 78 | uint64 term = 2; 79 | uint64 index = 3; 80 | uint64 peer = 4; 81 | bool successed = 5; 82 | bool convinced = 6; 83 | bytes hash = 7; 84 | bytes signature = 8; 85 | } 86 | 87 | message Peer { 88 | uint64 id = 1; 89 | uint64 group = 2; 90 | uint64 next_index = 4; 91 | uint64 match_index = 5; 92 | } 93 | 94 | message Host { 95 | uint64 id = 1; 96 | uint64 last_seen = 2; 97 | bool online = 3; 98 | string server_addr = 4; 99 | bytes public_key = 5; 100 | } 101 | 102 | message RaftGroup { 103 | uint32 replications = 1; 104 | uint64 id = 2; 105 | uint64 term = 4; 106 | // repeated uint64 peers = 4; Can be searched from the database by scanning 107 | } 108 | 109 | message ServerConfig { 110 | bytes private_key = 1; 111 | } 112 | 113 | message ApproveAppendResponse { 114 | uint64 group = 1; 115 | uint64 peer = 2; 116 | uint64 index = 3; 117 | bool appended = 4; 118 | bool delayed = 5; 119 | bool failed = 6; 120 | bytes signature = 7; 121 | } 122 | 123 | message GroupId { 124 | uint64 group_id = 1; 125 | } 126 | 127 | message GroupNodesResponse { 128 | repeated Host nodes = 1; 129 | bytes signature = 2; 130 | } 131 | 132 | message GroupMember { 133 | Host host = 1; 134 | Peer peer = 2; 135 | } 136 | 137 | message GroupMembersResponse { 138 | LogEntry last_entry = 2; 139 | repeated GroupMember members = 3; 140 | bytes signature = 4; 141 | } 142 | 143 | message PullGroupLogsResuest { 144 | uint64 group = 1; 145 | uint64 index = 2; 146 | } 147 | 148 | message LogEntries { 149 | repeated LogEntry entries = 1; 150 
| bytes signature = 2; 151 | } 152 | 153 | message NodeJoinGroupEntry { 154 | uint64 group = 1; 155 | } 156 | 157 | message GroupLeader { 158 | Host node = 1; 159 | bool Accuate = 2; 160 | bytes signature = 3; 161 | } 162 | 163 | message GroupInvitation { 164 | uint64 group = 1; 165 | uint64 leader = 2; 166 | uint64 node = 3; 167 | bytes signature = 4; 168 | } 169 | 170 | message RaftGroupNodes { 171 | repeated Host nodes = 1; 172 | } 173 | 174 | message Nothing {} -------------------------------------------------------------------------------- /sctripts/refresh_proto.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | protoc -I ./ ./proto/server/server.proto --go_out=plugins=grpc:./ 3 | protoc -I ./ ./proto/client/client.proto --go_out=plugins=grpc:./ -------------------------------------------------------------------------------- /server/alpha.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "crypto/x509" 6 | spb "github.com/PomeloCloud/BFTRaft4go/proto/server" 7 | "github.com/PomeloCloud/BFTRaft4go/utils" 8 | "github.com/dgraph-io/badger" 9 | "github.com/golang/protobuf/proto" 10 | "log" 11 | ) 12 | 13 | // Alpha group is a group specialized for tracking network members and groups 14 | // All nodes on the network should observe alpha group to provide group routing 15 | // Alpha group will not track leadership changes, only members 16 | // It is also responsible for group creation and limiting the number of members in each group 17 | // Both clients and cluster nodes can benefit from alpha group by bootstrapping with any node in the cluster 18 | // It also provides valuable information for consistent hashing and distributed hash table implementations 19 | 20 | // This file contains all of the functions for cluster nodes to track changes in alpha group 21 | 22 | func (s *BFTRaftServer) ColdStart() { 23 | // cold start will 
assign the node as the only member in it's alpha group 24 | alphaGroup := &spb.RaftGroup{ 25 | Id: utils.ALPHA_GROUP, 26 | Replications: 32, 27 | Term: 0, 28 | } 29 | thisPeer := &spb.Peer{ 30 | Id: s.Id, 31 | Group: utils.ALPHA_GROUP, 32 | NextIndex: 0, 33 | MatchIndex: 0, 34 | } 35 | thisHost := &spb.Host{ 36 | Id: s.Id, 37 | LastSeen: 0, 38 | Online: true, 39 | ServerAddr: s.Opts.Address, 40 | } 41 | thisHost.PublicKey, _ = x509.MarshalPKIXPublicKey(utils.PublicKeyFromPrivate(s.PrivateKey)) 42 | if err := s.DB.Update(func(txn *badger.Txn) error { 43 | if err := s.SaveGroup(txn, alphaGroup); err != nil { 44 | return err 45 | } 46 | if err := s.SavePeer(txn, thisPeer); err != nil { 47 | return err 48 | } 49 | return s.SaveHost(txn, thisHost) 50 | }); err != nil { 51 | log.Fatal("cannot save to cold start:", err) 52 | } 53 | newMeta := NewRTGroup( 54 | s, s.Id, 55 | map[uint64]*spb.Peer{thisPeer.Id: thisPeer}, 56 | alphaGroup, LEADER, 57 | ) 58 | newMeta.Role = LEADER 59 | s.SetOnboardGroup(newMeta) 60 | s.Client.AlphaRPCs.ResetBootstrap([]string{s.Opts.Address}) 61 | } 62 | 63 | func (s *BFTRaftServer) SyncAlphaGroup() { 64 | // Force a snapshot sync for group members by asking alpha nodes for it 65 | // This function should be invoked every time it startup 66 | // First we need to get all alpha nodes 67 | // get alpha members from alpha nodes 68 | alphaRPCs := s.Client.AlphaRPCs.Get() 69 | res := utils.MajorityResponse(alphaRPCs, func(client spb.BFTRaftClient) (interface{}, []byte) { 70 | if res, err := client.GroupMembers(context.Background(), &spb.GroupId{ 71 | GroupId: utils.ALPHA_GROUP, 72 | }); err == nil { 73 | features := GetMembersSignData(res.Members) 74 | log.Println("got alpha group member:", len(res.Members)) 75 | return res, features 76 | } else { 77 | log.Println("cannot get group members:", err) 78 | return nil, []byte{} 79 | } 80 | }) 81 | var alphaMemberRes *spb.GroupMembersResponse = nil 82 | if res == nil { 83 | alphaMemberRes = nil 84 | } else 
{ 85 | alphaMemberRes = res.(*spb.GroupMembersResponse) 86 | } 87 | if alphaMemberRes == nil { 88 | log.Println("cannot get alpha members, will try to cold start") 89 | s.ColdStart() 90 | return 91 | } 92 | members := alphaMemberRes.Members 93 | isAlphaMember := false 94 | for _, m := range members { 95 | if m.Peer.Id == s.Id { 96 | isAlphaMember = true 97 | break 98 | } 99 | } 100 | lastEntry := alphaMemberRes.LastEntry 101 | group := s.GetGroupNTXN(utils.ALPHA_GROUP) 102 | if isAlphaMember { 103 | if group == nil { 104 | panic("Alpha member cannot find alpha group") 105 | } 106 | // Nothing should be done here, the raft algorithm should take the rest 107 | } else { 108 | if group == nil { 109 | log.Println("cannot find alpha group at local, will pull from remote") 110 | // alpha group cannot be found, it need to be generated 111 | res := utils.MajorityResponse(alphaRPCs, func(client spb.BFTRaftClient) (interface{}, []byte) { 112 | if res, err := client.GetGroupContent(context.Background(), &spb.GroupId{GroupId: utils.ALPHA_GROUP}); err == nil { 113 | if data, err2 := proto.Marshal(res); err2 == nil { 114 | return res, data 115 | } else { 116 | return nil, []byte{} 117 | } 118 | } else { 119 | log.Println("cannot get group content for sync alpha", err) 120 | return nil, []byte{} 121 | } 122 | }) 123 | if res != nil { 124 | group = res.(*spb.RaftGroup) 125 | log.Println("pulled alpha group at term:", group.Term) 126 | } else { 127 | log.Println("cannot get alpha group from cluster") 128 | } 129 | } 130 | if group != nil { 131 | if lastEntry == nil { 132 | group.Term = 0 133 | } else { 134 | group.Term = lastEntry.Index 135 | } 136 | s.DB.Update(func(txn *badger.Txn) error { 137 | // the index will be used to observe changes 138 | s.SaveGroup(txn, group) 139 | for _, member := range members { 140 | s.SavePeer(txn, member.Peer) 141 | s.SaveHost(txn, member.Host) 142 | } 143 | return nil 144 | }) 145 | // TODO: observe alpha group 146 | } else { 147 | 
log.Fatal("cannot generate alpha group from cluster") 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /server/bftraft.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "crypto/rsa" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "github.com/PomeloCloud/BFTRaft4go/client" 9 | cpb "github.com/PomeloCloud/BFTRaft4go/proto/client" 10 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 11 | "github.com/PomeloCloud/BFTRaft4go/utils" 12 | "github.com/dgraph-io/badger" 13 | "github.com/golang/protobuf/proto" 14 | cmap "github.com/orcaman/concurrent-map" 15 | "github.com/patrickmn/go-cache" 16 | "golang.org/x/net/context" 17 | "log" 18 | "sync" 19 | "time" 20 | ) 21 | 22 | type Options struct { 23 | DBPath string 24 | Address string 25 | Bootstrap []string 26 | ConsensusTimeout time.Duration 27 | } 28 | 29 | type BFTRaftServer struct { 30 | Id uint64 31 | Opts Options 32 | DB *badger.DB 33 | // first 10 is reserved for the alpha group 34 | FuncReg map[uint64]func(arg *[]byte, entry *pb.LogEntry) []byte 35 | GroupsOnboard cmap.ConcurrentMap 36 | GroupInvitations map[uint64]chan *pb.GroupInvitation 37 | PendingNewGroups map[uint64]chan error 38 | Groups *cache.Cache 39 | Hosts *cache.Cache 40 | NodePublicKeys *cache.Cache 41 | ClientPublicKeys *cache.Cache 42 | Client *client.BFTRaftClient 43 | PrivateKey *rsa.PrivateKey 44 | ClientRPCs ClientStore 45 | lock sync.RWMutex 46 | } 47 | 48 | func (s *BFTRaftServer) ExecCommand(ctx context.Context, cmd *pb.CommandRequest) (*pb.CommandResponse, error) { 49 | group_id := cmd.Group 50 | response := &pb.CommandResponse{ 51 | Group: cmd.Group, 52 | LeaderId: 0, 53 | NodeId: s.Id, 54 | RequestId: cmd.RequestId, 55 | Signature: s.Sign(utils.CommandSignData(group_id, s.Id, cmd.RequestId, []byte{})), 56 | Result: []byte{}, 57 | } 58 | m := s.GetOnboardGroup(cmd.Group) 59 | if m != nil && m.Leader == s.Id { 60 | 
m.Lock.Lock() 61 | defer m.Lock.Unlock() 62 | isRegNewNode := false 63 | log.Println("executing command group:", cmd.Group, "func:", cmd.FuncId, "client:", cmd.ClientId) 64 | if s.GetHostNTXN(cmd.ClientId) == nil && cmd.Group == utils.ALPHA_GROUP && cmd.FuncId == REG_NODE { 65 | // if registering new node, we should skip the signature verification 66 | log.Println("cannot find node and it's trying to register") 67 | isRegNewNode = true 68 | } 69 | if isRegNewNode || s.VerifyCommandSign(cmd) { // the node is the leader to this group 70 | response.LeaderId = s.Id 71 | var index uint64 72 | var hash []byte 73 | var logEntry pb.LogEntry 74 | if err := s.DB.Update(func(txn *badger.Txn) error { 75 | index = m.LastEntryIndex(txn) + 1 76 | hash, _ = utils.LogHash(m.LastEntryHash(txn), index, cmd.FuncId, cmd.Arg) 77 | logEntry = pb.LogEntry{ 78 | Term: m.Group.Term, 79 | Index: index, 80 | Hash: hash, 81 | Command: cmd, 82 | } 83 | return m.AppendEntryToLocal(txn, &logEntry) 84 | }); err == nil { 85 | m.SendFollowersHeartbeat(ctx) 86 | if len(m.GroupPeers) < 2 || m.WaitLogApproved(index) { 87 | response.Result = *m.CommitGroupLog(&logEntry) 88 | } 89 | } else { 90 | log.Println("append entry on leader failed:", err) 91 | } 92 | } 93 | } else { 94 | var host *pb.Host 95 | if m != nil { 96 | host = s.GetHostNTXN(m.Leader) 97 | } else { 98 | s.DB.View(func(txn *badger.Txn) error { 99 | peers := GetGroupPeersFromKV(txn, group_id) 100 | for id := range peers { 101 | host = s.GetHost(txn, id) 102 | break 103 | } 104 | return nil 105 | }) 106 | } 107 | if c, err := utils.GetClusterRPC(host.ServerAddr); err == nil { 108 | return c.ExecCommand(ctx, cmd) 109 | } 110 | } 111 | response.Signature = s.Sign(utils.CommandSignData( 112 | response.Group, response.NodeId, response.RequestId, response.Result, 113 | )) 114 | return response, nil 115 | } 116 | 117 | func (s *BFTRaftServer) AppendEntries(ctx context.Context, req *pb.AppendEntriesRequest) (*pb.AppendEntriesResponse, error) { 118 
| groupId := req.Group 119 | groupMeta := s.GetOnboardGroup(groupId) 120 | if groupMeta == nil { 121 | errStr := fmt.Sprint("cannot append, group ", req.Group, " not on ", s.Id) 122 | log.Println(errStr) 123 | return nil, errors.New(errStr) 124 | } 125 | return groupMeta.AppendEntries(ctx, req) 126 | } 127 | 128 | func (s *BFTRaftServer) ApproveAppend(ctx context.Context, req *pb.AppendEntriesResponse) (*pb.ApproveAppendResponse, error) { 129 | groupMeta := s.GetOnboardGroup(req.Group) 130 | if groupMeta == nil { 131 | return nil, errors.New("cannot find the group") 132 | } 133 | return groupMeta.ApproveAppend(ctx, req) 134 | } 135 | 136 | func (s *BFTRaftServer) RequestVote(ctx context.Context, req *pb.RequestVoteRequest) (*pb.RequestVoteResponse, error) { 137 | // all of the leader transfer verification happens here 138 | groupId := req.Group 139 | meta := s.GetOnboardGroup(groupId) 140 | if meta == nil { 141 | return nil, errors.New("cannot find the group") 142 | } 143 | return meta.RequestVote(ctx, req) 144 | } 145 | 146 | func GetMembersSignData(members []*pb.GroupMember) []byte { 147 | signData := []byte{} 148 | for _, member := range members { 149 | memberBytes, _ := proto.Marshal(member) 150 | signData = append(signData, memberBytes...) 
151 | } 152 | return signData 153 | } 154 | 155 | func (s *BFTRaftServer) GroupHosts(ctx context.Context, request *pb.GroupId) (*pb.GroupNodesResponse, error) { 156 | // Outlet for group server memberships that contains all of the meta data on the network 157 | // This API is intended to be invoked from any machine to any members in the cluster 158 | result := s.GetGroupHostsNTXN(request.GroupId) 159 | // signature should be optional for clients in case of the client don't know server public keys 160 | signature := s.Sign(utils.NodesSignData(result)) 161 | return &pb.GroupNodesResponse{Nodes: result, Signature: signature}, nil 162 | } 163 | 164 | // this function should be called only on group members 165 | func (s *BFTRaftServer) GroupMembers(ctx context.Context, req *pb.GroupId) (*pb.GroupMembersResponse, error) { 166 | meta := s.GetOnboardGroup(req.GroupId) 167 | if meta == nil { 168 | return nil, errors.New("cannot find group") 169 | } 170 | return meta.RPCGroupMembers(ctx, req) 171 | } 172 | 173 | func (s *BFTRaftServer) GetGroupContent(ctx context.Context, req *pb.GroupId) (*pb.RaftGroup, error) { 174 | group := s.GetGroupNTXN(req.GroupId) 175 | if group == nil { 176 | return nil, errors.New("cannot find group") 177 | } 178 | return group, nil 179 | } 180 | 181 | // TODO: Signature 182 | func (s *BFTRaftServer) PullGroupLogs(ctx context.Context, req *pb.PullGroupLogsResuest) (*pb.LogEntries, error) { 183 | keyPrefix := ComposeKeyPrefix(req.Group, LOG_ENTRIES) 184 | result := []*pb.LogEntry{} 185 | err := s.DB.View(func(txn *badger.Txn) error { 186 | iter := txn.NewIterator(badger.IteratorOptions{}) 187 | iter.Seek(append(keyPrefix, utils.U64Bytes(uint64(req.Index))...)) 188 | if iter.ValidForPrefix(keyPrefix) { 189 | firstEntry := LogEntryFromKVItem(iter.Item()) 190 | if firstEntry.Index == req.Index { 191 | for true { 192 | iter.Next() 193 | if iter.ValidForPrefix(keyPrefix) { 194 | entry := LogEntryFromKVItem(iter.Item()) 195 | result = append(result, 
entry) 196 | } else { 197 | break 198 | } 199 | } 200 | } else { 201 | log.Println("First entry not match") 202 | } 203 | } else { 204 | log.Println("Requesting non existed") 205 | } 206 | return nil 207 | }) 208 | return &pb.LogEntries{Entries: result}, err 209 | } 210 | 211 | func (s *BFTRaftServer) RegisterRaftFunc(func_id uint64, fn func(arg *[]byte, entry *pb.LogEntry) []byte) { 212 | s.FuncReg[func_id] = fn 213 | } 214 | 215 | func (s *BFTRaftServer) GetGroupLeader(ctx context.Context, req *pb.GroupId) (*pb.GroupLeader, error) { 216 | return s.GroupLeader(req.GroupId), nil 217 | } 218 | 219 | func (s *BFTRaftServer) SendGroupInvitation(ctx context.Context, inv *pb.GroupInvitation) (*pb.Nothing, error) { 220 | // TODO: verify invitation signature 221 | go func() { 222 | s.GroupInvitations[inv.Group] <- inv 223 | }() 224 | return &pb.Nothing{}, nil 225 | } 226 | 227 | func (s *BFTRaftServer) Sign(data []byte) []byte { 228 | return utils.Sign(s.PrivateKey, data) 229 | } 230 | 231 | func GetServer(serverOpts Options) (*BFTRaftServer, error) { 232 | flag.Parse() 233 | dbopt := badger.DefaultOptions 234 | dbopt.Dir = serverOpts.DBPath 235 | dbopt.ValueDir = serverOpts.DBPath 236 | db, err := badger.Open(dbopt) 237 | if err != nil { 238 | log.Panic(err) 239 | return nil, err 240 | } 241 | config, err := GetConfig(db) 242 | if err != nil { 243 | log.Panic(err) 244 | return nil, err 245 | } 246 | privateKey, err := utils.ParsePrivateKey(config.PrivateKey) 247 | if err != nil { 248 | log.Panic("error on parse key", config.PrivateKey, err) 249 | return nil, err 250 | } 251 | id := utils.HashPublicKey(utils.PublicKeyFromPrivate(privateKey)) 252 | nclient, err := client.NewClient(serverOpts.Bootstrap, client.ClientOptions{PrivateKey: config.PrivateKey}) 253 | if err != nil { 254 | log.Panic(err) 255 | return nil, err 256 | } 257 | bftRaftServer := BFTRaftServer{ 258 | Id: id, 259 | Opts: serverOpts, 260 | DB: db, 261 | ClientRPCs: NewClientStore(), 262 | Groups: 
cache.New(1*time.Minute, 1*time.Minute), 263 | Hosts: cache.New(1*time.Minute, 1*time.Minute), 264 | NodePublicKeys: cache.New(5*time.Minute, 1*time.Minute), 265 | ClientPublicKeys: cache.New(5*time.Minute, 1*time.Minute), 266 | GroupInvitations: map[uint64]chan *pb.GroupInvitation{}, 267 | GroupsOnboard: cmap.New(), 268 | FuncReg: map[uint64]func(arg *[]byte, entry *pb.LogEntry) []byte{}, 269 | PendingNewGroups: map[uint64]chan error{}, 270 | Client: nclient, 271 | PrivateKey: privateKey, 272 | } 273 | log.Println("scanning hosted groups") 274 | bftRaftServer.ScanHostedGroups(id) 275 | log.Println("registering membership contracts") 276 | bftRaftServer.RegisterMembershipCommands() 277 | log.Println("learning network node members") 278 | bftRaftServer.SyncAlphaGroup() 279 | log.Println("server generated:", bftRaftServer.Id) 280 | return &bftRaftServer, nil 281 | } 282 | 283 | func (s *BFTRaftServer) StartServer() error { 284 | log.Println("registering raft server service") 285 | pb.RegisterBFTRaftServer(utils.GetGRPCServer(s.Opts.Address), s) 286 | log.Println("registering raft feedback service") 287 | cpb.RegisterBFTRaftClientServer(utils.GetGRPCServer(s.Opts.Address), &client.FeedbackServer{ClientIns: s.Client}) 288 | log.Println("going to start server with id:", s.Id, "on:", s.Opts.Address) 289 | go utils.GRPCServerListen(s.Opts.Address) 290 | time.Sleep(1 * time.Second) 291 | log.Println("registering this host") 292 | s.RegHost() 293 | return nil 294 | } 295 | 296 | func InitDatabase(dbPath string) { 297 | config := pb.ServerConfig{} 298 | if privateKey, _, err := utils.GenerateKey(); err == nil { 299 | config.PrivateKey = privateKey 300 | dbopt := badger.DefaultOptions 301 | dbopt.Dir = dbPath 302 | dbopt.ValueDir = dbPath 303 | db, err := badger.Open(dbopt) 304 | if err != nil { 305 | panic(err) 306 | } 307 | configBytes, err := proto.Marshal(&config) 308 | db.Update(func(txn *badger.Txn) error { 309 | return txn.Set(ComposeKeyPrefix(CONFIG_GROUP, 
SERVER_CONF), configBytes, 0x00) 310 | }) 311 | if err := db.Close(); err != nil { 312 | panic(err) 313 | } 314 | log.Println("generated wallet") 315 | } else { 316 | println("Cannot generate private key for the server") 317 | } 318 | } 319 | -------------------------------------------------------------------------------- /server/clients.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | cpb "github.com/PomeloCloud/BFTRaft4go/proto/client" 5 | "github.com/patrickmn/go-cache" 6 | "google.golang.org/grpc" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | type ClientStore struct { 12 | clients *cache.Cache 13 | lock sync.Mutex 14 | } 15 | 16 | type Client struct { 17 | conn *grpc.ClientConn 18 | rpc cpb.BFTRaftClientClient 19 | } 20 | 21 | func (cs *ClientStore) Get(serverAddr string) (*Client, error) { 22 | if cachedClient, cachedFound := cs.clients.Get(serverAddr); cachedFound { 23 | return cachedClient.(*Client), nil 24 | } 25 | cs.lock.Lock() 26 | defer cs.lock.Unlock() 27 | if cachedClient, cachedFound := cs.clients.Get(serverAddr); cachedFound { 28 | return cachedClient.(*Client), nil 29 | } 30 | conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) 31 | if err != nil { 32 | return nil, err 33 | } 34 | rpcClient := cpb.NewBFTRaftClientClient(conn) 35 | client := Client{conn, rpcClient} 36 | cs.clients.Set(serverAddr, &client, cache.DefaultExpiration) 37 | return &client, nil 38 | } 39 | 40 | func NewClientStore() ClientStore { 41 | store := ClientStore{ 42 | clients: cache.New(10*time.Minute, 5*time.Minute), 43 | } 44 | store.clients.OnEvicted(func(host string, clientI interface{}) { 45 | client := clientI.(*Client) 46 | client.conn.Close() 47 | }) 48 | return store 49 | } 50 | -------------------------------------------------------------------------------- /server/config.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | 
"encoding/json" 5 | "errors" 6 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 7 | "github.com/dgraph-io/badger" 8 | "github.com/golang/protobuf/proto" 9 | "io/ioutil" 10 | "log" 11 | ) 12 | 13 | func GetConfig(kv *badger.DB) (*pb.ServerConfig, error) { 14 | var res *pb.ServerConfig = nil 15 | err := kv.View(func(txn *badger.Txn) error { 16 | if item, err := txn.Get(ComposeKeyPrefix(CONFIG_GROUP, SERVER_CONF)); err != nil { 17 | log.Panic(err) 18 | return err 19 | } else { 20 | data := ItemValue(item) 21 | if data == nil { 22 | log.Panic(err) 23 | return errors.New("no data") 24 | } else { 25 | conf := pb.ServerConfig{} 26 | if err := proto.Unmarshal(*data, &conf); err != nil { 27 | log.Panic(err) 28 | return err 29 | } 30 | res = &conf 31 | return nil 32 | } 33 | } 34 | }) 35 | return res, err 36 | } 37 | 38 | type FileConfig struct { 39 | Db string 40 | Address string 41 | Bootstraps []string 42 | } 43 | 44 | func ReadConfigFile(path string) FileConfig { 45 | data, err := ioutil.ReadFile(path) 46 | if err != nil { 47 | panic(err) 48 | } 49 | log.Println(string(data)) 50 | fc := FileConfig{} 51 | if err := json.Unmarshal(data, &fc); err != nil { 52 | panic(err) 53 | } 54 | return fc 55 | } 56 | -------------------------------------------------------------------------------- /server/consensus.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 5 | "github.com/PomeloCloud/BFTRaft4go/utils" 6 | ) 7 | 8 | func (m *RTGroup) prepareApproveChan(groupId uint64, logIndex uint64) { 9 | //cache_key := fmt.Sprint(logIndex) 10 | //if _, existed := s.GroupAppendedLogs.Get(cache_key); !existed { 11 | // s.GroupAppendedLogs.Set(cache_key, make(chan bool, 1), cache.DefaultExpiration) 12 | //} 13 | } 14 | 15 | func (m *RTGroup) WaitLogApproved(logIndex uint64) bool { 16 | // TODO: fix wait approved 17 | //s.prepareApproveChan(groupId, logIndex) 18 | 
//cache_key := fmt.Sprint(groupId, "-", logIndex) 19 | //cache_chan, _ := s.GroupAppendedLogs.Get(cache_key) 20 | //select { 21 | //case approved := <-cache_chan.(chan bool): 22 | // return approved 23 | //case <-time.After(s.Opts.ConsensusTimeout): 24 | // log.Println("wait apprival timeout, group:", groupId, "log:", logIndex) 25 | // return false 26 | //} 27 | return true 28 | } 29 | 30 | func (m *RTGroup) SetLogAppended(groupId uint64, logIndex uint64, isApproved bool) { 31 | //m.prepareApproveChan(groupId, logIndex) 32 | //cache_key := fmt.Sprint(groupId, "-", logIndex) 33 | //if c, existed := m.GroupAppendedLogs.Get(cache_key); existed { 34 | // go func() { 35 | // c.(chan bool) <- isApproved 36 | // }() 37 | //} 38 | } 39 | 40 | func (m *RTGroup) ExpectedHonestPeers() int { 41 | num_peers := len(m.GroupPeers) 42 | return utils.ExpectedPlayers(num_peers) 43 | } 44 | 45 | func (s *BFTRaftServer) PeerApprovedAppend(groupId uint64, logIndex uint64, peer uint64, group_peers []*pb.Peer, isApproved bool) { 46 | //cache_key := fmt.Sprint(groupId, "-", logIndex) 47 | //if _, existed := s.GroupApprovedLogs.Get(cache_key); !existed { 48 | // s.GroupApprovedLogs.Set(cache_key, map[uint64]bool{}, cache.DefaultExpiration) 49 | //} 50 | //approvedPeers, _ := s.GroupApprovedLogs.Get(cache_key) 51 | //approvedPeersMap := approvedPeers.(map[uint64]bool) 52 | //approvedPeersMap[peer] = isApproved 53 | //expectedVotes := ExpectedHonestPeers(group_peers) 54 | //if len(approvedPeersMap) >= expectedVotes { 55 | // approvedVotes := 0 56 | // rejectedVotes := 0 57 | // for _, vote := range approvedPeersMap { 58 | // if vote { 59 | // approvedVotes++ 60 | // } else { 61 | // rejectedVotes++ 62 | // } 63 | // } 64 | // if approvedVotes >= expectedVotes { 65 | // s.SetLogAppended(groupId, logIndex, true) 66 | // return 67 | // } 68 | // if rejectedVotes >= (len(group_peers) - expectedVotes) { 69 | // s.SetLogAppended(groupId, logIndex, false) 70 | // return 71 | // } 72 | //} 73 | } 74 
| -------------------------------------------------------------------------------- /server/conshash.go: -------------------------------------------------------------------------------- 1 | package server 2 | -------------------------------------------------------------------------------- /server/group.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "github.com/90TechSAS/go-recursive-mutex" 9 | cpb "github.com/PomeloCloud/BFTRaft4go/proto/client" 10 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 11 | "github.com/PomeloCloud/BFTRaft4go/utils" 12 | "github.com/dgraph-io/badger" 13 | "github.com/golang/protobuf/proto" 14 | "github.com/huandu/goroutine" 15 | "github.com/patrickmn/go-cache" 16 | "github.com/tevino/abool" 17 | "log" 18 | "math/rand" 19 | "strconv" 20 | "sync" 21 | "time" 22 | ) 23 | 24 | const ( 25 | LEADER = 0 26 | FOLLOWER = 1 27 | CANDIDATE = 2 28 | OBSERVER = 3 29 | ) 30 | 31 | type RTGroup struct { 32 | Server *BFTRaftServer 33 | Leader uint64 34 | LastVotedTo uint64 35 | LastVotedTerm uint64 36 | GroupPeers map[uint64]*pb.Peer 37 | Group *pb.RaftGroup 38 | Timeout time.Time 39 | Role int 40 | Votes []*pb.RequestVoteResponse 41 | SendVotesForPeers map[uint64]bool // key is peer id 42 | IsBusy *abool.AtomicBool 43 | Lock recmutex.RecursiveMutex 44 | VoteLock sync.Mutex 45 | } 46 | 47 | func NewRTGroup( 48 | server *BFTRaftServer, 49 | leader uint64, 50 | groupPeers map[uint64]*pb.Peer, 51 | group *pb.RaftGroup, role int, 52 | ) *RTGroup { 53 | meta := &RTGroup{ 54 | Server: server, 55 | Leader: leader, 56 | GroupPeers: groupPeers, 57 | Group: group, 58 | Timeout: time.Now().Add(60 * time.Second), 59 | Role: role, 60 | Votes: []*pb.RequestVoteResponse{}, 61 | SendVotesForPeers: map[uint64]bool{}, 62 | IsBusy: abool.NewBool(false), 63 | Lock: recmutex.RecursiveMutex{}, 64 | VoteLock: sync.Mutex{}, 65 | } 66 | 
meta.StartTimeWheel() 67 | return meta 68 | } 69 | 70 | func GetGroupFromKV(txn *badger.Txn, groupId uint64) *pb.RaftGroup { 71 | group := &pb.RaftGroup{} 72 | keyPrefix := ComposeKeyPrefix(groupId, GROUP_META) 73 | if item, err := txn.Get(keyPrefix); err == nil { 74 | data := ItemValue(item) 75 | if data == nil { 76 | return nil 77 | } else { 78 | proto.Unmarshal(*data, group) 79 | return group 80 | } 81 | } else { 82 | return nil 83 | } 84 | } 85 | 86 | func (s *BFTRaftServer) GetGroup(txn *badger.Txn, groupId uint64) *pb.RaftGroup { 87 | cacheKey := strconv.Itoa(int(groupId)) 88 | cachedGroup, cacheFound := s.Groups.Get(cacheKey) 89 | if cacheFound { 90 | return cachedGroup.(*pb.RaftGroup) 91 | } else { 92 | group := GetGroupFromKV(txn, groupId) 93 | if group != nil { 94 | s.Groups.Set(cacheKey, group, cache.DefaultExpiration) 95 | return group 96 | } else { 97 | return nil 98 | } 99 | } 100 | } 101 | 102 | func (s *BFTRaftServer) GetGroupNTXN(groupId uint64) *pb.RaftGroup { 103 | group := &pb.RaftGroup{} 104 | s.DB.View(func(txn *badger.Txn) error { 105 | group = s.GetGroup(txn, groupId) 106 | return nil 107 | }) 108 | return group 109 | } 110 | 111 | func (s *BFTRaftServer) SaveGroup(txn *badger.Txn, group *pb.RaftGroup) error { 112 | if data, err := proto.Marshal(group); err == nil { 113 | dbKey := ComposeKeyPrefix(group.Id, GROUP_META) 114 | return txn.Set(dbKey, data, 0x00) 115 | } else { 116 | return err 117 | } 118 | } 119 | 120 | func (s *BFTRaftServer) SaveGroupNTXN(group *pb.RaftGroup) error { 121 | return s.DB.Update(func(txn *badger.Txn) error { 122 | return s.SaveGroup(txn, group) 123 | }) 124 | } 125 | 126 | func (s *BFTRaftServer) GetGroupHosts(txn *badger.Txn, groupId uint64) []*pb.Host { 127 | nodes := []*pb.Host{} 128 | peers := GetGroupPeersFromKV(txn, groupId) 129 | for _, peer := range peers { 130 | node := s.GetHost(txn, peer.Id) 131 | if node != nil { 132 | nodes = append(nodes, node) 133 | } else { 134 | log.Println(s.Id, "cannot find 
group node:", peer.Id)
		}
	}
	return nodes
}

// GetGroupHostsNTXN is the transaction-free wrapper around GetGroupHosts.
func (s *BFTRaftServer) GetGroupHostsNTXN(groupId uint64) []*pb.Host {
	result := []*pb.Host{}
	s.DB.View(func(txn *badger.Txn) error {
		result = s.GetGroupHosts(txn, groupId)
		return nil
	})
	return result
}

// GroupLeader reports the leader host of groupId. When the group is hosted
// locally the accurate leader is returned; otherwise a random member host is
// suggested (Accuate=false) so the client can retry against it.
func (s *BFTRaftServer) GroupLeader(groupId uint64) *pb.GroupLeader {
	res := &pb.GroupLeader{}
	if meta := s.GetOnboardGroup(groupId); meta != nil {
		node := s.GetHostNTXN(meta.Leader)
		if node == nil {
			log.Println("cannot get node for group leader")
		}
		res = &pb.GroupLeader{
			Node:    node,
			Accuate: true,
		}
	} else {
		// group not on this host: select a member host randomly.
		// BUG FIX: the original used `res := &pb.GroupLeader{...}` here,
		// shadowing the outer `res`; the randomly chosen node was always
		// discarded and an empty GroupLeader was returned to the caller.
		res = &pb.GroupLeader{
			Accuate: false,
		}
		s.DB.View(func(txn *badger.Txn) error {
			hosts := s.GetGroupHosts(txn, groupId)
			if len(hosts) > 0 {
				res.Node = hosts[rand.Intn(len(hosts))]
			} else {
				log.Println("cannot get group leader")
			}
			return nil
		})
	}
	return res
}

// GetOnboardGroup returns the locally hosted runtime group for id, or nil.
func (s *BFTRaftServer) GetOnboardGroup(id uint64) *RTGroup {
	k := strconv.Itoa(int(id))
	if meta, found := s.GroupsOnboard.Get(k); found {
		return meta.(*RTGroup)
	}
	return nil
}

// SetOnboardGroup registers meta as a locally hosted group.
func (s *BFTRaftServer) SetOnboardGroup(meta *RTGroup) {
	// BUG FIX: the original computed strconv.Itoa(int(meta.Group.Id)) before
	// this nil check, so a nil meta crashed on the dereference instead of
	// reaching the intended panic message.
	if meta == nil {
		panic("group is nil")
	}
	k := strconv.Itoa(int(meta.Group.Id))
	s.GroupsOnboard.Set(k, meta)
}

// RefreshTimer pushes the group timeout forward by a randomized interval
// scaled by mult.
func (m *RTGroup) RefreshTimer(mult float32) {
	m.Timeout = time.Now().Add(time.Duration(RandomTimeout(mult)) * time.Millisecond)
}

// StartTimeWheel launches the background loop that fires role-specific work
// (elections, heartbeats, observer catch-up) whenever the timeout expires.
func (m *RTGroup) StartTimeWheel() {
	go func() {
		for {
			if m.Timeout.After(time.Now()) {
				time.Sleep(100 * time.Millisecond)
				continue
			}
			m.Lock.Lock()
			if m.Role == CANDIDATE {
				//
is candidate but vote expired, start a new vote term 210 | log.Println(m.Group.Id, "started a new election") 211 | m.BecomeCandidate() 212 | } else if m.Role == FOLLOWER { 213 | if m.Leader == m.Server.Id { 214 | panic(fmt.Sprint("Follower is leader for group:", m.Group)) 215 | } 216 | // not leader 217 | log.Println(m.Server.Id, "is candidate") 218 | m.BecomeCandidate() 219 | } else if m.Role == LEADER { 220 | // is leader, send heartbeat 221 | m.SendFollowersHeartbeat(context.Background()) 222 | } else if m.Role == OBSERVER { 223 | // update local data 224 | m.PullAndCommitGroupLogs() 225 | m.RefreshTimer(5) 226 | } 227 | m.Lock.Unlock() 228 | } 229 | }() 230 | } 231 | 232 | func (m *RTGroup) AppendEntries(ctx context.Context, req *pb.AppendEntriesRequest) (*pb.AppendEntriesResponse, error) { 233 | m.Lock.Lock() 234 | defer m.Lock.Unlock() 235 | group := m.Group 236 | groupId := group.Id 237 | reqLeaderId := req.LeaderId 238 | leaderPeer := m.GroupPeers[reqLeaderId] 239 | lastLogHash := m.LastEntryHashNTXN() 240 | // log.Println("append log from", req.LeaderId, "to", s.Id, "entries:", len(req.Entries)) 241 | lastEntryIndex := m.LastEntryIndexNTXN() 242 | response := &pb.AppendEntriesResponse{ 243 | Group: m.Group.Id, 244 | Term: group.Term, 245 | Index: lastEntryIndex, 246 | Successed: false, 247 | Convinced: false, 248 | Hash: lastLogHash, 249 | Signature: m.Server.Sign(lastLogHash), 250 | Peer: m.Server.Id, 251 | } 252 | // verify group and leader existence 253 | if group == nil || leaderPeer == nil { 254 | return nil, errors.New("host or group not existed on append entries") 255 | } 256 | // check leader transfer 257 | if len(req.QuorumVotes) > 0 && req.LeaderId != m.Leader { 258 | if req.Term < m.Group.Term { 259 | return nil, errors.New( 260 | fmt.Sprint("cannot become a follower when append entries due term compare failed", 261 | req.Term, "/", m.Group.Term), 262 | ) 263 | } 264 | if !m.BecomeFollower(req) { 265 | return nil, errors.New("cannot become a 
follower when append entries due to votes") 266 | } 267 | } else if req.LeaderId != m.Leader { 268 | return nil, errors.New("leader not matches when append entries") 269 | } 270 | log.Println("has last entry index:", lastEntryIndex, "for", groupId) 271 | response.Convinced = true 272 | // verify signature 273 | if leaderPublicKey := m.Server.GetHostPublicKey(m.Leader); leaderPublicKey != nil { 274 | signData := AppendLogEntrySignData(group.Id, group.Term, req.PrevLogIndex, req.PrevLogTerm) 275 | if err := utils.VerifySign(leaderPublicKey, req.Signature, signData); err != nil { 276 | // log.Println("leader signature not right when append entries:", err) 277 | // TODO: Fix signature verification, crypto/rsa: verification error 278 | // return response, nil 279 | } 280 | } else { 281 | return nil, errors.New("cannot get leader public key when append entries") 282 | } 283 | m.Timeout = time.Now().Add(10 * time.Second) 284 | if len(req.Entries) > 0 { 285 | log.Println("appending new entries for:", m.Group.Id, "total", len(req.Entries)) 286 | // check last log matches the first provided by the leader 287 | // this strategy assumes split brain will never happened (on internet) 288 | // the leader will always provide the entries no more than it needed 289 | // if the leader failed to provide the right first entries, the follower 290 | // will not commit the log but response with current log index and term instead 291 | // the leader should response immediately for failed follower response 292 | lastLogIdx := m.LastEntryIndexNTXN() 293 | nextLogIdx := lastLogIdx + 1 294 | lastLog := m.LastLogEntryNTXN() 295 | lastLogTerm := uint64(0) 296 | if lastLog != nil { 297 | lastLogTerm = lastLog.Term 298 | } 299 | if req.PrevLogIndex == lastLogIdx && req.Entries[0].Index == nextLogIdx { // index matched 300 | if req.PrevLogTerm != lastLogTerm { 301 | // log mismatch, cannot preceded 302 | // what to do next will leave to the leader 303 | err := fmt.Sprint("cannot get leader public 
key when append entries", req.PrevLogTerm, lastLogTerm) 304 | return nil, errors.New(err) 305 | } else { 306 | // first log matched 307 | // but we still need to check hash for upcoming logs 308 | expectedHash := m.LastEntryHashNTXN() 309 | for i := nextLogIdx; i < nextLogIdx+uint64(len(req.Entries)); i++ { 310 | entry := req.Entries[i-nextLogIdx] 311 | cmd := entry.Command 312 | expectedHash, _ = utils.LogHash(expectedHash, i, cmd.FuncId, cmd.Arg) 313 | if entry.Index != i || !bytes.Equal(entry.Hash, expectedHash) { 314 | log.Println("log mismatch", entry.Index, "-", i, ",", entry.Hash, "-", expectedHash) 315 | return response, nil 316 | } 317 | if !m.Server.VerifyCommandSign(entry.Command) { 318 | // TODO: fix signature verification 319 | // log.Println("log verification failed") 320 | // return response, nil 321 | } 322 | } 323 | // here start the loop of sending approve request to all peers 324 | // the followers may have multiply uncommitted entries so we need to 325 | // approve them one by one and wait their response for confirmation. 326 | // this is crucial to ensure log correctness and updated 327 | // to respond to 'ApproveAppend' RPC, the server should response with: 328 | // 1. appended if the server have already committed the entry 329 | // (this follower was fallen behind) 330 | // 2. delayed if the server is also waiting for other peers to confirm or 331 | // it is not yet reach to this log index 332 | // 3. failed if the group/peer/signature check was failed 333 | // so there is 2 way to make the append confirmation 334 | // 1. through the 'ApprovedAppend' from other peers 335 | // 2. 
through the appended 'ApproveAppendResponse' for catch up 336 | //groupPeers := s.OnboardGroupPeersSlice(groupId) 337 | for _, entry := range req.Entries { 338 | log.Println("trying to append log", entry.Index, "for group", groupId, "total", len(req.Entries)) 339 | //for _, peer := range groupPeers { 340 | // if peer.Host == s.Id { 341 | // continue 342 | // } 343 | // if node := s.GetHostNTXN(peer.Host); node != nil { 344 | // if client, err := utils.GetClusterRPC(node.ServerAddr); err == nil { 345 | // go func() { 346 | // log.Println("ask others for append approval for group:", groupId, "index:", entry.Index) 347 | // if approveRes, err := client.ApproveAppend(ctx, response); err == nil { 348 | // if err := utils.VerifySign( 349 | // s.GetHostPublicKey(node.Id), 350 | // approveRes.Signature, 351 | // ApproveAppendSignData(approveRes), 352 | // ); err == nil { 353 | // if approveRes.Appended && !approveRes.Delayed && !approveRes.Failed { 354 | // // log.Println("node", peer.Host, "approved", approveRes) 355 | // s.PeerApprovedAppend(groupId, entry.Index, peer.Id, groupPeers, true) 356 | // } else { 357 | // log.Println("node", peer.Host, "returned approval", approveRes) 358 | // } 359 | // } else { 360 | // log.Println("error on verify approve signature") 361 | // } 362 | // } 363 | // }() 364 | // } 365 | // } else { 366 | // if node == nil { 367 | // log.Println("cannot get node ", peer.Host, " for send approval append logs") 368 | // } 369 | // } 370 | //} 371 | if m.WaitLogApproved(entry.Index) { 372 | m.Server.DB.Update(func(txn *badger.Txn) error { 373 | m.AppendEntryToLocal(txn, entry) 374 | return nil 375 | }) 376 | } 377 | clientId := entry.Command.ClientId 378 | result := m.CommitGroupLog(entry) 379 | client := m.Server.GetHostNTXN(clientId) 380 | if client != nil { 381 | if rpc, err := m.Server.ClientRPCs.Get(client.ServerAddr); err == nil { 382 | nodeId := m.Server.Id 383 | reqId := entry.Command.RequestId 384 | signData := 
utils.CommandSignData(m.Group.Id, nodeId, reqId, *result) 385 | if _, err := rpc.rpc.ResponseCommand(ctx, &cpb.CommandResult{ 386 | Group: groupId, 387 | NodeId: nodeId, 388 | RequestId: reqId, 389 | Result: *result, 390 | Signature: m.Server.Sign(signData), 391 | }); err != nil { 392 | log.Println("cannot response command to ", clientId, ":", err) 393 | } 394 | } 395 | response.Term = entry.Term 396 | response.Index = entry.Index 397 | response.Hash = entry.Hash 398 | response.Signature = m.Server.Sign(entry.Hash) 399 | } else { 400 | log.Println("cannot get node", entry.Command.ClientId, "for response command") 401 | } 402 | log.Println("done appending log", entry.Index, "for group", groupId, "total", len(req.Entries)) 403 | } 404 | response.Successed = true 405 | } 406 | } else { 407 | log.Println( 408 | m.Server.Id, "log mismatch: prev index", 409 | req.PrevLogIndex, "-", lastLogIdx, 410 | "next index", req.Entries[0].Index, "-", nextLogIdx, 411 | ) 412 | } 413 | } 414 | log.Println("report back to leader index:", response.Index) 415 | return response, nil 416 | } 417 | 418 | func (m *RTGroup) SendFollowersHeartbeat(ctx context.Context) { 419 | m.Lock.Lock() 420 | defer m.Lock.Unlock() 421 | m.RefreshTimer(1) 422 | num_peers := len(m.GroupPeers) 423 | completion := make(chan *pb.AppendEntriesResponse, num_peers) 424 | sentMsgs := 0 425 | uncommittedEntries := map[uint64][]*pb.LogEntry{} 426 | peerPrevEntry := map[uint64]*pb.LogEntry{} 427 | for peerId, peer := range m.GroupPeers { 428 | if peerId != m.Server.Id { 429 | entries, prevEntry := m.PeerUncommittedLogEntries(peer) 430 | uncommittedEntries[peerId] = entries 431 | peerPrevEntry[peerId] = prevEntry 432 | node := m.Server.GetHostNTXN(peerId) 433 | if node == nil { 434 | log.Println("cannot get node for send peer uncommitted log entries") 435 | completion <- nil 436 | return 437 | } 438 | votes := []*pb.RequestVoteResponse{} 439 | if m.SendVotesForPeers[m.Server.Id] { 440 | votes = m.Votes 441 | } 442 | 
signData := AppendLogEntrySignData(m.Group.Id, m.Group.Term, prevEntry.Index, prevEntry.Term) 443 | signature := m.Server.Sign(signData) 444 | if client, err := utils.GetClusterRPC(node.ServerAddr); err == nil { 445 | sentMsgs++ 446 | go func() { 447 | if appendResult, err := client.AppendEntries(ctx, &pb.AppendEntriesRequest{ 448 | Group: m.Group.Id, 449 | Term: m.Group.Term, 450 | LeaderId: m.Server.Id, 451 | PrevLogIndex: prevEntry.Index, 452 | PrevLogTerm: prevEntry.Term, 453 | Signature: signature, 454 | QuorumVotes: votes, 455 | Entries: entries, 456 | }); err == nil { 457 | // WARN: the result may not from the peer we requested 458 | completion <- appendResult 459 | } else { 460 | log.Println("append log failed:", err) 461 | completion <- nil 462 | } 463 | 464 | }() 465 | } else { 466 | log.Println("error on append entry logs to followers:", err) 467 | } 468 | } 469 | } 470 | log.Println("sending log for group", m.Group , "to", sentMsgs, "followers with", num_peers, "peers") 471 | for i := 0; i < sentMsgs; i++ { 472 | response := <-completion 473 | if response == nil { 474 | log.Println("append entry response is nil") 475 | continue 476 | } 477 | peerId := response.Peer 478 | peer := m.GroupPeers[peerId] 479 | publicKey := m.Server.GetHostPublicKey(peerId) 480 | if publicKey == nil { 481 | log.Println("cannot find public key for:", peerId) 482 | continue 483 | } 484 | if err := utils.VerifySign(publicKey, response.Signature, response.Hash); err != nil { 485 | // TODO: fix signature verification 486 | // log.Println("cannot verify append response signature:", err) 487 | // continue 488 | } 489 | log.Println(peerId, "append response with last index:", response.Index) 490 | if response.Index != peer.MatchIndex { 491 | log.Println( 492 | "#", goroutine.GoroutineId(), 493 | "peer:", peer.Id, "index changed:", peer.MatchIndex, "->", response.Index) 494 | peer.MatchIndex = response.Index 495 | peer.NextIndex = peer.MatchIndex + 1 496 | m.GroupPeers[peer.Id] = peer 
497 | if err := m.Server.DB.Update(func(txn *badger.Txn) error { 498 | return m.Server.SavePeer(txn, peer) 499 | }); err != nil { 500 | log.Println("cannot save peer:", peer.Id, err) 501 | } 502 | } else { 503 | // log.Println(peer.Id, "last index unchanged:", response.Index, "-", peer.MatchIndex) 504 | } 505 | m.SendVotesForPeers[peerId] = !response.Convinced 506 | } 507 | } 508 | 509 | func (m *RTGroup) ApproveAppend(ctx context.Context, req *pb.AppendEntriesResponse) (*pb.ApproveAppendResponse, error) { 510 | groupId := req.Group 511 | response := &pb.ApproveAppendResponse{ 512 | Group: groupId, 513 | Peer: m.Server.Id, 514 | Index: req.Index, 515 | Appended: false, 516 | Delayed: false, 517 | Failed: true, 518 | Signature: []byte{}, 519 | } 520 | //response.Signature = m.Server.Sign(ApproveAppendSignData(response)) 521 | //peerId := req.Peer 522 | //if _, foundPeer := m.GroupPeers[peerId]; !foundPeer { 523 | // log.Println("cannot approve append due to unexisted peer") 524 | // return response, nil 525 | //} 526 | //response.Peer = m.Server.Id 527 | //if utils.VerifySign(m.Server.GetHostPublicKey(peerId), req.Signature, req.Hash) != nil { 528 | // log.Println("cannot approve append due to signature verfication") 529 | // return response, nil 530 | //} 531 | //lastIndex := m.LastEntryIndexNTXN() 532 | //if (lastIndex == req.Index && m.Leader == m.Server.Id) || lastIndex > req.Index { 533 | // // this node will never have a chance to provide it's vote to the log 534 | // // will check correctness and vote specifically for client peer without broadcasting 535 | // m.Server.DB.View(func(txn *badger.Txn) error { 536 | // entry := m.GetLogEntry(txn, req.Index) 537 | // if entry != nil && bytes.Equal(entry.Hash, req.Hash) { 538 | // response.Appended = true 539 | // response.Failed = false 540 | // } 541 | // return nil 542 | // }) 543 | //} else { 544 | // // this log entry have not yet been appended 545 | // // in this case, this peer will just cast client peer vote 
546 | // // client peer should receive it's own vote from this peer by async 547 | // groupPeers := m.OnboardGroupPeersSlice() 548 | // m.PeerApprovedAppend(req.Index, peerId, groupPeers, true) 549 | // response.Delayed = true 550 | // response.Failed = false 551 | //} 552 | //response.Signature = s.Sign(ApproveAppendSignData(response)) 553 | //log.Println( 554 | // "approved append from", req.Peer, "for", req.Group, "to", req.Index, 555 | // "failed:", response.Failed, "approved:", response.Appended, "delayed:", response.Delayed) 556 | return response, nil 557 | } 558 | 559 | func (m *RTGroup) RequestVote(ctx context.Context, req *pb.RequestVoteRequest) (*pb.RequestVoteResponse, error) { 560 | m.VoteLock.Lock() 561 | defer m.VoteLock.Unlock() 562 | // all of the leader transfer verification happens here 563 | group := m.Group 564 | groupId := m.Group.Id 565 | groupTerm := group.Term 566 | reqTerm := req.Term 567 | lastLogEntry := m.LastLogEntryNTXN() 568 | vote := &pb.RequestVoteResponse{ 569 | Group: groupId, 570 | Term: group.GetTerm(), 571 | LogIndex: lastLogEntry.Index, 572 | CandidateId: req.CandidateId, 573 | Voter: m.Server.Id, 574 | Granted: false, 575 | Signature: []byte{}, 576 | } 577 | log.Println("vote request from", req.CandidateId, ", term", reqTerm) 578 | vote.Signature = m.Server.Sign(RequestVoteResponseSignData(vote)) 579 | if group == nil { 580 | log.Println("cannot grant vote to", req.CandidateId, ", cannot found group") 581 | return vote, nil 582 | } 583 | if m.Role == LEADER && reqTerm <= m.Group.Term { 584 | log.Println("leader will not vote for peer", req.CandidateId, "term", req.Term, "at term", m.Group.Term) 585 | } 586 | if groupTerm >= reqTerm || lastLogEntry.Index > req.LogIndex { 587 | // leader does not catch up 588 | log.Println("cannot grant vote to", req.CandidateId, ", candidate logs left behind") 589 | return vote, nil 590 | } 591 | if reqTerm < m.LastVotedTerm { 592 | log.Println("voting for term", m.LastVotedTerm, "but got 
request for term", lastLogEntry.Term) 593 | } 594 | if reqTerm-groupTerm > utils.MAX_TERM_BUMP { 595 | // the candidate bump terms too fast 596 | log.Println("cannot grant vote to", req.CandidateId, ", term bump too fast", group.Term, "->", reqTerm) 597 | return vote, nil 598 | } 599 | if reqTerm == m.LastVotedTerm && m.LastVotedTo != 0 && m.LastVotedTo != req.CandidateId { 600 | // already voted to other peer 601 | log.Println("cannot grant vote to", req.CandidateId, ", already voted to", m.LastVotedTo, ", term", reqTerm) 602 | return vote, nil 603 | } 604 | // TODO: check if the candidate really get the logs it claimed when the voter may fallen behind 605 | // Lazy voting 606 | // the condition for casting lazy voting is to wait until this peer turned into candidate 607 | // we also need to check it the peer candidate term is just what the request indicated 608 | waitedCounts := 0 609 | interval := 500 610 | secsToWait := 10 611 | intervalCount := secsToWait * 1000 / interval 612 | for true { 613 | <-time.After(time.Duration(interval) * time.Millisecond) 614 | waitedCounts++ 615 | if m.Role != FOLLOWER { 616 | vote.Granted = true 617 | vote.Signature = m.Server.Sign(RequestVoteResponseSignData(vote)) 618 | m.LastVotedTo = req.CandidateId 619 | m.LastVotedTerm = req.Term 620 | log.Println("grant vote to", req.CandidateId) 621 | break 622 | } else { 623 | // log.Println("current role is", m.Role, "waiting to become a candidate", ", term", req.Term) 624 | } 625 | if waitedCounts >= intervalCount { 626 | // timeout, will not grant 627 | log.Println("cannot grant vote to", req.CandidateId, ", time out") 628 | break 629 | } 630 | } 631 | return vote, nil 632 | } 633 | 634 | func (m *RTGroup) RPCGroupMembers(ctx context.Context, req *pb.GroupId) (*pb.GroupMembersResponse, error) { 635 | members := []*pb.GroupMember{} 636 | for _, p := range m.GroupPeers { 637 | host := m.Server.GetHostNTXN(p.Id) 638 | if host == nil { 639 | log.Println("cannot get host for group 
members") 640 | continue 641 | } 642 | members = append(members, &pb.GroupMember{ 643 | Peer: p, 644 | Host: host, 645 | }) 646 | } 647 | return &pb.GroupMembersResponse{ 648 | Members: members, 649 | Signature: m.Server.Sign(GetMembersSignData(members)), 650 | LastEntry: m.LastLogEntryNTXN(), 651 | }, nil 652 | } 653 | -------------------------------------------------------------------------------- /server/hosts.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "crypto/rsa" 5 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 6 | "github.com/PomeloCloud/BFTRaft4go/utils" 7 | "github.com/dgraph-io/badger" 8 | "github.com/golang/protobuf/proto" 9 | "github.com/patrickmn/go-cache" 10 | "log" 11 | "strconv" 12 | ) 13 | 14 | type NodeIterator struct { 15 | prefix []byte 16 | data *badger.Iterator 17 | server *BFTRaftServer 18 | } 19 | 20 | func (s *BFTRaftServer) GetHost(txn *badger.Txn, nodeId uint64) *pb.Host { 21 | cacheKey := strconv.Itoa(int(nodeId)) 22 | if cacheNode, cachedFound := s.Hosts.Get(cacheKey); cachedFound { 23 | return cacheNode.(*pb.Host) 24 | } 25 | if item, err := txn.Get(append(ComposeKeyPrefix(HOST_LIST_GROUP, HOST_LIST), utils.U64Bytes(nodeId)...)); err == nil { 26 | data := ItemValue(item) 27 | if data == nil { 28 | return nil 29 | } 30 | node := pb.Host{} 31 | proto.Unmarshal(*data, &node) 32 | s.Hosts.Set(cacheKey, &node, cache.DefaultExpiration) 33 | return &node 34 | } else { 35 | log.Println("cannot get host", nodeId, "on", s.Id, ":", err) 36 | return nil 37 | } 38 | } 39 | 40 | func (s *BFTRaftServer) GetHostNTXN(nodeId uint64) *pb.Host { 41 | node := &pb.Host{} 42 | s.DB.View(func(txn *badger.Txn) error { 43 | node = s.GetHost(txn, nodeId) 44 | return nil 45 | }) 46 | return node 47 | } 48 | 49 | func (s *BFTRaftServer) GetHostPublicKey(nodeId uint64) *rsa.PublicKey { 50 | cacheKey := strconv.Itoa(int(nodeId)) 51 | if cachedKey, cacheFound := 
s.NodePublicKeys.Get(cacheKey); cacheFound { 52 | return cachedKey.(*rsa.PublicKey) 53 | } 54 | node := s.GetHostNTXN(nodeId) 55 | if node == nil { 56 | log.Println("cannot get node for get it's public key") 57 | return nil 58 | } 59 | if key, err := utils.ParsePublicKey(node.PublicKey); err == nil { 60 | s.NodePublicKeys.Set(cacheKey, key, cache.DefaultExpiration) 61 | return key 62 | } else { 63 | return nil 64 | } 65 | } 66 | 67 | func (s *BFTRaftServer) SaveHost(txn *badger.Txn, node *pb.Host) error { 68 | if data, err := proto.Marshal(node); err == nil { 69 | dbKey := append(ComposeKeyPrefix(HOST_LIST_GROUP, HOST_LIST), utils.U64Bytes(node.Id)...) 70 | return txn.Set(dbKey, data, 0x00) 71 | } else { 72 | return err 73 | } 74 | } 75 | 76 | func (s *BFTRaftServer) SaveHostNTXN(node *pb.Host) error { 77 | return s.DB.Update(func(txn *badger.Txn) error { 78 | return s.SaveHost(txn, node) 79 | }) 80 | } 81 | 82 | func (s *BFTRaftServer) VerifyCommandSign(cmd *pb.CommandRequest) bool { 83 | signData := utils.ExecCommandSignData(cmd) 84 | publicKey := s.GetHostPublicKey(cmd.ClientId) 85 | if publicKey == nil { 86 | return false 87 | } else { 88 | return utils.VerifySign(publicKey, cmd.Signature, signData) == nil 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /server/log_entries.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 7 | "github.com/PomeloCloud/BFTRaft4go/utils" 8 | "github.com/dgraph-io/badger" 9 | "github.com/golang/protobuf/proto" 10 | "github.com/pkg/errors" 11 | "log" 12 | ) 13 | 14 | type LogEntryIterator struct { 15 | prefix []byte 16 | data *badger.Iterator 17 | } 18 | 19 | func LogEntryFromKVItem(item *badger.Item) *pb.LogEntry { 20 | entry := pb.LogEntry{} 21 | itemData := ItemValue(item) 22 | if itemData == nil { 23 | return nil 24 | } 25 | 
proto.Unmarshal(*itemData, &entry) 26 | return &entry 27 | } 28 | 29 | func (liter *LogEntryIterator) Current() *pb.LogEntry { 30 | if liter.data.ValidForPrefix(liter.prefix) { 31 | return LogEntryFromKVItem(liter.data.Item()) 32 | } else { 33 | return nil 34 | } 35 | } 36 | 37 | func (liter *LogEntryIterator) Next() *pb.LogEntry { 38 | if liter.data.Valid() { 39 | liter.data.Next() 40 | return liter.Current() 41 | } else { 42 | return nil 43 | } 44 | } 45 | 46 | func (liter *LogEntryIterator) Close() { 47 | liter.data.Close() 48 | } 49 | 50 | func (m *RTGroup) ReversedLogIterator(txn *badger.Txn) LogEntryIterator { 51 | keyPrefix := ComposeKeyPrefix(m.Group.Id, LOG_ENTRIES) 52 | iter := txn.NewIterator(badger.IteratorOptions{Reverse: true}) 53 | iter.Seek(append(keyPrefix, utils.U64Bytes(^uint64(0))...)) // search from max possible index 54 | return LogEntryIterator{ 55 | prefix: keyPrefix, 56 | data: iter, 57 | } 58 | } 59 | 60 | func (m *RTGroup) LastLogEntry(txn *badger.Txn) *pb.LogEntry { 61 | iter := m.ReversedLogIterator(txn) 62 | entry := iter.Current() 63 | iter.Close() 64 | return entry 65 | } 66 | 67 | func (m *RTGroup) LastLogEntryNTXN() *pb.LogEntry { 68 | entry := &pb.LogEntry{} 69 | m.Server.DB.View(func(txn *badger.Txn) error { 70 | iter := m.ReversedLogIterator(txn) 71 | entry = iter.Current() 72 | iter.Close() 73 | return nil 74 | }) 75 | return entry 76 | } 77 | 78 | func (m *RTGroup) LastEntryHash(txn *badger.Txn) []byte { 79 | var hash []byte 80 | lastLog := m.LastLogEntry(txn) 81 | if lastLog == nil { 82 | hash, _ = utils.SHA1Hash([]byte(fmt.Sprint("GROUP:", m.Group.Id))) 83 | } else { 84 | hash = lastLog.Hash 85 | } 86 | return hash 87 | } 88 | 89 | func (m *RTGroup) LastEntryIndex(txn *badger.Txn) uint64 { 90 | lastLog := m.LastLogEntry(txn) 91 | index := uint64(0) 92 | if lastLog != nil { 93 | index = lastLog.Index 94 | } 95 | return index 96 | } 97 | 98 | func (m *RTGroup) LastEntryIndexNTXN() uint64 { 99 | index := uint64(0) 100 | 
m.Server.DB.View(func(txn *badger.Txn) error { 101 | index = m.LastEntryIndex(txn) 102 | return nil 103 | }) 104 | return index 105 | } 106 | 107 | func (m *RTGroup) LastEntryHashNTXN() []byte { 108 | hash := []byte{} 109 | m.Server.DB.View(func(txn *badger.Txn) error { 110 | hash = m.LastEntryHash(txn) 111 | return nil 112 | }) 113 | return hash 114 | } 115 | 116 | func LogEntryKey(groupId uint64, entryIndex uint64) []byte { 117 | return append(ComposeKeyPrefix(groupId, LOG_ENTRIES), utils.U64Bytes(entryIndex)...) 118 | } 119 | 120 | func (m *RTGroup) AppendEntryToLocal(txn *badger.Txn, entry *pb.LogEntry) error { 121 | group_id := entry.Command.Group 122 | if group_id != m.Group.Id { 123 | panic("log group id not match the actual group work on it") 124 | } 125 | key := LogEntryKey(group_id, entry.Index) 126 | _, err := txn.Get(key) 127 | if err == badger.ErrKeyNotFound { 128 | cmd := entry.Command 129 | hash, _ := utils.LogHash(m.LastEntryHash(txn), entry.Index, cmd.FuncId, cmd.Arg) 130 | if !bytes.Equal(hash, entry.Hash) { 131 | return errors.New("Log entry hash mismatch") 132 | } 133 | if data, err := proto.Marshal(entry); err == nil { 134 | txn.Set(key, data, 0x00) 135 | return nil 136 | } else { 137 | log.Println("cannot append log to local:", err) 138 | return err 139 | } 140 | } else if err == nil { 141 | return errors.New("log existed") 142 | } else { 143 | return err 144 | } 145 | } 146 | 147 | func (m *RTGroup) GetLogEntry(txn *badger.Txn, entryIndex uint64) *pb.LogEntry { 148 | key := LogEntryKey(m.Group.Id, entryIndex) 149 | if item, err := txn.Get(key); err == nil { 150 | return LogEntryFromKVItem(item) 151 | } else { 152 | return nil 153 | } 154 | } 155 | 156 | func AppendLogEntrySignData(groupId uint64, term uint64, prevIndex uint64, prevTerm uint64) []byte { 157 | return []byte(fmt.Sprint(groupId, "-", term, "-", prevIndex, "-", prevTerm)) 158 | } 159 | 160 | func EB(b bool) byte { 161 | if b { 162 | return byte(1) 163 | } else { 164 | return 
byte(0) 165 | } 166 | } 167 | 168 | func ApproveAppendSignData(res *pb.ApproveAppendResponse) []byte { 169 | bs1 := append(utils.U64Bytes(res.Peer), EB(res.Appended), EB(res.Delayed), EB(res.Failed)) 170 | return append(bs1, utils.U64Bytes(res.Index)...) 171 | } 172 | 173 | func (m *RTGroup) CommitGroupLog(entry *pb.LogEntry) *[]byte { 174 | funcId := entry.Command.FuncId 175 | fun := m.Server.FuncReg[funcId] 176 | input := entry.Command.Arg 177 | result := fun(&input, entry) 178 | return &result 179 | } 180 | -------------------------------------------------------------------------------- /server/membership.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "crypto/x509" 6 | "errors" 7 | "fmt" 8 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 9 | "github.com/PomeloCloud/BFTRaft4go/utils" 10 | "github.com/dgraph-io/badger" 11 | "github.com/golang/protobuf/proto" 12 | "log" 13 | "time" 14 | ) 15 | 16 | const ( 17 | NODE_JOIN = 0 18 | REG_NODE = 1 19 | NEW_CLIENT = 2 20 | NEW_GROUP = 3 21 | ) 22 | 23 | func (s *BFTRaftServer) RegisterMembershipCommands() { 24 | s.RegisterRaftFunc(NODE_JOIN, s.SMNodeJoin) 25 | s.RegisterRaftFunc(REG_NODE, s.SMRegHost) 26 | s.RegisterRaftFunc(NEW_CLIENT, s.SMNewClient) 27 | s.RegisterRaftFunc(NEW_GROUP, s.SMNewGroup) 28 | } 29 | 30 | // Register a node into the network 31 | // The node may be new or it was rejoined with new address 32 | func (s *BFTRaftServer) SMRegHost(arg *[]byte, entry *pb.LogEntry) []byte { 33 | node := pb.Host{} 34 | if err := proto.Unmarshal(*arg, &node); err == nil { 35 | node.Id = utils.HashPublicKeyBytes(node.PublicKey) 36 | node.Online = true 37 | s.DB.Update(func(txn *badger.Txn) error { 38 | if err := s.SaveHost(txn, &node); err == nil { 39 | return nil 40 | } else { 41 | log.Println("error on saving host:", err) 42 | return err 43 | } 44 | }) 45 | log.Println("we have node", node.Id, "on the network") 46 | return 
[]byte{1} 47 | } else { 48 | log.Println("error on decoding reg host:", err) 49 | return []byte{0} 50 | } 51 | } 52 | 53 | func (s *BFTRaftServer) SMNodeJoin(arg *[]byte, entry *pb.LogEntry) []byte { 54 | req := pb.NodeJoinGroupEntry{} 55 | if err := proto.Unmarshal(*arg, &req); err == nil { 56 | node := entry.Command.ClientId 57 | // this should be fine, a public key can use both for client and node 58 | groupId := req.Group 59 | peer := pb.Peer{ 60 | Id: node, 61 | Group: groupId, 62 | NextIndex: 0, 63 | MatchIndex: 0, 64 | } 65 | log.Println("SM node", node, "join", groupId) 66 | if err := s.DB.Update(func(txn *badger.Txn) error { 67 | if s.GetHost(txn, node) == nil { 68 | return errors.New("cannot find node") 69 | } 70 | if node == s.Id { 71 | // skip if current node is the joined node 72 | // when joined a groupId, the node should do all of 73 | // those following things by itself after the log is replicated 74 | log.Println("skip add current node join from sm") 75 | return nil 76 | } 77 | group := s.GetGroup(txn, groupId) 78 | // check if this group exceeds it's replication 79 | if len(GetGroupPeersFromKV(txn, groupId)) >= int(group.Replications) { 80 | return errors.New("exceed replications") 81 | } 82 | // first, save the peer 83 | return s.SavePeer(txn, &peer) 84 | }); err != nil { 85 | log.Println("cannot save peer for join", err) 86 | return []byte{0} 87 | } 88 | // next, check if this node is in the groupId. Add it on board if found. 
89 | // because membership logs entries will be replicated on every node 90 | // this function will also be executed every where 91 | if meta := s.GetOnboardGroup(groupId); meta != nil { 92 | node := s.GetHostNTXN(entry.Command.ClientId) 93 | if node == nil { 94 | log.Println("cannot get node for SM node join") 95 | } 96 | address := node.ServerAddr 97 | inv := &pb.GroupInvitation{ 98 | Group: groupId, 99 | Leader: meta.Leader, 100 | Node: meta.Server.Id, 101 | } 102 | inv.Signature = s.Sign(InvitationSignature(inv)) 103 | if client, err := utils.GetClusterRPC(address); err == nil { 104 | go client.SendGroupInvitation(context.Background(), inv) 105 | meta.GroupPeers[peer.Id] = &peer 106 | log.Println("we have new node ", node.Id, "join group", groupId) 107 | return []byte{1} 108 | } else { 109 | log.Println("cannot get cluster rpc for node join", err) 110 | return []byte{0} 111 | } 112 | } 113 | return []byte{1} 114 | } else { 115 | log.Println("cannot decode node join data", err) 116 | return []byte{0} 117 | } 118 | } 119 | 120 | func (s *BFTRaftServer) SMNewClient(arg *[]byte, entry *pb.LogEntry) []byte { 121 | // use for those hosts only want to make changes, and does not contribute it's resources 122 | client := pb.Host{} 123 | err := proto.Unmarshal(*arg, &client) 124 | if err != nil { 125 | log.Println("cannot decode new client data", err) 126 | return []byte{0} 127 | } 128 | client.Id = utils.HashPublicKeyBytes(client.PublicKey) 129 | if err := s.SaveHostNTXN(&client); err == nil { 130 | return []byte{1} 131 | } else { 132 | log.Println("cannot save host for new client", err) 133 | return []byte{0} 134 | } 135 | } 136 | 137 | func InvitationSignature(inv *pb.GroupInvitation) []byte { 138 | return []byte(fmt.Sprint(inv.Group, "-", inv.Node, "-", inv.Leader)) 139 | } 140 | 141 | func (s *BFTRaftServer) SMNewGroup(arg *[]byte, entry *pb.LogEntry) []byte { 142 | hostId := entry.Command.ClientId 143 | // create and make the creator the member of this group 144 | 
group := pb.RaftGroup{} 145 | err := proto.Unmarshal(*arg, &group) 146 | if err != nil { 147 | log.Println("cannot decode new group data", err) 148 | return []byte{0} 149 | } 150 | log.Println("creating new group", group.Id,"for:", hostId) 151 | // replication cannot be below 1 and cannot larger than 100 152 | if group.Replications < 1 || group.Replications > 100 { 153 | log.Println("invalid replications:", group.Replications) 154 | return []byte{0} 155 | } 156 | // generate peer 157 | peer := pb.Peer{ 158 | Id: hostId, 159 | Group: group.Id, 160 | NextIndex: 0, 161 | MatchIndex: 0, 162 | } 163 | if err := s.DB.Update(func(txn *badger.Txn) error { 164 | // the proposer will decide the id for the group, we need to check it's availability 165 | if s.GetGroup(txn, group.Id) != nil { 166 | return errors.New("group existed") 167 | } 168 | // regularize and save group 169 | group.Term = 0 170 | if err := s.SaveGroup(txn, &group); err != nil { 171 | return err 172 | } 173 | if err := s.SavePeer(txn, &peer); err == nil { 174 | return err 175 | } 176 | return nil 177 | }); err == nil { 178 | if s.Id == hostId { 179 | meta := NewRTGroup( 180 | s, hostId, 181 | map[uint64]*pb.Peer{peer.Id: &peer}, 182 | &group, LEADER, 183 | ) 184 | s.SetOnboardGroup(meta) 185 | go func() { 186 | s.PendingNewGroups[group.Id] <- nil 187 | }() 188 | } 189 | return utils.U64Bytes(entry.Index) 190 | } else { 191 | go func() { 192 | s.PendingNewGroups[group.Id] <- err 193 | }() 194 | log.Println("cannot save new group:", err) 195 | return []byte{0} 196 | } 197 | } 198 | 199 | func (s *BFTRaftServer) RegHost() error { 200 | groupId := uint64(utils.ALPHA_GROUP) 201 | publicKey := utils.PublicKeyFromPrivate(s.PrivateKey) 202 | publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) 203 | if err != nil { 204 | return err 205 | } 206 | host := pb.Host{ 207 | Id: s.Id, 208 | LastSeen: 0, 209 | Online: true, 210 | ServerAddr: s.Opts.Address, 211 | PublicKey: publicKeyBytes, 212 | } 213 | hostData, 
err := proto.Marshal(&host) 214 | if err != nil { 215 | return err 216 | } 217 | s.SaveHostNTXN(&host) // save itself first 218 | res, err := s.Client.ExecCommand(groupId, REG_NODE, hostData) 219 | if err != nil { 220 | return err 221 | } 222 | switch (*res)[0] { 223 | case 0: 224 | return errors.New("remote error") 225 | case 1: 226 | log.Println("node", s.Id, "registed") 227 | return nil 228 | } 229 | return errors.New("unexpected") 230 | } 231 | 232 | //func (s *BFTRaftServer) SyncAlphaLogs() { 233 | // alphaRPCs := s.Client.AlphaRPCs.Get() 234 | // alphaGroup := s.GetOnboardGroup(utils.ALPHA_GROUP) 235 | // lastLog := alphaGroup.LastLogEntryNTXN() 236 | // entris := utils.MajorityResponse(alphaRPCs, func(client pb.BFTRaftClient) (interface{}, []byte) { 237 | // entries, err := client.PullGroupLogs(context.Background(), &pb.PullGroupLogsResuest{ 238 | // Group: utils.ALPHA_GROUP, Index: lastLog.Index, 239 | // }) 240 | // if err != nil { 241 | // log.Println("error on sync alpha group:", err) 242 | // return nil, []byte{0} 243 | // } else { 244 | // return entries, []byte{1} 245 | // } 246 | // }) 247 | // alphaGroup.appe 248 | //} 249 | 250 | func (s *BFTRaftServer) NodeJoin(groupId uint64) error { 251 | log.Println(s.Id, ": join group:", groupId) 252 | joinEntry := pb.NodeJoinGroupEntry{Group: groupId} 253 | joinData, err := proto.Marshal(&joinEntry) 254 | if err != nil { 255 | return err 256 | } 257 | s.GroupInvitations[groupId] = make(chan *pb.GroupInvitation) 258 | res, err := s.Client.ExecCommand(utils.ALPHA_GROUP, NODE_JOIN, joinData) 259 | if err != nil { 260 | log.Println("error on join group:", err) 261 | return err 262 | } 263 | if (*res)[0] == 1 { 264 | hosts := s.GetGroupHostsNTXN(groupId) 265 | hostsMap := map[uint64]bool{} 266 | for _, h := range hosts { 267 | hostsMap[h.Id] = true 268 | } 269 | receivedEnoughInv := make(chan bool, 1) 270 | invitations := map[uint64]*pb.GroupInvitation{} 271 | invLeaders := []uint64{} 272 | expectedResponse := 
utils.ExpectedPlayers(len(hostsMap)) 273 | go func() { 274 | for inv := range s.GroupInvitations[groupId] { 275 | if _, isMember := hostsMap[inv.Node]; isMember { 276 | if _, hasInv := invitations[inv.Node]; !hasInv { 277 | invitations[inv.Node] = inv 278 | invLeaders = append(invLeaders, inv.Leader) 279 | } 280 | if len(invitations) >= expectedResponse { 281 | receivedEnoughInv <- true 282 | } 283 | } else { 284 | log.Println("cannot accept the invitation for group", groupId, "peer", inv.Node) 285 | } 286 | } 287 | }() 288 | select { 289 | case <-receivedEnoughInv: 290 | group := s.GetGroupNTXN(groupId) 291 | peer := &pb.Peer{ 292 | Id: s.Id, 293 | Group: groupId, 294 | NextIndex: 0, 295 | MatchIndex: 0, 296 | } 297 | groupPeers := map[uint64]*pb.Peer{} 298 | if err := s.DB.Update(func(txn *badger.Txn) error { 299 | groupPeers = GetGroupPeersFromKV(txn, peer.Group) 300 | return s.SavePeer(txn, peer) 301 | }); err == nil { 302 | leader := utils.PickMajority(invLeaders) 303 | log.Println("received enough invitations, will join to group", groupId, "with leader", leader) 304 | groupPeers[s.Id] = peer 305 | role := FOLLOWER 306 | if leader == s.Id { 307 | log.Println("node", s.Id, "joined as a leader for group:", groupId) 308 | leader = LEADER 309 | } 310 | meta := NewRTGroup( 311 | s, leader, groupPeers, 312 | group, role, 313 | ) 314 | s.SetOnboardGroup(meta) 315 | log.Println("node", peer.Id, "joined group", groupId) 316 | } 317 | return nil 318 | case <-time.After(30 * time.Second): 319 | close(s.GroupInvitations[groupId]) 320 | delete(s.GroupInvitations, groupId) 321 | return errors.New("receive invitation timeout") 322 | } 323 | } else { 324 | log.Println("cannot join node, remote end rejected") 325 | return errors.New("remote error") 326 | } 327 | } 328 | 329 | func (s *BFTRaftServer) NewGroup(group *pb.RaftGroup) error { 330 | groupData, err := proto.Marshal(group) 331 | if err != nil { 332 | return err 333 | } 334 | s.PendingNewGroups[group.Id] = make(chan 
error, 1) 335 | res, err := s.Client.ExecCommand(utils.ALPHA_GROUP, NEW_GROUP, groupData) 336 | if err != nil { 337 | log.Println("cannot decode new group:", err) 338 | return err 339 | } 340 | if len(*res) > 1 { 341 | // wait for the log to be committed 342 | select { 343 | case err := <-s.PendingNewGroups[group.Id]: 344 | return err 345 | case <-time.After(10 * time.Second): 346 | return errors.New("timeout") 347 | } 348 | } else { 349 | return errors.New("remote error") 350 | } 351 | } 352 | -------------------------------------------------------------------------------- /server/observer.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 6 | "github.com/PomeloCloud/BFTRaft4go/utils" 7 | "github.com/dgraph-io/badger" 8 | "log" 9 | ) 10 | 11 | func (m *RTGroup) PullAndCommitGroupLogs() { 12 | peerClients := []*pb.BFTRaftClient{} 13 | for _, peer := range m.GroupPeers { 14 | node := m.Server.GetHostNTXN(peer.Id) 15 | if rpc, err := utils.GetClusterRPC(node.ServerAddr); err == nil { 16 | peerClients = append(peerClients, &rpc) 17 | } 18 | } 19 | req := &pb.PullGroupLogsResuest{ 20 | Group: m.Group.Id, 21 | Index: m.LastEntryIndexNTXN() + 1, 22 | } 23 | // Pull entries 24 | entries := utils.MajorityResponse(peerClients, func(client pb.BFTRaftClient) (interface{}, []byte) { 25 | if entriesRes, err := client.PullGroupLogs(context.Background(), req); err == nil { 26 | entries := entriesRes.Entries 27 | if len(entries) == 0 { 28 | return entries, []byte{1} 29 | } else { 30 | return entries, entries[len(entries)-1].Hash 31 | } 32 | } 33 | return nil, []byte{} 34 | }).([]*pb.LogEntry) 35 | // now append and commit logs one by one 36 | for _, entry := range entries { 37 | needCommit := false 38 | if err := m.Server.DB.Update(func(txn *badger.Txn) error { 39 | if err := m.AppendEntryToLocal(txn, entry); err == nil { 40 | needCommit = 
true 41 | return nil 42 | } else { 43 | return err 44 | } 45 | }); err != nil { 46 | log.Println("cannot append entry to local when pulling", err) 47 | return 48 | } 49 | if needCommit { 50 | m.CommitGroupLog(entry) 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /server/peers.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 5 | "github.com/PomeloCloud/BFTRaft4go/utils" 6 | "github.com/dgraph-io/badger" 7 | "github.com/golang/protobuf/proto" 8 | "log" 9 | "strconv" 10 | ) 11 | 12 | func GetGroupPeersFromKV(txn *badger.Txn, group uint64) map[uint64]*pb.Peer { 13 | peers := map[uint64]*pb.Peer{} 14 | keyPrefix := ComposeKeyPrefix(group, GROUP_PEERS) 15 | iter := txn.NewIterator(badger.IteratorOptions{}) 16 | iter.Seek(append(keyPrefix, utils.U64Bytes(0)...)) // seek the head 17 | for iter.ValidForPrefix(keyPrefix) { 18 | item := iter.Item() 19 | item_data := ItemValue(item) 20 | peer := pb.Peer{} 21 | proto.Unmarshal(*item_data, &peer) 22 | peers[peer.Id] = &peer 23 | iter.Next() 24 | } 25 | iter.Close() 26 | return peers 27 | } 28 | 29 | func (m *RTGroup) PeerUncommittedLogEntries(peer *pb.Peer) ([]*pb.LogEntry, *pb.LogEntry) { 30 | entries_ := []*pb.LogEntry{} 31 | prevEntry := &pb.LogEntry{ 32 | Term: 0, 33 | Index: 0, 34 | } 35 | m.Server.DB.View(func(txn *badger.Txn) error { 36 | entries := []*pb.LogEntry{} 37 | iter := m.ReversedLogIterator(txn) 38 | nextLogIdx := peer.NextIndex 39 | for true { 40 | entry := iter.Current() 41 | if entry == nil { 42 | break 43 | } 44 | prevEntry = entry 45 | if entry.Index < nextLogIdx { 46 | break 47 | } 48 | entries = append(entries, entry) 49 | iter.Next() 50 | } 51 | if peer.NextIndex == 0 && peer.MatchIndex == 0 { 52 | // new peer, should set prevEntry = 0 53 | prevEntry = &pb.LogEntry{ 54 | Term: 0, 55 | Index: 0, 56 | } 57 | } 58 | // 
reverse so the first will be the one with least index 59 | if len(entries) > 1 { 60 | for i := 0; i < len(entries)/2; i++ { 61 | j := len(entries) - i - 1 62 | entries[i], entries[j] = entries[j], entries[i] 63 | } 64 | } 65 | entries_ = entries 66 | return nil 67 | }) 68 | log.Println("prev index for", peer.Id, "is", prevEntry.Index, peer.MatchIndex, "/", peer.NextIndex) 69 | return entries_, prevEntry 70 | } 71 | 72 | func (s *BFTRaftServer) ScanHostedGroups(serverId uint64) map[uint64]*RTGroup { 73 | scanKey := utils.U32Bytes(GROUP_PEERS) 74 | res := map[uint64]*RTGroup{} 75 | s.DB.View(func(txn *badger.Txn) error { 76 | iter := txn.NewIterator(badger.IteratorOptions{}) 77 | iter.Seek(scanKey) 78 | groups := map[uint64]*RTGroup{} 79 | for iter.ValidForPrefix(scanKey) { 80 | item := iter.Item() 81 | val := ItemValue(item) 82 | peer := &pb.Peer{} 83 | proto.Unmarshal(*val, peer) 84 | if peer.Id == serverId { 85 | group := GetGroupFromKV(txn, peer.Group) 86 | if group != nil { 87 | defaultLeader := uint64(0) 88 | if len(s.GetGroupHosts(txn, group.Id)) < 2 { 89 | defaultLeader = s.Id 90 | } 91 | groups[peer.Group] = NewRTGroup( 92 | s, defaultLeader, 93 | GetGroupPeersFromKV(txn, peer.Group), 94 | group, FOLLOWER, 95 | ) 96 | } 97 | } 98 | iter.Next() 99 | } 100 | iter.Close() 101 | res = groups 102 | return nil 103 | }) 104 | log.Println("found", len(res), "hosted groups") 105 | for groupId, meta := range res { 106 | log.Println("scanned group:", meta.Group.Id) 107 | k := strconv.Itoa(int(groupId)) 108 | s.GroupsOnboard.Set(k, meta) 109 | } 110 | return res 111 | } 112 | 113 | func (m *RTGroup) OnboardGroupPeersSlice() []*pb.Peer { 114 | peers := []*pb.Peer{} 115 | for _, peer := range m.GroupPeers { 116 | peers = append(peers, peer) 117 | } 118 | return peers 119 | } 120 | 121 | func (s *BFTRaftServer) SavePeer(txn *badger.Txn, peer *pb.Peer) error { 122 | if data, err := proto.Marshal(peer); err == nil { 123 | dbKey := append(ComposeKeyPrefix(peer.Group, 
GROUP_PEERS), utils.U64Bytes(peer.Id)...) 124 | return txn.Set(dbKey, data, 0x00) 125 | } else { 126 | return err 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /server/server_test.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/PomeloCloud/BFTRaft4go/utils" 5 | "os" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func initDB(dbPath string, t *testing.T) { 11 | if err := os.MkdirAll(dbPath, os.ModePerm); err != nil { 12 | t.Fatal(err) 13 | } 14 | InitDatabase(dbPath) 15 | } 16 | 17 | func getServer(dbPath string, addr string, bootstraps []string, t *testing.T) *BFTRaftServer { 18 | initDB(dbPath, t) 19 | s, err := GetServer(Options{ 20 | DBPath: dbPath, 21 | Address: addr, 22 | Bootstrap: bootstraps, 23 | ConsensusTimeout: 5 * time.Second, 24 | }) 25 | 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | return s 30 | } 31 | 32 | func TestServerStartup(t *testing.T) { 33 | dbPath := "test_data/ServerStartup" 34 | s := getServer(dbPath, "localhost:4560", []string{}, t) 35 | defer os.RemoveAll(dbPath) 36 | go func() { 37 | if err := s.StartServer(); err != nil { 38 | t.Fatal(err) 39 | } 40 | }() 41 | } 42 | 43 | func TestColdStart(t *testing.T) { 44 | // test for creating a cold started node and add a member to join it 45 | dbPath1 := "test_data/TestColdStart1" 46 | dbPath2 := "test_data/TestColdStart2" 47 | dbPath3 := "test_data/TestColdStart3" 48 | addr1 := "localhost:4561" 49 | addr2 := "localhost:4562" 50 | addr3 := "localhost:4563" 51 | os.RemoveAll(dbPath1) 52 | os.RemoveAll(dbPath2) 53 | os.RemoveAll(dbPath3) 54 | defer os.RemoveAll(dbPath1) 55 | defer os.RemoveAll(dbPath2) 56 | defer os.RemoveAll(dbPath3) 57 | 58 | println("start server 1") 59 | s1 := getServer(dbPath1, addr1, []string{}, t) 60 | time.Sleep(1 * time.Second) 61 | s1.StartServer() 62 | time.Sleep(1 * time.Second) 63 | 64 | println("start server 2") 65 | s2 := 
getServer(dbPath2, addr2, []string{addr1}, t) 66 | time.Sleep(1 * time.Second) 67 | s2.StartServer() 68 | time.Sleep(1 * time.Second) 69 | s2.NodeJoin(utils.ALPHA_GROUP) 70 | time.Sleep(5 * time.Second) 71 | 72 | println("start server 3") 73 | s3 := getServer(dbPath3, addr3, []string{addr1, addr2}, t) 74 | time.Sleep(1 * time.Second) 75 | s3.StartServer() 76 | time.Sleep(1 * time.Second) 77 | s3.NodeJoin(utils.ALPHA_GROUP) 78 | time.Sleep(10 * time.Second) 79 | } 80 | -------------------------------------------------------------------------------- /server/store.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/PomeloCloud/BFTRaft4go/utils" 5 | "github.com/dgraph-io/badger" 6 | "log" 7 | ) 8 | 9 | const ( 10 | LOG_ENTRIES = 0 11 | GROUP_PEERS = 1 12 | GROUP_META = 2 13 | HOST_LIST = 3 14 | GROUP_LAST_IDX = 4 15 | SERVER_CONF = 100 16 | ) 17 | 18 | const ( 19 | HOST_LIST_GROUP = 1 20 | CONFIG_GROUP = 0 21 | ) 22 | 23 | func ComposeKeyPrefix(group uint64, t uint32) []byte { 24 | return append(utils.U32Bytes(t), utils.U64Bytes(group)...) 
25 | } 26 | 27 | func ItemValue(item *badger.Item) *[]byte { 28 | if item == nil { 29 | return nil 30 | } 31 | if val, err := item.Value(); err == nil { 32 | return &val 33 | } else { 34 | log.Println("cannot get value:", err) 35 | return nil 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /server/time_wheel.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "math/rand" 5 | ) 6 | 7 | func RandomTimeout(mult float32) int { 8 | lowRange := 500 * mult 9 | highRange := 1000 * mult 10 | return int(lowRange + highRange*rand.Float32()) 11 | } 12 | -------------------------------------------------------------------------------- /server/vote.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 7 | "github.com/PomeloCloud/BFTRaft4go/utils" 8 | "github.com/dgraph-io/badger" 9 | "log" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | func RequestVoteRequestSignData(req *pb.RequestVoteRequest) []byte { 15 | return []byte(fmt.Sprint(req.Group, "-", req.Term, "-", req.LogIndex, "-", req.Term, "-", req.CandidateId)) 16 | } 17 | 18 | func RequestVoteResponseSignData(res *pb.RequestVoteResponse) []byte { 19 | return []byte(fmt.Sprint(res.Group, "-", res.Term, "-", res.LogIndex, "-", res.Term, "-", res.CandidateId, "-", res.Granted)) 20 | } 21 | 22 | func (m *RTGroup) ResetTerm(term uint64) { 23 | m.Group.Term = term 24 | m.Votes = []*pb.RequestVoteResponse{} 25 | m.LastVotedTerm = 0 26 | m.LastVotedTo = 0 27 | for peerId := range m.GroupPeers { 28 | m.SendVotesForPeers[peerId] = true 29 | } 30 | m.Server.SaveGroupNTXN(m.Group) 31 | } 32 | 33 | func (m *RTGroup) BecomeCandidate() { 34 | defer m.RefreshTimer(25) 35 | m.Role = CANDIDATE 36 | group := m.Group 37 | m.ResetTerm(group.Term + 1) 38 | term := group.Term 39 | 
m.Server.SaveGroupNTXN(m.Group) 40 | lastEntry := m.LastLogEntryNTXN() 41 | var lastIndex uint64 = 0 42 | var lastLogTerm uint64 = 0 43 | if lastEntry != nil { 44 | lastIndex = lastEntry.Index 45 | lastLogTerm = lastEntry.Term 46 | } 47 | request := &pb.RequestVoteRequest{ 48 | Group: group.Id, 49 | Term: term, 50 | LogIndex: lastIndex, 51 | LogTerm: lastLogTerm, 52 | CandidateId: m.Server.Id, 53 | Signature: []byte{}, 54 | } 55 | log.Println("become a candidate", ", term", m.Group.Term) 56 | request.Signature = m.Server.Sign(RequestVoteRequestSignData(request)) 57 | voteReceived := make(chan *pb.RequestVoteResponse) 58 | numPeers := len(m.GroupPeers) 59 | wg := sync.WaitGroup{} 60 | wg.Add(numPeers) 61 | log.Println("sending vote request to", numPeers, "peers") 62 | for _, peer := range m.GroupPeers { 63 | nodeId := peer.Id 64 | node := m.Server.GetHostNTXN(nodeId) 65 | go func() { 66 | if client, err := utils.GetClusterRPC(node.ServerAddr); err == nil { 67 | if voteResponse, err := client.RequestVote(context.Background(), request); err == nil { 68 | publicKey := m.Server.GetHostPublicKey(nodeId) 69 | signData := RequestVoteResponseSignData(voteResponse) 70 | if err := utils.VerifySign(publicKey, voteResponse.Signature, signData); err == nil { 71 | if voteResponse.Granted && voteResponse.LogIndex <= lastEntry.Index { 72 | voteReceived <- voteResponse 73 | } else { 74 | log.Println(nodeId, "peer not granted vote") 75 | } 76 | } else { 77 | log.Println("error on verify vote response:", err) 78 | } 79 | } else { 80 | log.Println("error on request vote:", err) 81 | } 82 | } else { 83 | log.Println("cannot get client for request votes") 84 | } 85 | wg.Done() 86 | }() 87 | } 88 | go func() { 89 | wg.Wait() 90 | close(voteReceived) 91 | log.Println("received all vote response") 92 | }() 93 | expectedVotes := m.ExpectedHonestPeers() // ExpectedHonestPeers(s.OnboardGroupPeersSlice(group.Id)) 94 | adequateVotes := make(chan bool, 1) 95 | log.Println("expecting", 
expectedVotes, "votes to become a leader, term", m.Group.Term) 96 | go func() { 97 | // Here we can follow the rule of Raft by expecting majority votes 98 | // or follow the PBFT rule by expecting n - f votes 99 | // I will use the rule from Raft first 100 | votes := []*pb.RequestVoteResponse{} 101 | for vote := range voteReceived { 102 | votes = append(votes, vote) 103 | if len(votes) >= expectedVotes { 104 | m.Votes = votes 105 | adequateVotes <- true 106 | break 107 | } 108 | } 109 | log.Println("received", len(votes), "votes, term:", m.Group.Term) 110 | }() 111 | select { 112 | case <-adequateVotes: 113 | if m.Role == CANDIDATE { 114 | log.Println("now transfer to leader, term", m.Group.Term) 115 | m.BecomeLeader() 116 | m.RefreshTimer(1) 117 | } else { 118 | log.Println("this peer have already transfered to other role:", m.Role) 119 | } 120 | case <-time.After(10 * time.Second): 121 | log.Println("vote requesting time out") 122 | } 123 | } 124 | 125 | func (m *RTGroup) BecomeLeader() { 126 | // when this peer become the leader of the group 127 | // it need to send it's vote to followers to claim it's authority 128 | // this only need to be done once in each term 129 | // so we just send the 'AppendEntry' request in this function 130 | // we can use a dedicated rpc protocol for this, but no bother 131 | m.Role = LEADER 132 | m.Leader = m.Server.Id // set self to leader for next following requests 133 | m.Server.DB.Update(func(txn *badger.Txn) error { 134 | return m.Server.SaveGroup(txn, m.Group) 135 | }) 136 | log.Println("send votes heartbeat to followers for term", m.Group.Term) 137 | m.SendFollowersHeartbeat(context.Background()) 138 | } 139 | 140 | func (m *RTGroup) BecomeFollower(appendEntryReq *pb.AppendEntriesRequest) bool { 141 | m.VoteLock.Lock() 142 | defer m.VoteLock.Unlock() 143 | // first we need to verify the leader got all of the votes required 144 | log.Println("trying to become a follower of", appendEntryReq.LeaderId, ", term", 
appendEntryReq.Term) 145 | expectedVotes := m.ExpectedHonestPeers() 146 | receivedVotes := len(appendEntryReq.QuorumVotes) 147 | if receivedVotes < expectedVotes { 148 | log.Println("did not received enough vote", receivedVotes, "/", expectedVotes) 149 | return false 150 | } 151 | term := appendEntryReq.Term 152 | votes := map[uint64]bool{} 153 | for _, vote := range appendEntryReq.QuorumVotes { 154 | votePeer, foundCandidate := m.GroupPeers[vote.Voter] 155 | if !foundCandidate { 156 | log.Println("invalid candidate:", vote.Voter, "found:", foundCandidate, "term:", term, "-", m.Group.Term) 157 | continue 158 | } 159 | // check their signatures 160 | signData := RequestVoteResponseSignData(vote) 161 | publicKey := m.Server.GetHostPublicKey(votePeer.Id) 162 | if err := utils.VerifySign(publicKey, vote.Signature, signData); err != nil { 163 | log.Println("verify vote from", vote.Voter, "failed:", err) 164 | continue 165 | } 166 | // check their properties to avoid forging 167 | if vote.Group == m.Group.Id && vote.CandidateId == appendEntryReq.LeaderId && vote.Granted { 168 | votes[votePeer.Id] = true 169 | } else { 170 | log.Println("vote properity not match this vote term, grant:", vote.Granted) 171 | } 172 | } 173 | if len(votes) >= expectedVotes { 174 | // received enough votes, will transform to follower 175 | log.Println( 176 | "received enough votes, become a follower of:", 177 | appendEntryReq.LeaderId, 178 | ", term", appendEntryReq.Term) 179 | m.Role = FOLLOWER 180 | m.ResetTerm(term) 181 | m.RefreshTimer(10) 182 | m.Leader = appendEntryReq.LeaderId 183 | m.Server.SaveGroupNTXN(m.Group) 184 | return true 185 | } else { 186 | log.Println( 187 | "did not received enough votes, become a follower of:", 188 | appendEntryReq.LeaderId, 189 | ", term", appendEntryReq.Term, "got", len(votes), "/", expectedVotes) 190 | return false 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /test/server1.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "Db": "test_data/TestColdStart1", 3 | "Address": "localhost:4561", 4 | "Bootstraps": [] 5 | } -------------------------------------------------------------------------------- /test/server2.json: -------------------------------------------------------------------------------- 1 | { 2 | "Db": "test_data/TestColdStart2", 3 | "Address": "localhost:4562", 4 | "Bootstraps": ["localhost:4561"] 5 | } -------------------------------------------------------------------------------- /test/server3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Db": "test_data/TestColdStart3", 3 | "Address": "localhost:4563", 4 | "Bootstraps": ["localhost:4561", "localhost:4562"] 5 | } -------------------------------------------------------------------------------- /test/server4.json: -------------------------------------------------------------------------------- 1 | { 2 | "Db": "test_data/TestColdStart4", 3 | "Address": "localhost:4564", 4 | "Bootstraps": ["localhost:4561", "localhost:4562", "localhost:4563"] 5 | } -------------------------------------------------------------------------------- /test/server5.json: -------------------------------------------------------------------------------- 1 | { 2 | "Db": "test_data/TestColdStart5", 3 | "Address": "localhost:4565", 4 | "Bootstraps": ["localhost:4561", "localhost:4562", "localhost:4563", "localhost:4564"] 5 | } -------------------------------------------------------------------------------- /test/server6.json: -------------------------------------------------------------------------------- 1 | { 2 | "Db": "test_data/TestColdStart6", 3 | "Address": "localhost:4566", 4 | "Bootstraps": ["localhost:4561", "localhost:4562", "localhost:4563", "localhost:4564"] 5 | } -------------------------------------------------------------------------------- /test/server7.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "Db": "test_data/TestColdStart7", 3 | "Address": "localhost:4567", 4 | "Bootstraps": ["localhost:4561", "localhost:4562", "localhost:4563", "localhost:4564"] 5 | } -------------------------------------------------------------------------------- /test/testserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/PomeloCloud/BFTRaft4go/server" 5 | "github.com/PomeloCloud/BFTRaft4go/utils" 6 | "log" 7 | "os" 8 | "time" 9 | ) 10 | 11 | func initDB(dbPath string) { 12 | if err := os.MkdirAll(dbPath, os.ModePerm); err != nil { 13 | panic(err) 14 | } 15 | server.InitDatabase(dbPath) 16 | } 17 | 18 | func getServer(dbPath string, addr string, bootstraps []string) *server.BFTRaftServer { 19 | initDB(dbPath) 20 | s, err := server.GetServer(server.Options{ 21 | DBPath: dbPath, 22 | Address: addr, 23 | Bootstrap: bootstraps, 24 | ConsensusTimeout: 5 * time.Second, 25 | }) 26 | 27 | if err != nil { 28 | panic(err) 29 | } 30 | return s 31 | } 32 | 33 | func main() { 34 | print("testing") 35 | args := os.Args 36 | cfgPath := args[1] 37 | log.Println(cfgPath) 38 | cfg := server.ReadConfigFile(cfgPath) 39 | os.RemoveAll(cfg.Db) 40 | defer os.RemoveAll(cfg.Db) 41 | println("start server", cfg.Address) 42 | s := getServer(cfg.Db, cfg.Address, cfg.Bootstraps) 43 | time.Sleep(1 * time.Second) 44 | s.StartServer() 45 | if len(cfg.Bootstraps) > 0 { 46 | time.Sleep(1 * time.Second) 47 | s.NodeJoin(utils.ALPHA_GROUP) 48 | } 49 | <-time.After(1 * time.Hour) 50 | } 51 | -------------------------------------------------------------------------------- /utils/alpha.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | spb "github.com/PomeloCloud/BFTRaft4go/proto/server" 6 | "log" 7 | ) 8 | 9 | func AlphaNodes(servers []string) []*spb.Host { 
10 | bootstrapServers := []*spb.BFTRaftClient{} 11 | for _, addr := range servers { 12 | if c, err := GetClusterRPC(addr); err == nil { 13 | bootstrapServers = append(bootstrapServers, &c) 14 | } else { 15 | log.Println("cannot get rpc for alpha nodes") 16 | } 17 | } 18 | res := MajorityResponse(bootstrapServers, func(c spb.BFTRaftClient) (interface{}, []byte) { 19 | if nodes, err := c.GroupHosts(context.Background(), &spb.GroupId{ 20 | GroupId: ALPHA_GROUP, 21 | }); err == nil { 22 | return nodes, NodesSignData(nodes.Nodes) 23 | } else { 24 | log.Println("cannot get alpha group hosts when get alpha nodes") 25 | return (*spb.GroupNodesResponse)(nil), []byte{} 26 | } 27 | }) 28 | if res == nil { 29 | return nil 30 | } else { 31 | response := res.(*spb.GroupNodesResponse) 32 | return response.Nodes 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /utils/conns.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "google.golang.org/grpc" 5 | "log" 6 | "sync" 7 | ) 8 | 9 | var ClientConn map[string]*grpc.ClientConn = map[string]*grpc.ClientConn{} 10 | var ConnLock sync.Mutex = sync.Mutex{} 11 | 12 | func GetClientConn(addr string) (*grpc.ClientConn, error) { 13 | ConnLock.Lock() 14 | defer ConnLock.Unlock() 15 | if cachedConn, cacheFound := ClientConn[addr]; cacheFound { 16 | return cachedConn, nil 17 | } 18 | if conn, err := grpc.Dial(addr, grpc.WithInsecure()); err == nil { 19 | ClientConn[addr] = conn 20 | return conn, nil 21 | } else { 22 | log.Println("error on connect node:", err) 23 | return nil, err 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /utils/consensus.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "encoding/gob" 6 | spb "github.com/PomeloCloud/BFTRaft4go/proto/server" 7 | "hash/fnv" 8 | 
// ExpectedPlayers returns how many matching responses are required to
// treat a result as agreed upon among num participants. For five or more
// participants this is a simple majority (num/2 + 1); smaller counts use
// the fixed thresholds below.
func ExpectedPlayers(num int) int {
	switch {
	case num == 0, num == 1:
		return 1
	case num == 2, num == 3:
		return 2
	case num < 5:
		return 3
	default:
		return num/2 + 1
	}
}

// PickMajority tallies the hashes and returns the one that reaches the
// quorum computed by ExpectedPlayers, or 0 when no hash has a quorum.
func PickMajority(hashes []uint64) uint64 {
	tally := make(map[uint64]int, len(hashes))
	for _, h := range hashes {
		tally[h]++
	}
	quorum := ExpectedPlayers(len(hashes))
	// At most one hash can reach a majority quorum, so iteration order
	// of the map does not affect the result.
	for h, count := range tally {
		if count >= quorum {
			return h
		}
	}
	return 0
}
const (
	// Byte widths of the fixed-size big-endian integer encodings below.
	UINT32_LEN = 4
	UINT64_LEN = 8
)

// U32Bytes encodes t as a 4-byte big-endian slice.
func U32Bytes(t uint32) []byte {
	buf := make([]byte, UINT32_LEN)
	binary.BigEndian.PutUint32(buf, t)
	return buf
}

// U64Bytes encodes n as an 8-byte big-endian slice.
func U64Bytes(n uint64) []byte {
	buf := make([]byte, UINT64_LEN)
	binary.BigEndian.PutUint64(buf, n)
	return buf
}

// BytesU64 decodes the 8 big-endian bytes of bs starting at offset.
// The caller must ensure bs holds at least offset+UINT64_LEN bytes.
func BytesU64(bs []byte, offset int) uint64 {
	return binary.BigEndian.Uint64(bs[offset : offset+UINT64_LEN])
}
found { 29 | return cachedPRC 30 | } else { 31 | grpcServer := grpc.NewServer() 32 | RPCServers[addr] = grpcServer 33 | return grpcServer 34 | } 35 | } 36 | 37 | func GRPCServerListen(addr string) error { 38 | lis, err := net.Listen("tcp", addr) 39 | if err != nil { 40 | log.Println("RPC Server Listen:", addr, "Error:", err) 41 | return err 42 | } 43 | return GetGRPCServer(addr).Serve(lis) 44 | } 45 | -------------------------------------------------------------------------------- /utils/shares.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | pb "github.com/PomeloCloud/BFTRaft4go/proto/server" 5 | "github.com/golang/protobuf/proto" 6 | ) 7 | 8 | const ( 9 | MAX_TERM_BUMP = 20 10 | ALPHA_GROUP = 1 // the group for recording server members, groups, peers etc 11 | ) 12 | 13 | func NodesSignData(nodes []*pb.Host) []byte { 14 | signData := []byte{} 15 | for _, node := range nodes { 16 | nodeBytes, _ := proto.Marshal(node) 17 | signData = append(signData, nodeBytes...) 18 | } 19 | return signData 20 | } 21 | 22 | func CommandSignData(group uint64, sender uint64, reqId uint64, data []byte) []byte { 23 | groupBytes := U64Bytes(group) 24 | senderBytes := U64Bytes(sender) 25 | reqIdBytes := U64Bytes(reqId) 26 | return append(append(append(groupBytes, senderBytes...), reqIdBytes...), data...) 
// GenerateKey creates a fresh 2048-bit RSA key pair and returns the
// private key in PKCS#1 DER form and the public key in PKIX DER form.
func GenerateKey() ([]byte, []byte, error) {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	privBytes := x509.MarshalPKCS1PrivateKey(priv)
	pubBytes, err := x509.MarshalPKIXPublicKey(priv.Public())
	if err != nil {
		return nil, nil, err
	}
	return privBytes, pubBytes, nil
}
// SHA1Hash digests data with SHA-1 and returns the digest together with
// the crypto.Hash identifier used, so callers can pass both straight to
// the RSA sign/verify helpers.
// NOTE(review): SHA-1 is cryptographically weak; it is kept here only for
// compatibility with signatures already produced by this package.
func SHA1Hash(data []byte) ([]byte, crypto.Hash) {
	const algo = crypto.SHA1
	hasher := algo.New()
	hasher.Write(data)
	return hasher.Sum(nil), algo
}