├── run_tests.sh ├── header.png ├── .gitignore ├── cmd ├── ravel_node │ ├── run.sh │ ├── test.yaml │ ├── ravel_node.proto │ ├── kill.go │ ├── helper.go │ ├── README.md │ ├── main.go │ └── start.go └── ravel_cluster_admin │ ├── utils.go │ ├── cluster_admin.proto │ ├── main.go │ ├── consistent_hash_backup.go │ ├── server.go │ ├── README.md │ ├── http_server.go │ ├── consistent_hashing.go │ └── grpc_server.go ├── RavelNodePB ├── README.md └── ravel_node.pb.go ├── RavelClusterAdminPB ├── README.md ├── cluster_admin_grpc.pb.go └── cluster_admin.pb.go ├── node_server ├── README.md └── server.go ├── db ├── init_test.go ├── README.md ├── init.go ├── transactions_test.go └── transactions.go ├── replica.dockerfile ├── admin.dockerfile ├── go.mod ├── node ├── README.md ├── join_leave.go └── node.go ├── fsm ├── snapshot.go ├── fsm_test.go ├── README.md └── fsm.go ├── store ├── utils.go ├── logstore_test.go ├── stablestore_test.go ├── README.md ├── stablestore.go └── logstore.go ├── LICENSE ├── install.sh ├── CONTRIBUTING.md ├── README.md └── go.sum /run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | go test ./db 4 | go test ./store -------------------------------------------------------------------------------- /header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adityameharia/ravel/HEAD/header.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .env 3 | ravel 4 | cmd/ravel_node/ravel_node 5 | cmd/ravel_cluster_admin/ravel_cluster_admin 6 | cmd/ravel_client/ravel_client -------------------------------------------------------------------------------- /cmd/ravel_node/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | go build 4 | sudo rm -rf /tmp/badger 5 | ./ravel_node -nodeID=1 -storageDir=/tmp/badger/run4 -gRPCAddr="localhost:50000" -raftAddr="localhost:60000" 6 | -------------------------------------------------------------------------------- /cmd/ravel_node/test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | storagedir: "/tmp/badger/run" 3 | grpcaddr: "localhost:50000" 4 | raftaddr: "localhost:60000" 5 | adminrpcaddr: "localhost:42000" 6 | leader: true 7 | 8 | -------------------------------------------------------------------------------- /RavelNodePB/README.md: -------------------------------------------------------------------------------- 1 | # RavelNodePB 2 | 3 | The RavelNodePB package contains the gRPC server code auto-generated by protoc from the [ravel_node proto file](https://github.com/adityameharia/ravel/blob/main/cmd/ravel_node/ravel_node.proto) -------------------------------------------------------------------------------- /RavelClusterAdminPB/README.md: -------------------------------------------------------------------------------- 1 | # RavelClusterAdminPB 2 | 3 | The RavelClusterAdminPB package contains the gRPC server code auto-generated by protoc from the [ravel_cluster_admin proto file](https://github.com/adityameharia/ravel/blob/main/cmd/ravel_cluster_admin/cluster_admin.proto) -------------------------------------------------------------------------------- /node_server/README.md: -------------------------------------------------------------------------------- 1 | # node_server
2 | 3 | This package implements the gRPC server interface, i.e. it contains the functions defined in the [proto file](https://github.com/adityameharia/ravel/blob/main/cmd/ravel_node/ravel_node.proto). It calls the necessary functions required to execute the incoming requests. 4 | 5 | -------------------------------------------------------------------------------- /db/init_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestClose(t *testing.T) { 8 | path := "/tmp/badger_test" 9 | var r RavelDatabase 10 | err := r.Init(path) 11 | if err != nil { 12 | t.Fatal("Error in connecting to BadgerDB on Host Machine", err) 13 | } 14 | defer r.Close() 15 | } 16 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/utils.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "encoding/binary" 4 | 5 | // Converts a big-endian byte slice to a uint64 6 | func bytesToUint64(b []byte) uint64 { 7 | return binary.BigEndian.Uint64(b) 8 | } 9 | 10 | // Converts a uint64 to a big-endian byte slice 11 | func uint64ToBytes(u uint64) []byte { 12 | buf := make([]byte, 8) 13 | binary.BigEndian.PutUint64(buf, u) 14 | return buf 15 | } 16 | 17 | -------------------------------------------------------------------------------- /replica.dockerfile: -------------------------------------------------------------------------------- 1 | # parent image 2 | FROM golang:1.15.6-alpine3.12 3 | 4 | # workspace directory 5 | WORKDIR /ravel/ravel_node 6 | 7 | # copy `go.mod` and `go.sum` 8 | ADD ./go.mod ./go.sum ./ 9 | 10 | # install dependencies 11 | RUN go mod download 12 | 13 | RUN apk add build-base 14 | 15 | # copy source code 16 | COPY . . 17 | 18 | # build executable 19 | RUN go build ./cmd/ravel_node 20 | 21 | # expose ports 22 | EXPOSE 50000 60000 23 | 24 | # set entrypoint 25 | ENTRYPOINT [ "./ravel_node" ] -------------------------------------------------------------------------------- /admin.dockerfile: -------------------------------------------------------------------------------- 1 | # parent image 2 | FROM golang:1.15.6-alpine3.12 3 | 4 | # workspace directory 5 | WORKDIR /ravel/ravel_cluster_admin 6 | 7 | # copy `go.mod` and `go.sum` 8 | ADD ./go.mod ./go.sum ./ 9 | 10 | # install dependencies 11 | RUN go mod download 12 | 13 | RUN apk add build-base 14 | 15 | # copy source code 16 | COPY . .
17 | 18 | # build executable 19 | RUN go build ./cmd/ravel_cluster_admin 20 | 21 | # expose ports 22 | EXPOSE 5000 42000 23 | 24 | # set entrypoint 25 | ENTRYPOINT [ "./ravel_cluster_admin" ] -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/adityameharia/ravel 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/buraksezer/consistent v0.9.0 7 | github.com/cespare/xxhash v1.1.0 8 | github.com/dgraph-io/badger/v3 v3.2011.1 9 | github.com/gin-gonic/gin v1.7.2 10 | github.com/google/uuid v1.2.0 11 | github.com/hashicorp/raft v1.3.1 12 | github.com/urfave/cli v1.22.5 13 | github.com/urfave/cli/v2 v2.3.0 14 | github.com/vmihailenco/msgpack/v5 v5.3.2 15 | google.golang.org/grpc v1.38.0 16 | google.golang.org/protobuf v1.26.0 17 | gopkg.in/yaml.v2 v2.4.0 18 | ) 19 | -------------------------------------------------------------------------------- /db/README.md: -------------------------------------------------------------------------------- 1 | # db 2 | 3 | The `db` package contains the implementation of the `RavelDatabase` struct and its functions. This is just a simple 4 | overlay on top of [BadgerDB](https://github.com/dgraph-io/badger) exposing common functions like `Init`, `Close`, `Read`, 5 | `Write` and `Delete`. 6 | 7 | - `Init` - initialises the `RavelDatabase` struct and opens a connection to BadgerDB. 8 | - `Close` - closes the connection to BadgerDB. 9 | - `Read` - starts a read only transaction and returns the value for the given key. 10 | - `Write` - starts a read-write transaction, writes the key and value to BadgerDB and then commits the transaction. 11 | - `Delete` - starts a read-write transaction, deletes the key value pair and commits the transaction. -------------------------------------------------------------------------------- /cmd/ravel_node/ravel_node.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package RavelClusterPB; 4 | 5 | option go_package = "./RavelNodePB"; 6 | 7 | service RavelNode { 8 | rpc Join(Node) returns (Void); 9 | rpc Leave(Node) returns (Void); 10 | rpc Run(Command) returns (Response); 11 | rpc IsLeader(Void) returns (Boolean); 12 | } 13 | 14 | message Node { 15 | string node_id = 1; 16 | string grpc_address = 2; 17 | string raft_address = 3; 18 | string cluster_id = 4; 19 | } 20 | 21 | message Void { 22 | } 23 | 24 | message Boolean { 25 | bool leader = 1; 26 | } 27 | 28 | message Response { 29 | string msg = 1; 30 | bytes data = 2; 31 | } 32 | 33 | message Command { 34 | string operation = 1; 35 | bytes key = 2; 36 | bytes value = 3; 37 | } 38 | 39 | 40 | -------------------------------------------------------------------------------- /node/README.md: -------------------------------------------------------------------------------- 1 | # node 2 | 3 | The node package implements the main functions a replica has to perform to start up and respond to requests. 4 | 5 | - The `Open` function initialises structs that satisfy the 6 | - [StableStore](https://pkg.go.dev/github.com/hashicorp/raft#StableStore) interface 7 | - [LogStore](https://pkg.go.dev/github.com/hashicorp/raft#LogStore) interface 8 | - [FSM](https://pkg.go.dev/github.com/hashicorp/raft#FSM) interface 9 | - [SnapshotStore](https://pkg.go.dev/github.com/hashicorp/raft#SnapshotStore) interface and 10 | - [Transport](https://pkg.go.dev/github.com/hashicorp/raft#Transport) interface (wired together as in the sketch below).
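As a rough sketch of how these pieces are wired together (assuming the hashicorp/raft API; `fsm`, `logStore`, `stableStore`, `snaps`, `transport` and `nodeID` are placeholders for the values this package constructs, not names taken from the source):

```go
config := raft.DefaultConfig()
config.LocalID = raft.ServerID(nodeID)

// Each argument below satisfies one of the interfaces listed above.
r, err := raft.NewRaft(config, fsm, logStore, stableStore, snaps, transport)
if err != nil {
	log.Fatal(err)
}
```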
11 | 12 | It then uses these structs to initialise a new raft node. 13 | 14 | - The `Get`, `Set` and `Delete` functions are called by the leader in the cluster on a corresponding request from the 15 | admin 16 | - The `Join` and `Leave` functions implement the logic for new replicas joining and leaving the cluster. -------------------------------------------------------------------------------- /db/init.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "log" 5 | 6 | badger "github.com/dgraph-io/badger/v3" 7 | ) 8 | 9 | // RavelDatabase represents an overlay on top of BadgerDB. It exposes Init, Close, Read, Write and Delete functions, 10 | // which perform the corresponding operations on an instance of BadgerDB and persist them on disk 11 | type RavelDatabase struct { 12 | Conn *badger.DB 13 | } 14 | 15 | // Init initialises BadgerDB with the path provided. 16 | func (r *RavelDatabase) Init(path string) error { 17 | var err error 18 | 19 | options := badger.DefaultOptions(path) 20 | options.Logger = nil 21 | options.SyncWrites = true 22 | 23 | r.Conn, err = badger.Open(options) 24 | if err != nil { 25 | return err 26 | } 27 | return nil 28 | } 29 | 30 | // Close closes the connection to the BadgerDB instance 31 | func (r *RavelDatabase) Close() { 32 | err := r.Conn.Close() 33 | if err != nil { 34 | log.Fatal(err) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /fsm/snapshot.go: -------------------------------------------------------------------------------- 1 | package fsm 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/adityameharia/ravel/db" 7 | "github.com/hashicorp/raft" 8 | ) 9 | 10 | // Snapshot implements the raft.FSMSnapshot interface 11 | type Snapshot struct { 12 | Db *db.RavelDatabase 13 | } 14 | 15 | type KeyValue struct { 16 | Key []byte `json:"key"` 17 | Value []byte `json:"value"` 18 | } 19 | 20 | // Persist writes a backup of the db to the sink 21 | func (f *Snapshot) Persist(sink raft.SnapshotSink) error { 22 | log.Println("Snapshot: Starting Snapshot") 23 | 24 | _, err := f.Db.Conn.Backup(sink, 0) 25 | if err != nil { 26 | log.Println("Snapshot: Unable to take Snapshot") 27 | return err 28 | } 29 | 30 | err = sink.Close() 31 | if err != nil { 32 | log.Println("Snapshot: Unable to close Sink") 33 | return err 34 | } 35 | 36 | return nil 37 | } 38 | 39 | // Release releases the snapshot 40 | func (f *Snapshot) Release() { 41 | log.Println("Snapshot: Snapshot finished") 42 | } 43 | -------------------------------------------------------------------------------- /store/utils.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/json" 6 | "log" 7 | 8 | "github.com/hashicorp/raft" 9 | ) 10 | 11 | // Converts a big-endian byte slice to a uint64 12 | func bytesToUint64(b []byte) uint64 { 13 | return binary.BigEndian.Uint64(b) 14 | } 15 | 16 | // Converts a uint64 to a big-endian byte slice 17 | func uint64ToBytes(u uint64) []byte { 18 | buf := make([]byte, 8) 19 | binary.BigEndian.PutUint64(buf, u) 20 | return buf 21 | } 22 | 23 | // raftLogToBytes converts a raft.Log object to a []byte using JSON serialization 24 | func raftLogToBytes(l raft.Log) []byte { 25 | bytes, err := json.Marshal(l) 26 | if err != nil { 27 | log.Println(err) 28 | } 29 | 30 | return bytes 31 | } 32 | 33 | // bytesToRaftLog converts []byte to a raft.Log object using JSON serialization 34 |
// and writes it on that pointer 35 | func bytesToRaftLog(b []byte, raftLog *raft.Log) error { 36 | err := json.Unmarshal(b, raftLog) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/cluster_admin.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package RavelClusterAdminPB; 4 | 5 | option go_package = "./RavelClusterAdminPB"; 6 | 7 | // RavelClusterAdmin is the main service that is exposed to the nodes in a Ravel cluster 8 | service RavelClusterAdmin { 9 | rpc JoinExistingCluster(Node) returns (Cluster); 10 | rpc JoinAsClusterLeader(Node) returns (Cluster); 11 | rpc UpdateClusterLeader(Node) returns (Response); 12 | rpc LeaveCluster(Node) returns (Response); 13 | rpc GetClusterLeader(Cluster) returns (Node); 14 | rpc InitiateDataRelocation(Cluster) returns (Response); 15 | } 16 | 17 | // Cluster represents the information to represent a cluster leader in Ravel 18 | message Cluster { 19 | string cluster_id = 1; 20 | string leader_grpc_address = 2; 21 | string leader_raft_address = 3; 22 | } 23 | 24 | message Response { 25 | string data = 1; 26 | } 27 | 28 | // Node represents the information to represent a node in Ravel 29 | message Node { 30 | string node_id = 1; 31 | string grpc_address = 2; 32 | string raft_address = 3; 33 | string cluster_id = 4; 34 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Aditya Meharia and Junaid Rahim 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /fsm/fsm_test.go: -------------------------------------------------------------------------------- 1 | package fsm 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | "time" 7 | 8 | "github.com/hashicorp/raft" 9 | ) 10 | 11 | func TestSnapshot(t *testing.T) { 12 | f, err := NewFSM("/tmp/badger/test/fsm") 13 | if err != nil { 14 | t.Fatal("Error in newFSM") 15 | } 16 | defer f.Close() 17 | 18 | snapshot, err := f.Snapshot() 19 | if err != nil { 20 | t.Error("Error in snapshot") 21 | } 22 | t.Log(snapshot) 23 | 24 | } 25 | 26 | func TestApplyAndGet(t *testing.T) { 27 | f, err := NewFSM("/tmp/badger/test/fsm") 28 | if err != nil { 29 | t.Fatal("Error in newFSM") 30 | } 31 | defer f.Close() 32 | 33 | com := &LogData{ 34 | Operation: "set", 35 | Key: []byte("testKey"), 36 | Value: []byte("testValue"), 37 | } 38 | comByte, err := json.Marshal(com) 39 | if err != nil { 40 | t.Error("Error marshalling logData") 41 | } 42 | 43 | l := raft.Log{ 44 | Index: 1, 45 | Term: 0, 46 | Type: raft.LogCommand, 47 | Data: comByte, 48 | AppendedAt: time.Now(), 49 | } 50 | 51 | e := f.Apply(&l) 52 | if e != nil { 53 | t.Error("Error in apply fsm") 54 | } 55 | 56 | _, err = f.Get([]byte("testKey")) 57 | if err != nil { 58 | t.Error("Error in getting key which has been set") 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | RED="\e[31m" 4 | YELLOW="\e[33m" 5 | BLUE="\e[34m" 6 | ENDCOLOR="\e[0m" 7 | 8 | echo -e "${RED}" 9 | cat << "EOF" 10 | 11 | | | 12 | _ __ __ ___ _____| | 13 | | '__/ _` \ \ / / _ \ | 14 | | | | (_| |\ V / __/ | 15 | |_| \__,_| \_/ \___|_| 16 | 17 | EOF 18 | echo -n -e "${ENDCOLOR}" 19 | echo "A fault-tolerant, sharded key-value store" 20 | echo "-----------------------------------------" 21 | echo "Downloading ravel_node and ravel_cluster_admin from github: " 22 | echo "" 23 | 24 | ravel_node_url=https://github.com/adityameharia/ravel/releases/download/v0.1-alpha/ravel_node 25 | ravel_cluster_admin_url=https://github.com/adityameharia/ravel/releases/download/v0.1-alpha/ravel_cluster_admin 26 | 27 | curl -LJO $ravel_node_url 28 | sudo mv ./ravel_node /usr/local/bin 29 | curl -LJO $ravel_cluster_admin_url && sudo mv ./ravel_cluster_admin /usr/local/bin 30 | chmod +x /usr/local/bin/ravel_node 31 | chmod +x /usr/local/bin/ravel_cluster_admin 32 | 33 | echo "" 34 | echo -e "${YELLOW}ravel_node and ravel_cluster_admin were downloaded and moved to /usr/local/bin${ENDCOLOR}" 35 | echo "" 36 | echo "You can now run the following commands:" 37 | echo -e "${BLUE}ravel_node --help${ENDCOLOR}" 38 | echo -e "${BLUE}ravel_cluster_admin --help${ENDCOLOR}" -------------------------------------------------------------------------------- /store/logstore_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "log" 5 | "testing" 6 | "time" 7 | 8 | "github.com/hashicorp/raft" 9 | ) 10 | 11 | func TestRavelLogStore_StoreLog(t *testing.T) { 12 | r, err := NewRavelLogStore("/tmp/badger/test/log") 13 | if err != nil { 14 | log.Println(err) 15 | } 16 | 17 | var logs []*raft.Log 18 | var i uint64 19 | for i = 0; i < 5; i++ { 20 | l := raft.Log{ 21 | Index: i, 22 | Term: 0, 23 | Type: raft.LogCommand, 24 | Data: []byte("Test Log Data"), 25 | AppendedAt: time.Now(), 26 | } 27 | 28 | logs = append(logs, &l) 29 |
} 30 | 31 | err = r.StoreLogs(logs) 32 | if err != nil { 33 | t.Error("Error in StoreLog", err) 34 | } 35 | 36 | var l raft.Log 37 | err = r.GetLog(2, &l) 38 | if err != nil { 39 | t.Error("Error in GetLog", err) 40 | } 41 | 42 | if l.Index != 2 { 43 | t.Error("Error in GetLog, expected l.Index to be 2 got ", l.Index) 44 | } 45 | 46 | var fi uint64 47 | fi, err = r.FirstIndex() 48 | if err != nil { 49 | t.Error("Error in FirstIndex", err) 50 | } 51 | 52 | if fi != 0 { 53 | t.Error("Error in FirstIndex, expected 0 got ", fi) 54 | } 55 | 56 | var li uint64 57 | li, err = r.LastIndex() 58 | if err != nil { 59 | t.Error("Error in LastIndex", err) 60 | } 61 | 62 | if li != 4 { 63 | t.Error("Error in LastIndex, expected 4 got ", li) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /store/stablestore_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestSet(t *testing.T) { 8 | s, err := NewRavelStableStore("/tmp/badger/test/stable") 9 | if err != nil { 10 | t.Error("Error in newstablestore") 11 | } 12 | 13 | err = s.Set([]byte("testKey"), []byte("testValue")) 14 | if err != nil { 15 | t.Error("Error in Set stable store") 16 | } 17 | 18 | } 19 | 20 | func TestSetUint64(t *testing.T) { 21 | s, err := NewRavelStableStore("/tmp/badger/test/stable") 22 | if err != nil { 23 | t.Error("Error in newstablestore") 24 | } 25 | 26 | err = s.SetUint64([]byte("testKey"), 1) 27 | if err != nil { 28 | t.Error("Error in SetUint64 stable store") 29 | } 30 | } 31 | 32 | func TestGet(t *testing.T) { 33 | s, err := NewRavelStableStore("/tmp/badger/test/stable") 34 | if err != nil { 35 | t.Error("Error in newstablestore") 36 | } 37 | 38 | err = s.Set([]byte("testKey"), []byte("testValue")) 39 | if err != nil { 40 | t.Error("Error in Set stable store") 41 | } 42 | 43 | val, err := s.Get([]byte("testKey")) 44 | if err != nil { 45 | t.Error("Error in Get stable store") 46 | } 47 | t.Logf(string(val)) 48 | } 49 | 50 | func TestGetUint64(t *testing.T) { 51 | s, err := NewRavelStableStore("/tmp/badger/test/stable") 52 | if err != nil { 53 | t.Error("Error in newstablestore") 54 | } 55 | 56 | err = s.SetUint64([]byte("testKey"), 234) 57 | if err != nil { 58 | t.Error("Error in Set stable store") 59 | } 60 | 61 | val, err := s.GetUint64([]byte("testKey")) 62 | if err != nil { 63 | t.Error("Error in Get stable store") 64 | } 65 | t.Log(val) 66 | } 67 | -------------------------------------------------------------------------------- /cmd/ravel_node/kill.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | 8 | "github.com/adityameharia/ravel/RavelClusterAdminPB" 9 | "github.com/adityameharia/ravel/RavelNodePB" 10 | "google.golang.org/grpc" 11 | ) 12 | 13 | func killCluster(replicaCount int) { 14 | if replicaCount == 2 || replicaCount == -1 { 15 | 16 | adminConn, err := grpc.Dial(nodeConfig.AdminGRPCAddr, grpc.WithInsecure()) 17 | if err != nil { 18 | log.Fatal("Error in connecting to the Admin gRPC Server: ", err) 19 | } 20 | defer adminConn.Close() 21 | 22 | adminClient = RavelClusterAdminPB.NewRavelClusterAdminClient(adminConn) 23 | 24 | log.Println(nodeConfig) 25 | 26 | cluster := &RavelClusterAdminPB.Cluster{ClusterId: nodeConfig.ClusterID} 27 | leaderNode, err := adminClient.GetClusterLeader(context.Background(), cluster) 28 | if err != nil { 29 | log.Fatal(err) 30 | 
} 31 | 32 | err = RequestLeaveToClusterLeader(leaderNode.GrpcAddress, &RavelNodePB.Node{ 33 | NodeId: nodeConfig.NodeID, 34 | ClusterId: nodeConfig.ClusterID, 35 | GrpcAddress: nodeConfig.GRPCAddr, 36 | }) 37 | 38 | if err != nil { 39 | log.Println(err) 40 | } 41 | 42 | _, err = adminClient.LeaveCluster(context.TODO(), &RavelClusterAdminPB.Node{ 43 | NodeId: nodeConfig.NodeID, 44 | ClusterId: nodeConfig.ClusterID, 45 | GrpcAddress: nodeConfig.GRPCAddr, 46 | RaftAddress: nodeConfig.RaftInternalAddr, 47 | }) 48 | 49 | if err != nil { 50 | log.Println(err) 51 | } 52 | 53 | err = os.RemoveAll(nodeConfig.StorageDir) 54 | if err != nil { 55 | log.Fatal(err) 56 | } 57 | 58 | log.Println("Server successfully removed") 59 | 60 | os.Exit(0) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /store/README.md: -------------------------------------------------------------------------------- 1 | # Store 2 | 3 | The store package implements the [StableStore](https://pkg.go.dev/github.com/hashicorp/raft#StableStore) interface and 4 | the [LogStore](https://pkg.go.dev/github.com/hashicorp/raft#LogStore) interface, 5 | which are required for constructing a [new raft node](https://pkg.go.dev/github.com/hashicorp/raft#NewRaft). 6 | 7 | These interfaces are used for storing and retrieving logs and other key configurations of the node. 8 | 9 | ## StableStore Interface 10 | 11 | StableStore is used to provide stable storage of key configurations to ensure safety. The Set/Get and SetUint64/GetUint64 12 | functions are used to set/get key-value pairs of type `[]byte` and `uint64` respectively. 13 | 14 | ## LogStore Interface 15 | 16 | The LogStore interface stores and retrieves the logs in a persistent manner. 17 | 18 | - The FirstIndex and LastIndex functions return the index property of the first and last log respectively. These 19 | functions are used to check whether the logs of the follower are consistent with that of the leader. 20 | 21 | - The GetLog function gets the log with the given index and writes it to the pointer of 22 | type [Log](https://pkg.go.dev/github.com/hashicorp/raft#Log) passed to it. 23 | 24 | - The StoreLog function is used to store a single Log to disk. It calls the StoreLogs function passing it an array of 25 | the given log. 26 | 27 | - The StoreLogs function is perhaps the most important function in this interface. It takes in an array of logs and 28 | actually persists that data onto disk using [BadgerDB](https://github.com/dgraph-io/badger). It is used by new 29 | nodes to store all the logs they receive from the leader and by existing nodes to store individual logs.
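As a minimal, self-contained illustration of the key encoding this store relies on (assuming the 8-byte big-endian helpers in `store/utils.go`): big-endian keys make Badger's lexicographic key order match numeric log order, which is what lets FirstIndex and LastIndex simply read the first and last keys of an iterator.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// uint64ToBytes encodes a log index as an 8-byte big-endian key,
// mirroring the helper used by the store package.
func uint64ToBytes(u uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, u)
	return buf
}

func main() {
	// 2 sorts before 10 as raw bytes, preserving numeric order.
	fmt.Printf("%x\n", uint64ToBytes(2))  // 0000000000000002
	fmt.Printf("%x\n", uint64ToBytes(10)) // 000000000000000a
}
```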
-------------------------------------------------------------------------------- /db/transactions_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "log" 5 | "testing" 6 | 7 | "github.com/dgraph-io/badger/v3" 8 | ) 9 | 10 | var r RavelDatabase 11 | 12 | func Setup() { 13 | path := "/tmp/badger_test" 14 | 15 | err := r.Init(path) 16 | if err != nil { 17 | log.Println("Error in starting connection with Badger") 18 | log.Println(err) 19 | } 20 | 21 | err = r.Conn.Update(func(txn *badger.Txn) error { 22 | err := txn.Set([]byte("k1"), []byte("v1")) 23 | if err != nil { 24 | return err 25 | } 26 | return nil 27 | }) 28 | 29 | if err != nil { 30 | log.Println("Error in setting up transactions_test.go") 31 | log.Println(err) 32 | } 33 | } 34 | 35 | func TestRead(t *testing.T) { 36 | Setup() 37 | defer r.Close() 38 | 39 | v, err := r.Read([]byte("k1")) 40 | if err != nil { 41 | t.Error("Error in Read", err) 42 | } 43 | 44 | if string(v) != "v1" { 45 | t.Error("Error in read value", err) 46 | } 47 | 48 | } 49 | 50 | func TestWrite(t *testing.T) { 51 | Setup() 52 | defer r.Close() 53 | 54 | err := r.Write([]byte("k2"), []byte("v2")) 55 | if err != nil { 56 | t.Error("Error in writing to Badger", err) 57 | } 58 | 59 | } 60 | 61 | func TestDelete(t *testing.T) { 62 | Setup() 63 | defer r.Close() 64 | 65 | err := r.Delete([]byte("k1")) 66 | if err != nil { 67 | t.Error("Error in deleting from Badger", err) 68 | } 69 | } 70 | 71 | func TestReadAndDelete(t *testing.T) { 72 | Setup() 73 | defer r.Close() 74 | err := r.Write([]byte("k2"), []byte("v2")) 75 | if err != nil { 76 | t.Error("Error in writing to Badger", err) 77 | } 78 | _, err = r.ReadAndDelete([]byte("k1")) 79 | if err != nil { 80 | t.Error("Error in deleting from Badger", err) 81 | } 82 | _, err = r.Read([]byte("k1")) 83 | if err == nil { 84 | t.Error("Expected k1 to be deleted from Badger, but it is still present") 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /db/transactions.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "github.com/dgraph-io/badger/v3" 5 | ) 6 | 7 | // Read returns the value with the corresponding key in the db 8 | func (r *RavelDatabase) Read(key []byte) ([]byte, error) { 9 | var value []byte 10 | 11 | err := r.Conn.View(func(txn *badger.Txn) error { 12 | item, err := txn.Get(key) 13 | if err != nil { 14 | return err 15 | } 16 | 17 | err = item.Value(func(val []byte) error { 18 | value = append([]byte{}, val...)
19 | return nil 20 | }) 21 | if err != nil { 22 | return err 23 | } 24 | 25 | return nil 26 | }) 27 | 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | return value, nil 33 | } 34 | 35 | // Write writes the key and value to BadgerDB 36 | func (r *RavelDatabase) Write(key []byte, val []byte) error { 37 | err := r.Conn.Update(func(txn *badger.Txn) error { 38 | err := txn.Set(key, val) 39 | if err != nil { 40 | return err 41 | } 42 | return nil 43 | }) 44 | 45 | return err 46 | } 47 | 48 | // Delete deletes the key value pair with the corresponding key from BadgerDB 49 | func (r *RavelDatabase) Delete(key []byte) error { 50 | err := r.Conn.Update(func(txn *badger.Txn) error { 51 | err := txn.Delete(key) 52 | if err != nil { 53 | return err 54 | } 55 | return nil 56 | }) 57 | 58 | return err 59 | } 60 | 61 | // ReadAndDelete returns the value for the given key and deletes the key-value pair in the same transaction 62 | func (r *RavelDatabase) ReadAndDelete(key []byte) ([]byte, error) { 63 | 64 | var value []byte 65 | 66 | err := r.Conn.Update(func(txn *badger.Txn) error { 67 | item, err := txn.Get(key) 68 | if err != nil { 69 | return err 70 | } 71 | err = item.Value(func(val []byte) error { 72 | value = append([]byte{}, val...) 73 | return nil 74 | }) 75 | if err != nil { 76 | return err 77 | } 78 | err = txn.Delete(key) 79 | if err != nil { 80 | return err 81 | } 82 | return nil 83 | }) 84 | 85 | if err != nil { 86 | return nil, err 87 | } 88 | 89 | return value, nil 90 | } 91 | -------------------------------------------------------------------------------- /store/stablestore.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/adityameharia/ravel/db" 7 | "github.com/dgraph-io/badger/v3" 8 | ) 9 | 10 | // RavelStableStore implements the raft.StableStore interface. It stores the configuration for raft.Raft 11 | type RavelStableStore struct { 12 | Db *db.RavelDatabase 13 | } 14 | 15 | // NewRavelStableStore creates a new instance of RavelStableStore 16 | func NewRavelStableStore(stableStoreDBPath string) (*RavelStableStore, error) { 17 | var ravelDB db.RavelDatabase 18 | err := ravelDB.Init(stableStoreDBPath) 19 | if err != nil { 20 | log.Fatal("StableStore: Unable to setup new Stable Store") 21 | return nil, err 22 | } 23 | 24 | log.Println("StableStore: Initialised Stable Store") 25 | 26 | return &RavelStableStore{ 27 | Db: &ravelDB, 28 | }, nil 29 | } 30 | 31 | // Set stores a key-value configuration pair in a stable manner.
32 | func (s *RavelStableStore) Set(key []byte, val []byte) error { 33 | return s.Db.Write(key, val) 34 | } 35 | 36 | // Get returns the value for the provided key 37 | func (s *RavelStableStore) Get(key []byte) ([]byte, error) { 38 | val, err := s.Db.Read(key) 39 | if err != nil { 40 | if err.Error() == badger.ErrKeyNotFound.Error() { 41 | val = []byte{} 42 | return val, nil 43 | } else { 44 | log.Fatalln("StableStore: Error retrieving key from db") 45 | } 46 | } 47 | 48 | return val, nil 49 | } 50 | 51 | // SetUint64 sets val as uint64 for the provided key 52 | func (s *RavelStableStore) SetUint64(key []byte, val uint64) error { 53 | return s.Db.Write(key, uint64ToBytes(val)) 54 | } 55 | 56 | // GetUint64 returns the value for the given key 57 | func (s *RavelStableStore) GetUint64(key []byte) (uint64, error) { 58 | valBytes, err := s.Db.Read(key) 59 | 60 | var valUInt uint64 61 | if err != nil { 62 | if err.Error() == badger.ErrKeyNotFound.Error() { 63 | valUInt = 0 64 | return valUInt, nil 65 | } else { 66 | log.Fatalln("StableStore: Error retrieving key from db") 67 | } 68 | } 69 | 70 | valUInt = bytesToUint64(valBytes) 71 | return valUInt, nil 72 | } 73 | -------------------------------------------------------------------------------- /node/join_leave.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | 7 | "github.com/hashicorp/raft" 8 | ) 9 | 10 | 11 | // Join joins the node available at raftAddr with ID nodeID to this node, if this node is the leader 12 | func (n *RavelNode) Join(nodeID, raftAddr string) error { 13 | log.Printf("received join request for remote node %s, raftAddr %s\n", nodeID, raftAddr) 14 | if n.Raft.State() != raft.Leader { 15 | return errors.New("node is not leader") 16 | } 17 | config := n.Raft.GetConfiguration() 18 | if err := config.Error(); err != nil { 19 | log.Printf("failed to get raft configuration\n") 20 | return err 21 | } 22 | for _, server := range config.Configuration().Servers { 23 | if server.ID == raft.ServerID(nodeID) { 24 | log.Printf("node %s already joined raft cluster\n", nodeID) 25 | return errors.New("node already exists") 26 | } 27 | } 28 | 29 | f := n.Raft.AddVoter(raft.ServerID(nodeID), raft.ServerAddress(raftAddr), 0, 0) 30 | if err := f.Error(); err != nil { 31 | return err 32 | } 33 | log.Printf("node %s at %s joined successfully\n", nodeID, raftAddr) 34 | return nil 35 | } 36 | 37 | // Leave removes the node with nodeID from the cluster, if this node is the leader 38 | func (n *RavelNode) Leave(nodeID string) error { 39 | log.Printf("received leave request for remote node %s", nodeID) 40 | if n.Raft.State() != raft.Leader { 41 | return errors.New("node is not leader") 42 | } 43 | 44 | config := n.Raft.GetConfiguration() 45 | 46 | if err := config.Error(); err != nil { 47 | log.Printf("failed to get raft configuration\n") 48 | return err 49 | } 50 | 51 | for _, server := range config.Configuration().Servers { 52 | if server.ID == raft.ServerID(nodeID) { 53 | f := n.Raft.RemoveServer(server.ID, 0, 0) 54 | if err := f.Error(); err != nil { 55 | log.Printf("failed to remove server %s\n", nodeID) 56 | return err 57 | } 58 | 59 | log.Printf("node %s left successfully\n", nodeID) 60 | return nil 61 | } 62 | } 63 | 64 | log.Printf("node %s does not exist in raft group\n", nodeID) 65 | return errors.New("node does not exist in the cluster") 66 | } 67 | -------------------------------------------------------------------------------- /node_server/server.go:
-------------------------------------------------------------------------------- 1 | package node_server 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/adityameharia/ravel/RavelNodePB" 8 | "github.com/adityameharia/ravel/node" 9 | "github.com/hashicorp/raft" 10 | ) 11 | 12 | // Server implements the methods exposed via gRPC for a RavelNode 13 | type Server struct { 14 | Node *node.RavelNode 15 | } 16 | 17 | // Join joins the passed in node to this node 18 | func (s *Server) Join(ctx context.Context, req *RavelNodePB.Node) (*RavelNodePB.Void, error) { 19 | err := s.Node.Join(req.NodeId, req.RaftAddress) 20 | if err != nil { 21 | return nil, err 22 | } 23 | 24 | return &RavelNodePB.Void{}, nil 25 | } 26 | 27 | // Leave removes the passed in node from this leader 28 | func (s *Server) Leave(ctx context.Context, req *RavelNodePB.Node) (*RavelNodePB.Void, error) { 29 | err := s.Node.Leave(req.NodeId) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | return &RavelNodePB.Void{}, nil 35 | } 36 | 37 | // IsLeader returns a boolean if this node is a leader or not 38 | func (s *Server) IsLeader(ctx context.Context, v *RavelNodePB.Void) (*RavelNodePB.Boolean, error) { 39 | if s.Node.Raft.State() != raft.Leader { 40 | return &RavelNodePB.Boolean{Leader: false}, nil 41 | } 42 | return &RavelNodePB.Boolean{Leader: true}, nil 43 | 44 | } 45 | 46 | // Run executes the operation specified in "req", it can be {get, set, delete, getAndDelete} 47 | func (s *Server) Run(ctx context.Context, req *RavelNodePB.Command) (*RavelNodePB.Response, error) { 48 | switch req.Operation { 49 | case "get": 50 | val, err := s.Node.Get(req.Key) 51 | if err != nil { 52 | return nil, err 53 | } 54 | return &RavelNodePB.Response{ 55 | Msg: "get successful", 56 | Data: val}, nil 57 | case "set": 58 | err := s.Node.Set(req.Key, req.Value) 59 | if err != nil { 60 | return nil, err 61 | } 62 | return &RavelNodePB.Response{Msg: "set successful", Data: []byte{}}, nil 63 | case "delete": 64 | err := s.Node.Delete(req.Key) 65 | if err != nil { 66 | return nil, err 67 | } 68 | return &RavelNodePB.Response{Msg: "delete successful", Data: []byte{}}, nil 69 | case "getAndDelete": 70 | val, err := s.Node.GetAndDelete(req.Key) 71 | if err != nil { 72 | return nil, err 73 | } 74 | return &RavelNodePB.Response{ 75 | Msg: "get and delete successful", 76 | Data: val}, nil 77 | default: 78 | return nil, errors.New("invalid operation") 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "net" 6 | "os" 7 | "sync" 8 | 9 | "github.com/adityameharia/ravel/RavelClusterAdminPB" 10 | "github.com/urfave/cli" 11 | ) 12 | 13 | var RavelClusterAdminGRPCAddr string 14 | var RavelClusterAdminHTTPAddr string 15 | var RavelClusterAdminBackupPath string 16 | 17 | var consistentHash RavelConsistentHash 18 | var clusterAdminGRPCServer *ClusterAdminGRPCServer 19 | var clusterAdminHTTPServer *ClusterAdminHTTPServer 20 | 21 | func startAdminGRPCServer() { 22 | log.Println("Starting Ravel Cluster Admin gRPC Server on", RavelClusterAdminGRPCAddr) 23 | listener, err := net.Listen("tcp", RavelClusterAdminGRPCAddr) 24 | if err != nil { 25 | log.Fatalf("Error in starting Ravel Cluster Admin TCP Listener: %v\n", err) 26 | } 27 | 28 | clusterAdminGRPCServer = NewClusterAdminGRPCServer() 29 | 
RavelClusterAdminPB.RegisterRavelClusterAdminServer(clusterAdminGRPCServer.Server, clusterAdminGRPCServer) 30 | log.Fatal(clusterAdminGRPCServer.Server.Serve(listener)) 31 | } 32 | 33 | func startAdminHTTPServer() { 34 | log.Println("Starting Ravel Cluster Admin HTTP Server on", RavelClusterAdminHTTPAddr) 35 | clusterAdminHTTPServer = NewClusterAdminHTTPServer() 36 | clusterAdminHTTPServer.Router.Run(RavelClusterAdminHTTPAddr) 37 | } 38 | 39 | func main() { 40 | app := cli.NewApp() 41 | app.Name = "Ravel Cluster Admin" 42 | app.Usage = "Start a Ravel Cluster Admin server" 43 | app.Flags = []cli.Flag{ 44 | cli.StringFlag{ 45 | Name: "http", 46 | Required: true, 47 | Usage: "Address (with port) on which the HTTP server should listen", 48 | Destination: &RavelClusterAdminHTTPAddr, 49 | }, 50 | cli.StringFlag{ 51 | Name: "grpc", 52 | Required: true, 53 | Usage: "Address (with port) on which the gRPC server should listen", 54 | Destination: &RavelClusterAdminGRPCAddr, 55 | }, 56 | cli.StringFlag{ 57 | Name: "backupPath", 58 | Required: true, 59 | Usage: "Path where the Cluster Admin should persist its state on disk", 60 | Destination: &RavelClusterAdminBackupPath, 61 | }, 62 | } 63 | 64 | app.Action = func(c *cli.Context) { 65 | consistentHash.Init(271, 40, 1.2) 66 | go startAdminGRPCServer() 67 | go startAdminHTTPServer() 68 | 69 | wg := sync.WaitGroup{} 70 | wg.Add(1) 71 | wg.Wait() 72 | } 73 | 74 | err := app.Run(os.Args) 75 | if err != nil { 76 | log.Fatal(err) 77 | } 78 | 79 | } 80 | -------------------------------------------------------------------------------- /cmd/ravel_node/helper.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | "github.com/adityameharia/ravel/RavelClusterAdminPB" 8 | "github.com/adityameharia/ravel/RavelNodePB" 9 | "google.golang.org/grpc" 10 | ) 11 | 12 | // RequestJoinToClusterLeader makes a new gRPC client and sends a join request to the leading replica in the cluster 13 | func RequestJoinToClusterLeader(leaderGRPCAddr string, node *RavelNodePB.Node) error { 14 | conn, err := grpc.Dial(leaderGRPCAddr, grpc.WithInsecure()) 15 | if err != nil { 16 | log.Fatalf("cannot connect to server: %v", err) 17 | return err 18 | } 19 | 20 | defer conn.Close() 21 | 22 | client := RavelNodePB.NewRavelNodeClient(conn) 23 | _, err = client.Join(context.Background(), node) 24 | 25 | if err != nil && err.Error() == "rpc error: code = Unknown desc = node already exists" { 26 | return nil 27 | } else if err != nil { 28 | log.Println(err.Error()) 29 | log.Fatalf("join request failed: %v", err) 30 | return err 31 | } 32 | 33 | return nil 34 | } 35 | 36 | // RequestLeaveToClusterLeader makes a new gRPC client and sends a leave request to the leading replica in the cluster 37 | func RequestLeaveToClusterLeader(leaderGRPCAddr string, node *RavelNodePB.Node) error { 38 | conn, err := grpc.Dial(leaderGRPCAddr, grpc.WithInsecure()) 39 | if err != nil { 40 | log.Fatalf("Error in RequestLeaveToClusterLeader: %v", err) 41 | return err 42 | } 43 | defer conn.Close() 44 | client := RavelNodePB.NewRavelNodeClient(conn) 45 | 46 | _, err = client.Leave(context.Background(), node) 47 | if err != nil { 48 | log.Printf("leave request failed: %v", err) 49 | return err 50 | } 51 | 52 | return nil 53 | } 54 | 55 | // RequestLeaderUpdateToCluster makes a new gRPC client and 56 | // updates the admin server in case there is a change in leader in its cluster 57 | func
RequestLeaderUpdateToCluster(clusterAdminGRPCAddr string, node *RavelClusterAdminPB.Node) error { 58 | conn, err := grpc.Dial(clusterAdminGRPCAddr, grpc.WithInsecure()) 59 | if err != nil { 60 | log.Fatalf("Error in RequestLeaderUpdateToCluster: %v", err) 61 | return err 62 | } 63 | defer conn.Close() 64 | client := RavelClusterAdminPB.NewRavelClusterAdminClient(conn) 65 | resp, err := client.UpdateClusterLeader(context.TODO(), node) 66 | if err != nil { 67 | log.Fatalf("Error in RequestLeaderUpdateToCluster: %v", err) 68 | return err 69 | } 70 | 71 | log.Println(resp.Data) 72 | return nil 73 | } 74 | -------------------------------------------------------------------------------- /fsm/README.md: -------------------------------------------------------------------------------- 1 | # fsm 2 | 3 | > FSM stands for Finite State Machine 4 | 5 | The fsm package contains the `RavelFSM` and `Snapshot` structs that implement 6 | the [FSM](https://pkg.go.dev/github.com/hashicorp/raft#FSM) interface and 7 | the [FSMSnapshot](https://pkg.go.dev/github.com/hashicorp/raft#FSMSnapshot) interface respectively, which are required 8 | for constructing a [new raft node](https://pkg.go.dev/github.com/hashicorp/raft#NewRaft) in 9 | the [hashicorp/raft](https://pkg.go.dev/github.com/hashicorp/raft) library. 10 | 11 | These interfaces are responsible for actually "applying" the log entries to our BadgerDB instance in a persistent 12 | manner. 13 | 14 | ## RavelFSM 15 | 16 | This struct implements the [FSM](https://pkg.go.dev/github.com/hashicorp/raft#FSM) interface. This interface makes use 17 | of the replicated log to "apply" logs, take snapshots and "restore" from snapshots. 18 | 19 | - The `Apply` function is invoked once a log entry is committed and is responsible for storing the data to the BadgerDB 20 | instance. It checks the type of operation it is required to perform and then calls the corresponding function from 21 | the `db` package. 22 | 23 | - The `Snapshot` function is used for log compaction. It returns a `Snapshot` object (a struct which implements the 24 | FSMSnapshot interface) which is used to save a snapshot of the FSM at that point in time, i.e. it takes the state of 25 | the DB and creates a copy of the state so that previous logs can be deleted. 26 | 27 | - The `Restore` function is used to restore an FSM from a snapshot i.e. restore the state of the DB to when the snapshot 28 | was taken thereby discarding all the previous states. This can be done very easily using BadgerDB's inbuilt functions: 29 | first we drop all the current keys using the DropAll function and then call the Load function to restore the snapshot 30 | from backup. 31 | 32 | ## Snapshot 33 | 34 | This struct implements the [FSMSnapshot](https://pkg.go.dev/github.com/hashicorp/raft#FSMSnapshot) interface. `Snapshot` 35 | is returned by an FSM in response to a Snapshot call. This interface is responsible for dumping the current state of the 36 | DB, i.e. the snapshot of the FSM, to the [WriteCloser](https://pkg.go.dev/github.com/hashicorp/raft#SnapshotSink) 37 | sink, which is stored by the raft library. 38 | 39 | - `Persist` is the main function which dumps the snapshot to the sink. We do this by simply taking a backup of our db 40 | and writing it into the sink. 41 | 42 | - `Release` is called when we are finished with the snapshot and all the data has been safely dumped.
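To make the `Apply` path concrete, here is a hedged sketch of how a command reaches the FSM. The `LogData` struct and its JSON encoding are the ones used by this package; the `raftNode` handle (a `*raft.Raft`) is an assumption for illustration.

```go
// Encode a "set" command the way RavelFSM.Apply expects it.
cmd := LogData{Operation: "set", Key: []byte("user:42"), Value: []byte("alice")}
payload, err := json.Marshal(cmd)
if err != nil {
	log.Fatal(err)
}

// Submit it through Raft; once committed, Apply runs on every replica.
future := raftNode.Apply(payload, 500*time.Millisecond)
if err := future.Error(); err != nil {
	log.Fatal(err) // e.g. this node is not the leader
}
```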
43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/consistent_hash_backup.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/adityameharia/ravel/db" 6 | "github.com/dgraph-io/badger/v3" 7 | "log" 8 | ) 9 | 10 | // ReadPartitionOwnersFromDisk reads the RavelConsistentHash.PartitionOwners map from the disk 11 | func ReadPartitionOwnersFromDisk(badgerPath string) (map[uint64]clusterID, error) { 12 | log.Printf("Reading PartitionOwners from path: %v\n", badgerPath) 13 | var backupDB db.RavelDatabase 14 | defer backupDB.Close() 15 | err := backupDB.Init(badgerPath + "/partition_owners") 16 | if err != nil { 17 | return nil, err 18 | } 19 | 20 | partitionOwnerMap := make(map[uint64]clusterID) 21 | err = backupDB.Conn.View(func(txn *badger.Txn) error { 22 | it := txn.NewIterator(badger.DefaultIteratorOptions) 23 | defer it.Close() 24 | 25 | for it.Rewind(); it.Valid(); it.Next() { 26 | item := it.Item() 27 | 28 | err = item.Value(func(val []byte) error { 29 | partitionOwnerMap[bytesToUint64(item.Key())] = clusterID(string(val)) 30 | return nil 31 | }) 32 | 33 | if err != nil { 34 | return err 35 | } 36 | } 37 | 38 | return nil 39 | }) 40 | 41 | if err != nil { 42 | return nil, err 43 | } 44 | return partitionOwnerMap, nil 45 | } 46 | 47 | // ReadPartitionKeyMapFromDisk reads the RavelConsistentHash.PartitionKeyMap from disk 48 | func ReadPartitionKeyMapFromDisk(badgerPath string) (map[uint64]keySet, error) { 49 | log.Printf("Reading PartitionKeyMap from path: %v\n", badgerPath) 50 | var backupDB db.RavelDatabase 51 | defer backupDB.Close() 52 | err := backupDB.Init(badgerPath + "/partition_keymap") 53 | if err != nil { 54 | return nil, err 55 | } 56 | 57 | partitionKeyMap := make(map[uint64]keySet) 58 | err = backupDB.Conn.View(func(txn *badger.Txn) error { 59 | it := txn.NewIterator(badger.DefaultIteratorOptions) 60 | defer it.Close() 61 | 62 | type kSetJSON struct { 63 | Keys []string `json:"keys"` 64 | } 65 | 66 | for it.Rewind(); it.Valid(); it.Next() { 67 | item := it.Item() 68 | err = item.Value(func(val []byte) error { 69 | var k kSetJSON 70 | err := json.Unmarshal(val, &k) 71 | if err != nil { 72 | return err 73 | } 74 | 75 | kSet := newKeySet() 76 | for _, key := range k.Keys { 77 | kSet.Insert([]byte(key)) 78 | } 79 | 80 | partitionKeyMap[bytesToUint64(item.Key())] = kSet 81 | return nil 82 | }) 83 | 84 | if err != nil { 85 | return err 86 | } 87 | } 88 | 89 | return nil 90 | }) 91 | 92 | if err != nil { 93 | return nil, err 94 | } 95 | return partitionKeyMap, nil 96 | } 97 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | //import ( 4 | // "context" 5 | // "errors" 6 | // "log" 7 | // 8 | // "github.com/adityameharia/ravel/RavelClusterAdminPB" 9 | // "github.com/adityameharia/ravel/RavelClusterPB" 10 | // "google.golang.org/grpc" 11 | //) 12 | // 13 | //func (s *server) GetLeader(ctx context.Context, clusterID *RavelClusterAdminPB.Cluster) (*RavelClusterAdminPB.Response, error) { 14 | // mu.Lock() 15 | // defer mu.Unlock() 16 | // if len(serverList[clusterID.ClusterID]) == 0 { 17 | // return &RavelClusterAdminPB.Response{Data: ""}, nil 18 | // } 19 | // 20 | // conn, err := grpc.Dial(leader[clusterID.ClusterID].gRPCAddress, 
grpc.WithInsecure()) 21 | // if err != nil { 22 | // log.Printf("cannot connect to server %v", err) 23 | // } 24 | // 25 | // v := &RavelClusterPB.Void{} 26 | // client := RavelClusterPB.NewRavelClusterClient(conn) 27 | // res, err := client.IsLeader(context.Background(), v) 28 | // if err != nil { 29 | // log.Printf("IsLeader request failed: %v", err) 30 | // } 31 | // 32 | // if res.Leader == true { 33 | // return &RavelClusterAdminPB.Response{Data: leader[clusterID.ClusterID].gRPCAddress}, nil 34 | // } else { 35 | // for _, rep := range serverList[clusterID.ClusterID] { 36 | // 37 | // conn, err = grpc.Dial(rep.gRPCAddress, grpc.WithInsecure()) 38 | // if err != nil { 39 | // log.Printf("cannot connect to server %v", err) 40 | // } 41 | // 42 | // v := &RavelClusterPB.Void{} 43 | // client = RavelClusterPB.NewRavelClusterClient(conn) 44 | // res, err = client.IsLeader(context.Background(), v) 45 | // if err != nil { 46 | // log.Printf("IsLeader request failed: %v", err) 47 | // } 48 | // 49 | // if res.Leader == true { 50 | // return &RavelClusterAdminPB.Response{Data: rep.gRPCAddress}, nil 51 | // } 52 | // } 53 | // 54 | // return nil, errors.New("No leader found") 55 | // } 56 | //} 57 | // 58 | //func (s *server) AddToReplicaMap(ctx context.Context, clusterID *RavelClusterAdminPB.Node) (*RavelClusterAdminPB.Void, error) { 59 | // rep := Replica{ 60 | // NodeID: clusterID.NodeID, 61 | // gRPCAddress: clusterID.GRPCaddress, 62 | // } 63 | // serverList[clusterID.ClusterID] = append(serverList[clusterID.ClusterID], rep) 64 | // return &RavelClusterAdminPB.Void{}, nil 65 | //} 66 | // 67 | //func (s *server) RemoveReplicaFromMap(ctx context.Context, clusterID *RavelClusterAdminPB.Node) (*RavelClusterAdminPB.Void, error) { 68 | // for i, r := range serverList[clusterID.ClusterID] { 69 | // if r.NodeID == clusterID.NodeID { 70 | // serverList[clusterID.ClusterID] = RemoveIndex(serverList[clusterID.ClusterID], i) 71 | // return &RavelClusterAdminPB.Void{}, nil 72 | // } 73 | // } 74 | // return nil, errors.New("Replica not found in the server list") 75 | //} 76 | // 77 | //func RemoveIndex(sl []Replica, index int) []Replica { 78 | // return append(sl[:index], sl[index+1:]...) 79 | //} 80 | -------------------------------------------------------------------------------- /cmd/ravel_node/README.md: -------------------------------------------------------------------------------- 1 | # Ravel Node 2 | 3 | The `ravel_node` package implements the replicas which eventually form a cluster. It is responsible for storing the 4 | key-value pairs in a persistent manner and communicates with other servers in the cluster for fault tolerance. 5 | 6 | Since we use the [Raft consensus algorithm](https://raft.github.io/) for fault tolerance, each replica server can be the 7 | leader or a follower, i.e. if the leader is down a follower can become the leader and handle requests from the admin 8 | servers. This server communicates only with the admin servers and other replicas in its cluster. 9 | 10 | - It has a gRPC server to talk to the other replicas and the admin server. 11 | - If it's a follower 12 | + It coordinates with the leader for log replication and snapshots. 13 | - If it's a leader 14 | + Sends heartbeats to all its followers to prevent re-election. 15 | + Handles post, get and delete requests from the admin server. 16 | + Handles new replicas joining and leaving the cluster.
17 | 18 | - It is also a gRPC client 19 | + It sends heartbeats to all its followers to prevent re-election. 20 | + It is responsible for sending out append entries to its followers. 21 | + It informs the admin in case there is a change in leadership. 22 | 23 | ## Data flow for a POST/DELETE request from admin 24 | 25 | - The leader receives the request from the admin and sends out append entries to its followers. 26 | - On receiving a response from a majority of its followers it sends out an "apply" message to its followers to actually 27 | execute the request. 28 | - If the majority of the followers respond without any errors, the leader sends an "ok" message to the admin. 29 | 30 | ## Data flow for a GET request from admin 31 | 32 | - A get request is pretty simple to handle and doesn't involve any followers. 33 | - The leader receives the request from the admin, finds the key on disk and responds with the key-value pair. 34 | 35 | ## Data flow for adding new replicas 36 | 37 | - When a new replica is bootstrapped, it sends a request to the admin, which adds the server to its list of all servers in 38 | the system and responds with the cluster the replica has to join and the address of its leader node. 39 | - The replica then sends a join request to the leader node. 40 | - On successfully joining the cluster, the leader sends its latest snapshot and log entries to the node for replication. 41 | 42 | ## Data flow for removing a replica 43 | 44 | - When a replica is taken down, it sends a request to the admin to get the gRPC address of its current leader. 45 | - The admin removes it from the list of all servers in the system and responds with the address of the leader. 46 | - The replica then sends a leave request to the leader for successful removal. 47 | - If the replica suffers a failure, it abruptly leaves the cluster and can easily rejoin it later. 48 | 49 | 50 | -------------------------------------------------------------------------------- /fsm/fsm.go: -------------------------------------------------------------------------------- 1 | package fsm 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "log" 7 | 8 | "github.com/adityameharia/ravel/db" 9 | "github.com/hashicorp/raft" 10 | ) 11 | 12 | // RavelFSM implements the raft.FSM interface. It represents the Finite State Machine in a RavelNode. The individual 13 | // logs are "applied" on the FSM on receiving the commit RPC from the leader.
14 | type RavelFSM struct { 15 | Db *db.RavelDatabase 16 | } 17 | 18 | // LogData represents the structure of individual commands on the Logs 19 | type LogData struct { 20 | Operation string `json:"Operation"` 21 | Key []byte `json:"Key"` 22 | Value []byte `json:"Value"` 23 | } 24 | 25 | // NewFSM creates an instance of RavelFSM 26 | func NewFSM(path string) (*RavelFSM, error) { 27 | var r db.RavelDatabase 28 | err := r.Init(path) 29 | if err != nil { 30 | log.Fatal("FSM: Unable to Setup Database") 31 | return nil, err 32 | } 33 | 34 | log.Println("FSM: Initialised FSM") 35 | 36 | return &RavelFSM{ 37 | Db: &r, 38 | }, nil 39 | } 40 | 41 | // Get returns the value for the provided key 42 | func (f *RavelFSM) Get(key []byte) ([]byte, error) { 43 | log.Println("FSM: Get") 44 | v, err := f.Db.Read(key) 45 | if err != nil { 46 | return []byte{}, err 47 | } 48 | return v, nil 49 | } 50 | 51 | // GetAndDelete returns the value for the provided key and deletes it from the database 52 | func (f *RavelFSM) GetAndDelete(key []byte) ([]byte, error) { 53 | log.Println("FSM: GetAndDelete") 54 | v, err := f.Db.ReadAndDelete(key) 55 | if err != nil { 56 | return []byte{}, err 57 | } 58 | return v, nil 59 | } 60 | 61 | // Snapshot returns a raft.FSMSnapshot which captures a snapshot of the data at that moment in time 62 | func (f *RavelFSM) Snapshot() (raft.FSMSnapshot, error) { 63 | log.Println("FSM: Snapshot") 64 | return &Snapshot{ 65 | Db: f.Db, 66 | }, nil 67 | } 68 | 69 | // Apply commits the given log to the database. 70 | func (f *RavelFSM) Apply(l *raft.Log) interface{} { 71 | log.Println("FSM: Apply") 72 | var d LogData 73 | 74 | err := json.Unmarshal(l.Data, &d) 75 | if err != nil { 76 | log.Fatal("FSM: Unable to get data from log") 77 | } 78 | 79 | if d.Operation == "set" { 80 | return f.Db.Write(d.Key, d.Value) 81 | } else { 82 | return f.Db.Delete(d.Key) 83 | } 84 | 85 | } 86 | 87 | // Restore restores the data from the last captured snapshot 88 | func (f *RavelFSM) Restore(r io.ReadCloser) error { 89 | log.Println("FSM: Restore") 90 | err := f.Db.Conn.DropAll() 91 | if err != nil { 92 | log.Fatal("FSM: Unable to delete previous state") 93 | return err 94 | } 95 | err = f.Db.Conn.Load(r, 100) 96 | if err != nil { 97 | log.Fatal("FSM: Unable to restore Snapshot") 98 | return err 99 | } 100 | return nil 101 | } 102 | 103 | // Close will close the connection to the internal db.RavelDatabase instance 104 | func (f *RavelFSM) Close() { 105 | log.Println("FSM: Close") 106 | f.Db.Close() 107 | } 108 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/README.md: -------------------------------------------------------------------------------- 1 | # Ravel Cluster Admin 2 | 3 | The `ravel_cluster_admin` package implements the cluster admin server, which is responsible for handling the HTTP 4 | requests from the client for reads and writes, and for running a gRPC server to talk to the nodes in the clusters. It has the following components: 5 | 6 | - A gRPC server to talk to nodes 7 | - Handles new nodes joining the cluster 8 | - It chooses and assigns a replica to the correct cluster 9 | - Manages the list of all the nodes in the system and the leader of each cluster. 10 | - An HTTP server that the client can talk to for reading and writing key value pairs. 11 | 12 | The cluster admin server has the logic to shard the keys & values using consistent hashing and then distribute them 13 | across clusters. It keeps track of the partition IDs assigned to the various members and of which keys go into which
It keeps track of the partition IDs assigned to the various clusters and of which keys go into which
14 | partition.
15 | 
16 | ## Data Flow for POST request from client
17 | 
18 | + The HTTP server receives the key and value from the client through a POST request.
19 | + Then it checks whether any clusters are available.
20 | + The key is then passed through the hash function to get the partition on which it will be stored.
21 | + The value is then converted into a byte array, and then a gRPC request is sent to the cluster leader.
22 | + The key, along with the data type of the value, is then stored in a map for future reference.
23 | 
24 | ## Data Flow for GET request from client
25 | 
26 | + The HTTP server receives the key from the client through a GET request.
27 | + It passes the key through the hash function to get the cluster on which the key is stored.
28 | + A gRPC request is sent to the cluster leader to retrieve the value as a byte array.
29 | + The value is then converted to its original data type using the map that stores each key's data type, and then
30 | the response is sent to the client.
31 | 
32 | ## Data flow on adding new clusters/replicas
33 | 
34 | - When you add a new replica, it is added to the cluster with the least number of replicas. The gRPC server responds
35 | with the required info for the replica node to join the designated cluster.
36 | - When a node is started as the leader of a new cluster, the gRPC server responds with the new cluster info.
37 | - Once the leader node for the newly created cluster has properly started up, it requests the gRPC server on the
38 | admin for data relocation.
39 | - As adding a new cluster changes the configuration of the hash ring in consistent hashing, the location at which
40 | the existing keys are hashed also changes. So the keys that will now get hashed to this new cluster are
41 | relocated there.
42 | - This hashing is done using [consistent hashing with bounded loads](https://arxiv.org/abs/1608.01350) to ensure that
43 | every cluster gets an even distribution of load with minimal relocation.
44 | - Data relocation also takes place when clusters are removed.
45 | 
46 | The `protoc` command to compile the protocol buffers file:
47 | ```shell
48 | protoc --go_out=. \
49 |     --go-grpc_out=require_unimplemented_servers=false:. \
50 |     cmd/ravel_cluster_admin/cluster_admin.proto
51 | ```
-------------------------------------------------------------------------------- /store/logstore.go: --------------------------------------------------------------------------------
1 | package store
2 | 
3 | import (
4 | 	"log"
5 | 
6 | 	"github.com/adityameharia/ravel/db"
7 | 	"github.com/dgraph-io/badger/v3"
8 | 	"github.com/hashicorp/raft"
9 | )
10 | 
11 | // RavelLogStore implements the raft.LogStore interface. The functions define the operations possible on the Logs,
12 | // which are maintained by every instance of raft.Raft
13 | type RavelLogStore struct {
14 | 	Db *db.RavelDatabase
15 | }
16 | 
17 | // NewRavelLogStore creates a new instance of RavelLogStore, logDBPath specifies the directory path to
18 | // initialise the internal db.RavelDatabase instance.
An entry in the Logs is of type raft.Log 19 | func NewRavelLogStore(logDBPath string) (*RavelLogStore, error) { 20 | var ravelDB db.RavelDatabase 21 | err := ravelDB.Init(logDBPath) 22 | if err != nil { 23 | log.Fatalf("NewRavelLogStore: %v\n", err) 24 | return nil, err 25 | } 26 | 27 | log.Println("LogStore: Initialised Log Store") 28 | return &RavelLogStore{ 29 | Db: &ravelDB, 30 | }, nil 31 | } 32 | 33 | // FirstIndex returns the Index property of the first entry in the Logs. 34 | func (r *RavelLogStore) FirstIndex() (uint64, error) { 35 | var key uint64 36 | err := r.Db.Conn.View(func(txn *badger.Txn) error { 37 | opts := badger.DefaultIteratorOptions 38 | it := txn.NewIterator(opts) 39 | defer it.Close() 40 | 41 | it.Rewind() 42 | if it.Valid() { 43 | firstKey := it.Item().Key() 44 | key = bytesToUint64(firstKey) 45 | } else { 46 | key = 0 47 | } 48 | 49 | return nil 50 | }) 51 | 52 | if err != nil { 53 | log.Fatalf("RavelLogStore.FirstIndex: %v\n", err) 54 | return 0, err 55 | } 56 | 57 | return key, nil 58 | } 59 | 60 | // LastIndex returns the Index property of the last entry in the Logs 61 | func (r *RavelLogStore) LastIndex() (uint64, error) { 62 | var key uint64 63 | err := r.Db.Conn.View(func(txn *badger.Txn) error { 64 | opts := badger.DefaultIteratorOptions 65 | opts.Reverse = true 66 | it := txn.NewIterator(opts) 67 | defer it.Close() 68 | 69 | it.Rewind() 70 | if it.Valid() { 71 | firstKey := it.Item().Key() 72 | key = bytesToUint64(firstKey) 73 | } else { 74 | key = 0 75 | } 76 | 77 | return nil 78 | }) 79 | 80 | if err != nil { 81 | log.Fatalf("RavelLogStore.LastIndex: %v\n", err) 82 | return 0, err 83 | } 84 | return key, nil 85 | } 86 | 87 | // GetLog writes the log on position "index" to the pointer "raftLog" 88 | func (r *RavelLogStore) GetLog(index uint64, raftLog *raft.Log) error { 89 | key := uint64ToBytes(index) 90 | val, err := r.Db.Read(key) 91 | if err != nil { 92 | log.Printf("RavelLogStore.GetLog: %v\n", err) 93 | return err 94 | } 95 | 96 | return bytesToRaftLog(val, raftLog) 97 | } 98 | 99 | // StoreLog inserts a single raft.Log at the end of the Logs 100 | func (r *RavelLogStore) StoreLog(l *raft.Log) error { 101 | return r.StoreLogs([]*raft.Log{l}) 102 | } 103 | 104 | // StoreLogs inserts []raft.Log at the end of the Logs 105 | func (r *RavelLogStore) StoreLogs(logs []*raft.Log) error { 106 | for _, l := range logs { 107 | key := uint64ToBytes(l.Index) 108 | val := raftLogToBytes(*l) 109 | 110 | err := r.Db.Write(key, val) 111 | if err != nil { 112 | log.Fatalf("RavelLogStore.StoreLogs: %v\n", err) 113 | return err 114 | } 115 | } 116 | 117 | return nil 118 | } 119 | 120 | // DeleteRange deletes the entries from "min" to "max" position (both inclusive) in the Logs 121 | func (r *RavelLogStore) DeleteRange(min uint64, max uint64) error { 122 | minKey := uint64ToBytes(min) 123 | 124 | txn := r.Db.Conn.NewTransaction(true) 125 | defer txn.Discard() 126 | 127 | opts := badger.DefaultIteratorOptions 128 | it := txn.NewIterator(opts) 129 | it.Seek(minKey) 130 | 131 | for { 132 | key := it.Item().Key() 133 | 134 | if bytesToUint64(key) > max { 135 | break 136 | } 137 | 138 | err := r.Db.Delete(key) 139 | if err != nil { 140 | return err 141 | } 142 | 143 | it.Next() 144 | } 145 | 146 | if err := txn.Commit(); err != nil { 147 | log.Fatalf("RavelLogStore.DeleteRange: %v\n", err) 148 | return err 149 | } 150 | return nil 151 | } 152 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: 
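A closing note on the log store above: log indices are stored as 8-byte big-endian keys, so BadgerDB's byte-wise key order matches numeric index order, which is what lets `FirstIndex` and `LastIndex` use plain forward and reverse iterators. A minimal, standalone sketch of that invariant (illustrative names, not from the source):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	a := make([]byte, 8)
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(a, 255) // 0x00000000000000ff
	binary.BigEndian.PutUint64(b, 256) // 0x0000000000000100
	// Big-endian encodings compare byte-wise in the same order as the
	// integers themselves, so Badger iterates logs in index order.
	fmt.Println(bytes.Compare(a, b) < 0) // prints: true
}
```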
-------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make via issue, 4 | email, or any other method with the owners of this repository before making a change. 5 | 6 | Please note we have a code of conduct, please follow it in all your interactions with the project. 7 | 8 | ## Pull Request Process 9 | 10 | 1. Ensure any install or build dependencies are removed before the end of the layer when doing a 11 | build. 12 | 2. Update the README.md with details of changes to the interface, this includes new environment 13 | variables, exposed ports, useful file locations and container parameters. 14 | 3. Increase the version numbers in any examples files and the README.md to the new version that this 15 | Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/). 16 | 4. You may merge the Pull Request in once you have the sign-off of two other developers, or if you 17 | do not have permission to do that, you may request the second reviewer to merge it for you. 18 | 19 | ## Code of Conduct 20 | 21 | ### Our Pledge 22 | 23 | In the interest of fostering an open and welcoming environment, we as 24 | contributors and maintainers pledge to making participation in our project and 25 | our community a harassment-free experience for everyone, regardless of age, body 26 | size, disability, ethnicity, gender identity and expression, level of experience, 27 | nationality, personal appearance, race, religion, or sexual identity and 28 | orientation. 29 | 30 | ### Our Standards 31 | 32 | Examples of behavior that contributes to creating a positive environment 33 | include: 34 | 35 | * Using welcoming and inclusive language 36 | * Being respectful of differing viewpoints and experiences 37 | * Gracefully accepting constructive criticism 38 | * Focusing on what is best for the community 39 | * Showing empathy towards other community members 40 | 41 | Examples of unacceptable behavior by participants include: 42 | 43 | * The use of sexualized language or imagery and unwelcome sexual attention or 44 | advances 45 | * Trolling, insulting/derogatory comments, and personal or political attacks 46 | * Public or private harassment 47 | * Publishing others' private information, such as a physical or electronic 48 | address, without explicit permission 49 | * Other conduct which could reasonably be considered inappropriate in a 50 | professional setting 51 | 52 | ### Our Responsibilities 53 | 54 | Project maintainers are responsible for clarifying the standards of acceptable 55 | behavior and are expected to take appropriate and fair corrective action in 56 | response to any instances of unacceptable behavior. 57 | 58 | Project maintainers have the right and responsibility to remove, edit, or 59 | reject comments, commits, code, wiki edits, issues, and other contributions 60 | that are not aligned to this Code of Conduct, or to ban temporarily or 61 | permanently any contributor for other behaviors that they deem inappropriate, 62 | threatening, offensive, or harmful. 63 | 64 | ### Scope 65 | 66 | This Code of Conduct applies both within project spaces and in public spaces 67 | when an individual is representing the project or its community. 
Examples of 68 | representing a project or community include using an official project e-mail 69 | address, posting via an official social media account, or acting as an appointed 70 | representative at an online or offline event. Representation of a project may be 71 | further defined and clarified by project maintainers. 72 | 73 | ### Enforcement 74 | 75 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 76 | reported by contacting the project team at [INSERT EMAIL ADDRESS]. All 77 | complaints will be reviewed and investigated and will result in a response that 78 | is deemed necessary and appropriate to the circumstances. The project team is 79 | obligated to maintain confidentiality with regard to the reporter of an incident. 80 | Further details of specific enforcement policies may be posted separately. 81 | 82 | Project maintainers who do not follow or enforce the Code of Conduct in good 83 | faith may face temporary or permanent repercussions as determined by other 84 | members of the project's leadership. 85 | 86 | ### Attribution 87 | 88 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 89 | available at [http://contributor-covenant.org/version/1/4][version] 90 | 91 | [homepage]: http://contributor-covenant.org 92 | [version]: http://contributor-covenant.org/version/1/4/ -------------------------------------------------------------------------------- /node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net" 7 | "os" 8 | "time" 9 | 10 | "github.com/adityameharia/ravel/fsm" 11 | "github.com/adityameharia/ravel/store" 12 | "github.com/hashicorp/raft" 13 | ) 14 | 15 | // RavelNode represents a node inside the cluster. 16 | type RavelNode struct { 17 | Fsm *fsm.RavelFSM 18 | Raft *raft.Raft 19 | } 20 | 21 | // Open creates initialises a raft.Raft instance 22 | func (n *RavelNode) Open(enableSingle bool, localID string, badgerPath string, BindAddr string) (*raft.Raft, *fsm.RavelFSM, error) { 23 | log.Println(enableSingle) 24 | log.Println("RavelNode: Opening node") 25 | var raftNode RavelNode 26 | 27 | //setting up Raft Config 28 | config := raft.DefaultConfig() 29 | config.LocalID = raft.ServerID(localID) 30 | 31 | log.Println(config) 32 | 33 | // Setup Raft communication 34 | addr, err := net.ResolveTCPAddr("tcp", BindAddr) 35 | if err != nil { 36 | log.Fatal("RavelNode: Unable to resolve TCP Bind Address") 37 | return nil, nil, err 38 | } 39 | log.Println(addr) 40 | transport, err := raft.NewTCPTransport(BindAddr, addr, 5, 2*time.Second, os.Stderr) 41 | if err != nil { 42 | log.Println(err) 43 | log.Fatal("RavelNode: Unable to create NewTCPTransport") 44 | return nil, nil, err 45 | } 46 | 47 | // Create the snapshot store. This allows the Raft to truncate the log. 
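// (Note: raft.NewFileSnapshotStore takes the base directory, the number of
// snapshots to retain (1 here), and an io.Writer for its log output.)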
48 | snapshot, err := raft.NewFileSnapshotStore(badgerPath+"/snapshot", 1, os.Stderr) 49 | if err != nil { 50 | log.Fatal("RavelNode: Unable to create SnapShot store") 51 | return nil, nil, err 52 | } 53 | 54 | //creating log and stable store 55 | var logStore raft.LogStore 56 | var stableStore raft.StableStore 57 | 58 | logStore, err = store.NewRavelLogStore(badgerPath + "/logs") 59 | if err != nil { 60 | log.Fatal("RavelNode: Unable to create Log store") 61 | return nil, nil, err 62 | } 63 | 64 | f, err := fsm.NewFSM(badgerPath + "/fsm") 65 | if err != nil { 66 | log.Fatal("RavelNode: Unable to create FSM") 67 | return nil, nil, err 68 | } 69 | 70 | stableStore, err = store.NewRavelStableStore(badgerPath + "/stable") 71 | if err != nil { 72 | log.Fatal("RavelNode: Unable to create Stable store") 73 | return nil, nil, err 74 | } 75 | 76 | raftNode.Fsm = f 77 | 78 | r, err := raft.NewRaft(config, f, logStore, stableStore, snapshot, transport) 79 | if err != nil { 80 | log.Println(err) 81 | log.Fatal("RavelNode: Unable initialise raft node") 82 | 83 | return nil, nil, err 84 | } 85 | 86 | raftNode.Raft = r 87 | if enableSingle { 88 | configuration := raft.Configuration{ 89 | Servers: []raft.Server{ 90 | { 91 | ID: config.LocalID, 92 | Address: transport.LocalAddr(), 93 | }, 94 | }, 95 | } 96 | r.BootstrapCluster(configuration) 97 | } 98 | 99 | return r, f, nil 100 | } 101 | 102 | // Get returns the value for the given key 103 | func (n *RavelNode) Get(key []byte) ([]byte, error) { 104 | if n.Raft.State() != raft.Leader { 105 | log.Println("RavelNode: Request sent to non leading replica") 106 | return []byte{}, raft.ErrNotLeader 107 | } 108 | return n.Fsm.Get(key) 109 | } 110 | 111 | // GetAndDelete returns the value for the given key 112 | func (n *RavelNode) GetAndDelete(key []byte) ([]byte, error) { 113 | if n.Raft.State() != raft.Leader { 114 | log.Println("RavelNode: Request sent to non leading replica") 115 | return []byte{}, raft.ErrNotLeader 116 | } 117 | return n.Fsm.GetAndDelete(key) 118 | } 119 | 120 | // Set sets the key with the value 121 | func (n *RavelNode) Set(key []byte, value []byte) error { 122 | if n.Raft.State() != raft.Leader { 123 | log.Println("RavelNode: Request sent to non leading replica") 124 | return raft.ErrNotLeader 125 | } 126 | 127 | var data fsm.LogData 128 | 129 | data.Operation = "set" 130 | data.Key = key 131 | data.Value = value 132 | 133 | dataBuffer, err := json.Marshal(data) 134 | if err != nil { 135 | log.Fatal("RavelNode: Unable to marhsal key value") 136 | return err 137 | } 138 | 139 | f := n.Raft.Apply(dataBuffer, 3*time.Second) 140 | 141 | return f.Error() 142 | } 143 | 144 | // Delete deletes the entry with given key 145 | func (n *RavelNode) Delete(key []byte) error { 146 | if n.Raft.State() != raft.Leader { 147 | log.Println("RavelNode: Request sent to non leading replica") 148 | return raft.ErrNotLeader 149 | } 150 | 151 | var data fsm.LogData 152 | 153 | data.Operation = "delete" 154 | data.Key = key 155 | data.Value = []byte{} 156 | 157 | dataBuffer, err := json.Marshal(data) 158 | if err != nil { 159 | log.Fatal("RavelNode: Unable to marhsal key value") 160 | return err 161 | } 162 | 163 | f := n.Raft.Apply(dataBuffer, 3*time.Second) 164 | 165 | return f.Error() 166 | } 167 | -------------------------------------------------------------------------------- /cmd/ravel_node/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "os" 7 | 8 
| "github.com/adityameharia/ravel/db" 9 | "github.com/urfave/cli/v2" 10 | 11 | "github.com/adityameharia/ravel/RavelClusterAdminPB" 12 | "github.com/google/uuid" 13 | ) 14 | 15 | // Config is the struct containing the configuration details of the node 16 | type Config struct { 17 | ClusterID string // ClusterID is ID of th cluster the node is a part of 18 | NodeID string // NodeID is the nodes unique ID 19 | StorageDir string // StorageDir is the Data Directory for Raft 20 | GRPCAddr string // GRPCAddr is the Address (with port) at which gRPC server is started 21 | RaftInternalAddr string // RaftInternalAddr is the Raft internal communication address with port 22 | AdminGRPCAddr string // AdminGRPCAddr is the address at which the cluster admin gRPC server is hosted 23 | IsLeader bool // IsLeader is a bool defining whether the node is a leader or not 24 | } 25 | 26 | var nodeConfig Config 27 | var adminClient RavelClusterAdminPB.RavelClusterAdminClient 28 | var conf db.RavelDatabase 29 | var dirname string 30 | var yamlFile string 31 | 32 | func init() { 33 | nodeConfig.NodeID = uuid.New().String() 34 | var err error 35 | dirname, err = os.UserHomeDir() 36 | if err != nil { 37 | log.Fatal(err) 38 | } 39 | // nodeConfig.GRPCAddr = "0.0.0.0:50000" 40 | // nodeConfig.RaftInternalAddr = "localhost:60000" 41 | // nodeConfig.StorageDir = "/tmp/ravel_node" 42 | 43 | // flag.StringVar(&nodeConfig.StorageDir, "storageDir", "", "Storage Dir") 44 | // flag.StringVar(&nodeConfig.GRPCAddr, "gRPCAddr", "", "GRPC Addr of this node") 45 | // flag.StringVar(&nodeConfig.RaftInternalAddr, "raftAddr", "", "Raft Internal address for this node") 46 | // flag.StringVar(&nodeConfig.AdminGRPCAddr, "adminRPCAddr", "", "GRPC address of the cluster admin") 47 | // flag.BoolVar(&nodeConfig.IsLeader, "leader", false, "Register this node as a new leader or not") 48 | } 49 | 50 | func main() { 51 | app := &cli.App{} 52 | app.Name = "Ravel Replica" 53 | app.Usage = "Manage a Ravel replica server" 54 | app.Commands = []*cli.Command{ 55 | { 56 | Name: "start", 57 | Usage: "Starts a replica server", 58 | Flags: []cli.Flag{ 59 | &cli.StringFlag{ 60 | Name: "storagedir", 61 | Usage: "Storage Dir", 62 | Value: dirname + "/ravel_replica", 63 | Aliases: []string{"s"}, 64 | Destination: &nodeConfig.StorageDir}, 65 | &cli.StringFlag{ 66 | Name: "grpcaddr", 67 | Usage: "GRPC Addr of this replica", 68 | Value: "localhost:50000", 69 | Aliases: []string{"g"}, 70 | Destination: &nodeConfig.GRPCAddr, 71 | }, 72 | &cli.StringFlag{ 73 | Name: "raftaddr", 74 | Usage: "Raft Internal address for this replica", 75 | Value: "localhost:60000", 76 | Aliases: []string{"r"}, 77 | Destination: &nodeConfig.RaftInternalAddr, 78 | }, 79 | &cli.StringFlag{ 80 | Name: "adminrpcaddr", 81 | Usage: "GRPC address of the cluster admin", 82 | Value: "localhost:42000", 83 | Aliases: []string{"a"}, 84 | Destination: &nodeConfig.AdminGRPCAddr, 85 | }, 86 | &cli.StringFlag{ 87 | Name: "yaml", 88 | Usage: "yaml file containing the config", 89 | Value: "", 90 | Aliases: []string{"y"}, 91 | Destination: &yamlFile, 92 | }, 93 | &cli.BoolFlag{ 94 | Name: "leader", 95 | Usage: "Register this node as a new leader or not", 96 | Value: false, 97 | Aliases: []string{"l"}, 98 | Destination: &nodeConfig.IsLeader, 99 | }, 100 | }, 101 | Action: func(c *cli.Context) error { 102 | setUpConf() 103 | startReplica() 104 | return nil 105 | }, 106 | }, 107 | { 108 | Name: "kill", 109 | Usage: "Removes and deletes all the data in the cluster", 110 | Flags: []cli.Flag{ 111 | 
&cli.StringFlag{ 112 | Name: "storagedir", 113 | Usage: "Storage Dir", 114 | Value: dirname + "/ravel_replica", 115 | Required: true, 116 | Aliases: []string{"s"}, 117 | Destination: &nodeConfig.StorageDir}, 118 | }, 119 | Action: func(c *cli.Context) error { 120 | setUpConf() 121 | killCluster(-1) 122 | return nil 123 | }, 124 | }, 125 | } 126 | err := app.Run(os.Args) 127 | if err != nil { 128 | log.Fatal(err) 129 | } 130 | } 131 | 132 | func setUpConf() { 133 | err := conf.Init(nodeConfig.StorageDir + "/config") 134 | if err != nil { 135 | log.Println(err) 136 | log.Fatal("Conf: Unable to Setup Database") 137 | } 138 | config, err := conf.Read([]byte("config")) 139 | if err != nil { 140 | log.Println("Error reading config details from file") 141 | } 142 | 143 | err = json.Unmarshal(config, &nodeConfig) 144 | if err != nil { 145 | log.Println("Error reading config details from file") 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/http_server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/json" 6 | "math" 7 | "strconv" 8 | 9 | "github.com/gin-gonic/gin" 10 | ) 11 | 12 | // float64ToByte converts a float64 to a []byte 13 | func float64ToByte(f float64) []byte { 14 | var buf [8]byte 15 | binary.BigEndian.PutUint64(buf[:], math.Float64bits(f)) 16 | return buf[:] 17 | } 18 | 19 | // byteToFloat64 converts a []byte to float64 20 | func byteToFloat64(bytes []byte) float64 { 21 | bits := binary.BigEndian.Uint64(bytes) 22 | float := math.Float64frombits(bits) 23 | return float 24 | } 25 | 26 | // keyType represents the data type of the value of a key 27 | type keyType string 28 | 29 | // ClusterAdminHTTPServer is the entity that represents the HTTP server on the Cluster Admin 30 | type ClusterAdminHTTPServer struct { 31 | Router *gin.Engine 32 | KeyTypeMap map[string]keyType 33 | } 34 | 35 | // NewClusterAdminHTTPServer constructs and returns a ClusterAdminHTTPServer object 36 | func NewClusterAdminHTTPServer() *ClusterAdminHTTPServer { 37 | var server ClusterAdminHTTPServer 38 | server.Router = gin.Default() 39 | server.setupPaths() 40 | server.KeyTypeMap = make(map[string]keyType) 41 | return &server 42 | } 43 | 44 | // setupPaths sets up HTTP endpoints for ClusterAdminHTTPServer.Router 45 | func (s *ClusterAdminHTTPServer) setupPaths() { 46 | // Data for a "put" request 47 | type putRequest struct { 48 | Key string `json:"key"` 49 | Val interface{} `json:"val"` 50 | } 51 | 52 | // Data for a "get" request 53 | type getRequest struct { 54 | Key string `json:"key"` 55 | } 56 | 57 | s.Router.GET("/", func(c *gin.Context) { 58 | c.JSON(200, "HTTP Server for Ravel Cluster Admin") 59 | }) 60 | 61 | // /delete deletes locates the cluster with the key and deletes the corresponding key-value pair 62 | s.Router.POST("/delete", func(c *gin.Context) { 63 | var req getRequest 64 | if err := c.Bind(&req); err != nil { 65 | c.JSON(400, gin.H{"error": err.Error()}) 66 | return 67 | } 68 | 69 | // get cluster id -> read key and value from that cluster 70 | clusterID := consistentHash.LocateKey([]byte(req.Key)) 71 | err := clusterAdminGRPCServer.DeleteKey([]byte(req.Key), clusterID.String()) 72 | if err != nil { 73 | c.JSON(404, gin.H{"error": err.Error()}) 74 | return 75 | } 76 | c.JSON(200, gin.H{"msg": "ok"}) 77 | }) 78 | 79 | // /get reads the key from the request, locates the cluster with that key 80 | // reads the data 
from that cluster, decodes it into the appropriate type and returns it 81 | s.Router.POST("/get", func(c *gin.Context) { 82 | var req getRequest 83 | if err := c.Bind(&req); err != nil { 84 | c.JSON(400, gin.H{"error": err.Error()}) 85 | return 86 | } 87 | 88 | // get cluster id -> read key and value from that cluster 89 | clusterID := consistentHash.LocateKey([]byte(req.Key)) 90 | val, err := clusterAdminGRPCServer.ReadKey([]byte(req.Key), clusterID.String()) 91 | if err != nil { 92 | c.JSON(404, gin.H{"error": err.Error()}) 93 | return 94 | } 95 | 96 | // check for data type of value and send response accordingly 97 | switch clusterAdminHTTPServer.KeyTypeMap[req.Key] { 98 | case "float": 99 | c.JSON(200, gin.H{"key": req.Key, "val": byteToFloat64(val)}) 100 | case "string": 101 | c.JSON(200, gin.H{"key": req.Key, "val": string(val)}) 102 | case "json": 103 | var r interface{} 104 | err := json.Unmarshal(val, &r) 105 | if err != nil { 106 | c.JSON(500, gin.H{"error": err.Error()}) 107 | } 108 | 109 | c.JSON(200, gin.H{"key": req.Key, "val": r}) 110 | case "bool": 111 | boolValue, err := strconv.ParseBool(string(val)) 112 | if err != nil { 113 | c.JSON(500, gin.H{"error": err.Error()}) 114 | } 115 | 116 | c.JSON(200, gin.H{"key": req.Key, "val": boolValue}) 117 | } 118 | }) 119 | 120 | // /put reads the key and value from the request, updates the type map, locates the cluster for it 121 | // and writes it to the leader node of that cluster 122 | s.Router.POST("/put", func(c *gin.Context) { 123 | var req putRequest 124 | if err := c.Bind(&req); err != nil { 125 | c.JSON(400, gin.H{"error": err.Error()}) 126 | return 127 | } 128 | 129 | clusterID := consistentHash.LocateKey([]byte(req.Key)) 130 | 131 | switch req.Val.(type) { 132 | case float64: 133 | clusterAdminHTTPServer.KeyTypeMap[req.Key] = "float" 134 | v := float64ToByte(req.Val.(float64)) 135 | err := clusterAdminGRPCServer.WriteKeyValue([]byte(req.Key), v, clusterID.String()) 136 | if err != nil { 137 | c.JSON(500, gin.H{"error": err.Error()}) 138 | return 139 | } 140 | 141 | c.JSON(200, gin.H{"msg": "ok"}) 142 | case string: 143 | clusterAdminHTTPServer.KeyTypeMap[req.Key] = "string" 144 | err := clusterAdminGRPCServer.WriteKeyValue([]byte(req.Key), []byte(req.Val.(string)), clusterID.String()) 145 | if err != nil { 146 | c.JSON(500, gin.H{"error": err.Error()}) 147 | return 148 | } 149 | 150 | c.JSON(200, gin.H{"msg": "ok"}) 151 | case map[string]interface{}: // json object 152 | clusterAdminHTTPServer.KeyTypeMap[req.Key] = "json" 153 | jsonBytes, err := json.Marshal(req.Val) 154 | err = clusterAdminGRPCServer.WriteKeyValue([]byte(req.Key), jsonBytes, clusterID.String()) 155 | if err != nil { 156 | c.JSON(500, gin.H{"error": err.Error()}) 157 | return 158 | } 159 | 160 | c.JSON(200, gin.H{"msg": "ok"}) 161 | case bool: // convert bool to string and write that as a []byte 162 | clusterAdminHTTPServer.KeyTypeMap[req.Key] = "bool" 163 | boolToString := strconv.FormatBool(req.Val.(bool)) 164 | err := clusterAdminGRPCServer.WriteKeyValue([]byte(req.Key), []byte(boolToString), clusterID.String()) 165 | if err != nil { 166 | c.JSON(500, gin.H{"error": err.Error()}) 167 | return 168 | } 169 | 170 | c.JSON(200, gin.H{"msg": "ok"}) 171 | } 172 | }) 173 | } 174 | -------------------------------------------------------------------------------- /cmd/ravel_node/start.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 
| "log" 9 | "net" 10 | "os" 11 | "os/signal" 12 | "time" 13 | 14 | "github.com/adityameharia/ravel/RavelClusterAdminPB" 15 | "github.com/adityameharia/ravel/RavelNodePB" 16 | "github.com/adityameharia/ravel/node" 17 | "github.com/adityameharia/ravel/node_server" 18 | "github.com/hashicorp/raft" 19 | "google.golang.org/grpc" 20 | "gopkg.in/yaml.v2" 21 | ) 22 | 23 | func startReplica() { 24 | if yamlFile != "" { 25 | err := readConf(yamlFile) 26 | if err != nil { 27 | log.Fatal("Unable to get the yaml file") 28 | } 29 | } 30 | 31 | if nodeConfig.AdminGRPCAddr == "" { 32 | log.Fatal("adminRPCAddr has not been initialized") 33 | } 34 | 35 | adminConn, err := grpc.Dial(nodeConfig.AdminGRPCAddr, grpc.WithInsecure()) 36 | if err != nil { 37 | log.Fatal("Error in connecting to the Admin gRPC Server: ", err) 38 | } 39 | defer adminConn.Close() 40 | 41 | adminClient = RavelClusterAdminPB.NewRavelClusterAdminClient(adminConn) 42 | 43 | var ravelNode node.RavelNode 44 | 45 | _, err = conf.Read([]byte("config")) 46 | if err == nil { 47 | ravelNode.Raft, ravelNode.Fsm, err = ravelNode.Open(false, nodeConfig.NodeID, nodeConfig.StorageDir, nodeConfig.RaftInternalAddr) 48 | if err != nil { 49 | log.Fatal(err) 50 | } 51 | 52 | } else { 53 | 54 | if nodeConfig.IsLeader { 55 | ravelCluster, err := adminClient.JoinAsClusterLeader(context.TODO(), &RavelClusterAdminPB.Node{ 56 | NodeId: nodeConfig.NodeID, // id of this node 57 | GrpcAddress: nodeConfig.GRPCAddr, // grpc address of this node 58 | RaftAddress: nodeConfig.RaftInternalAddr, 59 | ClusterId: "", // cluster id is unknown thus empty 60 | }) 61 | 62 | if err != nil { 63 | log.Fatal("Error in JoinAsClusterLeader: ", err) 64 | } else { 65 | nodeConfig.ClusterID = ravelCluster.ClusterId 66 | ravelNode.Raft, ravelNode.Fsm, err = ravelNode.Open(nodeConfig.IsLeader, nodeConfig.NodeID, nodeConfig.StorageDir, nodeConfig.RaftInternalAddr) 67 | if err != nil { 68 | log.Fatal(err) 69 | } 70 | 71 | // this node is the leader 72 | } 73 | } else { 74 | ravelCluster, err := adminClient.JoinExistingCluster(context.TODO(), &RavelClusterAdminPB.Node{ 75 | NodeId: nodeConfig.NodeID, 76 | GrpcAddress: nodeConfig.GRPCAddr, 77 | RaftAddress: nodeConfig.RaftInternalAddr, 78 | ClusterId: "", 79 | }) 80 | 81 | if err != nil { 82 | log.Fatal("Error in JoinExistingCluster: ", err) 83 | } else { 84 | log.Println("Cluster leader is: ", ravelCluster.LeaderGrpcAddress) 85 | nodeConfig.ClusterID = ravelCluster.ClusterId 86 | ravelNode.Raft, ravelNode.Fsm, err = ravelNode.Open(nodeConfig.IsLeader, nodeConfig.NodeID, nodeConfig.StorageDir, nodeConfig.RaftInternalAddr) 87 | if err != nil { 88 | log.Fatal(err) 89 | } 90 | 91 | err = RequestJoinToClusterLeader(ravelCluster.LeaderGrpcAddress, &RavelNodePB.Node{ 92 | NodeId: nodeConfig.NodeID, 93 | ClusterId: nodeConfig.ClusterID, 94 | GrpcAddress: nodeConfig.GRPCAddr, 95 | RaftAddress: nodeConfig.RaftInternalAddr, 96 | }) 97 | if err != nil { 98 | log.Fatal(err) 99 | } 100 | } 101 | } 102 | } 103 | //updates the admin in case there is a change in leader 104 | go func() { 105 | leaderChange := <-ravelNode.Raft.LeaderCh() 106 | log.Println("Sending leader change req") 107 | if leaderChange { 108 | err := RequestLeaderUpdateToCluster(nodeConfig.AdminGRPCAddr, &RavelClusterAdminPB.Node{ 109 | NodeId: nodeConfig.NodeID, 110 | GrpcAddress: nodeConfig.GRPCAddr, 111 | RaftAddress: nodeConfig.RaftInternalAddr, 112 | ClusterId: nodeConfig.ClusterID, 113 | }) 114 | 115 | if err != nil { 116 | log.Fatal(err) 117 | } 118 | } 119 | }() 120 | 121 | 
onSigInterrupt(ravelNode.Raft) 122 | 123 | replicaCount := len(ravelNode.Raft.GetConfiguration().Configuration().Servers) 124 | 125 | byteConfig, err := json.Marshal(nodeConfig) 126 | if err != nil { 127 | killCluster(replicaCount) 128 | log.Fatal("cannot write config to file") 129 | } 130 | 131 | err = conf.Write([]byte("config"), byteConfig) 132 | if err != nil { 133 | killCluster(replicaCount) 134 | log.Fatal("cannot write config to file") 135 | } 136 | 137 | //starts the gRPC server 138 | listener, err := net.Listen("tcp", nodeConfig.GRPCAddr) 139 | if err != nil { 140 | killCluster(replicaCount) 141 | log.Fatal("Error in starting TCP server: ", err) 142 | } 143 | log.Printf("Starting TCP Server on %v for gRPC\n", nodeConfig.GRPCAddr) 144 | 145 | grpcServer := grpc.NewServer() 146 | RavelNodePB.RegisterRavelNodeServer(grpcServer, &node_server.Server{ 147 | Node: &ravelNode, 148 | }) 149 | 150 | if nodeConfig.IsLeader { 151 | go initiateDataRelocation() 152 | } 153 | 154 | err = grpcServer.Serve(listener) 155 | } 156 | 157 | func readConf(path string) error { 158 | buf, err := ioutil.ReadFile(path) 159 | if err != nil { 160 | return err 161 | } 162 | err = yaml.Unmarshal(buf, &nodeConfig) 163 | if err != nil { 164 | return fmt.Errorf("in file %q: %v", path, err) 165 | } 166 | return nil 167 | } 168 | 169 | func initiateDataRelocation() { 170 | time.Sleep(5 * time.Second) 171 | resp, err := adminClient.InitiateDataRelocation(context.TODO(), &RavelClusterAdminPB.Cluster{ 172 | ClusterId: nodeConfig.ClusterID, 173 | }) 174 | 175 | if err != nil { 176 | log.Fatal(err.Error()) 177 | } 178 | log.Println(resp.Data) 179 | } 180 | 181 | func onSigInterrupt(ra *raft.Raft) { 182 | 183 | ch := make(chan os.Signal, 1) 184 | signal.Notify(ch, os.Interrupt) 185 | go func() { 186 | <-ch 187 | replicaCount := len(ra.GetConfiguration().Configuration().Servers) 188 | log.Println(replicaCount) 189 | if replicaCount == 2 { 190 | log.Println("Permanently deleting server dince only 2 servers are left") 191 | killCluster(replicaCount) 192 | } else { 193 | os.Exit(1) 194 | } 195 | }() 196 | } 197 | -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/consistent_hashing.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/adityameharia/ravel/db" 6 | "github.com/buraksezer/consistent" 7 | "github.com/cespare/xxhash" 8 | "log" 9 | "math/rand" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | // clusterID is a string that is used to communicate the ID of a cluster. 
It implements the consistent.Member interface 15 | type clusterID string 16 | 17 | func (c clusterID) String() string { 18 | return string(c) 19 | } 20 | 21 | // hash implements the consistent.Hasher interface 22 | type hash struct{} 23 | 24 | func (h hash) Sum64(data []byte) uint64 { 25 | return xxhash.Sum64(data) 26 | } 27 | 28 | // ketSet implements a simple Set to store unique values of the keys 29 | type keySet struct { 30 | m map[string]struct{} 31 | } 32 | 33 | func newKeySet() keySet { 34 | var k keySet 35 | k.m = make(map[string]struct{}) 36 | return k 37 | } 38 | 39 | func (k keySet) Insert(key []byte) { 40 | k.m[string(key)] = struct{}{} 41 | } 42 | 43 | func (k keySet) Delete(key []byte) { 44 | delete(k.m, string(key)) 45 | } 46 | 47 | func (k keySet) All() [][]byte { 48 | var all [][]byte 49 | for key := range k.m { 50 | all = append(all, []byte(key)) 51 | } 52 | 53 | return all 54 | } 55 | 56 | func (k keySet) AllStrings() []string { 57 | var all []string 58 | for key := range k.m { 59 | all = append(all, key) 60 | } 61 | 62 | return all 63 | } 64 | 65 | // RavelConsistentHash is the main entity that implements the logic for sharding and data relocation 66 | type RavelConsistentHash struct { 67 | mutex sync.Mutex 68 | config consistent.Config 69 | PartitionKeyMap map[uint64]keySet // PartitionID -> []keySet 70 | PartitionOwners map[uint64]clusterID // PartitionID -> clusterID 71 | HashRing *consistent.Consistent 72 | } 73 | 74 | // Init initialises a RavelConsistentHash object 75 | func (rch *RavelConsistentHash) Init(partitionCount int, replicationFactor int, load float64) { 76 | rch.mutex.Lock() 77 | defer rch.mutex.Unlock() 78 | rand.Seed(time.Now().UTC().UnixNano()) 79 | 80 | diskPartitionKeyMap, err := ReadPartitionKeyMapFromDisk(RavelClusterAdminBackupPath) 81 | if err != nil { 82 | log.Fatal("Error in rch.Init:", err.Error()) 83 | } 84 | 85 | diskPartitionOwnersMap, err := ReadPartitionOwnersFromDisk(RavelClusterAdminBackupPath) 86 | if err != nil { 87 | log.Fatal("Error in rch.Init:", err.Error()) 88 | } 89 | 90 | rch.PartitionKeyMap = diskPartitionKeyMap 91 | rch.PartitionOwners = diskPartitionOwnersMap 92 | 93 | if len(rch.PartitionOwners) == 0 && len(rch.PartitionKeyMap) == 0 { 94 | for i := 0; i < partitionCount; i++ { 95 | rch.PartitionOwners[uint64(i)] = "" 96 | rch.PartitionKeyMap[uint64(i)] = newKeySet() 97 | } 98 | } 99 | 100 | rch.config = consistent.Config{ 101 | PartitionCount: partitionCount, 102 | ReplicationFactor: replicationFactor, 103 | Load: load, 104 | Hasher: hash{}, 105 | } 106 | 107 | rch.HashRing = consistent.New(nil, rch.config) 108 | } 109 | 110 | // Reset resets the RavelConsistentHash object to its initial state 111 | func (rch *RavelConsistentHash) Reset(partitionCount int, replicationFactor int, load float64) { 112 | rch.mutex.Lock() 113 | defer rch.mutex.Unlock() 114 | rand.Seed(time.Now().UTC().UnixNano()) 115 | 116 | rch.PartitionKeyMap = make(map[uint64]keySet) 117 | rch.PartitionOwners = make(map[uint64]clusterID) 118 | 119 | for i := 0; i < partitionCount; i++ { 120 | rch.PartitionOwners[uint64(i)] = "" 121 | rch.PartitionKeyMap[uint64(i)] = newKeySet() 122 | } 123 | 124 | rch.config = consistent.Config{ 125 | PartitionCount: partitionCount, 126 | ReplicationFactor: replicationFactor, 127 | Load: load, 128 | Hasher: hash{}, 129 | } 130 | 131 | rch.HashRing = consistent.New(nil, rch.config) 132 | } 133 | 134 | // BackupToDisk writes the RavelConsistentHash.PartitionKeyMap and RavelConsistentHash.PartitionOwners maps to disk using 
BadgerDB 135 | func (rch *RavelConsistentHash) BackupToDisk(badgerPath string) error { 136 | log.Println("Running Backup") 137 | var backupDB db.RavelDatabase 138 | 139 | err := backupDB.Init(badgerPath + "/partition_owners") 140 | if err != nil { 141 | return err 142 | } 143 | for partID, cluster := range rch.PartitionOwners { 144 | err = backupDB.Write(uint64ToBytes(partID), []byte(cluster.String())) 145 | } 146 | 147 | backupDB.Close() 148 | 149 | err = backupDB.Init(badgerPath + "/partition_keymap") 150 | if err != nil { 151 | return err 152 | } 153 | 154 | type kSetJSON struct { 155 | Keys []string `json:"keys"` 156 | } 157 | for partID, kSet := range rch.PartitionKeyMap { 158 | kSetJSONBytes, err := json.Marshal(kSetJSON{ 159 | Keys: kSet.AllStrings(), 160 | }) 161 | 162 | if err != nil { 163 | log.Println("Error in RavelConsistentHash.BackupOnDisk:", err.Error()) 164 | } 165 | 166 | err = backupDB.Write(uint64ToBytes(partID), kSetJSONBytes) 167 | } 168 | 169 | backupDB.Close() 170 | return nil 171 | } 172 | 173 | // AddCluster adds a new cluster to the ring, as a result some partitions are relocated to this new cluster, 174 | // the keys in the relocated partition are looked up in the RavelConsistentHash.PartitionKeyMap and are moved 175 | // to the new cluster 176 | func (rch *RavelConsistentHash) AddCluster(clusterName clusterID) { 177 | log.Println("Len Partition") 178 | log.Println("Adding Cluster:", clusterName) 179 | rch.mutex.Lock() 180 | defer rch.mutex.Unlock() 181 | 182 | rch.HashRing.Add(clusterName) 183 | rch.relocatePartitions() // dont do this when this is the very first one 184 | 185 | err := rch.BackupToDisk(RavelClusterAdminBackupPath) 186 | if err != nil { 187 | log.Println("Error in Backing Up to Disk:", err.Error()) 188 | } 189 | } 190 | 191 | // DeleteCluster deletes a cluster from the owners map 192 | func (rch *RavelConsistentHash) DeleteCluster(clusterName clusterID) { 193 | log.Println("Removing Cluster:", clusterName) 194 | rch.mutex.Lock() 195 | defer rch.mutex.Unlock() 196 | 197 | rch.HashRing.Remove(clusterName.String()) 198 | rch.relocatePartitions() 199 | 200 | err := rch.BackupToDisk(RavelClusterAdminBackupPath) 201 | if err != nil { 202 | log.Println("Error in Backing Up to Disk:", err.Error()) 203 | } 204 | } 205 | 206 | // LocateKey returns the cluster for a given key 207 | func (rch *RavelConsistentHash) LocateKey(key []byte) consistent.Member { 208 | rch.mutex.Lock() 209 | defer rch.mutex.Unlock() 210 | 211 | partID := rch.HashRing.FindPartitionID(key) 212 | rch.PartitionKeyMap[uint64(partID)].Insert(key) 213 | 214 | err := rch.BackupToDisk(RavelClusterAdminBackupPath) 215 | if err != nil { 216 | log.Println("Error in Backing Up to Disk:", err.Error()) 217 | } 218 | 219 | return rch.HashRing.LocateKey(key) 220 | } 221 | 222 | func (rch *RavelConsistentHash) DeleteKey(key []byte) { 223 | rch.mutex.Lock() 224 | defer rch.mutex.Unlock() 225 | 226 | partID := rch.HashRing.FindPartitionID(key) 227 | rch.PartitionKeyMap[uint64(partID)].Delete(key) 228 | 229 | err := rch.BackupToDisk(RavelClusterAdminBackupPath) 230 | if err != nil { 231 | log.Println("Error in Backing Up to Disk:", err.Error()) 232 | } 233 | } 234 | 235 | // relocatePartitions checks for owner changes and then relocates the keys in that partition to the new owner 236 | func (rch *RavelConsistentHash) relocatePartitions() { 237 | log.Println("Relocating Partitions") 238 | for partID, owner := range rch.PartitionOwners { 239 | newOwner := rch.HashRing.GetPartitionOwner(int(partID)) 240 | 
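		// Only partitions whose owner changed with the ring update are
		// touched: each of their keys is read and deleted from the old owner
		// cluster and written to the new one.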
if newOwner != owner { 241 | // relocate this partID to newOwner 242 | keys := rch.PartitionKeyMap[partID].All() 243 | 244 | for i := 0; i < len(keys); i++ { 245 | log.Printf("Relocating key: %v from cluster: %v to cluster: %v\n", string(keys[i]), owner.String(), newOwner.String()) 246 | val, err := clusterAdminGRPCServer.ReadKeyAndDelete(keys[i], owner.String()) 247 | if err != nil { 248 | log.Println(err) 249 | } 250 | 251 | err = clusterAdminGRPCServer.WriteKeyValue(keys[i], val, newOwner.String()) 252 | if err != nil { 253 | log.Println("Yo:", err) 254 | } 255 | } 256 | 257 | rch.PartitionOwners[partID] = clusterID(newOwner.String()) 258 | } 259 | } 260 | } -------------------------------------------------------------------------------- /cmd/ravel_cluster_admin/grpc_server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "log" 7 | "math" 8 | "sync" 9 | 10 | "github.com/adityameharia/ravel/RavelClusterAdminPB" 11 | "github.com/adityameharia/ravel/RavelNodePB" 12 | "github.com/google/uuid" 13 | "google.golang.org/grpc" 14 | ) 15 | 16 | // clusterInfo holds the information to represent a cluster 17 | type clusterInfo struct { 18 | LeaderNode *RavelClusterAdminPB.Node 19 | ReplicaCount uint64 20 | } 21 | 22 | // ClusterAdminGRPCServer is the entity that implements the gRPC server for the Cluster Admin 23 | type ClusterAdminGRPCServer struct { 24 | mutex sync.Mutex 25 | ClusterLeaderMap map[string]clusterInfo 26 | Server *grpc.Server 27 | } 28 | 29 | // NewClusterAdminGRPCServer constructs and returns a ClusterAdminGRPCServer object 30 | func NewClusterAdminGRPCServer() *ClusterAdminGRPCServer { 31 | var newServer ClusterAdminGRPCServer 32 | newServer.ClusterLeaderMap = make(map[string]clusterInfo) 33 | newServer.Server = grpc.NewServer() 34 | return &newServer 35 | } 36 | 37 | // JoinExistingCluster picks the cluster with the least number of replicas and returns information about that cluster 38 | func (s *ClusterAdminGRPCServer) JoinExistingCluster(ctx context.Context, node *RavelClusterAdminPB.Node) (*RavelClusterAdminPB.Cluster, error) { 39 | s.mutex.Lock() 40 | defer s.mutex.Unlock() 41 | 42 | log.Println("Join Existing Cluster: Request from", node.GrpcAddress) 43 | var minReplicaClusterID string = "" 44 | var minReplicaCount uint64 = math.MaxUint64 45 | for id, cInfo := range s.ClusterLeaderMap { 46 | if cInfo.ReplicaCount < minReplicaCount { 47 | minReplicaClusterID = id 48 | minReplicaCount = cInfo.ReplicaCount 49 | } 50 | } 51 | 52 | if minReplicaClusterID == "" { 53 | return nil, errors.New("no clusters found") 54 | } 55 | 56 | cInfo := s.ClusterLeaderMap[minReplicaClusterID] 57 | cInfo.ReplicaCount += 1 58 | s.ClusterLeaderMap[minReplicaClusterID] = cInfo 59 | 60 | return &RavelClusterAdminPB.Cluster{ 61 | ClusterId: minReplicaClusterID, 62 | LeaderGrpcAddress: s.ClusterLeaderMap[minReplicaClusterID].LeaderNode.GrpcAddress, 63 | LeaderRaftAddress: s.ClusterLeaderMap[minReplicaClusterID].LeaderNode.RaftAddress, 64 | }, nil 65 | } 66 | 67 | // JoinAsClusterLeader creates a new cluster adds "node" as the cluster leader, 68 | // this also adds a member in the RavelConsistentHash entity 69 | func (s *ClusterAdminGRPCServer) JoinAsClusterLeader(ctx context.Context, node *RavelClusterAdminPB.Node) (*RavelClusterAdminPB.Cluster, error) { 70 | log.Println("JoinAsClusterLeader: Request from", node.GrpcAddress) 71 | newClusterID := uuid.New().String() 72 | 73 | s.mutex.Lock() 74 | 
s.ClusterLeaderMap[newClusterID] = clusterInfo{node, 1} 75 | s.mutex.Unlock() 76 | 77 | log.Println("Adding", node.GrpcAddress, "as a new clusterID with ID:", newClusterID) 78 | 79 | return &RavelClusterAdminPB.Cluster{ 80 | ClusterId: newClusterID, 81 | LeaderGrpcAddress: node.GrpcAddress, // same as the node that sent the request 82 | LeaderRaftAddress: node.RaftAddress, 83 | }, nil 84 | } 85 | 86 | // UpdateClusterLeader updates "node" as the leader of it's cluster. This is called when a leader crashes and another 87 | // leader is picked via the Leader Election in Raft. 88 | func (s *ClusterAdminGRPCServer) UpdateClusterLeader(ctx context.Context, node *RavelClusterAdminPB.Node) (*RavelClusterAdminPB.Response, error) { 89 | s.mutex.Lock() 90 | defer s.mutex.Unlock() 91 | 92 | if cInfo, exists := s.ClusterLeaderMap[node.ClusterId]; exists { 93 | s.ClusterLeaderMap[node.ClusterId] = clusterInfo{node, cInfo.ReplicaCount} 94 | } else { 95 | return nil, errors.New("invalid clusterID id") 96 | } 97 | 98 | log.Println(s.ClusterLeaderMap) 99 | 100 | return &RavelClusterAdminPB.Response{ 101 | Data: "leader updated successfully", 102 | }, nil 103 | } 104 | 105 | // LeaveCluster decrements the replica count of the node's cluster 106 | func (s *ClusterAdminGRPCServer) LeaveCluster(ctx context.Context, node *RavelClusterAdminPB.Node) (*RavelClusterAdminPB.Response, error) { 107 | s.mutex.Lock() 108 | defer s.mutex.Unlock() 109 | 110 | cInfo, exists := s.ClusterLeaderMap[node.ClusterId] 111 | if !exists { 112 | return nil, errors.New("invalid clusterID") 113 | } 114 | 115 | if len(s.ClusterLeaderMap) == 1 && cInfo.ReplicaCount == 1 { 116 | // last remaining cluster in the system -> reset consistentHash -> delete info from ClusterLeaderMap 117 | log.Printf("Node: %v from Cluster: %v is the last standing Cluster Leader in the system\n", node.NodeId, node.ClusterId) 118 | log.Println("Resetting consistentHash, Removing", node.ClusterId, "from ClusterLeaderMap") 119 | 120 | consistentHash.Reset(271, 40, 1.2) 121 | err := consistentHash.BackupToDisk(RavelClusterAdminBackupPath) 122 | if err != nil { 123 | return nil, err 124 | } 125 | delete(s.ClusterLeaderMap, node.ClusterId) 126 | 127 | return &RavelClusterAdminPB.Response{ 128 | Data: "Removing last standing cluster in the system", 129 | }, nil 130 | } else { 131 | if cInfo.ReplicaCount == 1 { 132 | // last remaining replica in the cluster -> remove cluster from ClusterLeaderMap -> remove cluster from consistentHash 133 | log.Printf("Node: %v from Cluster: %v is the last standing replica in the cluster\n", node.NodeId, node.ClusterId) 134 | log.Println("Deleting cluster from consistent hash and removing", node.ClusterId, "from ClusterLeaderMap") 135 | consistentHash.DeleteCluster(clusterID(node.ClusterId)) 136 | delete(s.ClusterLeaderMap, node.ClusterId) 137 | 138 | return &RavelClusterAdminPB.Response{ 139 | Data: "Deleting Cluster: " + node.ClusterId, 140 | }, nil 141 | } else { 142 | cInfo.ReplicaCount -= 1 143 | s.ClusterLeaderMap[node.ClusterId] = cInfo 144 | 145 | log.Println(s.ClusterLeaderMap) 146 | return &RavelClusterAdminPB.Response{ 147 | Data: "replica count reduced", 148 | }, nil 149 | } 150 | } 151 | } 152 | 153 | // GetClusterLeader returns information about the leader node of the provided cluster 154 | func (s *ClusterAdminGRPCServer) GetClusterLeader(ctx context.Context, cluster *RavelClusterAdminPB.Cluster) (*RavelClusterAdminPB.Node, error) { 155 | s.mutex.Lock() 156 | defer s.mutex.Unlock() 157 | 158 | cInfo, exists := 
s.ClusterLeaderMap[cluster.ClusterId] 159 | if !exists { 160 | return nil, errors.New("invalid clusterID id") 161 | } 162 | 163 | return cInfo.LeaderNode, nil 164 | } 165 | 166 | // InitiateDataRelocation adds the provided cluster as an owner to the consistent hashing setup, 167 | // which in turns takes care of the movement of data 168 | func (s *ClusterAdminGRPCServer) InitiateDataRelocation(ctx context.Context, cluster *RavelClusterAdminPB.Cluster) (*RavelClusterAdminPB.Response, error) { 169 | consistentHash.AddCluster(clusterID(cluster.ClusterId)) 170 | return &RavelClusterAdminPB.Response{ 171 | Data: "data relocation completed", 172 | }, nil 173 | } 174 | 175 | // WriteKeyValue writes the given key and value to the leader of the provided cluster. 176 | // NOTE: this function is not exposed via gRPC 177 | func (s *ClusterAdminGRPCServer) WriteKeyValue(key []byte, val []byte, clusterID string) error { 178 | conn, err := grpc.Dial(s.ClusterLeaderMap[clusterID].LeaderNode.GrpcAddress, grpc.WithInsecure()) 179 | if err != nil { 180 | return err 181 | } 182 | 183 | client := RavelNodePB.NewRavelNodeClient(conn) 184 | resp, err := client.Run(context.TODO(), &RavelNodePB.Command{ 185 | Operation: "set", 186 | Key: key, 187 | Value: val, 188 | }) 189 | 190 | if err != nil { 191 | return err 192 | } 193 | 194 | log.Println(resp.Data) 195 | return nil 196 | } 197 | 198 | // ReadKey reads the value for the given key from the leader of the provided cluster. 199 | // NOTE: this function is not exposed via gRPC 200 | func (s *ClusterAdminGRPCServer) ReadKey(key []byte, clusterID string) ([]byte, error) { 201 | conn, err := grpc.Dial(s.ClusterLeaderMap[clusterID].LeaderNode.GrpcAddress, grpc.WithInsecure()) 202 | if err != nil { 203 | return nil, err 204 | } 205 | 206 | client := RavelNodePB.NewRavelNodeClient(conn) 207 | resp, err := client.Run(context.TODO(), &RavelNodePB.Command{ 208 | Operation: "get", 209 | Key: key, 210 | }) 211 | 212 | if err != nil { 213 | return nil, err 214 | } 215 | 216 | return resp.Data, nil 217 | } 218 | 219 | // DeleteKey deletes the key and value on the server 220 | // NOTE: this function is not exposed via gRPC 221 | func (s *ClusterAdminGRPCServer) DeleteKey(key []byte, clusterID string) error { 222 | conn, err := grpc.Dial(s.ClusterLeaderMap[clusterID].LeaderNode.GrpcAddress, grpc.WithInsecure()) 223 | if err != nil { 224 | return err 225 | } 226 | 227 | consistentHash.DeleteKey(key) 228 | client := RavelNodePB.NewRavelNodeClient(conn) 229 | resp, err := client.Run(context.TODO(), &RavelNodePB.Command{ 230 | Operation: "delete", 231 | Key: key, 232 | }) 233 | 234 | if err != nil { 235 | return err 236 | } 237 | log.Println(resp.Msg) 238 | return nil 239 | } 240 | 241 | // ReadKeyAndDelete reads the key, value and then deletes it on the server 242 | // NOTE: this function is not exposed via gRPC 243 | func (s *ClusterAdminGRPCServer) ReadKeyAndDelete(key []byte, clusterID string) ([]byte, error) { 244 | conn, err := grpc.Dial(s.ClusterLeaderMap[clusterID].LeaderNode.GrpcAddress, grpc.WithInsecure()) 245 | if err != nil { 246 | return nil, err 247 | } 248 | 249 | client := RavelNodePB.NewRavelNodeClient(conn) 250 | resp, err := client.Run(context.TODO(), &RavelNodePB.Command{ 251 | Operation: "getAndDelete", 252 | Key: key, 253 | }) 254 | 255 | if err != nil { 256 | return nil, err 257 | } 258 | 259 | log.Println(resp.Msg) 260 | return resp.Data, nil 261 | } 262 | -------------------------------------------------------------------------------- /README.md: 
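A closing note on the admin's gRPC helpers above: every node interaction goes through the node's single `Run` RPC, dispatching on `Command.Operation` (`set`, `get`, `delete`, `getAndDelete`). A self-contained sketch of such a call, assuming a reachable node (the address and key are illustrative):

```go
package main

import (
	"context"
	"log"

	"github.com/adityameharia/ravel/RavelNodePB"
	"google.golang.org/grpc"
)

func main() {
	// Dial the leader of the target cluster (example address).
	conn, err := grpc.Dial("localhost:50000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := RavelNodePB.NewRavelNodeClient(conn)
	resp, err := client.Run(context.TODO(), &RavelNodePB.Command{
		Operation: "get",
		Key:       []byte("some_key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(string(resp.Data))
}
```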
-------------------------------------------------------------------------------- 
1 | ![](header.png)
2 | 
3 | Ravel is a sharded, fault-tolerant key-value store built using [BadgerDB](https://github.com/dgraph-io/badger)
4 | and [hashicorp/raft](https://github.com/hashicorp/raft). You can shard your data across multiple clusters with multiple
5 | replicas; the data is persisted on disk using BadgerDB for high read and write throughput. Replication and
6 | fault-tolerance are handled using [Raft](https://raft.github.io/).
7 | 
8 | Ravel exposes a simple HTTP API for the user to read and write data, and it handles the sharding and replication
9 | of data across clusters.
10 | 
11 | ## Table of Contents
12 | 
13 | * [Installation](#installation)
14 |     * [Using Curl](#using-curl)
15 |     * [From Source](#from-source)
16 | * [Usage](#usage)
17 |     * [Setup a Cluster](#setup-a-cluster)
18 |     * [Reading and Writing Data](#reading-and-writing-data)
19 |     * [Killing A Ravel Instance](#killing-a-ravel-instance)
20 |     * [Uninstalling Ravel](#uninstalling-ravel)
21 | * [Documentation and Further Reading](#documentation-and-further-reading)
22 | * [Contributing](#contributing)
23 | * [Contact](#contact)
24 | * [License](#license)
25 | 
26 | ## Installation
27 | 
28 | Ravel has two functional components: a cluster admin server and a replica node, each with its own binary
29 | file. To set up Ravel correctly, you'll need to start one cluster admin server and as many replica nodes as
30 | required.
31 | 
32 | ### Using `curl`
33 | 
34 | This will download the `ravel_node` and `ravel_cluster_admin` binary files and move them to `/usr/local/bin`; make sure
35 | that directory is in your `$PATH`.
36 | 
37 | ```sh
38 | curl https://raw.githubusercontent.com/adityameharia/ravel/main/install.sh | bash
39 | ```
40 | 
41 | ### From Source
42 | 
43 | - `cmd/ravel_node` directory has the implementation of `ravel_node`, which is the replica node
44 | - `cmd/ravel_cluster_admin` directory has the implementation of `ravel_cluster_admin`, which is the cluster admin server
45 | 
46 | 1. Clone this repository
47 | 
48 | ```shell
49 | git clone https://github.com/adityameharia/ravel
50 | cd ravel
51 | git checkout main
52 | ```
53 | 
54 | 2. Build `ravel_node` and `ravel_cluster_admin`
55 | 
56 | ```shell
57 | cd cmd/ravel_node
58 | go build
59 | sudo mv ./ravel_node /usr/local/bin
60 | cd ../ravel_cluster_admin
61 | go build
62 | sudo mv ./ravel_cluster_admin /usr/local/bin
63 | ```
64 | 
65 | This will build the `ravel_node` and `ravel_cluster_admin` binaries in `cmd/ravel_node`
66 | and `cmd/ravel_cluster_admin` respectively and move them to `/usr/local/bin`
67 | 
68 | ## Usage
69 | 
70 | Usage info for `ravel_cluster_admin`
71 | 
72 | ```shell
73 | $ ravel_cluster_admin --help
74 | NAME:
75 |    Ravel Cluster Admin - Start a Ravel Cluster Admin server
76 | 
77 | USAGE:
78 |    ravel_cluster_admin [global options] command [command options] [arguments...]
79 | 80 | COMMANDS: 81 | help, h Shows a list of commands or help for one command 82 | 83 | GLOBAL OPTIONS: 84 | --http value Address (with port) on which the HTTP server should listen 85 | --grpc value Address (with port) on which the gRPC server should listen 86 | --backupPath value Path where the Cluster Admin should persist its state on disk 87 | --help, -h show help 88 | ``` 89 | 90 | Usage info for `ravel_node` 91 | 92 | ```shell 93 | $ ravel_node --help 94 | NAME: 95 | Ravel Replica - Manage a Ravel replica server 96 | 97 | USAGE: 98 | ravel_node [global options] command [command options] [arguments...] 99 | 100 | COMMANDS: 101 | start Starts a replica server 102 | kill Removes and deletes all the data in the cluster 103 | help, h Shows a list of commands or help for one command 104 | 105 | GLOBAL OPTIONS: 106 | --help, -h show help (default: false) 107 | ``` 108 | 109 | Usage info for the `start` command in `ravel_node`. Use this command to start a replica server. 110 | 111 | ```shell 112 | $ ravel_node start --help 113 | NAME: 114 | ravel_node start - Starts a replica server 115 | 116 | USAGE: 117 | ravel_node start [command options] [arguments...] 118 | 119 | OPTIONS: 120 | --storagedir value, -s value Storage Dir (default: "~/ravel_replica") 121 | --grpcaddr value, -g value GRPC Addr of this replica (default: "localhost:50000") 122 | --raftaddr value, -r value Raft Internal address for this replica (default: "localhost:60000") 123 | --adminrpcaddr value, -a value GRPC address of the cluster admin (default: "localhost:42000") 124 | --yaml value, -y value yaml file containing the config 125 | --leader, -l Register this node as a new leader or not (default: false) 126 | --help, -h show help (default: false) 127 | ``` 128 | 129 | ## Setup a Cluster 130 | 131 | Executing the following instructions will setup a sample Ravel instance. The most simple configuration of a Ravel 132 | instance would consist of 2 clusters with 3 replicas each. 133 | 134 | The key value pairs will be sharded across the two clusters and replicated thrice on each cluster. The admin will 135 | automatically decide which replica goes to which cluster. Adding and removing clusters from the system automatically 136 | relocates all the keys in that cluster to some other one. Deleting the last standing cluster deletes all the keys in the 137 | instance. 138 | 139 | 1. Setup the cluster admin server 140 | 141 | ```shell 142 | sudo ravel_cluster_admin --http="localhost:5000" --grpc="localhost:42000" --backupPath="~/ravel_admin" 143 | ``` 144 | 145 | 2. Setting up the cluster leaders 146 | 147 | ```shell 148 | sudo ravel_node start -s="/tmp/ravel_leader1" -l=true -r="localhost:60000" -g="localhost:50000" -a="localhost:42000" 149 | sudo ravel_node start -s="/tmp/ravel_leader2" -l=true -r="localhost:60001" -g="localhost:50001" -a="localhost:42000" 150 | ``` 151 | 152 | 3. 
Setting up the replicas
153 | 
154 | ```shell
155 | sudo ravel_node start -s="/tmp/ravel_replica1" -r="localhost:60002" -g="localhost:50002" -a="localhost:42000"
156 | sudo ravel_node start -s="/tmp/ravel_replica2" -r="localhost:60003" -g="localhost:50003" -a="localhost:42000"
157 | sudo ravel_node start -s="/tmp/ravel_replica3" -r="localhost:60004" -g="localhost:50004" -a="localhost:42000"
158 | sudo ravel_node start -s="/tmp/ravel_replica4" -r="localhost:60005" -g="localhost:50005" -a="localhost:42000"
159 | ```
160 | 
161 | **NOTE**
162 | 
163 | - `-l=true` sets up a new cluster, defaults to false
164 | - Don't forget the storage directory, as you will need it to delete the replica
165 | - All the commands and flags can be viewed using the `-h` or `--help` flag
166 | 
167 | ## Reading and Writing Data
168 | 
169 | Once the replicas and admin are set up, we can start sending HTTP requests to our cluster admin server to read, write
170 | and delete key-value pairs.
171 | 
172 | The cluster admin server exposes 3 HTTP routes:
173 | 
174 | - URL: `/put`
175 |     - Method: `POST`
176 |     - Description: Store a key-value pair in the system
177 |     - Request Body: `{"key": "<key>", "val": <val>}`
178 |         - `key = [string]`
179 |         - `val = [string | float | JSON Object | bool]`
180 |     - Success Response: `200` with body `{"msg": "ok"}`
181 | 
182 | - URL: `/get`
183 |     - Method: `POST`
184 |     - Description: Get a key-value pair from the system
185 |     - Request Body: `{"key": "<key>"}`
186 |         - `key = [string]`
187 |     - Success Response: `200` with body `{"key": "<key>", "val": <val>}`
188 | 
189 | - URL: `/delete`
190 |     - Method: `POST`
191 |     - Description: Delete a key-value pair from the system
192 |     - Request Body: `{"key": "<key>"}`
193 |         - `key = [string]`
194 |     - Success Response: `200` with body `{"msg": "ok"}`
195 | 
196 | ### Sample Requests
197 | 
198 | * Sample `/put` requests (the field name is `val`, matching the server's JSON binding)
199 | 
200 | ```json
201 | {
202 |   "key": "the_answer",
203 |   "val": 42
204 | }
205 | ```
206 | 
207 | ```json
208 | {
209 |   "key": "dogegod",
210 |   "val": "Elon Musk"
211 | }
212 | ```
213 | 
214 | ```json
215 | {
216 |   "key": "hello_friend",
217 |   "val": {
218 |     "elliot": "Rami Malek",
219 |     "darlene": "Carly Chaikin"
220 |   }
221 | }
222 | ```
223 | 
224 | * Sample `/get` request
225 | 
226 | ```json
227 | {
228 |   "key": "dogegod"
229 | }
230 | ```
231 | 
232 | * Sample `/delete` request
233 | 
234 | ```json
235 | {
236 |   "key": "dogegod"
237 | }
238 | ```
239 | 
240 | ## Killing A Ravel Instance
241 | 
242 | Stopping a Ravel instance neither deletes the data/configuration nor removes it from the system; it simulates a crash,
243 | with the expectation that the node will come back up. Once the node is up, it will sync up all the data from the leader node.
244 | 
245 | In order to delete all the data and configuration and remove the instance from the system, you need to kill it.
246 | 
247 | ```shell
248 | ravel_node kill -s="the storage directory you specified while starting the node"
249 | ```
250 | 
251 | Stopping the `ravel_cluster_admin` breaks the entire system and renders it useless. It is recommended not to stop/kill the
252 | admin unless all the replicas have been properly killed.
253 | 
254 | > The cluster admin server persists its state on disk for recovery. In order to truly reset it, you have to delete its storage
255 | directory.
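For example, with the sample setup from earlier, permanently removing the first replica and deleting its data:

```shell
ravel_node kill -s="/tmp/ravel_replica1"
```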
256 | 
257 | ## Uninstalling Ravel
258 | 
259 | Ravel can be uninstalled by deleting the binaries from `/usr/local/bin`:
260 | 
261 | ```shell
262 | sudo rm /usr/local/bin/ravel_node
263 | sudo rm /usr/local/bin/ravel_cluster_admin
264 | ```
265 | 
266 | ## Documentation and Further Reading
267 | 
268 | * API Reference: https://pkg.go.dev/github.com/adityameharia/ravel
269 | * To read about the data flow of the system, refer
270 |   to [data flow in admin](https://github.com/adityameharia/ravel/blob/main/cmd/ravel_cluster_admin/README.md)
271 |   and [data flow in replica](https://github.com/adityameharia/ravel/blob/main/cmd/ravel_node/README.md)
272 | * Each package also has its own README explaining what it does and how it does it.
273 | * Other blogs and resources
274 |   * https://raft.github.io/
275 |   * https://blog.dgraph.io/post/badger/
276 |   * [MIT 6.824: Distributed Systems](https://youtube.com/playlist?list=PLrw6a1wE39_tb2fErI4-WkMbsvGQk9_UB)
277 | 
278 | ## Contributing
279 | 
280 | If you're interested in contributing to Ravel, check out [CONTRIBUTING.md](CONTRIBUTING.md).
281 | 
282 | ## Contact
283 | 
284 | Reach out to the authors with questions, concerns, or ideas for improvement.
285 | 
286 | * adityameharia14@gmail.com
287 | * junaidrahim5a@gmail.com
288 | 
289 | ## License
290 | 
291 | Copyright (c) **Aditya Meharia** and **Junaid Rahim**. All rights reserved. Released under the [MIT](LICENSE) License.
292 | -------------------------------------------------------------------------------- /RavelClusterAdminPB/cluster_admin_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 2 | 3 | package RavelClusterAdminPB 4 | 5 | import ( 6 | context "context" 7 | grpc "google.golang.org/grpc" 8 | codes "google.golang.org/grpc/codes" 9 | status "google.golang.org/grpc/status" 10 | ) 11 | 12 | // This is a compile-time assertion to ensure that this generated file 13 | // is compatible with the grpc package it is being compiled against. 14 | // Requires gRPC-Go v1.32.0 or later. 15 | const _ = grpc.SupportPackageIsVersion7 16 | 17 | // RavelClusterAdminClient is the client API for RavelClusterAdmin service. 18 | // 19 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 20 | type RavelClusterAdminClient interface { 21 | JoinExistingCluster(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Cluster, error) 22 | JoinAsClusterLeader(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Cluster, error) 23 | UpdateClusterLeader(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Response, error) 24 | LeaveCluster(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Response, error) 25 | GetClusterLeader(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*Node, error) 26 | InitiateDataRelocation(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*Response, error) 27 | } 28 | 29 | type ravelClusterAdminClient struct { 30 | cc grpc.ClientConnInterface 31 | } 32 | 33 | func NewRavelClusterAdminClient(cc grpc.ClientConnInterface) RavelClusterAdminClient { 34 | return &ravelClusterAdminClient{cc} 35 | } 36 | 37 | func (c *ravelClusterAdminClient) JoinExistingCluster(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Cluster, error) { 38 | out := new(Cluster) 39 | err := c.cc.Invoke(ctx, "/RavelClusterAdminPB.RavelClusterAdmin/JoinExistingCluster", in, out, opts...)
40 | if err != nil { 41 | return nil, err 42 | } 43 | return out, nil 44 | } 45 | 46 | func (c *ravelClusterAdminClient) JoinAsClusterLeader(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Cluster, error) { 47 | out := new(Cluster) 48 | err := c.cc.Invoke(ctx, "/RavelClusterAdminPB.RavelClusterAdmin/JoinAsClusterLeader", in, out, opts...) 49 | if err != nil { 50 | return nil, err 51 | } 52 | return out, nil 53 | } 54 | 55 | func (c *ravelClusterAdminClient) UpdateClusterLeader(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Response, error) { 56 | out := new(Response) 57 | err := c.cc.Invoke(ctx, "/RavelClusterAdminPB.RavelClusterAdmin/UpdateClusterLeader", in, out, opts...) 58 | if err != nil { 59 | return nil, err 60 | } 61 | return out, nil 62 | } 63 | 64 | func (c *ravelClusterAdminClient) LeaveCluster(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Response, error) { 65 | out := new(Response) 66 | err := c.cc.Invoke(ctx, "/RavelClusterAdminPB.RavelClusterAdmin/LeaveCluster", in, out, opts...) 67 | if err != nil { 68 | return nil, err 69 | } 70 | return out, nil 71 | } 72 | 73 | func (c *ravelClusterAdminClient) GetClusterLeader(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*Node, error) { 74 | out := new(Node) 75 | err := c.cc.Invoke(ctx, "/RavelClusterAdminPB.RavelClusterAdmin/GetClusterLeader", in, out, opts...) 76 | if err != nil { 77 | return nil, err 78 | } 79 | return out, nil 80 | } 81 | 82 | func (c *ravelClusterAdminClient) InitiateDataRelocation(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*Response, error) { 83 | out := new(Response) 84 | err := c.cc.Invoke(ctx, "/RavelClusterAdminPB.RavelClusterAdmin/InitiateDataRelocation", in, out, opts...) 85 | if err != nil { 86 | return nil, err 87 | } 88 | return out, nil 89 | } 90 | 91 | // RavelClusterAdminServer is the server API for RavelClusterAdmin service. 92 | // All implementations should embed UnimplementedRavelClusterAdminServer 93 | // for forward compatibility 94 | type RavelClusterAdminServer interface { 95 | JoinExistingCluster(context.Context, *Node) (*Cluster, error) 96 | JoinAsClusterLeader(context.Context, *Node) (*Cluster, error) 97 | UpdateClusterLeader(context.Context, *Node) (*Response, error) 98 | LeaveCluster(context.Context, *Node) (*Response, error) 99 | GetClusterLeader(context.Context, *Cluster) (*Node, error) 100 | InitiateDataRelocation(context.Context, *Cluster) (*Response, error) 101 | } 102 | 103 | // UnimplementedRavelClusterAdminServer should be embedded to have forward compatible implementations. 
104 | type UnimplementedRavelClusterAdminServer struct { 105 | } 106 | 107 | func (UnimplementedRavelClusterAdminServer) JoinExistingCluster(context.Context, *Node) (*Cluster, error) { 108 | return nil, status.Errorf(codes.Unimplemented, "method JoinExistingCluster not implemented") 109 | } 110 | func (UnimplementedRavelClusterAdminServer) JoinAsClusterLeader(context.Context, *Node) (*Cluster, error) { 111 | return nil, status.Errorf(codes.Unimplemented, "method JoinAsClusterLeader not implemented") 112 | } 113 | func (UnimplementedRavelClusterAdminServer) UpdateClusterLeader(context.Context, *Node) (*Response, error) { 114 | return nil, status.Errorf(codes.Unimplemented, "method UpdateClusterLeader not implemented") 115 | } 116 | func (UnimplementedRavelClusterAdminServer) LeaveCluster(context.Context, *Node) (*Response, error) { 117 | return nil, status.Errorf(codes.Unimplemented, "method LeaveCluster not implemented") 118 | } 119 | func (UnimplementedRavelClusterAdminServer) GetClusterLeader(context.Context, *Cluster) (*Node, error) { 120 | return nil, status.Errorf(codes.Unimplemented, "method GetClusterLeader not implemented") 121 | } 122 | func (UnimplementedRavelClusterAdminServer) InitiateDataRelocation(context.Context, *Cluster) (*Response, error) { 123 | return nil, status.Errorf(codes.Unimplemented, "method InitiateDataRelocation not implemented") 124 | } 125 | 126 | // UnsafeRavelClusterAdminServer may be embedded to opt out of forward compatibility for this service. 127 | // Use of this interface is not recommended, as added methods to RavelClusterAdminServer will 128 | // result in compilation errors. 129 | type UnsafeRavelClusterAdminServer interface { 130 | mustEmbedUnimplementedRavelClusterAdminServer() 131 | } 132 | 133 | func RegisterRavelClusterAdminServer(s grpc.ServiceRegistrar, srv RavelClusterAdminServer) { 134 | s.RegisterService(&RavelClusterAdmin_ServiceDesc, srv) 135 | } 136 | 137 | func _RavelClusterAdmin_JoinExistingCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 138 | in := new(Node) 139 | if err := dec(in); err != nil { 140 | return nil, err 141 | } 142 | if interceptor == nil { 143 | return srv.(RavelClusterAdminServer).JoinExistingCluster(ctx, in) 144 | } 145 | info := &grpc.UnaryServerInfo{ 146 | Server: srv, 147 | FullMethod: "/RavelClusterAdminPB.RavelClusterAdmin/JoinExistingCluster", 148 | } 149 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 150 | return srv.(RavelClusterAdminServer).JoinExistingCluster(ctx, req.(*Node)) 151 | } 152 | return interceptor(ctx, in, info, handler) 153 | } 154 | 155 | func _RavelClusterAdmin_JoinAsClusterLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 156 | in := new(Node) 157 | if err := dec(in); err != nil { 158 | return nil, err 159 | } 160 | if interceptor == nil { 161 | return srv.(RavelClusterAdminServer).JoinAsClusterLeader(ctx, in) 162 | } 163 | info := &grpc.UnaryServerInfo{ 164 | Server: srv, 165 | FullMethod: "/RavelClusterAdminPB.RavelClusterAdmin/JoinAsClusterLeader", 166 | } 167 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 168 | return srv.(RavelClusterAdminServer).JoinAsClusterLeader(ctx, req.(*Node)) 169 | } 170 | return interceptor(ctx, in, info, handler) 171 | } 172 | 173 | func _RavelClusterAdmin_UpdateClusterLeader_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 174 | in := new(Node) 175 | if err := dec(in); err != nil { 176 | return nil, err 177 | } 178 | if interceptor == nil { 179 | return srv.(RavelClusterAdminServer).UpdateClusterLeader(ctx, in) 180 | } 181 | info := &grpc.UnaryServerInfo{ 182 | Server: srv, 183 | FullMethod: "/RavelClusterAdminPB.RavelClusterAdmin/UpdateClusterLeader", 184 | } 185 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 186 | return srv.(RavelClusterAdminServer).UpdateClusterLeader(ctx, req.(*Node)) 187 | } 188 | return interceptor(ctx, in, info, handler) 189 | } 190 | 191 | func _RavelClusterAdmin_LeaveCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 192 | in := new(Node) 193 | if err := dec(in); err != nil { 194 | return nil, err 195 | } 196 | if interceptor == nil { 197 | return srv.(RavelClusterAdminServer).LeaveCluster(ctx, in) 198 | } 199 | info := &grpc.UnaryServerInfo{ 200 | Server: srv, 201 | FullMethod: "/RavelClusterAdminPB.RavelClusterAdmin/LeaveCluster", 202 | } 203 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 204 | return srv.(RavelClusterAdminServer).LeaveCluster(ctx, req.(*Node)) 205 | } 206 | return interceptor(ctx, in, info, handler) 207 | } 208 | 209 | func _RavelClusterAdmin_GetClusterLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 210 | in := new(Cluster) 211 | if err := dec(in); err != nil { 212 | return nil, err 213 | } 214 | if interceptor == nil { 215 | return srv.(RavelClusterAdminServer).GetClusterLeader(ctx, in) 216 | } 217 | info := &grpc.UnaryServerInfo{ 218 | Server: srv, 219 | FullMethod: "/RavelClusterAdminPB.RavelClusterAdmin/GetClusterLeader", 220 | } 221 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 222 | return srv.(RavelClusterAdminServer).GetClusterLeader(ctx, req.(*Cluster)) 223 | } 224 | return interceptor(ctx, in, info, handler) 225 | } 226 | 227 | func _RavelClusterAdmin_InitiateDataRelocation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 228 | in := new(Cluster) 229 | if err := dec(in); err != nil { 230 | return nil, err 231 | } 232 | if interceptor == nil { 233 | return srv.(RavelClusterAdminServer).InitiateDataRelocation(ctx, in) 234 | } 235 | info := &grpc.UnaryServerInfo{ 236 | Server: srv, 237 | FullMethod: "/RavelClusterAdminPB.RavelClusterAdmin/InitiateDataRelocation", 238 | } 239 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 240 | return srv.(RavelClusterAdminServer).InitiateDataRelocation(ctx, req.(*Cluster)) 241 | } 242 | return interceptor(ctx, in, info, handler) 243 | } 244 | 245 | // RavelClusterAdmin_ServiceDesc is the grpc.ServiceDesc for RavelClusterAdmin service. 
246 | // It's only intended for direct use with grpc.RegisterService, 247 | // and not to be introspected or modified (even as a copy) 248 | var RavelClusterAdmin_ServiceDesc = grpc.ServiceDesc{ 249 | ServiceName: "RavelClusterAdminPB.RavelClusterAdmin", 250 | HandlerType: (*RavelClusterAdminServer)(nil), 251 | Methods: []grpc.MethodDesc{ 252 | { 253 | MethodName: "JoinExistingCluster", 254 | Handler: _RavelClusterAdmin_JoinExistingCluster_Handler, 255 | }, 256 | { 257 | MethodName: "JoinAsClusterLeader", 258 | Handler: _RavelClusterAdmin_JoinAsClusterLeader_Handler, 259 | }, 260 | { 261 | MethodName: "UpdateClusterLeader", 262 | Handler: _RavelClusterAdmin_UpdateClusterLeader_Handler, 263 | }, 264 | { 265 | MethodName: "LeaveCluster", 266 | Handler: _RavelClusterAdmin_LeaveCluster_Handler, 267 | }, 268 | { 269 | MethodName: "GetClusterLeader", 270 | Handler: _RavelClusterAdmin_GetClusterLeader_Handler, 271 | }, 272 | { 273 | MethodName: "InitiateDataRelocation", 274 | Handler: _RavelClusterAdmin_InitiateDataRelocation_Handler, 275 | }, 276 | }, 277 | Streams: []grpc.StreamDesc{}, 278 | Metadata: "cmd/ravel_cluster_admin/cluster_admin.proto", 279 | } 280 | -------------------------------------------------------------------------------- /RavelClusterAdminPB/cluster_admin.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.26.0 4 | // protoc v3.7.1 5 | // source: cmd/ravel_cluster_admin/cluster_admin.proto 6 | 7 | package RavelClusterAdminPB 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 
20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | // Cluster represents the information to represent a cluster leader in Ravel 24 | type Cluster struct { 25 | state protoimpl.MessageState 26 | sizeCache protoimpl.SizeCache 27 | unknownFields protoimpl.UnknownFields 28 | 29 | ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` 30 | LeaderGrpcAddress string `protobuf:"bytes,2,opt,name=leader_grpc_address,json=leaderGrpcAddress,proto3" json:"leader_grpc_address,omitempty"` 31 | LeaderRaftAddress string `protobuf:"bytes,3,opt,name=leader_raft_address,json=leaderRaftAddress,proto3" json:"leader_raft_address,omitempty"` 32 | } 33 | 34 | func (x *Cluster) Reset() { 35 | *x = Cluster{} 36 | if protoimpl.UnsafeEnabled { 37 | mi := &file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[0] 38 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 39 | ms.StoreMessageInfo(mi) 40 | } 41 | } 42 | 43 | func (x *Cluster) String() string { 44 | return protoimpl.X.MessageStringOf(x) 45 | } 46 | 47 | func (*Cluster) ProtoMessage() {} 48 | 49 | func (x *Cluster) ProtoReflect() protoreflect.Message { 50 | mi := &file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[0] 51 | if protoimpl.UnsafeEnabled && x != nil { 52 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 53 | if ms.LoadMessageInfo() == nil { 54 | ms.StoreMessageInfo(mi) 55 | } 56 | return ms 57 | } 58 | return mi.MessageOf(x) 59 | } 60 | 61 | // Deprecated: Use Cluster.ProtoReflect.Descriptor instead. 62 | func (*Cluster) Descriptor() ([]byte, []int) { 63 | return file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescGZIP(), []int{0} 64 | } 65 | 66 | func (x *Cluster) GetClusterId() string { 67 | if x != nil { 68 | return x.ClusterId 69 | } 70 | return "" 71 | } 72 | 73 | func (x *Cluster) GetLeaderGrpcAddress() string { 74 | if x != nil { 75 | return x.LeaderGrpcAddress 76 | } 77 | return "" 78 | } 79 | 80 | func (x *Cluster) GetLeaderRaftAddress() string { 81 | if x != nil { 82 | return x.LeaderRaftAddress 83 | } 84 | return "" 85 | } 86 | 87 | type Response struct { 88 | state protoimpl.MessageState 89 | sizeCache protoimpl.SizeCache 90 | unknownFields protoimpl.UnknownFields 91 | 92 | Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` 93 | } 94 | 95 | func (x *Response) Reset() { 96 | *x = Response{} 97 | if protoimpl.UnsafeEnabled { 98 | mi := &file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[1] 99 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 100 | ms.StoreMessageInfo(mi) 101 | } 102 | } 103 | 104 | func (x *Response) String() string { 105 | return protoimpl.X.MessageStringOf(x) 106 | } 107 | 108 | func (*Response) ProtoMessage() {} 109 | 110 | func (x *Response) ProtoReflect() protoreflect.Message { 111 | mi := &file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[1] 112 | if protoimpl.UnsafeEnabled && x != nil { 113 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 114 | if ms.LoadMessageInfo() == nil { 115 | ms.StoreMessageInfo(mi) 116 | } 117 | return ms 118 | } 119 | return mi.MessageOf(x) 120 | } 121 | 122 | // Deprecated: Use Response.ProtoReflect.Descriptor instead. 
123 | func (*Response) Descriptor() ([]byte, []int) { 124 | return file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescGZIP(), []int{1} 125 | } 126 | 127 | func (x *Response) GetData() string { 128 | if x != nil { 129 | return x.Data 130 | } 131 | return "" 132 | } 133 | 134 | // Node represents the information to represent a node in Ravel 135 | type Node struct { 136 | state protoimpl.MessageState 137 | sizeCache protoimpl.SizeCache 138 | unknownFields protoimpl.UnknownFields 139 | 140 | NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` 141 | GrpcAddress string `protobuf:"bytes,2,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` 142 | RaftAddress string `protobuf:"bytes,3,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` 143 | ClusterId string `protobuf:"bytes,4,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` 144 | } 145 | 146 | func (x *Node) Reset() { 147 | *x = Node{} 148 | if protoimpl.UnsafeEnabled { 149 | mi := &file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[2] 150 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 151 | ms.StoreMessageInfo(mi) 152 | } 153 | } 154 | 155 | func (x *Node) String() string { 156 | return protoimpl.X.MessageStringOf(x) 157 | } 158 | 159 | func (*Node) ProtoMessage() {} 160 | 161 | func (x *Node) ProtoReflect() protoreflect.Message { 162 | mi := &file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[2] 163 | if protoimpl.UnsafeEnabled && x != nil { 164 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 165 | if ms.LoadMessageInfo() == nil { 166 | ms.StoreMessageInfo(mi) 167 | } 168 | return ms 169 | } 170 | return mi.MessageOf(x) 171 | } 172 | 173 | // Deprecated: Use Node.ProtoReflect.Descriptor instead. 
174 | func (*Node) Descriptor() ([]byte, []int) { 175 | return file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescGZIP(), []int{2} 176 | } 177 | 178 | func (x *Node) GetNodeId() string { 179 | if x != nil { 180 | return x.NodeId 181 | } 182 | return "" 183 | } 184 | 185 | func (x *Node) GetGrpcAddress() string { 186 | if x != nil { 187 | return x.GrpcAddress 188 | } 189 | return "" 190 | } 191 | 192 | func (x *Node) GetRaftAddress() string { 193 | if x != nil { 194 | return x.RaftAddress 195 | } 196 | return "" 197 | } 198 | 199 | func (x *Node) GetClusterId() string { 200 | if x != nil { 201 | return x.ClusterId 202 | } 203 | return "" 204 | } 205 | 206 | var File_cmd_ravel_cluster_admin_cluster_admin_proto protoreflect.FileDescriptor 207 | 208 | var file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDesc = []byte{ 209 | 0x0a, 0x2b, 0x63, 0x6d, 0x64, 0x2f, 0x72, 0x61, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 210 | 0x74, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 211 | 0x72, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x52, 212 | 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 213 | 0x50, 0x42, 0x22, 0x88, 0x01, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1d, 214 | 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 215 | 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2e, 0x0a, 216 | 0x13, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 217 | 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6c, 0x65, 0x61, 0x64, 218 | 0x65, 0x72, 0x47, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 219 | 0x13, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x61, 0x64, 0x64, 220 | 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6c, 0x65, 0x61, 0x64, 221 | 0x65, 0x72, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x1e, 0x0a, 222 | 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 223 | 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x84, 0x01, 224 | 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 225 | 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 226 | 0x21, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 227 | 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 228 | 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 229 | 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x61, 0x66, 0x74, 0x41, 0x64, 230 | 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 231 | 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 232 | 0x65, 0x72, 0x49, 0x64, 0x32, 0xf2, 0x03, 0x0a, 0x11, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 233 | 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4e, 0x0a, 0x13, 0x4a, 0x6f, 234 | 0x69, 0x6e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 235 | 0x72, 0x12, 0x19, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 236 | 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x42, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x1a, 0x1c, 0x2e, 0x52, 237 | 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 238 | 0x50, 0x42, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x13, 0x4a, 0x6f, 239 | 0x69, 0x6e, 0x41, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 240 | 0x72, 0x12, 0x19, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 241 | 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x42, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x1a, 0x1c, 0x2e, 0x52, 242 | 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 243 | 0x50, 0x42, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4f, 0x0a, 0x13, 0x55, 0x70, 244 | 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 245 | 0x72, 0x12, 0x19, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 246 | 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x42, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x1a, 0x1d, 0x2e, 0x52, 247 | 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 248 | 0x50, 0x42, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0c, 0x4c, 249 | 0x65, 0x61, 0x76, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x19, 0x2e, 0x52, 0x61, 250 | 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 251 | 0x42, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x1a, 0x1d, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 252 | 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x42, 0x2e, 0x52, 0x65, 0x73, 253 | 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 254 | 0x74, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x52, 0x61, 0x76, 0x65, 255 | 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x42, 0x2e, 256 | 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x19, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 257 | 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x42, 0x2e, 0x4e, 0x6f, 258 | 0x64, 0x65, 0x12, 0x55, 0x0a, 0x16, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x44, 0x61, 259 | 0x74, 0x61, 0x52, 0x65, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x52, 260 | 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 261 | 0x50, 0x42, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x1d, 0x2e, 0x52, 0x61, 0x76, 262 | 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x42, 263 | 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x17, 0x5a, 0x15, 0x2e, 0x2f, 0x52, 264 | 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x6d, 0x69, 0x6e, 265 | 0x50, 0x42, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 266 | } 267 | 268 | var ( 269 | file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescOnce sync.Once 270 | file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescData = file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDesc 271 | ) 272 | 273 | func file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescGZIP() []byte { 274 | file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescOnce.Do(func() { 275 | file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescData) 276 | }) 277 | return file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDescData 278 | } 279 | 280 | var file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 3) 281 | var file_cmd_ravel_cluster_admin_cluster_admin_proto_goTypes = []interface{}{ 282 | (*Cluster)(nil), // 0: RavelClusterAdminPB.Cluster 283 | (*Response)(nil), // 1: RavelClusterAdminPB.Response 284 | (*Node)(nil), // 2: RavelClusterAdminPB.Node 285 | } 286 | var file_cmd_ravel_cluster_admin_cluster_admin_proto_depIdxs = []int32{ 287 | 2, // 0: RavelClusterAdminPB.RavelClusterAdmin.JoinExistingCluster:input_type -> RavelClusterAdminPB.Node 288 | 2, // 1: RavelClusterAdminPB.RavelClusterAdmin.JoinAsClusterLeader:input_type -> RavelClusterAdminPB.Node 289 | 2, // 2: RavelClusterAdminPB.RavelClusterAdmin.UpdateClusterLeader:input_type -> RavelClusterAdminPB.Node 290 | 2, // 3: RavelClusterAdminPB.RavelClusterAdmin.LeaveCluster:input_type -> RavelClusterAdminPB.Node 291 | 0, // 4: RavelClusterAdminPB.RavelClusterAdmin.GetClusterLeader:input_type -> RavelClusterAdminPB.Cluster 292 | 0, // 5: RavelClusterAdminPB.RavelClusterAdmin.InitiateDataRelocation:input_type -> RavelClusterAdminPB.Cluster 293 | 0, // 6: RavelClusterAdminPB.RavelClusterAdmin.JoinExistingCluster:output_type -> RavelClusterAdminPB.Cluster 294 | 0, // 7: RavelClusterAdminPB.RavelClusterAdmin.JoinAsClusterLeader:output_type -> RavelClusterAdminPB.Cluster 295 | 1, // 8: RavelClusterAdminPB.RavelClusterAdmin.UpdateClusterLeader:output_type -> RavelClusterAdminPB.Response 296 | 1, // 9: RavelClusterAdminPB.RavelClusterAdmin.LeaveCluster:output_type -> RavelClusterAdminPB.Response 297 | 2, // 10: RavelClusterAdminPB.RavelClusterAdmin.GetClusterLeader:output_type -> RavelClusterAdminPB.Node 298 | 1, // 11: RavelClusterAdminPB.RavelClusterAdmin.InitiateDataRelocation:output_type -> RavelClusterAdminPB.Response 299 | 6, // [6:12] is the sub-list for method output_type 300 | 0, // [0:6] is the sub-list for method input_type 301 | 0, // [0:0] is the sub-list for extension type_name 302 | 0, // [0:0] is the sub-list for extension extendee 303 | 0, // [0:0] is the sub-list for field type_name 304 | } 305 | 306 | func init() { file_cmd_ravel_cluster_admin_cluster_admin_proto_init() } 307 | func file_cmd_ravel_cluster_admin_cluster_admin_proto_init() { 308 | if File_cmd_ravel_cluster_admin_cluster_admin_proto != nil { 309 | return 310 | } 311 | if !protoimpl.UnsafeEnabled { 312 | file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 313 | switch v := v.(*Cluster); i { 314 | case 0: 315 | return &v.state 316 | case 1: 317 | return &v.sizeCache 318 | case 2: 319 | return &v.unknownFields 320 | default: 321 | return nil 322 | } 323 | } 324 | file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 325 | switch v := v.(*Response); i { 326 | case 0: 327 | return &v.state 328 | case 1: 329 | return &v.sizeCache 330 | case 2: 331 | return &v.unknownFields 332 | default: 333 | return nil 334 | } 335 | } 336 | file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { 337 | switch v := v.(*Node); i { 338 | case 0: 339 | return &v.state 340 | case 1: 341 | return &v.sizeCache 342 | case 2: 343 | return &v.unknownFields 344 | default: 345 | return nil 346 | } 347 | } 348 | } 349 | type x struct{} 350 
| out := protoimpl.TypeBuilder{ 351 | File: protoimpl.DescBuilder{ 352 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 353 | RawDescriptor: file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDesc, 354 | NumEnums: 0, 355 | NumMessages: 3, 356 | NumExtensions: 0, 357 | NumServices: 1, 358 | }, 359 | GoTypes: file_cmd_ravel_cluster_admin_cluster_admin_proto_goTypes, 360 | DependencyIndexes: file_cmd_ravel_cluster_admin_cluster_admin_proto_depIdxs, 361 | MessageInfos: file_cmd_ravel_cluster_admin_cluster_admin_proto_msgTypes, 362 | }.Build() 363 | File_cmd_ravel_cluster_admin_cluster_admin_proto = out.File 364 | file_cmd_ravel_cluster_admin_cluster_admin_proto_rawDesc = nil 365 | file_cmd_ravel_cluster_admin_cluster_admin_proto_goTypes = nil 366 | file_cmd_ravel_cluster_admin_cluster_admin_proto_depIdxs = nil 367 | } 368 | -------------------------------------------------------------------------------- /RavelNodePB/ravel_node.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.26.0-devel 4 | // protoc v3.17.0 5 | // source: ravel/cmd/ravel_node/ravel_node.proto 6 | 7 | package RavelNodePB 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 15 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 16 | reflect "reflect" 17 | sync "sync" 18 | ) 19 | 20 | const ( 21 | // Verify that this generated code is sufficiently up-to-date. 22 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 23 | // Verify that runtime/protoimpl is sufficiently up-to-date. 24 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 25 | ) 26 | 27 | type Node struct { 28 | state protoimpl.MessageState 29 | sizeCache protoimpl.SizeCache 30 | unknownFields protoimpl.UnknownFields 31 | 32 | NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` 33 | GrpcAddress string `protobuf:"bytes,2,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` 34 | RaftAddress string `protobuf:"bytes,3,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` 35 | ClusterId string `protobuf:"bytes,4,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` 36 | } 37 | 38 | func (x *Node) Reset() { 39 | *x = Node{} 40 | if protoimpl.UnsafeEnabled { 41 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[0] 42 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 43 | ms.StoreMessageInfo(mi) 44 | } 45 | } 46 | 47 | func (x *Node) String() string { 48 | return protoimpl.X.MessageStringOf(x) 49 | } 50 | 51 | func (*Node) ProtoMessage() {} 52 | 53 | func (x *Node) ProtoReflect() protoreflect.Message { 54 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[0] 55 | if protoimpl.UnsafeEnabled && x != nil { 56 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 57 | if ms.LoadMessageInfo() == nil { 58 | ms.StoreMessageInfo(mi) 59 | } 60 | return ms 61 | } 62 | return mi.MessageOf(x) 63 | } 64 | 65 | // Deprecated: Use Node.ProtoReflect.Descriptor instead. 
66 | func (*Node) Descriptor() ([]byte, []int) { 67 | return file_ravel_cmd_ravel_node_ravel_node_proto_rawDescGZIP(), []int{0} 68 | } 69 | 70 | func (x *Node) GetNodeId() string { 71 | if x != nil { 72 | return x.NodeId 73 | } 74 | return "" 75 | } 76 | 77 | func (x *Node) GetGrpcAddress() string { 78 | if x != nil { 79 | return x.GrpcAddress 80 | } 81 | return "" 82 | } 83 | 84 | func (x *Node) GetRaftAddress() string { 85 | if x != nil { 86 | return x.RaftAddress 87 | } 88 | return "" 89 | } 90 | 91 | func (x *Node) GetClusterId() string { 92 | if x != nil { 93 | return x.ClusterId 94 | } 95 | return "" 96 | } 97 | 98 | type Void struct { 99 | state protoimpl.MessageState 100 | sizeCache protoimpl.SizeCache 101 | unknownFields protoimpl.UnknownFields 102 | } 103 | 104 | func (x *Void) Reset() { 105 | *x = Void{} 106 | if protoimpl.UnsafeEnabled { 107 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[1] 108 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 109 | ms.StoreMessageInfo(mi) 110 | } 111 | } 112 | 113 | func (x *Void) String() string { 114 | return protoimpl.X.MessageStringOf(x) 115 | } 116 | 117 | func (*Void) ProtoMessage() {} 118 | 119 | func (x *Void) ProtoReflect() protoreflect.Message { 120 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[1] 121 | if protoimpl.UnsafeEnabled && x != nil { 122 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 123 | if ms.LoadMessageInfo() == nil { 124 | ms.StoreMessageInfo(mi) 125 | } 126 | return ms 127 | } 128 | return mi.MessageOf(x) 129 | } 130 | 131 | // Deprecated: Use Void.ProtoReflect.Descriptor instead. 132 | func (*Void) Descriptor() ([]byte, []int) { 133 | return file_ravel_cmd_ravel_node_ravel_node_proto_rawDescGZIP(), []int{1} 134 | } 135 | 136 | type Boolean struct { 137 | state protoimpl.MessageState 138 | sizeCache protoimpl.SizeCache 139 | unknownFields protoimpl.UnknownFields 140 | 141 | Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"` 142 | } 143 | 144 | func (x *Boolean) Reset() { 145 | *x = Boolean{} 146 | if protoimpl.UnsafeEnabled { 147 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[2] 148 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 149 | ms.StoreMessageInfo(mi) 150 | } 151 | } 152 | 153 | func (x *Boolean) String() string { 154 | return protoimpl.X.MessageStringOf(x) 155 | } 156 | 157 | func (*Boolean) ProtoMessage() {} 158 | 159 | func (x *Boolean) ProtoReflect() protoreflect.Message { 160 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[2] 161 | if protoimpl.UnsafeEnabled && x != nil { 162 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 163 | if ms.LoadMessageInfo() == nil { 164 | ms.StoreMessageInfo(mi) 165 | } 166 | return ms 167 | } 168 | return mi.MessageOf(x) 169 | } 170 | 171 | // Deprecated: Use Boolean.ProtoReflect.Descriptor instead. 
172 | func (*Boolean) Descriptor() ([]byte, []int) { 173 | return file_ravel_cmd_ravel_node_ravel_node_proto_rawDescGZIP(), []int{2} 174 | } 175 | 176 | func (x *Boolean) GetLeader() bool { 177 | if x != nil { 178 | return x.Leader 179 | } 180 | return false 181 | } 182 | 183 | type Response struct { 184 | state protoimpl.MessageState 185 | sizeCache protoimpl.SizeCache 186 | unknownFields protoimpl.UnknownFields 187 | 188 | Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` 189 | Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` 190 | } 191 | 192 | func (x *Response) Reset() { 193 | *x = Response{} 194 | if protoimpl.UnsafeEnabled { 195 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[3] 196 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 197 | ms.StoreMessageInfo(mi) 198 | } 199 | } 200 | 201 | func (x *Response) String() string { 202 | return protoimpl.X.MessageStringOf(x) 203 | } 204 | 205 | func (*Response) ProtoMessage() {} 206 | 207 | func (x *Response) ProtoReflect() protoreflect.Message { 208 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[3] 209 | if protoimpl.UnsafeEnabled && x != nil { 210 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 211 | if ms.LoadMessageInfo() == nil { 212 | ms.StoreMessageInfo(mi) 213 | } 214 | return ms 215 | } 216 | return mi.MessageOf(x) 217 | } 218 | 219 | // Deprecated: Use Response.ProtoReflect.Descriptor instead. 220 | func (*Response) Descriptor() ([]byte, []int) { 221 | return file_ravel_cmd_ravel_node_ravel_node_proto_rawDescGZIP(), []int{3} 222 | } 223 | 224 | func (x *Response) GetMsg() string { 225 | if x != nil { 226 | return x.Msg 227 | } 228 | return "" 229 | } 230 | 231 | func (x *Response) GetData() []byte { 232 | if x != nil { 233 | return x.Data 234 | } 235 | return nil 236 | } 237 | 238 | type Command struct { 239 | state protoimpl.MessageState 240 | sizeCache protoimpl.SizeCache 241 | unknownFields protoimpl.UnknownFields 242 | 243 | Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` 244 | Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` 245 | Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` 246 | } 247 | 248 | func (x *Command) Reset() { 249 | *x = Command{} 250 | if protoimpl.UnsafeEnabled { 251 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[4] 252 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 253 | ms.StoreMessageInfo(mi) 254 | } 255 | } 256 | 257 | func (x *Command) String() string { 258 | return protoimpl.X.MessageStringOf(x) 259 | } 260 | 261 | func (*Command) ProtoMessage() {} 262 | 263 | func (x *Command) ProtoReflect() protoreflect.Message { 264 | mi := &file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[4] 265 | if protoimpl.UnsafeEnabled && x != nil { 266 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 267 | if ms.LoadMessageInfo() == nil { 268 | ms.StoreMessageInfo(mi) 269 | } 270 | return ms 271 | } 272 | return mi.MessageOf(x) 273 | } 274 | 275 | // Deprecated: Use Command.ProtoReflect.Descriptor instead. 
276 | func (*Command) Descriptor() ([]byte, []int) { 277 | return file_ravel_cmd_ravel_node_ravel_node_proto_rawDescGZIP(), []int{4} 278 | } 279 | 280 | func (x *Command) GetOperation() string { 281 | if x != nil { 282 | return x.Operation 283 | } 284 | return "" 285 | } 286 | 287 | func (x *Command) GetKey() []byte { 288 | if x != nil { 289 | return x.Key 290 | } 291 | return nil 292 | } 293 | 294 | func (x *Command) GetValue() []byte { 295 | if x != nil { 296 | return x.Value 297 | } 298 | return nil 299 | } 300 | 301 | var File_ravel_cmd_ravel_node_ravel_node_proto protoreflect.FileDescriptor 302 | 303 | var file_ravel_cmd_ravel_node_ravel_node_proto_rawDesc = []byte{ 304 | 0x0a, 0x25, 0x72, 0x61, 0x76, 0x65, 0x6c, 0x2f, 0x63, 0x6d, 0x64, 0x2f, 0x72, 0x61, 0x76, 0x65, 305 | 0x6c, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x61, 0x76, 0x65, 0x6c, 0x5f, 0x6e, 0x6f, 0x64, 306 | 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 307 | 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x42, 0x22, 0x84, 0x01, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 308 | 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 309 | 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x72, 0x70, 310 | 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 311 | 0x0b, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 312 | 0x72, 0x61, 0x66, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 313 | 0x28, 0x09, 0x52, 0x0b, 0x72, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 314 | 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 315 | 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x22, 0x06, 316 | 0x0a, 0x04, 0x56, 0x6f, 0x69, 0x64, 0x22, 0x21, 0x0a, 0x07, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 317 | 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 318 | 0x08, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x30, 0x0a, 0x08, 0x52, 0x65, 0x73, 319 | 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x01, 0x20, 0x01, 320 | 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 321 | 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x4f, 0x0a, 0x07, 0x43, 322 | 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 323 | 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 324 | 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 325 | 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 326 | 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0xe9, 0x01, 0x0a, 327 | 0x09, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x4a, 0x6f, 328 | 0x69, 0x6e, 0x12, 0x14, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 329 | 0x72, 0x50, 0x42, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x1a, 0x14, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 330 | 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x42, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x12, 0x33, 331 | 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x14, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 332 | 0x6c, 0x75, 
0x73, 0x74, 0x65, 0x72, 0x50, 0x42, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x1a, 0x14, 0x2e, 333 | 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x42, 0x2e, 0x56, 334 | 0x6f, 0x69, 0x64, 0x12, 0x38, 0x0a, 0x03, 0x52, 0x75, 0x6e, 0x12, 0x17, 0x2e, 0x52, 0x61, 0x76, 335 | 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x42, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 336 | 0x61, 0x6e, 0x64, 0x1a, 0x18, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 337 | 0x65, 0x72, 0x50, 0x42, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 338 | 0x08, 0x49, 0x73, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x14, 0x2e, 0x52, 0x61, 0x76, 0x65, 339 | 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x42, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x1a, 340 | 0x17, 0x2e, 0x52, 0x61, 0x76, 0x65, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x42, 341 | 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x52, 0x61, 342 | 0x76, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x42, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 343 | 0x33, 344 | } 345 | 346 | var ( 347 | file_ravel_cmd_ravel_node_ravel_node_proto_rawDescOnce sync.Once 348 | file_ravel_cmd_ravel_node_ravel_node_proto_rawDescData = file_ravel_cmd_ravel_node_ravel_node_proto_rawDesc 349 | ) 350 | 351 | func file_ravel_cmd_ravel_node_ravel_node_proto_rawDescGZIP() []byte { 352 | file_ravel_cmd_ravel_node_ravel_node_proto_rawDescOnce.Do(func() { 353 | file_ravel_cmd_ravel_node_ravel_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_ravel_cmd_ravel_node_ravel_node_proto_rawDescData) 354 | }) 355 | return file_ravel_cmd_ravel_node_ravel_node_proto_rawDescData 356 | } 357 | 358 | var file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes = make([]protoimpl.MessageInfo, 5) 359 | var file_ravel_cmd_ravel_node_ravel_node_proto_goTypes = []interface{}{ 360 | (*Node)(nil), // 0: RavelClusterPB.Node 361 | (*Void)(nil), // 1: RavelClusterPB.Void 362 | (*Boolean)(nil), // 2: RavelClusterPB.Boolean 363 | (*Response)(nil), // 3: RavelClusterPB.Response 364 | (*Command)(nil), // 4: RavelClusterPB.Command 365 | } 366 | var file_ravel_cmd_ravel_node_ravel_node_proto_depIdxs = []int32{ 367 | 0, // 0: RavelClusterPB.RavelNode.Join:input_type -> RavelClusterPB.Node 368 | 0, // 1: RavelClusterPB.RavelNode.Leave:input_type -> RavelClusterPB.Node 369 | 4, // 2: RavelClusterPB.RavelNode.Run:input_type -> RavelClusterPB.Command 370 | 1, // 3: RavelClusterPB.RavelNode.IsLeader:input_type -> RavelClusterPB.Void 371 | 1, // 4: RavelClusterPB.RavelNode.Join:output_type -> RavelClusterPB.Void 372 | 1, // 5: RavelClusterPB.RavelNode.Leave:output_type -> RavelClusterPB.Void 373 | 3, // 6: RavelClusterPB.RavelNode.Run:output_type -> RavelClusterPB.Response 374 | 2, // 7: RavelClusterPB.RavelNode.IsLeader:output_type -> RavelClusterPB.Boolean 375 | 4, // [4:8] is the sub-list for method output_type 376 | 0, // [0:4] is the sub-list for method input_type 377 | 0, // [0:0] is the sub-list for extension type_name 378 | 0, // [0:0] is the sub-list for extension extendee 379 | 0, // [0:0] is the sub-list for field type_name 380 | } 381 | 382 | func init() { file_ravel_cmd_ravel_node_ravel_node_proto_init() } 383 | func file_ravel_cmd_ravel_node_ravel_node_proto_init() { 384 | if File_ravel_cmd_ravel_node_ravel_node_proto != nil { 385 | return 386 | } 387 | if !protoimpl.UnsafeEnabled { 388 | file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 389 | 
switch v := v.(*Node); i { 390 | case 0: 391 | return &v.state 392 | case 1: 393 | return &v.sizeCache 394 | case 2: 395 | return &v.unknownFields 396 | default: 397 | return nil 398 | } 399 | } 400 | file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 401 | switch v := v.(*Void); i { 402 | case 0: 403 | return &v.state 404 | case 1: 405 | return &v.sizeCache 406 | case 2: 407 | return &v.unknownFields 408 | default: 409 | return nil 410 | } 411 | } 412 | file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { 413 | switch v := v.(*Boolean); i { 414 | case 0: 415 | return &v.state 416 | case 1: 417 | return &v.sizeCache 418 | case 2: 419 | return &v.unknownFields 420 | default: 421 | return nil 422 | } 423 | } 424 | file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { 425 | switch v := v.(*Response); i { 426 | case 0: 427 | return &v.state 428 | case 1: 429 | return &v.sizeCache 430 | case 2: 431 | return &v.unknownFields 432 | default: 433 | return nil 434 | } 435 | } 436 | file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { 437 | switch v := v.(*Command); i { 438 | case 0: 439 | return &v.state 440 | case 1: 441 | return &v.sizeCache 442 | case 2: 443 | return &v.unknownFields 444 | default: 445 | return nil 446 | } 447 | } 448 | } 449 | type x struct{} 450 | out := protoimpl.TypeBuilder{ 451 | File: protoimpl.DescBuilder{ 452 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 453 | RawDescriptor: file_ravel_cmd_ravel_node_ravel_node_proto_rawDesc, 454 | NumEnums: 0, 455 | NumMessages: 5, 456 | NumExtensions: 0, 457 | NumServices: 1, 458 | }, 459 | GoTypes: file_ravel_cmd_ravel_node_ravel_node_proto_goTypes, 460 | DependencyIndexes: file_ravel_cmd_ravel_node_ravel_node_proto_depIdxs, 461 | MessageInfos: file_ravel_cmd_ravel_node_ravel_node_proto_msgTypes, 462 | }.Build() 463 | File_ravel_cmd_ravel_node_ravel_node_proto = out.File 464 | file_ravel_cmd_ravel_node_ravel_node_proto_rawDesc = nil 465 | file_ravel_cmd_ravel_node_ravel_node_proto_goTypes = nil 466 | file_ravel_cmd_ravel_node_ravel_node_proto_depIdxs = nil 467 | } 468 | 469 | // Reference imports to suppress errors if they are not otherwise used. 470 | var _ context.Context 471 | var _ grpc.ClientConnInterface 472 | 473 | // This is a compile-time assertion to ensure that this generated file 474 | // is compatible with the grpc package it is being compiled against. 475 | const _ = grpc.SupportPackageIsVersion6 476 | 477 | // RavelNodeClient is the client API for RavelNode service. 478 | // 479 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
480 | type RavelNodeClient interface { 481 | Join(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Void, error) 482 | Leave(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Void, error) 483 | Run(ctx context.Context, in *Command, opts ...grpc.CallOption) (*Response, error) 484 | IsLeader(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Boolean, error) 485 | } 486 | 487 | type ravelNodeClient struct { 488 | cc grpc.ClientConnInterface 489 | } 490 | 491 | func NewRavelNodeClient(cc grpc.ClientConnInterface) RavelNodeClient { 492 | return &ravelNodeClient{cc} 493 | } 494 | 495 | func (c *ravelNodeClient) Join(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Void, error) { 496 | out := new(Void) 497 | err := c.cc.Invoke(ctx, "/RavelClusterPB.RavelNode/Join", in, out, opts...) 498 | if err != nil { 499 | return nil, err 500 | } 501 | return out, nil 502 | } 503 | 504 | func (c *ravelNodeClient) Leave(ctx context.Context, in *Node, opts ...grpc.CallOption) (*Void, error) { 505 | out := new(Void) 506 | err := c.cc.Invoke(ctx, "/RavelClusterPB.RavelNode/Leave", in, out, opts...) 507 | if err != nil { 508 | return nil, err 509 | } 510 | return out, nil 511 | } 512 | 513 | func (c *ravelNodeClient) Run(ctx context.Context, in *Command, opts ...grpc.CallOption) (*Response, error) { 514 | out := new(Response) 515 | err := c.cc.Invoke(ctx, "/RavelClusterPB.RavelNode/Run", in, out, opts...) 516 | if err != nil { 517 | return nil, err 518 | } 519 | return out, nil 520 | } 521 | 522 | func (c *ravelNodeClient) IsLeader(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Boolean, error) { 523 | out := new(Boolean) 524 | err := c.cc.Invoke(ctx, "/RavelClusterPB.RavelNode/IsLeader", in, out, opts...) 525 | if err != nil { 526 | return nil, err 527 | } 528 | return out, nil 529 | } 530 | 531 | // RavelNodeServer is the server API for RavelNode service. 532 | type RavelNodeServer interface { 533 | Join(context.Context, *Node) (*Void, error) 534 | Leave(context.Context, *Node) (*Void, error) 535 | Run(context.Context, *Command) (*Response, error) 536 | IsLeader(context.Context, *Void) (*Boolean, error) 537 | } 538 | 539 | // UnimplementedRavelNodeServer can be embedded to have forward compatible implementations. 
540 | type UnimplementedRavelNodeServer struct { 541 | } 542 | 543 | func (*UnimplementedRavelNodeServer) Join(context.Context, *Node) (*Void, error) { 544 | return nil, status.Errorf(codes.Unimplemented, "method Join not implemented") 545 | } 546 | func (*UnimplementedRavelNodeServer) Leave(context.Context, *Node) (*Void, error) { 547 | return nil, status.Errorf(codes.Unimplemented, "method Leave not implemented") 548 | } 549 | func (*UnimplementedRavelNodeServer) Run(context.Context, *Command) (*Response, error) { 550 | return nil, status.Errorf(codes.Unimplemented, "method Run not implemented") 551 | } 552 | func (*UnimplementedRavelNodeServer) IsLeader(context.Context, *Void) (*Boolean, error) { 553 | return nil, status.Errorf(codes.Unimplemented, "method IsLeader not implemented") 554 | } 555 | 556 | func RegisterRavelNodeServer(s *grpc.Server, srv RavelNodeServer) { 557 | s.RegisterService(&_RavelNode_serviceDesc, srv) 558 | } 559 | 560 | func _RavelNode_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 561 | in := new(Node) 562 | if err := dec(in); err != nil { 563 | return nil, err 564 | } 565 | if interceptor == nil { 566 | return srv.(RavelNodeServer).Join(ctx, in) 567 | } 568 | info := &grpc.UnaryServerInfo{ 569 | Server: srv, 570 | FullMethod: "/RavelClusterPB.RavelNode/Join", 571 | } 572 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 573 | return srv.(RavelNodeServer).Join(ctx, req.(*Node)) 574 | } 575 | return interceptor(ctx, in, info, handler) 576 | } 577 | 578 | func _RavelNode_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 579 | in := new(Node) 580 | if err := dec(in); err != nil { 581 | return nil, err 582 | } 583 | if interceptor == nil { 584 | return srv.(RavelNodeServer).Leave(ctx, in) 585 | } 586 | info := &grpc.UnaryServerInfo{ 587 | Server: srv, 588 | FullMethod: "/RavelClusterPB.RavelNode/Leave", 589 | } 590 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 591 | return srv.(RavelNodeServer).Leave(ctx, req.(*Node)) 592 | } 593 | return interceptor(ctx, in, info, handler) 594 | } 595 | 596 | func _RavelNode_Run_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 597 | in := new(Command) 598 | if err := dec(in); err != nil { 599 | return nil, err 600 | } 601 | if interceptor == nil { 602 | return srv.(RavelNodeServer).Run(ctx, in) 603 | } 604 | info := &grpc.UnaryServerInfo{ 605 | Server: srv, 606 | FullMethod: "/RavelClusterPB.RavelNode/Run", 607 | } 608 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 609 | return srv.(RavelNodeServer).Run(ctx, req.(*Command)) 610 | } 611 | return interceptor(ctx, in, info, handler) 612 | } 613 | 614 | func _RavelNode_IsLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 615 | in := new(Void) 616 | if err := dec(in); err != nil { 617 | return nil, err 618 | } 619 | if interceptor == nil { 620 | return srv.(RavelNodeServer).IsLeader(ctx, in) 621 | } 622 | info := &grpc.UnaryServerInfo{ 623 | Server: srv, 624 | FullMethod: "/RavelClusterPB.RavelNode/IsLeader", 625 | } 626 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 627 | return 
srv.(RavelNodeServer).IsLeader(ctx, req.(*Void)) 628 | } 629 | return interceptor(ctx, in, info, handler) 630 | } 631 | 632 | var _RavelNode_serviceDesc = grpc.ServiceDesc{ 633 | ServiceName: "RavelClusterPB.RavelNode", 634 | HandlerType: (*RavelNodeServer)(nil), 635 | Methods: []grpc.MethodDesc{ 636 | { 637 | MethodName: "Join", 638 | Handler: _RavelNode_Join_Handler, 639 | }, 640 | { 641 | MethodName: "Leave", 642 | Handler: _RavelNode_Leave_Handler, 643 | }, 644 | { 645 | MethodName: "Run", 646 | Handler: _RavelNode_Run_Handler, 647 | }, 648 | { 649 | MethodName: "IsLeader", 650 | Handler: _RavelNode_IsLeader_Handler, 651 | }, 652 | }, 653 | Streams: []grpc.StreamDesc{}, 654 | Metadata: "ravel/cmd/ravel_node/ravel_node.proto", 655 | } 656 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 3 | github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 4 | github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= 5 | github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= 6 | github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= 7 | github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 8 | github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 9 | github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= 10 | github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= 11 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 12 | github.com/buraksezer/consistent v0.9.0 h1:Zfs6bX62wbP3QlbPGKUhqDw7SmNkOzY5bHZIYXYpR5g= 13 | github.com/buraksezer/consistent v0.9.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0maujuPowduSpZqmw= 14 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 15 | github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 16 | github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 17 | github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= 18 | github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= 19 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 20 | github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 21 | github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 22 | github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= 23 | github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 24 | github.com/cosiner/argv v0.1.0/go.mod h1:EusR6TucWKX+zFgtdUsKT2Cvg45K5rtpCcWz4hK06d8= 25 | github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= 26 | github.com/cpuguy83/go-md2man v1.0.10/go.mod 
h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= 27 | github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= 28 | github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 29 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 30 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 31 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 32 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 33 | github.com/dgraph-io/badger/v3 v3.2011.1 h1:Hmyof0WMEF/QtutX5SQHzIMnJQxb/IrSzhjckV2SD6g= 34 | github.com/dgraph-io/badger/v3 v3.2011.1/go.mod h1:0rLLrQpKVQAL0or/lBLMQznhr6dWWX7h5AKnmnqx268= 35 | github.com/dgraph-io/ristretto v0.0.4-0.20210122082011-bb5d392ed82d h1:eQYOG6A4td1tht0NdJB9Ls6DsXRGb2Ft6X9REU/MbbE= 36 | github.com/dgraph-io/ristretto v0.0.4-0.20210122082011-bb5d392ed82d/go.mod h1:tv2ec8nA7vRpSYX7/MbP52ihrUMXIHit54CQMq8npXQ= 37 | github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= 38 | github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 39 | github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= 40 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 41 | github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 42 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 43 | github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 44 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 45 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 46 | github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= 47 | github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= 48 | github.com/gin-gonic/gin v1.7.2 h1:Tg03T9yM2xa8j6I3Z3oqLaQRSmKvxPd6g/2HJ6zICFA= 49 | github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= 50 | github.com/go-delve/delve v1.5.0/go.mod h1:c6b3a1Gry6x8a4LGCe/CWzrocrfaHvkUxCj3k4bvSUQ= 51 | github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= 52 | github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= 53 | github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= 54 | github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= 55 | github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= 56 | github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= 57 | github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= 58 | github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= 59 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 60 | github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= 61 | github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 62 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 63 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 64 | github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= 65 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 66 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 67 | github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 68 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 69 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 70 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 71 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 72 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 73 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 74 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 75 | github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= 76 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 77 | github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= 78 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 79 | github.com/google/flatbuffers v1.12.0 h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w= 80 | github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= 81 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 82 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 83 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 84 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 85 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 86 | github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= 87 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 88 | github.com/google/go-dap v0.2.0/go.mod h1:5q8aYQFnHOAZEMP+6vmq25HKYAEwE+LF5yh7JKrrhSQ= 89 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 90 | github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 91 | github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= 92 | github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 93 | github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 94 | github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= 95 | github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= 96 | github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= 97 | 
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 98 | github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= 99 | github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 100 | github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= 101 | github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= 102 | github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 103 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 104 | github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= 105 | github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 106 | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 107 | github.com/hashicorp/raft v1.3.1 h1:zDT8ke8y2aP4wf9zPTB2uSIeavJ3Hx/ceY4jxI2JxuY= 108 | github.com/hashicorp/raft v1.3.1/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= 109 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 110 | github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= 111 | github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 112 | github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 113 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 114 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 115 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 116 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 117 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 118 | github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= 119 | github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= 120 | github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 121 | github.com/mattn/go-colorable v0.0.0-20170327083344-ded68f7a9561/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= 122 | github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= 123 | github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= 124 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 125 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 126 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 127 | github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 128 | github.com/mmcloughlin/avo v0.0.0-20201105074841-5d2f697d268f/go.mod h1:6aKT4zZIrpGqB3RpFU14ByCSSyKY6LfJz4J/JJChHfI= 129 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= 130 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 131 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= 132 | github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 133 | github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= 134 | github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 135 | github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 136 | github.com/peterh/liner v0.0.0-20170317030525-88609521dc4b/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= 137 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 138 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 139 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 140 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 141 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 142 | github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= 143 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 144 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 145 | github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 146 | github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 147 | github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= 148 | github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 149 | github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= 150 | github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 151 | github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= 152 | github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 153 | github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 154 | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 155 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 156 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 157 | github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= 158 | github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 159 | github.com/spf13/cobra v0.0.0-20170417170307-b6cb39589372/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 160 | github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= 161 | github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= 162 | github.com/spf13/pflag v0.0.0-20170417173400-9e4c21054fa1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 163 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 164 | github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 165 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 166 | github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 167 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 168 | github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= 169 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 170 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 171 | github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= 172 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 173 | github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= 174 | github.com/twitchyliquid64/golang-asm v0.15.0/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= 175 | github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= 176 | github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= 177 | github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= 178 | github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= 179 | github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= 180 | github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU= 181 | github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 182 | github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= 183 | github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= 184 | github.com/vmihailenco/msgpack/v5 v5.3.2 h1:MsXyN2rqdM8NM0lLiIpTn610e8Zcoj8ZuHxsMOi9qhI= 185 | github.com/vmihailenco/msgpack/v5 v5.3.2/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= 186 | github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= 187 | github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= 188 | github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= 189 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 190 | go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= 191 | go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= 192 | go.starlark.net v0.0.0-20190702223751-32f345186213/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= 193 | golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= 194 | golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= 195 | golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 196 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 197 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 198 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= 199 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 200 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 201 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 202 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 203 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 204 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 205 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 206 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 207 | golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 208 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 209 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 210 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 211 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 212 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= 213 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 214 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 215 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 216 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 217 | golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 218 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 219 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 220 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 221 | golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 222 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 223 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 224 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 225 | golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 226 | golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 227 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 228 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= 229 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 230 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 231 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 232 | golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= 233 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 234 | golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 235 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 236 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 237 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 238 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 239 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 240 | golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 241 | golang.org/x/tools v0.0.0-20201105001634-bc3cf281b174/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 242 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 243 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 244 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 245 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 246 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 247 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 248 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 249 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 250 | google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 251 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 252 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= 253 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 254 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 255 | google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= 256 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 257 | google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 258 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 259 | google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= 260 | google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= 261 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 262 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 263 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 264 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 265 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 266 | google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 267 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 268 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 269 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 270 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 271 | google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= 272 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 273 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 274 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 275 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 276 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 277 | gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 278 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 279 | gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 280 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 281 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 282 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 283 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 284 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 285 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 286 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 287 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 288 | rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= 289 | --------------------------------------------------------------------------------
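For reference, below is a minimal sketch of wiring the generated `RavelNodePB` bindings shown above into a running gRPC server: a struct embeds `UnimplementedRavelNodeServer` (so any method not overridden returns `codes.Unimplemented`, per the generated defaults), overrides `IsLeader`, and is registered via `RegisterRavelNodeServer`. The import path `github.com/adityameharia/ravel/RavelNodePB` and the listen address are assumptions for illustration; the repository's actual handler implementation lives in the `node_server` package.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	// Assumed import path for the generated package shown above.
	pb "github.com/adityameharia/ravel/RavelNodePB"
)

// nodeServer embeds UnimplementedRavelNodeServer so every RPC it does not
// override falls back to the generated "method not implemented" responses.
type nodeServer struct {
	pb.UnimplementedRavelNodeServer
}

// IsLeader overrides the generated default. A real node would consult its
// Raft state here; this sketch just returns the zero-value Boolean.
func (s *nodeServer) IsLeader(ctx context.Context, _ *pb.Void) (*pb.Boolean, error) {
	return &pb.Boolean{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:50000") // arbitrary address for the sketch
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterRavelNodeServer(s, &nodeServer{})
	log.Fatal(s.Serve(lis))
}
```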