├── .github └── workflows │ └── ci.yml ├── .gitignore ├── AUTHORS ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── README_en.md ├── cmd ├── kvcli │ └── kvcli.go ├── kvraft │ └── kvserver.go ├── metacli │ └── metacli.go ├── metasvr │ └── metasvr.go ├── shardcli │ └── shardcli.go └── shardsvr │ └── shardsvr.go ├── common ├── common.go ├── common_test.go └── random_utils.go ├── doc ├── benchmark_result.txt └── eraftbook.jpeg ├── go.mod ├── go.sum ├── kvserver ├── server.go └── state_machine.go ├── logger └── logger.go ├── metaserver ├── client.go ├── meta.go ├── metaserver.go ├── metaserver_test.go └── metastm.go ├── pbs └── raftbasic.proto ├── raftcore ├── consts.go ├── persistent_log.go ├── persistent_log_test.go ├── raft.go ├── raft_client_end.go ├── raft_log.go ├── raft_log_test.go ├── utils.go └── utils_test.go ├── raftpb ├── raftbasic.pb.go └── raftbasic_grpc.pb.go ├── scripts ├── build_dev.sh ├── gen_proto.sh └── run_tests.sh ├── shardkvserver ├── bucket.go ├── client.go └── shard_kvserver.go ├── storage ├── kv.go ├── kv_leveldb.go └── kv_leveldb_test.go └── tests └── integration_test.go /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | schedule: 5 | - cron: '0 10 * * *' 6 | push: 7 | jobs: 8 | test-ubuntu: 9 | runs-on: ubuntu-20.04 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions/setup-go@v5 13 | with: 14 | go-version: '>=1.19.0' 15 | - run: go version 16 | - name: Run test 17 | run: | 18 | go test -run TestClusterSingleShardRwBench tests/integration_test.go 19 | go test -run TestClusterRwBench tests/integration_test.go 20 | go test storage/* 21 | go test raftcore/* 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | output 2 | .DS_Store 3 | .vscode/ 4 | deps 5 | build/ 6 | build_/ 7 | grpc/ 8 | grpc 9 | rocksdb/ 10 | rocksdb 11 | protobuf/ 12 | spdlog/ 13 | spdlog 14 | logs 15 | logs/ 16 | data 17 | data/ 18 | gflags 19 | gflags/ 20 | vendor/ 21 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | ERaftGroup. 2 | 3 | # Initial version authors: 4 | JieLiu 5 | FengLinZhou 6 | BiYongLiu 7 | 8 | # Partial list of contributors: 9 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ### How to Contribute 2 | 3 | We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to 4 | follow. 5 | 6 | ## Create a pull request 7 | 8 | Please 9 | follow [making a pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects#making-a-pull-request) 10 | guide. 
11 | 12 | Note that a PR to the master branch must come from a branch named in the form feature_[date]_[short English feature name] or 13 | bugfix_[date]_[short English feature name]. 14 | 15 | Reference examples: 16 | 17 | ``` 18 | feature_20230323_initciflow 19 | bugfix_20230323_fixciflow 20 | ``` 21 | 22 | After submitting a PR, at least one reviewer must approve it before the code can be merged. 23 | 24 | ## Code Style 25 | 26 | Strictly follow the Google C++ style guide. Pay attention to style checks during code review, or install 27 | style-checking plug-ins in your IDE. 28 | 29 | [https://zh-google-styleguide.readthedocs.io/en/latest/google-cpp-styleguide/contents/](https://zh-google-styleguide.readthedocs.io/en/latest/google-cpp-styleguide/contents/) 30 | 31 | Run the following command in the project directory to check the code style 32 | 33 | ``` 34 | find src -type f \( -name '*.h' -or -name '*.hpp' -or -name '*.cpp' -or -name '*.c' -or -name '*.cc' \) -print | xargs clang-format -style=file --sort-includes -i -n -Werror 35 | ``` 36 | 37 | Auto-format the code 38 | 39 | ``` 40 | find src -type f \( -name '*.h' -or -name '*.hpp' -or -name '*.cpp' -or -name '*.c' -or -name '*.cc' \) -print | xargs clang-format -style=file --sort-includes -i 41 | ``` 42 | 43 | ## Last 44 | 45 | We are very open and welcome you to submit code and get in touch with us. 46 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | RUN apt-get update 4 | 5 | RUN apt-get install make wget -y 6 | 7 | RUN wget https://go.dev/dl/go1.23.0.linux-arm64.tar.gz -O go.tar.gz \ 8 | && tar -xzvf go.tar.gz -C /usr/local 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 ERaftGroup 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2022 eraft dev group 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | IMAGE_VERSION := v0.0.1 24 | 25 | BUILDER_IMAGE := $(or $(BUILDER_IMAGE),eraft/eraftbook:$(IMAGE_VERSION)) 26 | 27 | default: meta_cli shard_server shard_cli meta_server kv_server kv_cli 28 | 29 | image: 30 | docker build -f Dockerfile --network=host -t $(BUILDER_IMAGE) . 31 | 32 | build-dev: 33 | chmod +x scripts/build_dev.sh 34 | docker run -it --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/scripts/build_dev.sh 35 | 36 | run-test: 37 | chmod +x scripts/run_tests.sh 38 | docker run --name test-cli-node --network mytestnetwork --ip 172.18.0.5 -it --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/scripts/run_tests.sh 39 | 40 | meta_cli: 41 | go build -o output/metacli cmd/metacli/metacli.go 42 | 43 | meta_server: 44 | go build -o output/metaserver cmd/metasvr/metasvr.go 45 | 46 | shard_server: 47 | go build -o output/shardserver cmd/shardsvr/shardsvr.go 48 | 49 | shard_cli: 50 | go build -o output/shardcli cmd/shardcli/shardcli.go 51 | 52 | kv_server: 53 | go build -o output/kvserver cmd/kvraft/kvserver.go 54 | 55 | kv_cli: 56 | go build -o output/kvcli cmd/kvcli/kvcli.go 57 | 58 | clean: 59 | rm -rf output/* 60 | 61 | create-net: 62 | docker network create --subnet=172.18.0.0/16 mytestnetwork 63 | 64 | run-demo: 65 | docker run --name metaserver-node1 --network mytestnetwork --ip 172.18.0.2 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/metaserver 0 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 66 | docker run --name metaserver-node2 --network mytestnetwork --ip 172.18.0.3 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/metaserver 1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 67 | docker run --name metaserver-node3 --network mytestnetwork --ip 172.18.0.4 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/metaserver 2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 68 | sleep 1 69 | docker run --name kvserver-node1 --network mytestnetwork --ip 172.18.0.10 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) 
/eraft/output/shardserver 0 1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090 70 | docker run --name kvserver-node2 --network mytestnetwork --ip 172.18.0.11 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/shardserver 1 1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090 71 | docker run --name kvserver-node3 --network mytestnetwork --ip 172.18.0.12 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/shardserver 2 1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090 72 | docker run --name kvserver-node4 --network mytestnetwork --ip 172.18.0.13 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/shardserver 0 2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 172.18.0.13:8088,172.18.0.14:8089,172.18.0.15:8090 73 | docker run --name kvserver-node5 --network mytestnetwork --ip 172.18.0.14 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/shardserver 1 2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 172.18.0.13:8088,172.18.0.14:8089,172.18.0.15:8090 74 | docker run --name kvserver-node6 --network mytestnetwork --ip 172.18.0.15 --privileged=true -d --rm -v $(realpath .):/eraft eraft/eraftbook:$(IMAGE_VERSION) /eraft/output/shardserver 2 2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 172.18.0.13:8088,172.18.0.14:8089,172.18.0.15:8090 75 | 76 | stop-demo: 77 | docker stop kvserver-node1 kvserver-node2 kvserver-node3 kvserver-node4 kvserver-node5 kvserver-node6 metaserver-node1 metaserver-node2 metaserver-node3 78 | 79 | 80 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Language](https://img.shields.io/badge/Language-Go-blue.svg)](https://golang.org/) 2 | [![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT) 3 | 4 | 中文 | [English](README_en.md) 5 | 6 | ### 概述 7 | 8 | 书籍名称:《分布式数据服务:事务模型、处理语言、一致性与体系结构》ISBN:978-7-111-73737-7 9 | 10 | 本书详细地介绍了分布式数据服务协议库的 eRaft 原型系统实现原理和代码解剖。 11 | 12 | eraft 项目的是将 mit6.824 lab 大作业工业化成一个分布式存储系统,我们会用全网最简单,直白的语言介绍分布式系统的原理,并带着你设计和实现一个工业化的分布式存储系统。 13 | 14 | ### 最新的文档 15 | 16 | 如果你想查看最新的文档,请访问 [eraft 官网](https://eraft.cn) 17 | 18 | ### 书籍配套视频教程 19 | 20 | [bilibili](https://space.bilibili.com/389476201/channel/collectiondetail?sid=481263&spm_id_from=333.788.0.0) 21 | 22 | ### 书籍信息 23 | 24 | [官方购买链接](https://3.cn/1W-jAWMR) 25 | 26 | ### 为什么需要分布式? 27 | 28 | 首先我们看传统的单节点 C/S 或者 B/S 系统有啥缺点: 29 | 单节点意味着只用一台机器,机器的性能是有上限的,而且性能越好的机器价格越贵,想 IBM 的大型机,价格是很贵的。同时,这台机器如果挂掉或者因为写的代码有 30 | bug 导致进程异常,就无法容错,系统直接不可用。 31 | 32 | 我们分析完单节点系统的缺点后,可以总结一下分布式系统的设计目标 33 | 34 | #### 1.可扩展性(Scalability) 35 | 36 | 我们设计的分布式系统要具有可扩展性,这里的可扩展其实就是我们可以通过使用更多的机器来获取更高的系统总吞吐以及更好的性能,当然也不是机器越多性能越好,针对一些复杂的计算场景,节点越多性能并不一定会更好。 37 | 38 | #### 2.可用性(Availability) 39 | 40 | 分布式系统不会因为系统中的某台机器故障而直接停止服务,某台机器故障后,系统可以快速切换流量到正常的机器上,继续提供服务。 41 | 42 | #### 3.一致性 (Consistency) 43 | 44 | 我们要实现这一点,最重要的一个算法就是复制算法(replication),我们需要一种复制算法来保证挂掉的机器和切上去顶替它的机器数据是一致的,通常在分布式系统领域有专门一致性算法去保证复制的顺利进行。 45 | 46 | ### 一致性算法 47 | 48 | 建议先看 [raft 小论文](https://raft.github.io/raft.pdf) 49 | 50 | 带着下面的问题去看: 51 | 52 | ##### 什么是分布式系统中的脑裂? 53 | 54 | ##### 面对脑裂,我们的解决办法是什么? 55 | 56 | ##### 为什么多数派选举协议可以避免脑裂? 57 | 58 | ##### 为什么 raft 需要使用日志? 
59 | 60 | ##### 为什么 raft 协议中只允许一个 leader? 61 | 62 | ##### 怎么保证在一个任期内只有一个 leader 的? 63 | 64 | ##### 集群中的节点怎么知道一个新的 leader 节点被选出了? 65 | 66 | ##### 如果选举失败了,会发生什么? 67 | 68 | ##### 如果两个节点都拿到了同样的票数,怎么选 leader? 69 | 70 | ##### 如果老任期的 leader 不知道集群中新 leader 出现了怎么办? 71 | 72 | ##### 随机的选举超时时间的作用,如何去选取它的值? 73 | 74 | ##### 节点中的日志什么时候会出现不一致?Raft 怎么去保证最终日志会一致的? 75 | 76 | ##### 为什么不选择日志最长的服务器作为 leader? 77 | 78 | ##### 在服务器突然崩溃的时候,会发生什么事情? 79 | 80 | ##### 如果 raft 服务崩溃后重启了,raft 会记住哪些东西? 81 | 82 | ##### 什么是 raft 系统中常见的性能瓶颈? 83 | 84 | ##### 基于 raft 的服务崩溃重启后,是如何恢复的? 85 | 86 | ##### 哪些日志条目 raft 节点不能删除? 87 | 88 | ##### raft 日志是无限制增长的吗?如果不是,那么大规模的日志是怎么存储的? 89 | 90 | 再看 [大论文](https://github.com/ongardie/dissertation) 91 | 92 | ### 数据分片 93 | 94 | 好的,通过 Raft 基本算法,我们可以实现一个高可用的 raft 服务器组。我们已经解决了前面可用性和一致性的问题,但是问题还是存在的。一个 95 | raft 服务器组中只有一个 leader 来接收读写流量,当然你可以用 follower 分担部分读流量提高性能(这里会涉及到事务的一些问题,我们会在后面讨论)。 96 | 但是系统能提供的能力还是有上限的。 97 | 98 | 这时候我们就要思考,将客户端写入过来的请求进行分片处理,就像 map reduce,map 的阶段一下,先把超大的数据集切割成一个个小的去处理。 99 | 100 | eraft 中使用了 hash 分片的方法,我们将数据通过哈希算法映射到一个个桶 (bucket) 里面,然后不同的 raft 组负责一部分桶,一个 101 | raft 组可以负责多少个桶,是可以调整的。 102 | 103 | ### 集群架构 104 | 105 | #### 概念介绍 106 | 107 | ##### bucket 108 | 109 | 它是集群做数据管理的逻辑单元,一个分组的服务可以负责多个 bucket 的数据 110 | 111 | ##### config table 112 | 113 | 集群配置表,它主要维护了集群服务分组与 bucket 的映射关系,客户端访问集群数据之前需要先到这个表查询要访问 bucket 所在的服务分组列表 114 | 115 | #### 服务模块 116 | 117 | ##### metaserver 118 | 119 | 它主要负责集群配置表版本管理,它内部维护了一个集群配置表的版本链,可以记录集群配置的变更。 120 | 121 | ##### shardkvserver 122 | 123 | 它主要负责集群数据存储,一般有三台机器组成一个 raft 组,对外提供高可用的服务。 124 | 125 | ### 在容器里面运行 126 | 127 | 构建镜像 128 | 129 | ``` 130 | make image 131 | ``` 132 | 133 | 编译代码 134 | 135 | ``` 136 | make build-dev 137 | ``` 138 | 139 | 运行 demo 集群 140 | 141 | ``` 142 | make run-demo 143 | ``` 144 | 145 | 运行读写测试 146 | 147 | ``` 148 | make run-test 149 | ``` 150 | 151 | 停止集群 152 | 153 | ``` 154 | make stop-demo 155 | ``` 156 | 157 | ### 项目构建 158 | 159 | 构建依赖 160 | 161 | ``` 162 | go version >= go 1.21 163 | ``` 164 | 165 | 编译 166 | 167 | ``` 168 | git clone https://github.com/eraft-io/eraft.git 169 | cd eraft 170 | make 171 | ``` 172 | 173 | 运行集群基本读写测试 174 | 175 | ``` 176 | go test -run TestBasicClusterRW tests/integration_test.go -v 177 | ``` 178 | 179 | 运行集群读写基准测试 180 | 181 | ``` 182 | go test -run TestClusterRwBench tests/integration_test.go -v 183 | ``` 184 | 185 | 运行单分片集群读写基准测试 186 | 187 | ``` 188 | go test -run TestClusterSingleShardRwBench tests/integration_test.go -v 189 | ``` 190 | -------------------------------------------------------------------------------- /README_en.md: -------------------------------------------------------------------------------- 1 | [![Language](https://img.shields.io/badge/Language-Go-blue.svg)](https://golang.org/) 2 | [![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT) 3 | 4 | [中文](README.md) | English 5 | 6 | ### Overview 7 | 8 | Book title: 【Distributed Data Services: Transaction Models, Processing Language, Consistency and Architecture.】 9 | 10 | ISBN:978-7-111-73737-7 11 | 12 | This book provides a detailed introduction to the implementation principles and code analysis of the eRaft prototype 13 | system in the distributed data services protocol library. 14 | 15 | The eraft t project is to industrialize the mit6.824 lab operation into a distributed storage system. We will introduce 16 | the principles of distributed systems in the simplest and straightforward way, and guide you to design and implement an 17 | industrialized distributed storage system. 
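The cluster design described in this README (in both the Chinese and English sections) revolves around two ideas: data is hashed into buckets, and a versioned config table maps buckets to raft server groups. As a rough, purely illustrative sketch of that idea — the real types live in the metaserver and shardkvserver packages and are not reproduced here, and the peer addresses below are simply the ones used by the Makefile demo cluster — one config-table version could be modeled like this:

```
package main

import "fmt"

// nBuckets matches common.NBuckets in common/common.go.
const nBuckets = 10

// Config is a purely illustrative config-table version; the real structures
// live in the metaserver package and may look different.
type Config struct {
	Version int              // config versions form a chain inside the metaserver
	Buckets [nBuckets]int    // bucket id -> group id that currently owns it
	Groups  map[int][]string // group id -> raft peer addresses of that group
}

// serversForBucket returns the raft peers that currently serve a bucket.
func serversForBucket(cfg Config, bucket int) []string {
	return cfg.Groups[cfg.Buckets[bucket]]
}

func main() {
	// Demo layout: the peer addresses are the ones used by the Makefile demo
	// cluster; the bucket assignment (0-4 -> group 1, 5-9 -> group 2) is made up.
	cfg := Config{
		Version: 1,
		Groups: map[int][]string{
			1: {"172.18.0.10:8088", "172.18.0.11:8089", "172.18.0.12:8090"},
			2: {"172.18.0.13:8088", "172.18.0.14:8089", "172.18.0.15:8090"},
		},
	}
	for b := 0; b < nBuckets; b++ {
		gid := 1
		if b >= nBuckets/2 {
			gid = 2
		}
		cfg.Buckets[b] = gid
	}

	fmt.Println("bucket 3 is served by:", serversForBucket(cfg, 3))
	fmt.Println("bucket 7 is served by:", serversForBucket(cfg, 7))
}
```

A client follows the same flow: query the metaserver for the latest config version, compute the bucket for its key, and then send the request to the raft group returned by a lookup like serversForBucket.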
18 | 19 | ### Newest document 20 | 21 | If you want to check the latest documents, please visit the [eraft official website](https://eraft.cn) 22 | 23 | ### Video tutorials 24 | 25 | [bilibili](https://space.bilibili.com/389476201/channel/collectiondetail?sid=481263&spm_id_from=333.788.0.0) 26 | 27 | ### Ebook for this project 28 | 29 | [Shopping link](https://3.cn/1W-jAWMR) 30 | 31 | ### Why do we need to build a distributed system? 32 | 33 | First, let's look at the shortcomings of traditional single-node C/S or B/S systems: 34 | 35 | A single node means that only one machine is used. A single machine's performance has an upper bound, and machines with 36 | better performance are much more expensive; IBM mainframes, for example, are very costly. At the same time, if the 37 | machine goes down or the process crashes because of a bug in the code, there is no fault tolerance and the system 38 | becomes unavailable. 39 | 40 | After analyzing the shortcomings of single-node systems, we can summarize the design goals of distributed systems: 41 | 42 | #### 1. Scalability 43 | 44 | The distributed system we design must be scalable. Scalability here means that we can obtain higher total system 45 | throughput and better performance by using more machines. Of course, more machines do not always mean better 46 | performance; for some complex computing scenarios, adding nodes does not necessarily improve performance. 47 | 48 | #### 2. Availability 49 | 50 | The distributed system will not stop serving just because one machine in the system fails. After a machine fails, the 51 | system can quickly switch traffic to a healthy machine and continue to provide services. 52 | 53 | #### 3. Consistency 54 | 55 | To achieve this, the most important algorithm is the replication algorithm. We need a replication algorithm to ensure 56 | that the data on a failed machine and on the machine that takes over for it are consistent; in the field of 57 | distributed systems, a dedicated consensus algorithm is usually used to ensure that replication proceeds correctly. 58 | 59 | ### Consistency Algorithm 60 | 61 | It is recommended to read the [raft small paper](https://raft.github.io/raft.pdf) 62 | 63 | Take a look with the following questions in mind: 64 | 65 | - what is split brain? 66 | 67 | - What is our solution to the split brain? 68 | 69 | - why does majority help avoid split brain? 70 | 71 | - why the logs? 72 | 73 | - why a leader? 74 | 75 | - how to ensure at most one leader in a term? 76 | 77 | - how does a server learn about a newly elected leader? 78 | 79 | - what happens if an election doesn't succeed? 80 | 81 | - how does Raft avoid split votes? 82 | 83 | - how to choose the election timeout? 84 | 85 | - what if old leader isn't aware a new leader is elected? 86 | 87 | - how can logs disagree? 88 | 89 | - what would we like to happen after a server crashes? 90 | 91 | ### Data sharding 92 | 93 | Well, through the basic Raft algorithm, we can achieve a highly available raft server group. We have solved the previous 94 | issues of availability and consistency, but a problem still exists: there is only one leader in a raft server group to 95 | receive read and write traffic. Of course, you can use followers to share part of the read traffic to improve 96 | performance (there will be some issues related to transactions, which we will discuss later). But there is still a limit to 97 | what a single group can provide.
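The next paragraphs describe how eraft hashes keys into buckets. As a concrete, minimal sketch of that mapping: the hashing follows Key2BucketID / CRC32KeyHash in common/common.go (CRC32 with the 0xD5828281 table, modulo NBuckets = 10), while the bucket-to-group assignment at the end is hypothetical and only for illustration.

```
package main

import (
	"fmt"
	"hash/crc32"
)

// nBuckets mirrors common.NBuckets in common/common.go.
const nBuckets = 10

// key2BucketID maps a key to a bucket id the same way common.Key2BucketID
// does: CRC32 the key with the 0xD5828281 table and take it modulo nBuckets.
func key2BucketID(key string) int {
	table := crc32.MakeTable(0xD5828281)
	return int(crc32.Checksum([]byte(key), table)) % nBuckets
}

func main() {
	// Hypothetical bucket -> raft group assignment, only for illustration:
	// buckets 0-4 go to group 1, buckets 5-9 go to group 2.
	bucketToGroup := map[int]int{}
	for b := 0; b < nBuckets; b++ {
		gid := 1
		if b >= nBuckets/2 {
			gid = 2
		}
		bucketToGroup[b] = gid
	}

	for _, key := range []string{"user:1001", "order:42", "testkey"} {
		b := key2BucketID(key)
		fmt.Printf("key %q -> bucket %d -> raft group %d\n", key, b, bucketToGroup[b])
	}
}
```

Because the key-to-bucket mapping is fixed, rebalancing only ever moves whole buckets between raft groups (this is what the metacli `move` command drives); individual keys never need to be re-hashed.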
98 | 99 | At this point, we need to think about sharding the write requests coming from clients, much like MapReduce: in the map 100 | stage, the huge data set is first cut into small pieces that can be processed separately. 101 | 102 | eraft uses hash sharding. We map data into buckets through a hash algorithm, and then different raft 103 | groups are responsible for a subset of the buckets. How many buckets a raft group is responsible for can be adjusted. 104 | 105 | ### System Architecture 106 | 107 | #### Concept introduction 108 | 109 | ##### bucket 110 | 111 | It is the logical unit of data management in the cluster, and a service group can be responsible for the data of 112 | multiple buckets. 113 | 114 | ##### config table 115 | 116 | The cluster configuration table mainly maintains the mapping relationship between cluster service groups and buckets. 117 | Before clients access cluster data, they need to query this table for the list of service groups that serve the bucket 118 | they want to access. 119 | 120 | #### service module 121 | 122 | ##### metaserver 123 | 124 | It is mainly responsible for the version management of the cluster configuration table. It internally maintains a 125 | version chain of the cluster configuration table, which records changes to the cluster configuration. 126 | 127 | ##### shardkvserver 128 | 129 | It is mainly responsible for cluster data storage. Generally, three machines form a raft group to provide 130 | high-availability services to the outside world. 131 | 132 | ### Build 133 | 134 | pre-dependencies 135 | 136 | go version >= go 1.21 137 | 138 | download code and make it 139 | 140 | ``` 141 | git clone https://github.com/eraft-io/eraft.git 142 | 143 | cd eraft 144 | make 145 | ``` 146 | 147 | run basic cluster test 148 | 149 | ``` 150 | go test -run TestBasicClusterRW tests/integration_test.go -v 151 | ``` 152 | 153 | run basic cluster bench 154 | 155 | ``` 156 | go test -run TestClusterRwBench tests/integration_test.go -v 157 | ``` 158 | -------------------------------------------------------------------------------- /cmd/kvcli/kvcli.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE.
23 | // 24 | 25 | package main 26 | 27 | import ( 28 | "context" 29 | "crypto/rand" 30 | "fmt" 31 | "math/big" 32 | "os" 33 | "os/signal" 34 | "strconv" 35 | "time" 36 | 37 | "github.com/eraft-io/eraft/common" 38 | pb "github.com/eraft-io/eraft/raftpb" 39 | 40 | "github.com/eraft-io/eraft/raftcore" 41 | ) 42 | 43 | type KvClient struct { 44 | rpcCli *raftcore.RaftClientEnd 45 | leaderId int64 46 | clientId int64 47 | commandId int64 48 | } 49 | 50 | func (kvCli *KvClient) Close() { 51 | kvCli.rpcCli.CloseAllConn() 52 | } 53 | 54 | func nrand() int64 { 55 | maxi := big.NewInt(int64(1) << 62) 56 | bigx, _ := rand.Int(rand.Reader, maxi) 57 | return bigx.Int64() 58 | } 59 | 60 | func MakeKvClient(targetId int, targetAddr string) *KvClient { 61 | cli := raftcore.MakeRaftClientEnd(targetAddr, uint64(targetId)) 62 | return &KvClient{ 63 | rpcCli: cli, 64 | leaderId: 0, 65 | clientId: nrand(), 66 | commandId: 0, 67 | } 68 | } 69 | 70 | func (kvCli *KvClient) Get(key string) string { 71 | cmdReq := &pb.CommandRequest{ 72 | Key: key, 73 | OpType: pb.OpType_OpGet, 74 | ClientId: kvCli.clientId, 75 | } 76 | resp, err := (*kvCli.rpcCli.GetRaftServiceCli()).DoCommand(context.Background(), cmdReq) 77 | if err != nil { 78 | return "err" 79 | } 80 | return resp.Value 81 | } 82 | 83 | func (kvCli *KvClient) Put(key, value string) string { 84 | cmdReq := &pb.CommandRequest{ 85 | Key: key, 86 | Value: value, 87 | ClientId: kvCli.clientId, 88 | OpType: pb.OpType_OpPut, 89 | } 90 | _, err := (*kvCli.rpcCli.GetRaftServiceCli()).DoCommand(context.Background(), cmdReq) 91 | if err != nil { 92 | return "err" 93 | } 94 | return "ok" 95 | } 96 | 97 | func main() { 98 | if len(os.Args) < 2 { 99 | fmt.Println("usage: kvcli [serveraddr] [count]") 100 | return 101 | } 102 | sigs := make(chan os.Signal, 1) 103 | 104 | kvCli := MakeKvClient(99, os.Args[1]) 105 | 106 | count, err := strconv.Atoi(os.Args[2]) 107 | if err != nil { 108 | panic(err) 109 | } 110 | 111 | sigChan := make(chan os.Signal) 112 | signal.Notify(sigChan) 113 | 114 | go func() { 115 | sig := <-sigs 116 | fmt.Println(sig) 117 | kvCli.rpcCli.CloseAllConn() 118 | os.Exit(-1) 119 | }() 120 | 121 | keys := make([]string, count) 122 | values := make([]string, count) 123 | 124 | for i := 0; i < count; i++ { 125 | rndK := common.RandStringRunes(8) 126 | rndV := common.RandStringRunes(8) 127 | keys[i] = rndK 128 | values[i] = rndV 129 | } 130 | 131 | startTs := time.Now() 132 | for i := 0; i < count; i++ { 133 | kvCli.Put(keys[i], values[i]) 134 | } 135 | elapsed := time.Since(startTs).Seconds() 136 | fmt.Printf("total cost %f s\n", elapsed) 137 | 138 | } 139 | -------------------------------------------------------------------------------- /cmd/kvraft/kvserver.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 
15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package main 26 | 27 | import ( 28 | "fmt" 29 | "net" 30 | "os" 31 | "os/signal" 32 | "strconv" 33 | "syscall" 34 | 35 | kvsvr "github.com/eraft-io/eraft/kvserver" 36 | pb "github.com/eraft-io/eraft/raftpb" 37 | "google.golang.org/grpc" 38 | "google.golang.org/grpc/reflection" 39 | ) 40 | 41 | func main() { 42 | if len(os.Args) < 2 { 43 | fmt.Println("usage: server [nodeId]") 44 | return 45 | } 46 | sigs := make(chan os.Signal, 1) 47 | 48 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 49 | 50 | nodeIdStr := os.Args[1] 51 | nodeId, err := strconv.Atoi(nodeIdStr) 52 | if err != nil { 53 | panic(err) 54 | } 55 | 56 | kvServer := kvsvr.MakeKvServer(nodeId) 57 | lis, err := net.Listen("tcp", kvsvr.PeersMap[nodeId]) 58 | if err != nil { 59 | fmt.Printf("failed to listen: %v", err) 60 | return 61 | } 62 | fmt.Printf("server listen on: %s \n", kvsvr.PeersMap[nodeId]) 63 | s := grpc.NewServer() 64 | pb.RegisterRaftServiceServer(s, kvServer) 65 | 66 | sigChan := make(chan os.Signal) 67 | 68 | signal.Notify(sigChan) 69 | 70 | go func() { 71 | sig := <-sigs 72 | fmt.Println(sig) 73 | kvServer.Rf.CloseEndsConn() 74 | os.Exit(-1) 75 | }() 76 | 77 | reflection.Register(s) 78 | err = s.Serve(lis) 79 | if err != nil { 80 | fmt.Printf("failed to serve: %v", err) 81 | return 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /cmd/metacli/metacli.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package main 26 | 27 | import ( 28 | "encoding/json" 29 | "fmt" 30 | "os" 31 | "os/signal" 32 | "strconv" 33 | "strings" 34 | 35 | "github.com/eraft-io/eraft/common" 36 | "github.com/eraft-io/eraft/metaserver" 37 | "github.com/eraft-io/eraft/raftcore" 38 | ) 39 | 40 | func main() { 41 | if len(os.Args) < 3 { 42 | fmt.Printf("usage: \n " + 43 | "join a group: kvcli [config server addrs] join [gid] [svraddr1,svraddr2,svraddr3]\n " + 44 | "leave a group: kvcli [config server addrs] leave [gid]\n " + 45 | "query newlest config: kvcli [config server addrs] query\n " + 46 | "move a bucket: kvcli [config server addrs] move [start-end] [gid]\n") 47 | return 48 | } 49 | sigs := make(chan os.Signal, 1) 50 | 51 | addrs := strings.Split(os.Args[1], ",") 52 | metaCli := metaserver.MakeMetaSvrClient(common.UnUsedTid, addrs) 53 | 54 | sigChan := make(chan os.Signal, 1) 55 | signal.Notify(sigChan) 56 | 57 | go func() { 58 | sig := <-sigs 59 | fmt.Println(sig) 60 | for _, cli := range metaCli.GetRpcClis() { 61 | cli.CloseAllConn() 62 | } 63 | os.Exit(-1) 64 | }() 65 | 66 | switch os.Args[2] { 67 | case "join": 68 | { 69 | gid, _ := strconv.Atoi(os.Args[3]) 70 | addrMap := make(map[int64]string) 71 | addrMap[int64(gid)] = os.Args[4] 72 | metaCli.Join(addrMap) 73 | } 74 | case "leave": 75 | { 76 | gid, _ := strconv.Atoi(os.Args[3]) 77 | metaCli.Leave([]int64{int64(gid)}) 78 | } 79 | case "query": 80 | { 81 | lastConf := metaCli.Query(-1) 82 | outBytes, _ := json.Marshal(lastConf) 83 | raftcore.PrintDebugLog("latest configuration: " + string(outBytes)) 84 | } 85 | case "move": 86 | { 87 | items := strings.Split(os.Args[3], "-") 88 | if len(items) != 2 { 89 | raftcore.PrintDebugLog("bucket range args error") 90 | return 91 | } 92 | start, _ := strconv.Atoi(items[0]) 93 | end, _ := strconv.Atoi(items[1]) 94 | gid, _ := strconv.Atoi(os.Args[4]) 95 | 96 | for i := start; i <= end; i++ { 97 | metaCli.Move(i, gid) 98 | } 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /cmd/metasvr/metasvr.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package main 26 | 27 | import ( 28 | "fmt" 29 | "net" 30 | "os" 31 | "os/signal" 32 | "strconv" 33 | "strings" 34 | "syscall" 35 | 36 | "github.com/eraft-io/eraft/metaserver" 37 | pb "github.com/eraft-io/eraft/raftpb" 38 | 39 | "google.golang.org/grpc" 40 | "google.golang.org/grpc/reflection" 41 | ) 42 | 43 | func main() { 44 | 45 | if len(os.Args) < 3 { 46 | fmt.Println("usage: server [nodeId] [configserveraddr1,configserveraddr2,configserveraddr3]") 47 | return 48 | } 49 | 50 | sigs := make(chan os.Signal, 1) 51 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 52 | 53 | nodeIdStr := os.Args[1] 54 | nodeID, err := strconv.Atoi(nodeIdStr) 55 | if err != nil { 56 | panic(err) 57 | } 58 | metaSvrAddrs := strings.Split(os.Args[2], ",") 59 | cfPeerMap := make(map[int]string) 60 | for i, addr := range metaSvrAddrs { 61 | cfPeerMap[i] = addr 62 | } 63 | 64 | metaSvr := metaserver.MakeMetaServer(cfPeerMap, nodeID) 65 | lis, err := net.Listen("tcp", cfPeerMap[nodeID]) 66 | if err != nil { 67 | fmt.Printf("failed to listen: %v", err) 68 | return 69 | } 70 | s := grpc.NewServer() 71 | 72 | pb.RegisterRaftServiceServer(s, metaSvr) 73 | 74 | sigChan := make(chan os.Signal, 1) 75 | 76 | signal.Notify(sigChan) 77 | 78 | go func() { 79 | sig := <-sigs 80 | fmt.Println(sig) 81 | metaSvr.Rf.CloseEndsConn() 82 | metaSvr.StopApply() 83 | os.Exit(-1) 84 | }() 85 | 86 | reflection.Register(s) 87 | err = s.Serve(lis) 88 | if err != nil { 89 | fmt.Printf("failed to serve: %v", err) 90 | return 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /cmd/shardcli/shardcli.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package main 26 | 27 | import ( 28 | "encoding/json" 29 | "fmt" 30 | "os" 31 | "os/signal" 32 | "strconv" 33 | "strings" 34 | 35 | "github.com/eraft-io/eraft/shardkvserver" 36 | ) 37 | 38 | func main() { 39 | if len(os.Args) < 3 { 40 | fmt.Printf("usage: \n" + 41 | "kvcli [configserver addr] put [key] [value]\n" + 42 | "kvcli [configserver addr] get [key]\n" + 43 | "kvcli [configserver addr] getbuckets [gid] [id1,id2,...]\n" + 44 | "kvcli [configserver addr] delbuckets [gid] [id1,id2,...]\n" + 45 | "kvcli [configserver addr] insertbucketkv [gid] [bid] [key] [value]\n") 46 | return 47 | } 48 | sigs := make(chan os.Signal, 1) 49 | 50 | shardKvCli := shardkvserver.MakeKvClient(os.Args[1]) 51 | 52 | sigChan := make(chan os.Signal, 1) 53 | signal.Notify(sigChan) 54 | 55 | switch os.Args[2] { 56 | case "put": 57 | if err := shardKvCli.Put(os.Args[3], os.Args[4]); err != nil { 58 | fmt.Println("err: " + err.Error()) 59 | return 60 | } 61 | case "get": 62 | v, err := shardKvCli.Get(os.Args[3]) 63 | if err != nil { 64 | fmt.Println("err: " + err.Error()) 65 | return 66 | } 67 | fmt.Println("got value: " + v) 68 | case "getbuckets": 69 | gid, _ := strconv.Atoi(os.Args[3]) 70 | bidsStr := os.Args[4] 71 | bids := []int64{} 72 | bidsStrArr := strings.Split(bidsStr, ",") 73 | for _, bidStr := range bidsStrArr { 74 | bid, _ := strconv.Atoi(bidStr) 75 | bids = append(bids, int64(bid)) 76 | } 77 | datas := shardKvCli.GetBucketDatas(gid, bids) 78 | fmt.Println("get buckets datas: " + datas) 79 | case "delbuckets": 80 | gid, _ := strconv.Atoi(os.Args[3]) 81 | bidsStr := os.Args[4] 82 | bids := []int64{} 83 | bidsStrArr := strings.Split(bidsStr, ",") 84 | for _, bidStr := range bidsStrArr { 85 | bid, _ := strconv.Atoi(bidStr) 86 | bids = append(bids, int64(bid)) 87 | } 88 | shardKvCli.DeleteBucketDatas(gid, bids) 89 | case "insertbucketkv": 90 | gid, _ := strconv.Atoi(os.Args[3]) 91 | bid, _ := strconv.Atoi(os.Args[4]) 92 | bucketDatas := &shardkvserver.BucketDatasVo{} 93 | bucketDatas.Datas = make(map[int]map[string]string) 94 | kv := map[string]string{os.Args[5]: os.Args[6]} 95 | bucketDatas.Datas[bid] = kv 96 | datas, _ := json.Marshal(bucketDatas) 97 | shardKvCli.InsertBucketDatas(gid, []int64{int64(bid)}, datas) 98 | } 99 | go func() { 100 | sig := <-sigs 101 | fmt.Println(sig) 102 | for _, cli := range shardKvCli.GetCsClient().GetRpcClis() { 103 | cli.CloseAllConn() 104 | } 105 | shardKvCli.GetRpcClient().CloseAllConn() 106 | os.Exit(-1) 107 | }() 108 | } 109 | -------------------------------------------------------------------------------- /cmd/shardsvr/shardsvr.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 
15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package main 26 | 27 | import ( 28 | "fmt" 29 | "net" 30 | "os" 31 | "os/signal" 32 | "strconv" 33 | "strings" 34 | "syscall" 35 | 36 | pb "github.com/eraft-io/eraft/raftpb" 37 | 38 | "github.com/eraft-io/eraft/shardkvserver" 39 | "google.golang.org/grpc" 40 | ) 41 | 42 | func main() { 43 | if len(os.Args) < 5 { 44 | fmt.Println("usage: server [nodeId] [gId] [csAddr] [server1addr,server2addr,server3addr]") 45 | return 46 | } 47 | 48 | sigs := make(chan os.Signal, 1) 49 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 50 | 51 | nodeIdStr := os.Args[1] 52 | nodeID, err := strconv.Atoi(nodeIdStr) 53 | if err != nil { 54 | panic(err) 55 | } 56 | 57 | gidStr := os.Args[2] 58 | gid, err := strconv.Atoi(gidStr) 59 | if err != nil { 60 | panic(err) 61 | } 62 | 63 | svrAddrs := strings.Split(os.Args[4], ",") 64 | svrPeerMap := make(map[int]string) 65 | for i, addr := range svrAddrs { 66 | svrPeerMap[i] = addr 67 | } 68 | 69 | shardSvr := shardkvserver.MakeShardKVServer(svrPeerMap, int64(nodeID), gid, os.Args[3]) 70 | lis, err := net.Listen("tcp", svrPeerMap[nodeID]) 71 | if err != nil { 72 | fmt.Printf("failed to listen: %v", err) 73 | return 74 | } 75 | fmt.Printf("server listen on: %s \n", svrPeerMap[nodeID]) 76 | s := grpc.NewServer() 77 | pb.RegisterRaftServiceServer(s, shardSvr) 78 | 79 | sigChan := make(chan os.Signal, 1) 80 | 81 | signal.Notify(sigChan) 82 | 83 | go func() { 84 | sig := <-sigs 85 | fmt.Println(sig) 86 | shardSvr.GetRf().CloseEndsConn() 87 | shardSvr.CloseApply() 88 | os.Exit(-1) 89 | }() 90 | 91 | err = s.Serve(lis) 92 | if err != nil { 93 | fmt.Printf("failed to serve: %v", err) 94 | return 95 | } 96 | 97 | } 98 | -------------------------------------------------------------------------------- /common/common.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package common 26 | 27 | import ( 28 | "hash/crc32" 29 | "io/ioutil" 30 | "os" 31 | "path" 32 | ) 33 | 34 | const NBuckets = 10 35 | 36 | const UnUsedTid = 999 37 | 38 | const ( 39 | ErrCodeNoErr int64 = iota 40 | ErrCodeWrongGroup 41 | ErrCodeWrongLeader 42 | ErrCodeExecTimeout 43 | ) 44 | 45 | func Key2BucketID(key string) int { 46 | return CRC32KeyHash(key, NBuckets) 47 | } 48 | 49 | func CRC32KeyHash(k string, base int) int { 50 | bucketID := 0 51 | crc32q := crc32.MakeTable(0xD5828281) 52 | sum := crc32.Checksum([]byte(k), crc32q) 53 | bucketID = int(sum) % NBuckets 54 | return bucketID 55 | } 56 | 57 | func Int64ArrToIntArr(in []int64) []int { 58 | out := []int{} 59 | for _, item := range in { 60 | out = append(out, int(item)) 61 | } 62 | return out 63 | } 64 | 65 | func RemoveDir(in string) { 66 | dir, _ := ioutil.ReadDir(in) 67 | for _, d := range dir { 68 | os.RemoveAll(path.Join([]string{in, d.Name()}...)) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /common/common_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package common 26 | 27 | import ( 28 | "strconv" 29 | "testing" 30 | ) 31 | 32 | func TestKeyToBid(t *testing.T) { 33 | for i := 0; i < 20; i++ { 34 | t.Log(Key2BucketID(strconv.Itoa(i))) 35 | } 36 | } 37 | 38 | func TestRandom(t *testing.T) { 39 | t.Log(RandStringRunes(1024)) 40 | } 41 | -------------------------------------------------------------------------------- /common/random_utils.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "math/rand" 5 | "time" 6 | ) 7 | 8 | func init() { 9 | rand.Seed(time.Now().UnixNano()) 10 | } 11 | 12 | var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") 13 | 14 | func RandStringRunes(n int) string { 15 | b := make([]rune, n) 16 | for i := range b { 17 | b[i] = letterRunes[rand.Intn(len(letterRunes))] 18 | } 19 | return string(b) 20 | } 21 | -------------------------------------------------------------------------------- /doc/benchmark_result.txt: -------------------------------------------------------------------------------- 1 | 10:46:00 pm CST 2 | Wednesday, January 31, 2024 3 | 4 | 2024年 1月31日 星期三 22时43分09秒 CST 5 | 6 | total request: 1000 7 | total time cost: 2133.000000 8 | avg time cost: 2.133000 9 | max time cost: 7.000000 10 | min time cost: 0.000000 11 | -------------------------------------------------------------------------------- /doc/eraftbook.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eraft-io/eraft/a2f530a9c62ebc9dd01b703309c372aa55c19ad9/doc/eraftbook.jpeg -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/eraft-io/eraft 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/syndtr/goleveldb v1.0.0 7 | // github.com/vmware/go-pmem-transaction v0.0.0-20210401173707-6e2b76e1a20d 8 | google.golang.org/grpc v1.46.0 9 | google.golang.org/protobuf v1.28.0 10 | ) 11 | 12 | require ( 13 | github.com/stretchr/testify v1.8.1 14 | go.uber.org/zap v1.26.0 15 | ) 16 | 17 | require ( 18 | github.com/davecgh/go-spew v1.1.1 // indirect 19 | github.com/golang/protobuf v1.5.2 // indirect 20 | github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect 21 | github.com/onsi/ginkgo v1.16.5 // indirect 22 | github.com/onsi/gomega v1.18.1 // indirect 23 | github.com/pmezard/go-difflib v1.0.0 // indirect 24 | go.uber.org/multierr v1.10.0 // indirect 25 | golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 // indirect 26 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect 27 | golang.org/x/text v0.3.6 // indirect 28 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect 29 | gopkg.in/yaml.v3 v3.0.1 // indirect 30 | ) 31 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 4 | github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= 5 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 6 | 
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 7 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 8 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 9 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 10 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 11 | github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 12 | github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 13 | github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= 14 | github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 15 | github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 16 | github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 17 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 18 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 19 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 20 | github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 21 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 22 | github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 23 | github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 24 | github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= 25 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 26 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 27 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 28 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 29 | github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 30 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= 31 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 32 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 33 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 34 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 35 | github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 36 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 37 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 38 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 39 | 
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 40 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 41 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 42 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 43 | github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 44 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 45 | github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 46 | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 47 | github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= 48 | github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 49 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 50 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 51 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 52 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 53 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 54 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 55 | github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= 56 | github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 57 | github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 58 | github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 59 | github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 60 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 61 | github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 62 | github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 63 | github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= 64 | github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= 65 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 66 | github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 67 | github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 68 | github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 69 | github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 70 | github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= 71 | github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= 72 | github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 73 | github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= 74 | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 75 | github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= 76 | github.com/onsi/gomega 
v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= 77 | github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= 78 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 79 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 80 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 81 | github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 82 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 83 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 84 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 85 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 86 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 87 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 88 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 89 | github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 90 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 91 | github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= 92 | github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= 93 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 94 | go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 95 | go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= 96 | go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= 97 | go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= 98 | go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 99 | go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= 100 | go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= 101 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 102 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 103 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 104 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 105 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 106 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 107 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 108 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 109 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 110 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 111 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 112 | golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 113 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 114 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 115 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 116 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 117 | golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 118 | golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 119 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 120 | golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= 121 | golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= 122 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 123 | golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 124 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 125 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 126 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 127 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 128 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 129 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 130 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 131 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 132 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 133 | golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 134 | golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 135 | golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 136 | golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 137 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 138 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 139 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 140 | golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 141 | golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 142 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 143 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e 
h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= 144 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 145 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 146 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 147 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 148 | golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= 149 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 150 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 151 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 152 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 153 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 154 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 155 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 156 | golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 157 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 158 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 159 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 160 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 161 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 162 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 163 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 164 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 165 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 166 | google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 167 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= 168 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 169 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 170 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 171 | google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 172 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 173 | google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 174 | google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 175 | google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= 176 | google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= 177 | 
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 178 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 179 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 180 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 181 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 182 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 183 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 184 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 185 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 186 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 187 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 188 | google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 189 | google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= 190 | google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 191 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 192 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 193 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 194 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 195 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 196 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 197 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 198 | gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 199 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 200 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 201 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 202 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 203 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 204 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 205 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 206 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 207 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 208 | -------------------------------------------------------------------------------- /kvserver/server.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the 
Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | // 25 | // This is a single raft group system, you can use this to debug your raftcore 26 | // 27 | 28 | package kvserver 29 | 30 | import ( 31 | "bytes" 32 | "context" 33 | "encoding/gob" 34 | "encoding/json" 35 | "fmt" 36 | "strconv" 37 | "sync" 38 | "sync/atomic" 39 | "time" 40 | 41 | "github.com/eraft-io/eraft/common" 42 | "github.com/eraft-io/eraft/raftcore" 43 | pb "github.com/eraft-io/eraft/raftpb" 44 | "github.com/eraft-io/eraft/storage" 45 | ) 46 | 47 | var DnsMap = map[int]string{0: "eraft-kvserver-0.eraft-kvserver:8088", 1: "eraft-kvserver-1.eraft-kvserver:8089", 2: "eraft-kvserver-2.eraft-kvserver:8090"} 48 | var PeersMap = map[int]string{0: ":8088", 1: ":8089", 2: ":8090"} 49 | 50 | const ExecCmdTimeout = 1 * time.Second 51 | 52 | type OperationContext struct { 53 | MaxAppliedCommandId int64 54 | LastResponse *pb.CommandResponse 55 | } 56 | 57 | type KvServer struct { 58 | mu sync.RWMutex 59 | dead int32 60 | Rf *raftcore.Raft 61 | applyCh chan *pb.ApplyMsg 62 | 63 | lastApplied int 64 | 65 | stm StateMachine 66 | lastOperations map[int64]OperationContext 67 | 68 | notifyChs map[int]chan *pb.CommandResponse 69 | stopApplyCh chan interface{} 70 | 71 | pb.UnimplementedRaftServiceServer 72 | } 73 | 74 | func MakeKvServer(nodeId int) *KvServer { 75 | clientEnds := []*raftcore.RaftClientEnd{} 76 | for id, addr := range PeersMap { 77 | newEnd := raftcore.MakeRaftClientEnd(addr, uint64(id)) 78 | clientEnds = append(clientEnds, newEnd) 79 | } 80 | newApplyCh := make(chan *pb.ApplyMsg) 81 | 82 | logDbEng, err := storage.MakeLevelDBKvStore("./data/kv_server" + "/node_" + strconv.Itoa(nodeId)) 83 | if err != nil { 84 | raftcore.PrintDebugLog("boot storage engine err!") 85 | panic(err) 86 | } 87 | 88 | newRf := raftcore.MakeRaft(clientEnds, nodeId, logDbEng, newApplyCh, 2000, 6000) 89 | kvSvr := &KvServer{Rf: newRf, applyCh: newApplyCh, dead: 0, lastApplied: 0, stm: NewMemKV(), notifyChs: make(map[int]chan *pb.CommandResponse)} 90 | kvSvr.stopApplyCh = make(chan interface{}) 91 | kvSvr.restoreSnapshot(newRf.ReadSnapshot()) 92 | 93 | go kvSvr.ApplyingToStm(kvSvr.stopApplyCh) 94 | 95 | return kvSvr 96 | } 97 | 98 | func (s *KvServer) restoreSnapshot(snapData []byte) { 99 | if snapData == nil { 100 | return 101 | } 102 | buf := bytes.NewBuffer(snapData) 103 | data := gob.NewDecoder(buf) 104 | var stm MemKV 105 | if data.Decode(&stm) != nil { 106 | raftcore.PrintDebugLog("decode stm data error") 107 | } 108 | stmBytes, _ := json.Marshal(stm) 109 | raftcore.PrintDebugLog("recover stm -> " + string(stmBytes)) 110 | s.stm = &stm 111 | } 
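// Editor's note (not part of the original source): takeSnapshot (below) and
// restoreSnapshot (above) round-trip the MemKV state machine through
// encoding/gob. A minimal sketch of that round trip, using only names already
// defined in this package and imports already present in this file:
//
//	var buf bytes.Buffer
//	kv := NewMemKV()
//	kv.Put("k", "v")
//	_ = gob.NewEncoder(&buf).Encode(kv)        // the bytes handed to Rf.Snapshot
//	var restored MemKV
//	_ = gob.NewDecoder(&buf).Decode(&restored) // what restoreSnapshot rebuilds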
112 | 113 | func (s *KvServer) takeSnapshot(index int) { 114 | var bytesState bytes.Buffer 115 | enc := gob.NewEncoder(&bytesState) 116 | enc.Encode(s.stm) 117 | // snapshot 118 | s.Rf.Snapshot(index, bytesState.Bytes()) 119 | } 120 | 121 | func (s *KvServer) RequestVote(ctx context.Context, req *pb.RequestVoteRequest) (*pb.RequestVoteResponse, error) { 122 | resp := &pb.RequestVoteResponse{} 123 | raftcore.PrintDebugLog("HandleRequestVote -> " + req.String()) 124 | s.Rf.HandleRequestVote(req, resp) 125 | raftcore.PrintDebugLog("SendRequestVoteResp -> " + resp.String()) 126 | return resp, nil 127 | } 128 | 129 | func (s *KvServer) AppendEntries(ctx context.Context, req *pb.AppendEntriesRequest) (*pb.AppendEntriesResponse, error) { 130 | resp := &pb.AppendEntriesResponse{} 131 | raftcore.PrintDebugLog("HandleAppendEntries -> " + req.String()) 132 | s.Rf.HandleAppendEntries(req, resp) 133 | raftcore.PrintDebugLog("AppendEntriesResp -> " + resp.String()) 134 | return resp, nil 135 | } 136 | 137 | func (s *KvServer) Snapshot(ctx context.Context, req *pb.InstallSnapshotRequest) (*pb.InstallSnapshotResponse, error) { 138 | resp := &pb.InstallSnapshotResponse{} 139 | raftcore.PrintDebugLog("HandleSnapshot -> " + req.String()) 140 | s.Rf.HandleInstallSnapshot(req, resp) 141 | raftcore.PrintDebugLog("HandleSnapshotResp -> " + resp.String()) 142 | return resp, nil 143 | } 144 | 145 | func (s *KvServer) getNotifyChan(index int) chan *pb.CommandResponse { 146 | if _, ok := s.notifyChs[index]; !ok { 147 | s.notifyChs[index] = make(chan *pb.CommandResponse, 1) 148 | } 149 | return s.notifyChs[index] 150 | } 151 | 152 | func (s *KvServer) IsKilled() bool { 153 | return atomic.LoadInt32(&s.dead) == 1 154 | } 155 | 156 | func (s *KvServer) ApplyingToStm(done <-chan interface{}) { 157 | for !s.IsKilled() { 158 | select { 159 | case <-done: 160 | return 161 | case appliedMsg := <-s.applyCh: 162 | if appliedMsg.CommandValid { 163 | s.mu.Lock() 164 | req := &pb.CommandRequest{} 165 | if err := json.Unmarshal(appliedMsg.Command, req); err != nil { 166 | raftcore.PrintDebugLog("Unmarshal CommandRequest err") 167 | s.mu.Unlock() 168 | continue 169 | } 170 | // Time-out check 171 | if appliedMsg.CommandIndex <= int64(s.lastApplied) { 172 | s.mu.Unlock() 173 | continue 174 | } 175 | 176 | s.lastApplied = int(appliedMsg.CommandIndex) 177 | 178 | var value string 179 | switch req.OpType { 180 | case pb.OpType_OpPut: 181 | s.stm.Put(req.Key, req.Value) 182 | case pb.OpType_OpAppend: 183 | s.stm.Append(req.Key, req.Value) 184 | case pb.OpType_OpGet: 185 | value, _ = s.stm.Get(req.Key) 186 | } 187 | 188 | cmdResp := &pb.CommandResponse{} 189 | cmdResp.Value = value 190 | ch := s.getNotifyChan(int(appliedMsg.CommandIndex)) 191 | ch <- cmdResp 192 | 193 | if s.Rf.GetLogCount() > 50 { 194 | s.takeSnapshot(int(appliedMsg.CommandIndex)) 195 | } 196 | 197 | s.mu.Unlock() 198 | 199 | } else if appliedMsg.SnapshotValid { 200 | raftcore.PrintDebugLog("apply snapshot now") 201 | s.mu.Lock() 202 | if s.Rf.CondInstallSnapshot(int(appliedMsg.SnapshotTerm), int(appliedMsg.SnapshotIndex), appliedMsg.Snapshot) { 203 | s.restoreSnapshot(appliedMsg.Snapshot) 204 | s.lastApplied = int(appliedMsg.SnapshotIndex) 205 | } 206 | s.mu.Unlock() 207 | } 208 | } 209 | } 210 | } 211 | 212 | func (s *KvServer) DoCommand(ctx context.Context, req *pb.CommandRequest) (*pb.CommandResponse, error) { 213 | raftcore.PrintDebugLog(fmt.Sprintf("do cmd %s", req.String())) 214 | 215 | cmdResp := &pb.CommandResponse{} 216 | 217 | if req != nil { 218 | reqBytes, 
err := json.Marshal(req) 219 | if err != nil { 220 | return nil, err 221 | } 222 | idx, _, isLeader := s.Rf.Propose(reqBytes) 223 | if !isLeader { 224 | cmdResp.ErrCode = common.ErrCodeWrongLeader 225 | return cmdResp, nil 226 | } 227 | 228 | s.mu.Lock() 229 | ch := s.getNotifyChan(idx) 230 | s.mu.Unlock() 231 | 232 | select { 233 | case res := <-ch: 234 | cmdResp.Value = res.Value 235 | case <-time.After(ExecCmdTimeout): 236 | cmdResp.ErrCode = common.ErrCodeExecTimeout 237 | cmdResp.Value = "exec cmd timeout" 238 | } 239 | 240 | go func() { 241 | s.mu.Lock() 242 | delete(s.notifyChs, idx) 243 | s.mu.Unlock() 244 | }() 245 | 246 | } 247 | 248 | return cmdResp, nil 249 | } 250 | -------------------------------------------------------------------------------- /kvserver/state_machine.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package kvserver 26 | 27 | import "errors" 28 | 29 | type StateMachine interface { 30 | Get(key string) (string, error) 31 | Put(key, value string) error 32 | Append(key, value string) error 33 | } 34 | 35 | type MemKV struct { 36 | KV map[string]string 37 | } 38 | 39 | func NewMemKV() *MemKV { 40 | return &MemKV{make(map[string]string)} 41 | } 42 | 43 | func (memKv *MemKV) Get(key string) (string, error) { 44 | if v, ok := memKv.KV[key]; ok { 45 | return v, nil 46 | } 47 | return "", errors.New("KeyNotFound") 48 | } 49 | 50 | func (memKv *MemKV) Put(key, value string) error { 51 | memKv.KV[key] = value 52 | return nil 53 | } 54 | 55 | func (memKv *MemKV) Append(key, value string) error { 56 | memKv.KV[key] += value 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import "go.uber.org/zap" 4 | 5 | func ELogger() *zap.Logger { 6 | log, err := zap.NewDevelopment() 7 | if err != nil { 8 | panic(err) 9 | } 10 | return log 11 | } 12 | -------------------------------------------------------------------------------- /metaserver/client.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | // TODO: this is a debug client version, need to deal with more detail handle 25 | // 26 | 27 | package metaserver 28 | 29 | import ( 30 | "context" 31 | "crypto/rand" 32 | "math/big" 33 | "strings" 34 | 35 | "github.com/eraft-io/eraft/common" 36 | "github.com/eraft-io/eraft/logger" 37 | "github.com/eraft-io/eraft/raftcore" 38 | pb "github.com/eraft-io/eraft/raftpb" 39 | ) 40 | 41 | type MetaSvrCli struct { 42 | endpoints []*raftcore.RaftClientEnd 43 | leaderID int64 44 | clientID int64 45 | commandID int64 46 | } 47 | 48 | func nrand() int64 { 49 | maxi := big.NewInt(int64(1) << 62) 50 | bigx, _ := rand.Int(rand.Reader, maxi) 51 | return bigx.Int64() 52 | } 53 | 54 | func MakeMetaSvrClient(targetID uint64, targetAddrs []string) *MetaSvrCli { 55 | mateSvrCli := &MetaSvrCli{ 56 | leaderID: 0, 57 | clientID: nrand(), 58 | commandID: 0, 59 | } 60 | 61 | for _, addr := range targetAddrs { 62 | cli := raftcore.MakeRaftClientEnd(addr, targetID) 63 | mateSvrCli.endpoints = append(mateSvrCli.endpoints, cli) 64 | } 65 | 66 | return mateSvrCli 67 | } 68 | 69 | func (metaSvrCli *MetaSvrCli) GetRpcClis() []*raftcore.RaftClientEnd { 70 | return metaSvrCli.endpoints 71 | } 72 | 73 | func (metaSvrCli *MetaSvrCli) Query(ver int64) *Config { 74 | confReq := &pb.ConfigRequest{ 75 | OpType: pb.ConfigOpType_OpQuery, 76 | ConfigVersion: ver, 77 | } 78 | resp := metaSvrCli.CallDoConfigRpc(confReq) 79 | cf := &Config{} 80 | if resp != nil && resp.Config != nil { 81 | cf.Version = int(resp.Config.ConfigVersion) 82 | for i := 0; i < common.NBuckets; i++ { 83 | cf.Buckets[i] = int(resp.Config.Buckets[i]) 84 | } 85 | cf.Groups = make(map[int][]string) 86 | for k, v := range resp.Config.Groups { 87 | serverList := strings.Split(v, ",") 88 | cf.Groups[int(k)] = serverList 89 | } 90 | } 91 | return cf 92 | } 93 | 94 | func (metaSvrCli *MetaSvrCli) Move(bucketID, gid int) *pb.ConfigResponse { 95 | confReq := &pb.ConfigRequest{ 96 | OpType: pb.ConfigOpType_OpMove, 97 | BucketId: int64(bucketID), 98 | Gid: int64(gid), 99 | } 100 | return metaSvrCli.CallDoConfigRpc(confReq) 101 | } 102 | 103 | func (metaSvrCli *MetaSvrCli) Join(servers map[int64]string) *pb.ConfigResponse { 104 | confReq := &pb.ConfigRequest{ 105 | OpType: pb.ConfigOpType_OpJoin, 106 | Servers: servers, 107 | } 108 | return metaSvrCli.CallDoConfigRpc(confReq) 109 | } 110 | 111 | func (metaSvrCli *MetaSvrCli) Leave(gids []int64) *pb.ConfigResponse { 112 | confReq := &pb.ConfigRequest{ 113 | OpType: pb.ConfigOpType_OpLeave, 114 | Gids: gids, 115 | } 116 | return metaSvrCli.CallDoConfigRpc(confReq) 117 | } 118 | 119 | func (metaSvrCli *MetaSvrCli) CallDoConfigRpc(req *pb.ConfigRequest) *pb.ConfigResponse { 120 | var err error 121 | confResp := &pb.ConfigResponse{} 122 | confResp.Config = &pb.ServerConfig{} 123 | for _, end := range metaSvrCli.endpoints { 124 | confResp, err = (*end.GetRaftServiceCli()).DoConfig(context.Background(), req) 125 | if err != nil { 126 | continue 127 | } 128 | switch confResp.ErrCode { 129 | case common.ErrCodeNoErr: 130 | metaSvrCli.commandID++ 131 | return confResp 132 | case common.ErrCodeWrongLeader: 133 | confResp, err := (*metaSvrCli.endpoints[confResp.LeaderId].GetRaftServiceCli()).DoConfig(context.Background(), req) 134 | if err != nil { 135 | logger.ELogger().Sugar().Debug("a node in cluster is down :", err.Error()) 136 | continue 137 | } 138 | if confResp.ErrCode == common.ErrCodeNoErr { 139 | metaSvrCli.commandID++ 140 | return confResp 141 | } 142 | if confResp.ErrCode == common.ErrCodeExecTimeout { 143 | 
logger.ELogger().Sugar().Debug("exec timeout") 144 | return confResp 145 | } 146 | return confResp 147 | } 148 | } 149 | return confResp 150 | } 151 | -------------------------------------------------------------------------------- /metaserver/meta.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package metaserver 26 | 27 | import ( 28 | "time" 29 | 30 | "github.com/eraft-io/eraft/common" 31 | ) 32 | 33 | type Config struct { 34 | Version int 35 | Buckets [common.NBuckets]int 36 | Groups map[int][]string 37 | } 38 | 39 | func DefaultConfig() Config { 40 | return Config{Groups: make(map[int][]string)} 41 | } 42 | 43 | func (cf *Config) GetGroup2Buckets() map[int][]int { 44 | s2g := make(map[int][]int) 45 | for gid := range cf.Groups { 46 | s2g[gid] = make([]int, 0) 47 | } 48 | for bid, gid := range cf.Buckets { 49 | s2g[gid] = append(s2g[gid], bid) 50 | } 51 | return s2g 52 | } 53 | 54 | const ExecTimeout = 3 * time.Second 55 | 56 | func deepCopy(groups map[int][]string) map[int][]string { 57 | newGroup := make(map[int][]string) 58 | for gID, severs := range groups { 59 | newSevers := make([]string, len(severs)) 60 | copy(newSevers, severs) 61 | newGroup[gID] = newSevers 62 | } 63 | return newGroup 64 | } 65 | -------------------------------------------------------------------------------- /metaserver/metaserver.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 
15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package metaserver 26 | 27 | import ( 28 | "context" 29 | "encoding/json" 30 | "errors" 31 | "strconv" 32 | "strings" 33 | "sync" 34 | "sync/atomic" 35 | "time" 36 | 37 | "github.com/eraft-io/eraft/common" 38 | "github.com/eraft-io/eraft/logger" 39 | pb "github.com/eraft-io/eraft/raftpb" 40 | "github.com/eraft-io/eraft/storage" 41 | 42 | "github.com/eraft-io/eraft/raftcore" 43 | ) 44 | 45 | type MetaServer struct { 46 | mu sync.RWMutex 47 | dead int32 48 | Rf *raftcore.Raft 49 | applyCh chan *pb.ApplyMsg 50 | stm ConfigStm 51 | notifyChs map[int]chan *pb.ConfigResponse 52 | stopApplyCh chan interface{} 53 | 54 | pb.UnimplementedRaftServiceServer 55 | } 56 | 57 | func MakeMetaServer(peerMaps map[int]string, nodeId int) *MetaServer { 58 | clientEnds := []*raftcore.RaftClientEnd{} 59 | for id, addr := range peerMaps { 60 | newEnd := raftcore.MakeRaftClientEnd(addr, uint64(id)) 61 | clientEnds = append(clientEnds, newEnd) 62 | } 63 | newApplyCh := make(chan *pb.ApplyMsg) 64 | 65 | newDBEng := storage.EngineFactory("leveldb", "./data/db/metanode_"+strconv.Itoa(nodeId)) 66 | logDBEng := storage.EngineFactory("leveldb", "./data/log/metanode_"+strconv.Itoa(nodeId)) 67 | 68 | newRf := raftcore.MakeRaft(clientEnds, nodeId, logDBEng, newApplyCh, 50, 150) 69 | metaServer := &MetaServer{ 70 | Rf: newRf, 71 | applyCh: newApplyCh, 72 | dead: 0, 73 | stm: NewMemConfigStm(newDBEng), 74 | notifyChs: make(map[int]chan *pb.ConfigResponse), 75 | } 76 | 77 | metaServer.stopApplyCh = make(chan interface{}) 78 | 79 | go metaServer.ApplyingToStm(metaServer.stopApplyCh) 80 | return metaServer 81 | } 82 | 83 | func (s *MetaServer) StopApply() { 84 | close(s.applyCh) 85 | } 86 | 87 | func (s *MetaServer) getNotifyChan(index int) chan *pb.ConfigResponse { 88 | if _, ok := s.notifyChs[index]; !ok { 89 | s.notifyChs[index] = make(chan *pb.ConfigResponse, 1) 90 | } 91 | return s.notifyChs[index] 92 | } 93 | 94 | func (s *MetaServer) DoConfig(ctx context.Context, req *pb.ConfigRequest) (*pb.ConfigResponse, error) { 95 | logger.ELogger().Sugar().Debugf("DoConfig %s", req.String()) 96 | 97 | cmdResp := &pb.ConfigResponse{} 98 | 99 | reqBytes, err := json.Marshal(req) 100 | if err != nil { 101 | cmdResp.ErrMsg = err.Error() 102 | return cmdResp, err 103 | } 104 | 105 | index, _, isLeader := s.Rf.Propose(reqBytes) 106 | if !isLeader { 107 | cmdResp.ErrMsg = "is not leader" 108 | cmdResp.ErrCode = common.ErrCodeWrongLeader 109 | cmdResp.LeaderId = s.Rf.GetLeaderId() 110 | return cmdResp, nil 111 | } 112 | 113 | s.mu.Lock() 114 | ch := s.getNotifyChan(index) 115 | s.mu.Unlock() 116 | 117 | select { 118 | case res := <-ch: 119 | cmdResp.Config = res.Config 120 | cmdResp.ErrMsg = res.ErrMsg 121 | cmdResp.ErrCode = common.ErrCodeNoErr 122 | case <-time.After(ExecTimeout): 123 | cmdResp.ErrMsg = "server exec timeout" 124 | cmdResp.ErrCode = common.ErrCodeExecTimeout 125 | return cmdResp, errors.New("ExecTimeout") 126 | } 127 | 128 | go func() { 129 | s.mu.Lock() 130 | delete(s.notifyChs, index) 131 | 
s.mu.Unlock() 132 | }() 133 | 134 | return cmdResp, nil 135 | } 136 | 137 | func (s *MetaServer) RequestVote(ctx context.Context, req *pb.RequestVoteRequest) (*pb.RequestVoteResponse, error) { 138 | resp := &pb.RequestVoteResponse{} 139 | logger.ELogger().Sugar().Debugf("HandleRequestVote -> " + req.String()) 140 | s.Rf.HandleRequestVote(req, resp) 141 | logger.ELogger().Sugar().Debugf("SendRequestVoteResp -> " + resp.String()) 142 | return resp, nil 143 | } 144 | 145 | func (s *MetaServer) AppendEntries(ctx context.Context, req *pb.AppendEntriesRequest) (*pb.AppendEntriesResponse, error) { 146 | resp := &pb.AppendEntriesResponse{} 147 | logger.ELogger().Sugar().Debugf("HandleAppendEntries -> " + req.String()) 148 | s.Rf.HandleAppendEntries(req, resp) 149 | logger.ELogger().Sugar().Debugf("AppendEntriesResp -> " + resp.String()) 150 | return resp, nil 151 | } 152 | 153 | func (s *MetaServer) ApplyingToStm(done <-chan interface{}) { 154 | for !s.IsKilled() { 155 | select { 156 | case <-done: 157 | return 158 | case appliedMsg := <-s.applyCh: 159 | req := &pb.ConfigRequest{} 160 | if err := json.Unmarshal(appliedMsg.Command, req); err != nil { 161 | logger.ELogger().Sugar().Errorf(err.Error()) 162 | continue 163 | } 164 | logger.ELogger().Sugar().Debugf("appling msg -> " + appliedMsg.String()) 165 | var conf Config 166 | var err error 167 | resp := &pb.ConfigResponse{} 168 | switch req.OpType { 169 | case pb.ConfigOpType_OpJoin: 170 | groups := map[int][]string{} 171 | for gid, serverAddrs := range req.Servers { 172 | groups[int(gid)] = strings.Split(serverAddrs, ",") 173 | } 174 | err = s.stm.Join(groups) 175 | case pb.ConfigOpType_OpLeave: 176 | gids := []int{} 177 | for _, id := range req.Gids { 178 | gids = append(gids, int(id)) 179 | } 180 | err = s.stm.Leave(gids) 181 | case pb.ConfigOpType_OpMove: 182 | err = s.stm.Move(int(req.BucketId), int(req.Gid)) 183 | case pb.ConfigOpType_OpQuery: 184 | conf, err = s.stm.Query(int(req.ConfigVersion)) 185 | if err != nil { 186 | resp.ErrMsg = err.Error() 187 | } 188 | out, err := json.Marshal(conf) 189 | if err != nil { 190 | resp.ErrMsg = err.Error() 191 | } 192 | logger.ELogger().Sugar().Debugf("query configs: " + string(out)) 193 | resp.Config = &pb.ServerConfig{} 194 | resp.Config.ConfigVersion = int64(conf.Version) 195 | for _, sd := range conf.Buckets { 196 | resp.Config.Buckets = append(resp.Config.Buckets, int64(sd)) 197 | } 198 | resp.Config.Groups = make(map[int64]string) 199 | for gid, servers := range conf.Groups { 200 | resp.Config.Groups[int64(gid)] = strings.Join(servers, ",") 201 | } 202 | } 203 | logger.ELogger().Sugar().Debugf("query resp: " + resp.String()) 204 | if err != nil { 205 | resp.ErrMsg = err.Error() 206 | } 207 | 208 | ch := s.getNotifyChan(int(appliedMsg.CommandIndex)) 209 | ch <- resp 210 | } 211 | } 212 | } 213 | 214 | func (s *MetaServer) IsKilled() bool { 215 | return atomic.LoadInt32(&s.dead) == 1 216 | } 217 | -------------------------------------------------------------------------------- /metaserver/metaserver_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | 
// copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package metaserver 26 | 27 | import ( 28 | "testing" 29 | 30 | "github.com/eraft-io/eraft/common" 31 | "github.com/eraft-io/eraft/raftcore" 32 | "github.com/eraft-io/eraft/storage" 33 | ) 34 | 35 | func TestRangeArr(t *testing.T) { 36 | var newBuckets [common.NBuckets]int 37 | newBuckets[0] = 2 38 | for k, v := range newBuckets { 39 | t.Logf("k -> %d, v -> %d", k, v) 40 | } 41 | } 42 | 43 | func TestAddGroups(t *testing.T) { 44 | newDbEng, err := storage.MakeLevelDBKvStore("./conf_data/" + "/test") 45 | if err != nil { 46 | raftcore.PrintDebugLog("boot storage engine err!") 47 | panic(err) 48 | } 49 | memConfStm := NewMemConfigStm(newDbEng) 50 | for i := 0; i < 1000; i++ { 51 | conf, _ := memConfStm.Query(-1) 52 | t.Logf("%v %d", conf, i) 53 | } 54 | common.RemoveDir("./conf_data") 55 | } 56 | -------------------------------------------------------------------------------- /metaserver/metastm.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package metaserver 26 | 27 | import ( 28 | "encoding/json" 29 | "strconv" 30 | 31 | "github.com/eraft-io/eraft/common" 32 | "github.com/eraft-io/eraft/logger" 33 | "github.com/eraft-io/eraft/storage" 34 | ) 35 | 36 | const CfPrefix = "CF_" 37 | 38 | const CurVersionKey = "CUR_CONF_VERSION" 39 | 40 | type ConfigStm interface { 41 | Join(groups map[int][]string) error 42 | Leave(gids []int) error 43 | Move(bucketID, gID int) error 44 | Query(num int) (Config, error) 45 | } 46 | 47 | type MemConfigStm struct { 48 | dbEng storage.KvStore 49 | curConfVersion int 50 | } 51 | 52 | func NewMemConfigStm(dbEng storage.KvStore) *MemConfigStm { 53 | // check if it has default conf 54 | _, err := dbEng.Get(CfPrefix + strconv.Itoa(0)) 55 | confStm := &MemConfigStm{dbEng: dbEng, curConfVersion: 0} 56 | if err != nil { 57 | defaultConfig := DefaultConfig() 58 | defaultConfigBytes, err := json.Marshal(defaultConfig) 59 | if err != nil { 60 | panic(err) 61 | } 62 | // init conf 63 | logger.ELogger().Sugar().Debugf("init conf -> " + string(defaultConfigBytes)) 64 | if err := confStm.dbEng.Put(CfPrefix+strconv.Itoa(0), string(defaultConfigBytes)); err != nil { 65 | panic(err) 66 | } 67 | if err := confStm.dbEng.Put(CurVersionKey, strconv.Itoa(confStm.curConfVersion)); err != nil { 68 | panic(err) 69 | } 70 | return confStm 71 | } 72 | versionStr, err := dbEng.Get(CurVersionKey) 73 | if err != nil { 74 | panic(err) 75 | } 76 | versionInt, _ := strconv.Atoi(versionStr) 77 | confStm.curConfVersion = versionInt 78 | return confStm 79 | } 80 | 81 | func (cfStm *MemConfigStm) Join(groups map[int][]string) error { 82 | confBytes, err := cfStm.dbEng.Get(CfPrefix + strconv.Itoa(cfStm.curConfVersion)) 83 | if err != nil { 84 | return err 85 | } 86 | lastConf := &Config{} 87 | json.Unmarshal([]byte(confBytes), lastConf) 88 | newConfig := Config{cfStm.curConfVersion + 1, lastConf.Buckets, deepCopy(lastConf.Groups)} 89 | for gid, servers := range groups { 90 | if _, ok := newConfig.Groups[gid]; !ok { 91 | newSvrs := make([]string, len(servers)) 92 | copy(newSvrs, servers) 93 | newConfig.Groups[gid] = newSvrs 94 | } 95 | } 96 | s2g := newConfig.GetGroup2Buckets() 97 | var newBuckets [common.NBuckets]int 98 | for gid, buckets := range s2g { 99 | for _, bid := range buckets { 100 | newBuckets[bid] = gid 101 | } 102 | } 103 | newConfig.Buckets = newBuckets 104 | newConfigBytes, _ := json.Marshal(newConfig) 105 | cfStm.dbEng.Put(CurVersionKey, strconv.Itoa(cfStm.curConfVersion+1)) 106 | cfStm.dbEng.Put(CfPrefix+strconv.Itoa(cfStm.curConfVersion+1), string(newConfigBytes)) 107 | cfStm.curConfVersion += 1 108 | return nil 109 | } 110 | 111 | func (cfStm *MemConfigStm) Leave(gids []int) error { 112 | confBytes, err := cfStm.dbEng.Get(CfPrefix + strconv.Itoa(cfStm.curConfVersion)) 113 | if err != nil { 114 | return err 115 | } 116 | lastConf := &Config{} 117 | json.Unmarshal([]byte(confBytes), lastConf) 118 | newConf := Config{cfStm.curConfVersion + 1, lastConf.Buckets, deepCopy(lastConf.Groups)} 119 | for _, gid := range gids { 120 | delete(newConf.Groups, gid) 121 | } 122 | var newBuckets [common.NBuckets]int 123 | newConf.Buckets = newBuckets 124 | newConfigBytes, _ := json.Marshal(newConf) 125 | cfStm.dbEng.Put(CurVersionKey, strconv.Itoa(cfStm.curConfVersion+1)) 126 | cfStm.dbEng.Put(CfPrefix+strconv.Itoa(cfStm.curConfVersion+1), string(newConfigBytes)) 127 | cfStm.curConfVersion += 1 128 | return nil 129 | } 130 | 131 | func (cfStm *MemConfigStm) Move(bid, gid int) error { 132 | confBytes, err := 
cfStm.dbEng.Get(CfPrefix + strconv.Itoa(cfStm.curConfVersion)) 133 | if err != nil { 134 | return err 135 | } 136 | lastConf := &Config{} 137 | json.Unmarshal([]byte(confBytes), lastConf) 138 | newConf := Config{cfStm.curConfVersion + 1, lastConf.Buckets, deepCopy(lastConf.Groups)} 139 | newConf.Buckets[bid] = gid 140 | newConfigBytes, _ := json.Marshal(newConf) 141 | cfStm.dbEng.Put(CurVersionKey, strconv.Itoa(cfStm.curConfVersion+1)) 142 | cfStm.dbEng.Put(CfPrefix+strconv.Itoa(cfStm.curConfVersion+1), string(newConfigBytes)) 143 | cfStm.curConfVersion += 1 144 | return nil 145 | } 146 | 147 | func (cfStm *MemConfigStm) Query(version int) (Config, error) { 148 | if version < 0 || version >= cfStm.curConfVersion { 149 | lastConf := &Config{} 150 | logger.ELogger().Sugar().Debugf("query cur version -> " + strconv.Itoa(cfStm.curConfVersion)) 151 | confBytes, err := cfStm.dbEng.Get(CfPrefix + strconv.Itoa(cfStm.curConfVersion)) 152 | if err != nil { 153 | return DefaultConfig(), err 154 | } 155 | json.Unmarshal([]byte(confBytes), lastConf) 156 | return *lastConf, nil 157 | } 158 | confBytes, err := cfStm.dbEng.Get(CfPrefix + strconv.Itoa(version)) 159 | if err != nil { 160 | return DefaultConfig(), err 161 | } 162 | specConf := &Config{} 163 | json.Unmarshal([]byte(confBytes), specConf) 164 | return *specConf, nil 165 | } 166 | -------------------------------------------------------------------------------- /pbs/raftbasic.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package pbs; 4 | 5 | option go_package = "../raftpb"; 6 | 7 | // 8 | // raft basic request vote definition 9 | // 10 | message RequestVoteRequest { 11 | int64 term = 1; 12 | int64 candidate_id = 2; 13 | int64 last_log_index = 3; 14 | int64 last_log_term = 4; 15 | } 16 | 17 | // 18 | // raft basic request vote response 19 | // 20 | message RequestVoteResponse { 21 | int64 term = 1; 22 | bool vote_granted = 2; 23 | } 24 | 25 | // 26 | // the log entry type 27 | // 1.normal -> like put, get key 28 | // 2.conf change -> cluster config change 29 | // 30 | enum EntryType { 31 | EntryNormal = 0; 32 | EntryConfChange = 1; 33 | } 34 | 35 | // 36 | // raft basic log entry definition 37 | // 38 | message Entry { 39 | EntryType entry_type = 1; 40 | uint64 term = 2; 41 | int64 index = 3; 42 | bytes data = 4; 43 | } 44 | 45 | // 46 | // raft basic append entries request definition 47 | // 48 | message AppendEntriesRequest { 49 | int64 term = 1; 50 | int64 leader_id = 2; 51 | int64 prev_log_index = 3; 52 | int64 prev_log_term = 4; 53 | int64 leader_commit = 5; 54 | repeated Entry entries = 6; 55 | } 56 | 57 | // 58 | // raft basic append entries response definition 59 | // 60 | message AppendEntriesResponse { 61 | int64 term = 1; 62 | bool success = 2; 63 | int64 conflict_index = 3; 64 | int64 conflict_term = 4; 65 | } 66 | 67 | // 68 | // apply message definition 69 | // 70 | message ApplyMsg { 71 | bool CommandValid = 1; 72 | bytes Command = 2; 73 | int64 CommandTerm = 3; 74 | int64 CommandIndex = 4; 75 | bool SnapshotValid = 5; 76 | bytes Snapshot = 6; 77 | int64 SnapshotTerm = 7; 78 | int64 SnapshotIndex = 8; 79 | } 80 | 81 | // 82 | // client op type 83 | // 84 | enum OpType { 85 | OpPut = 0; 86 | OpAppend = 1; 87 | OpGet = 2; 88 | OpConfigChange = 3; 89 | OpDeleteBuckets = 4; 90 | OpInsertBuckets = 5; 91 | } 92 | 93 | // 94 | // client command request 95 | // 96 | message CommandRequest { 97 | string key = 1; 98 | string value = 2; 99 | OpType op_type = 3; 100 | 
int64 client_id = 4; 101 | int64 command_id = 5; 102 | bytes context = 6; 103 | } 104 | 105 | // 106 | // client command response 107 | // 108 | message CommandResponse { 109 | string value = 1; 110 | int64 leader_id = 2; 111 | int64 err_code = 3; 112 | } 113 | 114 | // 115 | // config client op type 116 | // 117 | enum ConfigOpType { 118 | OpJoin = 0; 119 | OpLeave = 1; 120 | OpMove = 2; 121 | OpQuery = 3; 122 | OpSetBucket = 4; 123 | OpMigBucket = 5; 124 | } 125 | 126 | // 127 | // config server request 128 | // to manager the server config table 129 | // 130 | message ConfigRequest { 131 | map servers = 1; // gid -> [s1, s2, s3] 132 | repeated int64 gids = 2; 133 | int64 bucket_id = 3; 134 | int64 gid = 4; 135 | int64 config_version = 5; 136 | ConfigOpType op_type = 6; 137 | int64 client_id = 7; 138 | int64 command_id = 8; 139 | } 140 | 141 | // 142 | // 143 | // 144 | message ServerConfig { 145 | int64 config_version = 1; 146 | repeated int64 buckets = 2; 147 | map groups = 3; // gid -> [s1, s2, s3] 148 | } 149 | 150 | message ConfigResponse { 151 | string err_msg = 1; 152 | ServerConfig config = 2; 153 | int64 leader_id = 3; 154 | int64 err_code = 4; 155 | } 156 | 157 | enum BucketOpType { 158 | OpGetData = 0; 159 | OpDeleteData = 1; 160 | OpInsertData = 2; 161 | } 162 | 163 | message BucketOperationRequest { 164 | int64 config_version = 1; 165 | repeated int64 bucket_ids = 2; 166 | BucketOpType bucket_op_type = 3; 167 | int64 gid = 4; 168 | bytes buckets_datas = 5; 169 | } 170 | 171 | message BucketOperationResponse { 172 | int64 config_version = 1; 173 | bytes buckets_datas = 2; 174 | string err_msg = 3; 175 | } 176 | 177 | message InstallSnapshotRequest { 178 | int64 term = 1; 179 | int64 leader_id = 2; 180 | int64 last_included_index = 3; 181 | int64 last_included_term = 4; 182 | bytes data = 5; 183 | } 184 | 185 | message InstallSnapshotResponse { 186 | int64 term = 1; 187 | } 188 | 189 | service RaftService { 190 | rpc RequestVote (RequestVoteRequest) returns (RequestVoteResponse) {} 191 | rpc AppendEntries (AppendEntriesRequest) returns (AppendEntriesResponse) {} 192 | rpc DoCommand (CommandRequest) returns (CommandResponse) {} 193 | rpc DoConfig (ConfigRequest) returns (ConfigResponse) {} 194 | rpc DoBucketsOperation (BucketOperationRequest) returns (BucketOperationResponse) {} 195 | rpc Snapshot (InstallSnapshotRequest) returns (InstallSnapshotResponse) {} 196 | } 197 | -------------------------------------------------------------------------------- /raftcore/consts.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package raftcore 26 | 27 | const ( 28 | VoteForNoOne = -1 29 | ) 30 | 31 | var RaftLogPrefix = []byte{0x11, 0x11, 0x19, 0x96} 32 | 33 | var FirstIdxKey = []byte{0x88, 0x88} 34 | 35 | var LastIdxKey = []byte{0x99, 0x99} 36 | 37 | var RaftStateKey = []byte{0x19, 0x49} 38 | 39 | const InitLogIndex = 0 40 | 41 | var SnapshotStateKey = []byte{0x19, 0x97} 42 | -------------------------------------------------------------------------------- /raftcore/persistent_log.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | // this file defines the models that raft needs to persist and their operations 25 | package raftcore 26 | 27 | import ( 28 | "bytes" 29 | "encoding/binary" 30 | "encoding/gob" 31 | "fmt" 32 | 33 | pb "github.com/eraft-io/eraft/raftpb" 34 | "github.com/eraft-io/eraft/storage" 35 | ) 36 | 37 | type RaftPersistentState struct { 38 | CurTerm int64 39 | VotedFor int64 40 | AppliedIdx int64 41 | } 42 | 43 | // MakePersistRaftLog make a persist raft log model 44 | // 45 | // newDBEng: a LevelDBKvStore storage engine 46 | func MakePersistRaftLog(newDBEng storage.KvStore) *RaftLog { 47 | _, _, err := newDBEng.SeekPrefixFirst(string(RaftLogPrefix)) 48 | if err != nil { 49 | empEnt := &pb.Entry{} 50 | empEntEncode := EncodeEntry(empEnt) 51 | newDBEng.PutBytesKv(EncodeRaftLogKey(InitLogIndex), empEntEncode) 52 | } 53 | return &RaftLog{dbEng: newDBEng} 54 | } 55 | 56 | // PersistRaftState Persistent storage raft state 57 | // (curTerm, and votedFor) 58 | // you can find this design in raft paper figure2 State definition 59 | func (rfLog *RaftLog) PersistRaftState(curTerm int64, votedFor int64, appliedIdx int64) { 60 | rfState := &RaftPersistentState{ 61 | CurTerm: curTerm, 62 | VotedFor: votedFor, 63 | AppliedIdx: appliedIdx, 64 | } 65 | rfLog.dbEng.PutBytesKv(RaftStateKey, EncodeRaftState(rfState)) 66 | } 67 | 68 | func (rfLog *RaftLog) PersisSnapshot(snapContext []byte) { 69 | rfLog.dbEng.PutBytesKv(SnapshotStateKey, snapContext) 70 | } 71 | 72 | func (rfLog *RaftLog) ReadSnapshot() ([]byte, error) { 73 | value, err := rfLog.dbEng.GetBytesValue(SnapshotStateKey) 74 | if err != nil { 75 | return nil, err 76 | } 77 | return value, nil 78 | } 79 | 80 | // ReadRaftState 81 | // read the persisted curTerm, votedFor for node from storage engine 82 | func (rfLog *RaftLog) ReadRaftState() (curTerm int64, votedFor int64, appliedIdx int64) { 83 | rfBytes, err := rfLog.dbEng.GetBytesValue(RaftStateKey) 84 | if err != nil { 85 | return 0, -1, -1 86 | } 87 | rfState := DecodeRaftState(rfBytes) 88 | return rfState.CurTerm, rfState.VotedFor, rfState.AppliedIdx 89 | } 90 | 91 | // GetFirstLogId 92 | // get the first log id from storage engine 93 | func (rfLog *RaftLog) GetFirstLogId() uint64 { 94 | kBytes, _, err := rfLog.dbEng.SeekPrefixFirst(string(RaftLogPrefix)) 95 | if err != nil { 96 | panic(err) 97 | } 98 | return DecodeRaftLogKey(kBytes) 99 | } 100 | 101 | // GetLastLogId 102 | // 103 | // get the last log id from storage engine 104 | func (rfLog *RaftLog) GetLastLogId() uint64 { 105 | idMax, err := rfLog.dbEng.SeekPrefixKeyIdMax(RaftLogPrefix) 106 | if err != nil { 107 | panic(err) 108 | } 109 | return idMax 110 | } 111 | 112 | // 113 | // SetEntFirstData 114 | // 115 | 116 | func (rfLog *RaftLog) SetEntFirstData(d []byte) error { 117 | rfLog.mu.Lock() 118 | defer rfLog.mu.Unlock() 119 | firstIdx := rfLog.GetFirstLogId() 120 | encodeValue, err := rfLog.dbEng.GetBytesValue(EncodeRaftLogKey(firstIdx)) 121 | if err != nil { 122 | PrintDebugLog(fmt.Sprintf("get log entry with id %d error!", firstIdx)) 123 | return err 124 | } 125 | ent := DecodeEntry(encodeValue) 126 | ent.Index = int64(firstIdx) 127 | ent.Data = d 128 | newEntEncode := EncodeEntry(ent) 129 | return rfLog.dbEng.PutBytesKv(EncodeRaftLogKey(firstIdx), newEntEncode) 130 | } 131 | 132 | // ReInitLogs 133 | // make logs to init state 134 | func (rfLog *RaftLog) ReInitLogs() error { 135 | rfLog.mu.Lock() 136 | defer rfLog.mu.Unlock() 137 | // delete all log 138 | if err := rfLog.dbEng.DelPrefixKeys(string(RaftLogPrefix)); err != nil { 
139 | return err 140 | } 141 | // add an empty 142 | empEnt := &pb.Entry{} 143 | empEntEncode := EncodeEntry(empEnt) 144 | return rfLog.dbEng.PutBytesKv(EncodeRaftLogKey(InitLogIndex), empEntEncode) 145 | } 146 | 147 | // 148 | // SetEntFirstTermAndIndex 149 | // 150 | 151 | func (rfLog *RaftLog) SetEntFirstTermAndIndex(term, index int64) error { 152 | rfLog.mu.Lock() 153 | defer rfLog.mu.Unlock() 154 | firstIdx := rfLog.GetFirstLogId() 155 | encodeValue, err := rfLog.dbEng.GetBytesValue(EncodeRaftLogKey(firstIdx)) 156 | if err != nil { 157 | PrintDebugLog(fmt.Sprintf("get log entry with id %d error!", firstIdx)) 158 | panic(err) 159 | } 160 | // del olf first ent 161 | if err := rfLog.dbEng.DeleteBytesK(EncodeRaftLogKey(firstIdx)); err != nil { 162 | return err 163 | } 164 | ent := DecodeEntry(encodeValue) 165 | ent.Term = uint64(term) 166 | ent.Index = index 167 | PrintDebugLog("change first ent to -> " + ent.String()) 168 | newEntEncode := EncodeEntry(ent) 169 | return rfLog.dbEng.PutBytesKv(EncodeRaftLogKey(uint64(index)), newEntEncode) 170 | } 171 | 172 | // GetFirst 173 | // 174 | // get the first entry from storage engine 175 | func (rfLog *RaftLog) GetFirst() *pb.Entry { 176 | rfLog.mu.RLock() 177 | defer rfLog.mu.RUnlock() 178 | kBytes, vBytes, err := rfLog.dbEng.SeekPrefixFirst(string(RaftLogPrefix)) 179 | if err != nil { 180 | panic(err) 181 | } 182 | logId := DecodeRaftLogKey(kBytes) 183 | PrintDebugLog(fmt.Sprintf("get first log with id -> %d", logId)) 184 | return DecodeEntry(vBytes) 185 | } 186 | 187 | // func (rfLog *RaftLog) SetFirstLog(index int64, term int64) error { 188 | // rfLog.mu.Lock() 189 | // defer rfLog.mu.Unlock() 190 | // empEnt := &pb.Entry{ 191 | // Index: index, 192 | // Term: uint64(term), 193 | // Data: make([]byte, 0), 194 | // } 195 | // empEntEncode := EncodeEntry(empEnt) 196 | // return rfLog.dbEng.PutBytesKv(EncodeRaftLogKey(uint64(index)), empEntEncode) 197 | // } 198 | 199 | // GetLast 200 | // 201 | // get the last entry from storage engine 202 | func (rfLog *RaftLog) GetLast() *pb.Entry { 203 | rfLog.mu.RLock() 204 | defer rfLog.mu.RUnlock() 205 | lastLogId, err := rfLog.dbEng.SeekPrefixKeyIdMax(RaftLogPrefix) 206 | if err != nil { 207 | panic(err) 208 | } 209 | firstIdx := rfLog.GetFirstLogId() 210 | PrintDebugLog(fmt.Sprintf("get last log with id -> %d", lastLogId)) 211 | return rfLog.GetEnt(int64(lastLogId) - int64(firstIdx)) 212 | } 213 | 214 | // LogItemCount 215 | // 216 | // get total log count from storage engine 217 | func (rfLog *RaftLog) LogItemCount() int { 218 | rfLog.mu.RLock() 219 | defer rfLog.mu.RUnlock() 220 | kBytes, _, err := rfLog.dbEng.SeekPrefixFirst(string(RaftLogPrefix)) 221 | if err != nil { 222 | panic(err) 223 | } 224 | logIdFirst := DecodeRaftLogKey(kBytes) 225 | logIdLast, err := rfLog.dbEng.SeekPrefixKeyIdMax(RaftLogPrefix) 226 | if err != nil { 227 | panic(err) 228 | } 229 | return int(logIdLast) - int(logIdFirst) + 1 230 | } 231 | 232 | // Append 233 | // 234 | // append a new entry to raft logs, put it to storage engine 235 | func (rfLog *RaftLog) Append(newEnt *pb.Entry) { 236 | rfLog.mu.Lock() 237 | defer rfLog.mu.Unlock() 238 | logIdLast, err := rfLog.dbEng.SeekPrefixKeyIdMax(RaftLogPrefix) 239 | if err != nil { 240 | panic(err) 241 | } 242 | newEntEncode := EncodeEntry(newEnt) 243 | rfLog.dbEng.PutBytesKv(EncodeRaftLogKey(uint64(logIdLast)+1), newEntEncode) 244 | } 245 | 246 | // EraseBefore 247 | // erase log before from idx, and copy [idx:] log return 248 | // this operation don't modify log in storage 
engine 249 | func (rfLog *RaftLog) EraseBefore(idx int64) []*pb.Entry { 250 | rfLog.mu.Lock() 251 | defer rfLog.mu.Unlock() 252 | entries := []*pb.Entry{} 253 | lastLogId := rfLog.GetLastLogId() 254 | firstLogId := rfLog.GetFirstLogId() 255 | for i := int64(firstLogId) + idx; i <= int64(lastLogId); i++ { 256 | entries = append(entries, rfLog.GetEnt(i-int64(firstLogId))) 257 | } 258 | return entries 259 | } 260 | 261 | func (rfLog *RaftLog) EraseBeforeWithDel(idx int64) error { 262 | rfLog.mu.Lock() 263 | defer rfLog.mu.Unlock() 264 | firstLogId := rfLog.GetFirstLogId() 265 | for i := firstLogId; i < firstLogId+uint64(idx); i++ { 266 | if err := rfLog.dbEng.DeleteBytesK(EncodeRaftLogKey(i)); err != nil { 267 | return err 268 | } 269 | PrintDebugLog(fmt.Sprintf("del log with id %d success", i)) 270 | } 271 | return nil 272 | } 273 | 274 | // EraseAfter 275 | // erase after idx, !!!WARNING!!! is withDel is true, this operation will delete log key 276 | // in storage engine 277 | func (rfLog *RaftLog) EraseAfter(idx int64, withDel bool) []*pb.Entry { 278 | rfLog.mu.Lock() 279 | defer rfLog.mu.Unlock() 280 | firstLogId := rfLog.GetFirstLogId() 281 | if withDel { 282 | for i := int64(firstLogId) + idx; i <= int64(rfLog.GetLastLogId()); i++ { 283 | if err := rfLog.dbEng.DeleteBytesK(EncodeRaftLogKey(uint64(i))); err != nil { 284 | panic(err) 285 | } 286 | } 287 | } 288 | entries := []*pb.Entry{} 289 | for i := firstLogId; i < firstLogId+uint64(idx); i++ { 290 | entries = append(entries, rfLog.GetEnt(int64(i)-int64(firstLogId))) 291 | } 292 | return entries 293 | } 294 | 295 | // GetRange 296 | // get range log from storage engine, and return the copy 297 | // [lo, hi) 298 | func (rfLog *RaftLog) GetRange(lo, hi int64) []*pb.Entry { 299 | rfLog.mu.RLock() 300 | defer rfLog.mu.RUnlock() 301 | ents := []*pb.Entry{} 302 | for i := lo; i < hi; i++ { 303 | ents = append(ents, rfLog.GetEnt(i)) 304 | } 305 | return ents 306 | } 307 | 308 | // GetEntry 309 | // get log entry with idx 310 | func (rfLog *RaftLog) GetEntry(idx int64) *pb.Entry { 311 | rfLog.mu.RLock() 312 | defer rfLog.mu.RUnlock() 313 | return rfLog.GetEnt(idx) 314 | } 315 | 316 | func (rfLog *RaftLog) GetEnt(offset int64) *pb.Entry { 317 | firstLogId := rfLog.GetFirstLogId() 318 | encodeValue, err := rfLog.dbEng.GetBytesValue(EncodeRaftLogKey(firstLogId + uint64(offset))) 319 | if err != nil { 320 | PrintDebugLog(fmt.Sprintf("get log entry with id %d error!", offset+int64(firstLogId))) 321 | panic(err) 322 | } 323 | return DecodeEntry(encodeValue) 324 | } 325 | 326 | // EncodeRaftLogKey 327 | // encode raft log key with prefix -> RaftLogPrefix 328 | func EncodeRaftLogKey(idx uint64) []byte { 329 | var outBuf bytes.Buffer 330 | outBuf.Write(RaftLogPrefix) 331 | b := make([]byte, 8) 332 | binary.LittleEndian.PutUint64(b, idx) 333 | outBuf.Write(b) 334 | return outBuf.Bytes() 335 | } 336 | 337 | // DecodeRaftLogKey 338 | // decode raft log key, return log id 339 | func DecodeRaftLogKey(bts []byte) uint64 { 340 | return binary.LittleEndian.Uint64(bts[4:]) 341 | } 342 | 343 | // EncodeEntry 344 | // encode log entry to bytes sequence 345 | func EncodeEntry(ent *pb.Entry) []byte { 346 | var bytesEnt bytes.Buffer 347 | enc := gob.NewEncoder(&bytesEnt) 348 | enc.Encode(ent) 349 | return bytesEnt.Bytes() 350 | } 351 | 352 | // DecodeEntry 353 | // decode log entry from bytes sequence 354 | func DecodeEntry(in []byte) *pb.Entry { 355 | dec := gob.NewDecoder(bytes.NewBuffer(in)) 356 | ent := pb.Entry{} 357 | dec.Decode(&ent) 358 | return &ent 359 | } 
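// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original persistent_log.go): a minimal
// illustration of how the encode/decode helpers above fit together. A log key
// is RaftLogPrefix followed by an 8-byte little-endian index, and entries are
// gob-encoded, so each pair should round-trip. The function name and sample
// values are illustrative only and do not appear in the repository.
func exampleEntryCodecRoundTrip() {
	// Key round trip: EncodeRaftLogKey prepends the prefix, DecodeRaftLogKey
	// strips it and reads the index back.
	key := EncodeRaftLogKey(42)
	idx := DecodeRaftLogKey(key) // idx == 42

	// Entry round trip through the gob codec.
	ent := &pb.Entry{Index: 42, Term: 7, Data: []byte("hello")}
	buf := EncodeEntry(ent)
	got := DecodeEntry(buf) // got.Index == 42, got.Term == 7

	fmt.Printf("decoded key idx %d, entry %s\n", idx, got.String())
}
// ---------------------------------------------------------------------------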
360 | 361 | // EncodeRaftState 362 | // encode RaftPersistentState to bytes sequence 363 | func EncodeRaftState(rfState *RaftPersistentState) []byte { 364 | var bytesState bytes.Buffer 365 | enc := gob.NewEncoder(&bytesState) 366 | enc.Encode(rfState) 367 | return bytesState.Bytes() 368 | } 369 | 370 | // DecodeRaftState 371 | // decode RaftPersistentState from bytes sequence 372 | func DecodeRaftState(in []byte) *RaftPersistentState { 373 | dec := gob.NewDecoder(bytes.NewBuffer(in)) 374 | rfState := RaftPersistentState{} 375 | dec.Decode(&rfState) 376 | return &rfState 377 | } 378 | -------------------------------------------------------------------------------- /raftcore/persistent_log_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package raftcore 26 | 27 | import ( 28 | "io/ioutil" 29 | "os" 30 | "path" 31 | "testing" 32 | 33 | pb "github.com/eraft-io/eraft/raftpb" 34 | "github.com/eraft-io/eraft/storage" 35 | ) 36 | 37 | func RemoveDir(in string) { 38 | dir, _ := ioutil.ReadDir(in) 39 | for _, d := range dir { 40 | os.RemoveAll(path.Join([]string{in, d.Name()}...)) 41 | } 42 | } 43 | 44 | func TestTestPersisLogGetInit(t *testing.T) { 45 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 46 | raftLog := MakePersistRaftLog(newDBEng) 47 | firstEnt := raftLog.GetFirst() 48 | t.Logf("first log %s", firstEnt.String()) 49 | lastEnt := raftLog.GetLast() 50 | t.Logf("last log %s", lastEnt.String()) 51 | t.Logf("log items count %d", raftLog.LogItemCount()) 52 | RemoveDir("./log_data_test") 53 | } 54 | 55 | func TestEraseBefore1(t *testing.T) { 56 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 57 | raftLog := MakePersistRaftLog(newDBEng) 58 | firstEnt := raftLog.GetFirst() 59 | t.Logf("first log %s", firstEnt.String()) 60 | lastEnt := raftLog.GetLast() 61 | t.Logf("last log %s", lastEnt.String()) 62 | ents := raftLog.EraseBefore(1) 63 | t.Logf("%v", ents) 64 | RemoveDir("./log_data_test") 65 | } 66 | 67 | func TestPersisEraseAfter1(t *testing.T) { 68 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 69 | raftLog := MakePersistRaftLog(newDBEng) 70 | firstEnt := raftLog.GetFirst() 71 | t.Logf("first log %s", firstEnt.String()) 72 | lastEnt := raftLog.GetLast() 73 | t.Logf("last log %s", lastEnt.String()) 74 | ents := raftLog.EraseAfter(1, false) 75 | t.Logf("%v", ents) 76 | RemoveDir("./log_data_test") 77 | } 78 | 79 | func TestPersisEraseAfter0And1(t *testing.T) { 80 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 81 | raftLog := MakePersistRaftLog(newDBEng) 82 | firstEnt := raftLog.GetFirst() 83 | t.Logf("first log %s", firstEnt.String()) 84 | lastEnt := raftLog.GetLast() 85 | t.Logf("last log %s", lastEnt.String()) 86 | ents := raftLog.EraseAfter(0, false) 87 | t.Logf("%v", ents) 88 | raftLog.Append(&pb.Entry{ 89 | Index: 1, 90 | Term: 1, 91 | }) 92 | ents = raftLog.EraseAfter(1, false) 93 | t.Logf("%v", ents) 94 | t.Logf("%d", raftLog.LogItemCount()) 95 | RemoveDir("./log_data_test") 96 | } 97 | 98 | func TestPersisEraseBefore0And1(t *testing.T) { 99 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 100 | raftLog := MakePersistRaftLog(newDBEng) 101 | firstEnt := raftLog.GetFirst() 102 | t.Logf("first log %s", firstEnt.String()) 103 | lastEnt := raftLog.GetLast() 104 | t.Logf("last log %s", lastEnt.String()) 105 | ents := raftLog.EraseBefore(0) 106 | t.Logf("%v", ents) 107 | raftLog.Append(&pb.Entry{ 108 | Index: 1, 109 | Term: 1, 110 | }) 111 | raftLog.Append(&pb.Entry{ 112 | Index: 2, 113 | Term: 1, 114 | }) 115 | ents = raftLog.EraseBefore(1) 116 | t.Logf("%v", ents) 117 | t.Logf("%d", raftLog.LogItemCount()) 118 | RemoveDir("./log_data_test") 119 | } 120 | 121 | func TestPersisEraseAfter0(t *testing.T) { 122 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 123 | raftLog := MakePersistRaftLog(newDBEng) 124 | firstEnt := raftLog.GetFirst() 125 | t.Logf("first log %s", firstEnt.String()) 126 | lastEnt := raftLog.GetLast() 127 | t.Logf("last log %s", lastEnt.String()) 128 | ents := raftLog.EraseAfter(0, false) 129 | t.Logf("%v", ents) 130 | RemoveDir("./log_data_test") 131 | } 132 | 133 | func TestTestPersisLogAppend(t *testing.T) { 134 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 135 | 
raftLog := MakePersistRaftLog(newDBEng) 136 | for i := 0; i < 1000; i++ { 137 | raftLog.Append(&pb.Entry{ 138 | Index: int64(i), 139 | Term: 1, 140 | Data: []byte{0x01, 0x02}, 141 | }) 142 | } 143 | firstEnt := raftLog.GetFirst() 144 | t.Logf("first log %s", firstEnt.String()) 145 | lastEnt := raftLog.GetLast() 146 | t.Logf("last log %s", lastEnt.String()) 147 | t.Logf("log items count %d", raftLog.LogItemCount()) 148 | t.Logf("get log item with id 1 -> %s", raftLog.GetEntry(1).String()) 149 | RemoveDir("./log_data_test") 150 | } 151 | 152 | func TestTestPersisLogErase(t *testing.T) { 153 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 154 | raftLog := MakePersistRaftLog(newDBEng) 155 | raftLog.Append(&pb.Entry{ 156 | Index: 1, 157 | Term: 1, 158 | Data: []byte{0x01, 0x02}, 159 | }) 160 | raftLog.Append(&pb.Entry{ 161 | Index: 2, 162 | Term: 1, 163 | Data: []byte{0x01, 0x02}, 164 | }) 165 | raftLog.Append(&pb.Entry{ 166 | Index: 3, 167 | Term: 1, 168 | Data: []byte{0x01, 0x02}, 169 | }) 170 | raftLog.Append(&pb.Entry{ 171 | Index: 4, 172 | Term: 1, 173 | Data: []byte{0x01, 0x02}, 174 | }) 175 | raftLog.EraseBefore(0) 176 | firstEnt := raftLog.GetFirst() 177 | t.Logf("first log %s", firstEnt.String()) 178 | lastEnt := raftLog.GetLast() 179 | t.Logf("last log %s", lastEnt.String()) 180 | t.Logf("log items count %d", raftLog.LogItemCount()) 181 | t.Logf("get log item with id 2 -> %s", raftLog.GetEntry(2).String()) 182 | raftLog.EraseAfter(3, false) 183 | firstEnt = raftLog.GetFirst() 184 | t.Logf("first log %s", firstEnt.String()) 185 | lastEnt = raftLog.GetLast() 186 | t.Logf("last log %s", lastEnt.String()) 187 | t.Logf("get log item with id 3 -> %s", raftLog.GetEntry(3).String()) 188 | RemoveDir("./log_data_test") 189 | } 190 | 191 | func TestSliceSplit(t *testing.T) { 192 | seq := []int{0, 1, 2} 193 | t.Logf("%+v", seq[1:]) 194 | t.Logf("%+v", seq[:1]) 195 | } 196 | 197 | func TestRaftStatePersis(t *testing.T) { 198 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 199 | raftLog := MakePersistRaftLog(newDBEng) 200 | curterm, votedFor, _ := raftLog.ReadRaftState() 201 | t.Logf("%d", curterm) 202 | t.Logf("%d", votedFor) 203 | raftLog.PersistRaftState(5, 5, 6) 204 | curterm, votedFor, _ = raftLog.ReadRaftState() 205 | t.Logf("%d", curterm) 206 | t.Logf("%d", votedFor) 207 | RemoveDir("./log_data_test") 208 | } 209 | 210 | func TestPersisLogGetRange(t *testing.T) { 211 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 212 | raftLog := MakePersistRaftLog(newDBEng) 213 | raftLog.Append(&pb.Entry{ 214 | Index: 1, 215 | Term: 1, 216 | Data: []byte{0x01, 0x02}, 217 | }) 218 | raftLog.Append(&pb.Entry{ 219 | Index: 2, 220 | Term: 1, 221 | Data: []byte{0x01, 0x02}, 222 | }) 223 | raftLog.Append(&pb.Entry{ 224 | Index: 3, 225 | Term: 1, 226 | Data: []byte{0x01, 0x02}, 227 | }) 228 | raftLog.Append(&pb.Entry{ 229 | Index: 4, 230 | Term: 1, 231 | Data: []byte{0x01, 0x02}, 232 | }) 233 | 234 | ents := raftLog.GetRange(2, 3) 235 | for _, ent := range ents { 236 | t.Logf("got ent %s", ent.String()) 237 | } 238 | RemoveDir("./log_data_test") 239 | } 240 | 241 | func TestPersisLogGetRangeAfterGc(t *testing.T) { 242 | newDBEng := storage.EngineFactory("leveldb", "./log_data_test") 243 | raftLog := MakePersistRaftLog(newDBEng) 244 | raftLog.Append(&pb.Entry{ 245 | Index: 1, 246 | Term: 1, 247 | Data: []byte{0x01, 0x02}, 248 | }) 249 | raftLog.Append(&pb.Entry{ 250 | Index: 2, 251 | Term: 1, 252 | Data: []byte{0x01, 0x02}, 253 | }) 254 | 
raftLog.Append(&pb.Entry{ 255 | Index: 3, 256 | Term: 1, 257 | Data: []byte{0x01, 0x02}, 258 | }) 259 | raftLog.Append(&pb.Entry{ 260 | Index: 4, 261 | Term: 1, 262 | Data: []byte{0x01, 0x02}, 263 | }) 264 | raftLog.EraseBeforeWithDel(2) 265 | ents := raftLog.GetRange(1, 2) 266 | for _, ent := range ents { 267 | t.Logf("got ent %s", ent.String()) 268 | } 269 | RemoveDir("./log_data_test") 270 | } 271 | -------------------------------------------------------------------------------- /raftcore/raft.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 
23 | // 24 | 25 | package raftcore 26 | 27 | import ( 28 | "context" 29 | "fmt" 30 | "sort" 31 | "sync" 32 | "sync/atomic" 33 | "time" 34 | 35 | pb "github.com/eraft-io/eraft/raftpb" 36 | "github.com/eraft-io/eraft/storage" 37 | ) 38 | 39 | type NodeRole uint8 40 | 41 | // raft node state 42 | const ( 43 | NodeRoleFollower NodeRole = iota 44 | NodeRoleCandidate 45 | NodeRoleLeader 46 | ) 47 | 48 | func NodeToString(role NodeRole) string { 49 | switch role { 50 | case NodeRoleCandidate: 51 | return "Candidate" 52 | case NodeRoleFollower: 53 | return "Follower" 54 | case NodeRoleLeader: 55 | return "Leader" 56 | } 57 | return "unknown" 58 | } 59 | 60 | // Raft raft stack definition 61 | type Raft struct { 62 | mu sync.RWMutex 63 | peers []*RaftClientEnd // rpc client end 64 | me_ int 65 | dead int32 66 | applyCh chan *pb.ApplyMsg 67 | applyCond *sync.Cond 68 | replicatorCond []*sync.Cond 69 | role NodeRole 70 | curTerm int64 71 | votedFor int64 72 | grantedVotes int 73 | logs *RaftLog 74 | persister *RaftLog 75 | commitIdx int64 76 | lastApplied int64 77 | nextIdx []int 78 | matchIdx []int 79 | isSnapshotting bool 80 | 81 | leaderId int64 82 | electionTimer *time.Timer 83 | heartbeatTimer *time.Timer 84 | heartBeatTimeout uint64 85 | baseElectTimeout uint64 86 | } 87 | 88 | func MakeRaft(peers []*RaftClientEnd, me int, newDBEng storage.KvStore, applyCh chan *pb.ApplyMsg, heartbeatTimeOutMs uint64, baseElectionTimeOutMs uint64) *Raft { 89 | rf := &Raft{ 90 | peers: peers, 91 | me_: me, 92 | dead: 0, 93 | applyCh: applyCh, 94 | replicatorCond: make([]*sync.Cond, len(peers)), 95 | role: NodeRoleFollower, 96 | curTerm: 0, 97 | votedFor: VoteForNoOne, 98 | grantedVotes: 0, 99 | isSnapshotting: false, 100 | logs: MakePersistRaftLog(newDBEng), 101 | persister: MakePersistRaftLog(newDBEng), 102 | nextIdx: make([]int, len(peers)), 103 | matchIdx: make([]int, len(peers)), 104 | heartbeatTimer: time.NewTimer(time.Millisecond * time.Duration(heartbeatTimeOutMs)), 105 | electionTimer: time.NewTimer(time.Millisecond * time.Duration(MakeAnRandomElectionTimeout(int(baseElectionTimeOutMs)))), 106 | baseElectTimeout: baseElectionTimeOutMs, 107 | heartBeatTimeout: heartbeatTimeOutMs, 108 | } 109 | rf.curTerm, rf.votedFor, rf.lastApplied = rf.persister.ReadRaftState() 110 | rf.applyCond = sync.NewCond(&rf.mu) 111 | lastLog := rf.logs.GetLast() 112 | for _, peer := range peers { 113 | fmt.Printf("peer addr %s id %d", peer.addr, peer.id) 114 | rf.matchIdx[peer.id], rf.nextIdx[peer.id] = 0, int(lastLog.Index+1) 115 | if int(peer.id) != me { 116 | rf.replicatorCond[peer.id] = sync.NewCond(&sync.Mutex{}) 117 | go rf.Replicator(peer) 118 | } 119 | } 120 | 121 | go rf.Tick() 122 | 123 | go rf.Applier() 124 | 125 | return rf 126 | } 127 | 128 | func (rf *Raft) PersistRaftState() { 129 | rf.persister.PersistRaftState(rf.curTerm, rf.votedFor, rf.lastApplied) 130 | } 131 | 132 | func (rf *Raft) Kill() { 133 | atomic.StoreInt32(&rf.dead, 1) 134 | } 135 | 136 | func (rf *Raft) IsKilled() bool { 137 | return atomic.LoadInt32(&rf.dead) == 1 138 | } 139 | 140 | func (rf *Raft) GetFirstLogEnt() *pb.Entry { 141 | return rf.logs.GetFirst() 142 | } 143 | 144 | func (rf *Raft) SwitchRaftNodeRole(role NodeRole) { 145 | if rf.role == role { 146 | return 147 | } 148 | rf.role = role 149 | fmt.Printf("note change state to -> %s \n", NodeToString(role)) 150 | switch role { 151 | case NodeRoleFollower: 152 | rf.heartbeatTimer.Stop() 153 | rf.electionTimer.Reset(time.Duration(MakeAnRandomElectionTimeout(int(rf.baseElectTimeout))) * 
time.Millisecond) 154 | case NodeRoleCandidate: 155 | case NodeRoleLeader: 156 | // become leader,set replica (matchIdx and nextIdx) processes table 157 | lastLog := rf.logs.GetLast() 158 | rf.leaderId = int64(rf.me_) 159 | for i := 0; i < len(rf.peers); i++ { 160 | rf.matchIdx[i], rf.nextIdx[i] = 0, int(lastLog.Index+1) 161 | } 162 | rf.electionTimer.Stop() 163 | rf.heartbeatTimer.Reset(time.Duration(rf.heartBeatTimeout) * time.Millisecond) 164 | } 165 | } 166 | 167 | func (rf *Raft) IncrCurrentTerm() { 168 | rf.curTerm += 1 169 | } 170 | 171 | func (rf *Raft) GetState() (int, bool) { 172 | rf.mu.RLock() 173 | defer rf.mu.RUnlock() 174 | return int(rf.curTerm), rf.role == NodeRoleLeader 175 | } 176 | 177 | func (rf *Raft) IncrGrantedVotes() { 178 | rf.grantedVotes += 1 179 | } 180 | 181 | func (rf *Raft) ReInitLog() { 182 | rf.logs.ReInitLogs() 183 | } 184 | 185 | // HandleRequestVote handle request vote from other node 186 | func (rf *Raft) HandleRequestVote(req *pb.RequestVoteRequest, resp *pb.RequestVoteResponse) { 187 | rf.mu.Lock() 188 | defer rf.mu.Unlock() 189 | defer rf.PersistRaftState() 190 | 191 | if req.Term < rf.curTerm || (req.Term == rf.curTerm && rf.votedFor != -1 && rf.votedFor != req.CandidateId) { 192 | resp.Term, resp.VoteGranted = rf.curTerm, false 193 | return 194 | } 195 | 196 | if req.Term > rf.curTerm { 197 | rf.SwitchRaftNodeRole(NodeRoleFollower) 198 | rf.curTerm, rf.votedFor = req.Term, -1 199 | } 200 | 201 | lastLog := rf.logs.GetLast() 202 | 203 | if !(req.LastLogTerm > int64(lastLog.Term) || (req.LastLogTerm == int64(lastLog.Term) && req.LastLogIndex >= lastLog.Index)) { 204 | resp.Term, resp.VoteGranted = rf.curTerm, false 205 | return 206 | } 207 | 208 | rf.votedFor = req.CandidateId 209 | rf.electionTimer.Reset(time.Millisecond * time.Duration(MakeAnRandomElectionTimeout(int(rf.baseElectTimeout)))) 210 | resp.Term, resp.VoteGranted = rf.curTerm, true 211 | } 212 | 213 | // 214 | // HandleRequestVote handle append entries from other node 215 | // 216 | 217 | func (rf *Raft) GetLeaderId() int64 { 218 | rf.mu.RLock() 219 | defer rf.mu.RUnlock() 220 | return rf.leaderId 221 | } 222 | 223 | func (rf *Raft) HandleAppendEntries(req *pb.AppendEntriesRequest, resp *pb.AppendEntriesResponse) { 224 | rf.mu.Lock() 225 | defer rf.mu.Unlock() 226 | defer rf.PersistRaftState() 227 | 228 | if req.Term < rf.curTerm { 229 | resp.Term = rf.curTerm 230 | resp.Success = false 231 | return 232 | } 233 | 234 | if req.Term > rf.curTerm { 235 | rf.curTerm = req.Term 236 | rf.votedFor = VoteForNoOne 237 | } 238 | 239 | rf.SwitchRaftNodeRole(NodeRoleFollower) 240 | rf.leaderId = req.LeaderId 241 | rf.electionTimer.Reset(time.Millisecond * time.Duration(MakeAnRandomElectionTimeout(int(rf.baseElectTimeout)))) 242 | 243 | if req.PrevLogIndex < rf.logs.GetFirst().Index { 244 | resp.Term = 0 245 | resp.Success = false 246 | PrintDebugLog(fmt.Sprintf("peer %d reject append entires request from %d", rf.me_, req.LeaderId)) 247 | return 248 | } 249 | 250 | if !rf.MatchLog(req.PrevLogTerm, req.PrevLogIndex) { 251 | resp.Term = rf.curTerm 252 | resp.Success = false 253 | lastIndex := rf.logs.GetLast().Index 254 | if lastIndex < req.PrevLogIndex { 255 | PrintDebugLog(fmt.Sprintf("log confict with term %d, index %d", -1, lastIndex+1)) 256 | resp.ConflictTerm = -1 257 | resp.ConflictIndex = lastIndex + 1 258 | } else { 259 | firstIndex := rf.logs.GetFirst().Index 260 | resp.ConflictTerm = int64(rf.logs.GetEntry(req.PrevLogIndex - firstIndex).Term) 261 | index := req.PrevLogIndex - 1 262 | for 
index >= firstIndex && rf.logs.GetEntry(index-firstIndex).Term == uint64(resp.ConflictTerm) { 263 | index-- 264 | } 265 | resp.ConflictIndex = index 266 | } 267 | return 268 | } 269 | 270 | firstIndex := rf.logs.GetFirst().Index 271 | for index, entry := range req.Entries { 272 | if int(entry.Index-firstIndex) >= rf.logs.LogItemCount() || rf.logs.GetEntry(entry.Index-firstIndex).Term != entry.Term { 273 | rf.logs.EraseAfter(entry.Index-firstIndex, true) 274 | for _, newEnt := range req.Entries[index:] { 275 | rf.logs.Append(newEnt) 276 | } 277 | break 278 | } 279 | } 280 | 281 | rf.advanceCommitIndexForFollower(int(req.LeaderCommit)) 282 | resp.Term = rf.curTerm 283 | resp.Success = true 284 | } 285 | 286 | func (rf *Raft) CondInstallSnapshot(lastIncludedTerm int, lastIncludedIndex int, snapshot []byte) bool { 287 | rf.mu.Lock() 288 | defer rf.mu.Unlock() 289 | 290 | if lastIncludedIndex <= int(rf.commitIdx) { 291 | return false 292 | } 293 | 294 | if lastIncludedIndex > int(rf.logs.GetLast().Index) { 295 | PrintDebugLog("lastIncludedIndex > last log id") 296 | rf.logs.ReInitLogs() 297 | } else { 298 | PrintDebugLog("install snapshot del old log") 299 | rf.logs.EraseBeforeWithDel(int64(lastIncludedIndex) - rf.logs.GetFirst().Index) 300 | rf.logs.SetEntFirstData([]byte{}) 301 | } 302 | // update dummy entry with lastIncludedTerm and lastIncludedIndex 303 | rf.logs.SetEntFirstTermAndIndex(int64(lastIncludedTerm), int64(lastIncludedIndex)) 304 | 305 | rf.lastApplied = int64(lastIncludedIndex) 306 | rf.commitIdx = int64(lastIncludedIndex) 307 | 308 | // rf.logs.PersisSnapshot(snapshot) 309 | return true 310 | } 311 | 312 | // take a snapshot 313 | func (rf *Raft) Snapshot(index int, snapshot []byte) { 314 | rf.mu.Lock() 315 | defer rf.mu.Unlock() 316 | rf.isSnapshotting = true 317 | snapshotIndex := rf.logs.GetFirstLogId() 318 | if index <= int(snapshotIndex) { 319 | rf.isSnapshotting = false 320 | PrintDebugLog("reject snapshot, current snapshotIndex is larger in cur term") 321 | return 322 | } 323 | rf.logs.EraseBeforeWithDel(int64(index) - int64(snapshotIndex)) 324 | rf.logs.SetEntFirstData([]byte{}) // 第一个操作日志号设为空 325 | PrintDebugLog(fmt.Sprintf("del log entry before idx %d", index)) 326 | rf.isSnapshotting = false 327 | rf.logs.PersisSnapshot(snapshot) 328 | } 329 | 330 | func (rf *Raft) ReadSnapshot() []byte { 331 | b, err := rf.logs.ReadSnapshot() 332 | if err != nil { 333 | // panic(err) 334 | fmt.Println(err.Error()) 335 | } 336 | return b 337 | } 338 | 339 | // install snapshot from leader 340 | func (rf *Raft) HandleInstallSnapshot(request *pb.InstallSnapshotRequest, response *pb.InstallSnapshotResponse) { 341 | rf.mu.Lock() 342 | defer rf.mu.Unlock() 343 | 344 | response.Term = rf.curTerm 345 | 346 | if request.Term < rf.curTerm { 347 | return 348 | } 349 | 350 | if request.Term > rf.curTerm { 351 | rf.curTerm = request.Term 352 | rf.votedFor = -1 353 | rf.PersistRaftState() 354 | } 355 | 356 | rf.SwitchRaftNodeRole(NodeRoleFollower) 357 | rf.electionTimer.Reset(time.Millisecond * time.Duration(MakeAnRandomElectionTimeout(int(rf.baseElectTimeout)))) 358 | 359 | if request.LastIncludedIndex <= rf.commitIdx { 360 | return 361 | } 362 | 363 | go func() { 364 | rf.applyCh <- &pb.ApplyMsg{ 365 | SnapshotValid: true, 366 | Snapshot: request.Data, 367 | SnapshotTerm: request.LastIncludedTerm, 368 | SnapshotIndex: request.LastIncludedIndex, 369 | } 370 | }() 371 | 372 | } 373 | 374 | func (rf *Raft) GetLogCount() int { 375 | rf.mu.Lock() 376 | defer rf.mu.Unlock() 377 | return 
rf.logs.LogItemCount() 378 | } 379 | 380 | func (rf *Raft) advanceCommitIndexForLeader() { 381 | sort.Ints(rf.matchIdx) 382 | n := len(rf.matchIdx) 383 | newCommitIndex := rf.matchIdx[n-(n/2+1)] 384 | if newCommitIndex > int(rf.commitIdx) { 385 | if rf.MatchLog(rf.curTerm, int64(newCommitIndex)) { 386 | PrintDebugLog(fmt.Sprintf("peer %d advance commit index %d at term %d", rf.me_, rf.commitIdx, rf.curTerm)) 387 | rf.commitIdx = int64(newCommitIndex) 388 | rf.applyCond.Signal() 389 | } 390 | } 391 | } 392 | 393 | func (rf *Raft) advanceCommitIndexForFollower(leaderCommit int) { 394 | newCommitIndex := Min(leaderCommit, int(rf.logs.GetLast().Index)) 395 | if newCommitIndex > int(rf.commitIdx) { 396 | PrintDebugLog(fmt.Sprintf("peer %d advance commit index %d at term %d", rf.me_, rf.commitIdx, rf.curTerm)) 397 | rf.commitIdx = int64(newCommitIndex) 398 | rf.applyCond.Signal() 399 | } 400 | } 401 | 402 | // MatchLog is log matched 403 | func (rf *Raft) MatchLog(term, index int64) bool { 404 | return index <= rf.logs.GetLast().Index && rf.logs.GetEntry(index-rf.logs.GetFirst().Index).Term == uint64(term) 405 | } 406 | 407 | // Election make a new election 408 | func (rf *Raft) Election() { 409 | fmt.Printf("%d start election \n", rf.me_) 410 | rf.IncrGrantedVotes() 411 | rf.votedFor = int64(rf.me_) 412 | voteReq := &pb.RequestVoteRequest{ 413 | Term: rf.curTerm, 414 | CandidateId: int64(rf.me_), 415 | LastLogIndex: rf.logs.GetLast().Index, 416 | LastLogTerm: int64(rf.logs.GetLast().Term), 417 | } 418 | rf.PersistRaftState() 419 | for _, peer := range rf.peers { 420 | if int(peer.id) == rf.me_ { 421 | continue 422 | } 423 | go func(peer *RaftClientEnd) { 424 | PrintDebugLog(fmt.Sprintf("send request vote to %s %s\n", peer.addr, voteReq.String())) 425 | 426 | requestVoteResp, err := (*peer.raftServiceCli).RequestVote(context.Background(), voteReq) 427 | if err != nil { 428 | PrintDebugLog(fmt.Sprintf("send request vote to %s failed %v\n", peer.addr, err.Error())) 429 | } 430 | if requestVoteResp != nil { 431 | rf.mu.Lock() 432 | defer rf.mu.Unlock() 433 | PrintDebugLog(fmt.Sprintf("send request vote to %s recive -> %s, curterm %d, req term %d", peer.addr, requestVoteResp.String(), rf.curTerm, voteReq.Term)) 434 | if rf.curTerm == voteReq.Term && rf.role == NodeRoleCandidate { 435 | if requestVoteResp.VoteGranted { 436 | // success granted the votes 437 | PrintDebugLog("I grant vote") 438 | rf.IncrGrantedVotes() 439 | if rf.grantedVotes > len(rf.peers)/2 { 440 | PrintDebugLog(fmt.Sprintf("node %d get majority votes int term %d ", rf.me_, rf.curTerm)) 441 | rf.SwitchRaftNodeRole(NodeRoleLeader) 442 | rf.BroadcastHeartbeat() 443 | rf.grantedVotes = 0 444 | } 445 | } else if requestVoteResp.Term > rf.curTerm { 446 | // request vote reject 447 | rf.SwitchRaftNodeRole(NodeRoleFollower) 448 | rf.curTerm, rf.votedFor = requestVoteResp.Term, -1 449 | rf.PersistRaftState() 450 | } 451 | } 452 | } 453 | }(peer) 454 | } 455 | } 456 | 457 | // 458 | // BroadcastAppend broadcast append to peers 459 | // 460 | 461 | func (rf *Raft) BroadcastAppend() { 462 | for _, peer := range rf.peers { 463 | if peer.id == uint64(rf.me_) { 464 | continue 465 | } 466 | rf.replicatorCond[peer.id].Signal() 467 | } 468 | } 469 | 470 | // BroadcastHeartbeat broadcast heartbeat to peers 471 | func (rf *Raft) BroadcastHeartbeat() { 472 | for _, peer := range rf.peers { 473 | if int(peer.id) == rf.me_ { 474 | continue 475 | } 476 | PrintDebugLog(fmt.Sprintf("send heart beat to %s", peer.addr)) 477 | go func(peer *RaftClientEnd) { 
478 | rf.replicateOneRound(peer) 479 | }(peer) 480 | } 481 | } 482 | 483 | // Tick raft heart, this ticket trigger raft main flow running 484 | func (rf *Raft) Tick() { 485 | for !rf.IsKilled() { 486 | select { 487 | case <-rf.electionTimer.C: 488 | { 489 | rf.SwitchRaftNodeRole(NodeRoleCandidate) 490 | rf.IncrCurrentTerm() 491 | rf.Election() 492 | rf.electionTimer.Reset(time.Millisecond * time.Duration(MakeAnRandomElectionTimeout(int(rf.baseElectTimeout)))) 493 | } 494 | case <-rf.heartbeatTimer.C: 495 | { 496 | if rf.role == NodeRoleLeader { 497 | rf.BroadcastHeartbeat() 498 | rf.heartbeatTimer.Reset(time.Millisecond * time.Duration(rf.heartBeatTimeout)) 499 | } 500 | } 501 | } 502 | } 503 | } 504 | 505 | // 506 | // Propose the interface to the application propose an operation 507 | // 508 | 509 | func (rf *Raft) Propose(payload []byte) (int, int, bool) { 510 | rf.mu.Lock() 511 | defer rf.mu.Unlock() 512 | if rf.role != NodeRoleLeader { 513 | return -1, -1, false 514 | } 515 | if rf.isSnapshotting { 516 | return -1, -1, false 517 | } 518 | newLog := rf.Append(payload) 519 | rf.BroadcastAppend() 520 | return int(newLog.Index), int(newLog.Term), true 521 | } 522 | 523 | // 524 | // Append append a new command to it's logs 525 | // 526 | 527 | func (rf *Raft) Append(command []byte) *pb.Entry { 528 | lastLog := rf.logs.GetLast() 529 | newLog := &pb.Entry{ 530 | Index: lastLog.Index + 1, 531 | Term: uint64(rf.curTerm), 532 | Data: command, 533 | } 534 | rf.logs.Append(newLog) 535 | rf.matchIdx[rf.me_] = int(newLog.Index) 536 | rf.nextIdx[rf.me_] = int(newLog.Index) + 1 537 | rf.PersistRaftState() 538 | return newLog 539 | } 540 | 541 | // CloseEndsConn close rpc client connect 542 | func (rf *Raft) CloseEndsConn() { 543 | for _, peer := range rf.peers { 544 | peer.CloseAllConn() 545 | } 546 | } 547 | 548 | // Replicator manager duplicate run 549 | func (rf *Raft) Replicator(peer *RaftClientEnd) { 550 | rf.replicatorCond[peer.id].L.Lock() 551 | defer rf.replicatorCond[peer.id].L.Unlock() 552 | for !rf.IsKilled() { 553 | PrintDebugLog("peer id wait for replicating...") 554 | for !(rf.role == NodeRoleLeader && rf.matchIdx[peer.id] < int(rf.logs.GetLast().Index)) { 555 | rf.replicatorCond[peer.id].Wait() 556 | } 557 | rf.replicateOneRound(peer) 558 | } 559 | } 560 | 561 | // replicateOneRound duplicate log entries to other nodes in the cluster 562 | func (rf *Raft) replicateOneRound(peer *RaftClientEnd) { 563 | rf.mu.RLock() 564 | if rf.role != NodeRoleLeader { 565 | rf.mu.RUnlock() 566 | return 567 | } 568 | prevLogIndex := uint64(rf.nextIdx[peer.id] - 1) 569 | PrintDebugLog(fmt.Sprintf("leader prevLogIndex %d", prevLogIndex)) 570 | if prevLogIndex < uint64(rf.logs.GetFirst().Index) { 571 | firstLog := rf.logs.GetFirst() 572 | snapShotReq := &pb.InstallSnapshotRequest{ 573 | Term: rf.curTerm, 574 | LeaderId: int64(rf.me_), 575 | LastIncludedIndex: firstLog.Index, 576 | LastIncludedTerm: int64(firstLog.Term), 577 | Data: rf.ReadSnapshot(), 578 | } 579 | 580 | rf.mu.RUnlock() 581 | 582 | PrintDebugLog(fmt.Sprintf("send snapshot to %s with %s\n", peer.addr, snapShotReq.String())) 583 | 584 | snapShotResp, err := (*peer.raftServiceCli).Snapshot(context.Background(), snapShotReq) 585 | if err != nil { 586 | PrintDebugLog(fmt.Sprintf("send snapshot to %s failed %v\n", peer.addr, err.Error())) 587 | } 588 | 589 | rf.mu.Lock() 590 | PrintDebugLog(fmt.Sprintf("send snapshot to %s with resp %s\n", peer.addr, snapShotResp.String())) 591 | 592 | if snapShotResp != nil { 593 | if rf.role == NodeRoleLeader 
&& rf.curTerm == snapShotReq.Term { 594 | if snapShotResp.Term > rf.curTerm { 595 | rf.SwitchRaftNodeRole(NodeRoleFollower) 596 | rf.curTerm = snapShotResp.Term 597 | rf.votedFor = -1 598 | rf.PersistRaftState() 599 | } else { 600 | PrintDebugLog(fmt.Sprintf("set peer %d matchIdx %d\n", peer.id, snapShotReq.LastIncludedIndex)) 601 | rf.matchIdx[peer.id] = int(snapShotReq.LastIncludedIndex) 602 | rf.nextIdx[peer.id] = int(snapShotReq.LastIncludedIndex) + 1 603 | } 604 | } 605 | } 606 | rf.mu.Unlock() 607 | } else { 608 | firstIndex := rf.logs.GetFirst().Index 609 | PrintDebugLog(fmt.Sprintf("first log index %d", firstIndex)) 610 | entries := make([]*pb.Entry, len(rf.logs.EraseBefore(int64(prevLogIndex)+1-firstIndex))) 611 | copy(entries, rf.logs.EraseBefore(int64(prevLogIndex)+1-firstIndex)) 612 | appendEntReq := &pb.AppendEntriesRequest{ 613 | Term: rf.curTerm, 614 | LeaderId: int64(rf.me_), 615 | PrevLogIndex: int64(prevLogIndex), 616 | PrevLogTerm: int64(rf.logs.GetEntry(int64(prevLogIndex) - firstIndex).Term), 617 | Entries: entries, 618 | LeaderCommit: rf.commitIdx, 619 | } 620 | rf.mu.RUnlock() 621 | 622 | // send empty ae to peers 623 | resp, err := (*peer.raftServiceCli).AppendEntries(context.Background(), appendEntReq) 624 | if err != nil { 625 | PrintDebugLog(fmt.Sprintf("send append entries to %s failed %v\n", peer.addr, err.Error())) 626 | } 627 | if rf.role == NodeRoleLeader && rf.curTerm == appendEntReq.Term { 628 | if resp != nil { 629 | // deal with appendRnt resp 630 | if resp.Success { 631 | PrintDebugLog(fmt.Sprintf("send heart beat to %s success", peer.addr)) 632 | rf.matchIdx[peer.id] = int(appendEntReq.PrevLogIndex) + len(appendEntReq.Entries) 633 | rf.nextIdx[peer.id] = rf.matchIdx[peer.id] + 1 634 | rf.advanceCommitIndexForLeader() 635 | } else { 636 | // there is a new leader in group 637 | if resp.Term > rf.curTerm { 638 | rf.SwitchRaftNodeRole(NodeRoleFollower) 639 | rf.curTerm = resp.Term 640 | rf.votedFor = VoteForNoOne 641 | rf.PersistRaftState() 642 | } else if resp.Term == rf.curTerm { 643 | rf.nextIdx[peer.id] = int(resp.ConflictIndex) 644 | if resp.ConflictTerm != -1 { 645 | for i := appendEntReq.PrevLogIndex; i >= firstIndex; i-- { 646 | if rf.logs.GetEntry(i-firstIndex).Term == uint64(resp.ConflictTerm) { 647 | rf.nextIdx[peer.id] = int(i + 1) 648 | break 649 | } 650 | } 651 | } 652 | } 653 | } 654 | } 655 | } 656 | } 657 | } 658 | 659 | // Applier Write the committed message to the applyCh channel 660 | // and update lastApplied 661 | func (rf *Raft) Applier() { 662 | for !rf.IsKilled() { 663 | rf.mu.Lock() 664 | for rf.lastApplied >= rf.commitIdx { 665 | PrintDebugLog("applier ...") 666 | rf.applyCond.Wait() 667 | } 668 | 669 | firstIndex, commitIndex, lastApplied := rf.logs.GetFirst().Index, rf.commitIdx, rf.lastApplied 670 | entries := make([]*pb.Entry, commitIndex-lastApplied) 671 | copy(entries, rf.logs.GetRange(lastApplied+1-firstIndex, commitIndex+1-firstIndex)) 672 | PrintDebugLog(fmt.Sprintf("%d, applies entries %d-%d in term %d", rf.me_, rf.lastApplied, commitIndex, rf.curTerm)) 673 | 674 | rf.mu.Unlock() 675 | for _, entry := range entries { 676 | rf.applyCh <- &pb.ApplyMsg{ 677 | CommandValid: true, 678 | Command: entry.Data, 679 | CommandTerm: int64(entry.Term), 680 | CommandIndex: entry.Index, 681 | } 682 | } 683 | 684 | rf.mu.Lock() 685 | rf.lastApplied = int64(Max(int(rf.lastApplied), int(commitIndex))) 686 | rf.PersistRaftState() 687 | rf.mu.Unlock() 688 | } 689 | } 690 | 
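// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original raft.go): a minimal wiring example
// for the API defined above, assuming a three-node local cluster. The
// addresses, timeout values, data directory, and function name are placeholder
// assumptions; a real deployment would also register the generated gRPC
// RaftService (see raftpb/raftbasic_grpc.pb.go) so that peers can reach this
// node over the network.
func exampleStartRaftNode() {
	// One client end per member of the group, including this node itself.
	peers := []*RaftClientEnd{
		MakeRaftClientEnd("127.0.0.1:8088", 0),
		MakeRaftClientEnd("127.0.0.1:8089", 1),
		MakeRaftClientEnd("127.0.0.1:8090", 2),
	}
	applyCh := make(chan *pb.ApplyMsg)
	dbEng := storage.EngineFactory("leveldb", "./raft_data_example")

	// This process is peer 0; heartbeat and base election timeouts are in
	// milliseconds (placeholder values).
	rf := MakeRaft(peers, 0, dbEng, applyCh, 1000, 3000)

	// The application drains applyCh and applies committed commands to its
	// own state machine.
	go func() {
		for msg := range applyCh {
			if msg.CommandValid {
				fmt.Printf("apply idx %d term %d\n", msg.CommandIndex, msg.CommandTerm)
			}
		}
	}()

	// Only the current leader accepts proposals; on a follower ok is false
	// and the caller should retry against the leader.
	if idx, term, ok := rf.Propose([]byte("set k v")); ok {
		fmt.Printf("proposed at index %d, term %d\n", idx, term)
	}
}
// ---------------------------------------------------------------------------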
-------------------------------------------------------------------------------- /raftcore/raft_client_end.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package raftcore 26 | 27 | import ( 28 | "fmt" 29 | 30 | "github.com/eraft-io/eraft/raftpb" 31 | "google.golang.org/grpc" 32 | ) 33 | 34 | type RaftClientEnd struct { 35 | id uint64 36 | addr string 37 | conns []*grpc.ClientConn 38 | raftServiceCli *raftpb.RaftServiceClient 39 | } 40 | 41 | func (rfEnd *RaftClientEnd) Id() uint64 { 42 | return rfEnd.id 43 | } 44 | 45 | func (rfEnd *RaftClientEnd) GetRaftServiceCli() *raftpb.RaftServiceClient { 46 | return rfEnd.raftServiceCli 47 | } 48 | 49 | func MakeRaftClientEnd(addr string, id uint64) *RaftClientEnd { 50 | conn, err := grpc.Dial(addr, grpc.WithInsecure()) 51 | if err != nil { 52 | fmt.Printf("faild to connect: %v", err) 53 | } 54 | conns := []*grpc.ClientConn{} 55 | conns = append(conns, conn) 56 | rpcClient := raftpb.NewRaftServiceClient(conn) 57 | return &RaftClientEnd{ 58 | id: id, 59 | addr: addr, 60 | conns: conns, 61 | raftServiceCli: &rpcClient, 62 | } 63 | } 64 | 65 | func (rfEnd *RaftClientEnd) CloseAllConn() { 66 | // PrintDebugLog(fmt.Sprintf("%s close rpc connect", rfEnd.addr)) 67 | for _, conn := range rfEnd.conns { 68 | conn.Close() 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /raftcore/raft_log.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 
15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package raftcore 26 | 27 | import ( 28 | "sync" 29 | 30 | pb "github.com/eraft-io/eraft/raftpb" 31 | "github.com/eraft-io/eraft/storage" 32 | ) 33 | 34 | type RaftLog struct { 35 | mu sync.RWMutex 36 | firstIdx uint64 37 | lastIdx uint64 38 | dbEng storage.KvStore 39 | items []*pb.Entry 40 | } 41 | 42 | type LogOp interface { 43 | GetFirst() *pb.Entry 44 | 45 | LogItemCount() int 46 | 47 | EraseBefore(idx int64) []*pb.Entry 48 | 49 | EraseAfter(idx int64) []*pb.Entry 50 | 51 | GetRange(lo, hi int64) []*pb.Entry 52 | 53 | Append(newEnt *pb.Entry) 54 | 55 | GetEntry(idx int64) *pb.Entry 56 | 57 | GetLast() *pb.Entry 58 | } 59 | 60 | // 61 | // Mem 62 | // 63 | 64 | func MakeMemRaftLog() *RaftLog { 65 | empEnt := &pb.Entry{} 66 | newItems := []*pb.Entry{} 67 | newItems = append(newItems, empEnt) 68 | return &RaftLog{items: newItems, firstIdx: InitLogIndex, lastIdx: InitLogIndex + 1} 69 | } 70 | 71 | func (rfLog *RaftLog) GetMemFirst() *pb.Entry { 72 | return rfLog.items[0] 73 | } 74 | 75 | func (rfLog *RaftLog) MemLogItemCount() int { 76 | return len(rfLog.items) 77 | } 78 | 79 | func (rfLog *RaftLog) EraseMemBefore(idx int64) []*pb.Entry { 80 | return rfLog.items[idx:] 81 | } 82 | 83 | func (rfLog *RaftLog) EraseMemAfter(idx int64) []*pb.Entry { 84 | return rfLog.items[:idx] 85 | } 86 | 87 | func (rfLog *RaftLog) GetMemRange(lo, hi int64) []*pb.Entry { 88 | return rfLog.items[lo:hi] 89 | } 90 | 91 | func (rfLog *RaftLog) MemAppend(newEnt *pb.Entry) { 92 | rfLog.items = append(rfLog.items, newEnt) 93 | } 94 | 95 | func (rfLog *RaftLog) GetMemEntry(idx int64) *pb.Entry { 96 | return rfLog.items[idx] 97 | } 98 | 99 | func (rfLog *RaftLog) GetMemLast() *pb.Entry { 100 | return rfLog.items[len(rfLog.items)-1] 101 | } 102 | -------------------------------------------------------------------------------- /raftcore/raft_log_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package raftcore 26 | 27 | import ( 28 | "testing" 29 | 30 | pb "github.com/eraft-io/eraft/raftpb" 31 | ) 32 | 33 | func TestEncodeLogKey(t *testing.T) { 34 | encodeData := EncodeRaftLogKey(10) 35 | t.Log(encodeData) 36 | t.Log(DecodeRaftLogKey(encodeData)) 37 | } 38 | 39 | func TestMemLogGetInit(t *testing.T) { 40 | raftLog := MakeMemRaftLog() 41 | t.Log(raftLog.GetMemFirst()) 42 | t.Log(raftLog.GetMemLast()) 43 | t.Log(len(raftLog.items)) 44 | RemoveDir("./log_data_test") 45 | } 46 | 47 | func TestMenEraseBefore1(t *testing.T) { 48 | raftLog := MakeMemRaftLog() 49 | firstEnt := raftLog.GetMemFirst() 50 | t.Logf("first log %s", firstEnt.String()) 51 | lastEnt := raftLog.GetMemLast() 52 | t.Logf("last log %s", lastEnt.String()) 53 | ents := raftLog.EraseMemBefore(1) 54 | t.Logf("%v", ents) 55 | } 56 | 57 | func TestEraseAfter1(t *testing.T) { 58 | raftLog := MakeMemRaftLog() 59 | firstEnt := raftLog.GetMemFirst() 60 | t.Logf("first log %s", firstEnt.String()) 61 | lastEnt := raftLog.GetMemLast() 62 | t.Logf("last log %s", lastEnt.String()) 63 | ents := raftLog.EraseMemAfter(1) 64 | t.Logf("%v", ents) 65 | } 66 | 67 | func TestEraseAfter0And1(t *testing.T) { 68 | raftLog := MakeMemRaftLog() 69 | firstEnt := raftLog.GetMemFirst() 70 | t.Logf("first log %s", firstEnt.String()) 71 | lastEnt := raftLog.GetMemLast() 72 | t.Logf("last log %s", lastEnt.String()) 73 | ents := raftLog.EraseMemAfter(0) 74 | t.Logf("%v", ents) 75 | raftLog.MemAppend(&pb.Entry{ 76 | Index: 1, 77 | Term: 1, 78 | }) 79 | ents = raftLog.EraseMemAfter(1) 80 | t.Logf("%v", ents) 81 | t.Logf("%d", len(raftLog.items)) 82 | } 83 | 84 | func TestEraseBefore0And1(t *testing.T) { 85 | raftLog := MakeMemRaftLog() 86 | firstEnt := raftLog.GetMemFirst() 87 | t.Logf("first log %s", firstEnt.String()) 88 | lastEnt := raftLog.GetMemLast() 89 | t.Logf("last log %s", lastEnt.String()) 90 | ents := raftLog.EraseMemBefore(0) 91 | t.Logf("%v", ents) 92 | raftLog.MemAppend(&pb.Entry{ 93 | Index: 1, 94 | Term: 1, 95 | }) 96 | raftLog.MemAppend(&pb.Entry{ 97 | Index: 2, 98 | Term: 1, 99 | }) 100 | ents = raftLog.EraseMemBefore(1) 101 | t.Logf("%v", ents) 102 | t.Logf("%d", len(raftLog.items)) 103 | } 104 | -------------------------------------------------------------------------------- /raftcore/utils.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 
15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package raftcore 26 | 27 | import ( 28 | "math/rand" 29 | "time" 30 | 31 | "github.com/eraft-io/eraft/logger" 32 | ) 33 | 34 | func RandIntRange(min int, max int) int { 35 | s1 := rand.NewSource(time.Now().UnixNano()) 36 | r1 := rand.New(s1) 37 | return r1.Intn(max-min) + min 38 | } 39 | 40 | func MakeAnRandomElectionTimeout(base int) int { 41 | return RandIntRange(base, 2*base) 42 | } 43 | 44 | func PrintDebugLog(msg string) { 45 | logger.ELogger().Sugar().Debugf("%s %s \n", time.Now().Format("2006-01-02 15:04:05"), msg) 46 | } 47 | 48 | func Min(x, y int) int { 49 | if x > y { 50 | return y 51 | } 52 | return x 53 | } 54 | 55 | func Max(x, y int) int { 56 | if x < y { 57 | return y 58 | } 59 | return x 60 | } 61 | -------------------------------------------------------------------------------- /raftcore/utils_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package raftcore 26 | 27 | import ( 28 | "testing" 29 | ) 30 | 31 | func TestRandRange(t *testing.T) { 32 | for i := 0; i < 20; i++ { 33 | t.Logf("get rand number %d", RandIntRange(100, 200)) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /raftpb/raftbasic_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
2 | // versions: 3 | // - protoc-gen-go-grpc v1.2.0 4 | // - protoc v3.19.4 5 | // source: raftbasic.proto 6 | 7 | package raftpb 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | ) 15 | 16 | // This is a compile-time assertion to ensure that this generated file 17 | // is compatible with the grpc package it is being compiled against. 18 | // Requires gRPC-Go v1.32.0 or later. 19 | const _ = grpc.SupportPackageIsVersion7 20 | 21 | // RaftServiceClient is the client API for RaftService service. 22 | // 23 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 24 | type RaftServiceClient interface { 25 | RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) 26 | AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) 27 | DoCommand(ctx context.Context, in *CommandRequest, opts ...grpc.CallOption) (*CommandResponse, error) 28 | DoConfig(ctx context.Context, in *ConfigRequest, opts ...grpc.CallOption) (*ConfigResponse, error) 29 | DoBucketsOperation(ctx context.Context, in *BucketOperationRequest, opts ...grpc.CallOption) (*BucketOperationResponse, error) 30 | Snapshot(ctx context.Context, in *InstallSnapshotRequest, opts ...grpc.CallOption) (*InstallSnapshotResponse, error) 31 | } 32 | 33 | type raftServiceClient struct { 34 | cc grpc.ClientConnInterface 35 | } 36 | 37 | func NewRaftServiceClient(cc grpc.ClientConnInterface) RaftServiceClient { 38 | return &raftServiceClient{cc} 39 | } 40 | 41 | func (c *raftServiceClient) RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) { 42 | out := new(RequestVoteResponse) 43 | err := c.cc.Invoke(ctx, "/pbs.RaftService/RequestVote", in, out, opts...) 44 | if err != nil { 45 | return nil, err 46 | } 47 | return out, nil 48 | } 49 | 50 | func (c *raftServiceClient) AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) { 51 | out := new(AppendEntriesResponse) 52 | err := c.cc.Invoke(ctx, "/pbs.RaftService/AppendEntries", in, out, opts...) 53 | if err != nil { 54 | return nil, err 55 | } 56 | return out, nil 57 | } 58 | 59 | func (c *raftServiceClient) DoCommand(ctx context.Context, in *CommandRequest, opts ...grpc.CallOption) (*CommandResponse, error) { 60 | out := new(CommandResponse) 61 | err := c.cc.Invoke(ctx, "/pbs.RaftService/DoCommand", in, out, opts...) 62 | if err != nil { 63 | return nil, err 64 | } 65 | return out, nil 66 | } 67 | 68 | func (c *raftServiceClient) DoConfig(ctx context.Context, in *ConfigRequest, opts ...grpc.CallOption) (*ConfigResponse, error) { 69 | out := new(ConfigResponse) 70 | err := c.cc.Invoke(ctx, "/pbs.RaftService/DoConfig", in, out, opts...) 71 | if err != nil { 72 | return nil, err 73 | } 74 | return out, nil 75 | } 76 | 77 | func (c *raftServiceClient) DoBucketsOperation(ctx context.Context, in *BucketOperationRequest, opts ...grpc.CallOption) (*BucketOperationResponse, error) { 78 | out := new(BucketOperationResponse) 79 | err := c.cc.Invoke(ctx, "/pbs.RaftService/DoBucketsOperation", in, out, opts...) 
80 | if err != nil { 81 | return nil, err 82 | } 83 | return out, nil 84 | } 85 | 86 | func (c *raftServiceClient) Snapshot(ctx context.Context, in *InstallSnapshotRequest, opts ...grpc.CallOption) (*InstallSnapshotResponse, error) { 87 | out := new(InstallSnapshotResponse) 88 | err := c.cc.Invoke(ctx, "/pbs.RaftService/Snapshot", in, out, opts...) 89 | if err != nil { 90 | return nil, err 91 | } 92 | return out, nil 93 | } 94 | 95 | // RaftServiceServer is the server API for RaftService service. 96 | // All implementations must embed UnimplementedRaftServiceServer 97 | // for forward compatibility 98 | type RaftServiceServer interface { 99 | RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) 100 | AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) 101 | DoCommand(context.Context, *CommandRequest) (*CommandResponse, error) 102 | DoConfig(context.Context, *ConfigRequest) (*ConfigResponse, error) 103 | DoBucketsOperation(context.Context, *BucketOperationRequest) (*BucketOperationResponse, error) 104 | Snapshot(context.Context, *InstallSnapshotRequest) (*InstallSnapshotResponse, error) 105 | mustEmbedUnimplementedRaftServiceServer() 106 | } 107 | 108 | // UnimplementedRaftServiceServer must be embedded to have forward compatible implementations. 109 | type UnimplementedRaftServiceServer struct { 110 | } 111 | 112 | func (UnimplementedRaftServiceServer) RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) { 113 | return nil, status.Errorf(codes.Unimplemented, "method RequestVote not implemented") 114 | } 115 | func (UnimplementedRaftServiceServer) AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) { 116 | return nil, status.Errorf(codes.Unimplemented, "method AppendEntries not implemented") 117 | } 118 | func (UnimplementedRaftServiceServer) DoCommand(context.Context, *CommandRequest) (*CommandResponse, error) { 119 | return nil, status.Errorf(codes.Unimplemented, "method DoCommand not implemented") 120 | } 121 | func (UnimplementedRaftServiceServer) DoConfig(context.Context, *ConfigRequest) (*ConfigResponse, error) { 122 | return nil, status.Errorf(codes.Unimplemented, "method DoConfig not implemented") 123 | } 124 | func (UnimplementedRaftServiceServer) DoBucketsOperation(context.Context, *BucketOperationRequest) (*BucketOperationResponse, error) { 125 | return nil, status.Errorf(codes.Unimplemented, "method DoBucketsOperation not implemented") 126 | } 127 | func (UnimplementedRaftServiceServer) Snapshot(context.Context, *InstallSnapshotRequest) (*InstallSnapshotResponse, error) { 128 | return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") 129 | } 130 | func (UnimplementedRaftServiceServer) mustEmbedUnimplementedRaftServiceServer() {} 131 | 132 | // UnsafeRaftServiceServer may be embedded to opt out of forward compatibility for this service. 133 | // Use of this interface is not recommended, as added methods to RaftServiceServer will 134 | // result in compilation errors. 
135 | type UnsafeRaftServiceServer interface { 136 | mustEmbedUnimplementedRaftServiceServer() 137 | } 138 | 139 | func RegisterRaftServiceServer(s grpc.ServiceRegistrar, srv RaftServiceServer) { 140 | s.RegisterService(&RaftService_ServiceDesc, srv) 141 | } 142 | 143 | func _RaftService_RequestVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 144 | in := new(RequestVoteRequest) 145 | if err := dec(in); err != nil { 146 | return nil, err 147 | } 148 | if interceptor == nil { 149 | return srv.(RaftServiceServer).RequestVote(ctx, in) 150 | } 151 | info := &grpc.UnaryServerInfo{ 152 | Server: srv, 153 | FullMethod: "/pbs.RaftService/RequestVote", 154 | } 155 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 156 | return srv.(RaftServiceServer).RequestVote(ctx, req.(*RequestVoteRequest)) 157 | } 158 | return interceptor(ctx, in, info, handler) 159 | } 160 | 161 | func _RaftService_AppendEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 162 | in := new(AppendEntriesRequest) 163 | if err := dec(in); err != nil { 164 | return nil, err 165 | } 166 | if interceptor == nil { 167 | return srv.(RaftServiceServer).AppendEntries(ctx, in) 168 | } 169 | info := &grpc.UnaryServerInfo{ 170 | Server: srv, 171 | FullMethod: "/pbs.RaftService/AppendEntries", 172 | } 173 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 174 | return srv.(RaftServiceServer).AppendEntries(ctx, req.(*AppendEntriesRequest)) 175 | } 176 | return interceptor(ctx, in, info, handler) 177 | } 178 | 179 | func _RaftService_DoCommand_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 180 | in := new(CommandRequest) 181 | if err := dec(in); err != nil { 182 | return nil, err 183 | } 184 | if interceptor == nil { 185 | return srv.(RaftServiceServer).DoCommand(ctx, in) 186 | } 187 | info := &grpc.UnaryServerInfo{ 188 | Server: srv, 189 | FullMethod: "/pbs.RaftService/DoCommand", 190 | } 191 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 192 | return srv.(RaftServiceServer).DoCommand(ctx, req.(*CommandRequest)) 193 | } 194 | return interceptor(ctx, in, info, handler) 195 | } 196 | 197 | func _RaftService_DoConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 198 | in := new(ConfigRequest) 199 | if err := dec(in); err != nil { 200 | return nil, err 201 | } 202 | if interceptor == nil { 203 | return srv.(RaftServiceServer).DoConfig(ctx, in) 204 | } 205 | info := &grpc.UnaryServerInfo{ 206 | Server: srv, 207 | FullMethod: "/pbs.RaftService/DoConfig", 208 | } 209 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 210 | return srv.(RaftServiceServer).DoConfig(ctx, req.(*ConfigRequest)) 211 | } 212 | return interceptor(ctx, in, info, handler) 213 | } 214 | 215 | func _RaftService_DoBucketsOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 216 | in := new(BucketOperationRequest) 217 | if err := dec(in); err != nil { 218 | return nil, err 219 | } 220 | if interceptor == nil { 221 | return srv.(RaftServiceServer).DoBucketsOperation(ctx, in) 222 | } 223 | info := 
&grpc.UnaryServerInfo{ 224 | Server: srv, 225 | FullMethod: "/pbs.RaftService/DoBucketsOperation", 226 | } 227 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 228 | return srv.(RaftServiceServer).DoBucketsOperation(ctx, req.(*BucketOperationRequest)) 229 | } 230 | return interceptor(ctx, in, info, handler) 231 | } 232 | 233 | func _RaftService_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 234 | in := new(InstallSnapshotRequest) 235 | if err := dec(in); err != nil { 236 | return nil, err 237 | } 238 | if interceptor == nil { 239 | return srv.(RaftServiceServer).Snapshot(ctx, in) 240 | } 241 | info := &grpc.UnaryServerInfo{ 242 | Server: srv, 243 | FullMethod: "/pbs.RaftService/Snapshot", 244 | } 245 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 246 | return srv.(RaftServiceServer).Snapshot(ctx, req.(*InstallSnapshotRequest)) 247 | } 248 | return interceptor(ctx, in, info, handler) 249 | } 250 | 251 | // RaftService_ServiceDesc is the grpc.ServiceDesc for RaftService service. 252 | // It's only intended for direct use with grpc.RegisterService, 253 | // and not to be introspected or modified (even as a copy) 254 | var RaftService_ServiceDesc = grpc.ServiceDesc{ 255 | ServiceName: "pbs.RaftService", 256 | HandlerType: (*RaftServiceServer)(nil), 257 | Methods: []grpc.MethodDesc{ 258 | { 259 | MethodName: "RequestVote", 260 | Handler: _RaftService_RequestVote_Handler, 261 | }, 262 | { 263 | MethodName: "AppendEntries", 264 | Handler: _RaftService_AppendEntries_Handler, 265 | }, 266 | { 267 | MethodName: "DoCommand", 268 | Handler: _RaftService_DoCommand_Handler, 269 | }, 270 | { 271 | MethodName: "DoConfig", 272 | Handler: _RaftService_DoConfig_Handler, 273 | }, 274 | { 275 | MethodName: "DoBucketsOperation", 276 | Handler: _RaftService_DoBucketsOperation_Handler, 277 | }, 278 | { 279 | MethodName: "Snapshot", 280 | Handler: _RaftService_Snapshot_Handler, 281 | }, 282 | }, 283 | Streams: []grpc.StreamDesc{}, 284 | Metadata: "raftbasic.proto", 285 | } 286 | -------------------------------------------------------------------------------- /scripts/build_dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | export PATH=$HOME/go/bin:/usr/local/go/bin:$PATH 5 | 6 | cd /eraft && make 7 | -------------------------------------------------------------------------------- /scripts/gen_proto.sh: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2022 eraft dev group 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | #!/bin/sh 24 | export PATH="$PATH:$(go env GOPATH)/bin" 25 | 26 | go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28 27 | go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2 28 | 29 | protoc -I ../pbs ../pbs/raftbasic.proto --go_out=../pbs/ --go-grpc_out=../pbs/ 30 | -------------------------------------------------------------------------------- /scripts/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | /eraft/output/metacli 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 join 1 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090 5 | /eraft/output/metacli 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 join 2 172.18.0.13:8088,172.18.0.14:8089,172.18.0.15:8090 6 | /eraft/output/metacli 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 move 0-4 1 7 | /eraft/output/metacli 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 move 5-9 2 8 | 9 | sleep 10 10 | 11 | /eraft/output/shardcli 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 put testkey testvalue 12 | /eraft/output/shardcli 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090 get testkey 13 | -------------------------------------------------------------------------------- /shardkvserver/bucket.go: -------------------------------------------------------------------------------- 1 | // // 2 | // // MIT License 3 | 4 | // // Copyright (c) 2022 eraft dev group 5 | 6 | // // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // // of this software and associated documentation files (the "Software"), to deal 8 | // // in the Software without restriction, including without limitation the rights 9 | // // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // // copies of the Software, and to permit persons to whom the Software is 11 | // // furnished to do so, subject to the following conditions: 12 | 13 | // // The above copyright notice and this permission notice shall be included in 14 | // // all copies or substantial portions of the Software. 15 | 16 | // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // // SOFTWARE. 
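// Note on the cluster bootstrap commands in scripts/run_tests.sh above, judging from the matching helpers in tests/integration_test.go (cfgCli.Join / cfgCli.Move): "metacli <metaAddrs> join <gid> <groupAddrs>" appears to register a shard-server raft group with the config servers, "metacli <metaAddrs> move <from>-<to> <gid>" assigns that bucket range to the group, and "shardcli <metaAddrs> put/get" then routes each key to a group by its bucket. For example, "move 0-4 1" is expected to hand buckets 0 through 4 to group 1.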
23 | // // 24 | 25 | package shardkvserver 26 | 27 | import ( 28 | "strconv" 29 | 30 | "github.com/eraft-io/eraft/storage" 31 | ) 32 | 33 | type bucketStatus uint8 34 | 35 | const ( 36 | Running bucketStatus = iota 37 | Stopped 38 | ) 39 | 40 | const SPLIT = "$^$" 41 | 42 | // a bucket is a logical partition in a distributed system 43 | // 44 | // it has a unique id, a pointer to the db engine, and a status 45 | type Bucket struct { 46 | ID int 47 | KvDB storage.KvStore 48 | Status bucketStatus 49 | } 50 | 51 | // BucketDatasVo is a view of all the data in a bucket, 52 | // used to query a bucket's data or to insert data into a bucket 53 | type BucketDatasVo struct { 54 | Datas map[int]map[string]string 55 | } 56 | 57 | // NewBucket makes a new bucket bound to the given storage engine 58 | func NewBucket(eng storage.KvStore, id int) *Bucket { 59 | return &Bucket{id, eng, Running} 60 | } 61 | 62 | // Get reads the value stored under the encoded key from the engine 63 | func (bu *Bucket) Get(key string) (string, error) { 64 | encodeKey := strconv.Itoa(bu.ID) + SPLIT + key 65 | v, err := bu.KvDB.Get(encodeKey) 66 | if err != nil { 67 | return "", err 68 | } 69 | return v, nil 70 | } 71 | 72 | // Put writes the key/value pair to the db engine under the encoded key 73 | func (bu *Bucket) Put(key, value string) error { 74 | encodeKey := strconv.Itoa(bu.ID) + SPLIT + key 75 | return bu.KvDB.Put(encodeKey, value) 76 | } 77 | 78 | // Append appends value to the value already stored for key 79 | func (bu *Bucket) Append(key, value string) error { 80 | oldValue, err := bu.Get(key) 81 | if err != nil { 82 | return err 83 | } 84 | return bu.Put(key, oldValue+value) 85 | } 86 | 87 | // deepCopy copies all the data in a bucket 88 | func (bu *Bucket) deepCopy(trimPrefix bool) (map[string]string, error) { 89 | encodeKeyPrefix := strconv.Itoa(bu.ID) + SPLIT 90 | kvs, err := bu.KvDB.DumpPrefixKey(encodeKeyPrefix, trimPrefix) 91 | if err != nil { 92 | return nil, err 93 | } 94 | return kvs, nil 95 | } 96 | 97 | // deleteBucketData deletes all the data in a bucket 98 | func (bu *Bucket) deleteBucketData() error { 99 | encodeKeyPrefix := strconv.Itoa(bu.ID) + SPLIT 100 | return bu.KvDB.DelPrefixKeys(encodeKeyPrefix) 101 | } 102 | -------------------------------------------------------------------------------- /shardkvserver/client.go: -------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE.
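// A small worked example of the key layout used by Bucket above, assuming the SPLIT separator "$^$" from bucket.go: each user key is persisted in the storage engine as "<bucketID>$^$<key>", and which bucket a key belongs to is decided by common.Key2BucketID (implementation not shown here). So if "foo" maps to bucket 3 it is stored under
//
//	encodeKey := strconv.Itoa(3) + SPLIT + "foo" // "3$^$foo"
//
// which is also why dumping or deleting a whole bucket (deepCopy, deleteBucketData) is just a prefix scan or prefix delete over "3$^$".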
23 | // 24 | // 25 | // TODO: this is a debug client version, need to deal with more detail handle 26 | // 27 | 28 | package shardkvserver 29 | 30 | import ( 31 | "context" 32 | "crypto/rand" 33 | "errors" 34 | "math/big" 35 | "strings" 36 | 37 | "github.com/eraft-io/eraft/common" 38 | "github.com/eraft-io/eraft/logger" 39 | "github.com/eraft-io/eraft/metaserver" 40 | "github.com/eraft-io/eraft/raftcore" 41 | pb "github.com/eraft-io/eraft/raftpb" 42 | ) 43 | 44 | // a client is defined for the shard_kvserver 45 | type KvClient struct { 46 | // raft group rpc client 47 | rpcCli *raftcore.RaftClientEnd 48 | 49 | connectsCache map[string]*raftcore.RaftClientEnd 50 | 51 | // config server group client 52 | csCli *metaserver.MetaSvrCli 53 | // current config, got from config server group 54 | config *metaserver.Config 55 | 56 | groupLeaderAddrs map[int64]string 57 | 58 | // the client id, use to identify a client 59 | clientId int64 60 | // the command id, use to identify a command 61 | commandId int64 62 | } 63 | 64 | func (cli *KvClient) AddConnToCache(svrAddr string, rpcCli *raftcore.RaftClientEnd) { 65 | cli.connectsCache[svrAddr] = rpcCli 66 | } 67 | 68 | func (cli *KvClient) GetConnFromCache(svrAddr string) *raftcore.RaftClientEnd { 69 | if conn, ok := cli.connectsCache[svrAddr]; ok { 70 | return conn 71 | } 72 | return nil 73 | } 74 | 75 | func (cli *KvClient) CloseRpcCliConn() { 76 | cli.rpcCli.CloseAllConn() 77 | } 78 | 79 | // expose config server group clients to the outside 80 | func (cli *KvClient) GetCsClient() *metaserver.MetaSvrCli { 81 | return cli.csCli 82 | } 83 | 84 | // expose raft group rpc client to the outside 85 | func (cli *KvClient) GetRpcClient() *raftcore.RaftClientEnd { 86 | return cli.rpcCli 87 | } 88 | 89 | // make a random id 90 | func nrand() int64 { 91 | maxi := big.NewInt(int64(1) << 62) 92 | bigx, _ := rand.Int(rand.Reader, maxi) 93 | return bigx.Int64() 94 | } 95 | 96 | // make a kv client 97 | func MakeKvClient(csAddrs string) *KvClient { 98 | metaSvrCli := metaserver.MakeMetaSvrClient(common.UnUsedTid, strings.Split(csAddrs, ",")) 99 | kvCli := &KvClient{ 100 | csCli: metaSvrCli, 101 | rpcCli: nil, 102 | groupLeaderAddrs: map[int64]string{}, 103 | connectsCache: make(map[string]*raftcore.RaftClientEnd), 104 | clientId: nrand(), 105 | commandId: 0, 106 | } 107 | kvCli.config = kvCli.csCli.Query(-1) 108 | return kvCli 109 | } 110 | 111 | // get interface to client, use to get a key's data from the cluster 112 | func (cli *KvClient) Get(key string) (string, error) { 113 | return cli.Command(&pb.CommandRequest{ 114 | Key: key, 115 | OpType: pb.OpType_OpGet, 116 | }) 117 | } 118 | 119 | // put interface to client, use to put key, value data to the cluster 120 | func (cli *KvClient) Put(key, value string) error { 121 | _, err := cli.Command(&pb.CommandRequest{ 122 | Key: key, 123 | Value: value, 124 | OpType: pb.OpType_OpPut, 125 | }) 126 | return err 127 | } 128 | 129 | // GetBucketDatas 130 | // get all the data in a bucket, this is not an efficient approach to data migration 131 | // and needs to be optimized 132 | func (cli *KvClient) GetBucketDatas(gid int, bucketIds []int64) string { 133 | return cli.BucketOpCommand(&pb.BucketOperationRequest{ 134 | BucketOpType: pb.BucketOpType_OpGetData, 135 | Gid: int64(gid), 136 | ConfigVersion: int64(cli.config.Version), 137 | BucketIds: bucketIds, 138 | }) 139 | } 140 | 141 | // DeleteBucketDatas 142 | // delete all the data in a bucket, this is not an efficient approach to data migration 143 | // and needs to be 
optimized 144 | func (cli *KvClient) DeleteBucketDatas(gid int, bucketIds []int64) string { 145 | return cli.BucketOpCommand(&pb.BucketOperationRequest{ 146 | BucketOpType: pb.BucketOpType_OpDeleteData, 147 | Gid: int64(gid), 148 | ConfigVersion: int64(cli.config.Version), 149 | BucketIds: bucketIds, 150 | }) 151 | } 152 | 153 | // InsertBucketDatas 154 | // insert all the data into a bucket, this is not an efficient approach to data migration 155 | // and needs to be optimized 156 | func (cli *KvClient) InsertBucketDatas(gid int, bucketIds []int64, datas []byte) string { 157 | return cli.BucketOpCommand(&pb.BucketOperationRequest{ 158 | BucketOpType: pb.BucketOpType_OpInsertData, 159 | BucketsDatas: datas, 160 | Gid: int64(gid), 161 | BucketIds: bucketIds, 162 | ConfigVersion: int64(cli.config.Version), 163 | }) 164 | } 165 | 166 | // Command 167 | // do user normal command 168 | func (cli *KvClient) Command(req *pb.CommandRequest) (string, error) { 169 | bucketId := common.Key2BucketID(req.Key) 170 | gid := cli.config.Buckets[bucketId] 171 | if gid == 0 { 172 | return "", errors.New("there is no shard in charge of this bucket, please join the server group before") 173 | } 174 | if servers, ok := cli.config.Groups[gid]; ok { 175 | for _, svrAddr := range servers { 176 | if cli.GetConnFromCache(svrAddr) == nil { 177 | cli.rpcCli = raftcore.MakeRaftClientEnd(svrAddr, common.UnUsedTid) 178 | cli.AddConnToCache(svrAddr, cli.rpcCli) 179 | } else { 180 | if cli.groupLeaderAddrs[int64(gid)] != "" { 181 | svrAddr = cli.groupLeaderAddrs[int64(gid)] 182 | } 183 | cli.rpcCli = cli.GetConnFromCache(svrAddr) 184 | } 185 | resp, err := (*cli.rpcCli.GetRaftServiceCli()).DoCommand(context.Background(), req) 186 | if err != nil { 187 | // node down 188 | logger.ELogger().Sugar().Debugf("there is a node down is cluster, but we can continue with outher node") 189 | continue 190 | } 191 | switch resp.ErrCode { 192 | case common.ErrCodeNoErr: 193 | cli.commandId++ 194 | return resp.Value, nil 195 | case common.ErrCodeWrongGroup: 196 | cli.config = cli.csCli.Query(-1) 197 | return "", errors.New("WrongGroup") 198 | case common.ErrCodeWrongLeader: 199 | cli.groupLeaderAddrs[int64(gid)] = servers[resp.LeaderId] 200 | cli.rpcCli = raftcore.MakeRaftClientEnd(servers[resp.LeaderId], common.UnUsedTid) 201 | cli.AddConnToCache(servers[resp.LeaderId], cli.rpcCli) 202 | resp, err := (*cli.rpcCli.GetRaftServiceCli()).DoCommand(context.Background(), req) 203 | if err != nil { 204 | logger.ELogger().Sugar().Error("send command to server error", err.Error()) 205 | } 206 | if resp != nil && resp.ErrCode == common.ErrCodeNoErr { 207 | cli.commandId++ 208 | return resp.Value, nil 209 | } 210 | default: 211 | return "", errors.New("unknown code") 212 | } 213 | } 214 | } else { 215 | return "", errors.New("please join the server group first") 216 | } 217 | return "", errors.New("unknown code") 218 | } 219 | 220 | // BucketOpCommand 221 | // do user bucket operation command 222 | func (cli *KvClient) BucketOpCommand(req *pb.BucketOperationRequest) string { 223 | for { 224 | if servers, ok := cli.config.Groups[int(req.Gid)]; ok { 225 | for _, svrAddr := range servers { 226 | cli.rpcCli = raftcore.MakeRaftClientEnd(svrAddr, common.UnUsedTid) 227 | resp, err := (*cli.rpcCli.GetRaftServiceCli()).DoBucketsOperation(context.Background(), req) 228 | if err == nil { 229 | if resp != nil { 230 | return string(resp.BucketsDatas) 231 | } else { 232 | return "" 233 | } 234 | } else { 235 | logger.ELogger().Sugar().Error("send command to 
server error", err.Error()) 236 | return "" 237 | } 238 | } 239 | } 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /shardkvserver/shard_kvserver.go: -------------------------------------------------------------------------------- 1 | // // 2 | // // MIT License 3 | 4 | // // Copyright (c) 2022 eraft dev group 5 | 6 | // // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // // of this software and associated documentation files (the "Software"), to deal 8 | // // in the Software without restriction, including without limitation the rights 9 | // // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // // copies of the Software, and to permit persons to whom the Software is 11 | // // furnished to do so, subject to the following conditions: 12 | 13 | // // The above copyright notice and this permission notice shall be included in 14 | // // all copies or substantial portions of the Software. 15 | 16 | // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // // SOFTWARE. 23 | // // 24 | 25 | package shardkvserver 26 | 27 | import ( 28 | "bytes" 29 | "context" 30 | "encoding/gob" 31 | "encoding/json" 32 | "errors" 33 | "maps" 34 | "strconv" 35 | "strings" 36 | "sync" 37 | "sync/atomic" 38 | "time" 39 | 40 | "github.com/eraft-io/eraft/common" 41 | "github.com/eraft-io/eraft/logger" 42 | "github.com/eraft-io/eraft/metaserver" 43 | pb "github.com/eraft-io/eraft/raftpb" 44 | "github.com/eraft-io/eraft/storage" 45 | 46 | "github.com/eraft-io/eraft/raftcore" 47 | ) 48 | 49 | type ShardKV struct { 50 | mu sync.RWMutex 51 | dead int32 52 | rf *raftcore.Raft 53 | applyCh chan *pb.ApplyMsg 54 | gid_ int 55 | cvCli *metaserver.MetaSvrCli 56 | 57 | lastApplied int 58 | lastConfig metaserver.Config 59 | curConfig metaserver.Config 60 | 61 | stm map[int]*Bucket 62 | 63 | dbEng storage.KvStore 64 | 65 | notifyChs map[int]chan *pb.CommandResponse 66 | 67 | stopApplyCh chan interface{} 68 | 69 | lastSnapShotIdx int 70 | 71 | pb.UnimplementedRaftServiceServer 72 | } 73 | 74 | type MemSnapshotDB struct { 75 | KV map[string]string 76 | } 77 | 78 | // MakeShardKVServer make a new shard kv server 79 | // peerMaps: init peer map in the raft group 80 | // nodeId: the peer's nodeId in the raft group 81 | // gid: the node's raft group id 82 | // configServerAddr: config server addr (leader addr, need to optimized into config server peer map) 83 | func MakeShardKVServer(peerMaps map[int]string, nodeId int64, gid int, configServerAddrs string) *ShardKV { 84 | clientEnds := []*raftcore.RaftClientEnd{} 85 | for id, addr := range peerMaps { 86 | newEnd := raftcore.MakeRaftClientEnd(addr, uint64(id)) 87 | clientEnds = append(clientEnds, newEnd) 88 | } 89 | newApplyCh := make(chan *pb.ApplyMsg) 90 | 91 | logDBEng := storage.EngineFactory("leveldb", "./data/log/datanode_group_"+strconv.Itoa(gid)+"_nodeid_"+strconv.Itoa(int(nodeId))) 92 | newRf := raftcore.MakeRaft(clientEnds, int(nodeId), logDBEng, newApplyCh, 50, 150) 93 | newDBEng := 
storage.EngineFactory("leveldb", "./data/db/datanode_group_"+strconv.Itoa(gid)+"_nodeid_"+strconv.Itoa(int(nodeId))) 94 | 95 | shardKv := &ShardKV{ 96 | dead: 0, 97 | rf: newRf, 98 | applyCh: newApplyCh, 99 | gid_: gid, 100 | cvCli: metaserver.MakeMetaSvrClient(common.UnUsedTid, strings.Split(configServerAddrs, ",")), 101 | lastApplied: 0, 102 | curConfig: metaserver.DefaultConfig(), 103 | lastConfig: metaserver.DefaultConfig(), 104 | stm: make(map[int]*Bucket), 105 | dbEng: newDBEng, 106 | lastSnapShotIdx: 0, 107 | notifyChs: map[int]chan *pb.CommandResponse{}, 108 | } 109 | 110 | shardKv.initStm(shardKv.dbEng) 111 | 112 | shardKv.curConfig = *shardKv.cvCli.Query(-1) 113 | shardKv.lastConfig = *shardKv.cvCli.Query(-1) 114 | 115 | shardKv.stopApplyCh = make(chan interface{}) 116 | shardKv.restoreSnapshot(newRf.ReadSnapshot()) 117 | // start applier 118 | go shardKv.ApplingToStm(shardKv.stopApplyCh) 119 | 120 | go shardKv.ConfigAction() 121 | 122 | return shardKv 123 | } 124 | 125 | // CloseApply close the stopApplyCh to stop commit entries apply 126 | func (s *ShardKV) CloseApply() { 127 | close(s.stopApplyCh) 128 | } 129 | 130 | func (s *ShardKV) GetRf() *raftcore.Raft { 131 | return s.rf 132 | } 133 | 134 | // ConfigAction sync the config action from configserver 135 | func (s *ShardKV) ConfigAction() { 136 | for !s.IsKilled() { 137 | if _, isLeader := s.rf.GetState(); isLeader { 138 | logger.ELogger().Sugar().Debugf("timeout into config action") 139 | 140 | s.mu.RLock() 141 | canPerformNextConf := true 142 | for _, bucket := range s.stm { 143 | if bucket.Status != Running { 144 | canPerformNextConf = false 145 | logger.ELogger().Sugar().Errorf("cano't perform next conf") 146 | break 147 | } 148 | } 149 | if canPerformNextConf { 150 | logger.ELogger().Sugar().Debug("can perform next conf") 151 | } 152 | curConfVersion := s.curConfig.Version 153 | s.mu.RUnlock() 154 | if canPerformNextConf { 155 | nextConfig := s.cvCli.Query(int64(curConfVersion) + 1) 156 | if nextConfig == nil { 157 | continue 158 | } 159 | nextCfBytes, _ := json.Marshal(nextConfig) 160 | curCfBytes, _ := json.Marshal(s.curConfig) 161 | logger.ELogger().Sugar().Debugf("next config %s ", string(nextCfBytes)) 162 | logger.ELogger().Sugar().Debugf("cur config %s ", string(curCfBytes)) 163 | if nextConfig.Version == curConfVersion+1 { 164 | req := &pb.CommandRequest{} 165 | nextCfBytes, _ := json.Marshal(nextConfig) 166 | logger.ELogger().Sugar().Debugf("can perform next conf %s ", string(nextCfBytes)) 167 | req.Context = nextCfBytes 168 | req.OpType = pb.OpType_OpConfigChange 169 | reqBytes, _ := json.Marshal(req) 170 | idx, _, isLeader := s.rf.Propose(reqBytes) 171 | if !isLeader { 172 | return 173 | } 174 | s.mu.Lock() 175 | ch := s.getNotifyChan(idx) 176 | s.mu.Unlock() 177 | 178 | cmdResp := &pb.CommandResponse{} 179 | 180 | select { 181 | case res := <-ch: 182 | cmdResp.Value = res.Value 183 | case <-time.After(metaserver.ExecTimeout): 184 | } 185 | 186 | logger.ELogger().Sugar().Debug("propose config change ok") 187 | 188 | go func() { 189 | s.mu.Lock() 190 | delete(s.notifyChs, idx) 191 | s.mu.Unlock() 192 | }() 193 | } 194 | } 195 | } 196 | time.Sleep(time.Second * 1) 197 | } 198 | } 199 | 200 | func (s *ShardKV) CanServe(bucketId int) bool { 201 | return s.curConfig.Buckets[bucketId] == s.gid_ && (s.stm[bucketId].Status == Running) 202 | } 203 | 204 | func (s *ShardKV) getNotifyChan(index int) chan *pb.CommandResponse { 205 | if _, ok := s.notifyChs[index]; !ok { 206 | s.notifyChs[index] = make(chan 
*pb.CommandResponse, 1) 207 | } 208 | return s.notifyChs[index] 209 | } 210 | 211 | func (s *ShardKV) IsKilled() bool { 212 | return atomic.LoadInt32(&s.dead) == 1 213 | } 214 | 215 | // DoCommand do client put get command 216 | func (s *ShardKV) DoCommand(ctx context.Context, req *pb.CommandRequest) (*pb.CommandResponse, error) { 217 | 218 | cmdResp := &pb.CommandResponse{} 219 | 220 | if !s.CanServe(common.Key2BucketID(req.Key)) { 221 | cmdResp.ErrCode = common.ErrCodeWrongGroup 222 | return cmdResp, nil 223 | } 224 | reqBytes, err := json.Marshal(req) 225 | if err != nil { 226 | return nil, err 227 | } 228 | // propose to raft 229 | idx, _, isLeader := s.rf.Propose(reqBytes) 230 | if !isLeader { 231 | cmdResp.ErrCode = common.ErrCodeWrongLeader 232 | cmdResp.LeaderId = s.GetRf().GetLeaderId() 233 | return cmdResp, nil 234 | } 235 | 236 | s.mu.Lock() 237 | ch := s.getNotifyChan(idx) 238 | s.mu.Unlock() 239 | 240 | select { 241 | case res := <-ch: 242 | if res != nil { 243 | cmdResp.ErrCode = common.ErrCodeNoErr 244 | cmdResp.Value = res.Value 245 | } 246 | case <-time.After(metaserver.ExecTimeout): 247 | return cmdResp, errors.New("ExecTimeout") 248 | } 249 | 250 | go func() { 251 | s.mu.Lock() 252 | delete(s.notifyChs, idx) 253 | s.mu.Unlock() 254 | }() 255 | 256 | return cmdResp, nil 257 | } 258 | 259 | // ApplingToStm apply the commit operation to state machine 260 | func (s *ShardKV) ApplingToStm(done <-chan interface{}) { 261 | for !s.IsKilled() { 262 | select { 263 | case <-done: 264 | return 265 | case appliedMsg := <-s.applyCh: 266 | logger.ELogger().Sugar().Debugf("appling msg %s", appliedMsg.String()) 267 | 268 | if appliedMsg.SnapshotValid { 269 | s.mu.Lock() 270 | if s.rf.CondInstallSnapshot(int(appliedMsg.SnapshotTerm), int(appliedMsg.SnapshotIndex), appliedMsg.Snapshot) { 271 | s.restoreSnapshot(appliedMsg.Snapshot) 272 | s.lastApplied = int(appliedMsg.SnapshotIndex) 273 | } 274 | s.mu.Unlock() 275 | return 276 | } 277 | 278 | if appliedMsg.CommandValid { 279 | s.mu.Lock() 280 | 281 | req := &pb.CommandRequest{} 282 | if err := json.Unmarshal(appliedMsg.Command, req); err != nil { 283 | logger.ELogger().Sugar().Error("Unmarshal CommandRequest err", err.Error()) 284 | s.mu.Unlock() 285 | continue 286 | } 287 | 288 | // outdate checked 289 | if appliedMsg.CommandIndex <= int64(s.lastApplied) { 290 | s.mu.Unlock() 291 | continue 292 | } 293 | 294 | s.lastApplied = int(appliedMsg.CommandIndex) 295 | logger.ELogger().Sugar().Debugf("shard_kvserver last applied %d", s.lastApplied) 296 | 297 | cmdResp := &pb.CommandResponse{} 298 | value := "" 299 | var err error 300 | switch req.OpType { 301 | // Normal Op 302 | case pb.OpType_OpPut: 303 | bucketID := common.Key2BucketID(req.Key) 304 | if s.CanServe(bucketID) { 305 | logger.ELogger().Sugar().Debug("WRITE put " + req.Key + " value " + req.Value + " to bucket " + strconv.Itoa(bucketID)) 306 | s.stm[bucketID].Put(req.Key, req.Value) 307 | } 308 | case pb.OpType_OpAppend: 309 | bucketID := common.Key2BucketID(req.Key) 310 | if s.CanServe(bucketID) { 311 | s.stm[bucketID].Append(req.Key, req.Value) 312 | } 313 | case pb.OpType_OpGet: 314 | bucketID := common.Key2BucketID(req.Key) 315 | if s.CanServe(bucketID) { 316 | value, err = s.stm[bucketID].Get(req.Key) 317 | logger.ELogger().Sugar().Debug("get " + req.Key + " value " + value + " from bucket " + strconv.Itoa(bucketID)) 318 | } 319 | cmdResp.Value = value 320 | case pb.OpType_OpConfigChange: 321 | nextConfig := &metaserver.Config{} 322 | json.Unmarshal(req.Context, nextConfig) 323 
| if nextConfig.Version == s.curConfig.Version+1 { 324 | for i := 0; i < common.NBuckets; i++ { 325 | if s.curConfig.Buckets[i] != s.gid_ && nextConfig.Buckets[i] == s.gid_ { 326 | gid := s.curConfig.Buckets[i] 327 | if gid != 0 { 328 | s.stm[i].Status = Running 329 | } 330 | } 331 | if s.curConfig.Buckets[i] == s.gid_ && nextConfig.Buckets[i] != s.gid_ { 332 | gid := nextConfig.Buckets[i] 333 | if gid != 0 { 334 | s.stm[i].Status = Stopped 335 | } 336 | } 337 | } 338 | s.lastConfig = s.curConfig 339 | s.curConfig = *nextConfig 340 | cfBytes, _ := json.Marshal(s.curConfig) 341 | logger.ELogger().Sugar().Debugf("applied config to server %s ", string(cfBytes)) 342 | } 343 | case pb.OpType_OpDeleteBuckets: 344 | bucketOpReqs := &pb.BucketOperationRequest{} 345 | json.Unmarshal(req.Context, bucketOpReqs) 346 | for _, bid := range bucketOpReqs.BucketIds { 347 | s.stm[int(bid)].deleteBucketData() 348 | logger.ELogger().Sugar().Debugf("del buckets data list %d", bid) 349 | } 350 | case pb.OpType_OpInsertBuckets: 351 | bucketOpReqs := &pb.BucketOperationRequest{} 352 | json.Unmarshal(req.Context, bucketOpReqs) 353 | bucketDatas := &BucketDatasVo{} 354 | json.Unmarshal(bucketOpReqs.BucketsDatas, bucketDatas) 355 | for bucketId, kvs := range bucketDatas.Datas { 356 | s.stm[bucketId] = NewBucket(s.dbEng, bucketId) 357 | for k, v := range kvs { 358 | s.stm[bucketId].Put(k, v) 359 | logger.ELogger().Sugar().Debug("insert kv data to buckets k -> " + k + " v-> " + v) 360 | } 361 | } 362 | } 363 | if err != nil { 364 | raftcore.PrintDebugLog(err.Error()) 365 | } 366 | 367 | if _, isLeader := s.rf.GetState(); isLeader { 368 | ch := s.getNotifyChan(int(appliedMsg.CommandIndex)) 369 | ch <- cmdResp 370 | } 371 | 372 | if s.GetRf().GetLogCount() > 50 { 373 | s.takeSnapshot(uint64(appliedMsg.CommandIndex)) 374 | } 375 | 376 | s.mu.Unlock() 377 | 378 | } 379 | } 380 | } 381 | } 382 | 383 | // init the status machine 384 | func (s *ShardKV) initStm(eng storage.KvStore) { 385 | for i := 0; i < common.NBuckets; i++ { 386 | if _, ok := s.stm[i]; !ok { 387 | s.stm[i] = NewBucket(eng, i) 388 | } 389 | } 390 | } 391 | 392 | // takeSnapshot 393 | func (s *ShardKV) takeSnapshot(index uint64) { 394 | logger.ELogger().Sugar().Infof("start take snapshot at % d", index) 395 | var bytesState bytes.Buffer 396 | enc := gob.NewEncoder(&bytesState) 397 | memSnapshotDB := MemSnapshotDB{} 398 | memSnapshotDB.KV = map[string]string{} 399 | for i := 0; i < common.NBuckets; i++ { 400 | if s.CanServe(i) { 401 | kvs, err := s.stm[i].deepCopy(true) 402 | if err != nil { 403 | logger.ELogger().Sugar().Errorf(err.Error()) 404 | } 405 | maps.Copy(memSnapshotDB.KV, kvs) 406 | } 407 | } 408 | enc.Encode(memSnapshotDB) 409 | s.GetRf().Snapshot(int(index), bytesState.Bytes()) 410 | } 411 | 412 | // restoreSnapshot 413 | func (s *ShardKV) restoreSnapshot(snapData []byte) { 414 | if snapData == nil { 415 | return 416 | } 417 | buf := bytes.NewBuffer(snapData) 418 | data := gob.NewDecoder(buf) 419 | var memSnapshotDB MemSnapshotDB 420 | if data.Decode(&memSnapshotDB) != nil { 421 | logger.ELogger().Sugar().Error("decode memsnapshot error") 422 | } 423 | for k, v := range memSnapshotDB.KV { 424 | bucketID := common.Key2BucketID(k) 425 | if s.CanServe(bucketID) { 426 | s.stm[bucketID].Put(k, v) 427 | } 428 | } 429 | } 430 | 431 | // rpc interface 432 | func (s *ShardKV) RequestVote(ctx context.Context, req *pb.RequestVoteRequest) (*pb.RequestVoteResponse, error) { 433 | resp := &pb.RequestVoteResponse{} 434 | logger.ELogger().Sugar().Debugf("handle 
request vote %s ", req.String()) 435 | 436 | s.rf.HandleRequestVote(req, resp) 437 | logger.ELogger().Sugar().Debugf("send request vote resp %s ", resp.String()) 438 | 439 | return resp, nil 440 | } 441 | 442 | // rpc interface 443 | func (s *ShardKV) AppendEntries(ctx context.Context, req *pb.AppendEntriesRequest) (*pb.AppendEntriesResponse, error) { 444 | resp := &pb.AppendEntriesResponse{} 445 | logger.ELogger().Sugar().Debugf("handle append entry %s ", req.String()) 446 | 447 | s.rf.HandleAppendEntries(req, resp) 448 | logger.ELogger().Sugar().Debugf("append entries %s ", resp.String()) 449 | return resp, nil 450 | } 451 | 452 | // snapshot rpc interface 453 | func (s *ShardKV) Snapshot(ctx context.Context, req *pb.InstallSnapshotRequest) (*pb.InstallSnapshotResponse, error) { 454 | resp := &pb.InstallSnapshotResponse{} 455 | logger.ELogger().Sugar().Debugf("handle snapshot req %s ", req.String()) 456 | 457 | s.rf.HandleInstallSnapshot(req, resp) 458 | logger.ELogger().Sugar().Debugf("handle snapshot resp %s ", resp.String()) 459 | 460 | return resp, nil 461 | } 462 | 463 | // DoBucketsOperation rpc interface 464 | // handle bucket data get, delete and insert 465 | func (s *ShardKV) DoBucketsOperation(ctx context.Context, req *pb.BucketOperationRequest) (*pb.BucketOperationResponse, error) { 466 | opResp := &pb.BucketOperationResponse{} 467 | if _, isLeader := s.rf.GetState(); !isLeader { 468 | return opResp, errors.New("ErrorWrongLeader") 469 | } 470 | switch req.BucketOpType { 471 | case pb.BucketOpType_OpGetData: 472 | { 473 | s.mu.RLock() 474 | if s.curConfig.Version < int(req.ConfigVersion) { 475 | s.mu.RUnlock() 476 | return opResp, errors.New("ErrNotReady") 477 | } 478 | bucketDatas := &BucketDatasVo{} 479 | bucketDatas.Datas = map[int]map[string]string{} 480 | for _, bucketID := range req.BucketIds { 481 | sDatas, err := s.stm[int(bucketID)].deepCopy(false) 482 | if err != nil { 483 | s.mu.RUnlock() 484 | return opResp, err 485 | } 486 | bucketDatas.Datas[int(bucketID)] = sDatas 487 | } 488 | buketDataBytes, _ := json.Marshal(bucketDatas) 489 | opResp.BucketsDatas = buketDataBytes 490 | opResp.ConfigVersion = req.ConfigVersion 491 | s.mu.RUnlock() 492 | } 493 | case pb.BucketOpType_OpDeleteData: 494 | { 495 | s.mu.RLock() 496 | if int64(s.curConfig.Version) > req.ConfigVersion { 497 | s.mu.RUnlock() 498 | return opResp, nil 499 | } 500 | s.mu.RUnlock() 501 | commandReq := &pb.CommandRequest{} 502 | bucketOpReqBytes, _ := json.Marshal(req) 503 | commandReq.Context = bucketOpReqBytes 504 | commandReq.OpType = pb.OpType_OpDeleteBuckets 505 | commandReqBytes, _ := json.Marshal(commandReq) 506 | // async 507 | _, _, isLeader := s.rf.Propose(commandReqBytes) 508 | if !isLeader { 509 | return opResp, nil 510 | } 511 | } 512 | case pb.BucketOpType_OpInsertData: 513 | { 514 | s.mu.RLock() 515 | if int64(s.curConfig.Version) > req.ConfigVersion { 516 | s.mu.RUnlock() 517 | return opResp, nil 518 | } 519 | s.mu.RUnlock() 520 | commandReq := &pb.CommandRequest{} 521 | bucketOpReqBytes, _ := json.Marshal(req) 522 | commandReq.Context = bucketOpReqBytes 523 | commandReq.OpType = pb.OpType_OpInsertBuckets 524 | commandReqBytes, _ := json.Marshal(commandReq) 525 | // async 526 | _, _, isLeader := s.rf.Propose(commandReqBytes) 527 | if !isLeader { 528 | return opResp, nil 529 | } 530 | } 531 | } 532 | return opResp, nil 533 | } 534 | -------------------------------------------------------------------------------- /storage/kv.go: 
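// A sketch of how the bucket-migration RPCs served by DoBucketsOperation above could be driven through the KvClient in shardkvserver/client.go, assuming buckets 5-9 are being handed over from group 1 to group 2 (how and where this flow is triggered is not shown in this file):
//
//	cli := shardkvserver.MakeKvClient("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090")
//	moved := []int64{5, 6, 7, 8, 9}
//	datas := cli.GetBucketDatas(1, moved)          // dump the buckets from the old group
//	cli.InsertBucketDatas(2, moved, []byte(datas)) // replay them into the new group
//	cli.DeleteBucketDatas(1, moved)                // finally drop them from the old group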
-------------------------------------------------------------------------------- 1 | // 2 | // MIT License 3 | 4 | // Copyright (c) 2022 eraft dev group 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | // 24 | 25 | package storage 26 | 27 | // If you want to contribute a new engine implementation, you need to implement these interfaces 28 | type KvStore interface { 29 | Put(string, string) error 30 | Get(string) (string, error) 31 | Delete(string) error 32 | DumpPrefixKey(string, bool) (map[string]string, error) 33 | PutBytesKv(k []byte, v []byte) error 34 | DeleteBytesK(k []byte) error 35 | GetBytesValue(k []byte) ([]byte, error) 36 | SeekPrefixLast(prefix []byte) ([]byte, []byte, error) 37 | SeekPrefixFirst(prefix string) ([]byte, []byte, error) 38 | DelPrefixKeys(prefix string) error 39 | SeekPrefixKeyIdMax(prefix []byte) (uint64, error) 40 | FlushDB() 41 | } 42 | 43 | func EngineFactory(name string, dbPath string) KvStore { 44 | switch name { 45 | case "leveldb": 46 | levelDB, err := MakeLevelDBKvStore(dbPath) 47 | if err != nil { 48 | panic(err) 49 | } 50 | return levelDB 51 | default: 52 | panic("No such engine type support") 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /storage/kv_leveldb.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) 2022 eraft dev group 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in 13 | // all copies or substantial portions of the Software. 14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 22 | 23 | package storage 24 | 25 | import ( 26 | "encoding/binary" 27 | "errors" 28 | "strings" 29 | 30 | "github.com/syndtr/goleveldb/leveldb" 31 | "github.com/syndtr/goleveldb/leveldb/opt" 32 | "github.com/syndtr/goleveldb/leveldb/util" 33 | ) 34 | 35 | type LevelDBKvStore struct { 36 | Path string 37 | db *leveldb.DB 38 | } 39 | 40 | func MakeLevelDBKvStore(path string) (*LevelDBKvStore, error) { 41 | newDB, err := leveldb.OpenFile(path, &opt.Options{}) 42 | if err != nil { 43 | return nil, err 44 | } 45 | return &LevelDBKvStore{ 46 | Path: path, 47 | db: newDB, 48 | }, nil 49 | } 50 | 51 | func (levelDB *LevelDBKvStore) PutBytesKv(k []byte, v []byte) error { 52 | return levelDB.db.Put(k, v, nil) 53 | } 54 | 55 | func (levelDB *LevelDBKvStore) DeleteBytesK(k []byte) error { 56 | return levelDB.db.Delete(k, nil) 57 | } 58 | 59 | func (levelDB *LevelDBKvStore) GetBytesValue(k []byte) ([]byte, error) { 60 | return levelDB.db.Get(k, nil) 61 | } 62 | 63 | func (levelDB *LevelDBKvStore) Put(k string, v string) error { 64 | return levelDB.db.Put([]byte(k), []byte(v), nil) 65 | } 66 | 67 | func (levelDB *LevelDBKvStore) Get(k string) (string, error) { 68 | v, err := levelDB.db.Get([]byte(k), nil) 69 | if err != nil { 70 | return "", err 71 | } 72 | return string(v), nil 73 | } 74 | 75 | func (levelDB *LevelDBKvStore) Delete(k string) error { 76 | return levelDB.db.Delete([]byte(k), nil) 77 | } 78 | 79 | func (levelDB *LevelDBKvStore) DumpPrefixKey(prefix string, trimPrefix bool) (map[string]string, error) { 80 | kvs := make(map[string]string) 81 | iter := levelDB.db.NewIterator(util.BytesPrefix([]byte(prefix)), nil) 82 | for iter.Next() { 83 | k := string(iter.Key()) 84 | if trimPrefix { 85 | k = strings.TrimPrefix(k, prefix) 86 | } 87 | v := string(iter.Value()) 88 | kvs[k] = v 89 | } 90 | iter.Release() 91 | return kvs, iter.Error() 92 | } 93 | 94 | func (levelDB *LevelDBKvStore) FlushDB() { 95 | 96 | } 97 | 98 | func (levelDB *LevelDBKvStore) SeekPrefixLast(prefix []byte) ([]byte, []byte, error) { 99 | iter := levelDB.db.NewIterator(util.BytesPrefix(prefix), nil) 100 | defer iter.Release() 101 | ok := iter.Last() 102 | var keyBytes, valBytes []byte 103 | if ok { 104 | keyBytes = iter.Key() 105 | valBytes = iter.Value() 106 | } 107 | return keyBytes, valBytes, nil 108 | } 109 | 110 | func (levelDB *LevelDBKvStore) SeekPrefixKeyIdMax(prefix []byte) (uint64, error) { 111 | iter := levelDB.db.NewIterator(util.BytesPrefix(prefix), nil) 112 | defer iter.Release() 113 | var maxKeyId uint64 114 | maxKeyId = 0 115 | for iter.Next() { 116 | if iter.Error() != nil { 117 | return maxKeyId, iter.Error() 118 | } 119 | kBytes := iter.Key() 120 | KeyId := binary.LittleEndian.Uint64(kBytes[len(prefix):]) 121 | if KeyId > maxKeyId { 122 | maxKeyId = KeyId 123 | } 124 | } 125 | return maxKeyId, nil 126 | } 127 | 128 | func (levelDB *LevelDBKvStore) SeekPrefixFirst(prefix string) ([]byte, []byte, error) { 129 | iter := levelDB.db.NewIterator(util.BytesPrefix([]byte(prefix)), nil) 130 | defer iter.Release() 131 | if iter.Next() { 132 | return iter.Key(), iter.Value(), nil 133 | } 134 | return []byte{}, []byte{}, errors.New("seek not find key") 135 | } 136 | 137 | func (levelDB *LevelDBKvStore) DelPrefixKeys(prefix string) 
error { 138 | iter := levelDB.db.NewIterator(util.BytesPrefix([]byte(prefix)), nil) 139 | for iter.Next() { 140 | err := levelDB.db.Delete(iter.Key(), nil) 141 | if err != nil { 142 | return err 143 | } 144 | } 145 | iter.Release() 146 | return nil 147 | } 148 | -------------------------------------------------------------------------------- /storage/kv_leveldb_test.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) 2022 eraft dev group 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in 13 | // all copies or substantial portions of the Software. 14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 22 | 23 | package storage 24 | 25 | import ( 26 | "bytes" 27 | "encoding/binary" 28 | "io/ioutil" 29 | "os" 30 | "path" 31 | "testing" 32 | ) 33 | 34 | func RemoveDir(in string) { 35 | dir, _ := ioutil.ReadDir(in) 36 | for _, d := range dir { 37 | os.RemoveAll(path.Join([]string{in, d.Name()}...)) 38 | } 39 | } 40 | 41 | func TestPrefixRange(t *testing.T) { 42 | ldb, err := MakeLevelDBKvStore("./test_data") 43 | if err != nil { 44 | t.Log(err) 45 | return 46 | } 47 | 48 | prefixBytes := []byte{0x11, 0x11, 0x19, 0x96} 49 | for i := 0; i < 300; i++ { 50 | var outBuf bytes.Buffer 51 | outBuf.Write(prefixBytes) 52 | b := make([]byte, 8) 53 | binary.LittleEndian.PutUint64(b, uint64(i)) 54 | outBuf.Write(b) 55 | t.Logf("write %v", outBuf.Bytes()) 56 | ldb.PutBytesKv(outBuf.Bytes(), []byte{byte(i)}) 57 | } 58 | 59 | idMax, err := ldb.SeekPrefixKeyIdMax(prefixBytes) 60 | if err != nil { 61 | t.Log(err) 62 | return 63 | } 64 | 65 | t.Logf("idMax -> %d", idMax) 66 | 67 | ldb.db.Close() 68 | RemoveDir("./test_data") 69 | } 70 | -------------------------------------------------------------------------------- /tests/integration_test.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "os" 7 | "os/signal" 8 | "strconv" 9 | "strings" 10 | "syscall" 11 | "testing" 12 | "time" 13 | 14 | "github.com/eraft-io/eraft/common" 15 | "github.com/eraft-io/eraft/logger" 16 | pb "github.com/eraft-io/eraft/raftpb" 17 | "github.com/eraft-io/eraft/shardkvserver" 18 | "github.com/stretchr/testify/assert" 19 | 20 | "github.com/eraft-io/eraft/metaserver" 21 | "google.golang.org/grpc" 22 | "google.golang.org/grpc/reflection" 23 | ) 24 | 25 | func RunMetaServer(peerMaps map[int]string, nodeId int) { 26 | sigs := make(chan os.Signal, 1) 27 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 28 | 
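// RunMetaServer boots one metaserver peer for the tests: it builds the peer with metaserver.MakeMetaServer, listens on peerMaps[nodeId], registers the peer as the gRPC RaftService implementation, and on SIGINT/SIGTERM closes the raft client connections and stops the apply loop before exiting. The tests below start three such peers (127.0.0.1:8088-8090) to form the config-server raft group.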
29 | metaSvr := metaserver.MakeMetaServer(peerMaps, nodeId) 30 | lis, err := net.Listen("tcp", peerMaps[nodeId]) 31 | if err != nil { 32 | fmt.Printf("failed to listen: %v", err) 33 | return 34 | } 35 | s := grpc.NewServer() 36 | 37 | pb.RegisterRaftServiceServer(s, metaSvr) 38 | 39 | sigChan := make(chan os.Signal, 1) 40 | 41 | signal.Notify(sigChan) 42 | 43 | go func() { 44 | sig := <-sigs 45 | fmt.Println(sig) 46 | metaSvr.Rf.CloseEndsConn() 47 | metaSvr.StopApply() 48 | os.Exit(-1) 49 | }() 50 | 51 | reflection.Register(s) 52 | err = s.Serve(lis) 53 | if err != nil { 54 | fmt.Printf("failed to serve: %v", err) 55 | return 56 | } 57 | } 58 | 59 | func RunShardKvServer(svrPeerMaps map[int]string, nodeId int, groupId int, metaaddrs string) { 60 | sigs := make(chan os.Signal, 1) 61 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 62 | 63 | shardSvr := shardkvserver.MakeShardKVServer(svrPeerMaps, int64(nodeId), groupId, metaaddrs) 64 | lis, err := net.Listen("tcp", svrPeerMaps[nodeId]) 65 | if err != nil { 66 | fmt.Printf("failed to listen: %v", err) 67 | return 68 | } 69 | fmt.Printf("server listen on: %s \n", svrPeerMaps[nodeId]) 70 | s := grpc.NewServer() 71 | pb.RegisterRaftServiceServer(s, shardSvr) 72 | 73 | sigChan := make(chan os.Signal, 1) 74 | 75 | signal.Notify(sigChan) 76 | 77 | go func() { 78 | sig := <-sigs 79 | fmt.Println(sig) 80 | shardSvr.GetRf().CloseEndsConn() 81 | shardSvr.CloseApply() 82 | os.Exit(-1) 83 | }() 84 | 85 | reflection.Register(s) 86 | err = s.Serve(lis) 87 | if err != nil { 88 | fmt.Printf("failed to serve: %v", err) 89 | return 90 | } 91 | } 92 | 93 | func AddServerGroup(metaaddrs string, groupId int64, shardserveraddrs string) { 94 | cfgCli := metaserver.MakeMetaSvrClient(common.UnUsedTid, strings.Split(metaaddrs, ",")) 95 | addrMap := make(map[int64]string) 96 | addrMap[groupId] = shardserveraddrs 97 | cfgCli.Join(addrMap) 98 | } 99 | 100 | func MoveSlotToServerGroup(metaaddrs string, startSlot int, endSlot int, groupId int) { 101 | cfgCli := metaserver.MakeMetaSvrClient(common.UnUsedTid, strings.Split(metaaddrs, ",")) 102 | for i := startSlot; i <= endSlot; i++ { 103 | cfgCli.Move(i, groupId) 104 | } 105 | } 106 | 107 | func TestBasicClusterRW(t *testing.T) { 108 | // start metaserver cluster 109 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 0) 110 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 1) 111 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 2) 112 | time.Sleep(time.Second * 5) 113 | // start shardserver cluster 114 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 0, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 115 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 1, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 116 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 2, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 117 | time.Sleep(time.Second * 5) 118 | go RunShardKvServer(map[int]string{0: "127.0.0.1:7088", 1: "127.0.0.1:7089", 2: "127.0.0.1:7090"}, 0, 2, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 119 | go RunShardKvServer(map[int]string{0: "127.0.0.1:7088", 1: "127.0.0.1:7089", 2: "127.0.0.1:7090"}, 1, 2, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 120 | go RunShardKvServer(map[int]string{0: "127.0.0.1:7088", 1: 
"127.0.0.1:7089", 2: "127.0.0.1:7090"}, 2, 2, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 121 | time.Sleep(time.Second * 5) 122 | // init meta server 123 | AddServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 1, "127.0.0.1:6088,127.0.0.1:6089,127.0.0.1:6090") 124 | AddServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 2, "127.0.0.1:7088,127.0.0.1:7089,127.0.0.1:7090") 125 | MoveSlotToServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 0, 4, 1) 126 | MoveSlotToServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 5, 9, 2) 127 | time.Sleep(time.Second * 20) 128 | 129 | // R-W test 130 | shardkvcli := shardkvserver.MakeKvClient("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 131 | 132 | shardkvcli.Put("testKey", "testValue") 133 | 134 | time.Sleep(time.Second * 10) 135 | val, err := shardkvcli.Get("testKey") 136 | if err != nil { 137 | panic(err.Error()) 138 | } 139 | assert.Equal(t, val, "testValue") 140 | time.Sleep(time.Second * 3) 141 | common.RemoveDir("./data") 142 | } 143 | 144 | func TestClusterSingleShardRwBench(t *testing.T) { 145 | // start metaserver cluster 146 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 0) 147 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 1) 148 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 2) 149 | time.Sleep(time.Second * 5) 150 | // start shardserver cluster 151 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 0, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 152 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 1, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 153 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 2, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 154 | time.Sleep(time.Second * 5) 155 | // init meta server 156 | AddServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 1, "127.0.0.1:6088,127.0.0.1:6089,127.0.0.1:6090") 157 | MoveSlotToServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 0, 9, 1) 158 | time.Sleep(time.Second * 20) 159 | 160 | // R-W test 161 | shardkvcli := shardkvserver.MakeKvClient("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 162 | 163 | N := 64 164 | KeySize := 64 165 | ValSize := 64 166 | benchKvs := map[string]string{} 167 | for i := 0; i < N; i++ { 168 | k := strconv.Itoa(i) + "-" + common.RandStringRunes(KeySize) 169 | v := common.RandStringRunes(ValSize) 170 | benchKvs[k] = v 171 | } 172 | costTime := []int64{} 173 | 174 | for key, val := range benchKvs { 175 | start := time.Now() 176 | shardkvcli.Put(key, val) 177 | elapsed := time.Since(start) 178 | costTime = append(costTime, elapsed.Milliseconds()) 179 | } 180 | 181 | sum := 0.0 182 | avg := 0.0 183 | maxi := 0.0 184 | mini := 9999999999999999.0 185 | 186 | for _, cost := range costTime { 187 | sum += float64(cost) 188 | if cost > int64(maxi) { 189 | maxi = float64(cost) 190 | } 191 | if cost < int64(mini) { 192 | mini = float64(cost) 193 | } 194 | } 195 | avg = sum / float64(len(costTime)) 196 | logger.ELogger().Sugar().Debugf("total request: %d", N) 197 | logger.ELogger().Sugar().Debugf("total time cost: %f", sum) 198 | logger.ELogger().Sugar().Debugf("avg time cost: %f", avg) 199 | logger.ELogger().Sugar().Debugf("max time cost: %f", maxi) 200 | logger.ELogger().Sugar().Debugf("min time cost: 
%f", mini) 201 | time.Sleep(time.Second * 2) 202 | common.RemoveDir("./data") 203 | } 204 | 205 | func TestClusterRwBench(t *testing.T) { 206 | // start metaserver cluster 207 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 0) 208 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 1) 209 | go RunMetaServer(map[int]string{0: "127.0.0.1:8088", 1: "127.0.0.1:8089", 2: "127.0.0.1:8090"}, 2) 210 | time.Sleep(time.Second * 5) 211 | // start shardserver cluster 212 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 0, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 213 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 1, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 214 | go RunShardKvServer(map[int]string{0: "127.0.0.1:6088", 1: "127.0.0.1:6089", 2: "127.0.0.1:6090"}, 2, 1, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 215 | time.Sleep(time.Second * 5) 216 | go RunShardKvServer(map[int]string{0: "127.0.0.1:7088", 1: "127.0.0.1:7089", 2: "127.0.0.1:7090"}, 0, 2, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 217 | go RunShardKvServer(map[int]string{0: "127.0.0.1:7088", 1: "127.0.0.1:7089", 2: "127.0.0.1:7090"}, 1, 2, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 218 | go RunShardKvServer(map[int]string{0: "127.0.0.1:7088", 1: "127.0.0.1:7089", 2: "127.0.0.1:7090"}, 2, 2, "127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 219 | time.Sleep(time.Second * 5) 220 | // init meta server 221 | AddServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 1, "127.0.0.1:6088,127.0.0.1:6089,127.0.0.1:6090") 222 | AddServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 2, "127.0.0.1:7088,127.0.0.1:7089,127.0.0.1:7090") 223 | MoveSlotToServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 0, 4, 1) 224 | MoveSlotToServerGroup("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090", 5, 9, 2) 225 | time.Sleep(time.Second * 20) 226 | 227 | // R-W test 228 | shardKVCli := shardkvserver.MakeKvClient("127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090") 229 | 230 | N := 64 231 | KeySize := 64 232 | ValSize := 64 233 | benchKvs := map[string]string{} 234 | for i := 0; i < N; i++ { 235 | k := strconv.Itoa(i) + "-" + common.RandStringRunes(KeySize) 236 | v := common.RandStringRunes(ValSize) 237 | benchKvs[k] = v 238 | } 239 | costTime := []int64{} 240 | 241 | for key, val := range benchKvs { 242 | start := time.Now() 243 | shardKVCli.Put(key, val) 244 | elapsed := time.Since(start) 245 | costTime = append(costTime, elapsed.Milliseconds()) 246 | } 247 | 248 | sum := 0.0 249 | avg := 0.0 250 | maxi := 0.0 251 | mini := 9999999999999999.0 252 | 253 | for _, cost := range costTime { 254 | sum += float64(cost) 255 | if cost > int64(maxi) { 256 | maxi = float64(cost) 257 | } 258 | if cost < int64(mini) { 259 | mini = float64(cost) 260 | } 261 | } 262 | avg = sum / float64(len(costTime)) 263 | logger.ELogger().Sugar().Debugf("total request: %d", N) 264 | logger.ELogger().Sugar().Debugf("total time cost: %f", sum) 265 | logger.ELogger().Sugar().Debugf("avg time cost: %f", avg) 266 | logger.ELogger().Sugar().Debugf("max time cost: %f", maxi) 267 | logger.ELogger().Sugar().Debugf("min time cost: %f", mini) 268 | 269 | time.Sleep(time.Second * 5) 270 | common.RemoveDir("./data") 271 | } 272 | --------------------------------------------------------------------------------