├── src ├── skipList │ └── CMakeLists.txt ├── fiber │ ├── utils.cpp │ ├── include │ │ ├── monsoon.h │ │ ├── noncopyable.hpp │ │ ├── thread.hpp │ │ ├── singleton.hpp │ │ ├── fiber.hpp │ │ ├── fd_manager.hpp │ │ ├── iomanager.hpp │ │ ├── timer.hpp │ │ ├── utils.hpp │ │ ├── hook.hpp │ │ ├── mutex.hpp │ │ └── scheduler.hpp │ ├── CMakeLists.txt │ ├── thread.cpp │ ├── fd_manager.cpp │ ├── fiber.cpp │ ├── timer.cpp │ ├── scheduler.cpp │ ├── iomanager.cpp │ └── hook.cpp ├── common │ ├── CMakeLists.txt │ ├── include │ │ ├── config.h │ │ └── util.h │ └── util.cpp ├── raftClerk │ ├── CMakeLists.txt │ ├── include │ │ ├── raftServerRpcUtil.h │ │ └── clerk.h │ ├── raftServerRpcUtil.cpp │ └── clerk.cpp ├── rpc │ ├── rpcheader.proto │ ├── CMakeLists.txt │ ├── include │ │ ├── mprpcconfig.h │ │ ├── mprpccontroller.h │ │ ├── mprpcchannel.h │ │ └── rpcprovider.h │ ├── mprpccontroller.cpp │ ├── mprpcconfig.cpp │ ├── mprpcchannel.cpp │ ├── rpcprovider.cpp │ └── rpcheader.pb.cpp ├── raftRpcPro │ ├── CMakeLists.txt │ ├── kvServerRPC.proto │ └── raftRPC.proto ├── raftCore │ ├── CMakeLists.txt │ ├── include │ │ ├── ApplyMsg.h │ │ ├── raftRpcUtil.h │ │ ├── Persister.h │ │ ├── kvServer.h │ │ └── raft.h │ ├── raftRpcUtil.cpp │ └── Persister.cpp └── CMakeLists.txt ├── example ├── rpcExample │ ├── CMakeLists.txt │ ├── callee │ │ ├── CMakeLists.txt │ │ └── friendService.cpp │ ├── caller │ │ ├── CMakeLists.txt │ │ └── callFriendService.cpp │ ├── rpc_example.md │ └── friend.proto ├── CMakeLists.txt ├── raftCoreExample │ ├── CMakeLists.txt │ ├── caller.cpp │ └── raftKvDB.cpp └── fiberExample │ ├── CMakeLists.txt │ ├── test_thread.cc │ ├── test_hook.cpp │ ├── server.cpp │ ├── test_scheduler.cpp │ └── test_iomanager.cpp ├── .gitignore ├── docs ├── images │ ├── img.png │ ├── raft.jpg │ ├── rpc1.jpg │ └── rpc2.jpg ├── 目录导览.md └── rpc编码方式的改进.html ├── bin └── test.conf ├── format.sh ├── .github └── workflows │ ├── update_readme_contributors.yml │ └── greetings.yml ├── test ├── defer_run.cpp ├── format.cpp 
├── include │ └── defer.h ├── run.cpp └── 测试文件运行说明.md ├── .run ├── provider.run.xml └── raftCoreRun.run.xml ├── CMakeLists.txt ├── .clang-tidy ├── README.md └── .clang-format /src/skipList/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /example/rpcExample/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | add_subdirectory(callee) 3 | add_subdirectory(caller) -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | !bin/test.conf 3 | .idea 4 | bin 5 | cmake-build-debug 6 | lib 7 | 8 | *.out 9 | *.a -------------------------------------------------------------------------------- /docs/images/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/KVstorageBaseRaft-cpp/HEAD/docs/images/img.png -------------------------------------------------------------------------------- /docs/images/raft.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/KVstorageBaseRaft-cpp/HEAD/docs/images/raft.jpg -------------------------------------------------------------------------------- /docs/images/rpc1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/KVstorageBaseRaft-cpp/HEAD/docs/images/rpc1.jpg -------------------------------------------------------------------------------- /docs/images/rpc2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/KVstorageBaseRaft-cpp/HEAD/docs/images/rpc2.jpg 
-------------------------------------------------------------------------------- /example/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(fiberExample) 2 | 3 | add_subdirectory(rpcExample) 4 | 5 | add_subdirectory(raftCoreExample) -------------------------------------------------------------------------------- /bin/test.conf: -------------------------------------------------------------------------------- 1 | node0ip=127.0.1.1 2 | node0port=29016 3 | node1ip=127.0.1.1 4 | node1port=29017 5 | node2ip=127.0.1.1 6 | node2port=29018 7 | node1ip=127.0.1.1 8 | node1port=7788 9 | -------------------------------------------------------------------------------- /format.sh: -------------------------------------------------------------------------------- 1 | # https://www.cnblogs.com/__tudou__/p/13322854.html 2 | find . -regex '.*\.\(cpp\|hpp\|cu\|c\|h\)' ! -regex '.*\(pb\.h\|pb\.cc\)$' -exec clang-format -style=file -i {} \; -------------------------------------------------------------------------------- /example/rpcExample/callee/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | set(SRC_LIST friendService.cpp ../friend.pb.cc) 3 | 4 | 5 | add_executable(provider ${SRC_LIST}) 6 | target_link_libraries(provider rpc_lib protobuf muduo_net muduo_base pthread) 7 | -------------------------------------------------------------------------------- /src/fiber/utils.cpp: -------------------------------------------------------------------------------- 1 | #include "utils.hpp" 2 | 3 | namespace monsoon { 4 | pid_t GetThreadId() { return syscall(SYS_gettid); } 5 | 6 | u_int32_t GetFiberId() { 7 | // TODO 8 | return 0; 9 | } 10 | } // namespace monsoon -------------------------------------------------------------------------------- /example/rpcExample/caller/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 
set(SRC_LIST calluserservice.cc ../user.pb.cc) 2 | set(SRC_LIST callFriendService.cpp ../friend.pb.cc) 3 | add_executable(consumer ${SRC_LIST}) 4 | target_link_libraries(consumer rpc_lib protobuf) -------------------------------------------------------------------------------- /src/fiber/include/monsoon.h: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_MONSOON_H__ 2 | #define __MONSOON_MONSOON_H__ 3 | 4 | #include "fd_manager.hpp" 5 | #include "fiber.hpp" 6 | #include "hook.hpp" 7 | #include "iomanager.hpp" 8 | #include "thread.hpp" 9 | #include "utils.hpp" 10 | 11 | #endif -------------------------------------------------------------------------------- /src/common/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 获取当前源文件所在目录的绝对路径 2 | get_filename_component(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) 3 | 4 | # 将该目录下所有源文件添加到 SRC_LIST 变量中 5 | aux_source_directory(${SRC_DIR} SRC_LIST) 6 | 7 | set(src_common ${SRC_LIST} CACHE INTERNAL "Description of the variable") -------------------------------------------------------------------------------- /src/raftClerk/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 获取当前源文件所在目录的绝对路径 2 | get_filename_component(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) 3 | 4 | # 将该目录下所有源文件添加到 SRC_LIST 变量中 5 | aux_source_directory(${SRC_DIR} SRC_LIST) 6 | 7 | set(src_raftClerk ${SRC_LIST} CACHE INTERNAL "Description of the variable") 8 | -------------------------------------------------------------------------------- /src/rpc/rpcheader.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package RPC; 4 | 5 | message RpcHeader 6 | { 7 | bytes service_name = 1; 8 | bytes method_name = 2; 9 | uint32 args_size = 3; //这里虽然是uint32,但是protobuf编码的时候默认就是变长编码,可见:https://www.cnblogs.com/yangwenhuan/p/10328960.html 10 | } 
-------------------------------------------------------------------------------- /src/raftRpcPro/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # 获取当前源文件所在目录的绝对路径 3 | get_filename_component(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) 4 | 5 | # 将该目录下所有源文件添加到 SRC_LIST 变量中 6 | aux_source_directory(${SRC_DIR} SRC_LIST) 7 | 8 | 9 | set(src_raftRpcPro ${SRC_LIST} CACHE INTERNAL "Description of the variable") -------------------------------------------------------------------------------- /example/rpcExample/rpc_example.md: -------------------------------------------------------------------------------- 1 | 2 | # rpc_example 3 | 4 | 5 | 6 | 1.库准备 7 | - proctoc 8 | 9 | 2.编写自己想要发送的rpc实例 10 | 参考`friend.proto`文件即可 11 | 12 | 3.生成对应的pb.h和pb.cc文件 13 | ``` 14 | protoc friend.proto --cpp_out=. 15 | ``` 16 | 17 | 4.编写rpc客户端和服务端 18 | 19 | 代码可参考`friendServer.cpp`和`callFriendService.cpp`文件。 -------------------------------------------------------------------------------- /src/fiber/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 获取当前源文件所在目录的绝对路径 2 | get_filename_component(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) 3 | 4 | # 将该目录下所有源文件添加到 SRC_LIST 变量中 5 | aux_source_directory(${SRC_DIR} SRC_LIST) 6 | 7 | add_library(fiber_lib ${SRC_LIST}) 8 | target_link_libraries(fiber_lib -ldl) 9 | set(src_fiber ${SRC_LIST} CACHE INTERNAL "Description of the variable") -------------------------------------------------------------------------------- /src/raftCore/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # ${src_raftRpcPro} 3 | 4 | 5 | 6 | # 获取当前源文件所在目录的绝对路径 7 | get_filename_component(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) 8 | 9 | # 将该目录下所有源文件添加到 SRC_LIST 变量中 10 | aux_source_directory(${SRC_DIR} SRC_LIST) 11 | 12 | set(src_raftCore ${SRC_LIST} CACHE INTERNAL "Description of the variable") 13 | 14 | 15 | 16 | 17 
| -------------------------------------------------------------------------------- /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | add_subdirectory(skipList) 4 | 5 | add_subdirectory(common) #需要注意如果rpc中需要依靠common中的代码,则需要在rpc在前,其cmake文件中的set才对rpc文件夹可见 6 | # 使用include_directories()函数应该也是可行的 7 | add_subdirectory(rpc) 8 | 9 | add_subdirectory(fiber) 10 | 11 | add_subdirectory(raftRpcPro) 12 | 13 | add_subdirectory(raftCore) 14 | 15 | add_subdirectory(raftClerk) 16 | 17 | -------------------------------------------------------------------------------- /src/fiber/include/noncopyable.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __SYLAR_NONCOPYABLE_H__ 2 | #define __SYLAR_NONCOPYABLE_H__ 3 | 4 | namespace monsoon { 5 | class Nonecopyable { 6 | public: 7 | Nonecopyable() = default; 8 | ~Nonecopyable() = default; 9 | Nonecopyable(const Nonecopyable &) = delete; 10 | Nonecopyable operator=(const Nonecopyable) = delete; 11 | }; 12 | } // namespace monsoon 13 | 14 | #endif -------------------------------------------------------------------------------- /src/rpc/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # 获取当前源文件所在目录的绝对路径 3 | get_filename_component(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) 4 | 5 | # 将该目录下所有源文件添加到 SRC_LIST 变量中 6 | aux_source_directory(${SRC_DIR} SRC_LIST) 7 | 8 | add_library(rpc_lib ${SRC_LIST} ${src_common} ) 9 | target_link_libraries(rpc_lib boost_serialization) 10 | set(src_rpc ${SRC_LIST} CACHE INTERNAL "Description of the variable") 11 | 12 | -------------------------------------------------------------------------------- /example/raftCoreExample/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SRC_LIST raftKvDB.cpp) 2 | 3 | 4 | add_executable(raftCoreRun ${SRC_LIST}) 5 | target_link_libraries(raftCoreRun 
skip_list_on_raft rpc_lib protobuf muduo_net muduo_base pthread ) 6 | 7 | ################################# 8 | 9 | set(SRC_LIST2 caller.cpp) 10 | add_executable(callerMain ${src_raftClerk} ${SRC_LIST2} ${src_common}) 11 | target_link_libraries(callerMain skip_list_on_raft protobuf boost_serialization ) -------------------------------------------------------------------------------- /.github/workflows/update_readme_contributors.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | 6 | jobs: 7 | contrib-readme-job: 8 | runs-on: ubuntu-latest 9 | name: Update contributors in README.md 10 | steps: 11 | - name: Contribute List 12 | uses: akhilmhdh/contributors-readme-action@v2.3.6 13 | with: 14 | image_size: 80 15 | columns_per_row: 8 16 | env: 17 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 18 | -------------------------------------------------------------------------------- /example/raftCoreExample/caller.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-6-4. 
3 | // 4 | #include 5 | #include "clerk.h" 6 | #include "util.h" 7 | int main() { 8 | Clerk client; 9 | client.Init("test.conf"); 10 | auto start = now(); 11 | int count = 500; 12 | int tmp = count; 13 | while (tmp--) { 14 | client.Put("x", std::to_string(tmp)); 15 | 16 | std::string get1 = client.Get("x"); 17 | std::printf("get return :{%s}\r\n", get1.c_str()); 18 | } 19 | return 0; 20 | } -------------------------------------------------------------------------------- /src/rpc/include/mprpcconfig.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | // rpcserverip rpcserverport zookeeperip zookeeperport 7 | // 框架读取配置文件类 8 | class MprpcConfig { 9 | public: 10 | // 负责解析加载配置文件 11 | void LoadConfigFile(const char *config_file); 12 | // 查询配置项信息 13 | std::string Load(const std::string &key); 14 | 15 | private: 16 | std::unordered_map m_configMap; 17 | // 去掉字符串前后的空格 18 | void Trim(std::string &src_buf); 19 | }; -------------------------------------------------------------------------------- /.github/workflows/greetings.yml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: [pull_request_target, issues] 4 | 5 | jobs: 6 | greeting: 7 | runs-on: ubuntu-latest 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | steps: 12 | - uses: actions/first-interaction@v1 13 | with: 14 | repo-token: ${{ secrets.GITHUB_TOKEN }} 15 | issue-message: "Message that will be displayed on users' first issue" 16 | pr-message: "Message that will be displayed on users' first pull request" 17 | -------------------------------------------------------------------------------- /test/defer_run.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by henry on 24-1-23. 
3 | // 4 | #include 5 | #include 6 | #include "include/defer.h" 7 | 8 | using namespace std; 9 | 10 | void testFun1(const string& name) { cout << name; } 11 | 12 | void testFun2(const string& name) { cout << name; } 13 | 14 | int main() { 15 | cout << "begin..." << endl; 16 | string str1 = "Hello"; 17 | string str2 = " world"; 18 | DEFER { 19 | testFun1(str1); 20 | testFun2(str2); 21 | }; 22 | cout << "end..." << endl; 23 | return 0; 24 | } -------------------------------------------------------------------------------- /.run/provider.run.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | -------------------------------------------------------------------------------- /test/format.cpp: -------------------------------------------------------------------------------- 1 | #include "../src/common/include/util.h" 2 | 3 | void myAssert(bool condition, std::string message) { 4 | if (!condition) { 5 | std::cerr << "Error: " << message << std::endl; 6 | std::exit(EXIT_FAILURE); 7 | } 8 | } 9 | 10 | int main() { 11 | // 测试格式化函数 12 | myAssert(false, format("[func-AppendEntries-rf{%d}] 两节点logIndex{%d}和term{%d}相同,但是其command{%d:%d} " 13 | " {%d:%d}却不同!!\n", 14 | 1, 2, 3, 4, 5, 6, 7)); 15 | return 0; 16 | } 17 | 18 | // 编译命令: g++ test.cpp -o test -lboost_serialization 19 | 20 | 21 | -------------------------------------------------------------------------------- /example/rpcExample/friend.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package fixbug; //所在的命名空间 4 | 5 | option cc_generic_services = true; //开启桩stub服务 6 | 7 | message ResultCode 8 | { 9 | int32 errcode = 1; 10 | bytes errmsg = 2; 11 | } 12 | 13 | message GetFriendsListRequest //请求,响应 14 | { 15 | uint32 userid = 1; 16 | } 17 | 18 | message GetFriendsListResponse //请求,响应 19 | { 20 | ResultCode result = 1; 21 | repeated bytes friends = 2; 22 | } 23 | 24 | // 好友模块 25 | service FiendServiceRpc 
//具体的服务模块和服务方法 26 | { 27 | rpc GetFriendsList(GetFriendsListRequest) returns(GetFriendsListResponse); 28 | } -------------------------------------------------------------------------------- /src/raftCore/include/ApplyMsg.h: -------------------------------------------------------------------------------- 1 | #ifndef APPLYMSG_H 2 | #define APPLYMSG_H 3 | #include 4 | class ApplyMsg { 5 | public: 6 | bool CommandValid; 7 | std::string Command; 8 | int CommandIndex; 9 | bool SnapshotValid; 10 | std::string Snapshot; 11 | int SnapshotTerm; 12 | int SnapshotIndex; 13 | 14 | public: 15 | //两个valid最开始要赋予false!! 16 | ApplyMsg() 17 | : CommandValid(false), 18 | Command(), 19 | CommandIndex(-1), 20 | SnapshotValid(false), 21 | SnapshotTerm(-1), 22 | SnapshotIndex(-1){ 23 | 24 | }; 25 | }; 26 | 27 | #endif // APPLYMSG_H -------------------------------------------------------------------------------- /src/rpc/include/mprpccontroller.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | class MprpcController : public google::protobuf::RpcController { 6 | public: 7 | MprpcController(); 8 | void Reset(); 9 | bool Failed() const; 10 | std::string ErrorText() const; 11 | void SetFailed(const std::string& reason); 12 | 13 | // 目前未实现具体的功能 14 | void StartCancel(); 15 | bool IsCanceled() const; 16 | void NotifyOnCancel(google::protobuf::Closure* callback); 17 | 18 | private: 19 | bool m_failed; // RPC方法执行过程中的状态 20 | std::string m_errText; // RPC方法执行过程中的错误信息 21 | }; -------------------------------------------------------------------------------- /.run/raftCoreRun.run.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | -------------------------------------------------------------------------------- /test/include/defer.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by henry on 24-1-23. 
3 | // 4 | 5 | #ifndef KVRAFTCPP_DEFER_H 6 | #define KVRAFTCPP_DEFER_H 7 | #include 8 | #include 9 | 10 | template 11 | class Defer { 12 | public: 13 | Defer(F&& f) : m_func(std::forward(f)) {} 14 | Defer(const F& f) : m_func(f) {} 15 | ~Defer() { m_func(); } 16 | 17 | Defer(const Defer& e) = delete; 18 | Defer& operator=(const Defer& e) = delete; 19 | 20 | private: 21 | F m_func; 22 | }; 23 | 24 | #define _CONCAT(a, b) a##b 25 | #define _MAKE_DEFER_(line) Defer _CONCAT(defer, line) = [&]() 26 | 27 | #undef DEFER 28 | #define DEFER _MAKE_DEFER_(__LINE__) 29 | 30 | #endif // KVRAFTCPP_DEFER_H 31 | -------------------------------------------------------------------------------- /example/fiberExample/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set( 2 | LIB_LIB 3 | fiber_lib 4 | pthread 5 | dl 6 | ) 7 | 8 | add_executable(test_server server.cpp) 9 | target_link_libraries(test_server ${LIB_LIB}) 10 | #add_dependencies(test_server monsoon) 11 | 12 | add_executable(test_scheduler test_scheduler.cpp) 13 | target_link_libraries(test_scheduler ${LIB_LIB}) 14 | #add_dependencies(test_scheduler monsoon) 15 | 16 | add_executable(test_iomanager test_iomanager.cpp) 17 | target_link_libraries(test_iomanager ${LIB_LIB}) 18 | #add_dependencies(test_iomanager monsoon) 19 | 20 | add_executable(test_hook test_hook.cpp) 21 | target_link_libraries(test_hook ${LIB_LIB}) 22 | #add_dependencies(test_hook monsoon) 23 | 24 | -------------------------------------------------------------------------------- /src/rpc/mprpccontroller.cpp: -------------------------------------------------------------------------------- 1 | #include "mprpccontroller.h" 2 | 3 | MprpcController::MprpcController() { 4 | m_failed = false; 5 | m_errText = ""; 6 | } 7 | 8 | void MprpcController::Reset() { 9 | m_failed = false; 10 | m_errText = ""; 11 | } 12 | 13 | bool MprpcController::Failed() const { return m_failed; } 14 | 15 | std::string 
MprpcController::ErrorText() const { return m_errText; } 16 | 17 | void MprpcController::SetFailed(const std::string& reason) { 18 | m_failed = true; 19 | m_errText = reason; 20 | } 21 | 22 | // 目前未实现具体的功能 23 | void MprpcController::StartCancel() {} 24 | bool MprpcController::IsCanceled() const { return false; } 25 | void MprpcController::NotifyOnCancel(google::protobuf::Closure* callback) {} -------------------------------------------------------------------------------- /docs/目录导览.md: -------------------------------------------------------------------------------- 1 | 2 | 更新时间:2024年3月19日 3 | 4 | 5 | 6 | 7 | 8 | 9 | ```angular2html 10 | . 11 | ├── bin 生成的可执行文件存放地 12 | ├── cmake-build-debug 项目编译目录,默认是没有的,需要自己创建 13 | ├── docs 项目文档存放地 14 | │   └── images 项目文档图片存放地 15 | ├── example 范例代码存放地 16 | │   ├── fiberExample 协程相关代码 17 | │   ├── raftCoreExample raft核心代码 18 | │   └── rpcExample rpc相关代码 19 | ├── lib 项目编译后的库文件存放地 20 | ├── src 【重点】项目源代码存放地,按照子模块组织 21 | │   ├── common 子模块共用的,一般是一些util,日志,配置文件 22 | │   ├── fiber 协程相关代码 23 | │   ├── raftClerk raft客户端代码 24 | │   ├── raftCore raft核心代码 25 | │   ├── raftRpcPro raft中rpc涉及的protoc文件 26 | │   ├── rpc rpc库相关代码 27 | │   └── skipList 跳表(上层状态机)相关代码 28 | └── test 测试代码存放地,作用不大,一般是对一些不确定的特性进行测试 29 | ``` 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /src/common/include/config.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-12-23. 
3 | // 4 | 5 | #ifndef CONFIG_H 6 | #define CONFIG_H 7 | 8 | const bool Debug = true; 9 | 10 | const int debugMul = 1; // 时间单位:time.Millisecond,不同网络环境rpc速度不同,因此需要乘以一个系数 11 | const int HeartBeatTimeout = 25 * debugMul; // 心跳时间一般要比选举超时小一个数量级 12 | const int ApplyInterval = 10 * debugMul; // 13 | 14 | const int minRandomizedElectionTime = 300 * debugMul; // ms 15 | const int maxRandomizedElectionTime = 500 * debugMul; // ms 16 | 17 | const int CONSENSUS_TIMEOUT = 500 * debugMul; // ms 18 | 19 | // 协程相关设置 20 | 21 | const int FIBER_THREAD_NUM = 1; // 协程库中线程池大小 22 | const bool FIBER_USE_CALLER_THREAD = false; // 是否使用caller_thread执行调度任务 23 | 24 | #endif // CONFIG_H 25 | -------------------------------------------------------------------------------- /example/fiberExample/test_thread.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "monsoon.h" 4 | 5 | void func1() 6 | { 7 | std::cout << "name: " << monsoon::Thread::GetThis()->GetName() << ",id: " << monsoon::GetThreadId() << std::endl; 8 | } 9 | 10 | void func2() 11 | { 12 | std::cout << "name: " << monsoon::Thread::GetName() << ",id: " << monsoon::GetThreadId() << std::endl; 13 | } 14 | 15 | int main(int argc, char **argv) 16 | { 17 | std::vector tpool; 18 | for (int i = 0; i < 5; i++) 19 | { 20 | // std::cout<<"haha"; 21 | monsoon::Thread::ptr t(new monsoon::Thread(&func1, "name_" + std::to_string(i))); 22 | tpool.push_back(t); 23 | } 24 | 25 | for (int i = 0; i < 5; i++) 26 | { 27 | tpool[i]->join(); 28 | } 29 | 30 | std::cout << "-----thread_test end-----" << std::endl; 31 | } 32 | -------------------------------------------------------------------------------- /src/raftClerk/include/raftServerRpcUtil.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 24-1-4. 
3 | // 4 | 5 | #ifndef RAFTSERVERRPC_H 6 | #define RAFTSERVERRPC_H 7 | 8 | #include 9 | #include "kvServerRPC.pb.h" 10 | #include "mprpcchannel.h" 11 | #include "mprpccontroller.h" 12 | #include "rpcprovider.h" 13 | 14 | /// @brief 维护当前节点对其他某一个结点的所有rpc通信,包括接收其他节点的rpc和发送 15 | // 对于一个节点来说,对于任意其他的节点都要维护一个rpc连接, 16 | class raftServerRpcUtil { 17 | private: 18 | raftKVRpcProctoc::kvServerRpc_Stub* stub; 19 | 20 | public: 21 | //主动调用其他节点的三个方法,可以按照mit6824来调用,但是别的节点调用自己的好像就不行了,要继承protoc提供的service类才行 22 | 23 | //响应其他节点的方法 24 | bool Get(raftKVRpcProctoc::GetArgs* GetArgs, raftKVRpcProctoc::GetReply* reply); 25 | bool PutAppend(raftKVRpcProctoc::PutAppendArgs* args, raftKVRpcProctoc::PutAppendReply* reply); 26 | 27 | raftServerRpcUtil(std::string ip, short port); 28 | ~raftServerRpcUtil(); 29 | }; 30 | 31 | #endif // RAFTSERVERRPC_H 32 | -------------------------------------------------------------------------------- /src/raftCore/include/raftRpcUtil.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-12-28. 
3 | // 4 | 5 | #ifndef RAFTRPC_H 6 | #define RAFTRPC_H 7 | 8 | #include "raftRPC.pb.h" 9 | 10 | /// @brief 维护当前节点对其他某一个结点的所有rpc发送通信的功能 11 | // 对于一个raft节点来说,对于任意其他的节点都要维护一个rpc连接,即MprpcChannel 12 | class RaftRpcUtil { 13 | private: 14 | raftRpcProctoc::raftRpc_Stub *stub_; 15 | 16 | public: 17 | //主动调用其他节点的三个方法,可以按照mit6824来调用,但是别的节点调用自己的好像就不行了,要继承protoc提供的service类才行 18 | bool AppendEntries(raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *response); 19 | bool InstallSnapshot(raftRpcProctoc::InstallSnapshotRequest *args, raftRpcProctoc::InstallSnapshotResponse *response); 20 | bool RequestVote(raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *response); 21 | //响应其他节点的方法 22 | /** 23 | * 24 | * @param ip 远端ip 25 | * @param port 远端端口 26 | */ 27 | RaftRpcUtil(std::string ip, short port); 28 | ~RaftRpcUtil(); 29 | }; 30 | 31 | #endif // RAFTRPC_H 32 | -------------------------------------------------------------------------------- /src/fiber/include/thread.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __SYLAR_THREAD_H_ 2 | #define __SYLAR_THREAD_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | namespace monsoon { 16 | class Thread { 17 | public: 18 | typedef std::shared_ptr ptr; 19 | Thread(std::function cb, const std::string &name); 20 | ~Thread(); 21 | pid_t getId() const { return id_; } 22 | const std::string &getName() const { return name_; } 23 | void join(); 24 | static Thread *GetThis(); 25 | static const std::string &GetName(); 26 | static void SetName(const std::string &name); 27 | 28 | private: 29 | Thread(const Thread &) = delete; 30 | Thread(const Thread &&) = delete; 31 | Thread operator=(const Thread &) = delete; 32 | 33 | static void *run(void *args); 34 | 35 | private: 36 | pid_t id_; 37 | pthread_t thread_; 38 | std::function cb_; 39 | std::string name_; 
40 | }; 41 | } // namespace monsoon 42 | 43 | #endif -------------------------------------------------------------------------------- /src/raftClerk/raftServerRpcUtil.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 24-1-4. 3 | // 4 | #include "raftServerRpcUtil.h" 5 | 6 | // kvserver不同于raft节点之间,kvserver的rpc是用于clerk向kvserver调用,不会被调用,因此只用写caller功能,不用写callee功能 7 | //先开启服务器,再尝试连接其他的节点,中间给一个间隔时间,等待其他的rpc服务器节点启动 8 | raftServerRpcUtil::raftServerRpcUtil(std::string ip, short port) { 9 | //********************************************* */ 10 | // 接收rpc设置 11 | //********************************************* */ 12 | //发送rpc设置 13 | stub = new raftKVRpcProctoc::kvServerRpc_Stub(new MprpcChannel(ip, port, false)); 14 | } 15 | 16 | raftServerRpcUtil::~raftServerRpcUtil() { delete stub; } 17 | 18 | bool raftServerRpcUtil::Get(raftKVRpcProctoc::GetArgs *GetArgs, raftKVRpcProctoc::GetReply *reply) { 19 | MprpcController controller; 20 | stub->Get(&controller, GetArgs, reply, nullptr); 21 | return !controller.Failed(); 22 | } 23 | 24 | bool raftServerRpcUtil::PutAppend(raftKVRpcProctoc::PutAppendArgs *args, raftKVRpcProctoc::PutAppendReply *reply) { 25 | MprpcController controller; 26 | stub->PutAppend(&controller, args, reply, nullptr); 27 | if (controller.Failed()) { 28 | std::cout << controller.ErrorText() << endl; 29 | } 30 | return !controller.Failed(); 31 | } 32 | -------------------------------------------------------------------------------- /src/raftClerk/include/clerk.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-6-4. 
3 | // 4 | 5 | #ifndef SKIP_LIST_ON_RAFT_CLERK_H 6 | #define SKIP_LIST_ON_RAFT_CLERK_H 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include "kvServerRPC.pb.h" 17 | #include "mprpcconfig.h" 18 | class Clerk { 19 | private: 20 | std::vector> 21 | m_servers; //保存所有raft节点的fd //todo:全部初始化为-1,表示没有连接上 22 | std::string m_clientId; 23 | int m_requestId; 24 | int m_recentLeaderId; //只是有可能是领导 25 | 26 | std::string Uuid() { 27 | return std::to_string(rand()) + std::to_string(rand()) + std::to_string(rand()) + std::to_string(rand()); 28 | } //用于返回随机的clientId 29 | 30 | // MakeClerk todo 31 | void PutAppend(std::string key, std::string value, std::string op); 32 | 33 | public: 34 | //对外暴露的三个功能和初始化 35 | void Init(std::string configFileName); 36 | std::string Get(std::string key); 37 | 38 | void Put(std::string key, std::string value); 39 | void Append(std::string key, std::string value); 40 | 41 | public: 42 | Clerk(); 43 | }; 44 | 45 | #endif // SKIP_LIST_ON_RAFT_CLERK_H 46 | -------------------------------------------------------------------------------- /src/raftCore/include/Persister.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-5-30. 
3 | // 4 | 5 | #ifndef SKIP_LIST_ON_RAFT_PERSISTER_H 6 | #define SKIP_LIST_ON_RAFT_PERSISTER_H 7 | #include 8 | #include 9 | class Persister { 10 | private: 11 | std::mutex m_mtx; 12 | std::string m_raftState; 13 | std::string m_snapshot; 14 | /** 15 | * m_raftStateFileName: raftState文件名 16 | */ 17 | const std::string m_raftStateFileName; 18 | /** 19 | * m_snapshotFileName: snapshot文件名 20 | */ 21 | const std::string m_snapshotFileName; 22 | /** 23 | * 保存raftState的输出流 24 | */ 25 | std::ofstream m_raftStateOutStream; 26 | /** 27 | * 保存snapshot的输出流 28 | */ 29 | std::ofstream m_snapshotOutStream; 30 | /** 31 | * 保存raftStateSize的大小 32 | * 避免每次都读取文件来获取具体的大小 33 | */ 34 | long long m_raftStateSize; 35 | 36 | public: 37 | void Save(std::string raftstate, std::string snapshot); 38 | std::string ReadSnapshot(); 39 | void SaveRaftState(const std::string& data); 40 | long long RaftStateSize(); 41 | std::string ReadRaftState(); 42 | explicit Persister(int me); 43 | ~Persister(); 44 | 45 | private: 46 | void clearRaftState(); 47 | void clearSnapshot(); 48 | void clearRaftStateAndSnapshot(); 49 | }; 50 | 51 | #endif // SKIP_LIST_ON_RAFT_PERSISTER_H 52 | -------------------------------------------------------------------------------- /src/fiber/include/singleton.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_SINGLETON_H__ 2 | #define __MONSOON_SINGLETON_H__ 3 | 4 | #include 5 | 6 | namespace monsoon { 7 | namespace { 8 | template 9 | T &GetInstanceX() { 10 | static T v; 11 | return v; 12 | } 13 | 14 | template 15 | std::shared_ptr GetInstancePtr() { 16 | static std::shared_ptr v(new T); 17 | return v; 18 | } 19 | } // namespace 20 | 21 | /** 22 | * @brief 单例模式封装类 23 | * @details T 类型 24 | * X 为了创造多个实例对应的Tag 25 | * N 同一个Tag创造多个实例索引 26 | */ 27 | template 28 | class Singleton { 29 | public: 30 | /** 31 | * @brief 返回单例裸指针 32 | */ 33 | static T *GetInstance() { 34 | static T v; 35 | return &v; 36 | // return &GetInstanceX(); 
37 | } 38 | }; 39 | 40 | /** 41 | * @brief 单例模式智能指针封装类 42 | * @details T 类型 43 | * X 为了创造多个实例对应的Tag 44 | * N 同一个Tag创造多个实例索引 45 | */ 46 | template 47 | class SingletonPtr { 48 | public: 49 | // 返回单例智能指针 50 | static std::shared_ptr GetInstance() { 51 | static std::shared_ptr v(new T); 52 | return v; 53 | // return GetInstancePtr(); 54 | } 55 | }; 56 | 57 | } // namespace monsoon 58 | 59 | #endif -------------------------------------------------------------------------------- /src/raftCore/raftRpcUtil.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-12-28. 3 | // 4 | 5 | #include "raftRpcUtil.h" 6 | 7 | #include 8 | #include 9 | 10 | bool RaftRpcUtil::AppendEntries(raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *response) { 11 | MprpcController controller; 12 | stub_->AppendEntries(&controller, args, response, nullptr); 13 | return !controller.Failed(); 14 | } 15 | 16 | bool RaftRpcUtil::InstallSnapshot(raftRpcProctoc::InstallSnapshotRequest *args, 17 | raftRpcProctoc::InstallSnapshotResponse *response) { 18 | MprpcController controller; 19 | stub_->InstallSnapshot(&controller, args, response, nullptr); 20 | return !controller.Failed(); 21 | } 22 | 23 | bool RaftRpcUtil::RequestVote(raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *response) { 24 | MprpcController controller; 25 | stub_->RequestVote(&controller, args, response, nullptr); 26 | return !controller.Failed(); 27 | } 28 | 29 | //先开启服务器,再尝试连接其他的节点,中间给一个间隔时间,等待其他的rpc服务器节点启动 30 | 31 | RaftRpcUtil::RaftRpcUtil(std::string ip, short port) { 32 | //********************************************* */ 33 | //发送rpc设置 34 | stub_ = new raftRpcProctoc::raftRpc_Stub(new MprpcChannel(ip, port, true)); 35 | } 36 | 37 | RaftRpcUtil::~RaftRpcUtil() { delete stub_; } 38 | -------------------------------------------------------------------------------- /src/rpc/include/mprpcchannel.h: 
-------------------------------------------------------------------------------- 1 | #ifndef MPRPCCHANNEL_H 2 | #define MPRPCCHANNEL_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include // 包含 std::generate_n() 和 std::generate() 函数的头文件 9 | #include 10 | #include 11 | #include 12 | #include // 包含 std::uniform_int_distribution 类型的头文件 13 | #include 14 | #include 15 | #include 16 | using namespace std; 17 | 18 | // 真正负责发送和接受的前后处理工作 19 | // 如消息的组织方式,向哪个节点发送等等 20 | class MprpcChannel : public google::protobuf::RpcChannel { 21 | public: 22 | // 所有通过stub代理对象调用的rpc方法,都走到这里了,统一做rpc方法调用的数据数据序列化和网络发送 那一步 23 | void CallMethod(const google::protobuf::MethodDescriptor *method, google::protobuf::RpcController *controller, 24 | const google::protobuf::Message *request, google::protobuf::Message *response, 25 | google::protobuf::Closure *done) override; 26 | MprpcChannel(string ip, short port, bool connectNow); 27 | 28 | private: 29 | int m_clientFd; 30 | const std::string m_ip; //保存ip和端口,如果断了可以尝试重连 31 | const uint16_t m_port; 32 | /// @brief 连接ip和端口,并设置m_clientFd 33 | /// @param ip ip地址,本机字节序 34 | /// @param port 端口,本机字节序 35 | /// @return 成功返回空字符串,否则返回失败信息 36 | bool newConnect(const char *ip, uint16_t port, string *errMsg); 37 | }; 38 | 39 | #endif // MPRPCCHANNEL_H -------------------------------------------------------------------------------- /src/rpc/include/rpcprovider.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "google/protobuf/service.h" 11 | 12 | // 框架提供的专门发布rpc服务的网络对象类 13 | // todo:现在rpc客户端变成了 长连接,因此rpc服务器这边最好提供一个定时器,用以断开很久没有请求的连接。 14 | // todo:为了配合这个,那么rpc客户端那边每次发送之前也需要真正的 15 | class RpcProvider { 16 | public: 17 | // 这里是框架提供给外部使用的,可以发布rpc方法的函数接口 18 | void NotifyService(google::protobuf::Service *service); 19 | 20 | // 启动rpc服务节点,开始提供rpc远程网络调用服务 21 | void Run(int nodeIndex, short 
port); 22 | 23 | private: 24 | // 组合EventLoop 25 | muduo::net::EventLoop m_eventLoop; 26 | std::shared_ptr m_muduo_server; 27 | 28 | // service服务类型信息 29 | struct ServiceInfo { 30 | google::protobuf::Service *m_service; // 保存服务对象 31 | std::unordered_map m_methodMap; // 保存服务方法 32 | }; 33 | // 存储注册成功的服务对象和其服务方法的所有信息 34 | std::unordered_map m_serviceMap; 35 | 36 | // 新的socket连接回调 37 | void OnConnection(const muduo::net::TcpConnectionPtr &); 38 | // 已建立连接用户的读写事件回调 39 | void OnMessage(const muduo::net::TcpConnectionPtr &, muduo::net::Buffer *, muduo::Timestamp); 40 | // Closure的回调操作,用于序列化rpc的响应和网络发送 41 | void SendRpcResponse(const muduo::net::TcpConnectionPtr &, google::protobuf::Message *); 42 | 43 | public: 44 | ~RpcProvider(); 45 | }; -------------------------------------------------------------------------------- /src/raftRpcPro/kvServerRPC.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | 4 | package raftKVRpcProctoc; //所在的命名空间 5 | 6 | option cc_generic_services = true; //开启stub服务 7 | 8 | // 日志实体 9 | message GetArgs{ 10 | bytes Key = 1 ; 11 | bytes ClientId = 2 ; 12 | int32 RequestId = 3; 13 | } 14 | 15 | 16 | message GetReply { 17 | // 下面几个参数和论文中相同 18 | bytes Err = 1; 19 | bytes Value = 2; 20 | 21 | } 22 | 23 | 24 | // Put or Append 25 | message PutAppendArgs { 26 | bytes Key = 1; 27 | bytes Value = 2 ; 28 | bytes Op = 3; 29 | // "Put" or "Append" 30 | // You'll have to add definitions here. 31 | // Field names must start with capital letters, 32 | // otherwise RPC will break. 
33 | bytes ClientId = 4; 34 | int32 RequestId = 5; 35 | } 36 | 37 | message PutAppendReply { 38 | bytes Err = 1; 39 | } 40 | 41 | 42 | //只有raft节点之间才会涉及rpc通信 43 | service kvServerRpc 44 | { 45 | //PutAppend(args *PutAppendArgs, reply *PutAppendReply) 46 | //Get(args *GetArgs, reply *GetReply) 47 | 48 | rpc PutAppend(PutAppendArgs) returns(PutAppendReply); 49 | rpc Get (GetArgs) returns (GetReply); 50 | } 51 | // message ResultCode 52 | // { 53 | // int32 errcode = 1; 54 | // bytes errmsg = 2; 55 | // } 56 | 57 | // message GetFriendsListRequest //请求,响应 58 | // { 59 | // uint32 userid = 1; 60 | // } 61 | 62 | // message GetFriendsListResponse //请求,响应 63 | // { 64 | // ResultCode result = 1; 65 | // repeated bytes friends = 2; 66 | // } 67 | 68 | // // 好友模块 69 | // service FiendServiceRpc //具体的服务模块和服务方法 70 | // { 71 | // rpc GetFriendsList(GetFriendsListRequest) returns(GetFriendsListResponse); 72 | // } 73 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 设置cmake的最低版本和项目名称 2 | cmake_minimum_required(VERSION 3.22) 3 | project(KVRaftCpp) 4 | 5 | 6 | 7 | 8 | 9 | 10 | set(CMAKE_CXX_STANDARD 20) 11 | # 生成debug版本,可以进行gdb调试 12 | set(CMAKE_BUILD_TYPE "Debug") 13 | 14 | # 设置项目可执行文件输出的路径 15 | set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) 16 | # 设置项目库文件输出的路径 17 | set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) 18 | 19 | # 设置项目编译头文件搜索路径 -I 20 | # 目前可能存在路径污染的问题,需要进一步解决 21 | include_directories(${PROJECT_SOURCE_DIR}/src/common/include) 22 | include_directories(${PROJECT_SOURCE_DIR}/src/fiber/include) 23 | include_directories(${PROJECT_SOURCE_DIR}/src/rpc/include) 24 | include_directories(${PROJECT_SOURCE_DIR}/example) 25 | include_directories(${PROJECT_SOURCE_DIR}/src/raftCore/include) 26 | include_directories(${PROJECT_SOURCE_DIR}/src/raftRpcPro/include) 27 | include_directories(${PROJECT_SOURCE_DIR}/src/raftClerk/include) 28 | 
include_directories(${PROJECT_SOURCE_DIR}/src/skipList/include) 29 | 30 | 31 | # 设置项目库文件搜索路径 -L 32 | link_directories(${PROJECT_SOURCE_DIR}/lib) 33 | 34 | # src包含了所有的相关代码 35 | add_subdirectory(src) 36 | # example包含了使用的示例代码 37 | add_subdirectory(example) 38 | 39 | add_library(skip_list_on_raft STATIC ${src_rpc} ${src_fiber} ${rpc_example} ${raftsource} ${src_raftCore} ${src_raftRpcPro}) 40 | target_link_libraries(skip_list_on_raft muduo_net muduo_base pthread dl) 41 | # 添加格式化目标 start 42 | # from : https://blog.csdn.net/guotianqing/article/details/121661067 43 | 44 | add_custom_target(format 45 | COMMAND bash ${PROJECT_SOURCE_DIR}/format.sh 46 | WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} 47 | COMMAND echo "format done!" 48 | ) 49 | 50 | 51 | # 添加格式化目标 end -------------------------------------------------------------------------------- /test/run.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 24-1-5. 3 | // 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | class SerializablePair { 12 | public: 13 | SerializablePair() = default; 14 | SerializablePair(const std::string& first, const std::string& second) : first(first), second(second) {} 15 | 16 | template 17 | void serialize(Archive& ar, const unsigned int version) { 18 | ar& first; 19 | ar& second; 20 | } 21 | 22 | private: 23 | std::string first; 24 | std::string second; 25 | }; 26 | 27 | int main() { 28 | // 创建 vector> 对象 29 | std::vector> data; 30 | data.emplace_back("key1", "value1"); 31 | data.emplace_back("key2", "value2"); 32 | data.emplace_back("key3", "value3"); 33 | 34 | // 打开一个输出文件流 35 | std::ofstream ofs("data_vector.txt"); 36 | 37 | // 创建文本输出存档对象 38 | boost::archive::text_oarchive oa(ofs); 39 | 40 | // 序列化 vector> 到文本输出存档 41 | oa << data; 42 | 43 | // 关闭输出文件流 44 | ofs.close(); 45 | 46 | // 打开一个输入文件流 47 | std::ifstream ifs("data_vector.txt"); 48 | 49 | // 创建文本输入存档对象 50 | boost::archive::text_iarchive 
ia(ifs); 51 | 52 | // 创建空的 vector> 对象 53 | std::vector> loadedData; 54 | 55 | // 反序列化数据到 vector> 对象 56 | ia >> loadedData; 57 | 58 | // 输出反序列化后的数据 59 | for (const auto& pair : loadedData) { 60 | std::cout << "Key: " << pair.first << ", Value: " << pair.second << std::endl; 61 | } 62 | 63 | return 0; 64 | } 65 | -------------------------------------------------------------------------------- /example/rpcExample/callee/friendService.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-12-21. 3 | // 4 | #include 5 | #include 6 | #include 7 | #include "rpcExample/friend.pb.h" 8 | 9 | #include 10 | #include "rpcprovider.h" 11 | 12 | class FriendService : public fixbug::FiendServiceRpc { 13 | public: 14 | std::vector GetFriendsList(uint32_t userid) { 15 | std::cout << "local do GetFriendsList service! userid:" << userid << std::endl; 16 | std::vector vec; 17 | vec.push_back("gao yang"); 18 | vec.push_back("liu hong"); 19 | vec.push_back("wang shuo"); 20 | return vec; 21 | } 22 | 23 | // 重写基类方法 24 | void GetFriendsList(::google::protobuf::RpcController *controller, const ::fixbug::GetFriendsListRequest *request, 25 | ::fixbug::GetFriendsListResponse *response, ::google::protobuf::Closure *done) { 26 | uint32_t userid = request->userid(); 27 | std::vector friendsList = GetFriendsList(userid); 28 | response->mutable_result()->set_errcode(0); 29 | response->mutable_result()->set_errmsg(""); 30 | for (std::string &name : friendsList) { 31 | std::string *p = response->add_friends(); 32 | *p = name; 33 | } 34 | done->Run(); 35 | } 36 | }; 37 | 38 | int main(int argc, char **argv) { 39 | std::string ip = "127.0.0.1"; 40 | short port = 7788; 41 | auto stub = new fixbug::FiendServiceRpc_Stub(new MprpcChannel(ip, port, false)); 42 | // provider是一个rpc网络服务对象。把UserService对象发布到rpc节点上 43 | RpcProvider provider; 44 | provider.NotifyService(new FriendService()); 45 | 46 | // 启动一个rpc服务发布节点 Run以后,进程进入阻塞状态,等待远程的rpc调用请求 47 | 
provider.Run(1, 7788); 48 | 49 | return 0; 50 | } 51 | -------------------------------------------------------------------------------- /src/fiber/include/fiber.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_FIBER_H__ 2 | #define __MONSOON_FIBER_H__ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "utils.hpp" 11 | 12 | namespace monsoon { 13 | class Fiber : public std::enable_shared_from_this { 14 | public: 15 | typedef std::shared_ptr ptr; 16 | // Fiber状态机 17 | enum State { 18 | // 就绪态,刚创建后者yield后状态 19 | READY, 20 | // 运行态,resume之后的状态 21 | RUNNING, 22 | // 结束态,协程的回调函数执行完之后的状态 23 | TERM, 24 | }; 25 | 26 | private: 27 | // 初始化当前线程的协程功能,构造线程主协程对象 28 | Fiber(); 29 | 30 | public: 31 | // 构造子协程 32 | Fiber(std::function cb, size_t stackSz = 0, bool run_in_scheduler = true); 33 | ~Fiber(); 34 | // 重置协程状态,复用栈空间 35 | void reset(std::function cb); 36 | // 切换协程到运行态 37 | void resume(); 38 | // 让出协程执行权 39 | void yield(); 40 | // 获取协程Id 41 | uint64_t getId() const { return id_; } 42 | // 获取协程状态 43 | State getState() const { return state_; } 44 | 45 | // 设置当前正在运行的协程 46 | static void SetThis(Fiber *f); 47 | // 获取当前线程中的执行线程 48 | // 如果当前线程没有创建协程,则创建第一个协程,且该协程为当前线程的 49 | // 主协程,其他协程通过该协程来调度 50 | static Fiber::ptr GetThis(); 51 | // 协程总数 52 | static uint64_t TotalFiberNum(); 53 | // 协程回调函数 54 | static void MainFunc(); 55 | // 获取当前协程Id 56 | static uint64_t GetCurFiberID(); 57 | 58 | private: 59 | // 协程ID 60 | uint64_t id_ = 0; 61 | // 协程栈大小 62 | uint32_t stackSize_ = 0; 63 | // 协程状态 64 | State state_ = READY; 65 | // 协程上下文 66 | ucontext_t ctx_; 67 | // 协程栈地址 68 | void *stack_ptr = nullptr; 69 | // 协程回调函数 70 | std::function cb_; 71 | // 本协程是否参与调度器调度 72 | bool isRunInScheduler_; 73 | }; 74 | } // namespace monsoon 75 | 76 | #endif -------------------------------------------------------------------------------- /src/rpc/mprpcconfig.cpp: 
-------------------------------------------------------------------------------- 1 | #include "mprpcconfig.h" 2 | 3 | #include 4 | #include 5 | 6 | // 负责解析加载配置文件 7 | void MprpcConfig::LoadConfigFile(const char *config_file) { 8 | FILE *pf = fopen(config_file, "r"); 9 | if (nullptr == pf) { 10 | std::cout << config_file << " is note exist!" << std::endl; 11 | exit(EXIT_FAILURE); 12 | } 13 | 14 | // 1.注释 2.正确的配置项 = 3.去掉开头的多余的空格 15 | while (!feof(pf)) { 16 | char buf[512] = {0}; 17 | fgets(buf, 512, pf); 18 | 19 | // 去掉字符串前面多余的空格 20 | std::string read_buf(buf); 21 | Trim(read_buf); 22 | 23 | // 判断#的注释 24 | if (read_buf[0] == '#' || read_buf.empty()) { 25 | continue; 26 | } 27 | 28 | // 解析配置项 29 | int idx = read_buf.find('='); 30 | if (idx == -1) { 31 | // 配置项不合法 32 | continue; 33 | } 34 | 35 | std::string key; 36 | std::string value; 37 | key = read_buf.substr(0, idx); 38 | Trim(key); 39 | // rpcserverip=127.0.0.1\n 40 | int endidx = read_buf.find('\n', idx); 41 | value = read_buf.substr(idx + 1, endidx - idx - 1); 42 | Trim(value); 43 | m_configMap.insert({key, value}); 44 | } 45 | 46 | fclose(pf); 47 | } 48 | 49 | // 查询配置项信息 50 | std::string MprpcConfig::Load(const std::string &key) { 51 | auto it = m_configMap.find(key); 52 | if (it == m_configMap.end()) { 53 | return ""; 54 | } 55 | return it->second; 56 | } 57 | 58 | // 去掉字符串前后的空格 59 | void MprpcConfig::Trim(std::string &src_buf) { 60 | int idx = src_buf.find_first_not_of(' '); 61 | if (idx != -1) { 62 | // 说明字符串前面有空格 63 | src_buf = src_buf.substr(idx, src_buf.size() - idx); 64 | } 65 | // 去掉字符串后面多余的空格 66 | idx = src_buf.find_last_not_of(' '); 67 | if (idx != -1) { 68 | // 说明字符串后面有空格 69 | src_buf = src_buf.substr(0, idx + 1); 70 | } 71 | } -------------------------------------------------------------------------------- /src/fiber/thread.cpp: -------------------------------------------------------------------------------- 1 | #include "thread.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace monsoon { 5 | // 指向当前线程 
6 | static thread_local Thread *cur_thread = nullptr; 7 | static thread_local std::string cur_thread_name = "UNKNOW"; 8 | 9 | Thread::Thread(std::function cb, const std::string &name = "UNKNOW") : cb_(cb), name_(name) { 10 | if (name.empty()) { 11 | name_ = "UNKNOW"; 12 | } 13 | 14 | int rt = pthread_create(&thread_, nullptr, &Thread::run, this); 15 | if (rt) { 16 | std::cout << "pthread_create error,name:" << name_ << std::endl; 17 | throw std::logic_error("pthread_create"); 18 | } 19 | } 20 | 21 | void *Thread::run(void *arg) { 22 | Thread *thread = (Thread *)arg; 23 | cur_thread = thread; 24 | cur_thread_name = thread->name_; 25 | thread->id_ = monsoon::GetThreadId(); 26 | // 给线程命名 27 | pthread_setname_np(pthread_self(), thread->name_.substr(0, 15).c_str()); 28 | std::function cb; 29 | cb.swap(thread->cb_); 30 | // 启动回调函数 31 | // std::cout << "begin callback " << std::endl; 32 | cb(); 33 | return 0; 34 | } 35 | 36 | Thread::~Thread() { 37 | if (thread_) { 38 | pthread_detach(thread_); 39 | } 40 | } 41 | 42 | void Thread::join() { 43 | if (thread_) { 44 | int rt = pthread_join(thread_, nullptr); 45 | if (rt) { 46 | std::cout << "pthread_join error,name:" << name_ << std::endl; 47 | throw std::logic_error("pthread_join"); 48 | } 49 | thread_ = 0; 50 | } 51 | } 52 | 53 | Thread *Thread::GetThis() { return cur_thread; } 54 | 55 | const std::string &Thread::GetName() { return cur_thread_name; } 56 | 57 | void Thread::SetName(const std::string &name) { 58 | if (name.empty()) { 59 | return; 60 | } 61 | if (cur_thread) { 62 | cur_thread->name_ = name; 63 | } 64 | cur_thread_name = name; 65 | } 66 | 67 | } // namespace monsoon 68 | -------------------------------------------------------------------------------- /src/fiber/include/fd_manager.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __FD_MANAGER_H__ 2 | #define __FD_MANAGER_H__ 3 | 4 | #include 5 | #include 6 | #include "mutex.hpp" 7 | #include "singleton.hpp" 8 | 
#include "thread.hpp" 9 | 10 | namespace monsoon { 11 | // 文件句柄上下文,管理文件句柄类型,阻塞,关闭,读写超时 12 | class FdCtx : public std::enable_shared_from_this { 13 | public: 14 | typedef std::shared_ptr ptr; 15 | 16 | FdCtx(int fd); 17 | ~FdCtx(); 18 | // 是否完成初始化 19 | bool isInit() const { return m_isInit; } 20 | // 是否是socket 21 | bool isSocket() const { return m_isSocket; } 22 | // 是否已经关闭 23 | bool isClose() const { return m_isClosed; } 24 | // 用户主动设置非阻塞 25 | void setUserNonblock(bool v) { m_userNonblock = v; } 26 | // 用户是否主动设置了非阻塞 27 | bool getUserNonblock() const { return m_userNonblock; } 28 | // 设置系统非阻塞 29 | void setSysNonblock(bool v) { m_sysNonblock = v; } 30 | // 获取系统是否非阻塞 31 | bool getSysNonblock() const { return m_sysNonblock; } 32 | // 设置超时时间 33 | void setTimeout(int type, uint64_t v); 34 | uint64_t getTimeout(int type); 35 | 36 | private: 37 | bool init(); 38 | 39 | private: 40 | /// 是否初始化 41 | bool m_isInit : 1; 42 | /// 是否socket 43 | bool m_isSocket : 1; 44 | /// 是否hook非阻塞 45 | bool m_sysNonblock : 1; 46 | /// 是否用户主动设置非阻塞 47 | bool m_userNonblock : 1; 48 | /// 是否关闭 49 | bool m_isClosed : 1; 50 | /// 文件句柄 51 | int m_fd; 52 | /// 读超时时间毫秒 53 | uint64_t m_recvTimeout; 54 | /// 写超时时间毫秒 55 | uint64_t m_sendTimeout; 56 | }; 57 | // 文件句柄管理 58 | class FdManager { 59 | public: 60 | typedef RWMutex RWMutexType; 61 | 62 | FdManager(); 63 | // 获取/创建文件句柄类 64 | // auto_create 是否自动创建 65 | FdCtx::ptr get(int fd, bool auto_create = false); 66 | // 删除文件句柄 67 | void del(int fd); 68 | 69 | private: 70 | /// 读写锁 71 | RWMutexType m_mutex; 72 | /// 文件句柄集合 73 | std::vector m_datas; 74 | }; 75 | 76 | /// 文件句柄单例 77 | typedef Singleton FdMgr; 78 | 79 | } // namespace monsoon 80 | 81 | #endif -------------------------------------------------------------------------------- /src/fiber/include/iomanager.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __SYLAR_IOMANAGER_H__ 2 | #define __SYLAR_IOMANAGER_H__ 3 | 4 | #include "fcntl.h" 5 | #include 
"scheduler.hpp" 6 | #include "string.h" 7 | #include "sys/epoll.h" 8 | #include "timer.hpp" 9 | 10 | namespace monsoon { 11 | enum Event { 12 | NONE = 0x0, 13 | READ = 0x1, 14 | WRITE = 0x4, 15 | }; 16 | 17 | struct EventContext { 18 | Scheduler *scheduler = nullptr; 19 | Fiber::ptr fiber; 20 | std::function cb; 21 | }; 22 | 23 | class FdContext { 24 | friend class IOManager; 25 | 26 | public: 27 | // 获取事件上下文 28 | EventContext &getEveContext(Event event); 29 | // 重置事件上下文 30 | void resetEveContext(EventContext &ctx); 31 | // 触发事件 32 | void triggerEvent(Event event); 33 | 34 | private: 35 | EventContext read; 36 | EventContext write; 37 | int fd = 0; 38 | Event events = NONE; 39 | Mutex mutex; 40 | }; 41 | 42 | class IOManager : public Scheduler, public TimerManager { 43 | public: 44 | typedef std::shared_ptr ptr; 45 | 46 | IOManager(size_t threads = 1, bool use_caller = true, const std::string &name = "IOManager"); 47 | ~IOManager(); 48 | // 添加事件 49 | int addEvent(int fd, Event event, std::function cb = nullptr); 50 | // 删除事件 51 | bool delEvent(int fd, Event event); 52 | // 取消事件 53 | bool cancelEvent(int fd, Event event); 54 | // 取消所有事件 55 | bool cancelAll(int fd); 56 | static IOManager *GetThis(); 57 | 58 | protected: 59 | // 通知调度器有任务要调度 60 | void tickle() override; 61 | // 判断是否可以停止 62 | bool stopping() override; 63 | // idle协程 64 | void idle() override; 65 | // 判断是否可以停止,同时获取最近一个定时超时时间 66 | bool stopping(uint64_t &timeout); 67 | 68 | void OnTimerInsertedAtFront() override; 69 | void contextResize(size_t size); 70 | 71 | private: 72 | int epfd_ = 0; 73 | int tickleFds_[2]; 74 | // 正在等待执行的IO事件数量 75 | std::atomic pendingEventCnt_ = {0}; 76 | RWMutex mutex_; 77 | std::vector fdContexts_; 78 | }; 79 | } // namespace monsoon 80 | 81 | #endif -------------------------------------------------------------------------------- /src/common/util.cpp: -------------------------------------------------------------------------------- 1 | #include "util.h" 2 | #include 3 | #include 4 
| #include 5 | #include 6 | #include 7 | 8 | void myAssert(bool condition, std::string message) { 9 | if (!condition) { 10 | std::cerr << "Error: " << message << std::endl; 11 | std::exit(EXIT_FAILURE); 12 | } 13 | } 14 | 15 | std::chrono::_V2::system_clock::time_point now() { return std::chrono::high_resolution_clock::now(); } 16 | 17 | std::chrono::milliseconds getRandomizedElectionTimeout() { 18 | std::random_device rd; 19 | std::mt19937 rng(rd()); 20 | std::uniform_int_distribution dist(minRandomizedElectionTime, maxRandomizedElectionTime); 21 | 22 | return std::chrono::milliseconds(dist(rng)); 23 | } 24 | 25 | void sleepNMilliseconds(int N) { std::this_thread::sleep_for(std::chrono::milliseconds(N)); }; 26 | 27 | bool getReleasePort(short &port) { 28 | short num = 0; 29 | while (!isReleasePort(port) && num < 30) { 30 | ++port; 31 | ++num; 32 | } 33 | if (num >= 30) { 34 | port = -1; 35 | return false; 36 | } 37 | return true; 38 | } 39 | 40 | bool isReleasePort(unsigned short usPort) { 41 | int s = socket(AF_INET, SOCK_STREAM, IPPROTO_IP); 42 | sockaddr_in addr; 43 | addr.sin_family = AF_INET; 44 | addr.sin_port = htons(usPort); 45 | addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 46 | int ret = ::bind(s, (sockaddr *)&addr, sizeof(addr)); 47 | if (ret != 0) { 48 | close(s); 49 | return false; 50 | } 51 | close(s); 52 | return true; 53 | } 54 | 55 | void DPrintf(const char *format, ...) 
{ 56 | if (Debug) { 57 | // 获取当前的日期,然后取日志信息,写入相应的日志文件当中 a+ 58 | time_t now = time(nullptr); 59 | tm *nowtm = localtime(&now); 60 | va_list args; 61 | va_start(args, format); 62 | std::printf("[%d-%d-%d-%d-%d-%d] ", nowtm->tm_year + 1900, nowtm->tm_mon + 1, nowtm->tm_mday, nowtm->tm_hour, 63 | nowtm->tm_min, nowtm->tm_sec); 64 | std::vprintf(format, args); 65 | std::printf("\n"); 66 | va_end(args); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /example/rpcExample/caller/callFriendService.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-12-21. 3 | // 4 | #include 5 | 6 | // #include "mprpcapplication.h" 7 | #include "rpcExample/friend.pb.h" 8 | 9 | #include "mprpcchannel.h" 10 | #include "mprpccontroller.h" 11 | #include "rpcprovider.h" 12 | 13 | int main(int argc, char **argv) { 14 | // https://askubuntu.com/questions/754213/what-is-difference-between-localhost-address-127-0-0-1-and-127-0-1-1 15 | std::string ip = "127.0.1.1"; 16 | short port = 7788; 17 | 18 | // 演示调用远程发布的rpc方法Login 19 | fixbug::FiendServiceRpc_Stub stub( 20 | new MprpcChannel(ip, port, true)); //注册进自己写的channel类,channel类用于自定义发送格式和负责序列化等操作 21 | // rpc方法的请求参数 22 | fixbug::GetFriendsListRequest request; 23 | request.set_userid(1000); 24 | // rpc方法的响应 25 | fixbug::GetFriendsListResponse response; 26 | // 发起rpc方法的调用,消费这的stub最后都会调用到channel的 call_method方法 同步的rpc调用过程 MprpcChannel::callmethod 27 | MprpcController controller; 28 | //長連接測試 ,發送10次請求 29 | int count = 10; 30 | while (count--) { 31 | std::cout << " 倒数" << count << "次发起RPC请求" << std::endl; 32 | stub.GetFriendsList(&controller, &request, &response, nullptr); 33 | // RpcChannel->RpcChannel::callMethod 集中来做所有rpc方法调用的参数序列化和网络发送 34 | 35 | // 一次rpc调用完成,读调用的结果 36 | // rpc调用是否失败由框架来决定(rpc调用失败 != 业务逻辑返回false) 37 | // rpc和业务本质上是隔离的 38 | if (controller.Failed()) { 39 | std::cout << controller.ErrorText() << std::endl; 40 | } else { 41 
| if (0 == response.result().errcode()) { 42 | std::cout << "rpc GetFriendsList response success!" << std::endl; 43 | int size = response.friends_size(); 44 | for (int i = 0; i < size; i++) { 45 | std::cout << "index:" << (i + 1) << " name:" << response.friends(i) << std::endl; 46 | } 47 | } else { 48 | //这里不是rpc失败, 49 | // 而是业务逻辑的返回值是失败 50 | // 两者要区分清楚 51 | std::cout << "rpc GetFriendsList response error : " << response.result().errmsg() << std::endl; 52 | } 53 | } 54 | sleep(5); // sleep 5 seconds 55 | } 56 | return 0; 57 | } 58 | -------------------------------------------------------------------------------- /src/fiber/include/timer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_TIMER_H__ 2 | # define __MONSSON_TIMER_H__ 3 | 4 | # include 5 | # include 6 | # include 7 | # include "mutex.hpp" 8 | 9 | namespace monsoon { 10 | class TimerManager; 11 | 12 | class Timer : public std::enable_shared_from_this { 13 | friend class TimerManager; 14 | 15 | public: 16 | typedef std::shared_ptr ptr; 17 | 18 | bool cancel(); 19 | bool refresh(); 20 | bool reset(uint64_t ms, bool from_now); 21 | 22 | private: 23 | Timer(uint64_t ms, std::function cb, bool recuring, TimerManager *manager); 24 | Timer(uint64_t next); 25 | 26 | // 是否是循环定时器 27 | bool recurring_ = false; 28 | // 执行周期 29 | uint64_t ms_ = 0; 30 | // 精确的执行时间 31 | uint64_t next_ = 0; 32 | // 回调函数 33 | std::function cb_; 34 | // 管理器 35 | TimerManager *manager_ = nullptr; 36 | 37 | private: 38 | struct Comparator { 39 | bool operator()(const Timer::ptr &lhs, const Timer::ptr &rhs) const; 40 | }; 41 | }; 42 | 43 | class TimerManager { 44 | friend class Timer; 45 | 46 | public: 47 | TimerManager(); 48 | virtual ~TimerManager(); 49 | Timer::ptr addTimer(uint64_t ms, std::function cb, bool recuring = false); 50 | Timer::ptr addConditionTimer(uint64_t ms, std::function cb, std::weak_ptr weak_cond, 51 | bool recurring = false); 52 | // 到最近一个定时器的时间间隔(ms) 53 | 
uint64_t getNextTimer(); 54 | // 获取需要执行的定时器的回调函数列表 55 | void listExpiredCb(std::vector> &cbs); 56 | // 是否有定时器 57 | bool hasTimer(); 58 | 59 | protected: 60 | // 当有新的定时器插入到定时器首部,执行该函数 61 | virtual void OnTimerInsertedAtFront() = 0; 62 | // 将定时器添加到管理器 63 | void addTimer(Timer::ptr val, RWMutex::WriteLock &lock); 64 | 65 | private: 66 | // 检测服务器时间是否被调后了 67 | bool detectClockRolllover(uint64_t now_ms); 68 | 69 | RWMutex mutex_; 70 | // 定时器集合 71 | std::set timers_; 72 | // 是否触发OnTimerInsertedAtFront 73 | bool tickled_ = false; 74 | // 上次执行时间 75 | uint64_t previouseTime_ = 0; 76 | }; 77 | } // namespace monsoon 78 | 79 | #endif -------------------------------------------------------------------------------- /example/fiberExample/test_hook.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "monsoon.h" 7 | 8 | const std::string LOG_HEAD = "[TASK] "; 9 | 10 | void test_sleep() { 11 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_sleep begin" << std::endl; 12 | monsoon::IOManager iom(1, true); 13 | 14 | iom.scheduler([] { 15 | while (1) { 16 | sleep(6); 17 | std::cout << "task 1 sleep for 6s" << std::endl; 18 | } 19 | }); 20 | 21 | iom.scheduler([] { 22 | while (1) { 23 | sleep(2); 24 | std::cout << "task2 sleep for 2s" << std::endl; 25 | } 26 | }); 27 | 28 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_sleep finish" << std::endl; 29 | } 30 | 31 | void test_sock() { 32 | int sock = socket(AF_INET, SOCK_STREAM, 0); 33 | 34 | sockaddr_in addr; 35 | memset(&addr, 0, sizeof(addr)); 36 | addr.sin_family = AF_INET; 37 | addr.sin_port = htons(80); 38 | inet_pton(AF_INET, "36.152.44.96", &addr.sin_addr.s_addr); 39 | 40 | std::cout << "begin connect" << std::endl; 41 | int rt = connect(sock, (const sockaddr *)&addr, sizeof(addr)); 42 | std::cout << "connect rt=" << rt << " errno=" << errno << std::endl; 43 | 44 | 
if (rt) { 45 | return; 46 | } 47 | 48 | const char data[] = "GET / HTTP/1.0\r\n\r\n"; 49 | rt = send(sock, data, sizeof(data), 0); 50 | std::cout << "send rt=" << rt << " errno=" << errno << std::endl; 51 | 52 | if (rt <= 0) { 53 | return; 54 | } 55 | 56 | std::string buff; 57 | buff.resize(4096); 58 | 59 | rt = recv(sock, &buff[0], buff.size(), 0); 60 | std::cout << "recv rt=" << rt << " errno=" << errno << std::endl; 61 | 62 | if (rt <= 0) { 63 | return; 64 | } 65 | 66 | buff.resize(rt); 67 | std::cout << "--------------------------------" << std::endl; 68 | std::cout << buff << std::endl; 69 | std::cout << "--------------------------------" << std::endl; 70 | } 71 | 72 | int main() { 73 | // monsoon::IOManager iom; 74 | // iom.scheduler(test_sock); 75 | 76 | test_sleep(); 77 | } -------------------------------------------------------------------------------- /example/raftCoreExample/raftKvDB.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-12-28. 
3 | // 4 | #include 5 | #include "raft.h" 6 | // #include "kvServer.h" 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | void ShowArgsHelp(); 13 | 14 | int main(int argc, char **argv) { 15 | //////////////////////////////////读取命令参数:节点数量、写入raft节点节点信息到哪个文件 16 | if (argc < 2) { 17 | ShowArgsHelp(); 18 | exit(EXIT_FAILURE); 19 | } 20 | int c = 0; 21 | int nodeNum = 0; 22 | std::string configFileName; 23 | std::random_device rd; 24 | std::mt19937 gen(rd()); 25 | std::uniform_int_distribution<> dis(10000, 29999); 26 | unsigned short startPort = dis(gen); 27 | while ((c = getopt(argc, argv, "n:f:")) != -1) { 28 | switch (c) { 29 | case 'n': 30 | nodeNum = atoi(optarg); 31 | break; 32 | case 'f': 33 | configFileName = optarg; 34 | break; 35 | default: 36 | ShowArgsHelp(); 37 | exit(EXIT_FAILURE); 38 | } 39 | } 40 | std::ofstream file(configFileName, std::ios::out | std::ios::app); 41 | file.close(); 42 | file = std::ofstream(configFileName, std::ios::out | std::ios::trunc); 43 | if (file.is_open()) { 44 | file.close(); 45 | std::cout << configFileName << " 已清空" << std::endl; 46 | } else { 47 | std::cout << "无法打开 " << configFileName << std::endl; 48 | exit(EXIT_FAILURE); 49 | } 50 | for (int i = 0; i < nodeNum; i++) { 51 | short port = startPort + static_cast(i); 52 | std::cout << "start to create raftkv node:" << i << " port:" << port << " pid:" << getpid() << std::endl; 53 | pid_t pid = fork(); // 创建新进程 54 | if (pid == 0) { 55 | // 如果是子进程 56 | // 子进程的代码 57 | 58 | auto kvServer = new KvServer(i, 500, configFileName, port); 59 | pause(); // 子进程进入等待状态,不会执行 return 语句 60 | } else if (pid > 0) { 61 | // 如果是父进程 62 | // 父进程的代码 63 | sleep(1); 64 | } else { 65 | // 如果创建进程失败 66 | std::cerr << "Failed to create child process." 
<< std::endl; 67 | exit(EXIT_FAILURE); 68 | } 69 | } 70 | pause(); 71 | return 0; 72 | } 73 | 74 | void ShowArgsHelp() { std::cout << "format: command -n -f " << std::endl; } 75 | -------------------------------------------------------------------------------- /src/raftRpcPro/raftRPC.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | 4 | package raftRpcProctoc; //所在的命名空间 5 | 6 | option cc_generic_services = true; //开启stub服务 7 | 8 | // 日志实体 9 | message LogEntry{ 10 | bytes Command =1; 11 | int32 LogTerm =2; 12 | int32 LogIndex = 3; 13 | } 14 | // AppendEntriesArgs 由leader复制log条目,也可以当做是心跳连接,注释中的rf为leader节点 15 | message AppendEntriesArgs { 16 | // 下面几个参数和论文中相同 17 | int32 Term =1; 18 | int32 LeaderId =2; 19 | int32 PrevLogIndex =3; 20 | int32 PrevLogTerm =4; 21 | repeated LogEntry Entries = 5; 22 | int32 LeaderCommit = 6; 23 | } 24 | 25 | // AppendEntriesReply 论文中没有提及返回要设置哪些状态 26 | message AppendEntriesReply { 27 | int32 Term =1; // leader的term可能是与Follower不同的, 28 | bool Success =2; 29 | int32 UpdateNextIndex = 3; //快速调整leader对应的nextIndex 30 | int32 AppState =4; // 用来标识节点(网络)状态 31 | } 32 | 33 | message RequestVoteArgs { 34 | int32 Term =1; 35 | int32 CandidateId =2; 36 | int32 LastLogIndex =3; 37 | int32 LastLogTerm =4; 38 | } 39 | 40 | // RequestVoteReply 41 | // example RequestVote RPC reply structure. 42 | // field names must start with capital letters! 43 | message RequestVoteReply { 44 | // Your data here (2A). 
45 | int32 Term =1; 46 | bool VoteGranted =2; 47 | int32 VoteState =3; 48 | } 49 | 50 | 51 | message InstallSnapshotRequest { 52 | int32 LeaderId =1; 53 | int32 Term =2; 54 | int32 LastSnapShotIncludeIndex =3; 55 | int32 LastSnapShotIncludeTerm =4; 56 | bytes Data =5;//快照信息,当然是用bytes来传递 57 | } 58 | 59 | // InstallSnapshotResponse 只用返回Term,因为对于快照只要Term是符合的就是无条件接受的 60 | message InstallSnapshotResponse { 61 | int32 Term = 1; 62 | } 63 | //只有raft节点之间才会涉及rpc通信 64 | service raftRpc 65 | { 66 | rpc AppendEntries(AppendEntriesArgs) returns(AppendEntriesReply); 67 | rpc InstallSnapshot (InstallSnapshotRequest) returns (InstallSnapshotResponse); 68 | rpc RequestVote (RequestVoteArgs) returns (RequestVoteReply); 69 | } 70 | // message ResultCode 71 | // { 72 | // int32 errcode = 1; 73 | // bytes errmsg = 2; 74 | // } 75 | 76 | // message GetFriendsListRequest //请求,响应 77 | // { 78 | // uint32 userid = 1; 79 | // } 80 | 81 | // message GetFriendsListResponse //请求,响应 82 | // { 83 | // ResultCode result = 1; 84 | // repeated bytes friends = 2; 85 | // } 86 | 87 | // // 好友模块 88 | // service FiendServiceRpc //具体的服务模块和服务方法 89 | // { 90 | // rpc GetFriendsList(GetFriendsListRequest) returns(GetFriendsListResponse); 91 | // } 92 | -------------------------------------------------------------------------------- /test/测试文件运行说明.md: -------------------------------------------------------------------------------- 1 | 2 | ## defer_run.cpp 3 | 编译命令:g++ defer_run.cpp 4 | 5 | 注意:如果编译失败提示如下: 6 | ``` 7 | ✗ g++ defer_run.cpp -o test_defer 8 | In file included from defer_run.cpp:6: 9 | defer_run.cpp: In function ‘int main()’: 10 | include/defer.h:27:55: error: missing template arguments before ‘defer_only_places22’ 11 | 27 | #define _MAKE_DEFER_(line) ExecuteOnScopeExit _CONCAT(defer_only_places, line) = [&]() 12 | | ^~~~~~~~~~~~~~~~~ 13 | include/defer.h:26:23: note: in definition of macro ‘_CONCAT’ 14 | 26 | #define _CONCAT(a, b) a##b 15 | | ^ 16 | include/defer.h:30:15: note: in expansion of 
macro ‘_MAKE_DEFER_’ 17 | 30 | #define DEFER _MAKE_DEFER_(__LINE__) 18 | | ^~~~~~~~~~~~ 19 | defer_run.cpp:22:5: note: in expansion of macro ‘DEFER’ 20 | 22 | DEFER { 21 | | ^~~~~ 22 | ``` 23 | 尝试指定编译的c++标准到17及其以上,比如20。或者升级g++版本,一个可行的版本是 24 | ``` 25 | ✗ g++ -v 26 | Using built-in specs. 27 | COLLECT_GCC=g++ 28 | COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper 29 | OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa 30 | OFFLOAD_TARGET_DEFAULT=1 31 | Target: x86_64-linux-gnu 32 | Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr,amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2 33 | Thread model: posix 34 | Supported LTO compression algorithms: zlib zstd 35 | gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) 36 | ``` -------------------------------------------------------------------------------- 
/src/fiber/include/utils.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_UTIL_H__ 2 | #define __MONSOON_UTIL_H__ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | namespace monsoon { 19 | pid_t GetThreadId(); 20 | u_int32_t GetFiberId(); 21 | // 获取当前启动的毫秒数 22 | // 系统从启动到当前时刻的毫秒数 23 | static uint64_t GetElapsedMS() { 24 | struct timespec ts = {0}; 25 | clock_gettime(CLOCK_MONOTONIC_RAW, &ts); 26 | return ts.tv_sec * 1000 + ts.tv_nsec / 1000000; 27 | } 28 | 29 | // 将原始函数名解析为可读函数名 30 | static std::string demangle(const char *str) { 31 | size_t size = 0; 32 | int status = 0; 33 | std::string rt; 34 | rt.resize(256); 35 | if (1 == sscanf(str, "%*[^(]%*[^_]%255[^)+]", &rt[0])) { 36 | // 解析函数 37 | char *v = abi::__cxa_demangle(&rt[0], nullptr, &size, &status); 38 | if (v) { 39 | std::string result(v); 40 | free(v); 41 | return result; 42 | } 43 | } 44 | // 解析失败,返回原始函数名 45 | if (1 == sscanf(str, "%255s", &rt[0])) { 46 | return rt; 47 | } 48 | return str; 49 | } 50 | // 获取当前线程的调用栈信息 51 | static void Backtrace(std::vector &bt, int size, int skip) { 52 | // 分配用于存储调用栈信息的数组 53 | void **array = (void **)malloc((sizeof(void *) * size)); 54 | size_t s = ::backtrace(array, size); 55 | 56 | // 获取调用栈信息 57 | char **strings = backtrace_symbols(array, s); 58 | if (strings == NULL) { 59 | std::cout << "backtrace_synbols error" << std::endl; 60 | return; 61 | } 62 | // 解析每一个调用栈的信息,并将解析后的函数名添加到bt中 63 | for (size_t i = skip; i < s; ++i) { 64 | bt.push_back(demangle(strings[i])); 65 | } 66 | 67 | free(strings); 68 | free(array); 69 | } 70 | 71 | static std::string BacktraceToString(int size, int skip, const std::string &prefix) { 72 | std::vector bt; 73 | Backtrace(bt, size, skip); 74 | std::stringstream ss; 75 | for (size_t i = 0; i < bt.size(); ++i) { 76 | ss << prefix << bt[i] << std::endl; 
77 | } 78 | return ss.str(); 79 | } 80 | 81 | // 断言处理 82 | static void CondPanic(bool condition, std::string err) { 83 | if (!condition) { 84 | std::cout << "[assert by] (" << __FILE__ << ":" << __LINE__ << "),err: " << err << std::endl; 85 | std::cout << "[backtrace]\n" << BacktraceToString(6, 3, "") << std::endl; 86 | assert(condition); 87 | } 88 | } 89 | } // namespace monsoon 90 | 91 | #endif -------------------------------------------------------------------------------- /src/fiber/fd_manager.cpp: -------------------------------------------------------------------------------- 1 | #include "fd_manager.hpp" 2 | #include 3 | #include 4 | #include 5 | #include "hook.hpp" 6 | 7 | namespace monsoon { 8 | 9 | FdCtx::FdCtx(int fd) 10 | : m_isInit(false), 11 | m_isSocket(false), 12 | m_sysNonblock(false), 13 | m_userNonblock(false), 14 | m_isClosed(false), 15 | m_fd(fd), 16 | m_recvTimeout(-1), 17 | m_sendTimeout(-1) { 18 | init(); 19 | } 20 | 21 | FdCtx::~FdCtx() {} 22 | 23 | bool FdCtx::init() { 24 | if (m_isInit) { 25 | return true; 26 | } 27 | m_recvTimeout = -1; 28 | m_sendTimeout = -1; 29 | 30 | // 获取文件状态信息 31 | struct stat fd_stat; 32 | if (-1 == fstat(m_fd, &fd_stat)) { 33 | m_isInit = false; 34 | m_isSocket = false; 35 | } else { 36 | m_isInit = true; 37 | // 判断是否是socket 38 | m_isSocket = S_ISSOCK(fd_stat.st_mode); 39 | } 40 | 41 | // 对socket设置非阻塞 42 | if (m_isSocket) { 43 | int flags = fcntl_f(m_fd, F_GETFL, 0); 44 | if (!(flags & O_NONBLOCK)) { 45 | fcntl_f(m_fd, F_SETFL, flags | O_NONBLOCK); 46 | } 47 | m_sysNonblock = true; 48 | } else { 49 | m_sysNonblock = false; 50 | } 51 | 52 | m_userNonblock = false; 53 | m_isClosed = false; 54 | return m_isInit; 55 | } 56 | 57 | void FdCtx::setTimeout(int type, uint64_t v) { 58 | if (type == SO_RCVTIMEO) { 59 | m_recvTimeout = v; 60 | } else { 61 | m_sendTimeout = v; 62 | } 63 | } 64 | 65 | uint64_t FdCtx::getTimeout(int type) { 66 | if (type == SO_RCVTIMEO) { 67 | return m_recvTimeout; 68 | } else { 69 | return 
m_sendTimeout; 70 | } 71 | } 72 | 73 | FdManager::FdManager() { m_datas.resize(64); } 74 | 75 | FdCtx::ptr FdManager::get(int fd, bool auto_create) { 76 | if (fd == -1) { 77 | return nullptr; 78 | } 79 | RWMutexType::ReadLock lock(m_mutex); 80 | if ((int)m_datas.size() <= fd) { 81 | if (auto_create == false) { 82 | return nullptr; 83 | } 84 | } else { 85 | if (m_datas[fd] || !auto_create) { 86 | return m_datas[fd]; 87 | } 88 | } 89 | lock.unlock(); 90 | 91 | RWMutexType::WriteLock lock2(m_mutex); 92 | FdCtx::ptr ctx(new FdCtx(fd)); 93 | if (fd >= (int)m_datas.size()) { 94 | m_datas.resize(fd * 1.5); 95 | } 96 | m_datas[fd] = ctx; 97 | return ctx; 98 | } 99 | 100 | void FdManager::del(int fd) { 101 | RWMutexType::WriteLock lock(m_mutex); 102 | if ((int)m_datas.size() <= fd) { 103 | return; 104 | } 105 | m_datas[fd].reset(); 106 | } 107 | } // namespace monsoon -------------------------------------------------------------------------------- /example/fiberExample/server.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "monsoon.h" 10 | 11 | static int listen_sock = -1; 12 | 13 | void test_accept(); 14 | 15 | // task 16 | void watch_io_read() { monsoon::IOManager::GetThis()->addEvent(listen_sock, monsoon::READ, test_accept); } 17 | 18 | void test_accept() { 19 | struct sockaddr_in addr; 20 | memset(&addr, 0, sizeof(addr)); 21 | socklen_t len = sizeof(addr); 22 | int fd = accept(listen_sock, (struct sockaddr *)&addr, &len); 23 | if (fd < 0) { 24 | std::cout << "fd = " << fd << ",accept error" << std::endl; 25 | } else { 26 | std::cout << "fd = " << fd << ",accept success" << std::endl; 27 | fcntl(fd, F_SETFL, O_NONBLOCK); 28 | monsoon::IOManager::GetThis()->addEvent(fd, monsoon::READ, [fd]() { 29 | char buffer[1024]; 30 | memset(buffer, 0, sizeof(buffer)); 31 | while (true) { 32 | int ret = recv(fd, buffer, 
sizeof(buffer), 0); 33 | if (ret > 0) { 34 | std::cout << "client say: " << buffer << std::endl; 35 | ret = send(fd, buffer, ret, 0); 36 | } 37 | if (ret <= 0) { 38 | if (errno == EAGAIN) continue; 39 | close(fd); 40 | break; 41 | } 42 | } 43 | }); 44 | } 45 | monsoon::IOManager::GetThis()->scheduler(watch_io_read); 46 | } 47 | 48 | void test_iomanager() { 49 | int port = 8080; 50 | struct sockaddr_in svr_addr; 51 | // socklen_t cli_len = sizeof(cli_addr); 52 | listen_sock = socket(AF_INET, SOCK_STREAM, 0); 53 | if (listen_sock < 0) { 54 | std::cout << "creating listen socket error" << std::endl; 55 | return; 56 | } 57 | 58 | int opt = 1; 59 | setsockopt(listen_sock, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); 60 | 61 | memset((char *)&svr_addr, 0, sizeof(svr_addr)); 62 | svr_addr.sin_family = AF_INET; 63 | svr_addr.sin_port = htons(port); 64 | svr_addr.sin_addr.s_addr = INADDR_ANY; 65 | 66 | if (bind(listen_sock, (struct sockaddr *)&svr_addr, sizeof(svr_addr)) < 0) { 67 | std::cout << "bind error" << std::endl; 68 | return; 69 | } 70 | 71 | if (listen(listen_sock, 1024) < 0) { 72 | std::cout << "listen error" << std::endl; 73 | return; 74 | } else { 75 | std::cout << "listen success on port: " << port << std::endl; 76 | } 77 | 78 | fcntl(listen_sock, F_SETFL, O_NONBLOCK); 79 | 80 | monsoon::IOManager iomanager; 81 | iomanager.addEvent(listen_sock, monsoon::READ, test_accept); 82 | } 83 | 84 | int main(int argc, char *argv[]) { 85 | test_iomanager(); 86 | return 0; 87 | } 88 | -------------------------------------------------------------------------------- /example/fiberExample/test_scheduler.cpp: -------------------------------------------------------------------------------- 1 | #include "monsoon.h" 2 | 3 | const std::string LOG_HEAD = "[TASK] "; 4 | 5 | void test_fiber_1() { 6 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_1 begin" << std::endl; 7 | 8 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_1 
finish" << std::endl; 9 | } 10 | 11 | void test_fiber_2() { 12 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_2 begin" << std::endl; 13 | // no hook 直接将当前协程阻塞,等效于将当前线程阻塞 14 | sleep(3); 15 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_2 finish" << std::endl; 16 | } 17 | 18 | void test_fiber_3() { 19 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_3 begin" << std::endl; 20 | 21 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_3 finish" << std::endl; 22 | } 23 | 24 | void test_fiber_4() { 25 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_4 begin" << std::endl; 26 | 27 | std::cout << LOG_HEAD << "tid = " << monsoon::GetThreadId() << ",test_fiber_4 finish" << std::endl; 28 | } 29 | 30 | // user_caller = true,使用main函数进行协程的调度,且单线程 31 | void test_user_caller_1() { 32 | std::cout << "main begin" << std::endl; 33 | 34 | // 默认条件下 threads = 1,即只是用main函数进行协程的调度,等价于 35 | // 先攒下一些协程,然后切换到调度器的run方法执行这些协程,然后再返回 36 | // main函数 37 | monsoon::Scheduler sc; 38 | 39 | sc.scheduler(test_fiber_1); 40 | sc.scheduler(test_fiber_2); 41 | 42 | monsoon::Fiber::ptr fiber(new monsoon::Fiber(&test_fiber_3)); 43 | sc.scheduler(fiber); 44 | 45 | // 创建调度线程,开始任务调度,如果只 46 | // 使用main函数线程进行调度,那start相当于什么也没做 47 | sc.start(); 48 | 49 | // sc.scheduler(test_fiber_4); 50 | /* 51 | * 停止调度,如果未使用当前线程进行调度,那么只需要简单地等所有调度线程退出即可 52 | * 如果使用了当前线程进行调度,那么要先执行当前线程的协程调度函数,等其执行完后再返回caller协程继续往下执行 53 | */ 54 | sc.stop(); 55 | 56 | std::cout << "main end" << std::endl; 57 | } 58 | 59 | // user_caller = true,使用main函数进行协程的调度,且多线程 60 | void test_user_caller_2() { 61 | std::cout << "main begin" << std::endl; 62 | 63 | // 默认条件下 threads = 1,即只是用main函数进行协程的调度,等价于 64 | // 先攒下一些协程,然后切换到调度器的run方法执行这些协程,然后再返回 65 | // main函数 66 | monsoon::Scheduler sc(3, true); 67 | 68 | sc.scheduler(test_fiber_1); 69 | sc.scheduler(test_fiber_2); 70 | 71 | monsoon::Fiber::ptr fiber(new 
monsoon::Fiber(&test_fiber_3)); 72 | sc.scheduler(fiber); 73 | 74 | // 创建调度线程,开始任务调度,如果只 75 | // 使用main函数线程进行调度,那start相当于什么也没做 76 | sc.start(); 77 | 78 | sc.scheduler(test_fiber_4); 79 | 80 | sleep(5); 81 | /* 82 | * 停止调度,如果未使用当前线程进行调度,那么只需要简单地等所有调度线程退出即可 83 | * 如果使用了当前线程进行调度,那么要先执行当前线程的协程调度函数,等其执行完后再返回caller协程继续往下执行 84 | */ 85 | sc.stop(); 86 | 87 | std::cout << "main end" << std::endl; 88 | } 89 | 90 | int main() { test_user_caller_2(); } -------------------------------------------------------------------------------- /example/fiberExample/test_iomanager.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "monsoon.h" 7 | 8 | int sockfd; 9 | void watch_io_read(); 10 | 11 | // 写事件回调,只执行一次,用于判断非阻塞套接字connect成功 12 | void do_io_write() { 13 | std::cout << "write callback" << std::endl; 14 | int so_err = 0; // fix: initialize — previously read uninitialized below 15 | socklen_t len = sizeof(so_err); // fix: optlen must be sizeof(so_err); was size_t(so_err), a garbage value from an uninitialized int 16 | getsockopt(sockfd, SOL_SOCKET, SO_ERROR, &so_err, &len); 17 | if (so_err) { 18 | std::cout << "connect fail" << std::endl; 19 | return; 20 | } 21 | std::cout << "connect success" << std::endl; 22 | } 23 | 24 | // 读事件回调,每次读取之后如果套接字未关闭,需要重新添加 25 | void do_io_read() { 26 | std::cout << "read callback" << std::endl; 27 | char buf[1024] = {0}; 28 | int readlen = 0; 29 | readlen = read(sockfd, buf, sizeof(buf) - 1); // fix: keep one byte spare so buf[readlen] = '\0' below never writes past the end 30 | if (readlen > 0) { 31 | buf[readlen] = '\0'; 32 | std::cout << "read " << readlen << " bytes, read: " << buf << std::endl; 33 | } else if (readlen == 0) { 34 | std::cout << "peer closed"; 35 | close(sockfd); 36 | return; 37 | } else { 38 | std::cout << "err, errno=" << errno << ", errstr=" << strerror(errno) << std::endl; 39 | close(sockfd); 40 | return; 41 | } 42 | // read之后重新添加读事件回调,这里不能直接调用addEvent,因为在当前位置fd的读事件上下文还是有效的,直接调用addEvent相当于重复添加相同事件 43 | monsoon::IOManager::GetThis()->scheduler(watch_io_read); 44 | } 45 | 46 | void watch_io_read() { 47 | std::cout << "watch_io_read" << std::endl; 48 | 
monsoon::IOManager::GetThis()->addEvent(sockfd, monsoon::READ, do_io_read); 49 | } 50 | 51 | void test_io() { 52 | sockfd = socket(AF_INET, SOCK_STREAM, 0); 53 | monsoon::CondPanic(sockfd > 0, "scoket should >0"); 54 | fcntl(sockfd, F_SETFL, O_NONBLOCK); 55 | 56 | sockaddr_in servaddr; 57 | memset(&servaddr, 0, sizeof(servaddr)); 58 | servaddr.sin_family = AF_INET; 59 | servaddr.sin_port = htons(80); 60 | inet_pton(AF_INET, "36.152.44.96", &servaddr.sin_addr.s_addr); 61 | 62 | int rt = connect(sockfd, (const sockaddr *)&servaddr, sizeof(servaddr)); 63 | if (rt != 0) { 64 | if (errno == EINPROGRESS) { 65 | std::cout << "EINPROGRESS" << std::endl; 66 | // 注册写事件回调,只用于判断connect是否成功 67 | // 非阻塞的TCP套接字connect一般无法立即建立连接,要通过套接字可写来判断connect是否已经成功 68 | monsoon::IOManager::GetThis()->addEvent(sockfd, monsoon::WRITE, do_io_write); 69 | // 注册读事件回调,注意事件是一次性的 70 | monsoon::IOManager::GetThis()->addEvent(sockfd, monsoon::READ, do_io_read); 71 | } else { 72 | std::cout << "connect error, errno:" << errno << ", errstr:" << strerror(errno) << std::endl; 73 | } 74 | } else { 75 | std::cout << "else, errno:" << errno << ", errstr:" << strerror(errno) << std::endl; 76 | } 77 | } 78 | 79 | void test_iomanager() { 80 | monsoon::IOManager iom; 81 | // monsoon::IOManager iom(10); // 演示多线程下IO协程在不同线程之间切换 82 | iom.scheduler(test_io); 83 | } 84 | 85 | int main(int argc, char *argv[]) { 86 | test_iomanager(); 87 | 88 | return 0; 89 | } -------------------------------------------------------------------------------- /src/fiber/include/hook.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_HOOK_H__ 2 | #define __MONSOON_HOOK_H__ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "hook.hpp" 12 | 13 | namespace monsoon { 14 | // 当前线程是否hook 15 | bool is_hook_enable(); 16 | // 设置当前线程hook 17 | void set_hook_enable(bool flag); 18 | } // namespace monsoon 19 | 20 | extern "C" { 21 | // 
sleep 22 | typedef unsigned int (*sleep_fun)(unsigned int seconds); 23 | extern sleep_fun sleep_f; 24 | 25 | typedef int (*usleep_fun)(useconds_t usec); 26 | extern usleep_fun usleep_f; 27 | 28 | typedef int (*nanosleep_fun)(const struct timespec *req, struct timespec *rem); 29 | extern nanosleep_fun nanosleep_f; 30 | 31 | // socket 32 | typedef int (*socket_fun)(int domain, int type, int protocol); 33 | extern socket_fun socket_f; 34 | 35 | typedef int (*connect_fun)(int sockfd, const struct sockaddr *addr, socklen_t addrlen); 36 | extern connect_fun connect_f; 37 | 38 | typedef int (*accept_fun)(int s, struct sockaddr *addr, socklen_t *addrlen); 39 | extern accept_fun accept_f; 40 | 41 | // read 42 | typedef ssize_t (*read_fun)(int fd, void *buf, size_t count); 43 | extern read_fun read_f; 44 | 45 | typedef ssize_t (*readv_fun)(int fd, const struct iovec *iov, int iovcnt); 46 | extern readv_fun readv_f; 47 | 48 | typedef ssize_t (*recv_fun)(int sockfd, void *buf, size_t len, int flags); 49 | extern recv_fun recv_f; 50 | 51 | typedef ssize_t (*recvfrom_fun)(int sockfd, void *buf, size_t len, int flags, struct sockaddr *src_addr, 52 | socklen_t *addrlen); 53 | extern recvfrom_fun recvfrom_f; 54 | 55 | typedef ssize_t (*recvmsg_fun)(int sockfd, struct msghdr *msg, int flags); 56 | extern recvmsg_fun recvmsg_f; 57 | 58 | // write 59 | typedef ssize_t (*write_fun)(int fd, const void *buf, size_t count); 60 | extern write_fun write_f; 61 | 62 | typedef ssize_t (*writev_fun)(int fd, const struct iovec *iov, int iovcnt); 63 | extern writev_fun writev_f; 64 | 65 | typedef ssize_t (*send_fun)(int s, const void *msg, size_t len, int flags); 66 | extern send_fun send_f; 67 | 68 | typedef ssize_t (*sendto_fun)(int s, const void *msg, size_t len, int flags, const struct sockaddr *to, 69 | socklen_t tolen); 70 | extern sendto_fun sendto_f; 71 | 72 | typedef ssize_t (*sendmsg_fun)(int s, const struct msghdr *msg, int flags); 73 | extern sendmsg_fun sendmsg_f; 74 | 75 | typedef 
int (*close_fun)(int fd); 76 | extern close_fun close_f; 77 | 78 | // 79 | typedef int (*fcntl_fun)(int fd, int cmd, ... /* arg */); 80 | extern fcntl_fun fcntl_f; 81 | 82 | typedef int (*ioctl_fun)(int d, unsigned long int request, ...); 83 | extern ioctl_fun ioctl_f; 84 | 85 | typedef int (*getsockopt_fun)(int sockfd, int level, int optname, void *optval, socklen_t *optlen); 86 | extern getsockopt_fun getsockopt_f; 87 | 88 | typedef int (*setsockopt_fun)(int sockfd, int level, int optname, const void *optval, socklen_t optlen); 89 | extern setsockopt_fun setsockopt_f; 90 | 91 | extern int connect_with_timeout(int fd, const struct sockaddr *addr, socklen_t addrlen, uint64_t timeout_ms); 92 | } 93 | 94 | #endif -------------------------------------------------------------------------------- /src/raftClerk/clerk.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-6-4. 3 | // 4 | #include "clerk.h" 5 | 6 | #include "raftServerRpcUtil.h" 7 | 8 | #include "util.h" 9 | 10 | #include 11 | #include 12 | std::string Clerk::Get(std::string key) { 13 | m_requestId++; 14 | auto requestId = m_requestId; 15 | int server = m_recentLeaderId; 16 | raftKVRpcProctoc::GetArgs args; 17 | args.set_key(key); 18 | args.set_clientid(m_clientId); 19 | args.set_requestid(requestId); 20 | 21 | while (true) { 22 | raftKVRpcProctoc::GetReply reply; 23 | bool ok = m_servers[server]->Get(&args, &reply); 24 | if (!ok || 25 | reply.err() == 26 | ErrWrongLeader) { //会一直重试,因为requestId没有改变,因此可能会因为RPC的丢失或者其他情况导致重试,kvserver层来保证不重复执行(线性一致性) 27 | server = (server + 1) % m_servers.size(); 28 | continue; 29 | } 30 | if (reply.err() == ErrNoKey) { 31 | return ""; 32 | } 33 | if (reply.err() == OK) { 34 | m_recentLeaderId = server; 35 | return reply.value(); 36 | } 37 | } 38 | return ""; 39 | } 40 | 41 | void Clerk::PutAppend(std::string key, std::string value, std::string op) { 42 | // You will have to modify this function. 
43 | m_requestId++; 44 | auto requestId = m_requestId; 45 | auto server = m_recentLeaderId; 46 | while (true) { 47 | raftKVRpcProctoc::PutAppendArgs args; 48 | args.set_key(key); 49 | args.set_value(value); 50 | args.set_op(op); 51 | args.set_clientid(m_clientId); 52 | args.set_requestid(requestId); 53 | raftKVRpcProctoc::PutAppendReply reply; 54 | bool ok = m_servers[server]->PutAppend(&args, &reply); 55 | if (!ok || reply.err() == ErrWrongLeader) { 56 | DPrintf("【Clerk::PutAppend】原以为的leader:{%d}请求失败,向新leader{%d}重试 ,操作:{%s}", server, server + 1, 57 | op.c_str()); 58 | if (!ok) { 59 | DPrintf("重试原因 ,rpc失敗 ,"); 60 | } 61 | if (reply.err() == ErrWrongLeader) { 62 | DPrintf("重試原因:非leader"); 63 | } 64 | server = (server + 1) % m_servers.size(); // try the next server 65 | continue; 66 | } 67 | if (reply.err() == OK) { //什么时候reply errno为ok呢??? 68 | m_recentLeaderId = server; 69 | return; 70 | } 71 | } 72 | } 73 | 74 | void Clerk::Put(std::string key, std::string value) { PutAppend(key, value, "Put"); } 75 | 76 | void Clerk::Append(std::string key, std::string value) { PutAppend(key, value, "Append"); } 77 | //初始化客户端 78 | void Clerk::Init(std::string configFileName) { 79 | //获取所有raft节点ip、port ,并进行连接 80 | MprpcConfig config; 81 | config.LoadConfigFile(configFileName.c_str()); 82 | std::vector> ipPortVt; 83 | for (int i = 0; i < INT_MAX - 1; ++i) { 84 | std::string node = "node" + std::to_string(i); 85 | 86 | std::string nodeIp = config.Load(node + "ip"); 87 | std::string nodePortStr = config.Load(node + "port"); 88 | if (nodeIp.empty()) { 89 | break; 90 | } 91 | ipPortVt.emplace_back(nodeIp, atoi(nodePortStr.c_str())); //沒有atos方法,可以考慮自己实现 92 | } 93 | //进行连接 94 | for (const auto& item : ipPortVt) { 95 | std::string ip = item.first; 96 | short port = item.second; 97 | // 2024-01-04 todo:bug fix 98 | auto* rpc = new raftServerRpcUtil(ip, port); 99 | m_servers.push_back(std::shared_ptr(rpc)); 100 | } 101 | } 102 | 103 | Clerk::Clerk() : m_clientId(Uuid()), m_requestId(0), 
m_recentLeaderId(0) {} 104 | -------------------------------------------------------------------------------- /src/raftCore/Persister.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by swx on 23-5-30. 3 | // 4 | #include "Persister.h" 5 | #include "util.h" 6 | 7 | // todo:会涉及反复打开文件的操作,没有考虑如果文件出现问题会怎么办?? 8 | void Persister::Save(const std::string raftstate, const std::string snapshot) { 9 | std::lock_guard lg(m_mtx); 10 | clearRaftStateAndSnapshot(); 11 | // 将raftstate和snapshot写入本地文件 12 | m_raftStateOutStream << raftstate; 13 | m_snapshotOutStream << snapshot; 14 | } 15 | 16 | std::string Persister::ReadSnapshot() { 17 | std::lock_guard lg(m_mtx); 18 | if (m_snapshotOutStream.is_open()) { 19 | m_snapshotOutStream.close(); 20 | } 21 | 22 | DEFER { 23 | m_snapshotOutStream.open(m_snapshotFileName); //默认是追加 24 | }; 25 | std::fstream ifs(m_snapshotFileName, std::ios_base::in); 26 | if (!ifs.good()) { 27 | return ""; 28 | } 29 | std::string snapshot; 30 | ifs >> snapshot; 31 | ifs.close(); 32 | return snapshot; 33 | } 34 | 35 | void Persister::SaveRaftState(const std::string &data) { 36 | std::lock_guard lg(m_mtx); 37 | // 将raftstate和snapshot写入本地文件 38 | clearRaftState(); 39 | m_raftStateOutStream << data; 40 | m_raftStateSize += data.size(); 41 | } 42 | 43 | long long Persister::RaftStateSize() { 44 | std::lock_guard lg(m_mtx); 45 | 46 | return m_raftStateSize; 47 | } 48 | 49 | std::string Persister::ReadRaftState() { 50 | std::lock_guard lg(m_mtx); 51 | 52 | std::fstream ifs(m_raftStateFileName, std::ios_base::in); 53 | if (!ifs.good()) { 54 | return ""; 55 | } 56 | std::string snapshot; 57 | ifs >> snapshot; 58 | ifs.close(); 59 | return snapshot; 60 | } 61 | 62 | Persister::Persister(const int me) 63 | : m_raftStateFileName("raftstatePersist" + std::to_string(me) + ".txt"), 64 | m_snapshotFileName("snapshotPersist" + std::to_string(me) + ".txt"), 65 | m_raftStateSize(0) { 66 | /** 67 | * 检查文件状态并清空文件 68 | */ 
69 | bool fileOpenFlag = true; 70 | std::fstream file(m_raftStateFileName, std::ios::out | std::ios::trunc); 71 | if (file.is_open()) { 72 | file.close(); 73 | } else { 74 | fileOpenFlag = false; 75 | } 76 | file = std::fstream(m_snapshotFileName, std::ios::out | std::ios::trunc); 77 | if (file.is_open()) { 78 | file.close(); 79 | } else { 80 | fileOpenFlag = false; 81 | } 82 | if (!fileOpenFlag) { 83 | DPrintf("[func-Persister::Persister] file open error"); 84 | } 85 | /** 86 | * 绑定流 87 | */ 88 | m_raftStateOutStream.open(m_raftStateFileName); 89 | m_snapshotOutStream.open(m_snapshotFileName); 90 | } 91 | 92 | Persister::~Persister() { 93 | if (m_raftStateOutStream.is_open()) { 94 | m_raftStateOutStream.close(); 95 | } 96 | if (m_snapshotOutStream.is_open()) { 97 | m_snapshotOutStream.close(); 98 | } 99 | } 100 | 101 | void Persister::clearRaftState() { 102 | m_raftStateSize = 0; 103 | // 关闭文件流 104 | if (m_raftStateOutStream.is_open()) { 105 | m_raftStateOutStream.close(); 106 | } 107 | // 重新打开文件流并清空文件内容 108 | m_raftStateOutStream.open(m_raftStateFileName, std::ios::out | std::ios::trunc); 109 | } 110 | 111 | void Persister::clearSnapshot() { 112 | if (m_snapshotOutStream.is_open()) { 113 | m_snapshotOutStream.close(); 114 | } 115 | m_snapshotOutStream.open(m_snapshotFileName, std::ios::out | std::ios::trunc); 116 | } 117 | 118 | void Persister::clearRaftStateAndSnapshot() { 119 | clearRaftState(); 120 | clearSnapshot(); 121 | } 122 | -------------------------------------------------------------------------------- /src/fiber/include/mutex.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_MUTEX_H_ 2 | #define __MONSOON_MUTEX_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "noncopyable.hpp" 16 | #include "utils.hpp" 17 | 18 | namespace monsoon { 19 | // TODO:具体实现 20 | // 信号量 21 | class Semaphore : 
Nonecopyable { 22 | public: 23 | Semaphore(uint32_t count = 0); 24 | ~Semaphore(); 25 | 26 | void wait(); 27 | void notify(); 28 | 29 | private: 30 | sem_t semaphore_; 31 | }; 32 | 33 | // 局部锁类模板 34 | template 35 | struct ScopedLockImpl { 36 | public: 37 | ScopedLockImpl(T &mutex) : m_(mutex) { 38 | // std::cout << "n lock" << std::endl; 39 | m_.lock(); 40 | isLocked_ = true; 41 | } 42 | 43 | void lock() { 44 | if (!isLocked_) { 45 | std::cout << "lock" << std::endl; 46 | m_.lock(); 47 | isLocked_ = true; 48 | } 49 | } 50 | 51 | void unlock() { 52 | if (isLocked_) { 53 | // std::cout << "unlock" << std::endl; 54 | m_.unlock(); 55 | isLocked_ = false; 56 | } 57 | } 58 | 59 | ~ScopedLockImpl() { 60 | // std::cout << "unlock" << std::endl; 61 | unlock(); 62 | } 63 | 64 | private: 65 | // mutex 66 | T &m_; 67 | // 是否已经上锁 68 | bool isLocked_; 69 | }; 70 | 71 | template 72 | struct ReadScopedLockImpl { 73 | public: 74 | ReadScopedLockImpl(T &mutex) : mutex_(mutex) { 75 | mutex_.rdlock(); 76 | isLocked_ = true; 77 | } 78 | ~ReadScopedLockImpl() { unlock(); } 79 | void lock() { 80 | if (!isLocked_) { 81 | mutex_.rdlock(); 82 | isLocked_ = true; 83 | } 84 | } 85 | void unlock() { 86 | if (isLocked_) { 87 | mutex_.unlock(); 88 | isLocked_ = false; 89 | } 90 | } 91 | 92 | private: 93 | /// mutex 94 | T &mutex_; 95 | /// 是否已上锁 96 | bool isLocked_; 97 | }; 98 | 99 | template 100 | struct WriteScopedLockImpl { 101 | public: 102 | WriteScopedLockImpl(T &mutex) : mutex_(mutex) { 103 | mutex_.wrlock(); 104 | isLocked_ = true; 105 | } 106 | 107 | ~WriteScopedLockImpl() { unlock(); } 108 | void lock() { 109 | if (!isLocked_) { 110 | mutex_.wrlock(); 111 | isLocked_ = true; 112 | } 113 | } 114 | void unlock() { 115 | if (isLocked_) { 116 | mutex_.unlock(); 117 | isLocked_ = false; 118 | } 119 | } 120 | 121 | private: 122 | /// Mutex 123 | T &mutex_; 124 | /// 是否已上锁 125 | bool isLocked_; 126 | }; 127 | 128 | class Mutex : Nonecopyable { 129 | public: 130 | typedef ScopedLockImpl Lock; 
131 | 132 | Mutex() { CondPanic(0 == pthread_mutex_init(&m_, nullptr), "lock init success"); } 133 | 134 | void lock() { CondPanic(0 == pthread_mutex_lock(&m_), "lock error"); } 135 | 136 | void unlock() { CondPanic(0 == pthread_mutex_unlock(&m_), "unlock error"); } 137 | 138 | ~Mutex() { CondPanic(0 == pthread_mutex_destroy(&m_), "destroy lock error"); } 139 | 140 | private: 141 | pthread_mutex_t m_; 142 | }; 143 | 144 | class RWMutex : Nonecopyable { 145 | public: 146 | // 局部读锁 147 | typedef ReadScopedLockImpl ReadLock; 148 | // 局部写锁 149 | typedef WriteScopedLockImpl WriteLock; 150 | 151 | RWMutex() { pthread_rwlock_init(&m_, nullptr); } 152 | ~RWMutex() { pthread_rwlock_destroy(&m_); } 153 | 154 | void rdlock() { pthread_rwlock_rdlock(&m_); } 155 | 156 | void wrlock() { pthread_rwlock_wrlock(&m_); } 157 | 158 | void unlock() { pthread_rwlock_unlock(&m_); } 159 | 160 | private: 161 | pthread_rwlock_t m_; 162 | }; 163 | } // namespace monsoon 164 | 165 | #endif -------------------------------------------------------------------------------- /src/fiber/include/scheduler.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __MONSOON_SCHEDULER_H__ 2 | #define __MONSOON_SCHEDULER_H__ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "fiber.hpp" 12 | #include "mutex.hpp" 13 | #include "thread.hpp" 14 | #include "utils.hpp" 15 | 16 | namespace monsoon { 17 | // 调度任务 18 | class SchedulerTask { 19 | public: 20 | friend class Scheduler; 21 | SchedulerTask() { thread_ = -1; } 22 | SchedulerTask(Fiber::ptr f, int t) : fiber_(f), thread_(t) {} 23 | SchedulerTask(Fiber::ptr *f, int t) { 24 | fiber_.swap(*f); 25 | thread_ = t; 26 | } 27 | SchedulerTask(std::function f, int t) { 28 | // std::cout << "function task" << std::endl; 29 | cb_ = f; 30 | thread_ = t; 31 | } 32 | // 清空任务 33 | void reset() { 34 | fiber_ = nullptr; 35 | cb_ = nullptr; 36 | thread_ = -1; 37 | } 38 | 39 
| private: 40 | Fiber::ptr fiber_; 41 | std::function cb_; 42 | int thread_; 43 | }; 44 | 45 | // N->M协程调度器 46 | class Scheduler { 47 | public: 48 | typedef std::shared_ptr ptr; 49 | 50 | Scheduler(size_t threads = 1, bool use_caller = true, const std::string &name = "Scheduler"); 51 | virtual ~Scheduler(); 52 | const std::string &getName() const { return name_; } 53 | // 获取当前线程调度器 54 | static Scheduler *GetThis(); 55 | // 获取当前线程的调度器协程 56 | static Fiber *GetMainFiber(); 57 | 58 | /** 59 | * \brief 添加调度任务 60 | * \tparam TaskType 任务类型,可以是协程对象或函数指针 61 | * \param task 任务 62 | * \param thread 指定执行函数的线程,-1为不指定 63 | */ 64 | template 65 | void scheduler(TaskType task, int thread = -1) { 66 | bool isNeedTickle = false; 67 | { 68 | Mutex::Lock lock(mutex_); 69 | isNeedTickle = schedulerNoLock(task, thread); 70 | // std::cout << "isNeedTickle: " << isNeedTickle << std::endl; 71 | } 72 | 73 | if (isNeedTickle) { 74 | tickle(); // 唤醒idle协程 75 | } 76 | // log 77 | // std::string tp = "[Callback Func]"; 78 | // if (boost::typeindex::type_id_with_cvr().pretty_name() != "void (*)()") 79 | // { 80 | // tp = "[Fiber]"; 81 | // } 82 | // std::cout << "[scheduler] add scheduler task: " << tp << " success" << std::endl; 83 | // std::cout << "[scheduler] add scheduler task success" << std::endl; 84 | } 85 | // 启动调度器 86 | void start(); 87 | // 停止调度器,等待所有任务结束 88 | void stop(); 89 | 90 | protected: 91 | // 通知调度器任务到达 92 | virtual void tickle(); 93 | /** 94 | * \brief 协程调度函数, 95 | * 默认会启用hook 96 | */ 97 | void run(); 98 | // 无任务时执行idle协程 99 | virtual void idle(); 100 | // 返回是否可以停止 101 | virtual bool stopping(); 102 | // 设置当前线程调度器 103 | void setThis(); 104 | // 返回是否有空闲进程 105 | bool isHasIdleThreads() { return idleThreadCnt_ > 0; } 106 | 107 | private: 108 | // 无锁下,添加调度任务 109 | // todo 可以加入使用clang的锁检查 110 | template 111 | bool schedulerNoLock(TaskType t, int thread) { 112 | bool isNeedTickle = tasks_.empty(); 113 | SchedulerTask task(t, thread); 114 | if (task.fiber_ || task.cb_) { 115 | // 
std::cout << "有效task" << std::endl;
      tasks_.push_back(task);  // only enqueue tasks that actually carry work
    }
    // std::cout << "scheduler noblock: isNeedTickle = " << isNeedTickle << std::endl;
    return isNeedTickle;
  }
  // Scheduler name (used for logging and worker-thread names).
  std::string name_;
  // Mutex guarding the task queue and thread pool.
  Mutex mutex_;
  // Worker thread pool.
  // NOTE(review): container element types below were lost in extraction.
  std::vector threadPool_;
  // Pending task queue.
  std::list tasks_;
  // Ids of all scheduling threads.
  std::vector threadIds_;
  // Number of worker threads (excluding the use_caller main thread).
  size_t threadCnt_ = 0;
  // Number of threads currently executing a task.
  std::atomic activeThreadCnt_ = {0};
  // Number of threads currently idle.
  std::atomic idleThreadCnt_ = {0};
  // Whether the caller thread participates in scheduling.
  bool isUseCaller_;
  // When use_caller == true: the scheduling fiber of the caller thread.
  Fiber::ptr rootFiber_;
  // When use_caller == true: the id of the caller thread.
  int rootThread_ = 0;
  bool isStopped_ = false;
};
}  // namespace monsoon

#endif
--------------------------------------------------------------------------------
/src/raftCore/include/kvServer.h:
--------------------------------------------------------------------------------
//
// Created by swx on 23-6-1.
//

#ifndef SKIP_LIST_ON_RAFT_KVSERVER_H
#define SKIP_LIST_ON_RAFT_KVSERVER_H

// NOTE(review): the angle-bracketed header names below were lost in
// extraction; these thirteen bare #include lines originally named headers.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "kvServerRPC.pb.h"
#include "raft.h"
#include "skipList.h"

// KV server sitting on top of a raft node.
// NOTE(review): base class is inherited privately (no access specifier on a
// class defaults to private) — confirm this is intentional for the RPC stub.
class KvServer : raftKVRpcProctoc::kvServerRpc {
 private:
  std::mutex m_mtx;  // guards the KV state and bookkeeping maps
  int m_me;          // this server's index within the raft group
  std::shared_ptr m_raftNode;  // the underlying raft consensus node
  // Channel connecting this kvServer with its raft node (apply channel).
  std::shared_ptr > applyChan;
  int m_maxRaftState;  // snapshot if log grows this big

  // Your definitions here.
  // Serialized KV payload used during snapshot save/load.
  // todo: in theory this copy could be avoided, but no better alternative
  // has been found yet.
  std::string m_serializedKVData;
  SkipList m_skipList;  // the actual storage engine (skip list)
  // NOTE(review): template arguments of the maps below were lost in extraction.
  std::unordered_map m_kvDB;

  // raft log index -> wait channel: a map whose key is an int (raft index)
  // and whose value is a queue (channel) of Op, used to hand the applied
  // command back to the RPC handler that is waiting for it.
  std::unordered_map *> waitApplyCh;

  // clientId -> last requestId seen. One KV server may serve many clients,
  // and this map is what makes duplicate-request detection possible.
  std::unordered_map m_lastRequestId;

  // Raft log index of the last snapshot point.
  int m_lastSnapShotRaftLogIndex;

 public:
  KvServer() = delete;

  // Construct and start a KV server node with the given raft-group index,
  // snapshot threshold, cluster-config file and listen port.
  KvServer(int me, int maxraftstate, std::string nodeInforFileName, short port);

  void StartKVServer();

  // Debug-print the contents of the KV database.
  void DprintfKVDB();

  // Apply an Append operation to the KV store.
  void ExecuteAppendOpOnKVDB(Op op);

  // Apply a Get operation: *exist reports whether the key was found,
  // *value receives the value when it was.
  void ExecuteGetOpOnKVDB(Op op, std::string *value, bool *exist);

  // Apply a Put operation to the KV store.
  void ExecutePutOpOnKVDB(Op op);

  // Handle a Get request. Uses RPC-style args/reply because the client is
  // remote: a server crash is transparent to the client.
  void Get(const raftKVRpcProctoc::GetArgs *args,
           raftKVRpcProctoc::GetReply
               *reply);
  /**
   * Consume a message applied by the raft node (NOT the execution of a
   * client GET command — do not confuse the two).
   * @param message the applied raft message
   */
  void GetCommandFromRaft(ApplyMsg message);

  // Whether (ClientId, RequestId) has already been executed.
  bool ifRequestDuplicate(std::string ClientId, int RequestId);

  // Invoked remotely by the clerk via RPC.
  void PutAppend(const raftKVRpcProctoc::PutAppendArgs *args, raftKVRpcProctoc::PutAppendReply *reply);

  // Loop forever, draining messages the raft node pushes onto applyChan.
  void ReadRaftApplyCommandLoop();

  // Restore KV state from a raw snapshot blob.
  void ReadSnapShotToInstall(std::string snapshot);

  // Deliver an applied op to the handler waiting on raftIndex's channel.
  bool SendMessageToWaitChan(const Op &op, int raftIndex);

  // Check whether a snapshot is needed (log grew past the threshold) and,
  // if so, ask the raft layer below to take one.
  void IfNeedToSendSnapShotCommand(int raftIndex, int proportion);

  // Handler for snapshots arriving from kv.rf.applyCh.
  void GetSnapShotFromRaft(ApplyMsg message);

  std::string MakeSnapShot();

 public:  // for rpc
  void PutAppend(google::protobuf::RpcController *controller, const ::raftKVRpcProctoc::PutAppendArgs *request,
                 ::raftKVRpcProctoc::PutAppendReply *response, ::google::protobuf::Closure *done) override;

  void
Get(google::protobuf::RpcController *controller, const ::raftKVRpcProctoc::GetArgs *request,
      ::raftKVRpcProctoc::GetReply *response, ::google::protobuf::Closure *done) override;

  /////////////////serialiazation start ///////////////////////////////
  // notice : func serialize
 private:
  friend class boost::serialization::access;

  // When the class Archive corresponds to an output archive, the
  // & operator is defined similar to <<. Likewise, when the class Archive
  // is a type of input archive the & operator is defined similar to >>.
  // Fields listed here are what gets serialized/deserialized.
  // NOTE(review): template parameter list lost in extraction; presumably
  // template <class Archive>.
  template
  void serialize(Archive &ar, const unsigned int version)
  {
    ar &m_serializedKVData;

    // ar & m_kvDB;
    ar &m_lastRequestId;
  }

  // Build a snapshot blob: dump the skip list into m_serializedKVData,
  // serialize *this (which includes that string plus m_lastRequestId),
  // then clear the temporary copy.
  std::string getSnapshotData() {
    m_serializedKVData = m_skipList.dump_file();
    std::stringstream ss;
    boost::archive::text_oarchive oa(ss);
    oa << *this;
    m_serializedKVData.clear();
    return ss.str();
  }

  // Inverse of getSnapshotData: deserialize *this from str, then reload
  // the skip list from the recovered m_serializedKVData.
  void parseFromString(const std::string &str) {
    std::stringstream ss(str);
    boost::archive::text_iarchive ia(ss);
    ia >> *this;
    m_skipList.load_file(m_serializedKVData);
    m_serializedKVData.clear();
  }

  /////////////////serialiazation end ///////////////////////////////
};

#endif  // SKIP_LIST_ON_RAFT_KVSERVER_H
--------------------------------------------------------------------------------
/src/fiber/fiber.cpp:
--------------------------------------------------------------------------------
#include "fiber.hpp"
// NOTE(review): the two angle-bracketed header names below were lost in
// extraction.
#include
#include
#include "scheduler.hpp"
#include "utils.hpp"

namespace monsoon {
const bool DEBUG = true;
// The fiber currently running on this thread.
static thread_local Fiber *cur_fiber = nullptr;
// This thread's main fiber.
static thread_local Fiber::ptr cur_thread_fiber = nullptr;
// Source of fresh fiber ids.
static std::atomic cur_fiber_id{0};
//
// Counts the fibers currently alive.
static std::atomic fiber_count{0};
// Default fiber (coroutine) stack size: 128 KiB.
static int g_fiber_stack_size = 128 * 1024;

// Trivial malloc/free-backed stack allocator for fiber stacks.
class StackAllocator {
 public:
  static void *Alloc(size_t size) { return malloc(size); }
  static void Delete(void *vp, size_t size) { return free(vp); }
};

// Private default ctor: only used by GetThis() to create the thread's
// main fiber, which runs on the thread's own stack.
Fiber::Fiber() {
  SetThis(this);
  state_ = RUNNING;
  CondPanic(getcontext(&ctx_) == 0, "getcontext error");
  ++fiber_count;
  id_ = cur_fiber_id++;
  std::cout << "[fiber] create fiber , id = " << id_ << std::endl;
  //",backtrace:\n"<< BacktraceToString(6, 3, "") << std::endl;
}

// Record f as the fiber currently running on this thread.
void Fiber::SetThis(Fiber *f) { cur_fiber = f; }
// Return the currently running fiber; lazily create the thread's main
// fiber on first use.
Fiber::ptr Fiber::GetThis() {
  if (cur_fiber) {
    return cur_fiber->shared_from_this();
  }
  // Create and register the main fiber.
  Fiber::ptr main_fiber(new Fiber);
  CondPanic(cur_fiber == main_fiber.get(), "cur_fiber need to be main_fiber");
  cur_thread_fiber = main_fiber;
  return cur_fiber->shared_from_this();
}

// Sub-fiber ctor: allocates a private stack and prepares the context so
// the fiber enters MainFunc on first resume().
// NOTE(review): the parameter type of cb lost its template arguments in
// extraction; presumably std::function<void()>.
Fiber::Fiber(std::function cb, size_t stacksize, bool run_inscheduler)
    : id_(cur_fiber_id++), cb_(cb), isRunInScheduler_(run_inscheduler) {
  ++fiber_count;
  // Fall back to the default stack size when none is given.
  stackSize_ = stacksize > 0 ? stacksize : g_fiber_stack_size;
  stack_ptr = StackAllocator::Alloc(stackSize_);
  CondPanic(getcontext(&ctx_) == 0, "getcontext error");
  // Initialize the fiber's execution context.
  ctx_.uc_link = nullptr;
  ctx_.uc_stack.ss_sp = stack_ptr;
  ctx_.uc_stack.ss_size = stackSize_;
  makecontext(&ctx_, &Fiber::MainFunc, 0);

  // std::cout << "create son fiber , id = " << id_ << ",backtrace:\n"
  // << BacktraceToString(6, 3, "") << std::endl;
  // std::cout << "[fiber]create son fiber , id = " << id_ << std::endl;
}

// Switch this fiber into the RUNNING state, saving the caller's context.
void Fiber::resume() {
  CondPanic(state_ != TERM && state_ != RUNNING, "state error");
  SetThis(this);
  state_ = RUNNING;

  if (isRunInScheduler_) {
    // Fiber is driven by the scheduler: swap with the scheduler's main fiber.
    CondPanic(0 == swapcontext(&(Scheduler::GetMainFiber()->ctx_), &ctx_),
              "isRunInScheduler_ = true,swapcontext error");
  } else {
    // Swap from the thread's main fiber into this fiber, saving the main
    // fiber's context.
    CondPanic(0 == swapcontext(&(cur_thread_fiber->ctx_), &ctx_), "isRunInScheduler_ = false,swapcontext error");
  }
}

// Yield execution back to the main/scheduler fiber.
// A fiber yields automatically once its callback finishes; at that point
// its state is TERM (otherwise it is parked as READY).
void Fiber::yield() {
  CondPanic(state_ == TERM || state_ == RUNNING, "state error");
  SetThis(cur_thread_fiber.get());
  if (state_ != TERM) {
    state_ = READY;
  }
  if (isRunInScheduler_) {
    CondPanic(0 == swapcontext(&ctx_, &(Scheduler::GetMainFiber()->ctx_)),
              "isRunInScheduler_ = true,swapcontext error");
  } else {
    // Swap back to the thread's main fiber, saving this fiber's context.
    CondPanic(0 == swapcontext(&ctx_, &(cur_thread_fiber->ctx_)), "swapcontext failed");
  }
}

// Fiber entry point: runs the callback, marks the fiber TERM, then yields
// back to the main fiber.
void Fiber::MainFunc() {
  Fiber::ptr cur = GetThis();
  CondPanic(cur != nullptr, "cur is nullptr");

  cur->cb_();
  cur->cb_ = nullptr;
  cur->state_ = TERM;
  // Manually drop our shared_ptr reference so the fiber can be destroyed;
  // keep a raw pointer to finish the final yield.
  auto raw_ptr = cur.get();
  cur.reset();
  // The fiber is done: yield back to the main fiber automatically.
  //
访问原始指针原因:reset后cur已经被释放 114 | raw_ptr->yield(); 115 | } 116 | 117 | // 协程重置(复用已经结束的协程,复用其栈空间,创建新协程) 118 | // TODO:暂时不允许Ready状态下的重置 119 | void Fiber::reset(std::function cb) { 120 | CondPanic(stack_ptr, "stack is nullptr"); 121 | CondPanic(state_ == TERM, "state isn't TERM"); 122 | cb_ = cb; 123 | CondPanic(0 == getcontext(&ctx_), "getcontext failed"); 124 | ctx_.uc_link = nullptr; 125 | ctx_.uc_stack.ss_sp = stack_ptr; 126 | ctx_.uc_stack.ss_size = stackSize_; 127 | 128 | makecontext(&ctx_, &Fiber::MainFunc, 0); 129 | state_ = READY; 130 | } 131 | 132 | Fiber::~Fiber() { 133 | --fiber_count; 134 | if (stack_ptr) { 135 | // 有栈空间,说明是子协程 136 | CondPanic(state_ == TERM, "fiber state should be term"); 137 | StackAllocator::Delete(stack_ptr, stackSize_); 138 | // std::cout << "dealloc stack,id = " << id_ << std::endl; 139 | } else { 140 | // 没有栈空间,说明是线程的主协程 141 | CondPanic(!cb_, "main fiber no callback"); 142 | CondPanic(state_ == RUNNING, "main fiber state should be running"); 143 | 144 | Fiber *cur = cur_fiber; 145 | if (cur == this) { 146 | SetThis(nullptr); 147 | } 148 | } 149 | } 150 | 151 | } // namespace monsoon -------------------------------------------------------------------------------- /.clang-tidy: -------------------------------------------------------------------------------- 1 | # Generated from CLion Inspection settings 2 | --- 3 | Checks: '-*, 4 | bugprone-argument-comment, 5 | bugprone-assert-side-effect, 6 | bugprone-bad-signal-to-kill-thread, 7 | bugprone-branch-clone, 8 | bugprone-copy-constructor-init, 9 | bugprone-dangling-handle, 10 | bugprone-dynamic-static-initializers, 11 | bugprone-fold-init-type, 12 | bugprone-forward-declaration-namespace, 13 | bugprone-forwarding-reference-overload, 14 | bugprone-inaccurate-erase, 15 | bugprone-incorrect-roundings, 16 | bugprone-integer-division, 17 | bugprone-lambda-function-name, 18 | bugprone-macro-parentheses, 19 | bugprone-macro-repeated-side-effects, 20 | 
bugprone-misplaced-operator-in-strlen-in-alloc, 21 | bugprone-misplaced-pointer-arithmetic-in-alloc, 22 | bugprone-misplaced-widening-cast, 23 | bugprone-move-forwarding-reference, 24 | bugprone-multiple-statement-macro, 25 | bugprone-no-escape, 26 | bugprone-parent-virtual-call, 27 | bugprone-posix-return, 28 | bugprone-reserved-identifier, 29 | bugprone-sizeof-container, 30 | bugprone-sizeof-expression, 31 | bugprone-spuriously-wake-up-functions, 32 | bugprone-string-constructor, 33 | bugprone-string-integer-assignment, 34 | bugprone-string-literal-with-embedded-nul, 35 | bugprone-suspicious-enum-usage, 36 | bugprone-suspicious-include, 37 | bugprone-suspicious-memset-usage, 38 | bugprone-suspicious-missing-comma, 39 | bugprone-suspicious-semicolon, 40 | bugprone-suspicious-string-compare, 41 | bugprone-suspicious-memory-comparison, 42 | bugprone-suspicious-realloc-usage, 43 | bugprone-swapped-arguments, 44 | bugprone-terminating-continue, 45 | bugprone-throw-keyword-missing, 46 | bugprone-too-small-loop-variable, 47 | bugprone-undefined-memory-manipulation, 48 | bugprone-undelegated-constructor, 49 | bugprone-unhandled-self-assignment, 50 | bugprone-unused-raii, 51 | bugprone-unused-return-value, 52 | bugprone-use-after-move, 53 | bugprone-virtual-near-miss, 54 | cert-dcl21-cpp, 55 | cert-dcl58-cpp, 56 | cert-err34-c, 57 | cert-err52-cpp, 58 | cert-err60-cpp, 59 | cert-flp30-c, 60 | cert-msc50-cpp, 61 | cert-msc51-cpp, 62 | cert-str34-c, 63 | cppcoreguidelines-interfaces-global-init, 64 | cppcoreguidelines-narrowing-conversions, 65 | cppcoreguidelines-pro-type-member-init, 66 | cppcoreguidelines-pro-type-static-cast-downcast, 67 | cppcoreguidelines-slicing, 68 | google-default-arguments, 69 | google-explicit-constructor, 70 | google-runtime-operator, 71 | hicpp-exception-baseclass, 72 | hicpp-multiway-paths-covered, 73 | misc-misplaced-const, 74 | misc-new-delete-overloads, 75 | misc-no-recursion, 76 | misc-non-copyable-objects, 77 | 
misc-throw-by-value-catch-by-reference, 78 | misc-unconventional-assign-operator, 79 | misc-uniqueptr-reset-release, 80 | modernize-avoid-bind, 81 | modernize-concat-nested-namespaces, 82 | modernize-deprecated-headers, 83 | modernize-deprecated-ios-base-aliases, 84 | modernize-loop-convert, 85 | modernize-make-shared, 86 | modernize-make-unique, 87 | modernize-pass-by-value, 88 | modernize-raw-string-literal, 89 | modernize-redundant-void-arg, 90 | modernize-replace-auto-ptr, 91 | modernize-replace-disallow-copy-and-assign-macro, 92 | modernize-replace-random-shuffle, 93 | modernize-return-braced-init-list, 94 | modernize-shrink-to-fit, 95 | modernize-unary-static-assert, 96 | modernize-use-auto, 97 | modernize-use-bool-literals, 98 | modernize-use-emplace, 99 | modernize-use-equals-default, 100 | modernize-use-equals-delete, 101 | modernize-use-nodiscard, 102 | modernize-use-noexcept, 103 | modernize-use-nullptr, 104 | modernize-use-override, 105 | modernize-use-transparent-functors, 106 | modernize-use-uncaught-exceptions, 107 | mpi-buffer-deref, 108 | mpi-type-mismatch, 109 | openmp-use-default-none, 110 | performance-faster-string-find, 111 | performance-for-range-copy, 112 | performance-implicit-conversion-in-loop, 113 | performance-inefficient-algorithm, 114 | performance-inefficient-string-concatenation, 115 | performance-inefficient-vector-operation, 116 | performance-move-const-arg, 117 | performance-move-constructor-init, 118 | performance-no-automatic-move, 119 | performance-noexcept-move-constructor, 120 | performance-trivially-destructible, 121 | performance-type-promotion-in-math-fn, 122 | performance-unnecessary-copy-initialization, 123 | performance-unnecessary-value-param, 124 | portability-simd-intrinsics, 125 | readability-avoid-const-params-in-decls, 126 | readability-const-return-type, 127 | readability-container-size-empty, 128 | readability-convert-member-functions-to-static, 129 | readability-delete-null-pointer, 130 | 
readability-deleted-default, 131 | readability-inconsistent-declaration-parameter-name, 132 | readability-make-member-function-const, 133 | readability-misleading-indentation, 134 | readability-misplaced-array-index, 135 | readability-non-const-parameter, 136 | readability-redundant-control-flow, 137 | readability-redundant-declaration, 138 | readability-redundant-function-ptr-dereference, 139 | readability-redundant-smartptr-get, 140 | readability-redundant-string-cstr, 141 | readability-redundant-string-init, 142 | readability-simplify-subscript-expr, 143 | readability-static-accessed-through-instance, 144 | readability-static-definition-in-anonymous-namespace, 145 | readability-string-compare, 146 | readability-uniqueptr-delete-release, 147 | readability-use-anyofallof' -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # KVstorageBaseRaft-cpp 2 | 3 | > **本项目目前只在[知识星球](https://programmercarl.com/other/kstar.html)答疑并维护**。 4 | 5 | [代码随想录知识星球](https://programmercarl.com/other/kstar.html)分布式存储项目目前已经做了全面升级: 6 | 7 |
8 | 9 | 相对于第一版补充如下内容: 10 | 11 | 学习这个项目的前提知识: 12 | 13 | * 学习这个项目需要掌握的技能部分,做了更详细的说明。比如c++基础要求、raft的学习博客、KV的学习博客、RPC的博客提供学习。 14 | * 补充了学习raft或rpc的连接、视频等,以及RPC在本项目中运用的测试文件提供学习。 15 | 16 | 代码模块讲解补充: 17 | 18 | * 补充了部分注释,简化了原文档冗余的注释。 19 | * 增加了部分关键函数的流程解释。 20 | * 做了更清晰的目录。 21 | * 补充了RPC部分在原文档中的仓库位置,实现了方便阅读方便查看。 22 | * 对原文的内容进行整合基本原文档的部分内容有关代码部分全在该模块,并且通过目录就可以学习你想要的。 23 | 24 | ## 项目背景相关 25 | 26 | ### 背景 27 | 28 | 在当今大规模分布式系统的背景下,需要可靠、高可用性的分布式数据存储系统。 29 | 30 | 传统的集中式数据库在面对大规模数据和高并发访问时可能面临单点故障和性能瓶颈的问题。 31 | 32 | 为了解决这些问题,本项目致力于构建一种基于Raft一致性算法的分布式键值存储数据库,以确保数据的一致性、可用性和分区容错性。 33 | 34 | ### 目的 35 | 36 | 学习了Raft算法之后手动实现,**并基于此搭建了一个k-v存储的分布式数据库**。 37 | 38 | ### 解决的问题 39 | 40 | - **一致性:** 通过Raft算法确保数据的强一致性,使得系统在正常和异常情况下都能够提供一致的数据视图。 41 | - **可用性:** 通过分布式节点的复制和自动故障转移,实现高可用性,即使在部分节点故障的情况下,系统依然能够提供服务。 42 | - **分区容错:** 处理网络分区的情况,确保系统在分区恢复后能够自动合并数据一致性。 43 | 44 | ### 技术栈 45 | 46 | - **Raft一致性算法:** 作为核心算法,确保数据的一致性和容错性。 47 | - **存储引擎:** 使用适当的存储引擎作为底层存储引擎,提供高效的键值对操作。目前选择的是跳表,但是可以替换为任意k-v数据库。 48 | 49 | ### 项目范围 50 | 51 | 项目的初始版本将实现基本的Raft协议和键值存储功能。 52 | 53 | 后续版本可能包括性能优化、安全性增强、监控和管理工具的开发等。 54 | 55 | ## 前置知识储备 56 | 57 | 在学习该项目之前,必须知道的内容有: 58 | 59 | * 语言基础,比如:`mutex` ,什么是序列化和反序列化 60 | * RPC相关,至少要知道什么是RPC 61 | 62 | 最好知道的内容有: 63 | 64 | - c11的部分新特性:`auto` 、`RAII`等 65 | - 分布式的基础概念:容错、复制等 66 | 67 | ## 你的收获 68 | 69 | - Raft共识算法的快速理解 70 | - 基于共识算法怎么搭建一个分布式的k-v数据库 71 | 72 | 需要注意的是,分布式式的共识算法实现本身是一个比较严谨的过程。 73 | 74 | 因为其本身的存在是为了多个服务器之间通过共识算法达成一致性的状态,从而避免单个节点不可用而导致整个集群不可用。 75 | 76 | 因此在学习过程中必须要考虑不同情况下节点宕机、断网情况下的影响。 77 | 78 | 许多情况需要仔细思考并实验以验证算法正确性,其中的思考别人无法代替,本项目的内容**只能作为分布式共识算法Raft的一个入门的实现,方便大家快速理解Raft算法**,从而写到简历上,如果想全部理解分布式算法的精髓只能多思考多看多总结。 79 | 80 | mit6.824课程,如果你已经学习过该课程,那么已经不需要本项目了,本项目的难度和内容小于该课程。 81 | 82 | ## 最佳食用指南 83 | 84 | **关注Raft算法本身**:首先整个项目最重点也是最难点的地方就是Raft算法本身的理解与实现,其他的部分都是辅助,因此在学习的过程中也最好关注Raft算法本身的实现与Raft类对外暴露的一些接口。 85 | 86 | **多思考错误情况下的算法正确性**:Raft算法本身并不难理解,代码也并不多,但是简单的代码如何保证在复杂情况下的容错呢?需要在完成代码后多思考在代码不同运行阶段如果发生宕机等错误时的正确性。 87 | 88 | ## 项目大纲 89 | 90 | 项目的大概框图如下: 91 | 92 |
93 | 94 | 项目大概可以分为以下几个部分: 95 | 96 | - **raft节点**:raft算法实现的核心层,负责与其他机器的raft节点沟通,达到 分布式共识 的目的。 97 | - **raftServer**:负责raft节点与k-v数据库中间的协调服务;负责持久化k-v数据库的数据(可选)。 98 | - **上层状态机**(k-v数据库):负责数据存储。 99 | - **持久层**:负责相关数据的落盘,对于raft节点,根据共识算法要求,必须对一些关键数据进行落盘处理,以保证节点宕机后重启程序可以恢复关键数据;对于raftServer,可能会有一些k-v数据库的东西需要落盘持久化。 100 | - **RPC通信**:在 领导者选举、日志复制、数据查询、心跳等多个Raft重要过程中提供多节点快速简单的通信能力。 101 | 102 | > 目前规划中没有实现节点变更功能或对数据库的切片等更进阶的功能,后面考虑学习加入。 103 | 104 | 在多个机器启动后,各个机器之间通过网络通信,构建成一个集群,对这样的集群,其对外表现的就像一台单机的k-v数据库一样,且少数节点出现故障不会影响整个集群的工作。 105 | 106 | 因此有了Raft算法的集群k-v数据库相对于单机的k-v数据库: 107 | 108 | 优势:集群有了容错的能力,可以理解成Raft算法可以保证各个机器上的k-v数据库(也称状态机)以相同的顺序执行外部命令。 109 | 110 | 劣势:容错能力需要算法提供,因此程序会变得复杂;需要额外对数据进行备份;需要额外的网络通信开销。 111 | 112 | 也是因此,其实上层的k-v数据库可以替换成其他的组件,毕竟只是一个状态机而已。 113 | 114 | 目前设计的后续主要内容: 115 | 116 | 1.`Raft`算法的一些概念性内容,比如:Raft算法是什么?Raft算法怎么完成公式?完成Raft算法需要哪几个主要函数?需要哪几个主要的变量维护? 117 | 118 | 2.`Raft`算法的主要函数实现思路及代码,主要函数包括:`AppendEntries` `sendRequestVote` `sendAppendEntries` `RequestVote` 等 119 | 120 | 3.其他部分组件,包括:RPC通信组件、k-v数据库、中间沟通数据库和raft节点的`raftServer` 121 | 122 | ## 项目难点 123 | 124 | 难点就是项目主要的几个功能模块的实现。 125 | 126 | - Raft算法的理解与实现 127 | - RPC通信框架的理解与实现 128 | - k-v数据库 129 | 130 | ## 简历写法 131 | 132 | 学习完本项目,如何写到简历上呢? 133 | 134 | 在知识星球专栏里会给出本项目的简历写法,为了不让 这些写法重复率太高,所以公众号上是打码的。 135 | 136 |
137 | 138 | ## 本项目常见问题 139 | 140 | 同时项目专栏也会针对本项目的常见问题,进行归类总结,并持续更新 141 | 142 |
143 | 144 | ## 项目优化点 145 | 146 | 大家做这个项目,完成基础功能之后,可以按照如下方向继续优化这个项目: 147 | 148 |
149 | 150 | ## 项目专栏部分截图 151 | 152 |
153 | 154 |
155 | 156 |
157 | 158 | ------------------- 159 | 160 | ## 分布式存储项目专栏 161 | 162 | **本文档仅为星球内部专享,大家可以加入[知识星球](https://programmercarl.com/other/kstar.html)里获取,在星球置顶一** 163 | 164 | ## Star History 165 | 166 | 167 | 168 | 169 | 170 | Star History Chart 171 | 172 | 173 | 174 | 175 | -------------------------------------------------------------------------------- /src/fiber/timer.cpp: -------------------------------------------------------------------------------- 1 | #include "timer.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace monsoon { 5 | bool Timer::Comparator::operator()(const Timer::ptr &lhs, const Timer::ptr &rhs) const { 6 | if (!lhs && !rhs) { 7 | return false; 8 | } 9 | if (!lhs) { 10 | return true; 11 | } 12 | if (!rhs) { 13 | return false; 14 | } 15 | if (lhs->next_ < rhs->next_) { 16 | return true; 17 | } 18 | if (rhs->next_ < lhs->next_) { 19 | return false; 20 | } 21 | return lhs.get() < rhs.get(); 22 | } 23 | Timer::Timer(uint64_t ms, std::function cb, bool recuring, TimerManager *manager) 24 | : recurring_(recuring), ms_(ms), cb_(cb), manager_(manager) { 25 | next_ = GetElapsedMS() + ms_; 26 | } 27 | Timer::Timer(uint64_t next) : next_(next) {} 28 | bool Timer::cancel() { 29 | RWMutex::WriteLock lock(manager_->mutex_); 30 | if (cb_) { 31 | cb_ = nullptr; 32 | auto it = manager_->timers_.find(shared_from_this()); 33 | manager_->timers_.erase(it); 34 | return true; 35 | } 36 | return false; 37 | } 38 | bool Timer::refresh() { 39 | RWMutex::RWMutex::WriteLock lock(manager_->mutex_); 40 | if (!cb_) { 41 | return false; 42 | } 43 | auto it = manager_->timers_.find(shared_from_this()); 44 | if (it == manager_->timers_.end()) { 45 | return false; 46 | } 47 | manager_->timers_.erase(it); 48 | next_ = GetElapsedMS() + ms_; 49 | manager_->timers_.insert(shared_from_this()); 50 | return true; 51 | } 52 | 53 | // 重置定时器,重新设置定时器触发时间 54 | // from_now = true: 下次出发时间从当前时刻开始计算 55 | // from_now = false: 下次触发时间从上一次开始计算 56 | bool Timer::reset(uint64_t ms, bool from_now) { 57 | if (ms 
== ms_ && !from_now) {
    // Same period and counting from the previous start: nothing to do.
    return true;
  }
  RWMutex::WriteLock lock(manager_->mutex_);
  if (!cb_) {
    // Timer was already cancelled.
    return true;
  }
  auto it = manager_->timers_.find(shared_from_this());
  if (it == manager_->timers_.end()) {
    return false;
  }
  // Re-key the timer: remove, recompute next_, re-insert via addTimer so
  // the manager can be notified if it lands at the front.
  manager_->timers_.erase(it);
  uint64_t start = 0;
  if (from_now) {
    start = GetElapsedMS();
  } else {
    start = next_ - ms_;
  }
  ms_ = ms;
  next_ = start + ms_;
  manager_->addTimer(shared_from_this(), lock);
  return true;
}

TimerManager::TimerManager() { previouseTime_ = GetElapsedMS(); }

TimerManager::~TimerManager() {}

// Create a timer firing in ms milliseconds and register it.
// NOTE(review): cb's type lost its template arguments in extraction;
// presumably std::function<void()>.
Timer::ptr TimerManager::addTimer(uint64_t ms, std::function cb, bool recurring) {
  Timer::ptr timer(new Timer(ms, cb, recurring, this));
  RWMutex::WriteLock lock(mutex_);
  addTimer(timer, lock);
  return timer;
}

// Wrapper callback: only invoke cb while the weak condition is still alive.
static void OnTimer(std::weak_ptr weak_cond, std::function cb) {
  std::shared_ptr tmp = weak_cond.lock();
  if (tmp) {
    cb();
  }
}

// Add a timer whose callback only fires while weak_cond is still alive.
Timer::ptr TimerManager::addConditionTimer(uint64_t ms, std::function cb, std::weak_ptr weak_cond,
                                           bool recurring) {
  return addTimer(ms, std::bind(&OnTimer, weak_cond, cb), recurring);
}

// Milliseconds until the earliest timer fires: ~0ull when there is no
// timer, 0 when the earliest timer is already due.
uint64_t TimerManager::getNextTimer() {
  RWMutex::ReadLock lock(mutex_);
  tickled_ = false;
  if (timers_.empty()) {
    return ~0ull;
  }
  const Timer::ptr &next = *timers_.begin();
  uint64_t now_ms = GetElapsedMS();
  if (now_ms >= next->next_) {
    return 0;
  } else {
    return next->next_ - now_ms;
  }
}

// Collect the callbacks of all expired timers into cbs; recurring timers
// are re-armed, one-shot timers are cleared.
void TimerManager::listExpiredCb(std::vector> &cbs) {
  uint64_t now_ms = GetElapsedMS();
  std::vector expired;
  {
    // Cheap read-locked early-out before taking the write lock.
    RWMutex::ReadLock lock(mutex_);
    if (timers_.empty()) {
      return;
    }
  }
  RWMutex::WriteLock lock(mutex_);
  if (timers_.empty()) {
    return;
  }
  bool
rollover = false;
  // A clock rollover invalidates every deadline, so treat all timers as due.
  if (detectClockRolllover(now_ms)) {
    rollover = true;
  }
  if (!rollover && ((*timers_.begin())->next_ > now_ms)) {
    // Earliest timer is still in the future: nothing expired.
    return;
  }

  // Sentinel timer keyed at now_ms to split the set into expired/pending.
  Timer::ptr now_timer(new Timer(now_ms));
  auto it = rollover ? timers_.end() : timers_.lower_bound(now_timer);
  // Also include timers whose deadline equals now_ms exactly.
  while (it != timers_.end() && (*it)->next_ == now_ms) {
    ++it;
  }
  expired.insert(expired.begin(), timers_.begin(), it);
  timers_.erase(timers_.begin(), it);

  cbs.reserve(expired.size());
  for (auto &timer : expired) {
    cbs.push_back(timer->cb_);
    if (timer->recurring_) {
      // Recurring timer: compute the next deadline and put it back.
      timer->next_ = now_ms + timer->ms_;
      timers_.insert(timer);
    } else {
      timer->cb_ = nullptr;
    }
  }
}

// Insert a registered timer; the caller passes in the held write lock,
// which is released BEFORE OnTimerInsertedAtFront() so the notification
// does not run under the lock.
void TimerManager::addTimer(Timer::ptr val, RWMutex::WriteLock &lock) {
  auto it = timers_.insert(val).first;
  // Only notify once per "front changed" episode (tickled_ debounces).
  bool at_front = (it == timers_.begin()) && !tickled_;
  if (at_front) {
    tickled_ = true;
  }
  lock.unlock();
  if (at_front) {
    OnTimerInsertedAtFront();
  }
}

// Detect a backwards clock jump: only treat it as a rollover when time
// went back by more than one hour.
bool TimerManager::detectClockRolllover(uint64_t now_ms) {
  bool rollover = false;
  if (now_ms < previouseTime_ && now_ms < (previouseTime_ - 60 * 60 * 1000)) {
    rollover = true;
  }
  previouseTime_ = now_ms;
  return rollover;
}

// Whether any timer is registered.
bool TimerManager::hasTimer() {
  RWMutex::ReadLock lock(mutex_);
  return !timers_.empty();
}

}  // namespace monsoon
--------------------------------------------------------------------------------
/src/fiber/scheduler.cpp:
--------------------------------------------------------------------------------
#include "scheduler.hpp"
#include "fiber.hpp"
#include "hook.hpp"

namespace monsoon {
// The scheduler owning the current thread; all threads belonging to one
// scheduler share the same instance (thread-level scheduler pointer).
static thread_local Scheduler *cur_scheduler = nullptr;
// The current thread's scheduling fiber, one per thread.
static
thread_local Fiber *cur_scheduler_fiber = nullptr;

const std::string LOG_HEAD = "[scheduler] ";

// Construct the scheduler. When use_caller is true the calling thread is
// itself used as one of the scheduling threads, so one fewer worker thread
// is created and the caller gets a dedicated scheduling fiber (rootFiber_).
Scheduler::Scheduler(size_t threads, bool use_caller, const std::string &name) {
  CondPanic(threads > 0, "threads <= 0");

  isUseCaller_ = use_caller;
  name_ = name;

  // use_caller: whether the current thread also participates as a
  // scheduling thread.
  if (use_caller) {
    std::cout << LOG_HEAD << "current thread as called thread" << std::endl;
    // The caller counts as one scheduling thread.
    --threads;
    // Initialize the caller thread's main fiber.
    Fiber::GetThis();
    std::cout << LOG_HEAD << "init caller thread's main fiber success" << std::endl;
    CondPanic(GetThis() == nullptr, "GetThis err:cur scheduler is not nullptr");
    // Bind this scheduler to the caller thread.
    cur_scheduler = this;
    // Create the caller thread's scheduling fiber. It is NOT scheduled by
    // the scheduler itself (run_inscheduler = false); when scheduling ends
    // it returns to the caller's main fiber.
    rootFiber_.reset(new Fiber(std::bind(&Scheduler::run, this), 0, false));
    std::cout << LOG_HEAD << "init caller thread's caller fiber success" << std::endl;

    Thread::SetName(name_);
    cur_scheduler_fiber = rootFiber_.get();
    rootThread_ = GetThreadId();
    threadIds_.push_back(rootThread_);
  } else {
    rootThread_ = -1;
  }
  threadCnt_ = threads;
  std::cout << "-------scheduler init success-------" << std::endl;
}

Scheduler *Scheduler::GetThis() { return cur_scheduler; }
Fiber *Scheduler::GetMainFiber() { return cur_scheduler_fiber; }
void Scheduler::setThis() { cur_scheduler = this; }
Scheduler::~Scheduler() {
  // Destroying a still-running scheduler is a programming error.
  CondPanic(isStopped_, "isstopped is false");
  if (GetThis() == this) {
    cur_scheduler = nullptr;
  }
}

// Start the scheduler: spin up the worker thread pool, each thread
// running Scheduler::run().
void Scheduler::start() {
  std::cout << LOG_HEAD << "scheduler start" << std::endl;
  Mutex::Lock lock(mutex_);
  if (isStopped_) {
    std::cout << "scheduler has stopped" << std::endl;
    return;
  }
  CondPanic(threadPool_.empty(), "thread pool is not empty");
  threadPool_.resize(threadCnt_);
  for (size_t i = 0;
i < threadCnt_; i++) {
    threadPool_[i].reset(new Thread(std::bind(&Scheduler::run, this), name_ + "_" + std::to_string(i)));
    threadIds_.push_back(threadPool_[i]->getId());
  }
}

// Main scheduling loop, executed by every scheduling thread: repeatedly
// pick a runnable task off the queue and resume it; fall back to the idle
// fiber when the queue is empty.
void Scheduler::run() {
  std::cout << LOG_HEAD << "begin run" << std::endl;
  set_hook_enable(true);
  setThis();
  if (GetThreadId() != rootThread_) {
    // Not the caller thread: create this thread's scheduling fiber.
    cur_scheduler_fiber = Fiber::GetThis().get();
  }

  // Fiber to run when there is no work.
  Fiber::ptr idleFiber(new Fiber(std::bind(&Scheduler::idle, this)));
  // Reusable fiber for plain-callback tasks.
  Fiber::ptr cbFiber;

  SchedulerTask task;
  while (true) {
    task.reset();
    // Whether other threads must be notified to pick up work.
    bool tickle_me = false;
    {
      Mutex::Lock lock(mutex_);
      auto it = tasks_.begin();
      while (it != tasks_.end()) {
        // Task is pinned to another thread: leave it, remember to tickle
        // the others, and keep scanning.
        if (it->thread_ != -1 && it->thread_ != GetThreadId()) {
          ++it;
          tickle_me = true;
          continue;
        }
        CondPanic(it->fiber_ || it->cb_, "task is nullptr");
        if (it->fiber_) {
          CondPanic(it->fiber_->getState() == Fiber::READY, "fiber task state error");
        }
        // Found a runnable task: dequeue it and count this thread active.
        task = *it;
        tasks_.erase(it++);
        ++activeThreadCnt_;
        break;
      }
      // If tasks remain after we took one, let the other threads know.
      tickle_me |= (it != tasks_.end());
    }
    if (tickle_me) {
      tickle();
    }

    if (task.fiber_) {
      // Run the fiber task to its next yield/termination.
      task.fiber_->resume();
      // Done for now.
      --activeThreadCnt_;
      task.reset();
    } else if (task.cb_) {
      // Wrap the plain callback in a (reused) fiber and run it.
      if (cbFiber) {
        cbFiber->reset(task.cb_);
      } else {
        cbFiber.reset(new Fiber(task.cb_));
      }
      task.reset();
      cbFiber->resume();
      --activeThreadCnt_;
      cbFiber.reset();
    } else {
      // Task queue is empty.
      if (idleFiber->getState() == Fiber::TERM) {
        std::cout << "idle fiber term" << std::endl;
        break;
      }
      // Keep spinning in the idle fiber until work arrives.
142 | ++idleThreadCnt_; 143 | idleFiber->resume(); 144 | --idleThreadCnt_; 145 | } 146 | } 147 | std::cout << "run exit" << std::endl; 148 | } 149 | 150 | void Scheduler::tickle() { std::cout << "tickle" << std::endl; } 151 | 152 | bool Scheduler::stopping() { 153 | Mutex::Lock lock(mutex_); 154 | return isStopped_ && tasks_.empty() && activeThreadCnt_ == 0; 155 | } 156 | 157 | void Scheduler::idle() { 158 | while (!stopping()) { 159 | Fiber::GetThis()->yield(); 160 | } 161 | } 162 | 163 | // 使用caller线程,则调度线程依赖stop()来执行caller线程的调度协程 164 | // 不使用caller线程,只用caller线程去调度,则调度器真正开始执行的位置是stop() 165 | void Scheduler::stop() { 166 | std::cout << LOG_HEAD << "stop" << std::endl; 167 | if (stopping()) { 168 | return; 169 | } 170 | isStopped_ = true; 171 | 172 | // stop指令只能由caller线程发起 173 | if (isUseCaller_) { 174 | CondPanic(GetThis() == this, "cur thread is not caller thread"); 175 | } else { 176 | CondPanic(GetThis() != this, "cur thread is caller thread"); 177 | } 178 | 179 | for (size_t i = 0; i < threadCnt_; i++) { 180 | tickle(); 181 | } 182 | if (rootFiber_) { 183 | tickle(); 184 | } 185 | 186 | // 在user_caller情况下,调度器协程(rootFiber)结束后,应该返回caller协程 187 | if (rootFiber_) { 188 | // 切换到调度协程,开始调度 189 | rootFiber_->resume(); 190 | std::cout << "root fiber end" << std::endl; 191 | } 192 | 193 | std::vector threads; 194 | { 195 | Mutex::Lock lock(mutex_); 196 | threads.swap(threadPool_); 197 | } 198 | for (auto &i : threads) { 199 | i->join(); 200 | } 201 | } 202 | 203 | } // namespace monsoon 204 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | 2 | # from https://github.com/OpenAtomFoundation/pikiwidb/blob/5ea7bc0949cbd49122633cf086322143121c5985/.clang-format 3 | Language: Cpp 4 | # BasedOnStyle: Google 5 | AccessModifierOffset: -1 6 | AlignAfterOpenBracket: Align 7 | AlignConsecutiveMacros: None 8 | AlignConsecutiveAssignments: None 9 | 
AlignConsecutiveBitFields: None 10 | AlignConsecutiveDeclarations: None 11 | AlignEscapedNewlines: Left 12 | AlignOperands: Align 13 | AlignTrailingComments: true 14 | AllowAllArgumentsOnNextLine: true 15 | AllowAllConstructorInitializersOnNextLine: true 16 | AllowAllParametersOfDeclarationOnNextLine: true 17 | AllowShortEnumsOnASingleLine: true 18 | AllowShortBlocksOnASingleLine: Never 19 | AllowShortCaseLabelsOnASingleLine: false 20 | AllowShortFunctionsOnASingleLine: All 21 | AllowShortLambdasOnASingleLine: All 22 | AllowShortIfStatementsOnASingleLine: WithoutElse 23 | AllowShortLoopsOnASingleLine: true 24 | AlwaysBreakAfterDefinitionReturnType: None 25 | AlwaysBreakAfterReturnType: None 26 | AlwaysBreakBeforeMultilineStrings: true 27 | AlwaysBreakTemplateDeclarations: Yes 28 | AttributeMacros: 29 | - __capability 30 | BinPackArguments: true 31 | BinPackParameters: true 32 | BraceWrapping: 33 | AfterCaseLabel: false 34 | AfterClass: false 35 | AfterControlStatement: Never 36 | AfterEnum: false 37 | AfterFunction: false 38 | AfterNamespace: false 39 | AfterObjCDeclaration: false 40 | AfterStruct: false 41 | AfterUnion: false 42 | AfterExternBlock: false 43 | BeforeCatch: false 44 | BeforeElse: false 45 | BeforeLambdaBody: false 46 | BeforeWhile: false 47 | IndentBraces: false 48 | SplitEmptyFunction: true 49 | SplitEmptyRecord: true 50 | SplitEmptyNamespace: true 51 | BreakBeforeBinaryOperators: None 52 | BreakBeforeConceptDeclarations: true 53 | BreakBeforeBraces: Attach 54 | BreakBeforeInheritanceComma: false 55 | BreakInheritanceList: BeforeColon 56 | BreakBeforeTernaryOperators: true 57 | BreakConstructorInitializersBeforeComma: false 58 | BreakConstructorInitializers: BeforeColon 59 | BreakAfterJavaFieldAnnotations: false 60 | BreakStringLiterals: true 61 | ColumnLimit: 120 62 | CommentPragmas: '^ IWYU pragma:' 63 | CompactNamespaces: false 64 | ConstructorInitializerAllOnOneLineOrOnePerLine: true 65 | ConstructorInitializerIndentWidth: 4 66 | 
ContinuationIndentWidth: 4 67 | Cpp11BracedListStyle: true 68 | DeriveLineEnding: true 69 | DerivePointerAlignment: true 70 | DisableFormat: false 71 | EmptyLineBeforeAccessModifier: LogicalBlock 72 | ExperimentalAutoDetectBinPacking: false 73 | FixNamespaceComments: true 74 | ForEachMacros: 75 | - foreach 76 | - Q_FOREACH 77 | - BOOST_FOREACH 78 | StatementAttributeLikeMacros: 79 | - Q_EMIT 80 | IncludeBlocks: Preserve 81 | IncludeCategories: 82 | - Regex: '^' 83 | Priority: 2 84 | SortPriority: 0 85 | CaseSensitive: false 86 | - Regex: '^<.*\.h>' 87 | Priority: 1 88 | SortPriority: 0 89 | CaseSensitive: false 90 | - Regex: '^<.*' 91 | Priority: 2 92 | SortPriority: 0 93 | CaseSensitive: false 94 | - Regex: '.*' 95 | Priority: 3 96 | SortPriority: 0 97 | CaseSensitive: false 98 | IncludeIsMainRegex: '([-_](test|unittest))?$' 99 | IncludeIsMainSourceRegex: '' 100 | IndentCaseLabels: true 101 | IndentCaseBlocks: false 102 | IndentGotoLabels: true 103 | IndentPPDirectives: AfterHash 104 | IndentExternBlock: AfterExternBlock 105 | IndentWidth: 2 106 | IndentWrappedFunctionNames: false 107 | InsertTrailingCommas: None 108 | JavaScriptQuotes: Leave 109 | JavaScriptWrapImports: true 110 | KeepEmptyLinesAtTheStartOfBlocks: false 111 | MacroBlockBegin: '' 112 | MacroBlockEnd: '' 113 | MaxEmptyLinesToKeep: 1 114 | NamespaceIndentation: None 115 | ObjCBinPackProtocolList: Never 116 | ObjCBlockIndentWidth: 2 117 | ObjCBreakBeforeNestedBlockParam: true 118 | ObjCSpaceAfterProperty: false 119 | ObjCSpaceBeforeProtocolList: true 120 | PenaltyBreakAssignment: 2 121 | PenaltyBreakBeforeFirstCallParameter: 1 122 | PenaltyBreakComment: 300 123 | PenaltyBreakFirstLessLess: 120 124 | PenaltyBreakString: 1000 125 | PenaltyBreakTemplateDeclaration: 10 126 | PenaltyExcessCharacter: 1000000 127 | PenaltyReturnTypeOnItsOwnLine: 200 128 | PenaltyIndentedWhitespace: 0 129 | PointerAlignment: Left 130 | RawStringFormats: 131 | - Language: Cpp 132 | Delimiters: 133 | - cc 134 | - CC 135 | - 
cpp 136 | - Cpp 137 | - CPP 138 | - 'c++' 139 | - 'C++' 140 | CanonicalDelimiter: '' 141 | BasedOnStyle: google 142 | - Language: TextProto 143 | Delimiters: 144 | - pb 145 | - PB 146 | - proto 147 | - PROTO 148 | EnclosingFunctions: 149 | - EqualsProto 150 | - EquivToProto 151 | - PARSE_PARTIAL_TEXT_PROTO 152 | - PARSE_TEST_PROTO 153 | - PARSE_TEXT_PROTO 154 | - ParseTextOrDie 155 | - ParseTextProtoOrDie 156 | - ParseTestProto 157 | - ParsePartialTestProto 158 | CanonicalDelimiter: '' 159 | BasedOnStyle: google 160 | ReflowComments: true 161 | SortIncludes: true 162 | SortJavaStaticImport: Before 163 | SortUsingDeclarations: true 164 | SpaceAfterCStyleCast: false 165 | SpaceAfterLogicalNot: false 166 | SpaceAfterTemplateKeyword: true 167 | SpaceBeforeAssignmentOperators: true 168 | SpaceBeforeCaseColon: false 169 | SpaceBeforeCpp11BracedList: false 170 | SpaceBeforeCtorInitializerColon: true 171 | SpaceBeforeInheritanceColon: true 172 | SpaceBeforeParens: ControlStatements 173 | SpaceAroundPointerQualifiers: Default 174 | SpaceBeforeRangeBasedForLoopColon: true 175 | SpaceInEmptyBlock: false 176 | SpaceInEmptyParentheses: false 177 | SpacesBeforeTrailingComments: 2 178 | SpacesInAngles: false 179 | SpacesInConditionalStatement: false 180 | SpacesInContainerLiterals: true 181 | SpacesInCStyleCastParentheses: false 182 | SpacesInParentheses: false 183 | SpacesInSquareBrackets: false 184 | SpaceBeforeSquareBrackets: false 185 | BitFieldColonSpacing: Both 186 | Standard: Auto 187 | StatementMacros: 188 | - Q_UNUSED 189 | - QT_REQUIRE_VERSION 190 | TabWidth: 8 191 | UseCRLF: false 192 | UseTab: Never 193 | WhitespaceSensitiveMacros: 194 | - STRINGIZE 195 | - PP_STRINGIZE 196 | - BOOST_PP_STRINGIZE 197 | - NS_SWIFT_NAME 198 | - CF_SWIFT_NAME 199 | -------------------------------------------------------------------------------- /src/rpc/mprpcchannel.cpp: -------------------------------------------------------------------------------- 1 | #include "mprpcchannel.h" 2 | 
#include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "mprpccontroller.h" 9 | #include "rpcheader.pb.h" 10 | #include "util.h" 11 | 12 | /* 13 | header_size + service_name method_name args_size + args 14 | */ 15 | // 所有通过stub代理对象调用的rpc方法,都会走到这里了, 16 | // 统一通过rpcChannel来调用方法 17 | // 统一做rpc方法调用的数据数据序列化和网络发送 18 | void MprpcChannel::CallMethod(const google::protobuf::MethodDescriptor* method, 19 | google::protobuf::RpcController* controller, const google::protobuf::Message* request, 20 | google::protobuf::Message* response, google::protobuf::Closure* done) { 21 | if (m_clientFd == -1) { 22 | std::string errMsg; 23 | bool rt = newConnect(m_ip.c_str(), m_port, &errMsg); 24 | if (!rt) { 25 | DPrintf("[func-MprpcChannel::CallMethod]重连接ip:{%s} port{%d}失败", m_ip.c_str(), m_port); 26 | controller->SetFailed(errMsg); 27 | return; 28 | } else { 29 | DPrintf("[func-MprpcChannel::CallMethod]连接ip:{%s} port{%d}成功", m_ip.c_str(), m_port); 30 | } 31 | } 32 | 33 | const google::protobuf::ServiceDescriptor* sd = method->service(); 34 | std::string service_name = sd->name(); // service_name 35 | std::string method_name = method->name(); // method_name 36 | 37 | // 获取参数的序列化字符串长度 args_size 38 | uint32_t args_size{}; 39 | std::string args_str; 40 | if (request->SerializeToString(&args_str)) { 41 | args_size = args_str.size(); 42 | } else { 43 | controller->SetFailed("serialize request error!"); 44 | return; 45 | } 46 | RPC::RpcHeader rpcHeader; 47 | rpcHeader.set_service_name(service_name); 48 | rpcHeader.set_method_name(method_name); 49 | rpcHeader.set_args_size(args_size); 50 | 51 | std::string rpc_header_str; 52 | if (!rpcHeader.SerializeToString(&rpc_header_str)) { 53 | controller->SetFailed("serialize rpc header error!"); 54 | return; 55 | } 56 | 57 | // 使用protobuf的CodedOutputStream来构建发送的数据流 58 | std::string send_rpc_str; // 用来存储最终发送的数据 59 | { 60 | // 创建一个StringOutputStream用于写入send_rpc_str 61 | google::protobuf::io::StringOutputStream 
string_output(&send_rpc_str); 62 | google::protobuf::io::CodedOutputStream coded_output(&string_output); 63 | 64 | // 先写入header的长度(变长编码) 65 | coded_output.WriteVarint32(static_cast(rpc_header_str.size())); 66 | 67 | // 不需要手动写入header_size,因为上面的WriteVarint32已经包含了header的长度信息 68 | // 然后写入rpc_header本身 69 | coded_output.WriteString(rpc_header_str); 70 | } 71 | 72 | // 最后,将请求参数附加到send_rpc_str后面 73 | send_rpc_str += args_str; 74 | 75 | // 打印调试信息 76 | // std::cout << "============================================" << std::endl; 77 | // std::cout << "header_size: " << header_size << std::endl; 78 | // std::cout << "rpc_header_str: " << rpc_header_str << std::endl; 79 | // std::cout << "service_name: " << service_name << std::endl; 80 | // std::cout << "method_name: " << method_name << std::endl; 81 | // std::cout << "args_str: " << args_str << std::endl; 82 | // std::cout << "============================================" << std::endl; 83 | 84 | // 发送rpc请求 85 | //失败会重试连接再发送,重试连接失败会直接return 86 | while (-1 == send(m_clientFd, send_rpc_str.c_str(), send_rpc_str.size(), 0)) { 87 | char errtxt[512] = {0}; 88 | sprintf(errtxt, "send error! errno:%d", errno); 89 | std::cout << "尝试重新连接,对方ip:" << m_ip << " 对方端口" << m_port << std::endl; 90 | close(m_clientFd); 91 | m_clientFd = -1; 92 | std::string errMsg; 93 | bool rt = newConnect(m_ip.c_str(), m_port, &errMsg); 94 | if (!rt) { 95 | controller->SetFailed(errMsg); 96 | return; 97 | } 98 | } 99 | /* 100 | 从时间节点来说,这里将请求发送过去之后rpc服务的提供者就会开始处理,返回的时候就代表着已经返回响应了 101 | */ 102 | 103 | // 接收rpc请求的响应值 104 | char recv_buf[1024] = {0}; 105 | int recv_size = 0; 106 | if (-1 == (recv_size = recv(m_clientFd, recv_buf, 1024, 0))) { 107 | close(m_clientFd); 108 | m_clientFd = -1; 109 | char errtxt[512] = {0}; 110 | sprintf(errtxt, "recv error! 
errno:%d", errno); 111 | controller->SetFailed(errtxt); 112 | return; 113 | } 114 | 115 | // 反序列化rpc调用的响应数据 116 | // std::string response_str(recv_buf, 0, recv_size); // 117 | // bug:出现问题,recv_buf中遇到\0后面的数据就存不下来了,导致反序列化失败 if 118 | // (!response->ParseFromString(response_str)) 119 | if (!response->ParseFromArray(recv_buf, recv_size)) { 120 | char errtxt[1050] = {0}; 121 | sprintf(errtxt, "parse error! response_str:%s", recv_buf); 122 | controller->SetFailed(errtxt); 123 | return; 124 | } 125 | } 126 | 127 | bool MprpcChannel::newConnect(const char* ip, uint16_t port, string* errMsg) { 128 | int clientfd = socket(AF_INET, SOCK_STREAM, 0); 129 | if (-1 == clientfd) { 130 | char errtxt[512] = {0}; 131 | sprintf(errtxt, "create socket error! errno:%d", errno); 132 | m_clientFd = -1; 133 | *errMsg = errtxt; 134 | return false; 135 | } 136 | 137 | struct sockaddr_in server_addr; 138 | server_addr.sin_family = AF_INET; 139 | server_addr.sin_port = htons(port); 140 | server_addr.sin_addr.s_addr = inet_addr(ip); 141 | // 连接rpc服务节点 142 | if (-1 == connect(clientfd, (struct sockaddr*)&server_addr, sizeof(server_addr))) { 143 | close(clientfd); 144 | char errtxt[512] = {0}; 145 | sprintf(errtxt, "connect fail! 
errno:%d", errno); 146 | m_clientFd = -1; 147 | *errMsg = errtxt; 148 | return false; 149 | } 150 | m_clientFd = clientfd; 151 | return true; 152 | } 153 | 154 | MprpcChannel::MprpcChannel(string ip, short port, bool connectNow) : m_ip(ip), m_port(port), m_clientFd(-1) { 155 | // 使用tcp编程,完成rpc方法的远程调用,使用的是短连接,因此每次都要重新连接上去,待改成长连接。 156 | // 没有连接或者连接已经断开,那么就要重新连接呢,会一直不断地重试 157 | // 读取配置文件rpcserver的信息 158 | // std::string ip = MprpcApplication::GetInstance().GetConfig().Load("rpcserverip"); 159 | // uint16_t port = atoi(MprpcApplication::GetInstance().GetConfig().Load("rpcserverport").c_str()); 160 | // rpc调用方想调用service_name的method_name服务,需要查询zk上该服务所在的host信息 161 | // /UserServiceRpc/Login 162 | if (!connectNow) { 163 | return; 164 | } //可以允许延迟连接 165 | std::string errMsg; 166 | auto rt = newConnect(ip.c_str(), port, &errMsg); 167 | int tryCount = 3; 168 | while (!rt && tryCount--) { 169 | std::cout << errMsg << std::endl; 170 | rt = newConnect(ip.c_str(), port, &errMsg); 171 | } 172 | } -------------------------------------------------------------------------------- /src/common/include/util.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef UTIL_H 3 | #define UTIL_H 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include // pthread_condition_t 12 | #include 13 | #include 14 | #include // pthread_mutex_t 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include "config.h" 20 | 21 | template 22 | class DeferClass { 23 | public: 24 | DeferClass(F&& f) : m_func(std::forward(f)) {} 25 | DeferClass(const F& f) : m_func(f) {} 26 | ~DeferClass() { m_func(); } 27 | 28 | DeferClass(const DeferClass& e) = delete; 29 | DeferClass& operator=(const DeferClass& e) = delete; 30 | 31 | private: 32 | F m_func; 33 | }; 34 | 35 | #define _CONCAT(a, b) a##b 36 | #define _MAKE_DEFER_(line) DeferClass _CONCAT(defer_placeholder, line) = [&]() 37 | 38 | #undef DEFER 39 | #define DEFER 
_MAKE_DEFER_(__LINE__) 40 | 41 | void DPrintf(const char* format, ...); 42 | 43 | void myAssert(bool condition, std::string message = "Assertion failed!"); 44 | 45 | template 46 | std::string format(const char* format_str, Args... args) { 47 | int size_s = std::snprintf(nullptr, 0, format_str, args...) + 1; // "\0" 48 | if (size_s <= 0) { throw std::runtime_error("Error during formatting."); } 49 | auto size = static_cast(size_s); 50 | std::vector buf(size); 51 | std::snprintf(buf.data(), size, format_str, args...); 52 | return std::string(buf.data(), buf.data() + size - 1); // remove '\0' 53 | } 54 | 55 | std::chrono::_V2::system_clock::time_point now(); 56 | 57 | std::chrono::milliseconds getRandomizedElectionTimeout(); 58 | void sleepNMilliseconds(int N); 59 | 60 | // ////////////////////////异步写日志的日志队列 61 | // read is blocking!!! LIKE go chan 62 | template 63 | class LockQueue { 64 | public: 65 | // 多个worker线程都会写日志queue 66 | void Push(const T& data) { 67 | std::lock_guard lock(m_mutex); //使用lock_gurad,即RAII的思想保证锁正确释放 68 | m_queue.push(data); 69 | m_condvariable.notify_one(); 70 | } 71 | 72 | // 一个线程读日志queue,写日志文件 73 | T Pop() { 74 | std::unique_lock lock(m_mutex); 75 | while (m_queue.empty()) { 76 | // 日志队列为空,线程进入wait状态 77 | m_condvariable.wait(lock); //这里用unique_lock是因为lock_guard不支持解锁,而unique_lock支持 78 | } 79 | T data = m_queue.front(); 80 | m_queue.pop(); 81 | return data; 82 | } 83 | 84 | bool timeOutPop(int timeout, T* ResData) // 添加一个超时时间参数,默认为 50 毫秒 85 | { 86 | std::unique_lock lock(m_mutex); 87 | 88 | // 获取当前时间点,并计算出超时时刻 89 | auto now = std::chrono::system_clock::now(); 90 | auto timeout_time = now + std::chrono::milliseconds(timeout); 91 | 92 | // 在超时之前,不断检查队列是否为空 93 | while (m_queue.empty()) { 94 | // 如果已经超时了,就返回一个空对象 95 | if (m_condvariable.wait_until(lock, timeout_time) == std::cv_status::timeout) { 96 | return false; 97 | } else { 98 | continue; 99 | } 100 | } 101 | 102 | T data = m_queue.front(); 103 | m_queue.pop(); 104 | *ResData = data; 105 | 
return true; 106 | } 107 | 108 | private: 109 | std::queue m_queue; 110 | std::mutex m_mutex; 111 | std::condition_variable m_condvariable; 112 | }; 113 | // 两个对锁的管理用到了RAII的思想,防止中途出现问题而导致资源无法释放的问题!!! 114 | // std::lock_guard 和 std::unique_lock 都是 C++11 中用来管理互斥锁的工具类,它们都封装了 RAII(Resource Acquisition Is 115 | // Initialization)技术,使得互斥锁在需要时自动加锁,在不需要时自动解锁,从而避免了很多手动加锁和解锁的繁琐操作。 116 | // std::lock_guard 是一个模板类,它的模板参数是一个互斥量类型。当创建一个 std::lock_guard 117 | // 对象时,它会自动地对传入的互斥量进行加锁操作,并在该对象被销毁时对互斥量进行自动解锁操作。std::lock_guard 118 | // 不能手动释放锁,因为其所提供的锁的生命周期与其绑定对象的生命周期一致。 std::unique_lock 119 | // 也是一个模板类,同样的,其模板参数也是互斥量类型。不同的是,std::unique_lock 提供了更灵活的锁管理功能。可以通过 120 | // lock()、unlock()、try_lock() 等方法手动控制锁的状态。当然,std::unique_lock 也支持 RAII 121 | // 技术,即在对象被销毁时会自动解锁。另外, std::unique_lock 还支持超时等待和可中断等待的操作。 122 | 123 | // 这个Op是kv传递给raft的command 124 | class Op { 125 | public: 126 | // Your definitions here. 127 | // Field names must start with capital letters, 128 | // otherwise RPC will break. 129 | std::string Operation; // "Get" "Put" "Append" 130 | std::string Key; 131 | std::string Value; 132 | std::string ClientId; //客户端号码 133 | int RequestId; //客户端号码请求的Request的序列号,为了保证线性一致性 134 | // IfDuplicate bool // Duplicate command can't be applied twice , but only for PUT and APPEND 135 | 136 | public: 137 | // todo 138 | //为了协调raftRPC中的command只设置成了string,这个的限制就是正常字符中不能包含| 139 | //当然后期可以换成更高级的序列化方法,比如protobuf 140 | std::string asString() const { 141 | std::stringstream ss; 142 | boost::archive::text_oarchive oa(ss); 143 | 144 | // write class instance to archive 145 | oa << *this; 146 | // close archive 147 | 148 | return ss.str(); 149 | } 150 | 151 | bool parseFromString(std::string str) { 152 | std::stringstream iss(str); 153 | boost::archive::text_iarchive ia(iss); 154 | // read class state from archive 155 | ia >> *this; 156 | return true; // todo : 解析失敗如何處理,要看一下boost庫了 157 | } 158 | 159 | public: 160 | friend std::ostream& operator<<(std::ostream& os, const Op& obj) { 161 | os << 
"[MyClass:Operation{" + obj.Operation + "},Key{" + obj.Key + "},Value{" + obj.Value + "},ClientId{" + 162 | obj.ClientId + "},RequestId{" + std::to_string(obj.RequestId) + "}"; // 在这里实现自定义的输出格式 163 | return os; 164 | } 165 | 166 | private: 167 | friend class boost::serialization::access; 168 | template 169 | void serialize(Archive& ar, const unsigned int version) { 170 | ar& Operation; 171 | ar& Key; 172 | ar& Value; 173 | ar& ClientId; 174 | ar& RequestId; 175 | } 176 | }; 177 | 178 | ///////////////////////////////////////////////kvserver reply err to clerk 179 | 180 | const std::string OK = "OK"; 181 | const std::string ErrNoKey = "ErrNoKey"; 182 | const std::string ErrWrongLeader = "ErrWrongLeader"; 183 | 184 | ////////////////////////////////////获取可用端口 185 | 186 | bool isReleasePort(unsigned short usPort); 187 | 188 | bool getReleasePort(short& port); 189 | 190 | // int main(int argc, char** argv) 191 | //{ 192 | // short port = 9060; 193 | // if(getReleasePort(port)) //在port的基础上获取一个可用的port 194 | // { 195 | // std::cout << "可用的端口号为:" << port << std::endl; 196 | // } 197 | // else 198 | // { 199 | // std::cout << "获取可用端口号失败!" << std::endl; 200 | // } 201 | // return 0; 202 | // } 203 | 204 | #endif // UTIL_H -------------------------------------------------------------------------------- /src/raftCore/include/raft.h: -------------------------------------------------------------------------------- 1 | #ifndef RAFT_H 2 | #define RAFT_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include "ApplyMsg.h" 15 | #include "Persister.h" 16 | #include "boost/any.hpp" 17 | #include "boost/serialization/serialization.hpp" 18 | #include "config.h" 19 | #include "monsoon.h" 20 | #include "raftRpcUtil.h" 21 | #include "util.h" 22 | /// @brief //////////// 网络状态表示 todo:可以在rpc中删除该字段,实际生产中是用不到的. 
23 | constexpr int Disconnected = 24 | 0; // 方便网络分区的时候debug,网络异常的时候为disconnected,只要网络正常就为AppNormal,防止matchIndex[]数组异常减小 25 | constexpr int AppNormal = 1; 26 | 27 | ///////////////投票状态 28 | 29 | constexpr int Killed = 0; 30 | constexpr int Voted = 1; //本轮已经投过票了 31 | constexpr int Expire = 2; //投票(消息、竞选者)过期 32 | constexpr int Normal = 3; 33 | 34 | class Raft : public raftRpcProctoc::raftRpc { 35 | private: 36 | std::mutex m_mtx; 37 | std::vector> m_peers; 38 | std::shared_ptr m_persister; 39 | int m_me; 40 | int m_currentTerm; 41 | int m_votedFor; 42 | std::vector m_logs; //// 日志条目数组,包含了状态机要执行的指令集,以及收到领导时的任期号 43 | // 这两个状态所有结点都在维护,易失 44 | int m_commitIndex; 45 | int m_lastApplied; // 已经汇报给状态机(上层应用)的log 的index 46 | 47 | // 这两个状态是由服务器来维护,易失 48 | std::vector 49 | m_nextIndex; // 这两个状态的下标1开始,因为通常commitIndex和lastApplied从0开始,应该是一个无效的index,因此下标从1开始 50 | std::vector m_matchIndex; 51 | enum Status { Follower, Candidate, Leader }; 52 | // 身份 53 | Status m_status; 54 | 55 | std::shared_ptr> applyChan; // client从这里取日志(2B),client与raft通信的接口 56 | // ApplyMsgQueue chan ApplyMsg // raft内部使用的chan,applyChan是用于和服务层交互,最后好像没用上 57 | 58 | // 选举超时 59 | 60 | std::chrono::_V2::system_clock::time_point m_lastResetElectionTime; 61 | // 心跳超时,用于leader 62 | std::chrono::_V2::system_clock::time_point m_lastResetHearBeatTime; 63 | 64 | // 2D中用于传入快照点 65 | // 储存了快照中的最后一个日志的Index和Term 66 | int m_lastSnapshotIncludeIndex; 67 | int m_lastSnapshotIncludeTerm; 68 | 69 | // 协程 70 | std::unique_ptr m_ioManager = nullptr; 71 | 72 | public: 73 | void AppendEntries1(const raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *reply); 74 | void applierTicker(); 75 | bool CondInstallSnapshot(int lastIncludedTerm, int lastIncludedIndex, std::string snapshot); 76 | void doElection(); 77 | /** 78 | * \brief 发起心跳,只有leader才需要发起心跳 79 | */ 80 | void doHeartBeat(); 81 | // 每隔一段时间检查睡眠时间内有没有重置定时器,没有则说明超时了 82 | // 如果有则设置合适睡眠时间:睡眠到重置时间+超时时间 83 | void electionTimeOutTicker(); 84 | std::vector 
getApplyLogs(); 85 | int getNewCommandIndex(); 86 | void getPrevLogInfo(int server, int *preIndex, int *preTerm); 87 | void GetState(int *term, bool *isLeader); 88 | void InstallSnapshot(const raftRpcProctoc::InstallSnapshotRequest *args, 89 | raftRpcProctoc::InstallSnapshotResponse *reply); 90 | void leaderHearBeatTicker(); 91 | void leaderSendSnapShot(int server); 92 | void leaderUpdateCommitIndex(); 93 | bool matchLog(int logIndex, int logTerm); 94 | void persist(); 95 | void RequestVote(const raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *reply); 96 | bool UpToDate(int index, int term); 97 | int getLastLogIndex(); 98 | int getLastLogTerm(); 99 | void getLastLogIndexAndTerm(int *lastLogIndex, int *lastLogTerm); 100 | int getLogTermFromLogIndex(int logIndex); 101 | int GetRaftStateSize(); 102 | int getSlicesIndexFromLogIndex(int logIndex); 103 | 104 | bool sendRequestVote(int server, std::shared_ptr args, 105 | std::shared_ptr reply, std::shared_ptr votedNum); 106 | bool sendAppendEntries(int server, std::shared_ptr args, 107 | std::shared_ptr reply, std::shared_ptr appendNums); 108 | 109 | // rf.applyChan <- msg //不拿锁执行 可以单独创建一个线程执行,但是为了同意使用std:thread 110 | // ,避免使用pthread_create,因此专门写一个函数来执行 111 | void pushMsgToKvServer(ApplyMsg msg); 112 | void readPersist(std::string data); 113 | std::string persistData(); 114 | 115 | void Start(Op command, int *newLogIndex, int *newLogTerm, bool *isLeader); 116 | 117 | // Snapshot the service says it has created a snapshot that has 118 | // all info up to and including index. this means the 119 | // service no longer needs the log through (and including) 120 | // that index. Raft should now trim its log as much as possible. 
121 | // index代表是快照apply应用的index,而snapshot代表的是上层service传来的快照字节流,包括了Index之前的数据 122 | // 这个函数的目的是把安装到快照里的日志抛弃,并安装快照数据,同时更新快照下标,属于peers自身主动更新,与leader发送快照不冲突 123 | // 即服务层主动发起请求raft保存snapshot里面的数据,index是用来表示snapshot快照执行到了哪条命令 124 | void Snapshot(int index, std::string snapshot); 125 | 126 | public: 127 | // 重写基类方法,因为rpc远程调用真正调用的是这个方法 128 | //序列化,反序列化等操作rpc框架都已经做完了,因此这里只需要获取值然后真正调用本地方法即可。 129 | void AppendEntries(google::protobuf::RpcController *controller, const ::raftRpcProctoc::AppendEntriesArgs *request, 130 | ::raftRpcProctoc::AppendEntriesReply *response, ::google::protobuf::Closure *done) override; 131 | void InstallSnapshot(google::protobuf::RpcController *controller, 132 | const ::raftRpcProctoc::InstallSnapshotRequest *request, 133 | ::raftRpcProctoc::InstallSnapshotResponse *response, ::google::protobuf::Closure *done) override; 134 | void RequestVote(google::protobuf::RpcController *controller, const ::raftRpcProctoc::RequestVoteArgs *request, 135 | ::raftRpcProctoc::RequestVoteReply *response, ::google::protobuf::Closure *done) override; 136 | 137 | public: 138 | void init(std::vector> peers, int me, std::shared_ptr persister, 139 | std::shared_ptr> applyCh); 140 | 141 | private: 142 | // for persist 143 | 144 | class BoostPersistRaftNode { 145 | public: 146 | friend class boost::serialization::access; 147 | // When the class Archive corresponds to an output archive, the 148 | // & operator is defined similar to <<. Likewise, when the class Archive 149 | // is a type of input archive the & operator is defined similar to >>. 
150 | template 151 | void serialize(Archive &ar, const unsigned int version) { 152 | ar &m_currentTerm; 153 | ar &m_votedFor; 154 | ar &m_lastSnapshotIncludeIndex; 155 | ar &m_lastSnapshotIncludeTerm; 156 | ar &m_logs; 157 | } 158 | int m_currentTerm; 159 | int m_votedFor; 160 | int m_lastSnapshotIncludeIndex; 161 | int m_lastSnapshotIncludeTerm; 162 | std::vector m_logs; 163 | std::unordered_map umap; 164 | 165 | public: 166 | }; 167 | }; 168 | 169 | #endif // RAFT_H -------------------------------------------------------------------------------- /docs/rpc编码方式的改进.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | raft-kv不同的header编码方式.html 6 | 7 | 8 | 9 |
10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /src/rpc/rpcprovider.cpp: -------------------------------------------------------------------------------- 1 | #include "rpcprovider.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "rpcheader.pb.h" 9 | #include "util.h" 10 | /* 11 | service_name => service描述 12 | =》 service* 记录服务对象 13 | method_name => method方法对象 14 | json protobuf 15 | */ 16 | // 这里是框架提供给外部使用的,可以发布rpc方法的函数接口 17 | // 只是简单的把服务描述符和方法描述符全部保存在本地而已 18 | // todo 待修改 要把本机开启的ip和端口写在文件里面 19 | void RpcProvider::NotifyService(google::protobuf::Service *service) { 20 | ServiceInfo service_info; 21 | 22 | // 获取了服务对象的描述信息 23 | const google::protobuf::ServiceDescriptor *pserviceDesc = service->GetDescriptor(); 24 | // 获取服务的名字 25 | std::string service_name = pserviceDesc->name(); 26 | // 获取服务对象service的方法的数量 27 | int methodCnt = pserviceDesc->method_count(); 28 | 29 | std::cout << "service_name:" << service_name << std::endl; 30 | 31 | for (int i = 0; i < methodCnt; ++i) { 32 | // 获取了服务对象指定下标的服务方法的描述(抽象描述) UserService Login 33 | const google::protobuf::MethodDescriptor *pmethodDesc = pserviceDesc->method(i); 34 | std::string method_name = pmethodDesc->name(); 35 | service_info.m_methodMap.insert({method_name, pmethodDesc}); 36 | } 37 | service_info.m_service = service; 38 | m_serviceMap.insert({service_name, service_info}); 39 | } 40 | 41 | // 启动rpc服务节点,开始提供rpc远程网络调用服务 42 | void RpcProvider::Run(int nodeIndex, short port) { 43 | //获取可用ip 44 | char *ipC; 45 | char hname[128]; 46 | struct hostent *hent; 47 | gethostname(hname, sizeof(hname)); 48 | hent = gethostbyname(hname); 49 | for (int i = 0; hent->h_addr_list[i]; i++) { 50 | ipC = inet_ntoa(*(struct in_addr *)(hent->h_addr_list[i])); // IP地址 51 | } 52 | std::string ip = std::string(ipC); 53 | // // 获取端口 54 | // if(getReleasePort(port)) //在port的基础上获取一个可用的port,不知道为何没有效果 55 | // { 56 | // std::cout << "可用的端口号为:" << port 
<< std::endl; 57 | // } 58 | // else 59 | // { 60 | // std::cout << "获取可用端口号失败!" << std::endl; 61 | // } 62 | //写入文件 "test.conf" 63 | std::string node = "node" + std::to_string(nodeIndex); 64 | std::ofstream outfile; 65 | outfile.open("test.conf", std::ios::app); //打开文件并追加写入 66 | if (!outfile.is_open()) { 67 | std::cout << "打开文件失败!" << std::endl; 68 | exit(EXIT_FAILURE); 69 | } 70 | outfile << node + "ip=" + ip << std::endl; 71 | outfile << node + "port=" + std::to_string(port) << std::endl; 72 | outfile.close(); 73 | 74 | //创建服务器 75 | muduo::net::InetAddress address(ip, port); 76 | 77 | // 创建TcpServer对象 78 | m_muduo_server = std::make_shared(&m_eventLoop, address, "RpcProvider"); 79 | 80 | // 绑定连接回调和消息读写回调方法 分离了网络代码和业务代码 81 | /* 82 | bind的作用: 83 | 如果不使用std::bind将回调函数和TcpConnection对象绑定起来,那么在回调函数中就无法直接访问和修改TcpConnection对象的状态。因为回调函数是作为一个独立的函数被调用的,它没有当前对象的上下文信息(即this指针),也就无法直接访问当前对象的状态。 84 | 如果要在回调函数中访问和修改TcpConnection对象的状态,需要通过参数的形式将当前对象的指针传递进去,并且保证回调函数在当前对象的上下文环境中被调用。这种方式比较复杂,容易出错,也不便于代码的编写和维护。因此,使用std::bind将回调函数和TcpConnection对象绑定起来,可以更加方便、直观地访问和修改对象的状态,同时也可以避免一些常见的错误。 85 | */ 86 | m_muduo_server->setConnectionCallback(std::bind(&RpcProvider::OnConnection, this, std::placeholders::_1)); 87 | m_muduo_server->setMessageCallback( 88 | std::bind(&RpcProvider::OnMessage, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)); 89 | 90 | // 设置muduo库的线程数量 91 | m_muduo_server->setThreadNum(4); 92 | 93 | // rpc服务端准备启动,打印信息 94 | std::cout << "RpcProvider start service at ip:" << ip << " port:" << port << std::endl; 95 | 96 | // 启动网络服务 97 | m_muduo_server->start(); 98 | m_eventLoop.loop(); 99 | /* 100 | 这段代码是在启动网络服务和事件循环,其中server是一个TcpServer对象,m_eventLoop是一个EventLoop对象。 101 | 102 | 首先调用server.start()函数启动网络服务。在Muduo库中,TcpServer类封装了底层网络操作,包括TCP连接的建立和关闭、接收客户端数据、发送数据给客户端等等。通过调用TcpServer对象的start函数,可以启动底层网络服务并监听客户端连接的到来。 103 | 104 | 接下来调用m_eventLoop.loop()函数启动事件循环。在Muduo库中,EventLoop类封装了事件循环的核心逻辑,包括定时器、IO事件、信号等等。通过调用EventLoop对象的loop函数,可以启动事件循环,等待事件的到来并处理事件。 105 | 
106 | 在这段代码中,首先启动网络服务,然后进入事件循环阶段,等待并处理各种事件。网络服务和事件循环是两个相对独立的模块,它们的启动顺序和调用方式都是确定的。启动网络服务通常是在事件循环之前,因为网络服务是事件循环的基础。启动事件循环则是整个应用程序的核心,所有的事件都在事件循环中被处理。 107 | */ 108 | } 109 | 110 | // 新的socket连接回调 111 | void RpcProvider::OnConnection(const muduo::net::TcpConnectionPtr &conn) { 112 | // 如果是新连接就什么都不干,即正常的接收连接即可 113 | if (!conn->connected()) { 114 | // 和rpc client的连接断开了 115 | conn->shutdown(); 116 | } 117 | } 118 | 119 | /* 120 | 在框架内部,RpcProvider和RpcConsumer协商好之间通信用的protobuf数据类型 121 | service_name method_name args 定义proto的message类型,进行数据头的序列化和反序列化 122 | service_name method_name args_size 123 | 16UserServiceLoginzhang san123456 124 | 125 | header_size(4个字节) + header_str + args_str 126 | 10 "10" 127 | 10000 "1000000" 128 | std::string insert和copy方法 129 | */ 130 | // 已建立连接用户的读写事件回调 如果远程有一个rpc服务的调用请求,那么OnMessage方法就会响应 131 | // 这里来的肯定是一个远程调用请求 132 | // 因此本函数需要:解析请求,根据服务名,方法名,参数,来调用service的来callmethod来调用本地的业务 133 | void RpcProvider::OnMessage(const muduo::net::TcpConnectionPtr &conn, muduo::net::Buffer *buffer, muduo::Timestamp) { 134 | // 网络上接收的远程rpc调用请求的字符流 Login args 135 | std::string recv_buf = buffer->retrieveAllAsString(); 136 | 137 | // 使用protobuf的CodedInputStream来解析数据流 138 | google::protobuf::io::ArrayInputStream array_input(recv_buf.data(), recv_buf.size()); 139 | google::protobuf::io::CodedInputStream coded_input(&array_input); 140 | uint32_t header_size{}; 141 | 142 | coded_input.ReadVarint32(&header_size); // 解析header_size 143 | 144 | // 根据header_size读取数据头的原始字符流,反序列化数据,得到rpc请求的详细信息 145 | std::string rpc_header_str; 146 | RPC::RpcHeader rpcHeader; 147 | std::string service_name; 148 | std::string method_name; 149 | 150 | // 设置读取限制,不必担心数据读多 151 | google::protobuf::io::CodedInputStream::Limit msg_limit = coded_input.PushLimit(header_size); 152 | coded_input.ReadString(&rpc_header_str, header_size); 153 | // 恢复之前的限制,以便安全地继续读取其他数据 154 | coded_input.PopLimit(msg_limit); 155 | uint32_t args_size{}; 156 | if (rpcHeader.ParseFromString(rpc_header_str)) { 157 | // 数据头反序列化成功 
158 | service_name = rpcHeader.service_name(); 159 | method_name = rpcHeader.method_name(); 160 | args_size = rpcHeader.args_size(); 161 | } else { 162 | // 数据头反序列化失败 163 | std::cout << "rpc_header_str:" << rpc_header_str << " parse error!" << std::endl; 164 | return; 165 | } 166 | 167 | // 获取rpc方法参数的字符流数据 168 | std::string args_str; 169 | // 直接读取args_size长度的字符串数据 170 | bool read_args_success = coded_input.ReadString(&args_str, args_size); 171 | 172 | if (!read_args_success) { 173 | // 处理错误:参数数据读取失败 174 | return; 175 | } 176 | 177 | // 打印调试信息 178 | // std::cout << "============================================" << std::endl; 179 | // std::cout << "header_size: " << header_size << std::endl; 180 | // std::cout << "rpc_header_str: " << rpc_header_str << std::endl; 181 | // std::cout << "service_name: " << service_name << std::endl; 182 | // std::cout << "method_name: " << method_name << std::endl; 183 | // std::cout << "args_str: " << args_str << std::endl; 184 | // std::cout << "============================================" << std::endl; 185 | 186 | // 获取service对象和method对象 187 | auto it = m_serviceMap.find(service_name); 188 | if (it == m_serviceMap.end()) { 189 | std::cout << "服务:" << service_name << " is not exist!" << std::endl; 190 | std::cout << "当前已经有的服务列表为:"; 191 | for (auto item : m_serviceMap) { 192 | std::cout << item.first << " "; 193 | } 194 | std::cout << std::endl; 195 | return; 196 | } 197 | 198 | auto mit = it->second.m_methodMap.find(method_name); 199 | if (mit == it->second.m_methodMap.end()) { 200 | std::cout << service_name << ":" << method_name << " is not exist!" 
<< std::endl; 201 | return; 202 | } 203 | 204 | google::protobuf::Service *service = it->second.m_service; // 获取service对象 new UserService 205 | const google::protobuf::MethodDescriptor *method = mit->second; // 获取method对象 Login 206 | 207 | // 生成rpc方法调用的请求request和响应response参数,由于是rpc的请求,因此请求需要通过request来序列化 208 | google::protobuf::Message *request = service->GetRequestPrototype(method).New(); 209 | if (!request->ParseFromString(args_str)) { 210 | std::cout << "request parse error, content:" << args_str << std::endl; 211 | return; 212 | } 213 | google::protobuf::Message *response = service->GetResponsePrototype(method).New(); 214 | 215 | // 给下面的method方法的调用,绑定一个Closure的回调函数 216 | // closure是执行完本地方法之后会发生的回调,因此需要完成序列化和反向发送请求的操作 217 | google::protobuf::Closure *done = 218 | google::protobuf::NewCallback( 219 | this, &RpcProvider::SendRpcResponse, conn, response); 220 | 221 | // 在框架上根据远端rpc请求,调用当前rpc节点上发布的方法 222 | // new UserService().Login(controller, request, response, done) 223 | 224 | /* 225 | 为什么下面这个service->CallMethod 要这么写?或者说为什么这么写就可以直接调用远程业务方法了 226 | 这个service在运行的时候会是注册的service 227 | // 用户注册的service类 继承 .protoc生成的serviceRpc类 继承 google::protobuf::Service 228 | // 用户注册的service类里面没有重写CallMethod方法,是 .protoc生成的serviceRpc类 里面重写了google::protobuf::Service中 229 | 的纯虚函数CallMethod,而 .protoc生成的serviceRpc类 会根据传入参数自动调取 生成的xx方法(如Login方法), 230 | 由于xx方法被 用户注册的service类 重写了,因此这个方法运行的时候会调用 用户注册的service类 的xx方法 231 | 真的是妙呀 232 | */ 233 | //真正调用方法 234 | service->CallMethod(method, nullptr, request, response, done); 235 | } 236 | 237 | // Closure的回调操作,用于序列化rpc的响应和网络发送,发送响应回去 238 | void RpcProvider::SendRpcResponse(const muduo::net::TcpConnectionPtr &conn, google::protobuf::Message *response) { 239 | std::string response_str; 240 | if (response->SerializeToString(&response_str)) // response进行序列化 241 | { 242 | // 序列化成功后,通过网络把rpc方法执行的结果发送会rpc的调用方 243 | conn->send(response_str); 244 | } else { 245 | std::cout << "serialize response_str error!" 
<< std::endl; 246 | } 247 | // conn->shutdown(); // 模拟http的短链接服务,由rpcprovider主动断开连接 //改为长连接,不主动断开 248 | } 249 | 250 | RpcProvider::~RpcProvider() { 251 | std::cout << "[func - RpcProvider::~RpcProvider()]: ip和port信息:" << m_muduo_server->ipPort() << std::endl; 252 | m_eventLoop.quit(); 253 | // m_muduo_server. 怎么没有stop函数,奇奇怪怪,看csdn上面的教程也没有要停止,甚至上面那个都没有 254 | } 255 | -------------------------------------------------------------------------------- /src/fiber/iomanager.cpp: -------------------------------------------------------------------------------- 1 | #include "iomanager.hpp" 2 | 3 | namespace monsoon { 4 | // 获取事件上下文 5 | EventContext &FdContext::getEveContext(Event event) { 6 | switch (event) { 7 | case READ: 8 | return read; 9 | case WRITE: 10 | return write; 11 | default: 12 | CondPanic(false, "getContext error: unknow event"); 13 | } 14 | throw std::invalid_argument("getContext invalid event"); 15 | } 16 | // 重置事件上下文 17 | void FdContext::resetEveContext(EventContext &ctx) { 18 | ctx.scheduler = nullptr; 19 | ctx.fiber.reset(); 20 | ctx.cb = nullptr; 21 | } 22 | // 触发事件(只是将对应的fiber or cb 加入scheduler tasklist) 23 | void FdContext::triggerEvent(Event event) { 24 | CondPanic(events & event, "event hasn't been registed"); 25 | events = (Event)(events & ~event); 26 | EventContext &ctx = getEveContext(event); 27 | if (ctx.cb) { 28 | ctx.scheduler->scheduler(ctx.cb); 29 | } else { 30 | ctx.scheduler->scheduler(ctx.fiber); 31 | } 32 | resetEveContext(ctx); 33 | return; 34 | } 35 | 36 | IOManager::IOManager(size_t threads, bool use_caller, const std::string &name) : Scheduler(threads, use_caller, name) { 37 | epfd_ = epoll_create(5000); 38 | int ret = pipe(tickleFds_); 39 | CondPanic(ret == 0, "pipe error"); 40 | 41 | // 注册pipe读句柄的可读事件,用于tickle调度协程 42 | epoll_event event{}; 43 | memset(&event, 0, sizeof(epoll_event)); 44 | event.events = EPOLLIN | EPOLLET; 45 | event.data.fd = tickleFds_[0]; 46 | // 边缘触发,设置非阻塞 47 | ret = fcntl(tickleFds_[0], F_SETFL, O_NONBLOCK); 48 | 
CondPanic(ret == 0, "set fd nonblock error"); 49 | // 注册管道读描述符 50 | ret = epoll_ctl(epfd_, EPOLL_CTL_ADD, tickleFds_[0], &event); 51 | CondPanic(ret == 0, "epoll_ctl error"); 52 | 53 | contextResize(32); 54 | 55 | // 启动scheduler,开始进行协程调度 56 | start(); 57 | } 58 | IOManager::~IOManager() { 59 | stop(); 60 | close(epfd_); 61 | close(tickleFds_[0]); 62 | close(tickleFds_[1]); 63 | 64 | for (size_t i = 0; i < fdContexts_.size(); i++) { 65 | if (fdContexts_[i]) { 66 | delete fdContexts_[i]; 67 | } 68 | } 69 | } 70 | 71 | // 添加事件 72 | int IOManager::addEvent(int fd, Event event, std::function cb) { 73 | FdContext *fd_ctx = nullptr; 74 | RWMutex::ReadLock lock(mutex_); 75 | // TODO:可以使用map代替 76 | // 找到fd对应的fdCOntext,没有则创建 77 | if ((int)fdContexts_.size() > fd) { 78 | fd_ctx = fdContexts_[fd]; 79 | lock.unlock(); 80 | } else { 81 | lock.unlock(); 82 | RWMutex::WriteLock lock2(mutex_); 83 | contextResize(fd * 1.5); 84 | fd_ctx = fdContexts_[fd]; 85 | } 86 | 87 | // 同一个fd不允许注册重复事件 88 | Mutex::Lock ctxLock(fd_ctx->mutex); 89 | CondPanic(!(fd_ctx->events & event), "addevent error, fd = " + fd); 90 | 91 | int op = fd_ctx->events ? 
EPOLL_CTL_MOD : EPOLL_CTL_ADD; 92 | epoll_event epevent; 93 | epevent.events = EPOLLET | fd_ctx->events | event; 94 | epevent.data.ptr = fd_ctx; 95 | 96 | int ret = epoll_ctl(epfd_, op, fd, &epevent); 97 | if (ret) { 98 | std::cout << "addevent: epoll ctl error" << std::endl; 99 | return -1; 100 | } 101 | // 待执行IO事件数量 102 | ++pendingEventCnt_; 103 | 104 | // 赋值fd对应的event事件的EventContext 105 | fd_ctx->events = (Event)(fd_ctx->events | event); 106 | EventContext &event_ctx = fd_ctx->getEveContext(event); 107 | CondPanic(!event_ctx.scheduler && !event_ctx.fiber && !event_ctx.cb, "event_ctx is nullptr"); 108 | 109 | event_ctx.scheduler = Scheduler::GetThis(); 110 | if (cb) { 111 | // 设置了回调函数 112 | event_ctx.cb.swap(cb); 113 | } else { 114 | // 未设置回调函数,则将当前协程设置为回调任务 115 | event_ctx.fiber = Fiber::GetThis(); 116 | CondPanic(event_ctx.fiber->getState() == Fiber::RUNNING, "state=" + event_ctx.fiber->getState()); 117 | } 118 | std::cout << "add event success,fd = " << fd << std::endl; 119 | return 0; 120 | } 121 | // 删除事件 (删除前不会主动触发事件) 122 | bool IOManager::delEvent(int fd, Event event) { 123 | RWMutex::ReadLock lock(mutex_); 124 | if ((int)fdContexts_.size() <= fd) { 125 | // 找不到当前事件,返回 126 | return false; 127 | } 128 | FdContext *fd_ctx = fdContexts_[fd]; 129 | lock.unlock(); 130 | 131 | Mutex::Lock ctxLock(fd_ctx->mutex); 132 | if (!(fd_ctx->events & event)) { 133 | return false; 134 | } 135 | // 清理指定事件 136 | Event new_events = (Event)(fd_ctx->events & ~event); 137 | int op = new_events ? 
EPOLL_CTL_MOD : EPOLL_CTL_DEL; 138 | epoll_event epevent; 139 | epevent.events = EPOLLET | new_events; 140 | epevent.data.ptr = fd_ctx; 141 | // 注册删除事件 142 | int ret = epoll_ctl(epfd_, op, fd, &epevent); 143 | if (ret) { 144 | std::cout << "delevent: epoll_ctl error" << std::endl; 145 | return false; 146 | } 147 | --pendingEventCnt_; 148 | fd_ctx->events = new_events; 149 | EventContext &event_ctx = fd_ctx->getEveContext(event); 150 | fd_ctx->resetEveContext(event_ctx); 151 | return true; 152 | } 153 | 154 | // 取消事件 (取消前会主动触发事件) 155 | bool IOManager::cancelEvent(int fd, Event event) { 156 | RWMutex::ReadLock lock(mutex_); 157 | if ((int)fdContexts_.size() <= fd) { 158 | // 找不到当前事件,返回 159 | return false; 160 | } 161 | FdContext *fd_ctx = fdContexts_[fd]; 162 | lock.unlock(); 163 | 164 | Mutex::Lock ctxLock(fd_ctx->mutex); 165 | if (!(fd_ctx->events & event)) { 166 | return false; 167 | } 168 | // 清理指定事件 169 | Event new_events = (Event)(fd_ctx->events & ~event); 170 | int op = new_events ? 
EPOLL_CTL_MOD : EPOLL_CTL_DEL; 171 | epoll_event epevent; 172 | epevent.events = EPOLLET | new_events; 173 | epevent.data.ptr = fd_ctx; 174 | // 注册删除事件 175 | int ret = epoll_ctl(epfd_, op, fd, &epevent); 176 | if (ret) { 177 | std::cout << "delevent: epoll_ctl error" << std::endl; 178 | return false; 179 | } 180 | // 删除之前,触发以此事件 181 | fd_ctx->triggerEvent(event); 182 | --pendingEventCnt_; 183 | return true; 184 | } 185 | // 取消fd所有事件 186 | bool IOManager::cancelAll(int fd) { 187 | RWMutex::ReadLock lock(mutex_); 188 | if ((int)fdContexts_.size() <= fd) { 189 | // 找不到当前事件,返回 190 | return false; 191 | } 192 | FdContext *fd_ctx = fdContexts_[fd]; 193 | lock.unlock(); 194 | 195 | Mutex::Lock ctxLock(fd_ctx->mutex); 196 | if (!fd_ctx->events) { 197 | return false; 198 | } 199 | 200 | int op = EPOLL_CTL_DEL; 201 | epoll_event epevent; 202 | epevent.events = 0; 203 | epevent.data.ptr = fd_ctx; 204 | // 注册删除事件 205 | int ret = epoll_ctl(epfd_, op, fd, &epevent); 206 | if (ret) { 207 | std::cout << "delevent: epoll_ctl error" << std::endl; 208 | return false; 209 | } 210 | // 触发全部已注册事件 211 | if (fd_ctx->events & READ) { 212 | fd_ctx->triggerEvent(READ); 213 | --pendingEventCnt_; 214 | } 215 | if (fd_ctx->events & WRITE) { 216 | fd_ctx->triggerEvent(WRITE); 217 | --pendingEventCnt_; 218 | } 219 | CondPanic(fd_ctx->events == 0, "fd not totally clear"); 220 | return true; 221 | } 222 | IOManager *IOManager::GetThis() { return dynamic_cast(Scheduler::GetThis()); } 223 | 224 | // 通知调度器有任务到来 225 | void IOManager::tickle() { 226 | if (!isHasIdleThreads()) { 227 | // 此时没有空闲的调度线程 228 | return; 229 | } 230 | // 写pipe管道,使得idle协程凑够epoll_wait退出,开始调度任务 231 | int rt = write(tickleFds_[1], "T", 1); 232 | CondPanic(rt == 1, "write pipe error"); 233 | } 234 | 235 | // 调度器无任务则阻塞在idle线程上 236 | // 当有新事件触发,则退出idle状态,则执行回调函数 237 | // 当有新的调度任务,则退出idle状态,并执行对应任务 238 | void IOManager::idle() { 239 | // 以此最多检测256个就绪事件 240 | const uint64_t MAX_EVENTS = 256; 241 | epoll_event *events = new 
epoll_event[MAX_EVENTS](); 242 | std::shared_ptr shared_events(events, [](epoll_event *ptr) { delete[] ptr; }); 243 | 244 | while (true) { 245 | // std::cout << "[IOManager] idle begin..." << std::endl; 246 | // 获取下一个定时器超时时间,同时判断调度器是否已经stop 247 | uint64_t next_timeout = 0; 248 | if (stopping(next_timeout)) { 249 | std::cout << "name=" << getName() << "idle stopping exit"; 250 | break; 251 | } 252 | 253 | // 阻塞等待,等待事件发生 或者 定时器超时 254 | int ret = 0; 255 | do { 256 | static const int MAX_TIMEOUT = 5000; 257 | 258 | if (next_timeout != ~0ull) { 259 | next_timeout = std::min((int)next_timeout, MAX_TIMEOUT); 260 | } else { 261 | next_timeout = MAX_TIMEOUT; 262 | } 263 | // 阻塞等待事件就绪 264 | ret = epoll_wait(epfd_, events, MAX_EVENTS, (int)next_timeout); 265 | // std::cout << "wait..." << std::endl; 266 | if (ret < 0) { 267 | if (errno == EINTR) { 268 | // 系统调用被信号中断 269 | continue; 270 | } 271 | std::cout << "epoll_wait [" << epfd_ << "] errno,err: " << errno << std::endl; 272 | break; 273 | } else { 274 | break; 275 | } 276 | } while (true); 277 | 278 | // 收集所有超时定时器,执行回调函数 279 | std::vector> cbs; 280 | listExpiredCb(cbs); 281 | if (!cbs.empty()) { 282 | for (const auto &cb : cbs) { 283 | scheduler(cb); 284 | } 285 | cbs.clear(); 286 | } 287 | 288 | for (int i = 0; i < ret; i++) { 289 | epoll_event &event = events[i]; 290 | if (event.data.fd == tickleFds_[0]) { 291 | // pipe管道内数据无意义,只是tickle意义,读完即可 292 | uint8_t dummy[256]; 293 | // TODO:ET下阻塞读取可能有问题 294 | while (read(tickleFds_[0], dummy, sizeof(dummy)) > 0) 295 | ; 296 | continue; 297 | } 298 | 299 | // 通过epoll_event的私有指针获取FdContext 300 | FdContext *fd_ctx = (FdContext *)event.data.ptr; 301 | Mutex::Lock lock(fd_ctx->mutex); 302 | 303 | // 错误事件 or 挂起事件(对端关闭) 304 | if (event.events & (EPOLLERR | EPOLLHUP)) { 305 | std::cout << "error events" << std::endl; 306 | event.events |= (EPOLLIN | EPOLLOUT) & fd_ctx->events; 307 | } 308 | // 实际发生的事件类型 309 | int real_events = NONE; 310 | if (event.events & EPOLLIN) { 311 | real_events 
|= READ; 312 | } 313 | if (event.events & EPOLLOUT) { 314 | real_events |= WRITE; 315 | } 316 | if ((fd_ctx->events & real_events) == NONE) { 317 | // 触发的事件类型与注册的事件类型无交集 318 | continue; 319 | } 320 | // 剔除已经发生的事件,将剩余的事件重新加入epoll_wait 321 | // issue: 在处理 EPOLLERR 或 EPOLLHUP 事件时,可能需要重新注 322 | // 册 EPOLLIN 或 EPOLLOUT 事件,以确保后续的 IO 可以正常进行 323 | int left_events = (fd_ctx->events & ~real_events); 324 | int op = left_events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL; 325 | event.events = EPOLLET | left_events; 326 | 327 | int ret2 = epoll_ctl(epfd_, op, fd_ctx->fd, &event); 328 | if (ret2) { 329 | std::cout << "epoll_wait [" << epfd_ << "] errno,err: " << errno << std::endl; 330 | continue; 331 | } 332 | // 处理已就绪事件 (加入scheduler tasklist,未调度执行) 333 | if (real_events & READ) { 334 | fd_ctx->triggerEvent(READ); 335 | --pendingEventCnt_; 336 | } 337 | if (real_events & WRITE) { 338 | fd_ctx->triggerEvent(WRITE); 339 | --pendingEventCnt_; 340 | } 341 | } 342 | // 处理结束,idle协程yield,此时调度协程可以执行run去tasklist中 343 | // 检测,拿取新任务去调度 344 | Fiber::ptr cur = Fiber::GetThis(); 345 | auto raw_ptr = cur.get(); 346 | cur.reset(); 347 | // std::cout << "[IOManager] idle yield..." 
<< std::endl; 348 | raw_ptr->yield(); 349 | } 350 | } 351 | 352 | bool IOManager::stopping() { 353 | uint64_t timeout = 0; 354 | return stopping(timeout); 355 | } 356 | 357 | bool IOManager::stopping(uint64_t &timeout) { 358 | // 所有待调度的Io事件执行结束后,才允许退出 359 | timeout = getNextTimer(); 360 | return timeout == ~0ull && pendingEventCnt_ == 0 && Scheduler::stopping(); 361 | } 362 | 363 | void IOManager::contextResize(size_t size) { 364 | fdContexts_.resize(size); 365 | for (size_t i = 0; i < fdContexts_.size(); i++) { 366 | if (!fdContexts_[i]) { 367 | fdContexts_[i] = new FdContext; 368 | fdContexts_[i]->fd = i; 369 | } 370 | } 371 | } 372 | void IOManager::OnTimerInsertedAtFront() { tickle(); } 373 | 374 | } // namespace monsoon 375 | -------------------------------------------------------------------------------- /src/fiber/hook.cpp: -------------------------------------------------------------------------------- 1 | #include "hook.hpp" 2 | #include 3 | #include 4 | #include 5 | #include "fd_manager.hpp" 6 | #include "fiber.hpp" 7 | #include "iomanager.hpp" 8 | namespace monsoon { 9 | // 当前线程是否启用hook 10 | static thread_local bool t_hook_enable = false; 11 | static int g_tcp_connect_timeout = 5000; 12 | 13 | #define HOOK_FUN(XX) \ 14 | XX(sleep) \ 15 | XX(usleep) \ 16 | XX(nanosleep) \ 17 | XX(socket) \ 18 | XX(connect) \ 19 | XX(accept) \ 20 | XX(read) \ 21 | XX(readv) \ 22 | XX(recv) \ 23 | XX(recvfrom) \ 24 | XX(recvmsg) \ 25 | XX(write) \ 26 | XX(writev) \ 27 | XX(send) \ 28 | XX(sendto) \ 29 | XX(sendmsg) \ 30 | XX(close) \ 31 | XX(fcntl) \ 32 | XX(ioctl) \ 33 | XX(getsockopt) \ 34 | XX(setsockopt) 35 | 36 | void hook_init() { 37 | static bool is_inited = false; 38 | if (is_inited) { 39 | return; 40 | } 41 | // dlsym:Dynamic LinKinf Library.返回指定符号的地址 42 | #define XX(name) name##_f = (name##_fun)dlsym(RTLD_NEXT, #name); 43 | HOOK_FUN(XX); 44 | #undef XX 45 | } 46 | 47 | // hook_init放在静态对象中,则在main函数执行之前就会获取各个符号地址并 48 | // 保存到全局变量中 49 | static uint64_t 
s_connect_timeout = -1; 50 | struct _HOOKIniter { 51 | _HOOKIniter() { 52 | hook_init(); 53 | s_connect_timeout = g_tcp_connect_timeout; 54 | // std::cout << "hook init success" << std::endl; 55 | } 56 | }; 57 | static _HOOKIniter s_hook_initer; 58 | 59 | bool is_hook_enable() { return t_hook_enable; } 60 | 61 | void set_hook_enable(const bool flag) { t_hook_enable = flag; } 62 | 63 | struct timer_info { 64 | int cnacelled = 0; 65 | }; 66 | 67 | template 68 | static ssize_t do_io(int fd, OriginFun fun, const char *hook_fun_name, uint32_t event, int timeout_so, Args &&...args) { 69 | if (!t_hook_enable) { 70 | return fun(fd, std::forward(args)...); 71 | } 72 | // 为当前文件描述符创建上下文ctx 73 | FdCtx::ptr ctx = FdMgr::GetInstance()->get(fd); 74 | if (!ctx) { 75 | return fun(fd, std::forward(args)...); 76 | } 77 | // 文件已经关闭 78 | if (ctx->isClose()) { 79 | errno = EBADF; 80 | return -1; 81 | } 82 | 83 | if (!ctx->isSocket() || ctx->getUserNonblock()) { 84 | return fun(fd, std::forward(args)...); 85 | } 86 | // 获取对应type的fd超时时间 87 | uint64_t to = ctx->getTimeout(timeout_so); 88 | std::shared_ptr tinfo(new timer_info); 89 | 90 | retry: 91 | ssize_t n = fun(fd, std::forward(args)...); 92 | while (n == -1 && errno == EINTR) { 93 | // 读取操作被信号中断,继续尝试 94 | n = fun(fd, std::forward(args)...); 95 | } 96 | if (n == -1 && errno == EAGAIN) { 97 | // 数据未就绪 98 | IOManager *iom = IOManager::GetThis(); 99 | Timer::ptr timer; 100 | std::weak_ptr winfo(tinfo); 101 | 102 | if (to != (uint64_t)-1) { 103 | timer = iom->addConditionTimer( 104 | to, 105 | [winfo, fd, iom, event]() { 106 | auto t = winfo.lock(); 107 | if (!t || t->cnacelled) { 108 | return; 109 | } 110 | t->cnacelled = ETIMEDOUT; 111 | iom->cancelEvent(fd, (Event)(event)); 112 | }, 113 | winfo); 114 | } 115 | 116 | int rt = iom->addEvent(fd, (Event)(event)); 117 | if (rt) { 118 | std::cout << hook_fun_name << " addEvent(" << fd << ", " << event << ")"; 119 | if (timer) { 120 | timer->cancel(); 121 | } 122 | return -1; 123 | } else { 
124 | Fiber::GetThis()->yield(); 125 | if (timer) { 126 | timer->cancel(); 127 | } 128 | if (tinfo->cnacelled) { 129 | errno = tinfo->cnacelled; 130 | return -1; 131 | } 132 | goto retry; 133 | } 134 | } 135 | 136 | return n; 137 | } 138 | 139 | extern "C" { 140 | #define XX(name) name##_fun name##_f = nullptr; 141 | HOOK_FUN(XX); 142 | #undef XX 143 | 144 | /** 145 | * \brief 146 | * \param seconds 睡眠的秒数 147 | * \return 148 | */ 149 | unsigned int sleep(unsigned int seconds) { 150 | // std::cout << "HOOK SLEEP" << std::endl; 151 | if (!t_hook_enable) { 152 | // 不允许hook,则直接使用系统调用 153 | return sleep_f(seconds); 154 | } 155 | // 允许hook,则直接让当前协程退出,seconds秒后再重启(by定时器) 156 | Fiber::ptr fiber = Fiber::GetThis(); 157 | IOManager *iom = IOManager::GetThis(); 158 | iom->addTimer(seconds * 1000, 159 | std::bind((void(Scheduler::*)(Fiber::ptr, int thread)) & IOManager::scheduler, iom, fiber, -1)); 160 | Fiber::GetThis()->yield(); 161 | return 0; 162 | } 163 | // usleep 在指定的微妙数内暂停线程运行 164 | int usleep(useconds_t usec) { 165 | // std::cout << "HOOK USLEEP START" << std::endl; 166 | if (!t_hook_enable) { 167 | // 不允许hook,则直接使用系统调用 168 | // std::cout << "THIS THREAD NOT ALLOW HOOK" << std::endl; 169 | auto ret = usleep_f(usec); 170 | // std::cout << "THIS THREAD WAKE UP" << std::endl; 171 | return 0; 172 | } 173 | // std::cout << "HOOK USLEEP REAL START" << std::endl; 174 | // 允许hook,则直接让当前协程退出,seconds秒后再重启(by定时器) 175 | Fiber::ptr fiber = Fiber::GetThis(); 176 | IOManager *iom = IOManager::GetThis(); 177 | iom->addTimer(usec / 1000, 178 | std::bind((void(Scheduler::*)(Fiber::ptr, int thread)) & IOManager::scheduler, iom, fiber, -1)); 179 | Fiber::GetThis()->yield(); 180 | return 0; 181 | } 182 | // nanosleep 在指定的纳秒数内暂停当前线程的执行 183 | int nanosleep(const struct timespec *req, struct timespec *rem) { 184 | if (!t_hook_enable) { 185 | // 不允许hook,则直接使用系统调用 186 | return nanosleep_f(req, rem); 187 | } 188 | // 允许hook,则直接让当前协程退出,seconds秒后再重启(by定时器) 189 | Fiber::ptr fiber = 
Fiber::GetThis(); 190 | IOManager *iom = IOManager::GetThis(); 191 | int timeout_s = req->tv_sec * 1000 + req->tv_nsec / 1000 / 1000; 192 | iom->addTimer(timeout_s, 193 | std::bind((void(Scheduler::*)(Fiber::ptr, int thread)) & IOManager::scheduler, iom, fiber, -1)); 194 | Fiber::GetThis()->yield(); 195 | return 0; 196 | } 197 | 198 | int socket(int domain, int type, int protocol) { 199 | // std::cout << "HOOK SOCKET" << std::endl; 200 | if (!t_hook_enable) { 201 | return socket_f(domain, type, protocol); 202 | } 203 | int fd = socket_f(domain, type, protocol); 204 | if (fd == -1) { 205 | return fd; 206 | } 207 | // 将fd加入Fdmanager中 208 | FdMgr::GetInstance()->get(fd, true); 209 | return fd; 210 | } 211 | 212 | int connect_with_timeout(int fd, const struct sockaddr *addr, socklen_t addrlen, uint64_t timeout_ms) { 213 | // std::cout << "HOOK CONNECT_WITH_TIMEOUT" << std::endl; 214 | if (!t_hook_enable) { 215 | return connect_f(fd, addr, addrlen); 216 | } 217 | FdCtx::ptr ctx = FdMgr::GetInstance()->get(fd); 218 | if (!ctx || ctx->isClose()) { 219 | errno = EBADF; 220 | return -1; 221 | } 222 | 223 | if (!ctx->isSocket()) { 224 | return connect_f(fd, addr, addrlen); 225 | } 226 | 227 | // fd是否被显示设置为非阻塞模式 228 | if (ctx->getUserNonblock()) { 229 | return connect_f(fd, addr, addrlen); 230 | } 231 | 232 | // 系统调用connect(fd为非阻塞) 233 | int n = connect_f(fd, addr, addrlen); 234 | if (n == 0) { 235 | return 0; 236 | } else if (n != -1 || errno != EINPROGRESS) { 237 | return n; 238 | } 239 | // 返回EINPEOGRESS:正在进行,但是尚未完成 240 | IOManager *iom = IOManager::GetThis(); 241 | Timer::ptr timer; 242 | std::shared_ptr tinfo(new timer_info); 243 | std::weak_ptr winfo(tinfo); 244 | 245 | // 保证超时参数有效 246 | if (timeout_ms != (uint64_t)-1) { 247 | // 添加条件定时器 248 | timer = iom->addConditionTimer( 249 | timeout_ms, 250 | [winfo, fd, iom]() { 251 | auto t = winfo.lock(); 252 | if (!t || t->cnacelled) { 253 | return; 254 | } 255 | //定时时间到达,设置超时标志,触发一次WRITE事件 256 | t->cnacelled = ETIMEDOUT; 257 
| iom->cancelEvent(fd, WRITE); 258 | }, 259 | winfo); 260 | } 261 | 262 | // 添加WRITE事件,并yield,等待WRITE事件触发再往下执行 263 | int rt = iom->addEvent(fd, WRITE); 264 | if (rt == 0) { 265 | Fiber::GetThis()->yield(); 266 | // 等待超时or套接字可写,协程返回 267 | if (timer) { 268 | timer->cancel(); 269 | } 270 | // 超时返回,通过超时标志设置errno并返回-1 271 | if (tinfo->cnacelled) { 272 | errno = tinfo->cnacelled; 273 | return -1; 274 | } 275 | } else { 276 | // addevennt error 277 | if (timer) { 278 | timer->cancel(); 279 | } 280 | std::cout << "connect addEvent(" << fd << ", WRITE) error" << std::endl; 281 | } 282 | 283 | int error = 0; 284 | socklen_t len = sizeof(int); 285 | // 获取套接字的错误状态 286 | if (-1 == getsockopt(fd, SOL_SOCKET, SO_ERROR, &error, &len)) { 287 | return -1; 288 | } 289 | if (!error) { 290 | return 0; 291 | } else { 292 | errno = error; 293 | return -1; 294 | } 295 | } 296 | 297 | int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { 298 | return monsoon::connect_with_timeout(sockfd, addr, addrlen, s_connect_timeout); 299 | } 300 | 301 | int accept(int s, struct sockaddr *addr, socklen_t *addrlen) { 302 | int fd = do_io(s, accept_f, "accept", READ, SO_RCVTIMEO, addr, addrlen); 303 | if (fd >= 0) { 304 | FdMgr::GetInstance()->get(fd, true); 305 | } 306 | return fd; 307 | } 308 | 309 | ssize_t read(int fd, void *buf, size_t count) { return do_io(fd, read_f, "read", READ, SO_RCVTIMEO, buf, count); } 310 | 311 | ssize_t readv(int fd, const struct iovec *iov, int iovcnt) { 312 | return do_io(fd, readv_f, "readv", READ, SO_RCVTIMEO, iov, iovcnt); 313 | } 314 | 315 | ssize_t recv(int sockfd, void *buf, size_t len, int flags) { 316 | return do_io(sockfd, recv_f, "recv", READ, SO_RCVTIMEO, buf, len, flags); 317 | } 318 | 319 | ssize_t recvfrom(int sockfd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { 320 | return do_io(sockfd, recvfrom_f, "recvfrom", READ, SO_RCVTIMEO, buf, len, flags, src_addr, addrlen); 321 | } 322 | 323 | ssize_t 
recvmsg(int sockfd, struct msghdr *msg, int flags) { 324 | return do_io(sockfd, recvmsg_f, "recvmsg", READ, SO_RCVTIMEO, msg, flags); 325 | } 326 | 327 | ssize_t write(int fd, const void *buf, size_t count) { 328 | return do_io(fd, write_f, "write", WRITE, SO_SNDTIMEO, buf, count); 329 | } 330 | 331 | ssize_t writev(int fd, const struct iovec *iov, int iovcnt) { 332 | return do_io(fd, writev_f, "writev", WRITE, SO_SNDTIMEO, iov, iovcnt); 333 | } 334 | 335 | ssize_t send(int s, const void *msg, size_t len, int flags) { 336 | return do_io(s, send_f, "send", WRITE, SO_SNDTIMEO, msg, len, flags); 337 | } 338 | 339 | ssize_t sendto(int s, const void *msg, size_t len, int flags, const struct sockaddr *to, socklen_t tolen) { 340 | return do_io(s, sendto_f, "sendto", WRITE, SO_SNDTIMEO, msg, len, flags, to, tolen); 341 | } 342 | 343 | ssize_t sendmsg(int s, const struct msghdr *msg, int flags) { 344 | return do_io(s, sendmsg_f, "sendmsg", WRITE, SO_SNDTIMEO, msg, flags); 345 | } 346 | 347 | int close(int fd) { 348 | if (!t_hook_enable) { 349 | return close_f(fd); 350 | } 351 | 352 | FdCtx::ptr ctx = FdMgr::GetInstance()->get(fd); 353 | if (ctx) { 354 | auto iom = IOManager::GetThis(); 355 | if (iom) { 356 | iom->cancelAll(fd); 357 | } 358 | FdMgr::GetInstance()->del(fd); 359 | } 360 | return close_f(fd); 361 | } 362 | int fcntl(int fd, int cmd, ... 
/* arg */) { 363 | va_list va; 364 | va_start(va, cmd); 365 | switch (cmd) { 366 | case F_SETFL: { 367 | int arg = va_arg(va, int); 368 | va_end(va); 369 | FdCtx::ptr ctx = FdMgr::GetInstance()->get(fd); 370 | if (!ctx || ctx->isClose() || !ctx->isSocket()) { 371 | return fcntl_f(fd, cmd, arg); 372 | } 373 | ctx->setUserNonblock(arg & O_NONBLOCK); 374 | if (ctx->getSysNonblock()) { 375 | arg |= O_NONBLOCK; 376 | } else { 377 | arg &= ~O_NONBLOCK; 378 | } 379 | return fcntl_f(fd, cmd, arg); 380 | } break; 381 | case F_GETFL: { 382 | va_end(va); 383 | int arg = fcntl_f(fd, cmd); 384 | FdCtx::ptr ctx = FdMgr::GetInstance()->get(fd); 385 | if (!ctx || ctx->isClose() || !ctx->isSocket()) { 386 | return arg; 387 | } 388 | if (ctx->getUserNonblock()) { 389 | return arg | O_NONBLOCK; 390 | } else { 391 | return arg & ~O_NONBLOCK; 392 | } 393 | } break; 394 | case F_DUPFD: 395 | case F_DUPFD_CLOEXEC: 396 | case F_SETFD: 397 | case F_SETOWN: 398 | case F_SETSIG: 399 | case F_SETLEASE: 400 | case F_NOTIFY: 401 | #ifdef F_SETPIPE_SZ 402 | case F_SETPIPE_SZ: 403 | #endif 404 | { 405 | int arg = va_arg(va, int); 406 | va_end(va); 407 | return fcntl_f(fd, cmd, arg); 408 | } break; 409 | case F_GETFD: 410 | case F_GETOWN: 411 | case F_GETSIG: 412 | case F_GETLEASE: 413 | #ifdef F_GETPIPE_SZ 414 | case F_GETPIPE_SZ: 415 | #endif 416 | { 417 | va_end(va); 418 | return fcntl_f(fd, cmd); 419 | } break; 420 | case F_SETLK: 421 | case F_SETLKW: 422 | case F_GETLK: { 423 | struct flock *arg = va_arg(va, struct flock *); 424 | va_end(va); 425 | return fcntl_f(fd, cmd, arg); 426 | } break; 427 | case F_GETOWN_EX: 428 | case F_SETOWN_EX: { 429 | struct f_owner_exlock *arg = va_arg(va, struct f_owner_exlock *); 430 | va_end(va); 431 | return fcntl_f(fd, cmd, arg); 432 | } break; 433 | default: 434 | va_end(va); 435 | return fcntl_f(fd, cmd); 436 | } 437 | } 438 | 439 | int ioctl(int d, unsigned long int request, ...) 
{ 440 | va_list va; 441 | va_start(va, request); 442 | void *arg = va_arg(va, void *); 443 | va_end(va); 444 | 445 | if (FIONBIO == request) { 446 | bool user_nonblock = !!*(int *)arg; 447 | FdCtx::ptr ctx = FdMgr::GetInstance()->get(d); 448 | if (!ctx || ctx->isClose() || !ctx->isSocket()) { 449 | return ioctl_f(d, request, arg); 450 | } 451 | ctx->setUserNonblock(user_nonblock); 452 | } 453 | return ioctl_f(d, request, arg); 454 | } 455 | 456 | int getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen) { 457 | return getsockopt_f(sockfd, level, optname, optval, optlen); 458 | } 459 | 460 | int setsockopt(int sockfd, int level, int optname, const void *optval, socklen_t optlen) { 461 | if (!t_hook_enable) { 462 | return setsockopt_f(sockfd, level, optname, optval, optlen); 463 | } 464 | if (level == SOL_SOCKET) { 465 | if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) { 466 | FdCtx::ptr ctx = FdMgr::GetInstance()->get(sockfd); 467 | if (ctx) { 468 | const timeval *v = (const timeval *)optval; 469 | ctx->setTimeout(optname, v->tv_sec * 1000 + v->tv_usec / 1000); 470 | } 471 | } 472 | } 473 | return setsockopt_f(sockfd, level, optname, optval, optlen); 474 | } 475 | } 476 | } // namespace monsoon -------------------------------------------------------------------------------- /src/rpc/rpcheader.pb.cpp: -------------------------------------------------------------------------------- 1 | // Generated by the protocol buffer compiler. DO NOT EDIT! 
2 | // source: rpcheader.proto 3 | 4 | #include "rpcheader.pb.h" 5 | 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | // @@protoc_insertion_point(includes) 16 | #include 17 | namespace RPC { 18 | class RpcHeaderDefaultTypeInternal { 19 | public: 20 | ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; 21 | } _RpcHeader_default_instance_; 22 | } // namespace RPC 23 | static void InitDefaultsscc_info_RpcHeader_rpcheader_2eproto() { 24 | GOOGLE_PROTOBUF_VERIFY_VERSION; 25 | 26 | { 27 | void* ptr = &::RPC::_RpcHeader_default_instance_; 28 | new (ptr)::RPC::RpcHeader(); 29 | ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); 30 | } 31 | ::RPC::RpcHeader::InitAsDefaultInstance(); 32 | } 33 | 34 | ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RpcHeader_rpcheader_2eproto = { 35 | {ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, 0, 36 | InitDefaultsscc_info_RpcHeader_rpcheader_2eproto}, 37 | {}}; 38 | 39 | static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_rpcheader_2eproto[1]; 40 | static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_rpcheader_2eproto = 41 | nullptr; 42 | static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_rpcheader_2eproto = 43 | nullptr; 44 | 45 | const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_rpcheader_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE( 46 | protodesc_cold) = { 47 | ~0u, // no _has_bits_ 48 | PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, _internal_metadata_), 49 | ~0u, // no _extensions_ 50 | ~0u, // no _oneof_case_ 51 | ~0u, // no _weak_field_map_ 52 | PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, service_name_), 53 | PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, method_name_), 54 | PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, args_size_), 55 | }; 56 | static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema 
schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { 57 | {0, -1, sizeof(::RPC::RpcHeader)}, 58 | }; 59 | 60 | static ::PROTOBUF_NAMESPACE_ID::Message const* const file_default_instances[] = { 61 | reinterpret_cast(&::RPC::_RpcHeader_default_instance_), 62 | }; 63 | 64 | const char descriptor_table_protodef_rpcheader_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = 65 | "\n\017rpcheader.proto\022\003RPC\"I\n\tRpcHeader\022\024\n\014s" 66 | "ervice_name\030\001 \001(\014\022\023\n\013method_name\030\002 \001(\014\022\021" 67 | "\n\targs_size\030\003 \001(\rb\006proto3"; 68 | static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable* const descriptor_table_rpcheader_2eproto_deps[1] = {}; 69 | static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase* const descriptor_table_rpcheader_2eproto_sccs[1] = { 70 | &scc_info_RpcHeader_rpcheader_2eproto.base, 71 | }; 72 | static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_rpcheader_2eproto_once; 73 | const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_rpcheader_2eproto = { 74 | false, 75 | false, 76 | descriptor_table_protodef_rpcheader_2eproto, 77 | "rpcheader.proto", 78 | 105, 79 | &descriptor_table_rpcheader_2eproto_once, 80 | descriptor_table_rpcheader_2eproto_sccs, 81 | descriptor_table_rpcheader_2eproto_deps, 82 | 1, 83 | 0, 84 | schemas, 85 | file_default_instances, 86 | TableStruct_rpcheader_2eproto::offsets, 87 | file_level_metadata_rpcheader_2eproto, 88 | 1, 89 | file_level_enum_descriptors_rpcheader_2eproto, 90 | file_level_service_descriptors_rpcheader_2eproto, 91 | }; 92 | 93 | // Force running AddDescriptors() at dynamic initialization time. 
94 | static bool dynamic_init_dummy_rpcheader_2eproto = 95 | (static_cast(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_rpcheader_2eproto)), true); 96 | namespace RPC { 97 | 98 | // =================================================================== 99 | 100 | void RpcHeader::InitAsDefaultInstance() {} 101 | class RpcHeader::_Internal { 102 | public: 103 | }; 104 | 105 | RpcHeader::RpcHeader(::PROTOBUF_NAMESPACE_ID::Arena* arena) : ::PROTOBUF_NAMESPACE_ID::Message(arena) { 106 | SharedCtor(); 107 | RegisterArenaDtor(arena); 108 | // @@protoc_insertion_point(arena_constructor:RPC.RpcHeader) 109 | } 110 | RpcHeader::RpcHeader(const RpcHeader& from) : ::PROTOBUF_NAMESPACE_ID::Message() { 111 | _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); 112 | service_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 113 | if (!from._internal_service_name().empty()) { 114 | service_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from._internal_service_name(), 115 | GetArena()); 116 | } 117 | method_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 118 | if (!from._internal_method_name().empty()) { 119 | method_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from._internal_method_name(), 120 | GetArena()); 121 | } 122 | args_size_ = from.args_size_; 123 | // @@protoc_insertion_point(copy_constructor:RPC.RpcHeader) 124 | } 125 | 126 | void RpcHeader::SharedCtor() { 127 | ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_RpcHeader_rpcheader_2eproto.base); 128 | service_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 129 | method_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 130 | args_size_ = 0u; 131 | } 132 | 133 | RpcHeader::~RpcHeader() { 134 | // 
@@protoc_insertion_point(destructor:RPC.RpcHeader) 135 | SharedDtor(); 136 | _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); 137 | } 138 | 139 | void RpcHeader::SharedDtor() { 140 | GOOGLE_DCHECK(GetArena() == nullptr); 141 | service_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 142 | method_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 143 | } 144 | 145 | void RpcHeader::ArenaDtor(void* object) { 146 | RpcHeader* _this = reinterpret_cast(object); 147 | (void)_this; 148 | } 149 | void RpcHeader::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {} 150 | void RpcHeader::SetCachedSize(int size) const { _cached_size_.Set(size); } 151 | const RpcHeader& RpcHeader::default_instance() { 152 | ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RpcHeader_rpcheader_2eproto.base); 153 | return *internal_default_instance(); 154 | } 155 | 156 | void RpcHeader::Clear() { 157 | // @@protoc_insertion_point(message_clear_start:RPC.RpcHeader) 158 | ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; 159 | // Prevent compiler warnings about cached_has_bits being unused 160 | (void)cached_has_bits; 161 | 162 | service_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArena()); 163 | method_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArena()); 164 | args_size_ = 0u; 165 | _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); 166 | } 167 | 168 | const char* RpcHeader::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { 169 | #define CHK_(x) \ 170 | if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure 171 | ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArena(); 172 | (void)arena; 173 | while (!ctx->Done(&ptr)) { 174 | ::PROTOBUF_NAMESPACE_ID::uint32 tag; 175 | ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); 176 | CHK_(ptr); 177 | 
switch (tag >> 3) { 178 | // bytes service_name = 1; 179 | case 1: 180 | if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { 181 | auto str = _internal_mutable_service_name(); 182 | ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); 183 | CHK_(ptr); 184 | } else 185 | goto handle_unusual; 186 | continue; 187 | // bytes method_name = 2; 188 | case 2: 189 | if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { 190 | auto str = _internal_mutable_method_name(); 191 | ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); 192 | CHK_(ptr); 193 | } else 194 | goto handle_unusual; 195 | continue; 196 | // uint32 args_size = 3; 197 | case 3: 198 | if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { 199 | args_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); 200 | CHK_(ptr); 201 | } else 202 | goto handle_unusual; 203 | continue; 204 | default: { 205 | handle_unusual: 206 | if ((tag & 7) == 4 || tag == 0) { 207 | ctx->SetLastTag(tag); 208 | goto success; 209 | } 210 | ptr = UnknownFieldParse( 211 | tag, _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), ptr, ctx); 212 | CHK_(ptr != nullptr); 213 | continue; 214 | } 215 | } // switch 216 | } // while 217 | success: 218 | return ptr; 219 | failure: 220 | ptr = nullptr; 221 | goto success; 222 | #undef CHK_ 223 | } 224 | 225 | ::PROTOBUF_NAMESPACE_ID::uint8* RpcHeader::_InternalSerialize( 226 | ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { 227 | // @@protoc_insertion_point(serialize_to_array_start:RPC.RpcHeader) 228 | ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; 229 | (void)cached_has_bits; 230 | 231 | // bytes service_name = 1; 232 | if (this->service_name().size() > 0) { 233 | target = stream->WriteBytesMaybeAliased(1, this->_internal_service_name(), 
target); 234 | } 235 | 236 | // bytes method_name = 2; 237 | if (this->method_name().size() > 0) { 238 | target = stream->WriteBytesMaybeAliased(2, this->_internal_method_name(), target); 239 | } 240 | 241 | // uint32 args_size = 3; 242 | if (this->args_size() != 0) { 243 | target = stream->EnsureSpace(target); 244 | target = 245 | ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteUInt32ToArray(3, this->_internal_args_size(), target); 246 | } 247 | 248 | if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { 249 | target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( 250 | _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( 251 | ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), 252 | target, stream); 253 | } 254 | // @@protoc_insertion_point(serialize_to_array_end:RPC.RpcHeader) 255 | return target; 256 | } 257 | 258 | size_t RpcHeader::ByteSizeLong() const { 259 | // @@protoc_insertion_point(message_byte_size_start:RPC.RpcHeader) 260 | size_t total_size = 0; 261 | 262 | ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; 263 | // Prevent compiler warnings about cached_has_bits being unused 264 | (void)cached_has_bits; 265 | 266 | // bytes service_name = 1; 267 | if (this->service_name().size() > 0) { 268 | total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize(this->_internal_service_name()); 269 | } 270 | 271 | // bytes method_name = 2; 272 | if (this->method_name().size() > 0) { 273 | total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize(this->_internal_method_name()); 274 | } 275 | 276 | // uint32 args_size = 3; 277 | if (this->args_size() != 0) { 278 | total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::UInt32Size(this->_internal_args_size()); 279 | } 280 | 281 | if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { 282 | return 
::PROTOBUF_NAMESPACE_ID::internal::ComputeUnknownFieldsSize(_internal_metadata_, total_size, &_cached_size_); 283 | } 284 | int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); 285 | SetCachedSize(cached_size); 286 | return total_size; 287 | } 288 | 289 | void RpcHeader::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { 290 | // @@protoc_insertion_point(generalized_merge_from_start:RPC.RpcHeader) 291 | GOOGLE_DCHECK_NE(&from, this); 292 | const RpcHeader* source = ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated(&from); 293 | if (source == nullptr) { 294 | // @@protoc_insertion_point(generalized_merge_from_cast_fail:RPC.RpcHeader) 295 | ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); 296 | } else { 297 | // @@protoc_insertion_point(generalized_merge_from_cast_success:RPC.RpcHeader) 298 | MergeFrom(*source); 299 | } 300 | } 301 | 302 | void RpcHeader::MergeFrom(const RpcHeader& from) { 303 | // @@protoc_insertion_point(class_specific_merge_from_start:RPC.RpcHeader) 304 | GOOGLE_DCHECK_NE(&from, this); 305 | _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); 306 | ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; 307 | (void)cached_has_bits; 308 | 309 | if (from.service_name().size() > 0) { 310 | _internal_set_service_name(from._internal_service_name()); 311 | } 312 | if (from.method_name().size() > 0) { 313 | _internal_set_method_name(from._internal_method_name()); 314 | } 315 | if (from.args_size() != 0) { 316 | _internal_set_args_size(from._internal_args_size()); 317 | } 318 | } 319 | 320 | void RpcHeader::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { 321 | // @@protoc_insertion_point(generalized_copy_from_start:RPC.RpcHeader) 322 | if (&from == this) return; 323 | Clear(); 324 | MergeFrom(from); 325 | } 326 | 327 | void RpcHeader::CopyFrom(const RpcHeader& from) { 328 | // @@protoc_insertion_point(class_specific_copy_from_start:RPC.RpcHeader) 329 | 
if (&from == this) return; 330 | Clear(); 331 | MergeFrom(from); 332 | } 333 | 334 | bool RpcHeader::IsInitialized() const { return true; } 335 | 336 | void RpcHeader::InternalSwap(RpcHeader* other) { 337 | using std::swap; 338 | _internal_metadata_.Swap<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(&other->_internal_metadata_); 339 | service_name_.Swap(&other->service_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), 340 | GetArena()); 341 | method_name_.Swap(&other->method_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), 342 | GetArena()); 343 | swap(args_size_, other->args_size_); 344 | } 345 | 346 | ::PROTOBUF_NAMESPACE_ID::Metadata RpcHeader::GetMetadata() const { return GetMetadataStatic(); } 347 | 348 | // @@protoc_insertion_point(namespace_scope) 349 | } // namespace RPC 350 | PROTOBUF_NAMESPACE_OPEN 351 | template <> 352 | PROTOBUF_NOINLINE ::RPC::RpcHeader* Arena::CreateMaybeMessage<::RPC::RpcHeader>(Arena* arena) { 353 | return Arena::CreateMessageInternal<::RPC::RpcHeader>(arena); 354 | } 355 | PROTOBUF_NAMESPACE_CLOSE 356 | 357 | // @@protoc_insertion_point(global_scope) 358 | #include --------------------------------------------------------------------------------