├── .gitignore
├── distrBoltX.zip
├── img
└── c18e797d7c4525afd03a7ff1e85e014.png
├── cmd
├── populate.sh
├── luanch.sh
└── bench
│ └── main.go
├── config
├── sharding.toml
├── config_test.go
└── config.go
├── go.mod
├── api
├── nextKeyValue.proto
├── nextKeyValue_grpc.pb.go
└── nextKeyValue.pb.go
├── internal
├── rpc
│ └── serv
│ │ └── serv.go
├── web
│ ├── web.go
│ └── web_test.go
└── db
│ ├── db_test.go
│ └── db.go
├── .github
└── workflows
│ ├── go-ossf-slsa3-publish.yml
│ └── generator-generic-ossf-slsa3-publish.yml
├── main.go
├── pkg
└── replication
│ └── replication.go
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | distrb.db.lock
2 | distrb.db
3 | go.sum
4 | .idea/**
5 | .idea/vcs.xml
--------------------------------------------------------------------------------
/distrBoltX.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/s5364733/distrBoltX/HEAD/distrBoltX.zip
--------------------------------------------------------------------------------
/img/c18e797d7c4525afd03a7ff1e85e014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/s5364733/distrBoltX/HEAD/img/c18e797d7c4525afd03a7ff1e85e014.png
--------------------------------------------------------------------------------
/cmd/populate.sh:
--------------------------------------------------------------------------------
1 | #echo $RANDOM
2 | ## 准备数据
3 | for shard in localhost:8080 localhost:8081; do
4 | echo $shard
5 | for i in {1..1000}; do
6 |     curl "http://$shard/set?key=key-$RANDOM&value=value-$RANDOM"
7 | done
8 | done
--------------------------------------------------------------------------------
/config/sharding.toml:
--------------------------------------------------------------------------------
1 | [[shards]]
2 | name = "shard0"
3 | idx = 0
4 | address = "127.0.0.2:8080"
5 | replicas = ["127.0.0.22:8080"]
6 |
7 | [[shards]]
8 | name = "shard1"
9 | idx = 1
10 | address = "127.0.0.3:8081"
11 | replicas = ["127.0.0.33:8080"]
12 |
13 | [[shards]]
14 | name = "shard2"
15 | idx = 2
16 | address = "127.0.0.4:8082"
17 | replicas = ["127.0.0.44:8080"]
18 |
19 | [[shards]]
20 | name = "shard3"
21 | idx = 3
22 | address = "127.0.0.5:8083"
23 | replicas = ["127.0.0.55:8080"]
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/s5364733/distrBoltX
2 |
3 | go 1.17
4 |
5 | require (
6 | //github.com/boltdb/bolt
7 | github.com/BurntSushi/toml v1.2.1
8 | go.etcd.io/bbolt v1.3.7
9 | )
10 |
11 | require (
12 | google.golang.org/grpc v1.54.0
13 | google.golang.org/protobuf v1.30.0
14 | )
15 |
16 | require (
17 | github.com/golang/protobuf v1.5.2 // indirect
18 | golang.org/x/net v0.8.0 // indirect
19 | golang.org/x/sys v0.6.0 // indirect
20 | golang.org/x/text v0.8.0 // indirect
21 | google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
22 | )
23 |
--------------------------------------------------------------------------------
/api/nextKeyValue.proto:
--------------------------------------------------------------------------------
1 |
2 | syntax = "proto3";
3 |
4 | option go_package = "github.com/s5364733/distrBoltX/work/workspace/distrBoltX/rpc";
5 | option java_multiple_files = true;
6 | option java_outer_classname = "NextKeyValueProto";
7 |
8 | package proto;
9 |
10 | // The AckSyncDialer service definition.
11 | service AckSyncDialer {
12 | // Sends a ack
13 | rpc dial (stream SyncD) returns (stream NextKeyValue) {}
14 | }
15 | //
16 | message NextKeyValue {
17 | string key = 1;
18 | string value = 2;
19 |
20 | }
21 | // sync flag for serve
22 | message SyncD {
23 | bool ack = 1;
24 | }
25 |
--------------------------------------------------------------------------------
/cmd/luanch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | trap 'killall main' SIGINT
6 |
7 | cd $(dirname $0)
8 |
9 | killall main || true
10 |
11 | sleep 0.1
12 |
13 | go install -v
14 |
15 | main -db-location=shard0.db -http-addr=127.0.0.2:8080 -grpc-addr=127.0.0.2:50030 -config-file=sharding.toml -shard=shard0 &
16 | main -db-location=shard0-r.db -http-addr=127.0.0.22:8080 -grpc-addr=127.0.0.2:50030 -config-file=sharding.toml -shard=shard0 -replica &
17 |
18 | main -db-location=shard1.db -http-addr=127.0.0.3:8081 -grpc-addr=127.0.0.3:50030 -config-file=sharding.toml -shard=shard1 &
19 | main -db-location=shard1-r.db -http-addr=127.0.0.33:8080 -grpc-addr=127.0.0.2:5003 -config-file=sharding.toml -shard=shard1 -replica=true &
20 |
21 | main -db-location=shard2.db -http-addr=127.0.0.4:8082 -grpc-addr=127.0.0.4:50030 -config-file=sharding.toml -shard=shard2 &
22 | main -db-location=shard2-r.db -http-addr=127.0.0.44:8080 -grpc-addr=127.0.0.2:5003 -config-file=sharding.toml -shard=shard2 -replica=true &
23 |
24 | main -db-location=shard3.db -http-addr=127.0.0.5:8083 -grpc-addr=127.0.0.5:50030 -config-file=sharding.toml -shard=shard3 &
25 | main -db-location=shard3-r.db -http-addr=127.0.0.55:8080 -grpc-addr=127.0.0.2:5003 -config-file=sharding.toml -shard=shard3 -replica=true &
26 |
27 | wait
--------------------------------------------------------------------------------
/internal/rpc/serv/serv.go:
--------------------------------------------------------------------------------
1 | package serv
2 |
3 | import (
4 | "fmt"
5 | "github.com/s5364733/distrBoltX/api"
6 | "github.com/s5364733/distrBoltX/internal/web"
7 | "io"
8 | "time"
9 | )
10 |
11 | type AckSyncDialerService struct {
12 | api.UnimplementedAckSyncDialerServer
13 | s *web.Server
14 | }
15 |
16 | func NewAckSyncDialerService(serv *web.Server) *AckSyncDialerService {
17 | return &AckSyncDialerService{
18 | s: serv,
19 | }
20 | }
21 |
22 | func (c *AckSyncDialerService) Dial(stream api.AckSyncDialer_DialServer) error {
23 | for {
24 | //1. 拿到最新KEY
25 | key, v, err2 := c.s.Db.GetNextKeyForReplication()
26 | if err2 != nil || key == nil || v == nil {
27 | time.Sleep(time.Millisecond * 100)
28 | continue
29 | }
30 | //2.发送到副本数据同步到副本节点bucket
31 | err2 = stream.Send(&api.NextKeyValue{
32 | Key: string(key),
33 | Value: string(v),
34 | })
35 | 		if err2 != nil {
36 | 			// fmt.Errorf only constructs an error; print it so the failure is visible.
37 | 			fmt.Printf("stream send error: %v\n", err2)
38 | 		}
38 | fmt.Printf("Data sent to the replica is synchronized to the replica node key = %q,value=%q", key, v)
39 | //3.副本节点同步成功后发送ACK 标识
40 | ack, err := stream.Recv()
41 | if err == io.EOF {
42 | return nil
43 | }
44 | if err != nil {
45 | return err
46 | }
47 | fmt.Printf("The ACK identifier of the replica node synchronization is completed ack = %q", ack)
48 | //4 删除主节点副本
49 | if ack != nil && ack.Ack {
50 | fmt.Printf("The key asynchronized from the master ,which has been deleted key %q val %q ", string(key), string(v))
51 | err2 := c.s.Db.DeleteReplicationKey(key, v)
52 | 			if err2 != nil {
53 | 				// fmt.Errorf only constructs an error; print it so the failure is visible.
54 | 				fmt.Printf("delete replication key error: %v\n", err2)
55 | 			}
55 | fmt.Printf("Data replica sync have been done,deleting local key = %q", key)
56 | }
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/config/config_test.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "os"
5 | "reflect"
6 | "testing"
7 | )
8 |
9 | func createConfig(t *testing.T, content string) Config {
10 | t.Helper()
11 |
12 | f, err := os.CreateTemp(os.TempDir(), "config.toml")
13 | if err != nil {
14 | t.Fatalf("Coundn't create the temp file : %v", err)
15 | }
16 | defer f.Close()
17 | name := f.Name()
18 | defer os.Remove(name)
19 |
20 | _, err = f.WriteString(content)
21 | if err != nil {
22 | t.Fatalf("Write file occurs error ,:%v", err)
23 | }
24 |
25 | c, err := ParseConfig(name)
26 | if err != nil {
27 | t.Fatalf("Couldn't parse config :%v ", err)
28 | }
29 | return c
30 | }
31 |
32 | func TestParseConfig(t *testing.T) {
33 | got := createConfig(t, `[[shards]]
34 | name = "shard0"
35 | idx = 0
36 | address = "localhost:8080"
37 | `)
38 |
39 | expect := &Config{
40 | Shards: []Shard{
41 | {
42 | Name: "shard0",
43 | Idx: 0,
44 | Address: "localhost:8080",
45 | },
46 | },
47 | }
48 | 	// Note: expect is a pointer, so dereference it before comparing with DeepEqual.
49 | //!reflect.DeepEqual(c, expect)
50 | if !reflect.DeepEqual(got, *expect) {
51 | t.Errorf("The config not match source:%#v expect :%#v", got, expect)
52 | }
53 | }
54 |
55 | func TestParseShards(t *testing.T) {
56 | c := createConfig(t, `
57 | [[shards]]
58 | name = "shard0"
59 | idx = 0
60 | address = "localhost:8080"
61 | [[shards]]
62 | name = "shard1"
63 | idx = 1
64 | address = "localhost:8081"
65 |
66 | `)
67 | got, err := ParseShards(c.Shards, "shard1")
68 |
69 | if err != nil {
70 | t.Fatalf("Cound not parse shards %#v:%v", c.Shards, err)
71 | }
72 |
73 | expect := &Shards{
74 | Count: 2,
75 | CurIdx: 1,
76 | Addrs: map[int]string{
77 | 0: "localhost:8080",
78 | 1: "localhost:8081",
79 | },
80 | }
81 | if !reflect.DeepEqual(got, expect) {
82 | t.Errorf("The shards config does match source:%#v expect :%#v", got, expect)
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/.github/workflows/go-ossf-slsa3-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # This workflow lets you compile your Go project using a SLSA3 compliant builder.
7 | # This workflow will generate a so-called "provenance" file describing the steps
8 | # that were performed to generate the final binary.
9 | # The project is an initiative of the OpenSSF (openssf.org) and is developed at
10 | # https://github.com/slsa-framework/slsa-github-generator.
11 | # The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier.
12 | # For more information about SLSA and how it improves the supply-chain, visit slsa.dev.
13 |
14 | name: SLSA Go releaser
15 | on:
16 | workflow_dispatch:
17 | release:
18 | types: [created]
19 |
20 | permissions: read-all
21 |
22 | jobs:
23 | # ========================================================================================================================================
24 | # Prerequesite: Create a .slsa-goreleaser.yml in the root directory of your project.
25 | # See format in https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/go/README.md#configuration-file
26 | #=========================================================================================================================================
27 | build:
28 | permissions:
29 | id-token: write # To sign.
30 | contents: write # To upload release assets.
31 | actions: read # To read workflow path.
32 | uses: slsa-framework/slsa-github-generator/.github/workflows/builder_go_slsa3.yml@v1.4.0
33 | with:
34 | go-version: 1.17
35 | # =============================================================================================================
36 | # Optional: For more options, see https://github.com/slsa-framework/slsa-github-generator#golang-projects
37 | # =============================================================================================================
38 |
39 |
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "fmt"
5 | "github.com/BurntSushi/toml"
6 | "hash/fnv"
7 | )
8 |
9 | // Shard describes a config that holds the key
10 | // Each key has unique the set of key
11 | type Shard struct {
12 | Name string
13 | Idx int
14 | Address string
15 | }
16 |
17 | // Config describes the sharding config
18 | type Config struct {
19 | Shards []Shard
20 | }
21 |
22 | // Shards represent an easier to use representation of
23 | // the sharding config : the shards count, current index and
24 | // the addresses of all other shards too
25 | type Shards struct {
26 | Count int //分片总数
27 | CurIdx int //当前分片索引
28 | Addrs map[int]string //分片索引对地址MAPPING
29 | }
30 |
31 | // ParseConfig parses the config and returns it ~~~~~~~~~~~~~~~~~~~~
32 | func ParseConfig(filename string) (Config, error) {
33 | var c Config
34 | if _, err := toml.DecodeFile(filename, &c); err != nil {
35 | return Config{}, err
36 | }
37 | return c, nil
38 | }
39 |
40 | // ParseShards converts and verifies the list of shards
41 | // specified in the config into a form that can be used
42 | // for routing
43 | // 稍微抽象一下路由解析方法
44 | func ParseShards(shards []Shard, curShardName string) (*Shards, error) {
45 | shardCount := len(shards)
46 | shardIdx := -1
47 | addrs := make(map[int]string) //[Idx,addr]
48 | //遍历所有分片
49 | for _, s := range shards {
50 | if _, ok := addrs[s.Idx]; ok {
51 | //ok == true 说明有值,反之说明没有值
52 | return nil, fmt.Errorf("duplicate shard index:%d", s.Idx)
53 | }
54 | addrs[s.Idx] = s.Address
55 | if s.Name == curShardName {
56 | shardIdx = s.Idx //拿到当前shardIdx
57 | }
58 | }
59 | for i := 0; i < shardCount; i++ {
60 | if _, ok := addrs[i]; !ok {
61 | return nil, fmt.Errorf("shard %d is not found", i)
62 | }
63 | }
64 | if shardIdx < 0 {
65 | return nil, fmt.Errorf("shard %q was not found", curShardName)
66 | }
67 | //拿到所有分片和地址
68 | //拿到当前分片Idx
69 | return &Shards{
70 | Addrs: addrs,
71 | Count: shardCount,
72 | CurIdx: shardIdx,
73 | }, nil
74 | }
75 |
76 | // Index return the shard number for the corresponding key
77 | func (s *Shards) Index(key string) int {
78 | h := fnv.New64()
79 | h.Write([]byte(key))
80 | return int(h.Sum64() % uint64(s.Count))
81 | }
82 |
--------------------------------------------------------------------------------
/.github/workflows/generator-generic-ossf-slsa3-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # This workflow lets you generate SLSA provenance file for your project.
7 | # The generation satisfies level 3 for the provenance requirements - see https://slsa.dev/spec/v0.1/requirements
8 | # The project is an initiative of the OpenSSF (openssf.org) and is developed at
9 | # https://github.com/slsa-framework/slsa-github-generator.
10 | # The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier.
11 | # For more information about SLSA and how it improves the supply-chain, visit slsa.dev.
12 |
13 | name: SLSA generic generator
14 | on:
15 | workflow_dispatch:
16 | release:
17 | types: [created]
18 |
19 | jobs:
20 | build:
21 | runs-on: ubuntu-latest
22 | outputs:
23 | digests: ${{ steps.hash.outputs.digests }}
24 |
25 | steps:
26 | - uses: actions/checkout@v3
27 |
28 | # ========================================================
29 | #
30 | # Step 1: Build your artifacts.
31 | #
32 | # ========================================================
33 | - name: Build artifacts
34 | run: |
35 | # These are some amazing artifacts.
36 | echo "artifact1" > artifact1
37 | echo "artifact2" > artifact2
38 |
39 | # ========================================================
40 | #
41 | # Step 2: Add a step to generate the provenance subjects
42 | # as shown below. Update the sha256 sum arguments
43 | # to include all binaries that you generate
44 | # provenance for.
45 | #
46 | # ========================================================
47 | - name: Generate subject for provenance
48 | id: hash
49 | run: |
50 | set -euo pipefail
51 |
52 | # List the artifacts the provenance will refer to.
53 | files=$(ls artifact*)
54 | # Generate the subjects (base64 encoded).
55 | echo "hashes=$(sha256sum $files | base64 -w0)" >> "${GITHUB_OUTPUT}"
56 |
57 | provenance:
58 | needs: [build]
59 | permissions:
60 | actions: read # To read the workflow path.
61 | id-token: write # To sign the provenance.
62 | contents: write # To add assets to a release.
63 | uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.4.0
64 | with:
65 | base64-subjects: "${{ needs.build.outputs.digests }}"
66 | upload-assets: true # Optional: Upload to a new release
67 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "github.com/s5364733/distrBoltX/api"
7 | "github.com/s5364733/distrBoltX/config"
8 | "github.com/s5364733/distrBoltX/internal/db"
9 | "github.com/s5364733/distrBoltX/internal/rpc/serv"
10 | "github.com/s5364733/distrBoltX/internal/web"
11 | "github.com/s5364733/distrBoltX/pkg/replication"
12 | "google.golang.org/grpc"
13 | "log"
14 | "net"
15 | "net/http"
16 | )
17 |
18 | var (
19 | 	dbLocation = flag.String("db-location", "", "the path to the bolt database")
20 | 	httpAddr   = flag.String("http-addr", "127.0.0.1:8080", "HTTP host and port")
21 | 	configFile = flag.String("config-file", "sharding.toml", "Config file for static sharding")
22 | 	shard      = flag.String("shard", "", "the name of the shard for the data")
23 | 	replica    = flag.Bool("replica", false, "Whether or not run as a read-only replica")
24 | 	grpcAddr   = flag.String("grpc-addr", "127.0.0.1:50030", "grpc's inner port register ")
25 | )
26 |
27 | func parseFlag() {
28 | 	flag.Parse()
29 | 	if *dbLocation == "" {
30 | 		log.Fatalf("Must provide db-location")
31 | 	}
32 | 	if *shard == "" {
33 | 		log.Fatalf("Must provide the shard name")
34 | 	}
35 | }
36 |
37 | func main() {
38 | // Open the XXXX.db data file in your current directory.
39 | // It will be created if it doesn't exist.
40 | parseFlag()
41 |
42 | c, err := config.ParseConfig(*configFile)
43 | if err != nil {
44 | log.Fatalf("Error parsing config %q: %v", *configFile, err)
45 | }
46 |
47 | shards, err := config.ParseShards(c.Shards, *shard)
48 | if err != nil {
49 | log.Fatalf("Error parsing shards config :%v", err)
50 | }
51 | log.Printf("Shard count is %d current shard :%d cur config %#v:", len(c.Shards), shards.CurIdx, &c)
52 |
53 | db, close, err := db.NewDatabase(*dbLocation, *replica)
54 | if err != nil {
55 | log.Fatalf("NewDatabase(%q) : %v", *dbLocation, err)
56 | }
57 |
58 | defer close()
59 | //如果当前是副本
60 | //shard0 shard1 shard2 分别放在三个数据库
61 | srv := registerHttpFuncHandler(db, shards)
62 |
63 | if *replica {
64 | //拿到当前主分片节点 fetch current master node address
65 | //name = "shard0"
66 | //idx = 0
67 | //address = "127.0.0.2:8080"
68 | //replicas = ["127.0.0.22:8080"]
69 | //这里就是 shard0 == 127.0.0.2:8080
70 | leaderAddr, ok := shards.Addrs[shards.CurIdx]
71 | if !ok {
72 | log.Fatalf("Could not find address for leader for shard %d", shards.CurIdx)
73 | }
74 | //启动一个协程去轮询
75 | go replication.ClientGrpcLoop(db, leaderAddr)
76 | } else { //GRPC 端口注册
77 | fmt.Printf("execute init for grpc register %#v register node ip addr : %q", srv, *grpcAddr)
78 | go registerGrpcPort(srv, *grpcAddr)
79 | }
80 |
81 | //开启主节点同步端口
82 | 	log.Fatal(srv.ListenAndServe(*httpAddr))
83 |
84 | }
85 |
86 | func registerHttpFuncHandler(db *db.Database, shards *config.Shards) *web.Server {
87 | srv := web.NewServer(db, shards)
88 | http.HandleFunc("/set", srv.SetHandler)
89 | http.HandleFunc("/get", srv.GetHandler)
90 | http.HandleFunc("/purge", srv.DeleteExtraKeyHandler)
91 | http.HandleFunc("/next-replication-key", srv.GetNextKeyForReplication)
92 | http.HandleFunc("/delete-replication-key", srv.DeleteReplicationKey)
93 | return srv
94 | }
95 |
96 | func registerGrpcPort(server *web.Server, grpcAddr string) {
97 | lis, err := net.Listen("tcp", grpcAddr)
98 | if err != nil {
99 | log.Fatalf("failed to listen: %v", err)
100 | }
101 | s := grpc.NewServer()
102 | api.RegisterAckSyncDialerServer(s, serv.NewAckSyncDialerService(server))
103 | log.Printf("server listening at %v", lis.Addr())
104 | 	if err := s.Serve(lis); err != nil {
105 | 		// log.Fatalf exits the process, so no cleanup can follow it.
106 | 		log.Fatalf("failed to serve: %v", err)
107 | 	}
108 | }
109 |
--------------------------------------------------------------------------------
/internal/web/web.go:
--------------------------------------------------------------------------------
1 | package web
2 |
3 | //jack.lei
4 | import (
5 | "encoding/json"
6 | "fmt"
7 | "github.com/s5364733/distrBoltX/config"
8 | "github.com/s5364733/distrBoltX/internal/db"
9 | "github.com/s5364733/distrBoltX/pkg/replication"
10 | "io"
11 | "net/http"
12 | )
13 |
14 | // Server contains HTTP method handler to be used for the database
15 | type Server struct {
16 | Db *db.Database
17 | //shardIdx int
18 | //shardCount int
19 | //addr map[int]string
20 | shards *config.Shards
21 | }
22 |
23 | // NewServer for used to be http endpoint handler
24 | func NewServer(db *db.Database, s *config.Shards) *Server {
25 | return &Server{
26 | Db: db,
27 | shards: s,
28 | }
29 | }
30 |
31 | func (s *Server) redirect(shard int, w http.ResponseWriter, r *http.Request) {
32 | url := "http://" + s.shards.Addrs[shard] + r.RequestURI
33 | 	resp, err := http.Get(url)
34 | 	// Check the error before writing any body: writing first would make
35 | 	// the later WriteHeader(500) a superfluous no-op.
36 | 	if err != nil {
37 | 		w.WriteHeader(500)
38 | 		fmt.Fprintf(w, "Error redirecting request to %q: %v", url, err)
39 | 		return
40 | 	}
41 | 	fmt.Fprintf(w, "redirect from shard %d to shard %d(%q)\n", s.shards.CurIdx, shard, url)
40 | //调用者关闭BODY
41 | defer resp.Body.Close()
42 | //写入响应体stream
43 | io.Copy(w, resp.Body)
44 | }
45 |
46 | // GetHandler handles get endpoint
47 | func (s *Server) GetHandler(w http.ResponseWriter, r *http.Request) {
48 | r.ParseForm()
49 | key := r.Form.Get("key")
50 | shard := s.shards.Index(key)
51 |
52 | //当前有可能不是拿的当前分区的数据,例如当前key计算出来的HASH取模分片之后为0 但是请求的是1分区的库,
53 | //所以这里导航到0分区即可
54 | if shard != s.shards.CurIdx {
55 | s.redirect(shard, w, r)
56 | return
57 | }
58 | value, err := s.Db.GetKey(key)
59 | fmt.Fprintf(w, "ShardIdx: %d , cur config :%d ,addr : %q , value = %q ,error=%v ",
60 | shard, //KEY 对应的分片路由ID
61 | s.shards.CurIdx, //当前分区
62 | s.shards.Addrs[shard], //应该拿分区库所在的地址
63 | value,
64 | err)
65 | }
66 |
67 | // SetHandler handles set endpoint
68 | func (s *Server) SetHandler(w http.ResponseWriter, r *http.Request) {
69 | r.ParseForm()
70 | key := r.Form.Get("key")
71 | value := r.Form.Get("value")
72 | shard := s.shards.Index(key)
73 |
74 | if shard != s.shards.CurIdx {
75 | s.redirect(shard, w, r)
76 | return
77 | }
78 | err := s.Db.SetKey(key, []byte(value))
79 | fmt.Fprintf(w, "Error=%v, shardIdx %d , current shard: %d", err, shard, s.shards.CurIdx)
80 | }
81 |
82 | // ListenAndServe starts accept request
83 | func (s *Server) ListenAndServe(addr string) error {
84 | return http.ListenAndServe(addr, nil)
85 | }
86 |
87 | // DeleteExtraKeyHandler deletes keys that don't belong to current shard
88 | func (s *Server) DeleteExtraKeyHandler(w http.ResponseWriter, r *http.Request) {
89 | fmt.Fprintf(w, "Error = %v", s.Db.DeleteExtraKeys(func(key string) bool {
90 | return s.shards.CurIdx != s.shards.Index(key)
91 | }))
92 | }
93 |
94 | // GetNextKeyForReplication returns the next key for replication.
95 | func (s *Server) GetNextKeyForReplication(w http.ResponseWriter, r *http.Request) {
96 | enc := json.NewEncoder(w)
97 | k, v, err := s.Db.GetNextKeyForReplication()
98 | enc.Encode(&replication.NextKeyValue{
99 | Key: string(k),
100 | Value: string(v),
101 | Err: err,
102 | })
103 | }
104 |
105 | // DeleteReplicationKey deletes the key from replicas queue.
106 | func (s *Server) DeleteReplicationKey(w http.ResponseWriter, r *http.Request) {
107 | r.ParseForm()
108 | key := r.Form.Get("key")
109 | value := r.Form.Get("value")
110 |
111 | err := s.Db.DeleteReplicationKey([]byte(key), []byte(value))
112 | if err != nil {
113 | w.WriteHeader(http.StatusExpectationFailed)
114 | fmt.Fprintf(w, "error: %v", err)
115 | return
116 | }
117 |
118 | fmt.Fprintf(w, "ok")
119 | }
120 |
--------------------------------------------------------------------------------
/cmd/bench/main.go:
--------------------------------------------------------------------------------
1 | package bench
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "io"
7 | "log"
8 | "math/rand"
9 | "net/http"
10 | "net/url"
11 | "sync"
12 | "time"
13 | )
14 |
15 | var (
16 | addr = flag.String("addr", "localhost:8080", "The HTTP host port for the instance that is benchmarked.")
17 | iterations = flag.Int("iterations", 1000, "The number of iterations for writing")
18 | readIterations = flag.Int("read-iterations", 100000, "The number of iterations for reading")
19 | concurrency = flag.Int("concurrency", 1, "How many goroutines to run in parallel when doing writes")
20 | )
21 |
22 | var httpClient = &http.Client{
23 | Transport: &http.Transport{
24 | IdleConnTimeout: time.Second * 60,
25 | MaxIdleConns: 300,
26 | MaxConnsPerHost: 300,
27 | MaxIdleConnsPerHost: 300,
28 | },
29 | }
30 |
31 | func benchmark(name string, iter int, fn func() string) (qps float64, strs []string) {
32 | var max time.Duration
33 | var min = time.Hour
34 |
35 | start := time.Now()
36 | for i := 0; i < iter; i++ {
37 | iterStart := time.Now()
38 | strs = append(strs, fn())
39 | iterTime := time.Since(iterStart)
40 | if iterTime > max {
41 | max = iterTime
42 | }
43 | if iterTime < min {
44 | min = iterTime
45 | }
46 | }
47 |
48 | avg := time.Since(start) / time.Duration(iter)
49 | //单个请求的QPS
50 | qps = float64(iter) / (float64(time.Since(start)) / float64(time.Second))
51 | fmt.Printf("Func %s ,took %s avg, %.1f QPS, %s max, %s min\n", name, avg, qps, max, min)
52 |
53 | return qps, strs
54 | }
55 |
56 | func writeRand() (key string) {
57 | key = fmt.Sprintf("key-%d", rand.Intn(1000000))
58 | value := fmt.Sprintf("value-%d", rand.Intn(1000000))
59 |
60 | values := url.Values{}
61 | values.Set("key", key)
62 | values.Set("value", value)
63 |
64 | resp, err := httpClient.Get("http://" + (*addr) + "/set?" + values.Encode())
65 | if err != nil {
66 | log.Fatalf("Error during set: %v", err)
67 | }
68 |
69 | 	io.Copy(io.Discard, resp.Body)
70 | defer resp.Body.Close()
71 |
72 | return key
73 | }
74 |
75 | func readRand(allKeys []string) (key string) {
76 | key = allKeys[rand.Intn(len(allKeys))]
77 |
78 | values := url.Values{}
79 | values.Set("key", key)
80 |
81 | resp, err := httpClient.Get("http://" + (*addr) + "/get?" + values.Encode())
82 | if err != nil {
83 | log.Fatalf("Error during get: %v", err)
84 | }
85 | io.Copy(io.Discard, resp.Body)
86 | defer resp.Body.Close()
87 |
88 | return key
89 | }
90 |
91 | func benchmarkWrite() (allKeys []string) {
92 | var wg sync.WaitGroup
93 | var mu sync.Mutex
94 | var totalQPS float64
95 |
96 | for i := 0; i < *concurrency; i++ {
97 | wg.Add(1)
98 | go func() {
99 | qps, strs := benchmark("write", *iterations, writeRand)
100 | mu.Lock()
101 | totalQPS += qps
102 | allKeys = append(allKeys, strs...)
103 | mu.Unlock()
104 |
105 | wg.Done()
106 | }()
107 | }
108 |
109 | wg.Wait()
110 |
111 | log.Printf("Write total QPS: %.1f, set %d keys", totalQPS, len(allKeys))
112 |
113 | return allKeys
114 | }
115 |
116 | func benchmarkRead(allKeys []string) {
117 | var totalQPS float64
118 | var mu sync.Mutex
119 | var wg sync.WaitGroup
120 |
121 | for i := 0; i < *concurrency; i++ {
122 | wg.Add(1)
123 | go func() {
124 | qps, _ := benchmark("read", *readIterations, func() string { return readRand(allKeys) })
125 | mu.Lock()
126 | totalQPS += qps
127 | mu.Unlock()
128 |
129 | wg.Done()
130 | }()
131 | }
132 |
133 | wg.Wait()
134 |
135 | log.Printf("Read total QPS: %.1f", totalQPS)
136 | }
137 |
138 | func mainV() {
139 | rand.Seed(time.Now().UnixNano())
140 | flag.Parse()
141 |
142 | fmt.Printf("Running with %d iterations and concurrency level %d\n", *iterations, *concurrency)
143 |
144 | allKeys := benchmarkWrite()
145 |
146 | go benchmarkWrite()
147 | benchmarkRead(allKeys)
148 | }
149 |
--------------------------------------------------------------------------------
/internal/web/web_test.go:
--------------------------------------------------------------------------------
1 | package web
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "github.com/s5364733/distrBoltX/config"
7 | "github.com/s5364733/distrBoltX/internal/db"
8 | "io"
9 |
10 | "log"
11 | "net/http"
12 | "net/http/httptest"
13 | "os"
14 | "strings"
15 | "testing"
16 | )
17 |
18 | func createShardDb(t *testing.T, idx int) *db.Database {
19 | t.Helper()
20 |
21 | tmpFile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("db%d", idx))
22 | if err != nil {
23 | t.Fatalf("Could not create a temp db %d: %v", idx, err)
24 | }
25 |
26 | tmpFile.Close()
27 |
28 | name := tmpFile.Name()
29 | t.Cleanup(func() { os.Remove(name) })
30 |
31 | db, closeFunc, err := db.NewDatabase(name, false)
32 | if err != nil {
33 | t.Fatalf("Could not create new database %q: %v", name, err)
34 | }
35 | t.Cleanup(func() { closeFunc() })
36 |
37 | return db
38 | }
39 |
40 | func createShardServer(t *testing.T, idx int, addrs map[int]string) (*db.Database, *Server) {
41 | t.Helper()
42 | db := createShardDb(t, idx)
43 |
44 | cfg := &config.Shards{
45 | Addrs: addrs,
46 | Count: len(addrs),
47 | CurIdx: idx,
48 | }
49 |
50 | s := NewServer(db, cfg)
51 | return db, s
52 | }
53 |
54 | func TestWebServer(t *testing.T) {
55 | var ts1GetHandler, ts1SetHandler func(w http.ResponseWriter, r *http.Request)
56 | var ts2GetHandler, ts2SetHandler func(w http.ResponseWriter, r *http.Request)
57 |
58 | ts1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
59 | if strings.HasPrefix(r.RequestURI, "/get") {
60 | ts1GetHandler(w, r)
61 | } else if strings.HasPrefix(r.RequestURI, "/set") {
62 | ts1SetHandler(w, r)
63 | }
64 | }))
65 | defer ts1.Close()
66 |
67 | ts2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
68 | if strings.HasPrefix(r.RequestURI, "/get") {
69 | ts2GetHandler(w, r)
70 | } else if strings.HasPrefix(r.RequestURI, "/set") {
71 | ts2SetHandler(w, r)
72 | }
73 | }))
74 | defer ts2.Close()
75 |
76 | addrs := map[int]string{
77 | 0: strings.TrimPrefix(ts1.URL, "http://"),
78 | 1: strings.TrimPrefix(ts2.URL, "http://"),
79 | }
80 |
81 | db1, web1 := createShardServer(t, 0, addrs)
82 | db2, web2 := createShardServer(t, 1, addrs)
83 |
84 | // Calculated manually and depends on the sharding function.
85 | keys := map[string]int{
86 | "shard1": 1,
87 | "shard2": 0,
88 | }
89 |
90 | ts1GetHandler = web1.GetHandler
91 | ts1SetHandler = web1.SetHandler
92 | ts2GetHandler = web2.GetHandler
93 | ts2SetHandler = web2.SetHandler
94 |
95 | for key := range keys {
96 | // Send all to first shard to test redirects.
97 | _, err := http.Get(fmt.Sprintf(ts1.URL+"/set?key=%s&value=value-%s", key, key))
98 | if err != nil {
99 | t.Fatalf("Could not set the key %q: %v", key, err)
100 | }
101 | }
102 |
103 | for key := range keys {
104 | // Send all to first shard to test redirects.
105 | resp, err := http.Get(fmt.Sprintf(ts1.URL+"/get?key=%s", key))
106 | if err != nil {
107 | t.Fatalf("Get key %q error: %v", key, err)
108 | }
109 | contents, err := io.ReadAll(resp.Body)
110 | if err != nil {
111 | t.Fatalf("Could read contents of the key %q: %v", key, err)
112 | }
113 |
114 | want := []byte("value-" + key)
115 | if !bytes.Contains(contents, want) {
116 | t.Errorf("Unexpected contents of the key %q: got %q, want the result to contain %q", key, contents, want)
117 | }
118 |
119 | log.Printf("Contents of key %q: %s", key, contents)
120 | }
121 |
122 | //当前DB
123 | value1, err := db1.GetKey("shard1")
124 | if err != nil {
125 | t.Fatalf("shard1 key error: %v", err)
126 | }
127 |
128 | want1 := "value-shard1"
129 | if !bytes.Equal(value1, []byte(want1)) {
130 | 		t.Errorf("Unexpected value of shard1 key: got %q, want %q", value1, want1)
131 | }
132 |
133 | //key, _ := db1.GetKey("shard2")
134 | //fmt.Println(key)
135 |
136 | //分片之后的DB
137 | value2, err := db2.GetKey("shard2")
138 | if err != nil {
139 | t.Fatalf("shard2 key error: %v", err)
140 | }
141 |
142 | want2 := "value-shard2"
143 | if !bytes.Equal(value2, []byte(want2)) {
144 | t.Errorf("Unexpected value of Soviet key: got %q, want %q", value2, want2)
145 | }
146 | }
147 |
--------------------------------------------------------------------------------
/api/nextKeyValue_grpc.pb.go:
--------------------------------------------------------------------------------
1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
2 | // versions:
3 | // - protoc-gen-go-grpc v1.2.0
4 | // - protoc v4.22.2
5 | // source: nextKeyValue.proto
6 |
7 | package api
8 |
9 | import (
10 | context "context"
11 | grpc "google.golang.org/grpc"
12 | codes "google.golang.org/grpc/codes"
13 | status "google.golang.org/grpc/status"
14 | )
15 |
16 | // This is a compile-time assertion to ensure that this generated file
17 | // is compatible with the grpc package it is being compiled against.
18 | // Requires gRPC-Go v1.32.0 or later.
19 | const _ = grpc.SupportPackageIsVersion7
20 |
21 | // AckSyncDialerClient is the client API for AckSyncDialer service.
22 | //
23 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
24 | type AckSyncDialerClient interface {
25 | // Sends a ack
26 | Dial(ctx context.Context, opts ...grpc.CallOption) (AckSyncDialer_DialClient, error)
27 | }
28 |
29 | type ackSyncDialerClient struct {
30 | cc grpc.ClientConnInterface
31 | }
32 |
33 | func NewAckSyncDialerClient(cc grpc.ClientConnInterface) AckSyncDialerClient {
34 | return &ackSyncDialerClient{cc}
35 | }
36 |
37 | func (c *ackSyncDialerClient) Dial(ctx context.Context, opts ...grpc.CallOption) (AckSyncDialer_DialClient, error) {
38 | stream, err := c.cc.NewStream(ctx, &AckSyncDialer_ServiceDesc.Streams[0], "/proto.AckSyncDialer/dial", opts...)
39 | if err != nil {
40 | return nil, err
41 | }
42 | x := &ackSyncDialerDialClient{stream}
43 | return x, nil
44 | }
45 |
46 | type AckSyncDialer_DialClient interface {
47 | Send(*SyncD) error
48 | Recv() (*NextKeyValue, error)
49 | grpc.ClientStream
50 | }
51 |
52 | type ackSyncDialerDialClient struct {
53 | grpc.ClientStream
54 | }
55 |
56 | func (x *ackSyncDialerDialClient) Send(m *SyncD) error {
57 | return x.ClientStream.SendMsg(m)
58 | }
59 |
60 | func (x *ackSyncDialerDialClient) Recv() (*NextKeyValue, error) {
61 | m := new(NextKeyValue)
62 | if err := x.ClientStream.RecvMsg(m); err != nil {
63 | return nil, err
64 | }
65 | return m, nil
66 | }
67 |
68 | // AckSyncDialerServer is the server API for AckSyncDialer service.
69 | // All implementations must embed UnimplementedAckSyncDialerServer
70 | // for forward compatibility
71 | type AckSyncDialerServer interface {
72 | // Sends a ack
73 | Dial(AckSyncDialer_DialServer) error
74 | mustEmbedUnimplementedAckSyncDialerServer()
75 | }
76 |
77 | // UnimplementedAckSyncDialerServer must be embedded to have forward compatible implementations.
78 | type UnimplementedAckSyncDialerServer struct {
79 | }
80 |
81 | func (UnimplementedAckSyncDialerServer) Dial(AckSyncDialer_DialServer) error {
82 | return status.Errorf(codes.Unimplemented, "method Dial not implemented")
83 | }
84 | func (UnimplementedAckSyncDialerServer) mustEmbedUnimplementedAckSyncDialerServer() {}
85 |
86 | // UnsafeAckSyncDialerServer may be embedded to opt out of forward compatibility for this service.
87 | // Use of this interface is not recommended, as added methods to AckSyncDialerServer will
88 | // result in compilation errors.
89 | type UnsafeAckSyncDialerServer interface {
90 | mustEmbedUnimplementedAckSyncDialerServer()
91 | }
92 |
93 | func RegisterAckSyncDialerServer(s grpc.ServiceRegistrar, srv AckSyncDialerServer) {
94 | s.RegisterService(&AckSyncDialer_ServiceDesc, srv)
95 | }
96 |
97 | func _AckSyncDialer_Dial_Handler(srv interface{}, stream grpc.ServerStream) error {
98 | return srv.(AckSyncDialerServer).Dial(&ackSyncDialerDialServer{stream})
99 | }
100 |
101 | type AckSyncDialer_DialServer interface {
102 | Send(*NextKeyValue) error
103 | Recv() (*SyncD, error)
104 | grpc.ServerStream
105 | }
106 |
107 | type ackSyncDialerDialServer struct {
108 | grpc.ServerStream
109 | }
110 |
111 | func (x *ackSyncDialerDialServer) Send(m *NextKeyValue) error {
112 | return x.ServerStream.SendMsg(m)
113 | }
114 |
115 | func (x *ackSyncDialerDialServer) Recv() (*SyncD, error) {
116 | m := new(SyncD)
117 | if err := x.ServerStream.RecvMsg(m); err != nil {
118 | return nil, err
119 | }
120 | return m, nil
121 | }
122 |
123 | // AckSyncDialer_ServiceDesc is the grpc.ServiceDesc for AckSyncDialer service.
124 | // It's only intended for direct use with grpc.RegisterService,
125 | // and not to be introspected or modified (even as a copy)
126 | var AckSyncDialer_ServiceDesc = grpc.ServiceDesc{
127 | ServiceName: "proto.AckSyncDialer",
128 | HandlerType: (*AckSyncDialerServer)(nil),
129 | Methods: []grpc.MethodDesc{},
130 | Streams: []grpc.StreamDesc{
131 | {
132 | StreamName: "dial",
133 | Handler: _AckSyncDialer_Dial_Handler,
134 | ServerStreams: true,
135 | ClientStreams: true,
136 | },
137 | },
138 | Metadata: "nextKeyValue.proto",
139 | }
140 |
--------------------------------------------------------------------------------
/internal/db/db_test.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "bytes"
5 | "os"
6 | "testing"
7 | )
8 |
9 | //func createConfig(t *testing.T, content string) config.Config {
10 | // t.Helper()
11 | //
12 | // f, err := os.CreateTemp(os.TempDir(), "test.db")
13 | // if err != nil {
14 | // t.Fatalf("Coundn't create the temp file : %v", err)
15 | // }
16 | // defer f.Close()
17 | // name := f.Name()
18 | // defer os.Remove(name)
19 | //
20 | // _, err = f.WriteString(content)
21 | // if err != nil {
22 | // t.Fatalf("Write file occurs error ,:%v", err)
23 | // }
24 | //
25 | // c, err := ParseConfig(name)
26 | // if err != nil {
27 | // t.Fatalf("Couldn't parse config :%v ", err)
28 | // }
29 | // return c
30 | //}
31 |
32 | // createTempDb 创建temp_db文件
33 | func createTempDb(t *testing.T, readOnly bool) *Database {
34 | t.Helper()
35 |
36 | f, err := os.CreateTemp(os.TempDir(), "kvdb")
37 | if err != nil {
38 | t.Fatalf("Could not create temp file: %v", err)
39 | }
40 | name := f.Name()
41 | f.Close()
42 | t.Cleanup(func() { os.Remove(name) })
43 |
44 | db, closeFunc, err := NewDatabase(name, readOnly)
45 | if err != nil {
46 | t.Fatalf("Could not create a new database: %v", err)
47 | }
48 | t.Cleanup(func() { closeFunc() })
49 |
50 | return db
51 | }
52 |
53 | func TestGetSet(t *testing.T) {
54 | db := createTempDb(t, false)
55 |
56 | if err := db.SetKey("party", []byte("Great")); err != nil {
57 | t.Fatalf("Could not write key: %v", err)
58 | }
59 |
60 | value, err := db.GetKey("party")
61 | if err != nil {
62 | t.Fatalf(`Could not get the key "party": %v`, err)
63 | }
64 |
65 | if !bytes.Equal(value, []byte("Great")) {
66 | t.Errorf(`Unexpected value for key "party": got %q, want %q`, value, "Great")
67 | }
68 |
69 | k, v, err := db.GetNextKeyForReplication()
70 | if err != nil {
71 | t.Fatalf(`Unexpected error for GetNextKeyForReplication(): %v`, err)
72 | }
73 |
74 | if !bytes.Equal(k, []byte("party")) || !bytes.Equal(v, []byte("Great")) {
75 | t.Errorf(`GetNextKeyForReplication(): got %q, %q; want %q, %q`, k, v, "party", "Great")
76 | }
77 | }
78 |
79 | func TestDeleteReplicationKey(t *testing.T) {
80 | db := createTempDb(t, false)
81 |
82 | setKey(t, db, "party", "Great")
83 | //setKey(t, db, "party1", "Great1")
84 | k, v, err := db.GetNextKeyForReplication()
85 | //k1, v1, err := db.GetNextKeyForReplication()
86 | //t.Logf("key,k1 %q v1 %q err: %v", k1, v1, err)
87 | if err != nil {
88 | t.Fatalf(`Unexpected error for GetNextKeyForReplication(): %v`, err)
89 | }
90 |
91 | if !bytes.Equal(k, []byte("party")) || !bytes.Equal(v, []byte("Great")) {
92 | t.Errorf(`GetNextKeyForReplication(): got %q, %q; want %q, %q`, k, v, "party", "Great")
93 | }
94 |
95 | if err := db.DeleteReplicationKey([]byte("party"), []byte("Bad")); err == nil {
96 | t.Fatalf(`DeleteReplicationKey("party", "Bad"): got nil error, want non-nil error`)
97 | }
98 |
99 | if err := db.DeleteReplicationKey([]byte("party"), []byte("Great")); err != nil {
100 | t.Fatalf(`DeleteReplicationKey("party", "Great"): got %q, want nil error`, err)
101 | }
102 |
103 | k, v, err = db.GetNextKeyForReplication()
104 | if err != nil {
105 | t.Fatalf(`Unexpected error for GetNextKeyForReplication(): %v`, err)
106 | }
107 |
108 | if k == nil || v == nil {
109 | t.Errorf(`GetNextKeyForReplication(): got %v, %v; want nil, nil`, k, v)
110 | }
111 | }
112 |
113 | func TestSetReadOnly(t *testing.T) {
114 | db := createTempDb(t, true)
115 |
116 | if err := db.SetKey("party", []byte("Bad")); err == nil {
117 | t.Fatalf("SetKey(%q, %q): got nil error, want non-nil error", "party", []byte("Bad"))
118 | }
119 | }
120 |
121 | func setKey(t *testing.T, d *Database, key, value string) {
122 | t.Helper()
123 |
124 | if err := d.SetKey(key, []byte(value)); err != nil {
125 | t.Fatalf("SetKey(%q, %q) failed: %v", key, value, err)
126 | }
127 | }
128 |
129 | func getKey(t *testing.T, d *Database, key string) string {
130 | t.Helper()
131 | value, err := d.GetKey(key)
132 | if err != nil {
133 | t.Fatalf("GetKey(%q) failed: %v", key, err)
134 | }
135 |
136 | return string(value)
137 | }
138 |
139 | func TestDeleteExtraKeys(t *testing.T) {
140 | db := createTempDb(t, false)
141 |
142 | setKey(t, db, "party", "Great")
143 | setKey(t, db, "us", "CapitalistPigs")
144 | //db.db.View(func(tx *bolt.Tx) error {
145 | // b := tx.Bucket([]byte("default"))
146 | // key, value := b.Cursor().First()
147 | // fmt.Sprintf("value %s %s", key, value)
148 | // return errors.New("s")
149 | //})
150 | if err := db.DeleteExtraKeys(func(name string) bool { return name == "us" }); err != nil {
151 | t.Fatalf("Could not delete extra keys: %v", err)
152 | }
153 |
154 | if value := getKey(t, db, "party"); value != "Great" {
155 | t.Errorf(`Unexpected value for key "party": got %q, want %q`, value, "Great")
156 | }
157 |
158 | if value := getKey(t, db, "us"); value != "" {
159 | t.Errorf(`Unexpected value for key "us": got %q, want %q`, value, "")
160 | }
161 | }
162 |
--------------------------------------------------------------------------------
/pkg/replication/replication.go:
--------------------------------------------------------------------------------
1 | package replication
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "github.com/s5364733/distrBoltX/api"
10 | "github.com/s5364733/distrBoltX/internal/db"
11 | "google.golang.org/grpc"
12 | "google.golang.org/grpc/credentials/insecure"
13 | "io"
14 | "log"
15 | "net/http"
16 | "net/url"
17 | "time"
18 | )
19 |
// NextKeyValue contains the response for GetNextKeyForReplication.
// It is the JSON payload decoded from the leader's /next-replication-key
// endpoint (see client.loop); an empty Key means nothing is pending.
type NextKeyValue struct {
	Key   string // key pending replication; "" when the queue is empty
	Value string // value associated with Key
	Err   error  // error reported by the leader, if any
}
26 |
// client pulls pending replication keys from the leader shard identified by
// leaderAddr and applies them to the local replica database.
type client struct {
	db         *db.Database // local replica store the keys are applied to
	leaderAddr string       // host:port of the leader's HTTP endpoint
}
31 |
32 | // ClientLoop continuously downloads new keys from the master and applies them.
33 | func ClientLoop(db *db.Database, leaderAddr string) {
34 | c := &client{db: db, leaderAddr: leaderAddr}
35 | for {
36 | present, err := c.loop()
37 | if err != nil {
38 | log.Printf("Loop error: %v", err)
39 | time.Sleep(time.Second)
40 | continue
41 | }
42 |
43 | if !present {
44 | time.Sleep(time.Millisecond * 100)
45 | }
46 | }
47 | }
48 |
49 | // ClientGrpcLoop continuously stream rpc for grpc sync data's
50 | func ClientGrpcLoop(db *db.Database, leaderAddr string) {
51 | c := &client{db: db, leaderAddr: leaderAddr}
52 | //for {
53 | err := c.grpcLoop()
54 | if err != nil {
55 | log.Printf("grpcLoop error: %v", err)
56 | time.Sleep(time.Second)
57 | //continue
58 | }
59 | //}
60 | }
61 |
62 | // grpcLoop grpc
63 | // the default keepalive tpc syn ack is opened for this link
64 | func (c *client) grpcLoop() (err error) {
65 | conn, err := grpc.Dial("127.0.0.2:50030", grpc.WithTransportCredentials(insecure.NewCredentials()))
66 | if err != nil {
67 | log.Fatalf("fail to dial: %v", err)
68 | return err
69 | }
70 | defer conn.Close()
71 | dialerClient := api.NewAckSyncDialerClient(conn)
72 | //ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
73 | //defer cancel()
74 | stream, err := dialerClient.Dial(context.TODO())
75 | if err != nil {
76 | return err
77 | }
78 | waitc := make(chan struct{})
79 | go func() {
80 | for {
81 | res, err := stream.Recv()
82 | if err == io.EOF {
83 | // read done.
84 | close(waitc)
85 | return
86 | }
87 | //set to current replication
88 | c.syncReplicationBolt(res)
89 | err = stream.Send(&api.SyncD{
90 | Ack: true,
91 | })
92 | if err != nil {
93 | fmt.Errorf("err %v", err)
94 | }
95 | }
96 | }()
97 |
98 | select {
99 | case <-waitc:
100 | stream.CloseSend()
101 | }
102 | return nil
103 | }
104 |
105 | func (c *client) syncReplicationBolt(res *api.NextKeyValue) {
106 | //设置到当前节点
107 | if err := c.db.SetKeyOnReplica(res.Key, []byte(res.Value)); err != nil {
108 | fmt.Errorf("err for operation of sync , key = %q,value = %q, err = %+v", res.Key, res.Value, err)
109 | }
110 | log.Printf("The key asynchronized from the master , which has been loaded (key:%q,value:%q)", res.Key, res.Value)
111 | }
112 |
113 | // loop Return false used to do wait 100 millis
114 | // the default keepalive tpc syn ack is opened for this link
115 | func (c *client) loop() (present bool, err error) {
116 | //Sync
117 | //拿到主分片的副本分片数据
118 | resp, err := http.Get("http://" + c.leaderAddr + "/next-replication-key")
119 | if err != nil {
120 | return false, err
121 | }
122 |
123 | var res NextKeyValue
124 | //解析成功
125 | if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
126 | return false, err
127 | }
128 |
129 | defer resp.Body.Close()
130 |
131 | //报错
132 | if res.Err != nil {
133 | fmt.Sprintf("The value of sync to master which occurs err ")
134 | return false, err
135 | }
136 | //没有这个KEY
137 | if res.Key == "" {
138 | fmt.Sprintf("The value of sync to master which is nil ")
139 | return false, nil
140 | }
141 | //errors.New()
142 | //设置到当前节点
143 | if err := c.db.SetKeyOnReplica(res.Key, []byte(res.Value)); err != nil {
144 | err := errors.New("error")
145 | panic(err) //throws error
146 | return false, err
147 | }
148 |
149 | log.Printf("The key asynchronized from the master , which has been loaded (key:%q,value:%q)", res.Key, res.Value)
150 | //Deletes the key of replica's bucket of master
151 | if err := c.deleteFromReplicationQueue(res.Key, res.Value); err != nil {
152 | log.Printf("DeleteKeyFromReplication failed: %v", err)
153 | }
154 |
155 | return true, nil
156 | }
157 |
158 | func (c *client) deleteFromReplicationQueue(key, value string) error {
159 | u := url.Values{}
160 | u.Set("key", key)
161 | u.Set("value", value)
162 |
163 | log.Printf("Deleting key=%q, value=%q from replication queue on %q", key, value, c.leaderAddr)
164 |
165 | resp, err := http.Get("http://" + c.leaderAddr + "/delete-replication-key?" + u.Encode())
166 | if err != nil {
167 | return err
168 | }
169 | defer resp.Body.Close()
170 |
171 | result, err := io.ReadAll(resp.Body)
172 | if err != nil {
173 | return err
174 | }
175 |
176 | if !bytes.Equal(result, []byte("ok")) {
177 | return errors.New(string(result))
178 | }
179 |
180 | return nil
181 | }
182 |
--------------------------------------------------------------------------------
/internal/db/db.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "bytes"
5 | "errors"
6 | "fmt"
7 | bolt "go.etcd.io/bbolt"
8 | "log"
9 | "unsafe"
10 | )
11 |
// defaultBucket holds the primary key/value data for this shard.
var defaultBucket = []byte("defaultBucket")

// replicaBucket acts as a replication queue: keys written via SetKey are
// mirrored here until a replica acknowledges and deletes them.
var replicaBucket = []byte("replicaBucket")

// Database is an open bolt database
type Database struct {
	db       *bolt.DB // underlying bbolt handle
	readOnly bool     // set on replica shards; SetKey refuses writes when true
}
20 |
21 | // NewDatabase return an instance of a database that we can work with
22 | func NewDatabase(dbPath string, readOnly bool) (db *Database, closeFunc func() error, err error) {
23 | boltDb, err := bolt.Open(dbPath, 0600, nil)
24 | if err != nil {
25 | log.Fatal(err)
26 | }
27 |
28 | db = &Database{
29 | db: boltDb,
30 | readOnly: readOnly, //副本节点分片只读
31 | }
32 | closeFunc = boltDb.Close
33 | if err := db.createBuckets(); err != nil {
34 | closeFunc()
35 | return nil, nil, fmt.Errorf("create default bucket: %w", err)
36 | }
37 | return db, closeFunc, nil
38 | }
39 |
40 | // createBuckets 创建副本bucket
41 | func (d *Database) createBuckets() error {
42 | return d.db.Update(func(tx *bolt.Tx) error {
43 | if _, err := tx.CreateBucketIfNotExists(defaultBucket); err != nil {
44 | return err
45 | }
46 | if _, err := tx.CreateBucketIfNotExists(replicaBucket); err != nil {
47 | return err
48 | }
49 | return nil
50 | })
51 | }
52 |
53 | // SetKey sets the key to the requested value into the default database or returns an error
54 | func (d *Database) SetKey(key string, value []byte) error {
55 | if d.readOnly {
56 | return errors.New("read-only mode")
57 | }
58 | return d.db.Update(func(tx *bolt.Tx) error {
59 | //设置当前bucket成功
60 | if err := tx.Bucket(defaultBucket).Put([]byte(key), value); err != nil {
61 | return err
62 | }
63 | return tx.Bucket(replicaBucket).Put([]byte(key), value)
64 | })
65 | }
66 |
67 | // SetKeyOnReplica sets the key to the requested value into the default database and does not write
68 | // to the replication queue.
69 | // This method is intended to be used only on replicas.
70 | // 该方法适用副本节点,不使用副本bucket
71 | func (d *Database) SetKeyOnReplica(key string, value []byte) error {
72 | return d.db.Update(func(tx *bolt.Tx) error {
73 | return tx.Bucket(defaultBucket).Put([]byte(key), value)
74 | })
75 | }
76 |
// copyByteSlice returns a detached copy of b (nil in, nil out) so callers
// can keep the data after the bolt transaction that produced it ends.
func copyByteSlice(b []byte) []byte {
	if b == nil {
		return nil
	}
	out := make([]byte, len(b))
	copy(out, b)
	return out
}
85 |
86 | // GetKey gets the key to the requested value into the default database or returns an error
87 | func (d *Database) GetKey(key string) ([]byte, error) {
88 | var result []byte
89 | err := d.db.View(func(tx *bolt.Tx) error {
90 | b := tx.Bucket(defaultBucket)
91 | result = copyByteSlice(b.Get([]byte(key)))
92 | return nil
93 | })
94 |
95 | if err == nil {
96 | return result, nil
97 | }
98 | return nil, err
99 | }
100 |
101 | // GetNextKeyForReplication returns the key and value for the keys that have
102 | // changed and have not yet been applied to replicas.
103 | // If there are no new keys, nil key and value will be returned.
104 | func (d *Database) GetNextKeyForReplication() (key, value []byte, err error) {
105 | err = d.db.View(func(tx *bolt.Tx) error {
106 | b := tx.Bucket(replicaBucket)
107 | k, v := b.Cursor().First()
108 | key = copyByteSlice(k)
109 | value = copyByteSlice(v)
110 | return nil
111 | })
112 |
113 | if err != nil {
114 | return nil, nil, err
115 | }
116 |
117 | return key, value, nil
118 | }
119 |
120 | // DeleteReplicationKey deletes the key from the replication queue
121 | // if the value matches the contents or if the key is already absent.
122 | func (d *Database) DeleteReplicationKey(key, value []byte) (err error) {
123 | return d.db.Update(func(tx *bolt.Tx) error {
124 | b := tx.Bucket(replicaBucket)
125 |
126 | v := b.Get(key)
127 | if v == nil {
128 | return errors.New("key does not exist")
129 | }
130 |
131 | if !bytes.Equal(v, value) {
132 | return errors.New("value does not match")
133 | }
134 |
135 | return b.Delete(key)
136 | })
137 | }
138 |
// byteConvStr converts a byte slice to a string without copying the data by
// reinterpreting the slice header as a string header (zero-copy conversion).
// WARNING: the returned string aliases b's backing array — it must not
// outlive b and b must not be mutated afterwards. NOTE(review): when b is a
// bolt key/value, the alias is only valid inside the transaction; callers
// that retain the string past the transaction should copy with string(b).
func byteConvStr(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
143 |
144 | // DeleteExtraKeys delete the keys tha do not belong to this shard
145 | func (d *Database) DeleteExtraKeys(isExtra func(string) bool) error {
146 | var keys []string
147 | //To get all keys for this array
148 | err := d.db.View(func(tx *bolt.Tx) error {
149 | b := tx.Bucket(defaultBucket)
150 | //iterates the all key , as the key of the same has been occurred
151 | return b.ForEach(func(k, v []byte) error {
152 | ks := byteConvStr(k)
153 | //如果不是当前分区的KEY 直接删除
154 | if isExtra(ks) {
155 | keys = append(keys, ks)
156 | }
157 | return nil
158 | })
159 | })
160 |
161 | if err != nil {
162 | return err
163 | }
164 |
165 | return d.db.Update(func(tx *bolt.Tx) error {
166 | b := tx.Bucket(defaultBucket)
167 |
168 | for _, k := range keys {
169 | if err := b.Delete([]byte(k)); err != nil {
170 | return err
171 | }
172 | }
173 | return nil
174 | })
175 | }
176 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # distrBoltX
2 | Handwriting based on boltDB distributed KV database, the library will be updated from time to time, suitable for small white scholars entry and distributed advanced
3 |
4 | This project is not integrated as an SDK, nor can it be used as an on-site library. You can understand it as a platform for public learning. In the case of distributed protocol CAP or split-brain, this project does not provide consistency protocol as **raft** for the time being
>该库是基于 **go.etcd.io/bbolt** 驱动打造的一个分布式KV库(Bbolt有点类似innodb,完全兼容ACID事务),性能完全取决于Bbolt的B+tree的顺序写和MMAP的预随机读;因为是基于硬盘的读写驱动,所以在固态硬盘上运行的性能最佳
6 | # Prepare the dependency library
7 | > go mod tidy
8 | # Standalone Start to up
> go build -o main .; ./main -db-location=shard0.db -http-addr=127.0.0.2:8080 -config-file=sharding.toml -shard=shard0
# Supports simple data sharding: each request is routed to the shard that owns the key
11 | ```toml
12 | [[shards]]
13 | name = "shard0"
14 | idx = 0
15 | address = "127.0.0.2:8080"
16 | replicas = ["127.0.0.22:8080"]
17 |
18 | [[shards]]
19 | name = "shard1"
20 | idx = 1
21 | address = "127.0.0.3:8081"
22 | replicas = ["127.0.0.33:8080"]
23 |
24 | [[shards]]
25 | name = "shard2"
26 | idx = 2
27 | address = "127.0.0.4:8082"
28 | replicas = ["127.0.0.44:8080"]
29 |
30 | [[shards]]
31 | name = "shard3"
32 | idx = 3
33 | address = "127.0.0.5:8083"
34 | replicas = ["127.0.0.55:8080"]
35 |
36 | ```
37 | # Middleware dependency
38 | >bbolt
39 | > https://github.com/etcd-io
40 |
41 | bbolt is a fork of Ben Johnson's Bolt key/value store. The purpose of this fork is to provide the Go community with an active maintenance and development target for Bolt; the goal is improved reliability and stability. bbolt includes bug fixes, performance enhancements, and features not found in Bolt while preserving backwards compatibility with the Bolt API.
42 |
43 | Bolt is a pure Go key/value store inspired by Howard Chu's LMDB project. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database server such as Postgres or MySQL.
44 |
45 | Since Bolt is meant to be used as such a low-level piece of functionality, simplicity is key. The API will be small and only focus on getting values and setting values. That's it.
46 |
47 | DistrBoltX is secondary developed based on bbolt, adding distributed fragmentation high availability data security and other scenarios
48 |
49 | There will be a lot of optimization details in the future, so stay tuned
50 |
51 | # Distributed startup
52 | 1. ./populate.sh
53 | 2. 检查toml配置文件是否对应服务器完整
54 | 3. ./luanch.sh
55 |
56 | 当你看到:
57 | 
58 |
59 | 说明你此时已经启动了四个端口监听四个分片库 ,You know ?
60 |
61 | Core module :
62 | 1. 数据分片
63 | 2. 读写基准测试
64 | 3. 多机备份
65 | 4. shard
66 | 5. replicas
67 |
68 | 集群分片采用CRC64 MOD SHARD_COUNT 得到 当前分片,如果有数据写入当前分片,又单协程轮询同步到副本节点,副本节点启动时即刻加载对主节点的写入监听,内部节点采用节点转发的方式避免集群连接过多(参考redis HASHSLOT REDIRECT)
69 |
70 | #### DEBUG
71 | When you need to debug locally, here is a more suitable way that you can start using two VS/IDEA boosters for example
72 | 1. Open both editors
73 | 2. Enter two startup scripts in the editor, respectively, as follows
74 | ```shell
75 |
76 | 主节点
77 | --db-location=shard0.db --http-addr=127.0.0.2:8080 --grpc-addr=127.0.0.2:50030 --config-file=sharding.toml --config=shard0
78 | 副本
79 | --db-location=shard0-r.db --http-addr=127.0.0.22:8080 --grpc-addr=127.0.0.2:50030 --config-file=sharding.toml --config=shard0 --replica
80 |
81 | ```
82 |
83 | #### 部分使用日志
84 | >当你看到如下日志就说明已经成功了同步了
85 | - 主节点日志
86 | ```shell
87 | 2023/04/12 11:03:15 Shard count is 1 current shard :0 cur config &config.Config{Shards:[]config.Shard{config.Shard{Name:"shard0", Idx:0, Address:"127
88 | .0.0.2:8080"}}}:
89 | execute init for grpc register &web.Server{Db:(*db.Database)(0xc0001a8080), shards:(*config.Shards)(0xc000008810)} register node ip addr : "127.0.0.2
90 | :50030"2023/04/12 11:03:15 server listening at 127.0.0.2:50030
91 | Data sent to the replica is synchronized to the replica node key = "key8",value="value8"The ACK identifier of the replica node synchronization is com
92 | pleted ack = "ack:true"The key asynchronized from the master ,which has been deleted key "key8" val "value8" Data replica sync have been done,deletin
93 | g local key = "key8"
94 | ```
95 | - 副本节点日志
96 | ```shell
97 | GOPATH=E:\go-workspace #gosetup
98 | E:\go\bin\go.exe build -o C:\Users\Administrator\AppData\Local\Temp\GoLand\___4go_build_github_com_s5364733_distrBoltX_main.exe github.com/s5364733/distrBoltX/main #gosetup
99 | C:\Users\Administrator\AppData\Local\Temp\GoLand\___4go_build_github_com_s5364733_distrBoltX_main.exe --db-location=shard0-r.db --http-addr=127.0.0.22:8080 --grpc-addr=127.0.0.2:50030 --config-file=sharding.toml --config=shard0 --replica
100 | 2023/04/12 11:03:25 Shard count is 1 current shard :0 cur config &config.Config{Shards:[]config.Shard{config.Shard{Name:"shard0", Idx:0, Address:"127
101 | .0.0.2:8080"}}}:
102 | 2023/04/12 11:03:35 The key asynchronized from the master , which has been loaded (key:"key8",value:"value8")
103 | ```
104 |
105 |
106 | 3. If you only need to start two nodes for testing, you only need to keep one shards shard in sharding.toml as follows:
107 | ```shell
108 | [[shards]]
109 | name = "shard0"
110 | idx = 0
111 | address = "127.0.0.2:8080"
112 | replicas = ["127.0.0.22:8080"]
113 | ```
114 |
115 | ##### 您可能会问,为什么我在本地可以监听127.0.0.2
116 | 因为127.0.0.2是环回地址中的一个是可以使用的,您可以参考 https://superuser.com/questions/393700/what-is-the-127-0-0-2-ip-address-for
117 | #### CHANGELOG_FEATURE
118 | 1. 内部连接使用GRPC代替HTTP1.1协议 (done)
119 | 2. 取模分片算法采用一致性HASH算发代替用来解决HASH迁移的问题
120 | 3. 分片之后数据合并可能会有问题,所有可以参考REDIS HASHTAG 实现HASH聚合
--------------------------------------------------------------------------------
/api/nextKeyValue.pb.go:
--------------------------------------------------------------------------------
1 | // Code generated by protoc-gen-go. DO NOT EDIT.
2 | // versions:
3 | // protoc-gen-go v1.28.1
4 | // protoc v4.22.2
5 | // source: nextKeyValue.proto
6 |
7 | package api
8 |
9 | import (
10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect"
11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl"
12 | reflect "reflect"
13 | sync "sync"
14 | )
15 |
16 | const (
17 | // Verify that this generated code is sufficiently up-to-date.
18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
19 | // Verify that runtime/protoimpl is sufficiently up-to-date.
20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
21 | )
22 |
23 | type NextKeyValue struct {
24 | state protoimpl.MessageState
25 | sizeCache protoimpl.SizeCache
26 | unknownFields protoimpl.UnknownFields
27 |
28 | Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
29 | Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
30 | }
31 |
32 | func (x *NextKeyValue) Reset() {
33 | *x = NextKeyValue{}
34 | if protoimpl.UnsafeEnabled {
35 | mi := &file_nextKeyValue_proto_msgTypes[0]
36 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
37 | ms.StoreMessageInfo(mi)
38 | }
39 | }
40 |
41 | func (x *NextKeyValue) String() string {
42 | return protoimpl.X.MessageStringOf(x)
43 | }
44 |
45 | func (*NextKeyValue) ProtoMessage() {}
46 |
47 | func (x *NextKeyValue) ProtoReflect() protoreflect.Message {
48 | mi := &file_nextKeyValue_proto_msgTypes[0]
49 | if protoimpl.UnsafeEnabled && x != nil {
50 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
51 | if ms.LoadMessageInfo() == nil {
52 | ms.StoreMessageInfo(mi)
53 | }
54 | return ms
55 | }
56 | return mi.MessageOf(x)
57 | }
58 |
59 | // Deprecated: Use NextKeyValue.ProtoReflect.Descriptor instead.
60 | func (*NextKeyValue) Descriptor() ([]byte, []int) {
61 | return file_nextKeyValue_proto_rawDescGZIP(), []int{0}
62 | }
63 |
64 | func (x *NextKeyValue) GetKey() string {
65 | if x != nil {
66 | return x.Key
67 | }
68 | return ""
69 | }
70 |
71 | func (x *NextKeyValue) GetValue() string {
72 | if x != nil {
73 | return x.Value
74 | }
75 | return ""
76 | }
77 |
78 | // sync flag for serve
79 | type SyncD struct {
80 | state protoimpl.MessageState
81 | sizeCache protoimpl.SizeCache
82 | unknownFields protoimpl.UnknownFields
83 |
84 | Ack bool `protobuf:"varint,1,opt,name=ack,proto3" json:"ack,omitempty"`
85 | }
86 |
87 | func (x *SyncD) Reset() {
88 | *x = SyncD{}
89 | if protoimpl.UnsafeEnabled {
90 | mi := &file_nextKeyValue_proto_msgTypes[1]
91 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
92 | ms.StoreMessageInfo(mi)
93 | }
94 | }
95 |
96 | func (x *SyncD) String() string {
97 | return protoimpl.X.MessageStringOf(x)
98 | }
99 |
100 | func (*SyncD) ProtoMessage() {}
101 |
102 | func (x *SyncD) ProtoReflect() protoreflect.Message {
103 | mi := &file_nextKeyValue_proto_msgTypes[1]
104 | if protoimpl.UnsafeEnabled && x != nil {
105 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
106 | if ms.LoadMessageInfo() == nil {
107 | ms.StoreMessageInfo(mi)
108 | }
109 | return ms
110 | }
111 | return mi.MessageOf(x)
112 | }
113 |
114 | // Deprecated: Use SyncD.ProtoReflect.Descriptor instead.
115 | func (*SyncD) Descriptor() ([]byte, []int) {
116 | return file_nextKeyValue_proto_rawDescGZIP(), []int{1}
117 | }
118 |
119 | func (x *SyncD) GetAck() bool {
120 | if x != nil {
121 | return x.Ack
122 | }
123 | return false
124 | }
125 |
// File_nextKeyValue_proto is the protoreflect.FileDescriptor for
// nextKeyValue.proto; it is populated by file_nextKeyValue_proto_init below.
var File_nextKeyValue_proto protoreflect.FileDescriptor

// file_nextKeyValue_proto_rawDesc holds the serialized FileDescriptorProto
// for nextKeyValue.proto (messages NextKeyValue and SyncD, service
// AckSyncDialer with bidirectional-streaming method "dial").
// NOTE(review): machine-generated by protoc-gen-go — regenerate from the
// .proto file rather than editing these bytes by hand.
var file_nextKeyValue_proto_rawDesc = []byte{
	0x0a, 0x12, 0x6e, 0x65, 0x78, 0x74, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x36, 0x0a, 0x0c, 0x4e,
	0x65, 0x78, 0x74, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b,
	0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
	0x6c, 0x75, 0x65, 0x22, 0x19, 0x0a, 0x05, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x12, 0x10, 0x0a, 0x03,
	0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x32, 0x40,
	0x0a, 0x0d, 0x41, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x72, 0x12,
	0x2f, 0x0a, 0x04, 0x64, 0x69, 0x61, 0x6c, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
	0x53, 0x79, 0x6e, 0x63, 0x44, 0x1a, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4e, 0x65,
	0x78, 0x74, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01,
	0x42, 0x53, 0x42, 0x11, 0x4e, 0x65, 0x78, 0x74, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
	0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x35, 0x33, 0x36, 0x34, 0x37, 0x33, 0x33, 0x2f, 0x64, 0x69, 0x73,
	0x74, 0x72, 0x42, 0x6f, 0x6c, 0x74, 0x58, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x77, 0x6f, 0x72,
	0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x42, 0x6f, 0x6c, 0x74,
	0x58, 0x2f, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
147 |
var (
	// file_nextKeyValue_proto_rawDescOnce guards the one-time GZIP
	// compression performed in file_nextKeyValue_proto_rawDescGZIP.
	file_nextKeyValue_proto_rawDescOnce sync.Once
	// file_nextKeyValue_proto_rawDescData starts as the uncompressed raw
	// descriptor and is replaced in place by its gzipped form on first use.
	file_nextKeyValue_proto_rawDescData = file_nextKeyValue_proto_rawDesc
)
152 |
// file_nextKeyValue_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once (guarded by file_nextKeyValue_proto_rawDescOnce) and returns
// the compressed bytes on this and every subsequent call.
func file_nextKeyValue_proto_rawDescGZIP() []byte {
	file_nextKeyValue_proto_rawDescOnce.Do(func() {
		file_nextKeyValue_proto_rawDescData = protoimpl.X.CompressGZIP(file_nextKeyValue_proto_rawDescData)
	})
	return file_nextKeyValue_proto_rawDescData
}
159 |
// file_nextKeyValue_proto_msgTypes holds runtime message info for the two
// messages defined in this file (index 0: NextKeyValue, index 1: SyncD).
var file_nextKeyValue_proto_msgTypes = make([]protoimpl.MessageInfo, 2)

// file_nextKeyValue_proto_goTypes maps descriptor type indices to the Go
// types generated for them; order must match the raw descriptor.
var file_nextKeyValue_proto_goTypes = []interface{}{
	(*NextKeyValue)(nil), // 0: proto.NextKeyValue
	(*SyncD)(nil),        // 1: proto.SyncD
}

// file_nextKeyValue_proto_depIdxs encodes, as indices into goTypes, the
// dependency sub-lists the protobuf runtime consumes positionally
// (method outputs, method inputs, extensions, extendees, field types).
var file_nextKeyValue_proto_depIdxs = []int32{
	1, // 0: proto.AckSyncDialer.dial:input_type -> proto.SyncD
	0, // 1: proto.AckSyncDialer.dial:output_type -> proto.NextKeyValue
	1, // [1:2] is the sub-list for method output_type
	0, // [0:1] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}
174 |
func init() { file_nextKeyValue_proto_init() }

// file_nextKeyValue_proto_init builds and registers the file descriptor with
// the protobuf runtime. It is idempotent: the File_nextKeyValue_proto guard
// makes repeat calls no-ops. After a successful build the raw-descriptor and
// table variables are nil-ed so the backing memory can be reclaimed.
func file_nextKeyValue_proto_init() {
	if File_nextKeyValue_proto != nil {
		return
	}
	// Without unsafe access the runtime needs exporters to reach the
	// unexported state/sizeCache/unknownFields bookkeeping fields.
	if !protoimpl.UnsafeEnabled {
		file_nextKeyValue_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*NextKeyValue); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_nextKeyValue_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SyncD); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_nextKeyValue_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_nextKeyValue_proto_goTypes,
		DependencyIndexes: file_nextKeyValue_proto_depIdxs,
		MessageInfos:      file_nextKeyValue_proto_msgTypes,
	}.Build()
	File_nextKeyValue_proto = out.File
	file_nextKeyValue_proto_rawDesc = nil
	file_nextKeyValue_proto_goTypes = nil
	file_nextKeyValue_proto_depIdxs = nil
}
225 |
--------------------------------------------------------------------------------