├── cluster.png ├── .gitignore ├── codec ├── ss │ ├── test.proto │ ├── message.go │ ├── test.pb.go │ ├── codec.go │ └── ss_test.go ├── receiver.go ├── pb │ └── pb.go └── buffer │ └── buffer.go ├── membership ├── redis │ ├── redis.go │ ├── design.md │ ├── admin.go │ ├── script.go │ ├── script_test.go │ ├── subscribe.go │ └── client_test.go ├── etcd │ ├── admin.go │ ├── test │ │ └── test.go │ └── subscribe.go └── membership.go ├── example ├── pbrpc │ ├── proto │ │ ├── echo.proto │ │ └── test.proto │ ├── gen.sh │ ├── echosvr.go │ ├── echocli.go │ ├── service │ │ ├── echo │ │ │ ├── echo.go │ │ │ └── echo.pb.go │ │ └── test │ │ │ ├── test.go │ │ │ └── test.pb.go │ └── genrpc │ │ └── gen.go ├── membership │ ├── main │ │ ├── config.json │ │ └── server.go │ └── membership.go └── stream │ ├── gameserver.go │ ├── gateserver.go │ ├── client.go │ └── pb │ └── test.pb.go ├── pkg └── crypto │ ├── aes_test.go │ └── aes.go ├── logger.go ├── logger └── zap │ └── logger.go ├── go.mod ├── addr └── addr.go ├── rpc.go ├── README.md ├── go.sum ├── node.go ├── cluster.go └── cluster_test.go /cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sniperHW/clustergo/HEAD/cluster.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | ._.DS_Store 3 | ._cluster.png 4 | *.txt 5 | *.exe 6 | *.log 7 | *.out -------------------------------------------------------------------------------- /codec/ss/test.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "../ss"; 4 | 5 | message Echo { 6 | string msg = 1; 7 | } -------------------------------------------------------------------------------- /membership/redis/redis.go: -------------------------------------------------------------------------------- 1 | 
package redis 2 | 3 | func GetRedisError(err error) error { 4 | if err == nil || err.Error() == "redis: nil" { 5 | return nil 6 | } else { 7 | return err 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /example/pbrpc/proto/echo.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "service/echo"; 4 | 5 | message echoReq { 6 | string msg = 1; 7 | } 8 | 9 | message echoRsp { 10 | string msg = 1; 11 | } -------------------------------------------------------------------------------- /example/pbrpc/proto/test.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "service/test"; 4 | 5 | message testReq { 6 | string msg = 1; 7 | } 8 | 9 | message testRsp { 10 | string msg = 1; 11 | } -------------------------------------------------------------------------------- /example/membership/main/config.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "LogicAddr":"1.1.1", 4 | "NetAddr":"127.0.0.1:18111", 5 | "Export":false, 6 | "Available":true 7 | }, 8 | { 9 | "LogicAddr":"1.2.1", 10 | "NetAddr":"127.0.0.1:18112", 11 | "Export":false, 12 | "Available":true 13 | } 14 | ] -------------------------------------------------------------------------------- /pkg/crypto/aes_test.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | "testing" 7 | ) 8 | 9 | func TestAes(t *testing.T) { 10 | b, err := AESCBCEncrypt([]byte("123456"), []byte("123456")) 11 | if err != nil { 12 | panic(err) 13 | } else { 14 | fmt.Println(base64.StdEncoding.EncodeToString(b)) 15 | //b64 := base64.NewEncoder() 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /example/pbrpc/gen.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function make_proto(){ 4 | if [ ! -d "./service" ];then 5 | mkdir ./service 6 | fi 7 | for file in `ls ./proto` 8 | do 9 | if [ "${file##*.}"x = "proto"x ];then 10 | protoc --go_out=./ "./proto/"$file 11 | fi 12 | done 13 | } 14 | 15 | # 执行命令 16 | make_proto 17 | # 生成rpc文件 18 | go run genrpc/gen.go -------------------------------------------------------------------------------- /membership/etcd/admin.go: -------------------------------------------------------------------------------- 1 | package etcd 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/sniperHW/clustergo" 7 | clientv3 "go.etcd.io/etcd/client/v3" 8 | ) 9 | 10 | type Admin struct { 11 | Cfg clientv3.Config 12 | PrefixConfig string 13 | PrefixAlive string 14 | Logger clustergo.Logger 15 | TTL time.Duration 16 | 17 | //leaseID clientv3.LeaseID 18 | //leaseCh <-chan *clientv3.LeaseKeepAliveResponse 19 | } 20 | -------------------------------------------------------------------------------- /example/membership/main/server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "os" 6 | 7 | "github.com/sniperHW/clustergo/example/membership" 8 | ) 9 | 10 | func main() { 11 | var config []*membership.Node 12 | f, err := os.Open("./config.json") 13 | if err != nil { 14 | panic(err) 15 | } 16 | 17 | decoder := json.NewDecoder(f) 18 | err = decoder.Decode(&config) 19 | if err != nil { 20 | panic(err) 21 | } 22 | 23 | svr := membership.NewServer() 24 | 25 | svr.Start("127.0.0.1:18110", config) 26 | 27 | } 28 | -------------------------------------------------------------------------------- /membership/redis/design.md: -------------------------------------------------------------------------------- 1 | db0 存储配置 2 | db1 存储 alive 信息 3 | 4 | ## db0 5 | 6 | 每个member使用logic.addr作为key。value中包含一个verison字段,version值为发生变更时db0.version。 7 | 
8 | 单独的version字段,db0的变更通过lua脚本实现,每次变更version++,同时向notify member发布事件,所有监听事件的客户端重新 9 | 向db0获取数据,以更新本地信息。 10 | 11 | 12 | ## db1 13 | 14 | 记录live节点,所有活动节点定时执行heartbeat,heartbeat更新节点deadline。db1也有一个单独的version,每当向db1插入或删除节点, 15 | version++。 16 | 17 | 每当节点插入db1,向notify live发布事件。所有监听事件的客户端重新向db1获取数据,以更新本地live信息。 18 | 19 | 一个单独的进程定时执行check_timeout脚本,check_timeout遍历db1中的节点,将超时节点删除(标记删除),更新version,向notify live发布事件。 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | package clustergo 2 | 3 | import ( 4 | "github.com/sniperHW/rpcgo" 5 | ) 6 | 7 | type Logger interface { 8 | Debugf(string, ...interface{}) 9 | Infof(string, ...interface{}) 10 | Warnf(string, ...interface{}) 11 | Errorf(string, ...interface{}) 12 | Panicf(string, ...interface{}) 13 | Fatalf(string, ...interface{}) 14 | Debug(...interface{}) 15 | Info(...interface{}) 16 | Warn(...interface{}) 17 | Error(...interface{}) 18 | Panic(...interface{}) 19 | Fatal(...interface{}) 20 | } 21 | 22 | var logger Logger 23 | 24 | func InitLogger(l Logger) { 25 | rpcgo.InitLogger(l.(rpcgo.Logger)) 26 | logger = l 27 | } 28 | 29 | func Log() Logger { 30 | return logger 31 | } 32 | -------------------------------------------------------------------------------- /membership/etcd/test/test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/sniperHW/clustergo/membership" 8 | "github.com/sniperHW/clustergo/membership/etcd" 9 | clientv3 "go.etcd.io/etcd/client/v3" 10 | ) 11 | 12 | func main() { 13 | 14 | membershipCli := etcd.Subscribe{ 15 | PrefixConfig: "/test/", 16 | PrefixAlive: "/alive/", 17 | //LogicAddr: "1.1.1", 18 | TTL: time.Second * 10, 19 | Cfg: clientv3.Config{ 20 | Endpoints: []string{"localhost:2379"}, 21 | DialTimeout: 
time.Second * 5, 22 | }, 23 | } 24 | 25 | membershipCli.Subscribe(func(di membership.MemberInfo) { 26 | fmt.Println("add", di.Add) 27 | fmt.Println("update", di.Update) 28 | fmt.Println("remove", di.Remove) 29 | }) 30 | 31 | ch := make(chan struct{}) 32 | 33 | <-ch 34 | } 35 | -------------------------------------------------------------------------------- /example/stream/gameserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/sniperHW/clustergo" 5 | "github.com/sniperHW/clustergo/addr" 6 | "github.com/sniperHW/clustergo/example/membership" 7 | "github.com/sniperHW/clustergo/logger/zap" 8 | "github.com/xtaci/smux" 9 | ) 10 | 11 | func main() { 12 | l := zap.NewZapLogger("1.1.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 13 | clustergo.InitLogger(l.Sugar()) 14 | localaddr, _ := addr.MakeLogicAddr("1.1.1") 15 | clustergo.Start(membership.NewClient("127.0.0.1:18110"), localaddr) 16 | clustergo.StartSmuxServer(func(s *smux.Stream) { 17 | go func() { 18 | buff := make([]byte, 64) 19 | for { 20 | n, err := s.Read(buff) 21 | if err != nil { 22 | break 23 | } 24 | n, err = s.Write(buff[:n]) 25 | if err != nil { 26 | break 27 | } 28 | } 29 | s.Close() 30 | }() 31 | }) 32 | clustergo.Wait() 33 | } 34 | -------------------------------------------------------------------------------- /example/pbrpc/echosvr.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/sniperHW/clustergo" 7 | "github.com/sniperHW/clustergo/addr" 8 | "github.com/sniperHW/clustergo/example/membership" 9 | "github.com/sniperHW/clustergo/example/pbrpc/service/echo" 10 | "github.com/sniperHW/clustergo/logger/zap" 11 | ) 12 | 13 | type echoService struct { 14 | } 15 | 16 | func (e *echoService) ServeEcho(ctx context.Context, replyer *echo.Replyer, request *echo.EchoReq) { 17 | from := 
replyer.Channel().(clustergo.RPCChannel).Peer() //获取请求的对端逻辑地址 18 | clustergo.Log().Debug("from:", from.String(), ",echo:", request.Msg) 19 | replyer.Reply(&echo.EchoRsp{Msg: request.Msg}) 20 | } 21 | 22 | func main() { 23 | l := zap.NewZapLogger("1.1.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 24 | clustergo.InitLogger(l.Sugar()) 25 | echo.Register(&echoService{}) 26 | 27 | localaddr, _ := addr.MakeLogicAddr("1.1.1") 28 | clustergo.Start(membership.NewClient("127.0.0.1:18110"), localaddr) 29 | 30 | clustergo.Wait() 31 | 32 | } 33 | -------------------------------------------------------------------------------- /example/pbrpc/echocli.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "time" 7 | 8 | "github.com/sniperHW/clustergo" 9 | "github.com/sniperHW/clustergo/addr" 10 | "github.com/sniperHW/clustergo/example/membership" 11 | "github.com/sniperHW/clustergo/example/pbrpc/service/echo" 12 | "github.com/sniperHW/clustergo/logger/zap" 13 | ) 14 | 15 | func main() { 16 | l := zap.NewZapLogger("1.2.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 17 | clustergo.InitLogger(l.Sugar()) 18 | localaddr, _ := addr.MakeLogicAddr("1.2.1") 19 | clustergo.Start(membership.NewClient("127.0.0.1:18110"), localaddr) 20 | 21 | echoAddr, _ := clustergo.GetAddrByType(1) 22 | 23 | for i := 0; i < 10; i++ { 24 | resp, err := echo.CallWithTimeout(echoAddr, &echo.EchoReq{Msg: fmt.Sprintf("hello:%d", i)}, time.Second) 25 | clustergo.Log().Debug(resp, err) 26 | } 27 | 28 | var wait sync.WaitGroup 29 | for i := 0; i < 10; i++ { 30 | wait.Add(1) 31 | echo.AsyncCall(echoAddr, &echo.EchoReq{Msg: fmt.Sprintf("hello async:%d", i)}, time.Now().Add(time.Second), func(resp *echo.EchoRsp, err error) { 32 | clustergo.Log().Debug(resp, err) 33 | wait.Done() 34 | }) 35 | } 36 | wait.Wait() 37 | clustergo.Stop() 38 | clustergo.Wait() 39 | } 40 | 
-------------------------------------------------------------------------------- /example/stream/gateserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "net" 6 | "sync" 7 | 8 | "github.com/sniperHW/clustergo" 9 | "github.com/sniperHW/clustergo/addr" 10 | "github.com/sniperHW/clustergo/example/membership" 11 | "github.com/sniperHW/clustergo/logger/zap" 12 | "github.com/sniperHW/netgo" 13 | ) 14 | 15 | func main() { 16 | l := zap.NewZapLogger("1.2.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 17 | clustergo.InitLogger(l.Sugar()) 18 | localaddr, _ := addr.MakeLogicAddr("1.2.1") 19 | clustergo.Start(membership.NewClient("127.0.0.1:18110"), localaddr) 20 | 21 | gameAddr, _ := clustergo.GetAddrByType(1) 22 | 23 | _, serve, _ := netgo.ListenTCP("tcp", "127.0.0.1:18113", func(conn *net.TCPConn) { 24 | go func() { 25 | cliStream, err := clustergo.OpenStream(gameAddr) 26 | if err != nil { 27 | conn.Close() 28 | return 29 | } 30 | 31 | defer func() { 32 | conn.Close() 33 | cliStream.Close() 34 | }() 35 | 36 | var wait sync.WaitGroup 37 | wait.Add(2) 38 | 39 | go func() { 40 | io.Copy(cliStream, conn) 41 | wait.Done() 42 | }() 43 | 44 | go func() { 45 | io.Copy(conn, cliStream) 46 | wait.Done() 47 | }() 48 | wait.Wait() 49 | }() 50 | }) 51 | go serve() 52 | 53 | clustergo.Wait() 54 | } 55 | -------------------------------------------------------------------------------- /example/pbrpc/service/echo/echo.go: -------------------------------------------------------------------------------- 1 | 2 | package echo 3 | 4 | import ( 5 | "github.com/sniperHW/clustergo" 6 | "github.com/sniperHW/clustergo/addr" 7 | "github.com/sniperHW/rpcgo" 8 | "context" 9 | "time" 10 | ) 11 | 12 | type Replyer struct { 13 | replyer *rpcgo.Replyer 14 | } 15 | 16 | func (r *Replyer) Reply(result *EchoRsp) { 17 | r.replyer.Reply(result) 18 | } 19 | 20 | func (r *Replyer) Error(err error) { 21 | 
r.replyer.Error(err) 22 | } 23 | 24 | func (r *Replyer) Channel() rpcgo.Channel { 25 | return r.replyer.Channel() 26 | } 27 | 28 | type Echo interface { 29 | ServeEcho(context.Context, *Replyer,*EchoReq) 30 | } 31 | 32 | func Register(o Echo) { 33 | clustergo.RegisterService("echo",func(ctx context.Context, r *rpcgo.Replyer,arg *EchoReq) { 34 | o.ServeEcho(ctx,&Replyer{replyer:r},arg) 35 | }) 36 | } 37 | 38 | 39 | func Call(ctx context.Context, peer addr.LogicAddr,arg *EchoReq) (*EchoRsp,error) { 40 | return clustergo.Call[*EchoReq,EchoRsp](ctx,peer,"echo",arg) 41 | } 42 | 43 | func CallWithTimeout(peer addr.LogicAddr,arg *EchoReq,d time.Duration) (*EchoRsp,error) { 44 | return clustergo.CallWithTimeout[*EchoReq,EchoRsp](peer,"echo",arg,d) 45 | } 46 | 47 | func AsyncCall(peer addr.LogicAddr,arg *EchoReq,deadline time.Time,callback func(*EchoRsp,error)) error { 48 | return clustergo.AsyncCall[*EchoReq,EchoRsp](peer,"echo",arg,deadline,callback) 49 | } 50 | 51 | -------------------------------------------------------------------------------- /example/pbrpc/service/test/test.go: -------------------------------------------------------------------------------- 1 | 2 | package test 3 | 4 | import ( 5 | "github.com/sniperHW/clustergo" 6 | "github.com/sniperHW/clustergo/addr" 7 | "github.com/sniperHW/rpcgo" 8 | "context" 9 | "time" 10 | ) 11 | 12 | type Replyer struct { 13 | replyer *rpcgo.Replyer 14 | } 15 | 16 | func (r *Replyer) Reply(result *TestRsp) { 17 | r.replyer.Reply(result) 18 | } 19 | 20 | func (r *Replyer) Error(err error) { 21 | r.replyer.Error(err) 22 | } 23 | 24 | func (r *Replyer) Channel() rpcgo.Channel { 25 | return r.replyer.Channel() 26 | } 27 | 28 | type Test interface { 29 | ServeTest(context.Context, *Replyer,*TestReq) 30 | } 31 | 32 | func Register(o Test) { 33 | clustergo.RegisterService("test",func(ctx context.Context, r *rpcgo.Replyer,arg *TestReq) { 34 | o.ServeTest(ctx,&Replyer{replyer:r},arg) 35 | }) 36 | } 37 | 38 | 39 | func Call(ctx 
context.Context, peer addr.LogicAddr,arg *TestReq) (*TestRsp,error) { 40 | return clustergo.Call[*TestReq,TestRsp](ctx,peer,"test",arg) 41 | } 42 | 43 | func CallWithTimeout(peer addr.LogicAddr,arg *TestReq,d time.Duration) (*TestRsp,error) { 44 | return clustergo.CallWithTimeout[*TestReq,TestRsp](peer,"test",arg,d) 45 | } 46 | 47 | func AsyncCall(peer addr.LogicAddr,arg *TestReq,deadline time.Time,callback func(*TestRsp,error)) error { 48 | return clustergo.AsyncCall[*TestReq,TestRsp](peer,"test",arg,deadline,callback) 49 | } 50 | 51 | -------------------------------------------------------------------------------- /membership/membership.go: -------------------------------------------------------------------------------- 1 | package membership 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/sniperHW/clustergo/addr" 7 | ) 8 | 9 | type nodeJson struct { 10 | LogicAddr string `json:"logicAddr"` 11 | NetAddr string `json:"netAddr"` 12 | Export bool `json:"export"` 13 | Available bool `json:"available"` 14 | } 15 | 16 | type Node struct { 17 | Addr addr.Addr 18 | Export bool //是否将节点暴露到cluster外部 19 | Available bool //是否可用, 20 | } 21 | 22 | func (n *Node) Marshal() ([]byte, error) { 23 | j := nodeJson{ 24 | Export: n.Export, 25 | Available: n.Available, 26 | LogicAddr: n.Addr.LogicAddr().String(), 27 | NetAddr: n.Addr.NetAddr().String(), 28 | } 29 | return json.Marshal(j) 30 | } 31 | 32 | func (n *Node) Unmarshal(data []byte) (err error) { 33 | var j nodeJson 34 | if err = json.Unmarshal(data, &j); err != nil { 35 | return 36 | } 37 | 38 | n.Available = j.Available 39 | n.Export = j.Export 40 | n.Addr, err = addr.MakeAddr(j.LogicAddr, j.NetAddr) 41 | return 42 | } 43 | 44 | type MemberInfo struct { 45 | Add []Node 46 | Remove []Node 47 | Update []Node 48 | } 49 | 50 | type Subscribe interface { 51 | //订阅变更 52 | Subscribe(func(MemberInfo)) error 53 | Close() 54 | } 55 | 56 | type Admin interface { 57 | //更新节点信息,如果节点不存在将它添加到membership中 58 | UpdateMember(Node) 59 | 
//从membership中移除节点 60 | RemoveMember(Node) 61 | //保活 62 | KeepAlive(Node) 63 | } 64 | -------------------------------------------------------------------------------- /membership/redis/admin.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/redis/go-redis/v9" 8 | "github.com/sniperHW/clustergo/membership" 9 | ) 10 | 11 | type Admin struct { 12 | RedisCli *redis.Client 13 | heartbeatSha string 14 | checkTimeoutSha string 15 | updateMemberSha string 16 | } 17 | 18 | func (cli *Admin) Init() (err error) { 19 | if cli.heartbeatSha, err = cli.RedisCli.ScriptLoad(context.Background(), ScriptHeartbeat).Result(); err != nil { 20 | err = fmt.Errorf("error on init ScriptHeartbeat:%s", err.Error()) 21 | return err 22 | } 23 | 24 | if cli.checkTimeoutSha, err = cli.RedisCli.ScriptLoad(context.Background(), ScriptCheckTimeout).Result(); err != nil { 25 | err = fmt.Errorf("error on init checkTimeout:%s", err.Error()) 26 | return err 27 | } 28 | 29 | if cli.updateMemberSha, err = cli.RedisCli.ScriptLoad(context.Background(), ScriptUpdateMember).Result(); err != nil { 30 | err = fmt.Errorf("error on init updateMember:%s", err.Error()) 31 | return err 32 | } 33 | 34 | return err 35 | } 36 | 37 | func (cli *Admin) UpdateMember(n membership.Node) error { 38 | jsonBytes, _ := n.Marshal() 39 | _, err := cli.RedisCli.EvalSha(context.Background(), cli.updateMemberSha, []string{n.Addr.LogicAddr().String()}, "insert_update", string(jsonBytes)).Result() 40 | return GetRedisError(err) 41 | } 42 | 43 | func (cli *Admin) RemoveMember(n membership.Node) error { 44 | _, err := cli.RedisCli.EvalSha(context.Background(), cli.updateMemberSha, []string{n.Addr.LogicAddr().String()}, "delete").Result() 45 | return GetRedisError(err) 46 | } 47 | 48 | func (cli *Admin) KeepAlive(n membership.Node) error { 49 | _, err := cli.RedisCli.EvalSha(context.Background(), cli.heartbeatSha, 
[]string{n.Addr.LogicAddr().String()}, 10).Result() 50 | return GetRedisError(err) 51 | } 52 | 53 | func (cli *Admin) CheckTimeout() { 54 | cli.RedisCli.EvalSha(context.Background(), cli.checkTimeoutSha, []string{}).Result() 55 | } 56 | -------------------------------------------------------------------------------- /logger/zap/logger.go: -------------------------------------------------------------------------------- 1 | package zap 2 | 3 | import ( 4 | "go.uber.org/zap" 5 | "go.uber.org/zap/zapcore" 6 | "gopkg.in/natefinch/lumberjack.v2" 7 | 8 | //"clustergo/node/common/config" 9 | "os" 10 | ) 11 | 12 | type stdoutWriteSyncer struct { 13 | } 14 | 15 | func (s stdoutWriteSyncer) Write(p []byte) (n int, err error) { 16 | n = len(p) 17 | os.Stdout.Write(p) 18 | return 19 | } 20 | 21 | func (s stdoutWriteSyncer) Sync() error { 22 | return nil 23 | } 24 | 25 | var levelMap = map[string]zapcore.Level{ 26 | 27 | "debug": zapcore.DebugLevel, 28 | 29 | "info": zapcore.InfoLevel, 30 | 31 | "warn": zapcore.WarnLevel, 32 | 33 | "error": zapcore.ErrorLevel, 34 | 35 | "dpanic": zapcore.DPanicLevel, 36 | 37 | "panic": zapcore.PanicLevel, 38 | 39 | "fatal": zapcore.FatalLevel, 40 | } 41 | 42 | func getLoggerLevel(lvl string) zapcore.Level { 43 | if level, ok := levelMap[lvl]; ok { 44 | return level 45 | } 46 | return zapcore.InfoLevel 47 | } 48 | 49 | func NewZapLogger(name string, path string, level string, maxLogfileSize int, maxAge int, maxBackups int, enableLogStdout bool) *zap.Logger /**zap.SugaredLogger*/ { 50 | 51 | syncWriter := zapcore.AddSync(&lumberjack.Logger{ 52 | Filename: path + "/" + name, 53 | MaxSize: maxLogfileSize, 54 | MaxAge: maxAge, 55 | MaxBackups: maxBackups, 56 | LocalTime: true, 57 | //Compress: true, 58 | }) 59 | 60 | var w zapcore.WriteSyncer 61 | 62 | encoder := zap.NewProductionEncoderConfig() 63 | 64 | encoder.EncodeTime = zapcore.ISO8601TimeEncoder 65 | 66 | if enableLogStdout { 67 | //encoder.EncodeLevel = zapcore.CapitalColorLevelEncoder 68 | w 
= zap.CombineWriteSyncers(syncWriter, stdoutWriteSyncer{}) 69 | } else { 70 | w = zap.CombineWriteSyncers(syncWriter) 71 | } 72 | /*NewConsoleEncoder*/ 73 | /*NewJSONEncoder*/ 74 | 75 | core := zapcore.NewCore(zapcore.NewConsoleEncoder(encoder), w, zap.NewAtomicLevelAt(getLoggerLevel(level))) 76 | 77 | return zap.New(core, zap.AddCaller()) //.Sugar() 78 | } 79 | -------------------------------------------------------------------------------- /codec/receiver.go: -------------------------------------------------------------------------------- 1 | package codec 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/sniperHW/netgo" 9 | ) 10 | 11 | const sizeLen int = 4 12 | 13 | // 接收一个length|payload类型的数据包 14 | type LengthPayloadPacketReceiver struct { 15 | MaxPacketSize int 16 | Buff []byte 17 | w int 18 | r int 19 | } 20 | 21 | func (ss *LengthPayloadPacketReceiver) read(readable netgo.ReadAble, deadline time.Time) (int, error) { 22 | if err := readable.SetReadDeadline(deadline); err != nil { 23 | return 0, err 24 | } else { 25 | return readable.Read(ss.Buff[ss.w:]) 26 | } 27 | } 28 | 29 | // 接收一个length|payload类型的数据包,返回payload 30 | func (ss *LengthPayloadPacketReceiver) Recv(readable netgo.ReadAble, deadline time.Time) (pkt []byte, err error) { 31 | for { 32 | unpackSize := ss.w - ss.r 33 | if unpackSize >= sizeLen { 34 | payload := int(binary.BigEndian.Uint32(ss.Buff[ss.r:])) 35 | totalSize := payload + sizeLen 36 | if payload == 0 { 37 | return nil, fmt.Errorf("zero payload") 38 | } else if totalSize > ss.MaxPacketSize { 39 | return nil, fmt.Errorf("packet too large:%d", totalSize) 40 | } else if totalSize <= unpackSize { 41 | ss.r += sizeLen 42 | pkt := ss.Buff[ss.r : ss.r+payload] 43 | ss.r += payload 44 | if ss.r == ss.w { 45 | ss.r = 0 46 | ss.w = 0 47 | } 48 | return pkt, nil 49 | } else { 50 | if totalSize > cap(ss.Buff) { 51 | buff := make([]byte, totalSize) 52 | copy(buff, ss.Buff[ss.r:ss.w]) 53 | ss.Buff = buff 54 | } else if ss.r 
> 0 { 55 | copy(ss.Buff, ss.Buff[ss.r:ss.w]) 56 | } 57 | ss.w = ss.w - ss.r 58 | ss.r = 0 59 | } 60 | } else if ss.r > 0 { 61 | copy(ss.Buff, ss.Buff[ss.r:ss.w]) 62 | ss.w = ss.w - ss.r 63 | ss.r = 0 64 | } 65 | 66 | var n int 67 | n, err = ss.read(readable, deadline) 68 | if n > 0 { 69 | ss.w += n 70 | } 71 | if nil != err { 72 | return 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/sniperHW/clustergo 2 | 3 | go 1.21.5 4 | 5 | require ( 6 | github.com/redis/go-redis/v9 v9.5.2 7 | github.com/sniperHW/netgo v0.0.0-20231214060736-2b26e624b5f6 8 | github.com/sniperHW/rpcgo v0.0.0-20250501095528-a98841003660 9 | github.com/stretchr/testify v1.8.4 10 | github.com/xtaci/smux v1.5.24 11 | go.etcd.io/etcd/client/v3 v3.5.11 12 | go.uber.org/zap v1.26.0 13 | google.golang.org/protobuf v1.31.0 14 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 15 | ) 16 | 17 | replace google.golang.org/grpc => google.golang.org/grpc v1.59.0 18 | 19 | require ( 20 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 21 | github.com/coreos/go-semver v0.3.0 // indirect 22 | github.com/coreos/go-systemd/v22 v22.3.2 // indirect 23 | github.com/davecgh/go-spew v1.1.1 // indirect 24 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 25 | github.com/gogo/protobuf v1.3.2 // indirect 26 | github.com/golang/protobuf v1.5.3 // indirect 27 | github.com/google/go-cmp v0.6.0 // indirect 28 | github.com/gorilla/websocket v1.5.0 // indirect 29 | github.com/klauspost/cpuid v1.3.1 // indirect 30 | github.com/klauspost/reedsolomon v1.9.9 // indirect 31 | github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 // indirect 32 | github.com/pkg/errors v0.9.1 // indirect 33 | github.com/pmezard/go-difflib v1.0.0 // indirect 34 | github.com/templexxx/cpu v0.0.7 // indirect 35 | github.com/templexxx/xorsimd v0.4.1 // 
indirect 36 | github.com/tjfoc/gmsm v1.3.2 // indirect 37 | github.com/xtaci/kcp-go/v5 v5.6.1 // indirect 38 | go.etcd.io/etcd/api/v3 v3.5.11 // indirect 39 | go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect 40 | go.uber.org/multierr v1.10.0 // indirect 41 | golang.org/x/crypto v0.14.0 // indirect 42 | golang.org/x/mod v0.12.0 // indirect 43 | golang.org/x/net v0.17.0 // indirect 44 | golang.org/x/sys v0.13.0 // indirect 45 | golang.org/x/text v0.13.0 // indirect 46 | golang.org/x/tools v0.12.0 // indirect 47 | google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect 48 | google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect 49 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect 50 | google.golang.org/grpc v1.59.0 // indirect 51 | gopkg.in/yaml.v3 v3.0.1 // indirect 52 | ) 53 | -------------------------------------------------------------------------------- /codec/ss/message.go: -------------------------------------------------------------------------------- 1 | package ss 2 | 3 | import ( 4 | "encoding/binary" 5 | 6 | "github.com/sniperHW/clustergo/addr" 7 | "github.com/sniperHW/clustergo/codec/buffer" 8 | "github.com/sniperHW/rpcgo" 9 | ) 10 | 11 | const ( 12 | sizeLen = 4 13 | sizeFlag = 1 14 | sizeToAndFrom = 8 15 | sizeCmd = 2 16 | sizeRpcSeqNo = 8 17 | minSize = sizeLen + sizeFlag + sizeToAndFrom 18 | ) 19 | 20 | const ( 21 | PbMsg = 0x1 //Pb消息 22 | BinMsg = 0x2 //二进制消息 23 | RpcReq = 0x3 //RPC请求 24 | RpcResp = 0x4 //RPC响应 25 | MaskMessageType = 0x7 26 | ) 27 | 28 | var ( 29 | MaxPacketSize = 1024 * 4 30 | ) 31 | 32 | func setMsgType(flag *byte, tt byte) { 33 | switch tt { 34 | case PbMsg, BinMsg, RpcReq, RpcResp: 35 | *flag |= tt 36 | } 37 | } 38 | 39 | func getMsgType(flag byte) byte { 40 | return flag & MaskMessageType 41 | } 42 | 43 | type Message struct { 44 | cmd uint16 45 | to addr.LogicAddr 46 | from addr.LogicAddr 47 | payload interface{} 48 | } 49 | 50 | func 
NewMessage(to addr.LogicAddr, from addr.LogicAddr, payload interface{}, cmd ...uint16) *Message { 51 | msg := &Message{ 52 | to: to, 53 | from: from, 54 | payload: payload, 55 | } 56 | 57 | if len(cmd) > 0 { 58 | msg.cmd = cmd[0] 59 | } 60 | 61 | return msg 62 | } 63 | 64 | func (m *Message) Payload() interface{} { 65 | return m.payload 66 | } 67 | 68 | func (m *Message) Cmd() uint16 { 69 | return m.cmd 70 | } 71 | 72 | func (m *Message) From() addr.LogicAddr { 73 | return m.from 74 | } 75 | 76 | func (m *Message) To() addr.LogicAddr { 77 | return m.to 78 | } 79 | 80 | // 透传消息 81 | type RelayMessage struct { 82 | to addr.LogicAddr 83 | from addr.LogicAddr 84 | payload []byte 85 | } 86 | 87 | func (m *RelayMessage) Payload() []byte { 88 | return m.payload 89 | } 90 | 91 | func (m *RelayMessage) From() addr.LogicAddr { 92 | return m.from 93 | } 94 | 95 | func (m *RelayMessage) To() addr.LogicAddr { 96 | return m.to 97 | } 98 | 99 | func (m *RelayMessage) GetRpcRequest() *rpcgo.RequestMsg { 100 | if getMsgType(m.payload[4]) != RpcReq { 101 | return nil 102 | } else { 103 | if req, err := rpcgo.DecodeRequest(m.payload[13:]); err != nil { 104 | return nil 105 | } else { 106 | return req 107 | } 108 | } 109 | } 110 | 111 | func NewRelayMessage(to addr.LogicAddr, from addr.LogicAddr, payload []byte) *RelayMessage { 112 | m := &RelayMessage{ 113 | to: to, 114 | from: from, 115 | } 116 | 117 | b := make([]byte, 0, len(payload)+sizeLen) 118 | w := buffer.NeWriter(binary.BigEndian) 119 | b = w.AppendUint32(b, uint32(len(payload))) 120 | m.payload = w.AppendBytes(b, payload) 121 | return m 122 | } 123 | -------------------------------------------------------------------------------- /pkg/crypto/aes.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "bytes" 5 | "crypto/aes" 6 | "crypto/cipher" 7 | "crypto/rand" 8 | "encoding/binary" 9 | "errors" 10 | "io" 11 | ) 12 | 13 | func fixKey(key []byte) []byte { 
14 | if len(key) > 32 { 15 | return key[:32] 16 | } else { 17 | var size int 18 | if len(key) < 16 { 19 | size = 16 20 | } else if len(key) < 24 { 21 | size = 24 22 | } else { 23 | size = 32 24 | } 25 | padding := size - (len(key))%size 26 | padtext := bytes.Repeat([]byte{byte(padding)}, padding) 27 | return append(key, padtext...) 28 | } 29 | } 30 | 31 | func paddingData(ciphertext []byte, blockSize int) []byte { 32 | paddingSize := blockSize - (len(ciphertext)+4)%blockSize 33 | ret := make([]byte, 4, len(ciphertext)+4+paddingSize) 34 | binary.BigEndian.PutUint32(ret, uint32(len(ciphertext))) 35 | ret = append(ret, ciphertext...) 36 | padding := blockSize - len(ret)%blockSize 37 | padtext := bytes.Repeat([]byte{byte(padding)}, padding) 38 | return append(ret, padtext...) 39 | } 40 | 41 | /* 42 | AES CBC 加密 43 | key:加密key 44 | plaintext:加密明文 45 | ciphertext:解密返回字节字符串[ 整型以十六进制方式显示] 46 | */ 47 | 48 | func AESCBCEncrypt(keybyte, plainbyte []byte) (cipherbyte []byte, err error) { 49 | 50 | keybyte = fixKey(keybyte) 51 | 52 | plainbyte = paddingData(plainbyte, aes.BlockSize) 53 | 54 | block, err := aes.NewCipher(keybyte) 55 | if err != nil { 56 | return 57 | } 58 | 59 | cipherbyte = make([]byte, aes.BlockSize+len(plainbyte)) 60 | iv := cipherbyte[:aes.BlockSize] 61 | if _, err = io.ReadFull(rand.Reader, iv); err != nil { 62 | return 63 | } 64 | 65 | mode := cipher.NewCBCEncrypter(block, iv) 66 | mode.CryptBlocks(cipherbyte[aes.BlockSize:], plainbyte) 67 | return 68 | } 69 | 70 | /* 71 | AES CBC 解码 72 | key:解密key 73 | ciphertext:加密返回的串 74 | plaintext:解密后的字符串 75 | */ 76 | func AESCBCDecrypter(keybyte, cipherbyte []byte) (plainbyte []byte, err error) { 77 | keybyte = fixKey(keybyte) 78 | 79 | block, err := aes.NewCipher(keybyte) 80 | if err != nil { 81 | return 82 | } 83 | if len(cipherbyte) < aes.BlockSize { 84 | err = errors.New("ciphertext too short") 85 | return 86 | } 87 | 88 | iv := cipherbyte[:aes.BlockSize] 89 | cipherbyte = cipherbyte[aes.BlockSize:] 90 | if 
len(cipherbyte)%aes.BlockSize != 0 { 91 | err = errors.New("ciphertext is not a multiple of the block size") 92 | return 93 | } 94 | 95 | mode := cipher.NewCBCDecrypter(block, iv) 96 | mode.CryptBlocks(cipherbyte, cipherbyte) 97 | 98 | size := int(binary.BigEndian.Uint32(cipherbyte[:4])) 99 | 100 | plainbyte = cipherbyte[4 : 4+size] 101 | 102 | return 103 | } 104 | -------------------------------------------------------------------------------- /codec/pb/pb.go: -------------------------------------------------------------------------------- 1 | package pb 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | 7 | "google.golang.org/protobuf/proto" 8 | ) 9 | 10 | const maxArraySize = 65536 11 | 12 | type PbMeta struct { 13 | namespace string 14 | nameToID map[string]uint32 15 | idToMeta map[uint32]reflect.Type //存放>65535的reflect.Type 16 | metaArray []reflect.Type //0-65535直接通过数组下标获取reflect.Type 17 | } 18 | 19 | var nameSpace = map[string]*PbMeta{} 20 | 21 | func getArraySize(id uint32) int { 22 | for i := 1; i <= 64; i++ { 23 | s := i * 1024 24 | if int(id) < s { 25 | return s 26 | } 27 | } 28 | return 0 29 | } 30 | 31 | func (m *PbMeta) register(msg proto.Message, id uint32) error { 32 | tt := reflect.TypeOf(msg) 33 | name := tt.String() 34 | if _, ok := m.nameToID[name]; ok { 35 | return fmt.Errorf("%s already register to namespace:%s", name, m.namespace) 36 | } 37 | 38 | m.nameToID[name] = id 39 | 40 | if id < maxArraySize { 41 | if int(id) >= len(m.metaArray) { 42 | metaArray := make([]reflect.Type, getArraySize(id)) 43 | copy(metaArray, m.metaArray) 44 | m.metaArray = metaArray 45 | } 46 | m.metaArray[id] = tt 47 | } else { 48 | m.idToMeta[id] = tt 49 | } 50 | 51 | return nil 52 | } 53 | 54 | func (m *PbMeta) newMessage(id uint32) (msg proto.Message, err error) { 55 | if id < uint32(len(m.metaArray)) { 56 | tt := m.metaArray[id] 57 | if tt == nil { 58 | err = fmt.Errorf("invaild id:%d", id) 59 | } else { 60 | msg = reflect.New(tt.Elem()).Interface().(proto.Message) 61 | 
} 62 | } else { 63 | if tt, ok := m.idToMeta[id]; ok { 64 | msg = reflect.New(tt.Elem()).Interface().(proto.Message) 65 | } else { 66 | err = fmt.Errorf("invaild id:%d", id) 67 | } 68 | } 69 | return 70 | } 71 | 72 | func (m *PbMeta) Marshal(o interface{}) ([]byte, uint32, error) { 73 | var id uint32 74 | var ok bool 75 | if id, ok = m.nameToID[reflect.TypeOf(o).String()]; !ok { 76 | return nil, 0, fmt.Errorf("unregister type:%s", reflect.TypeOf(o).String()) 77 | } 78 | 79 | msg := o.(proto.Message) 80 | 81 | data, err := proto.Marshal(msg) 82 | if err != nil { 83 | return nil, 0, err 84 | } 85 | return data, id, nil 86 | } 87 | 88 | func (m *PbMeta) Unmarshal(id uint32, buff []byte) (msg proto.Message, err error) { 89 | if msg, err = m.newMessage(id); err != nil { 90 | return 91 | } 92 | 93 | if len(buff) > 0 { 94 | if err = proto.Unmarshal(buff, msg); err != nil { 95 | return 96 | } 97 | } 98 | 99 | return 100 | } 101 | 102 | func GetCmd(namespace string, o proto.Message) uint32 { 103 | if ns, ok := nameSpace[namespace]; ok { 104 | return ns.nameToID[reflect.TypeOf(o).String()] 105 | } else { 106 | return 0 107 | } 108 | } 109 | 110 | // 根据名字注册实例(注意函数非线程安全,需要在初始化阶段完成所有消息的Register) 111 | func Register(namespace string, msg proto.Message, id uint32) error { 112 | 113 | var ns *PbMeta 114 | var ok bool 115 | 116 | if ns, ok = nameSpace[namespace]; !ok { 117 | ns = &PbMeta{namespace: namespace, nameToID: map[string]uint32{}, idToMeta: map[uint32]reflect.Type{}} 118 | nameSpace[namespace] = ns 119 | } 120 | 121 | return ns.register(msg, id) 122 | } 123 | 124 | func GetMeta(namespace string) *PbMeta { 125 | return nameSpace[namespace] 126 | } 127 | -------------------------------------------------------------------------------- /example/stream/client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "errors" 7 | "log" 8 | "net" 9 | "sync/atomic" 10 | "time" 11 | 
12 | "github.com/sniperHW/clustergo/example/stream/pb" 13 | "github.com/sniperHW/netgo" 14 | "google.golang.org/protobuf/proto" 15 | ) 16 | 17 | type PBCodec struct { 18 | r int 19 | w int 20 | buff []byte 21 | } 22 | 23 | func (codec *PBCodec) Decode(b []byte) (interface{}, error) { 24 | o := &pb.Echo{} 25 | if err := proto.Unmarshal(b, o); nil != err { 26 | return nil, err 27 | } else { 28 | return o, nil 29 | } 30 | } 31 | 32 | func (codec *PBCodec) Encode(buffs net.Buffers, o interface{}) (net.Buffers, int) { 33 | if _, ok := o.(*pb.Echo); !ok { 34 | return buffs, 0 35 | } else { 36 | if data, err := proto.Marshal(o.(*pb.Echo)); nil != err { 37 | return buffs, 0 38 | } else { 39 | bu := make([]byte, 4) 40 | binary.BigEndian.PutUint32(bu, uint32(len(data))) 41 | return append(buffs, bu, data), len(bu) + len(data) 42 | } 43 | } 44 | } 45 | 46 | func (codec *PBCodec) read(readable netgo.ReadAble, deadline time.Time) (int, error) { 47 | if err := readable.SetReadDeadline(deadline); err != nil { 48 | return 0, err 49 | } else { 50 | return readable.Read(codec.buff[codec.w:]) 51 | } 52 | } 53 | 54 | func (codec *PBCodec) Recv(readable netgo.ReadAble, deadline time.Time) (pkt []byte, err error) { 55 | const lenHead int = 4 56 | for { 57 | rr := codec.r 58 | pktLen := 0 59 | if (codec.w - rr) >= lenHead { 60 | pktLen = int(binary.BigEndian.Uint32(codec.buff[rr:])) 61 | rr += lenHead 62 | } 63 | 64 | if pktLen > 0 { 65 | if pktLen > (len(codec.buff) - lenHead) { 66 | err = errors.New("pkt too large") 67 | return 68 | } 69 | if (codec.w - rr) >= pktLen { 70 | pkt = codec.buff[rr : rr+pktLen] 71 | rr += pktLen 72 | codec.r = rr 73 | if codec.r == codec.w { 74 | codec.r = 0 75 | codec.w = 0 76 | } 77 | return 78 | } 79 | } 80 | 81 | if codec.r > 0 { 82 | //移动到头部 83 | copy(codec.buff, codec.buff[codec.r:codec.w]) 84 | codec.w = codec.w - codec.r 85 | codec.r = 0 86 | } 87 | 88 | var n int 89 | n, err = codec.read(readable, deadline) 90 | if n > 0 { 91 | codec.w += n 92 | } 
93 | if nil != err { 94 | return 95 | } 96 | } 97 | } 98 | 99 | func main() { 100 | dialer := &net.Dialer{} 101 | var ( 102 | s netgo.Socket 103 | ) 104 | 105 | codec := &PBCodec{buff: make([]byte, 4096)} 106 | 107 | for { 108 | if conn, err := dialer.Dial("tcp", "127.0.0.1:18113"); nil != err { 109 | time.Sleep(time.Second) 110 | } else { 111 | s = netgo.NewTcpSocket(conn.(*net.TCPConn), codec) 112 | break 113 | } 114 | } 115 | 116 | okChan := make(chan struct{}) 117 | count := int32(0) 118 | 119 | as := netgo.NewAsynSocket(s, netgo.AsynSocketOption{ 120 | Codec: codec, 121 | }).SetCloseCallback(func(_ *netgo.AsynSocket, err error) { 122 | log.Println("client closed err:", err) 123 | }).SetPacketHandler(func(_ context.Context, as *netgo.AsynSocket, packet interface{}) error { 124 | c := atomic.AddInt32(&count, 1) 125 | log.Println("go echo resp", c) 126 | if c == 100 { 127 | close(okChan) 128 | } else { 129 | as.Recv() 130 | } 131 | return nil 132 | }).Recv() 133 | 134 | for i := 0; i < 100; i++ { 135 | as.Send(&pb.Echo{Msg: "hello"}) 136 | } 137 | <-okChan 138 | as.Close(nil) 139 | } 140 | -------------------------------------------------------------------------------- /example/pbrpc/genrpc/gen.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "os" 8 | "strings" 9 | "text/template" 10 | ) 11 | 12 | var templateStr string = ` 13 | package {{.Method}} 14 | 15 | import ( 16 | "github.com/sniperHW/clustergo" 17 | "github.com/sniperHW/clustergo/addr" 18 | "github.com/sniperHW/rpcgo" 19 | "context" 20 | "time" 21 | ) 22 | 23 | type Replyer struct { 24 | replyer *rpcgo.Replyer 25 | } 26 | 27 | func (r *Replyer) Reply(result *{{.Response}}) { 28 | r.replyer.Reply(result) 29 | } 30 | 31 | func (r *Replyer) Error(err error) { 32 | r.replyer.Error(err) 33 | } 34 | 35 | func (r *Replyer) Channel() rpcgo.Channel { 36 | return r.replyer.Channel() 37 | } 38 | 39 | type 
{{.Service}} interface { 40 | Serve{{.Service}}(context.Context, *Replyer,*{{.Request}}) 41 | } 42 | 43 | func Register(o {{.Service}}) { 44 | clustergo.RegisterService("{{.Method}}",func(ctx context.Context, r *rpcgo.Replyer,arg *{{.Request}}) { 45 | o.Serve{{.Service}}(ctx,&Replyer{replyer:r},arg) 46 | }) 47 | } 48 | 49 | 50 | func Call(ctx context.Context, peer addr.LogicAddr,arg *{{.Request}}) (*{{.Response}},error) { 51 | return clustergo.Call[*{{.Request}},{{.Response}}](ctx,peer,"{{.Method}}",arg) 52 | } 53 | 54 | func CallWithTimeout(peer addr.LogicAddr,arg *{{.Request}},d time.Duration) (*{{.Response}},error) { 55 | return clustergo.CallWithTimeout[*{{.Request}},{{.Response}}](peer,"{{.Method}}",arg,d) 56 | } 57 | 58 | func AsyncCall(peer addr.LogicAddr,arg *{{.Request}},deadline time.Time,callback func(*{{.Response}},error)) error { 59 | return clustergo.AsyncCall[*{{.Request}},{{.Response}}](peer,"{{.Method}}",arg,deadline,callback) 60 | } 61 | 62 | ` 63 | 64 | type method struct { 65 | Method string 66 | Request string 67 | Response string 68 | Service string 69 | } 70 | 71 | var ( 72 | inputPath *string 73 | outputPath *string 74 | ) 75 | 76 | func gen(tmpl *template.Template, name string) { 77 | filename := fmt.Sprintf("%s/%s/%s.go", *outputPath, name, name) 78 | os.MkdirAll(fmt.Sprintf("%s/%s", *outputPath, name), os.ModePerm) 79 | f, err := os.OpenFile(filename, os.O_RDWR, os.ModePerm) 80 | if err != nil { 81 | if os.IsNotExist(err) { 82 | f, err = os.Create(filename) 83 | if err != nil { 84 | log.Printf("------ error -------- create %s failed:%s", filename, err.Error()) 85 | return 86 | } 87 | } else { 88 | log.Printf("------ error -------- open %s failed:%s", filename, err.Error()) 89 | return 90 | } 91 | } 92 | defer f.Close() 93 | 94 | err = os.Truncate(filename, 0) 95 | if err != nil { 96 | log.Printf("------ error -------- Truncate %s failed:%s", filename, err.Error()) 97 | return 98 | } 99 | 100 | err = tmpl.Execute(f, method{ 101 | Method: 
name, 102 | Service: strings.Title(name), 103 | Request: fmt.Sprintf("%sReq", strings.Title(name)), 104 | Response: fmt.Sprintf("%sRsp", strings.Title(name)), 105 | }) 106 | if err != nil { 107 | panic(err) 108 | } else { 109 | log.Printf("%s Write ok\n", filename) 110 | } 111 | } 112 | 113 | func main() { 114 | 115 | inputPath = flag.String("inputPath", "proto", "inputPath") 116 | outputPath = flag.String("outputPath", "service", "outputPath") 117 | 118 | flag.Parse() 119 | 120 | tmpl, err := template.New("test").Parse(templateStr) 121 | if err != nil { 122 | panic(err) 123 | } 124 | 125 | //遍历proto目录获取所有.proto文件 126 | if f, err := os.Open(*inputPath); err == nil { 127 | var fi []os.FileInfo 128 | if fi, err = f.Readdir(0); err == nil { 129 | for _, v := range fi { 130 | t := strings.Split(v.Name(), ".") 131 | if len(t) == 2 && t[1] == "proto" { 132 | gen(tmpl, t[0]) 133 | } 134 | } 135 | } 136 | f.Close() 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /codec/ss/test.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.26.0 4 | // protoc v3.21.7 5 | // source: test.proto 6 | 7 | package ss 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 
20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | type Echo struct { 24 | state protoimpl.MessageState 25 | sizeCache protoimpl.SizeCache 26 | unknownFields protoimpl.UnknownFields 27 | 28 | Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` 29 | } 30 | 31 | func (x *Echo) Reset() { 32 | *x = Echo{} 33 | if protoimpl.UnsafeEnabled { 34 | mi := &file_test_proto_msgTypes[0] 35 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 36 | ms.StoreMessageInfo(mi) 37 | } 38 | } 39 | 40 | func (x *Echo) String() string { 41 | return protoimpl.X.MessageStringOf(x) 42 | } 43 | 44 | func (*Echo) ProtoMessage() {} 45 | 46 | func (x *Echo) ProtoReflect() protoreflect.Message { 47 | mi := &file_test_proto_msgTypes[0] 48 | if protoimpl.UnsafeEnabled && x != nil { 49 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 50 | if ms.LoadMessageInfo() == nil { 51 | ms.StoreMessageInfo(mi) 52 | } 53 | return ms 54 | } 55 | return mi.MessageOf(x) 56 | } 57 | 58 | // Deprecated: Use Echo.ProtoReflect.Descriptor instead. 
59 | func (*Echo) Descriptor() ([]byte, []int) { 60 | return file_test_proto_rawDescGZIP(), []int{0} 61 | } 62 | 63 | func (x *Echo) GetMsg() string { 64 | if x != nil { 65 | return x.Msg 66 | } 67 | return "" 68 | } 69 | 70 | var File_test_proto protoreflect.FileDescriptor 71 | 72 | var file_test_proto_rawDesc = []byte{ 73 | 0x0a, 0x0a, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x18, 0x0a, 0x04, 74 | 0x45, 0x63, 0x68, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 75 | 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x2e, 0x2f, 0x73, 0x73, 0x62, 76 | 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 77 | } 78 | 79 | var ( 80 | file_test_proto_rawDescOnce sync.Once 81 | file_test_proto_rawDescData = file_test_proto_rawDesc 82 | ) 83 | 84 | func file_test_proto_rawDescGZIP() []byte { 85 | file_test_proto_rawDescOnce.Do(func() { 86 | file_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_proto_rawDescData) 87 | }) 88 | return file_test_proto_rawDescData 89 | } 90 | 91 | var file_test_proto_msgTypes = make([]protoimpl.MessageInfo, 1) 92 | var file_test_proto_goTypes = []interface{}{ 93 | (*Echo)(nil), // 0: Echo 94 | } 95 | var file_test_proto_depIdxs = []int32{ 96 | 0, // [0:0] is the sub-list for method output_type 97 | 0, // [0:0] is the sub-list for method input_type 98 | 0, // [0:0] is the sub-list for extension type_name 99 | 0, // [0:0] is the sub-list for extension extendee 100 | 0, // [0:0] is the sub-list for field type_name 101 | } 102 | 103 | func init() { file_test_proto_init() } 104 | func file_test_proto_init() { 105 | if File_test_proto != nil { 106 | return 107 | } 108 | if !protoimpl.UnsafeEnabled { 109 | file_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 110 | switch v := v.(*Echo); i { 111 | case 0: 112 | return &v.state 113 | case 1: 114 | return &v.sizeCache 115 | case 2: 116 | return &v.unknownFields 117 | default: 118 | return nil 119 | } 120 
| } 121 | } 122 | type x struct{} 123 | out := protoimpl.TypeBuilder{ 124 | File: protoimpl.DescBuilder{ 125 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 126 | RawDescriptor: file_test_proto_rawDesc, 127 | NumEnums: 0, 128 | NumMessages: 1, 129 | NumExtensions: 0, 130 | NumServices: 0, 131 | }, 132 | GoTypes: file_test_proto_goTypes, 133 | DependencyIndexes: file_test_proto_depIdxs, 134 | MessageInfos: file_test_proto_msgTypes, 135 | }.Build() 136 | File_test_proto = out.File 137 | file_test_proto_rawDesc = nil 138 | file_test_proto_goTypes = nil 139 | file_test_proto_depIdxs = nil 140 | } 141 | -------------------------------------------------------------------------------- /example/stream/pb/test.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.28.1 4 | // protoc v3.21.7 5 | // source: test.proto 6 | 7 | package pb 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 
20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | type Echo struct { 24 | state protoimpl.MessageState 25 | sizeCache protoimpl.SizeCache 26 | unknownFields protoimpl.UnknownFields 27 | 28 | Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` 29 | } 30 | 31 | func (x *Echo) Reset() { 32 | *x = Echo{} 33 | if protoimpl.UnsafeEnabled { 34 | mi := &file_test_proto_msgTypes[0] 35 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 36 | ms.StoreMessageInfo(mi) 37 | } 38 | } 39 | 40 | func (x *Echo) String() string { 41 | return protoimpl.X.MessageStringOf(x) 42 | } 43 | 44 | func (*Echo) ProtoMessage() {} 45 | 46 | func (x *Echo) ProtoReflect() protoreflect.Message { 47 | mi := &file_test_proto_msgTypes[0] 48 | if protoimpl.UnsafeEnabled && x != nil { 49 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 50 | if ms.LoadMessageInfo() == nil { 51 | ms.StoreMessageInfo(mi) 52 | } 53 | return ms 54 | } 55 | return mi.MessageOf(x) 56 | } 57 | 58 | // Deprecated: Use Echo.ProtoReflect.Descriptor instead. 
59 | func (*Echo) Descriptor() ([]byte, []int) { 60 | return file_test_proto_rawDescGZIP(), []int{0} 61 | } 62 | 63 | func (x *Echo) GetMsg() string { 64 | if x != nil { 65 | return x.Msg 66 | } 67 | return "" 68 | } 69 | 70 | var File_test_proto protoreflect.FileDescriptor 71 | 72 | var file_test_proto_rawDesc = []byte{ 73 | 0x0a, 0x0a, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x18, 0x0a, 0x04, 74 | 0x45, 0x63, 0x68, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 75 | 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x2f, 0x70, 0x62, 0x62, 0x06, 76 | 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 77 | } 78 | 79 | var ( 80 | file_test_proto_rawDescOnce sync.Once 81 | file_test_proto_rawDescData = file_test_proto_rawDesc 82 | ) 83 | 84 | func file_test_proto_rawDescGZIP() []byte { 85 | file_test_proto_rawDescOnce.Do(func() { 86 | file_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_proto_rawDescData) 87 | }) 88 | return file_test_proto_rawDescData 89 | } 90 | 91 | var file_test_proto_msgTypes = make([]protoimpl.MessageInfo, 1) 92 | var file_test_proto_goTypes = []interface{}{ 93 | (*Echo)(nil), // 0: Echo 94 | } 95 | var file_test_proto_depIdxs = []int32{ 96 | 0, // [0:0] is the sub-list for method output_type 97 | 0, // [0:0] is the sub-list for method input_type 98 | 0, // [0:0] is the sub-list for extension type_name 99 | 0, // [0:0] is the sub-list for extension extendee 100 | 0, // [0:0] is the sub-list for field type_name 101 | } 102 | 103 | func init() { file_test_proto_init() } 104 | func file_test_proto_init() { 105 | if File_test_proto != nil { 106 | return 107 | } 108 | if !protoimpl.UnsafeEnabled { 109 | file_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 110 | switch v := v.(*Echo); i { 111 | case 0: 112 | return &v.state 113 | case 1: 114 | return &v.sizeCache 115 | case 2: 116 | return &v.unknownFields 117 | default: 118 | return nil 119 | } 120 | } 
121 | } 122 | type x struct{} 123 | out := protoimpl.TypeBuilder{ 124 | File: protoimpl.DescBuilder{ 125 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 126 | RawDescriptor: file_test_proto_rawDesc, 127 | NumEnums: 0, 128 | NumMessages: 1, 129 | NumExtensions: 0, 130 | NumServices: 0, 131 | }, 132 | GoTypes: file_test_proto_goTypes, 133 | DependencyIndexes: file_test_proto_depIdxs, 134 | MessageInfos: file_test_proto_msgTypes, 135 | }.Build() 136 | File_test_proto = out.File 137 | file_test_proto_rawDesc = nil 138 | file_test_proto_goTypes = nil 139 | file_test_proto_depIdxs = nil 140 | } 141 | -------------------------------------------------------------------------------- /membership/redis/script.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | // 更新一个member,add|modify|markdelete 4 | const ScriptUpdateMember string = ` 5 | redis.call('select',0) 6 | local serverVer = redis.call('get','version') 7 | if not serverVer then 8 | serverVer = 1 9 | else 10 | serverVer = tonumber(serverVer) + 1 11 | end 12 | 13 | redis.call('set','version',serverVer) 14 | if ARGV[1] == 'insert_update' then 15 | redis.call('hmset',KEYS[1],'version',serverVer,'info',ARGV[2],'markdel','false') 16 | --publish 17 | redis.call('PUBLISH',"members",serverVer) 18 | elseif ARGV[1] == "delete" then 19 | local v = redis.call('hget',KEYS[1],'version') 20 | if v then 21 | redis.call('hmset',KEYS[1],'version',serverVer,'markdel','true') 22 | --publish 23 | redis.call('PUBLISH',"members",serverVer) 24 | end 25 | end 26 | ` 27 | 28 | const ScriptGetMembers string = ` 29 | redis.call('select',0) 30 | local clientVer = tonumber(ARGV[1]) 31 | local serverVer = redis.call('get','version') 32 | if not serverVer then 33 | return {clientVer} 34 | else 35 | serverVer = tonumber(serverVer) 36 | end 37 | --两端版本号一致,客户端的数据已经是最新的 38 | if clientVer == serverVer then 39 | return {serverVer} 40 | end 41 | local nodes = {} 42 | local result = 
redis.call('scan',0) 43 | for k,v in pairs(result[2]) do 44 | if v ~= "version" then 45 | local r = redis.call('hmget',v,'version','info','markdel') 46 | local node_version,info,markdel = r[1],r[2],r[3] 47 | if clientVer == 0 then 48 | if markdel == "false" then 49 | table.insert(nodes,{v,info,markdel}) 50 | end 51 | elseif tonumber(node_version) > clientVer then 52 | --返回比客户端新的节点,包括markdel=="true"的节点,这样客户端可以在本地将这种节点删除 53 | table.insert(nodes,{v,info,markdel}) 54 | end 55 | end 56 | end 57 | return {serverVer,nodes} 58 | ` 59 | 60 | const ScriptHeartbeat string = ` 61 | redis.call('select',1) 62 | local dead = redis.call('get','dead') 63 | local deadline = tonumber(redis.call('TIME')[1]) + tonumber(ARGV[1]) 64 | 65 | if not dead or dead == "false" then 66 | local serverVer = redis.call('get','version') 67 | if not serverVer then 68 | serverVer = 1 69 | else 70 | serverVer = tonumber(serverVer) + 1 71 | end 72 | redis.call('set','version',serverVer) 73 | redis.call('hmset',KEYS[1],'deadline',deadline,'version',serverVer,'dead','false') 74 | --publish 75 | redis.call('select',0) 76 | redis.call('PUBLISH',"alive",serverVer) 77 | else 78 | redis.call('hmset',KEYS[1],'deadline',deadline) 79 | end 80 | ` 81 | 82 | const ScriptGetAlive string = ` 83 | redis.call('select',1) 84 | local clientVer = tonumber(ARGV[1]) 85 | local serverVer = redis.call('get','version') 86 | if not serverVer then 87 | return {clientVer} 88 | else 89 | serverVer = tonumber(serverVer) 90 | end 91 | 92 | --两端版本号一致,客户端的数据已经是最新的 93 | if clientVer == serverVer then 94 | return {serverVer} 95 | end 96 | local nodes = {} 97 | local result = redis.call('scan',0) 98 | for k,v in pairs(result[2]) do 99 | if v ~= "version" then 100 | local r = redis.call('hmget',v,'version','dead') 101 | local node_version,dead = r[1],r[2] 102 | if clientVer == 0 then 103 | --初始状态,只返回dead==false 104 | if dead == "false" then 105 | table.insert(nodes,{v,dead}) 106 | end 107 | elseif tonumber(node_version) > clientVer then 
108 | --返回比客户端新的节点,包括dead=="true"的节点,这样客户端可以在本地将这种节点删除 109 | table.insert(nodes,{v,dead}) 110 | end 111 | end 112 | end 113 | return {serverVer,nodes} 114 | ` 115 | 116 | // 遍历db1,将超时节点标记为dead=true 117 | // todo: 记录下上次检测的时间,避免频繁执行检查 118 | const ScriptCheckTimeout string = ` 119 | redis.call('select',1) 120 | local serverVer = redis.call('get','version') 121 | if not serverVer then 122 | serverVer = 1 123 | else 124 | serverVer = tonumber(serverVer) + 1 125 | end 126 | 127 | local now = tonumber(redis.call('TIME')[1]) 128 | 129 | local change = false 130 | local result = redis.call('scan',0) 131 | for k,v in pairs(result[2]) do 132 | if v ~= "version" then 133 | local r = redis.call('hmget',v,'dead','deadline') 134 | local dead,deadline = r[1],r[2] 135 | if dead == "false" and now > tonumber(deadline) then 136 | if change == false then 137 | change = true 138 | redis.call('set','version',serverVer) 139 | end 140 | redis.call('hmset',v,'version',serverVer,'dead','true') 141 | end 142 | end 143 | end 144 | 145 | if change then 146 | --publish 147 | redis.call('select',0) 148 | redis.call('PUBLISH','alive',serverVer) 149 | end 150 | ` 151 | -------------------------------------------------------------------------------- /codec/ss/codec.go: -------------------------------------------------------------------------------- 1 | package ss 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "net" 7 | 8 | "github.com/sniperHW/clustergo/addr" 9 | "github.com/sniperHW/clustergo/codec" 10 | "github.com/sniperHW/clustergo/codec/buffer" 11 | "github.com/sniperHW/clustergo/codec/pb" 12 | "github.com/sniperHW/rpcgo" 13 | "google.golang.org/protobuf/proto" 14 | ) 15 | 16 | const Namespace string = "ss" 17 | 18 | type SSCodec struct { 19 | codec.LengthPayloadPacketReceiver 20 | selfAddr addr.LogicAddr 21 | reader buffer.BufferReader 22 | pbMeta *pb.PbMeta 23 | } 24 | 25 | func NewCodec(selfAddr addr.LogicAddr) *SSCodec { 26 | return &SSCodec{ 27 | LengthPayloadPacketReceiver: 
codec.LengthPayloadPacketReceiver{ 28 | Buff: make([]byte, 4096), 29 | MaxPacketSize: MaxPacketSize, 30 | }, 31 | selfAddr: selfAddr, 32 | pbMeta: pb.GetMeta(Namespace), 33 | reader: buffer.NewReader(binary.BigEndian, nil), 34 | } 35 | } 36 | 37 | func (ss *SSCodec) encode(buffs net.Buffers, m *Message, cmd uint16, flag byte, data []byte) (net.Buffers, int) { 38 | payloadLen := sizeFlag + sizeToAndFrom + len(data) 39 | 40 | if flag == PbMsg || flag == BinMsg { 41 | payloadLen += sizeCmd 42 | } 43 | 44 | totalLen := sizeLen + payloadLen 45 | 46 | if totalLen > MaxPacketSize { 47 | return buffs, 0 48 | } 49 | 50 | b := make([]byte, 13, totalLen-len(data)) 51 | 52 | //写payload大小 53 | binary.BigEndian.PutUint32(b, uint32(payloadLen)) 54 | 55 | //写flag 56 | b[4] = flag 57 | 58 | binary.BigEndian.PutUint32(b[5:], uint32(m.To())) 59 | binary.BigEndian.PutUint32(b[9:], uint32(m.From())) 60 | 61 | if flag == PbMsg || flag == BinMsg { 62 | //写cmd 63 | b = buffer.NeWriter(binary.BigEndian).AppendUint16(b, cmd) 64 | } 65 | 66 | return append(buffs, b, data), totalLen 67 | } 68 | 69 | func (ss *SSCodec) Encode(buffs net.Buffers, o interface{}) (net.Buffers, int) { 70 | switch o := o.(type) { 71 | case *Message: 72 | var data []byte 73 | var err error 74 | 75 | flag := byte(0) 76 | 77 | switch msg := o.Payload().(type) { 78 | case []byte: 79 | if len(msg) == 0 { 80 | return buffs, 0 81 | } 82 | //设置Bin消息标记 83 | setMsgType(&flag, BinMsg) 84 | return ss.encode(buffs, o, o.cmd, flag, msg) 85 | case proto.Message: 86 | var cmd uint32 87 | if data, cmd, err = ss.pbMeta.Marshal(msg); err != nil { 88 | return buffs, 0 89 | } 90 | //设置Pb消息标记 91 | setMsgType(&flag, PbMsg) 92 | return ss.encode(buffs, o, uint16(cmd), flag, data) 93 | case *rpcgo.RequestMsg: 94 | //设置RPC请求标记 95 | setMsgType(&flag, RpcReq) 96 | return ss.encode(buffs, o, 0, flag, rpcgo.EncodeRequest(msg)) 97 | case *rpcgo.ResponseMsg: 98 | //设置RPC响应标记 99 | setMsgType(&flag, RpcResp) 100 | return ss.encode(buffs, o, 0, flag, 
rpcgo.EncodeResponse(msg)) 101 | } 102 | return buffs, 0 103 | case *RelayMessage: 104 | return append(buffs, o.Payload()), len(o.Payload()) 105 | default: 106 | return buffs, 0 107 | } 108 | } 109 | 110 | func (ss *SSCodec) isTarget(to addr.LogicAddr) bool { 111 | return ss.selfAddr == to 112 | } 113 | 114 | func (ss *SSCodec) Decode(payload []byte) (interface{}, error) { 115 | ss.reader.Reset(payload) 116 | flag := ss.reader.GetByte() 117 | to := addr.LogicAddr(ss.reader.GetUint32()) 118 | from := addr.LogicAddr(ss.reader.GetUint32()) 119 | if ss.isTarget(to) { 120 | //当前节点是数据包的目标接收方 121 | switch getMsgType(flag) { 122 | case BinMsg: 123 | cmd := ss.reader.GetUint16() 124 | data := ss.reader.GetAll() 125 | return NewMessage(to, from, data, cmd), nil 126 | case PbMsg: 127 | cmd := ss.reader.GetUint16() 128 | data := ss.reader.GetAll() 129 | if msg, err := ss.pbMeta.Unmarshal(uint32(cmd), data); err != nil { 130 | return nil, err 131 | } else { 132 | return NewMessage(to, from, msg, cmd), nil 133 | } 134 | case RpcReq: 135 | if req, err := rpcgo.DecodeRequest(ss.reader.GetAll()); err != nil { 136 | return nil, err 137 | } else { 138 | return NewMessage(to, from, req), nil 139 | } 140 | case RpcResp: 141 | if resp, err := rpcgo.DecodeResponse(ss.reader.GetAll()); err != nil { 142 | return nil, err 143 | } else { 144 | return NewMessage(to, from, resp), nil 145 | } 146 | default: 147 | return nil, fmt.Errorf("invaild packet type") 148 | } 149 | } else { 150 | //当前接收方不是目标节点,返回RelayMessage 151 | return NewRelayMessage(to, from, payload), nil 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /addr/addr.go: -------------------------------------------------------------------------------- 1 | package addr 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "strconv" 7 | "strings" 8 | "sync/atomic" 9 | "unsafe" 10 | ) 11 | 12 | const ClusterMask uint32 = 0xFFFC0000 //高14 13 | const TypeMask uint32 = 0x0003FC00 //中8 14 | const 
ServerMask uint32 = 0x000003FF //低10 15 | const HarbarType uint32 = 255 16 | 17 | var ErrInvaildAddrFmt error = fmt.Errorf("invaild addr format") 18 | var ErrHarborType error = fmt.Errorf("type should be 255") 19 | var ErrInvaildType error = fmt.Errorf("type should between(1,254)") 20 | var ErrInvaildCluster error = fmt.Errorf("cluster should between(1,16383)") 21 | var ErrInvaildServer error = fmt.Errorf("server should between(0,1023)") 22 | 23 | type LogicAddr uint32 24 | 25 | type Addr struct { 26 | logicAddr LogicAddr 27 | netAddr *net.TCPAddr 28 | } 29 | 30 | func MakeAddr(logic string, tcpAddr string) (Addr, error) { 31 | logicAddr, err := MakeLogicAddr(logic) 32 | if nil != err { 33 | return Addr{}, err 34 | } 35 | 36 | netAddr, err := net.ResolveTCPAddr("tcp", tcpAddr) 37 | if nil != err { 38 | return Addr{}, err 39 | } 40 | 41 | return Addr{ 42 | logicAddr: logicAddr, 43 | netAddr: netAddr, 44 | }, nil 45 | } 46 | 47 | func MakeHarborAddr(logic string, tcpAddr string) (Addr, error) { 48 | logicAddr, err := MakeHarborLogicAddr(logic) 49 | if nil != err { 50 | return Addr{}, err 51 | } 52 | 53 | netAddr, err := net.ResolveTCPAddr("tcp", tcpAddr) 54 | if nil != err { 55 | return Addr{}, err 56 | } 57 | 58 | return Addr{ 59 | logicAddr: logicAddr, 60 | netAddr: netAddr, 61 | }, nil 62 | } 63 | 64 | func (a Addr) LogicAddr() LogicAddr { 65 | return a.logicAddr 66 | } 67 | 68 | func (a Addr) NetAddr() *net.TCPAddr { 69 | return (*net.TCPAddr)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&a.netAddr)))) 70 | } 71 | 72 | func (a *Addr) UpdateNetAddr(addr *net.TCPAddr) { 73 | atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&a.netAddr)), unsafe.Pointer(addr)) 74 | } 75 | 76 | func (a LogicAddr) Cluster() uint32 { 77 | return (uint32(a) & ClusterMask) >> 18 78 | } 79 | 80 | func (a LogicAddr) Type() uint32 { 81 | return (uint32(a) & TypeMask) >> 10 82 | } 83 | 84 | func (a LogicAddr) Server() uint32 { 85 | return uint32(a) & ServerMask 86 | } 87 | 88 | 
func (a LogicAddr) String() string { 89 | return fmt.Sprintf("%d.%d.%d", a.Cluster(), a.Type(), a.Server()) 90 | } 91 | 92 | func (a LogicAddr) Empty() bool { 93 | return uint32(a) == 0 94 | } 95 | 96 | func (a *LogicAddr) Clear() { 97 | (*a) = 0 98 | } 99 | 100 | func MakeLogicAddr(addr string) (LogicAddr, error) { 101 | var err error 102 | v := strings.Split(addr, ".") 103 | if len(v) != 3 { 104 | return LogicAddr(0), ErrInvaildAddrFmt 105 | } 106 | 107 | cluster, err := strconv.Atoi(v[0]) 108 | 109 | if nil != err { 110 | return LogicAddr(0), ErrInvaildCluster 111 | } 112 | 113 | if cluster == 0 || uint32(cluster) > (ClusterMask>>18) { 114 | return LogicAddr(0), ErrInvaildCluster 115 | } 116 | 117 | tt, err := strconv.Atoi(v[1]) 118 | if nil != err { 119 | return LogicAddr(0), ErrInvaildType 120 | } 121 | 122 | if tt == 0 || uint32(tt) > ((TypeMask>>10)-1) { 123 | return LogicAddr(0), ErrInvaildType 124 | } 125 | 126 | server, err := strconv.Atoi(v[2]) 127 | if nil != err { 128 | return LogicAddr(0), ErrInvaildServer 129 | } 130 | 131 | if uint32(server) > ServerMask { 132 | return LogicAddr(0), ErrInvaildServer 133 | } 134 | 135 | return LogicAddr(0 | (uint32(tt) << 10) | (uint32(cluster) << 18) | (uint32(server))), nil 136 | } 137 | 138 | func MakeHarborLogicAddr(addr string) (LogicAddr, error) { 139 | 140 | var err error 141 | v := strings.Split(addr, ".") 142 | if len(v) != 3 { 143 | return LogicAddr(0), ErrInvaildAddrFmt 144 | } 145 | 146 | cluster, err := strconv.Atoi(v[0]) 147 | 148 | if nil != err { 149 | return LogicAddr(0), ErrInvaildCluster 150 | } 151 | 152 | if cluster == 0 || uint32(cluster) > (ClusterMask>>18) { 153 | return LogicAddr(0), ErrInvaildCluster 154 | } 155 | 156 | tt, err := strconv.Atoi(v[1]) 157 | if nil != err { 158 | return LogicAddr(0), ErrInvaildType 159 | } 160 | 161 | if uint32(tt) != uint32(255) { 162 | return LogicAddr(0), ErrHarborType 163 | } 164 | 165 | server, err := strconv.Atoi(v[2]) 166 | if nil != err { 167 | return 
LogicAddr(0), ErrInvaildServer 168 | } 169 | 170 | if uint32(server) > ServerMask { 171 | return LogicAddr(0), ErrInvaildServer 172 | } 173 | 174 | return LogicAddr(0 | (uint32(tt) << 10) | (uint32(cluster) << 18) | (uint32(server))), nil 175 | } 176 | -------------------------------------------------------------------------------- /membership/redis/script_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/redis/go-redis/v9" 10 | ) 11 | 12 | func TestMembers(t *testing.T) { 13 | cli := redis.NewClient(&redis.Options{ 14 | Addr: "localhost:6379", 15 | }) 16 | cli.FlushAll(context.Background()) 17 | 18 | { 19 | _, err := cli.Eval(context.Background(), ScriptUpdateMember, []string{"sniperHW1"}, "insert_update", "sniperHW's data").Result() 20 | fmt.Println(GetRedisError(err)) 21 | } 22 | 23 | { 24 | _, err := cli.Eval(context.Background(), ScriptUpdateMember, []string{"sniperHW2"}, "insert_update", "sniperHW2's data").Result() 25 | fmt.Println(GetRedisError(err)) 26 | } 27 | 28 | { 29 | _, err := cli.Eval(context.Background(), ScriptUpdateMember, []string{"sniperHW2"}, "delete").Result() 30 | fmt.Println(GetRedisError(err)) 31 | } 32 | 33 | { 34 | re, err := cli.Eval(context.Background(), ScriptGetMembers, []string{}, 0).Result() 35 | if err != nil { 36 | fmt.Println(GetRedisError(err)) 37 | } 38 | fmt.Println(re) 39 | } 40 | 41 | { 42 | re, err := cli.Eval(context.Background(), ScriptGetMembers, []string{}, 2).Result() 43 | if err != nil { 44 | fmt.Println(GetRedisError(err)) 45 | } 46 | fmt.Println(re) 47 | } 48 | 49 | { 50 | re, err := cli.Eval(context.Background(), ScriptGetMembers, []string{}, 3).Result() 51 | if err != nil { 52 | fmt.Println(GetRedisError(err)) 53 | } 54 | fmt.Println(re) 55 | } 56 | 57 | } 58 | 59 | func TestAlive(t *testing.T) { 60 | cli := redis.NewClient(&redis.Options{ 61 | Addr: "localhost:6379", 62 
| }) 63 | cli.FlushAll(context.Background()) 64 | 65 | { 66 | _, err := cli.Eval(context.Background(), ScriptHeartbeat, []string{"sniperHW1"}, 2).Result() 67 | fmt.Println("sniperHW1 heartbeat", GetRedisError(err)) 68 | } 69 | 70 | { 71 | _, err := cli.Eval(context.Background(), ScriptHeartbeat, []string{"sniperHW2"}, 5).Result() 72 | fmt.Println("sniperHW2 heartbeat", GetRedisError(err)) 73 | } 74 | 75 | { 76 | re, err := cli.Eval(context.Background(), ScriptGetAlive, []string{}, 0).Result() 77 | err = GetRedisError(err) 78 | if err != nil { 79 | fmt.Println(err) 80 | return 81 | } 82 | version := re.([]interface{})[0].(int64) 83 | fmt.Println("alive version", version) 84 | for _, v := range re.([]interface{})[1].([]interface{}) { 85 | fmt.Println(v.([]interface{})[0].(string), v.([]interface{})[1].(string)) 86 | } 87 | } 88 | 89 | c := make(chan struct{}) 90 | 91 | go func() { 92 | m, err := cli.Subscribe(context.Background(), "alive").ReceiveMessage(context.Background()) 93 | err = GetRedisError(err) 94 | fmt.Println("server version", m.Payload) 95 | if err == nil { 96 | re, err := cli.Eval(context.Background(), ScriptGetAlive, []string{}, 0).Result() 97 | err = GetRedisError(err) 98 | if err != nil { 99 | fmt.Println(err) 100 | return 101 | } 102 | //fmt.Println(re) 103 | version := re.([]interface{})[0].(int64) 104 | fmt.Println("alive version", version) 105 | for _, v := range re.([]interface{})[1].([]interface{}) { 106 | fmt.Println(v.([]interface{})[0].(string), v.([]interface{})[1].(string)) 107 | } 108 | close(c) 109 | } 110 | }() 111 | 112 | time.Sleep(time.Second * 3) 113 | 114 | _, err := cli.Eval(context.Background(), ScriptCheckTimeout, []string{}).Result() 115 | err = GetRedisError(err) 116 | if err != nil { 117 | fmt.Println(err) 118 | return 119 | } 120 | fmt.Println("ScriptCheckTimeout") 121 | 122 | <-c 123 | 124 | { 125 | re, err := cli.Eval(context.Background(), ScriptGetAlive, []string{}, 1).Result() 126 | err = GetRedisError(err) 127 | if err 
!= nil { 128 | fmt.Println(err) 129 | return 130 | } 131 | version := re.([]interface{})[0].(int64) 132 | fmt.Println("alive version", version) 133 | for _, v := range re.([]interface{})[1].([]interface{}) { 134 | fmt.Println(v.([]interface{})[0].(string), v.([]interface{})[1].(string)) 135 | } 136 | } 137 | 138 | } 139 | 140 | func TestRedis(t *testing.T) { 141 | cli := redis.NewClient(&redis.Options{ 142 | Addr: "localhost:6379", 143 | MaxRetries: 10, 144 | }) 145 | cli.FlushAll(context.Background()) 146 | 147 | cli.Set(context.Background(), "hello", "world", 0) 148 | 149 | v, err := cli.Get(context.Background(), "hello").Result() 150 | 151 | fmt.Println(v, err) 152 | 153 | time.Sleep(time.Second * 5) 154 | 155 | fmt.Println("again") 156 | 157 | v, err = cli.Get(context.Background(), "hello").Result() 158 | 159 | fmt.Println(v, err) 160 | 161 | } 162 | 163 | func TestRedisSubscribe(t *testing.T) { 164 | cli := redis.NewClient(&redis.Options{ 165 | Addr: "localhost:6379", 166 | MaxRetries: 10, 167 | }) 168 | cli.FlushAll(context.Background()) 169 | 170 | _, err := cli.Subscribe(context.Background(), "alive").ReceiveMessage(context.Background()) 171 | err = GetRedisError(err) 172 | fmt.Println(err) 173 | 174 | time.Sleep(time.Second * 5) 175 | 176 | fmt.Println("again") 177 | 178 | _, err = cli.Subscribe(context.Background(), "alive").ReceiveMessage(context.Background()) 179 | err = GetRedisError(err) 180 | fmt.Println(err) 181 | 182 | } 183 | -------------------------------------------------------------------------------- /codec/ss/ss_test.go: -------------------------------------------------------------------------------- 1 | package ss 2 | 3 | //go test -race -covermode=atomic -v -coverprofile=coverage.out -run=. 
4 | //go tool cover -html=coverage.out 5 | import ( 6 | "net" 7 | "testing" 8 | "time" 9 | 10 | "github.com/sniperHW/clustergo/addr" 11 | "github.com/sniperHW/rpcgo" 12 | 13 | "github.com/sniperHW/clustergo/codec/pb" 14 | "github.com/stretchr/testify/assert" 15 | ) 16 | 17 | func init() { 18 | pb.Register(Namespace, &Echo{}, 1) 19 | } 20 | 21 | type readable struct { 22 | buff []byte 23 | } 24 | 25 | func (r *readable) Read(buff []byte) (int, error) { 26 | copy(buff, r.buff) 27 | return len(r.buff), nil 28 | } 29 | 30 | func (r *readable) SetReadDeadline(_ time.Time) error { 31 | return nil 32 | } 33 | 34 | func TestRPCResponse(t *testing.T) { 35 | 36 | selfAddr, _ := addr.MakeLogicAddr("1.1.1") 37 | targetAddr, _ := addr.MakeLogicAddr("1.1.2") 38 | var buffs net.Buffers 39 | var n int 40 | { 41 | codec := NewCodec(selfAddr) 42 | msg := NewMessage(targetAddr, selfAddr, &rpcgo.ResponseMsg{ 43 | Seq: 1, 44 | Ret: []byte("world"), 45 | }) 46 | 47 | buffs, n = codec.Encode(buffs, msg) 48 | assert.Equal(t, len(buffs[0])+len(buffs[1]), n) 49 | } 50 | 51 | { 52 | codec := NewCodec(targetAddr) 53 | 54 | r := &readable{ 55 | buff: buffs[0], 56 | } 57 | r.buff = append(r.buff, buffs[1]...) 
58 | 59 | pkt, err := codec.Recv(r, time.Time{}) 60 | assert.Nil(t, err) 61 | assert.Equal(t, len(pkt), n-4) 62 | 63 | message, err := codec.Decode(pkt) 64 | assert.Nil(t, err) 65 | 66 | rpcReq, ok := message.(*Message).Payload().(*rpcgo.ResponseMsg) 67 | assert.Equal(t, true, ok) 68 | assert.Equal(t, rpcReq.Seq, uint64(1)) 69 | assert.Equal(t, rpcReq.Ret, []byte("world")) 70 | } 71 | 72 | } 73 | 74 | func TestRPCRequest(t *testing.T) { 75 | 76 | selfAddr, _ := addr.MakeLogicAddr("1.1.1") 77 | targetAddr, _ := addr.MakeLogicAddr("1.1.2") 78 | var buffs net.Buffers 79 | var n int 80 | { 81 | codec := NewCodec(selfAddr) 82 | msg := NewMessage(targetAddr, selfAddr, &rpcgo.RequestMsg{ 83 | Seq: 1, 84 | Method: "hello", 85 | Arg: []byte("world"), 86 | }) 87 | 88 | buffs, n = codec.Encode(buffs, msg) 89 | assert.Equal(t, len(buffs[0])+len(buffs[1]), n) 90 | } 91 | 92 | { 93 | codec := NewCodec(targetAddr) 94 | 95 | r := &readable{ 96 | buff: buffs[0], 97 | } 98 | r.buff = append(r.buff, buffs[1]...) 
99 | 100 | pkt, err := codec.Recv(r, time.Time{}) 101 | assert.Nil(t, err) 102 | assert.Equal(t, len(pkt), n-4) 103 | 104 | message, err := codec.Decode(pkt) 105 | assert.Nil(t, err) 106 | 107 | rpcReq, ok := message.(*Message).Payload().(*rpcgo.RequestMsg) 108 | assert.Equal(t, true, ok) 109 | assert.Equal(t, rpcReq.Seq, uint64(1)) 110 | assert.Equal(t, rpcReq.Method, "hello") 111 | assert.Equal(t, rpcReq.Arg, []byte("world")) 112 | } 113 | 114 | } 115 | 116 | func TestMessage(t *testing.T) { 117 | selfAddr, _ := addr.MakeLogicAddr("1.1.1") 118 | targetAddr, _ := addr.MakeLogicAddr("1.1.2") 119 | var buffs net.Buffers 120 | var n int 121 | { 122 | codec := NewCodec(selfAddr) 123 | 124 | msg := NewMessage(targetAddr, selfAddr, &Echo{Msg: "hello"}) 125 | 126 | buffs, n = codec.Encode(buffs, msg) 127 | assert.Equal(t, len(buffs[0])+len(buffs[1]), n) 128 | } 129 | 130 | { 131 | codec := NewCodec(targetAddr) 132 | 133 | r := &readable{ 134 | buff: buffs[0], 135 | } 136 | r.buff = append(r.buff, buffs[1]...) 
137 | pkt, err := codec.Recv(r, time.Time{}) 138 | assert.Nil(t, err) 139 | assert.Equal(t, len(pkt), n-4) 140 | 141 | message, err := codec.Decode(pkt) 142 | assert.Nil(t, err) 143 | 144 | assert.Equal(t, message.(*Message).From(), selfAddr) 145 | assert.Equal(t, message.(*Message).To(), targetAddr) 146 | assert.Equal(t, message.(*Message).Cmd(), uint16(1)) 147 | assert.Equal(t, message.(*Message).Payload().(*Echo).Msg, "hello") 148 | } 149 | 150 | } 151 | 152 | func TestRelayMessage(t *testing.T) { 153 | 154 | selfAddr, _ := addr.MakeLogicAddr("1.1.1") 155 | harborAddr, _ := addr.MakeLogicAddr("1.255.1") 156 | targetAddr, _ := addr.MakeLogicAddr("1.1.3") 157 | var buffs net.Buffers 158 | var n int 159 | { 160 | codec := NewCodec(selfAddr) 161 | msg := NewMessage(targetAddr, selfAddr, &rpcgo.RequestMsg{ 162 | Seq: 1, 163 | Method: "hello", 164 | Arg: []byte("world"), 165 | }) 166 | 167 | buffs, n = codec.Encode(buffs, msg) 168 | assert.Equal(t, len(buffs[0])+len(buffs[1]), n) 169 | } 170 | 171 | { 172 | codec := NewCodec(harborAddr) 173 | 174 | r := &readable{ 175 | buff: buffs[0], 176 | } 177 | r.buff = append(r.buff, buffs[1]...) 178 | 179 | pkt, err := codec.Recv(r, time.Time{}) 180 | assert.Nil(t, err) 181 | assert.Equal(t, len(pkt), n-4) 182 | 183 | message, err := codec.Decode(pkt) 184 | assert.Nil(t, err) 185 | 186 | relayMessage, ok := message.(*RelayMessage) 187 | assert.Equal(t, ok, true) 188 | assert.Equal(t, n, len(relayMessage.Payload())) 189 | 190 | rpcReq := relayMessage.GetRpcRequest() 191 | assert.Equal(t, rpcReq.Seq, uint64(1)) 192 | assert.Equal(t, rpcReq.Method, "hello") 193 | assert.Equal(t, rpcReq.Arg, []byte("world")) 194 | } 195 | 196 | } 197 | -------------------------------------------------------------------------------- /example/pbrpc/service/echo/echo.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 
2 | // versions: 3 | // protoc-gen-go v1.36.6 4 | // protoc v5.29.3 5 | // source: proto/echo.proto 6 | 7 | package echo 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | unsafe "unsafe" 15 | ) 16 | 17 | const ( 18 | // Verify that this generated code is sufficiently up-to-date. 19 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 20 | // Verify that runtime/protoimpl is sufficiently up-to-date. 21 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 22 | ) 23 | 24 | type EchoReq struct { 25 | state protoimpl.MessageState `protogen:"open.v1"` 26 | Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` 27 | unknownFields protoimpl.UnknownFields 28 | sizeCache protoimpl.SizeCache 29 | } 30 | 31 | func (x *EchoReq) Reset() { 32 | *x = EchoReq{} 33 | mi := &file_proto_echo_proto_msgTypes[0] 34 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 35 | ms.StoreMessageInfo(mi) 36 | } 37 | 38 | func (x *EchoReq) String() string { 39 | return protoimpl.X.MessageStringOf(x) 40 | } 41 | 42 | func (*EchoReq) ProtoMessage() {} 43 | 44 | func (x *EchoReq) ProtoReflect() protoreflect.Message { 45 | mi := &file_proto_echo_proto_msgTypes[0] 46 | if x != nil { 47 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 48 | if ms.LoadMessageInfo() == nil { 49 | ms.StoreMessageInfo(mi) 50 | } 51 | return ms 52 | } 53 | return mi.MessageOf(x) 54 | } 55 | 56 | // Deprecated: Use EchoReq.ProtoReflect.Descriptor instead. 
57 | func (*EchoReq) Descriptor() ([]byte, []int) { 58 | return file_proto_echo_proto_rawDescGZIP(), []int{0} 59 | } 60 | 61 | func (x *EchoReq) GetMsg() string { 62 | if x != nil { 63 | return x.Msg 64 | } 65 | return "" 66 | } 67 | 68 | type EchoRsp struct { 69 | state protoimpl.MessageState `protogen:"open.v1"` 70 | Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` 71 | unknownFields protoimpl.UnknownFields 72 | sizeCache protoimpl.SizeCache 73 | } 74 | 75 | func (x *EchoRsp) Reset() { 76 | *x = EchoRsp{} 77 | mi := &file_proto_echo_proto_msgTypes[1] 78 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 79 | ms.StoreMessageInfo(mi) 80 | } 81 | 82 | func (x *EchoRsp) String() string { 83 | return protoimpl.X.MessageStringOf(x) 84 | } 85 | 86 | func (*EchoRsp) ProtoMessage() {} 87 | 88 | func (x *EchoRsp) ProtoReflect() protoreflect.Message { 89 | mi := &file_proto_echo_proto_msgTypes[1] 90 | if x != nil { 91 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 92 | if ms.LoadMessageInfo() == nil { 93 | ms.StoreMessageInfo(mi) 94 | } 95 | return ms 96 | } 97 | return mi.MessageOf(x) 98 | } 99 | 100 | // Deprecated: Use EchoRsp.ProtoReflect.Descriptor instead. 
101 | func (*EchoRsp) Descriptor() ([]byte, []int) { 102 | return file_proto_echo_proto_rawDescGZIP(), []int{1} 103 | } 104 | 105 | func (x *EchoRsp) GetMsg() string { 106 | if x != nil { 107 | return x.Msg 108 | } 109 | return "" 110 | } 111 | 112 | var File_proto_echo_proto protoreflect.FileDescriptor 113 | 114 | const file_proto_echo_proto_rawDesc = "" + 115 | "\n" + 116 | "\x10proto/echo.proto\"\x1b\n" + 117 | "\aechoReq\x12\x10\n" + 118 | "\x03msg\x18\x01 \x01(\tR\x03msg\"\x1b\n" + 119 | "\aechoRsp\x12\x10\n" + 120 | "\x03msg\x18\x01 \x01(\tR\x03msgB\x0eZ\fservice/echob\x06proto3" 121 | 122 | var ( 123 | file_proto_echo_proto_rawDescOnce sync.Once 124 | file_proto_echo_proto_rawDescData []byte 125 | ) 126 | 127 | func file_proto_echo_proto_rawDescGZIP() []byte { 128 | file_proto_echo_proto_rawDescOnce.Do(func() { 129 | file_proto_echo_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_echo_proto_rawDesc), len(file_proto_echo_proto_rawDesc))) 130 | }) 131 | return file_proto_echo_proto_rawDescData 132 | } 133 | 134 | var file_proto_echo_proto_msgTypes = make([]protoimpl.MessageInfo, 2) 135 | var file_proto_echo_proto_goTypes = []any{ 136 | (*EchoReq)(nil), // 0: echoReq 137 | (*EchoRsp)(nil), // 1: echoRsp 138 | } 139 | var file_proto_echo_proto_depIdxs = []int32{ 140 | 0, // [0:0] is the sub-list for method output_type 141 | 0, // [0:0] is the sub-list for method input_type 142 | 0, // [0:0] is the sub-list for extension type_name 143 | 0, // [0:0] is the sub-list for extension extendee 144 | 0, // [0:0] is the sub-list for field type_name 145 | } 146 | 147 | func init() { file_proto_echo_proto_init() } 148 | func file_proto_echo_proto_init() { 149 | if File_proto_echo_proto != nil { 150 | return 151 | } 152 | type x struct{} 153 | out := protoimpl.TypeBuilder{ 154 | File: protoimpl.DescBuilder{ 155 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 156 | RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_proto_echo_proto_rawDesc), len(file_proto_echo_proto_rawDesc)), 157 | NumEnums: 0, 158 | NumMessages: 2, 159 | NumExtensions: 0, 160 | NumServices: 0, 161 | }, 162 | GoTypes: file_proto_echo_proto_goTypes, 163 | DependencyIndexes: file_proto_echo_proto_depIdxs, 164 | MessageInfos: file_proto_echo_proto_msgTypes, 165 | }.Build() 166 | File_proto_echo_proto = out.File 167 | file_proto_echo_proto_goTypes = nil 168 | file_proto_echo_proto_depIdxs = nil 169 | } 170 | -------------------------------------------------------------------------------- /example/pbrpc/service/test/test.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.36.6 4 | // protoc v5.29.3 5 | // source: proto/test.proto 6 | 7 | package test 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | unsafe "unsafe" 15 | ) 16 | 17 | const ( 18 | // Verify that this generated code is sufficiently up-to-date. 19 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 20 | // Verify that runtime/protoimpl is sufficiently up-to-date. 
21 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 22 | ) 23 | 24 | type TestReq struct { 25 | state protoimpl.MessageState `protogen:"open.v1"` 26 | Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` 27 | unknownFields protoimpl.UnknownFields 28 | sizeCache protoimpl.SizeCache 29 | } 30 | 31 | func (x *TestReq) Reset() { 32 | *x = TestReq{} 33 | mi := &file_proto_test_proto_msgTypes[0] 34 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 35 | ms.StoreMessageInfo(mi) 36 | } 37 | 38 | func (x *TestReq) String() string { 39 | return protoimpl.X.MessageStringOf(x) 40 | } 41 | 42 | func (*TestReq) ProtoMessage() {} 43 | 44 | func (x *TestReq) ProtoReflect() protoreflect.Message { 45 | mi := &file_proto_test_proto_msgTypes[0] 46 | if x != nil { 47 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 48 | if ms.LoadMessageInfo() == nil { 49 | ms.StoreMessageInfo(mi) 50 | } 51 | return ms 52 | } 53 | return mi.MessageOf(x) 54 | } 55 | 56 | // Deprecated: Use TestReq.ProtoReflect.Descriptor instead. 
57 | func (*TestReq) Descriptor() ([]byte, []int) { 58 | return file_proto_test_proto_rawDescGZIP(), []int{0} 59 | } 60 | 61 | func (x *TestReq) GetMsg() string { 62 | if x != nil { 63 | return x.Msg 64 | } 65 | return "" 66 | } 67 | 68 | type TestRsp struct { 69 | state protoimpl.MessageState `protogen:"open.v1"` 70 | Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` 71 | unknownFields protoimpl.UnknownFields 72 | sizeCache protoimpl.SizeCache 73 | } 74 | 75 | func (x *TestRsp) Reset() { 76 | *x = TestRsp{} 77 | mi := &file_proto_test_proto_msgTypes[1] 78 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 79 | ms.StoreMessageInfo(mi) 80 | } 81 | 82 | func (x *TestRsp) String() string { 83 | return protoimpl.X.MessageStringOf(x) 84 | } 85 | 86 | func (*TestRsp) ProtoMessage() {} 87 | 88 | func (x *TestRsp) ProtoReflect() protoreflect.Message { 89 | mi := &file_proto_test_proto_msgTypes[1] 90 | if x != nil { 91 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 92 | if ms.LoadMessageInfo() == nil { 93 | ms.StoreMessageInfo(mi) 94 | } 95 | return ms 96 | } 97 | return mi.MessageOf(x) 98 | } 99 | 100 | // Deprecated: Use TestRsp.ProtoReflect.Descriptor instead. 
101 | func (*TestRsp) Descriptor() ([]byte, []int) { 102 | return file_proto_test_proto_rawDescGZIP(), []int{1} 103 | } 104 | 105 | func (x *TestRsp) GetMsg() string { 106 | if x != nil { 107 | return x.Msg 108 | } 109 | return "" 110 | } 111 | 112 | var File_proto_test_proto protoreflect.FileDescriptor 113 | 114 | const file_proto_test_proto_rawDesc = "" + 115 | "\n" + 116 | "\x10proto/test.proto\"\x1b\n" + 117 | "\atestReq\x12\x10\n" + 118 | "\x03msg\x18\x01 \x01(\tR\x03msg\"\x1b\n" + 119 | "\atestRsp\x12\x10\n" + 120 | "\x03msg\x18\x01 \x01(\tR\x03msgB\x0eZ\fservice/testb\x06proto3" 121 | 122 | var ( 123 | file_proto_test_proto_rawDescOnce sync.Once 124 | file_proto_test_proto_rawDescData []byte 125 | ) 126 | 127 | func file_proto_test_proto_rawDescGZIP() []byte { 128 | file_proto_test_proto_rawDescOnce.Do(func() { 129 | file_proto_test_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_test_proto_rawDesc), len(file_proto_test_proto_rawDesc))) 130 | }) 131 | return file_proto_test_proto_rawDescData 132 | } 133 | 134 | var file_proto_test_proto_msgTypes = make([]protoimpl.MessageInfo, 2) 135 | var file_proto_test_proto_goTypes = []any{ 136 | (*TestReq)(nil), // 0: testReq 137 | (*TestRsp)(nil), // 1: testRsp 138 | } 139 | var file_proto_test_proto_depIdxs = []int32{ 140 | 0, // [0:0] is the sub-list for method output_type 141 | 0, // [0:0] is the sub-list for method input_type 142 | 0, // [0:0] is the sub-list for extension type_name 143 | 0, // [0:0] is the sub-list for extension extendee 144 | 0, // [0:0] is the sub-list for field type_name 145 | } 146 | 147 | func init() { file_proto_test_proto_init() } 148 | func file_proto_test_proto_init() { 149 | if File_proto_test_proto != nil { 150 | return 151 | } 152 | type x struct{} 153 | out := protoimpl.TypeBuilder{ 154 | File: protoimpl.DescBuilder{ 155 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 156 | RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_proto_test_proto_rawDesc), len(file_proto_test_proto_rawDesc)), 157 | NumEnums: 0, 158 | NumMessages: 2, 159 | NumExtensions: 0, 160 | NumServices: 0, 161 | }, 162 | GoTypes: file_proto_test_proto_goTypes, 163 | DependencyIndexes: file_proto_test_proto_depIdxs, 164 | MessageInfos: file_proto_test_proto_msgTypes, 165 | }.Build() 166 | File_proto_test_proto = out.File 167 | file_proto_test_proto_goTypes = nil 168 | file_proto_test_proto_depIdxs = nil 169 | } 170 | -------------------------------------------------------------------------------- /membership/redis/subscribe.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "sync/atomic" 8 | "time" 9 | 10 | "github.com/redis/go-redis/v9" 11 | "github.com/sniperHW/clustergo/membership" 12 | ) 13 | 14 | type Subscribe struct { 15 | RedisCli *redis.Client 16 | memberVersion int64 17 | aliveVersion int64 18 | alive map[string]struct{} //健康节点 19 | members map[string]*membership.Node //*membership.Node //配置中的节点 20 | cb func(membership.MemberInfo) 21 | getMembersSha string 22 | getAliveSha string 23 | once sync.Once 24 | closeFunc context.CancelFunc 25 | closed atomic.Bool 26 | } 27 | 28 | func (cli *Subscribe) Init() (err error) { 29 | cli.alive = map[string]struct{}{} 30 | cli.members = map[string]*membership.Node{} 31 | if cli.getMembersSha, err = cli.RedisCli.ScriptLoad(context.Background(), ScriptGetMembers).Result(); err != nil { 32 | err = fmt.Errorf("error on init ScriptGetMembers:%s", err.Error()) 33 | return err 34 | } 35 | 36 | if cli.getAliveSha, err = cli.RedisCli.ScriptLoad(context.Background(), ScriptGetAlive).Result(); err != nil { 37 | err = fmt.Errorf("error on init getAlive:%s", err.Error()) 38 | return err 39 | } 40 | 41 | return err 42 | } 43 | 44 | func (cli *Subscribe) getAlives() error { 45 | re, err := cli.RedisCli.EvalSha(context.Background(), cli.getAliveSha, 
[]string{}, cli.aliveVersion).Result() 46 | if err = GetRedisError(err); err != nil { 47 | return err 48 | } 49 | 50 | r := re.([]interface{}) 51 | version := r[0].(int64) 52 | if version == cli.aliveVersion { 53 | return nil 54 | } 55 | cli.aliveVersion = version 56 | var nodeinfo membership.MemberInfo 57 | for _, v := range r[1].([]interface{}) { 58 | addr, dead := v.([]interface{})[0].(string), v.([]interface{})[1].(string) 59 | if dead == "true" { 60 | delete(cli.alive, addr) 61 | if n, ok := cli.members[addr]; ok { 62 | //标记为不可用状态 63 | nodeinfo.Update = append(nodeinfo.Update, membership.Node{ 64 | Addr: n.Addr, 65 | Export: n.Export, 66 | Available: false, 67 | }) 68 | } 69 | } else if _, ok := cli.alive[addr]; !ok { 70 | cli.alive[addr] = struct{}{} 71 | if n, ok := cli.members[addr]; ok && n.Available { 72 | nodeinfo.Update = append(nodeinfo.Update, membership.Node{ 73 | Addr: n.Addr, 74 | Export: n.Export, 75 | Available: true, 76 | }) 77 | } 78 | } 79 | } 80 | if cli.cb != nil && len(nodeinfo.Update) > 0 { 81 | cli.cb(nodeinfo) 82 | } 83 | return nil 84 | } 85 | 86 | func (cli *Subscribe) getMembers() error { 87 | re, err := cli.RedisCli.EvalSha(context.Background(), cli.getMembersSha, []string{}, cli.memberVersion).Result() 88 | if err = GetRedisError(err); err != nil { 89 | return err 90 | } 91 | 92 | r := re.([]interface{}) 93 | version := r[0].(int64) 94 | if version == cli.memberVersion { 95 | return nil 96 | } 97 | cli.memberVersion = version 98 | var nodeinfo membership.MemberInfo 99 | for _, v := range r[1].([]interface{}) { 100 | m, markdel := v.([]interface{})[1].(string), v.([]interface{})[2].(string) 101 | var n membership.Node 102 | if err = n.Unmarshal([]byte(m)); err != nil { 103 | continue 104 | } else if markdel == "true" { 105 | if _, ok := cli.members[n.Addr.LogicAddr().String()]; ok { 106 | if _, ok := cli.alive[n.Addr.LogicAddr().String()]; ok && n.Available { 107 | n.Available = true 108 | } else { 109 | n.Available = false 110 | } 
111 | nodeinfo.Remove = append(nodeinfo.Remove, n) 112 | delete(cli.members, n.Addr.LogicAddr().String()) 113 | } 114 | } else { 115 | logicAddr := n.Addr.LogicAddr().String() 116 | 117 | nn := membership.Node{ 118 | Addr: n.Addr, 119 | Export: n.Export, 120 | } 121 | 122 | if _, ok := cli.alive[logicAddr]; ok && n.Available { 123 | nn.Available = true 124 | } 125 | 126 | if _, ok := cli.members[logicAddr]; ok { 127 | nodeinfo.Update = append(nodeinfo.Update, nn) 128 | } else { 129 | nodeinfo.Add = append(nodeinfo.Add, nn) 130 | } 131 | cli.members[logicAddr] = &n 132 | 133 | } 134 | } 135 | if cli.cb != nil && (len(nodeinfo.Add) > 0 || len(nodeinfo.Update) > 0 || len(nodeinfo.Remove) > 0) { 136 | cli.cb(nodeinfo) 137 | } 138 | return nil 139 | } 140 | 141 | func (cli *Subscribe) watch(ctx context.Context) { 142 | ch := cli.RedisCli.Subscribe(ctx, "members", "alive").Channel() 143 | 144 | /* 145 | * 如果更新Node的事件早于Subscribe,更新事件将丢失,因此必须设置超时时间,超时后尝试获取members和alives的更新 146 | */ 147 | 148 | ticker := time.NewTicker(time.Second * 5) 149 | for { 150 | select { 151 | case m := <-ch: 152 | switch m.Channel { 153 | case "members": 154 | cli.getMembers() 155 | case "alive": 156 | cli.getAlives() 157 | } 158 | case <-ticker.C: 159 | cli.getMembers() 160 | cli.getAlives() 161 | case <-ctx.Done(): 162 | return 163 | } 164 | } 165 | } 166 | 167 | func (cli *Subscribe) Close() { 168 | if cli.closed.CompareAndSwap(false, true) { 169 | if cli.closeFunc != nil { 170 | cli.closeFunc() 171 | } 172 | } 173 | } 174 | 175 | func (cli *Subscribe) Subscribe(cb func(membership.MemberInfo)) error { 176 | 177 | once := false 178 | 179 | cli.once.Do(func() { 180 | once = true 181 | }) 182 | 183 | if once { 184 | cli.cb = cb 185 | 186 | err := cli.getMembers() 187 | if err != nil { 188 | return err 189 | } 190 | err = cli.getAlives() 191 | if err != nil { 192 | return err 193 | } 194 | 195 | ctx, cancel := context.WithCancel(context.Background()) 196 | 197 | cli.closeFunc = cancel 198 | 199 | go 
cli.watch(ctx) 200 | } 201 | return nil 202 | } 203 | -------------------------------------------------------------------------------- /membership/redis/client_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/redis/go-redis/v9" 10 | "github.com/sniperHW/clustergo/addr" 11 | "github.com/sniperHW/clustergo/membership" 12 | ) 13 | 14 | func makeAddr(logicAddr, netAddr string) addr.Addr { 15 | a, _ := addr.MakeAddr(logicAddr, netAddr) 16 | return a 17 | } 18 | 19 | func makeLogicAddr(logicAddr string) addr.LogicAddr { 20 | a, _ := addr.MakeLogicAddr(logicAddr) 21 | return a 22 | } 23 | 24 | func TestSubscribe(t *testing.T) { 25 | cli := redis.NewClient(&redis.Options{ 26 | Addr: "localhost:6379", 27 | }) 28 | cli.FlushAll(context.Background()) 29 | 30 | sub := &Subscribe{ 31 | RedisCli: cli, 32 | } 33 | 34 | if err := sub.Init(); err != nil { 35 | panic(err) 36 | } 37 | 38 | sub.Subscribe(func(di membership.MemberInfo) { 39 | fmt.Println("add", di.Add) 40 | fmt.Println("update", di.Update) 41 | fmt.Println("remove", di.Remove) 42 | }) 43 | 44 | //time.Sleep(time.Second * 10) 45 | 46 | admin := &Admin{ 47 | RedisCli: redis.NewClient(&redis.Options{ 48 | Addr: "localhost:6379", 49 | }), 50 | } 51 | 52 | if err := admin.Init(); err != nil { 53 | panic(err) 54 | } 55 | 56 | fmt.Println("Update1") 57 | 58 | err := admin.UpdateMember(membership.Node{ 59 | Addr: makeAddr("1.1.1", "192.168.1.1:8011"), 60 | Available: true, 61 | }) 62 | if err != nil { 63 | panic(err) 64 | } 65 | 66 | //time.Sleep(time.Second) 67 | 68 | fmt.Println("Update2") 69 | 70 | err = admin.UpdateMember(membership.Node{ 71 | Addr: makeAddr("1.1.2", "192.168.1.2:8011"), 72 | Available: true, 73 | }) 74 | if err != nil { 75 | panic(err) 76 | } 77 | 78 | time.Sleep(time.Second * 2) 79 | 80 | err = admin.RemoveMember(membership.Node{ 81 | Addr: makeAddr("1.1.2", 
"192.168.1.2:8011"), 82 | }) 83 | if err != nil { 84 | panic(err) 85 | } 86 | 87 | time.Sleep(time.Second) 88 | 89 | err = admin.UpdateMember(membership.Node{ 90 | Addr: makeAddr("1.1.1", "192.168.1.1:8012"), 91 | Available: true, 92 | }) 93 | if err != nil { 94 | panic(err) 95 | } 96 | 97 | time.Sleep(time.Second) 98 | 99 | fmt.Println("------------keepalive---------") 100 | 101 | admin.KeepAlive(membership.Node{ 102 | Addr: makeAddr("1.1.1", "192.168.1.1:8012"), 103 | }) 104 | 105 | time.Sleep(time.Second * 11) 106 | 107 | fmt.Println("ScriptCheckTimeout") 108 | 109 | admin.CheckTimeout() 110 | 111 | time.Sleep(time.Second * 2) 112 | 113 | } 114 | 115 | /* 116 | func TestGetMember(t *testing.T) { 117 | cli := redis.NewClient(&redis.Options{ 118 | Addr: "localhost:6379", 119 | }) 120 | cli.FlushAll(context.Background()) 121 | 122 | rcli := &MemberShip{ 123 | RedisCli: cli, 124 | } 125 | 126 | if err := rcli.Init(); err != nil { 127 | panic(err) 128 | } 129 | 130 | err := rcli.UpdateMember(&Node{ 131 | LogicAddr: "1.1.1", 132 | NetAddr: "192.168.1.1:8011", 133 | Available: true, 134 | }) 135 | if err != nil { 136 | panic(err) 137 | } 138 | 139 | err = rcli.UpdateMember(&Node{ 140 | LogicAddr: "1.1.2", 141 | NetAddr: "192.168.1.2:8011", 142 | Available: true, 143 | }) 144 | if err != nil { 145 | panic(err) 146 | } 147 | 148 | rcli.getMembers() 149 | 150 | err = rcli.RemoveMember(&Node{ 151 | LogicAddr: "1.1.2", 152 | }) 153 | if err != nil { 154 | panic(err) 155 | } 156 | 157 | rcli.getMembers() 158 | 159 | } 160 | 161 | func TestGetAlive(t *testing.T) { 162 | cli := redis.NewClient(&redis.Options{ 163 | Addr: "localhost:6379", 164 | }) 165 | cli.FlushAll(context.Background()) 166 | 167 | { 168 | _, err := cli.Eval(context.Background(), ScriptHeartbeat, []string{"sniperHW1"}, 2).Result() 169 | fmt.Println("sniperHW1 heartbeat", GetRedisError(err)) 170 | } 171 | 172 | { 173 | _, err := cli.Eval(context.Background(), ScriptHeartbeat, []string{"sniperHW2"}, 5).Result() 
174 | fmt.Println("sniperHW2 heartbeat", GetRedisError(err)) 175 | } 176 | 177 | rcli := &MemberShip{ 178 | alive: map[string]struct{}{}, 179 | members: map[string]*Node{}, 180 | RedisCli: cli, 181 | } 182 | 183 | if err := rcli.Init(); err != nil { 184 | panic(err) 185 | } 186 | 187 | rcli.getAlives() 188 | 189 | c := make(chan struct{}) 190 | 191 | go func() { 192 | m, err := cli.Subscribe(context.Background(), "alive").ReceiveMessage(context.Background()) 193 | err = GetRedisError(err) 194 | fmt.Println("server version", m.Payload) 195 | if err == nil { 196 | _, err := cli.Eval(context.Background(), ScriptGetAlive, []string{}, 0).Result() 197 | err = GetRedisError(err) 198 | if err != nil { 199 | fmt.Println(err) 200 | return 201 | } 202 | //fmt.Println(re) 203 | //version := re.([]interface{})[0].(int64) 204 | //fmt.Println("alive version", version) 205 | //for _, v := range re.([]interface{})[1].([]interface{}) { 206 | // fmt.Println(v.([]interface{})[0].(string), v.([]interface{})[1].(string)) 207 | //} 208 | close(c) 209 | } 210 | }() 211 | 212 | time.Sleep(time.Second * 3) 213 | 214 | _, err := cli.Eval(context.Background(), ScriptCheckTimeout, []string{}).Result() 215 | err = GetRedisError(err) 216 | if err != nil { 217 | fmt.Println(err) 218 | return 219 | } 220 | fmt.Println("ScriptCheckTimeout") 221 | 222 | <-c 223 | 224 | rcli.getAlives() 225 | 226 | /*{ 227 | re, err := cli.Eval(ScriptGetAlive, []string{}, 1).Result() 228 | err = GetRedisError(err) 229 | if err != nil { 230 | fmt.Println(err) 231 | return 232 | } 233 | version := re.([]interface{})[0].(int64) 234 | fmt.Println("alive version", version) 235 | for _, v := range re.([]interface{})[1].([]interface{}) { 236 | fmt.Println(v.([]interface{})[0].(string), v.([]interface{})[1].(string)) 237 | } 238 | }* / 239 | 240 | } 241 | */ 242 | -------------------------------------------------------------------------------- /codec/buffer/buffer.go: 
-------------------------------------------------------------------------------- 1 | package buffer 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | ) 7 | 8 | var ErrOutOfBounds error = errors.New("out of bounds") 9 | 10 | type BufferWriter struct { 11 | endian binary.ByteOrder 12 | } 13 | 14 | func NeWriter(endian binary.ByteOrder) BufferWriter { 15 | return BufferWriter{endian: endian} 16 | } 17 | 18 | func (w BufferWriter) AppendByte(bs []byte, v byte) []byte { 19 | return append(bs, v) 20 | } 21 | 22 | func (w BufferWriter) AppendString(bs []byte, s string) []byte { 23 | return append(bs, s...) 24 | } 25 | 26 | func (w BufferWriter) AppendBytes(bs []byte, bytes []byte) []byte { 27 | return append(bs, bytes...) 28 | } 29 | 30 | func (w BufferWriter) AppendUint16(bs []byte, u16 uint16) []byte { 31 | bu := []byte{0, 0} 32 | w.endian.PutUint16(bu, u16) 33 | return w.AppendBytes(bs, bu) 34 | } 35 | 36 | func (w BufferWriter) AppendUint32(bs []byte, u32 uint32) []byte { 37 | bu := []byte{0, 0, 0, 0} 38 | w.endian.PutUint32(bu, u32) 39 | return w.AppendBytes(bs, bu) 40 | } 41 | 42 | func (w BufferWriter) AppendUint64(bs []byte, u64 uint64) []byte { 43 | bu := []byte{0, 0, 0, 0, 0, 0, 0, 0} 44 | w.endian.PutUint64(bu, u64) 45 | return w.AppendBytes(bs, bu) 46 | } 47 | 48 | func (w BufferWriter) AppendInt16(bs []byte, i16 int16) []byte { 49 | return w.AppendUint16(bs, uint16(i16)) 50 | } 51 | 52 | func (w BufferWriter) AppendInt32(bs []byte, i32 int32) []byte { 53 | return w.AppendUint32(bs, uint32(i32)) 54 | } 55 | 56 | func (w BufferWriter) AppendInt64(bs []byte, i64 int64) []byte { 57 | return w.AppendUint64(bs, uint64(i64)) 58 | } 59 | 60 | func (w BufferWriter) AppendInt(bs []byte, i32 int) []byte { 61 | return w.AppendUint32(bs, uint32(i32)) 62 | } 63 | 64 | type BufferReader struct { 65 | bs []byte 66 | offset int 67 | endian binary.ByteOrder 68 | } 69 | 70 | func NewReader(endian binary.ByteOrder, b []byte) BufferReader { 71 | return BufferReader{bs: b, 
endian: endian} 72 | } 73 | 74 | func (r *BufferReader) Reset(b []byte) { 75 | if len(b) > 0 { 76 | r.bs = b 77 | } 78 | r.offset = 0 79 | } 80 | 81 | func (r *BufferReader) GetAll() []byte { 82 | return r.bs[r.offset:] 83 | } 84 | 85 | func (r *BufferReader) GetOffset() int { 86 | return r.offset 87 | } 88 | 89 | func (r *BufferReader) IsOver() bool { 90 | return r.offset >= len(r.bs) 91 | } 92 | 93 | func (r *BufferReader) GetByte() byte { 94 | if r.offset+1 > len(r.bs) { 95 | return 0 96 | } else { 97 | ret := r.bs[r.offset] 98 | r.offset += 1 99 | return ret 100 | } 101 | } 102 | 103 | func (r *BufferReader) CheckGetByte() (byte, error) { 104 | if r.offset+1 > len(r.bs) { 105 | return 0, ErrOutOfBounds 106 | } else { 107 | ret := r.bs[r.offset] 108 | r.offset += 1 109 | return ret, nil 110 | } 111 | } 112 | 113 | func (r *BufferReader) GetUint16() uint16 { 114 | if r.offset+2 > len(r.bs) { 115 | return 0 116 | } else { 117 | ret := r.endian.Uint16(r.bs[r.offset : r.offset+2]) 118 | r.offset += 2 119 | return ret 120 | } 121 | } 122 | 123 | func (r *BufferReader) CheckGetUint16() (uint16, error) { 124 | if r.offset+2 > len(r.bs) { 125 | return 0, ErrOutOfBounds 126 | } else { 127 | ret := r.endian.Uint16(r.bs[r.offset : r.offset+2]) 128 | r.offset += 2 129 | return ret, nil 130 | } 131 | } 132 | 133 | func (r *BufferReader) GetInt16() int16 { 134 | return int16(r.GetUint16()) 135 | } 136 | 137 | func (r *BufferReader) CheckGetInt16() (int16, error) { 138 | u, err := r.CheckGetUint16() 139 | if nil != err { 140 | return 0, err 141 | } else { 142 | return int16(u), nil 143 | } 144 | } 145 | 146 | func (r *BufferReader) GetUint32() uint32 { 147 | if r.offset+4 > len(r.bs) { 148 | return 0 149 | } else { 150 | ret := r.endian.Uint32(r.bs[r.offset : r.offset+4]) 151 | r.offset += 4 152 | return ret 153 | } 154 | } 155 | 156 | func (r *BufferReader) CheckGetUint32() (uint32, error) { 157 | if r.offset+4 > len(r.bs) { 158 | return 0, ErrOutOfBounds 159 | } else { 160 | 
ret := r.endian.Uint32(r.bs[r.offset : r.offset+4]) 161 | r.offset += 4 162 | return ret, nil 163 | } 164 | } 165 | 166 | func (r *BufferReader) GetInt32() int32 { 167 | return int32(r.GetUint32()) 168 | } 169 | 170 | func (r *BufferReader) CheckGetInt32() (int32, error) { 171 | u, err := r.CheckGetUint32() 172 | if nil != err { 173 | return 0, err 174 | } else { 175 | return int32(u), nil 176 | } 177 | } 178 | 179 | func (r *BufferReader) GetUint64() uint64 { 180 | if r.offset+8 > len(r.bs) { 181 | return 0 182 | } else { 183 | ret := r.endian.Uint64(r.bs[r.offset : r.offset+8]) 184 | r.offset += 8 185 | return ret 186 | } 187 | } 188 | 189 | func (r *BufferReader) CheckGetUint64() (uint64, error) { 190 | if r.offset+8 > len(r.bs) { 191 | return 0, ErrOutOfBounds 192 | } else { 193 | ret := r.endian.Uint64(r.bs[r.offset : r.offset+8]) 194 | r.offset += 8 195 | return ret, nil 196 | } 197 | } 198 | 199 | func (r *BufferReader) GetInt64() int64 { 200 | return int64(r.GetUint64()) 201 | } 202 | 203 | func (r *BufferReader) CheckGetInt64() (int64, error) { 204 | u, err := r.CheckGetUint64() 205 | if nil != err { 206 | return 0, err 207 | } else { 208 | return int64(u), nil 209 | } 210 | } 211 | 212 | func (r *BufferReader) CheckGetInt() (int, error) { 213 | u, err := r.CheckGetUint32() 214 | if nil != err { 215 | return 0, err 216 | } else { 217 | return int(u), nil 218 | } 219 | } 220 | 221 | func (r *BufferReader) GetString(size int) string { 222 | return string(r.GetBytes(size)) 223 | } 224 | 225 | func (r *BufferReader) CheckGetString(size int) (string, error) { 226 | b, err := r.CheckGetBytes(size) 227 | if nil != err { 228 | return "", err 229 | } else { 230 | return string(b), nil 231 | } 232 | } 233 | 234 | func (r *BufferReader) GetBytes(size int) []byte { 235 | if len(r.bs)-r.offset < size { 236 | size = len(r.bs) - r.offset 237 | } 238 | ret := r.bs[r.offset : r.offset+size] 239 | r.offset += size 240 | return ret 241 | } 242 | 243 | func (r *BufferReader) 
CheckGetBytes(size int) ([]byte, error) { 244 | if len(r.bs)-r.offset < size { 245 | return nil, ErrOutOfBounds 246 | } 247 | ret := r.bs[r.offset : r.offset+size] 248 | r.offset += size 249 | return ret, nil 250 | } 251 | 252 | func (r *BufferReader) CopyBytes(size int) ([]byte, error) { 253 | if b, err := r.CheckGetBytes(size); nil == err { 254 | out := make([]byte, len(b)) 255 | copy(out, b) 256 | return out, nil 257 | } else { 258 | return nil, err 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /rpc.go: -------------------------------------------------------------------------------- 1 | package clustergo 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | "unsafe" 11 | 12 | "github.com/sniperHW/clustergo/addr" 13 | "github.com/sniperHW/clustergo/codec/ss" 14 | "github.com/sniperHW/netgo" 15 | "github.com/sniperHW/rpcgo" 16 | "google.golang.org/protobuf/proto" 17 | ) 18 | 19 | type RPCChannel interface { 20 | Peer() addr.LogicAddr 21 | } 22 | 23 | type rpcChannel struct { 24 | peer addr.LogicAddr 25 | node *node 26 | self *Node 27 | name string 28 | once sync.Once 29 | } 30 | 31 | func (c *rpcChannel) RequestWithContext(ctx context.Context, request *rpcgo.RequestMsg) error { 32 | return c.node.sendMessageWithContext(ctx, c.self, ss.NewMessage(c.peer, c.self.localAddr.LogicAddr(), request)) 33 | } 34 | 35 | func (c *rpcChannel) Request(request *rpcgo.RequestMsg) error { 36 | return c.node.sendMessage(c.self, ss.NewMessage(c.peer, c.self.localAddr.LogicAddr(), request), time.Time{}) 37 | } 38 | 39 | func (c *rpcChannel) Reply(response *rpcgo.ResponseMsg) error { 40 | return c.node.sendMessage(c.self, ss.NewMessage(c.peer, c.self.localAddr.LogicAddr(), response), time.Now().Add(time.Second)) 41 | } 42 | 43 | func (c *rpcChannel) Name() string { 44 | c.once.Do(func() { 45 | selfAddr := c.self.localAddr.LogicAddr().String() 46 | peerAddr := c.peer.String() 47 
| if selfAddr > peerAddr { 48 | c.name = fmt.Sprintf("%s <-> %s", selfAddr, peerAddr) 49 | } else { 50 | c.name = fmt.Sprintf("%s <-> %s", peerAddr, selfAddr) 51 | } 52 | }) 53 | return c.name 54 | } 55 | 56 | func (c *rpcChannel) Identity() uint64 { 57 | return *(*uint64)(unsafe.Pointer(c.node)) 58 | } 59 | 60 | func (c *rpcChannel) Peer() addr.LogicAddr { 61 | return c.peer 62 | } 63 | 64 | func (c *rpcChannel) IsRetryAbleError(err error) bool { 65 | switch err { 66 | case ErrPendingQueueFull, netgo.ErrSendQueueFull, netgo.ErrPushToSendQueueTimeout: 67 | return true 68 | default: 69 | return false 70 | } 71 | } 72 | 73 | // 自连接channel 74 | type selfChannel struct { 75 | self *Node 76 | name string 77 | once sync.Once 78 | } 79 | 80 | func (c *selfChannel) RequestWithContext(ctx context.Context, request *rpcgo.RequestMsg) error { 81 | c.self.Go(func() { 82 | c.self.rpcSvr.svr.OnMessage(context.TODO(), c, request) 83 | }) 84 | return nil 85 | } 86 | 87 | func (c *selfChannel) Request(request *rpcgo.RequestMsg) error { 88 | c.self.Go(func() { 89 | c.self.rpcSvr.svr.OnMessage(context.TODO(), c, request) 90 | }) 91 | return nil 92 | } 93 | 94 | func (c *selfChannel) Reply(response *rpcgo.ResponseMsg) error { 95 | c.self.Go(func() { 96 | c.self.rpcCli.cli.OnMessage(nil, response) 97 | }) 98 | return nil 99 | } 100 | 101 | func (c *selfChannel) Name() string { 102 | c.once.Do(func() { 103 | localAddrStr := c.self.localAddr.LogicAddr().String() 104 | c.name = fmt.Sprintf("%s <-> %s", localAddrStr, localAddrStr) 105 | }) 106 | return c.name 107 | } 108 | 109 | func (c *selfChannel) Peer() addr.LogicAddr { 110 | return c.self.localAddr.LogicAddr() 111 | } 112 | 113 | func (c *selfChannel) IsRetryAbleError(_ error) bool { 114 | return false 115 | } 116 | 117 | type JsonCodec struct { 118 | } 119 | 120 | func (c JsonCodec) Encode(v interface{}) ([]byte, error) { 121 | return json.Marshal(v) 122 | } 123 | 124 | func (c JsonCodec) Decode(b []byte, v interface{}) error { 125 | 
return json.Unmarshal(b, v) 126 | } 127 | 128 | type PbCodec struct { 129 | } 130 | 131 | func (c PbCodec) Encode(v interface{}) ([]byte, error) { 132 | return proto.Marshal(v.(proto.Message)) 133 | } 134 | 135 | func (c PbCodec) Decode(b []byte, v interface{}) error { 136 | return proto.Unmarshal(b, v.(proto.Message)) 137 | } 138 | 139 | type RPCServer struct { 140 | pendingRespCount atomic.Int32 //尚未响应的rpc数量 141 | svr *rpcgo.Server 142 | } 143 | 144 | func (s *RPCServer) SetInInterceptor(interceptor []func(*rpcgo.Replyer, *rpcgo.RequestMsg) bool) { 145 | s.svr.SetInInterceptor(append(interceptor, func(replyer *rpcgo.Replyer, req *rpcgo.RequestMsg) bool { 146 | s.pendingRespCount.Add(1) 147 | replyer.AppendOutInterceptor(func(req *rpcgo.RequestMsg, ret interface{}, err error) { 148 | s.pendingRespCount.Add(-1) 149 | }) 150 | return true 151 | })) 152 | } 153 | 154 | type RPCClient struct { 155 | n *Node 156 | cli *rpcgo.Client 157 | } 158 | 159 | func (c *RPCClient) SetInInterceptor(interceptor []func(*rpcgo.RequestMsg, interface{}, error)) { 160 | c.cli.SetInInterceptor(interceptor) 161 | } 162 | 163 | func (c *RPCClient) SetOutInterceptor(interceptor []func(*rpcgo.RequestMsg, interface{})) { 164 | c.cli.SetOutInterceptor(interceptor) 165 | } 166 | 167 | func (c *RPCClient) AsyncCall(to addr.LogicAddr, method string, arg interface{}, ret interface{}, deadline time.Time, callback func(interface{}, error)) error { 168 | s := c.n 169 | select { 170 | case <-s.die: 171 | return rpcgo.NewError(rpcgo.ErrOther, "server die") 172 | case <-s.started: 173 | default: 174 | return rpcgo.NewError(rpcgo.ErrOther, "server not start") 175 | } 176 | var err error 177 | if to == s.localAddr.LogicAddr() { 178 | err = c.cli.AsyncCall(&selfChannel{self: s}, method, arg, ret, deadline, callback) 179 | } else if n := s.getNodeByLogicAddr(to); n != nil { 180 | err = c.cli.AsyncCall(&rpcChannel{peer: to, node: n, self: s}, method, arg, ret, deadline, callback) 181 | } else { 182 | return 
ErrInvaildNode 183 | } 184 | 185 | switch err { 186 | case ErrPendingQueueFull, netgo.ErrSendQueueFull: 187 | return ErrBusy 188 | default: 189 | return err 190 | } 191 | } 192 | 193 | func (c *RPCClient) CallWithTimeout(to addr.LogicAddr, method string, arg interface{}, ret interface{}, d time.Duration) error { 194 | ctx, cancel := context.WithTimeout(context.Background(), d) 195 | defer cancel() 196 | return c.Call(ctx, to, method, arg, ret) 197 | } 198 | 199 | func (c *RPCClient) Call(ctx context.Context, to addr.LogicAddr, method string, arg interface{}, ret interface{}) error { 200 | s := c.n 201 | select { 202 | case <-s.die: 203 | return rpcgo.NewError(rpcgo.ErrOther, "server die") 204 | case <-s.started: 205 | default: 206 | return rpcgo.NewError(rpcgo.ErrOther, "server not start") 207 | } 208 | if to == s.localAddr.LogicAddr() { 209 | return c.cli.Call(ctx, &selfChannel{self: s}, method, arg, ret) 210 | } else { 211 | if n := s.getNodeByLogicAddr(to); n != nil { 212 | return c.cli.Call(ctx, &rpcChannel{peer: to, node: n, self: s}, method, arg, ret) 213 | } else { 214 | return ErrInvaildNode 215 | } 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 游戏服务端框架 2 | 3 | clustergo是一个简单的网络游戏服务端框架。可以快速构建服务器集群内部以及服务器与客户端时间的通信。集群内部采用tcp通信。服务器客户端之间 4 | 支持tcp,websocket,kcp通信方式。 5 | 6 | 服务器集群间支持两种通信模式:PRC及普通的消息传递。服务器节点使用一个逻辑地址作为标识,只要知道对端逻辑地址就可以与对端通信。集群内部通信节点会建立tcp连接,连接在首次通信请求时建立。 7 | 8 | 9 | ## 逻辑地址 10 | 11 | 各节点使用一个32位逻辑地址标识,逻辑地址被分为3段,高12位表示服务器组,中8位表示节点类型(255保留给harbor使用),低12位表示进程id。 12 | 13 | ## 消息投递与处理 14 | 15 | cluster支持在节点之间传递protobuf消息和raw binary消息。如果只需要使用protobuf可以使用默认的SendPbMessage和RegisterPbMessageHandler。 16 | 17 | 如果需要支持不同的消息格式,可以使用SendBinMessage和RegisterBinMessageHandler自己处理消息的编码和解码。 18 | 19 | 20 | ## rpc 21 | 22 | clustergo默认采用pbrpc 23 | 24 | 将协议文件添加到pbrpc/proto目录中 25 | 26 | 
例如对于echo服务,首先在proto目录添加echo.proto文件,之后填充文件内容如下: 27 | 28 | ```go 29 | syntax = "proto3"; 30 | 31 | option go_package = "../service/echo"; 32 | 33 | message request { 34 | string msg = 1; 35 | } 36 | 37 | message response { 38 | string msg = 1; 39 | } 40 | ``` 41 | 42 | 对于任务服务,请求参数必须命名为request,返回值必须命名为response,go_package永远设置为"../service/echo"。 43 | 44 | 在pbrpc目录执行./gen.sh,这个操作会在pbrpc/service目录下生成echo.pb.go和echo.go两个文件。 45 | 46 | 其中echo.go的内容如下: 47 | 48 | ```go 49 | package echo 50 | 51 | import ( 52 | "context" 53 | "time" 54 | 55 | "github.com/sniperHW/rpcgo" 56 | "github.com/sniperHW/clustergo" 57 | "github.com/sniperHW/clustergo/addr" 58 | ) 59 | 60 | type Replyer struct { 61 | replyer *rpcgo.Replyer 62 | } 63 | 64 | func (this *Replyer) Reply(result *Response, err error) { 65 | this.replyer.Reply(result, err) 66 | } 67 | 68 | type EchoService interface { 69 | OnCall(context.Context, *Replyer, *Request) 70 | } 71 | 72 | func Register(o EchoService) { 73 | clustergo.RegisterRPC("echo", func(ctx context.Context, r *rpcgo.Replyer, arg *Request) { 74 | o.OnCall(ctx, &Replyer{replyer: r}, arg) 75 | }) 76 | } 77 | 78 | func Call(ctx context.Context, peer addr.LogicAddr, arg *Request) (*Response, error) { 79 | var resp Response 80 | err := clustergo.Call(ctx, peer, "echo", arg, &resp) 81 | return &resp, err 82 | } 83 | 84 | func CallWithCallback(peer addr.LogicAddr, deadline time.Time, arg *Request, cb func(*Response, error)) func() bool { 85 | var resp Response 86 | var fn func(interface{}, error) 87 | if cb != nil { 88 | fn = func(ret interface{}, err error) { 89 | if ret != nil { 90 | cb(ret.(*Response), err) 91 | } else { 92 | cb(nil, err) 93 | } 94 | } 95 | } 96 | 97 | return clustergo.CallWithCallback(peer, deadline, "echo", arg, &resp, fn) 98 | } 99 | ``` 100 | 101 | ### 服务端 102 | 103 | 在服务端定义实现了EchoService的类型,通过Register注册服务: 104 | 105 | ```go 106 | 107 | package main 108 | 109 | import ( 110 | "context" 111 | 112 | "github.com/sniperHW/clustergo" 113 | 
"github.com/sniperHW/clustergo/addr" 114 | "github.com/sniperHW/clustergo/example/discovery" 115 | "github.com/sniperHW/clustergo/log/zap" 116 | "github.com/sniperHW/clustergo/pbrpc/service/echo" 117 | ) 118 | 119 | //实现echo.EchoService 120 | type echoService struct { 121 | } 122 | 123 | func (e *echoService) OnCall(ctx context.Context, replyer *echo.Replyer, request *echo.Request) { 124 | clustergo.Logger().Debug("echo:", request.Msg) 125 | replyer.Reply(&echo.Response{Msg: request.Msg}, nil) 126 | } 127 | 128 | func main() { 129 | l := zap.NewZapLogger("1.1.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 130 | clustergo.InitLogger(l.Sugar()) 131 | 132 | //注册服务 133 | echo.Register(&echoService{}) 134 | 135 | localaddr, _ := addr.MakeLogicAddr("1.1.1") 136 | clustergo.Start(discovery.NewClient("127.0.0.1:8110"), localaddr) 137 | 138 | clustergo.Wait() 139 | 140 | } 141 | 142 | ``` 143 | 144 | ### 客户端 145 | 146 | ```go 147 | 148 | package main 149 | 150 | import ( 151 | "context" 152 | 153 | "github.com/sniperHW/clustergo" 154 | "github.com/sniperHW/clustergo/addr" 155 | "github.com/sniperHW/clustergo/example/discovery" 156 | "github.com/sniperHW/clustergo/log/zap" 157 | "github.com/sniperHW/clustergo/pbrpc/service/echo" 158 | ) 159 | 160 | func main() { 161 | l := zap.NewZapLogger("1.2.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 162 | clustergo.InitLogger(l.Sugar()) 163 | localaddr, _ := addr.MakeLogicAddr("1.2.1") 164 | clustergo.Start(discovery.NewClient("127.0.0.1:8110"), localaddr) 165 | 166 | //假设echo服务全部由逻辑地址type=1的节点提供,这里任意获取一个type=1的节点 167 | echoAddr, _ := clustergo.GetAddrByType(1) 168 | 169 | //执行10次同步调用 170 | for i := 0; i < 10; i++ { 171 | resp, err := echo.Call(context.TODO(), echoAddr, &echo.Request{Msg: "hello"}) 172 | l.Sugar().Debug(resp, err) 173 | } 174 | clustergo.Stop() 175 | clustergo.Wait() 176 | } 177 | 178 | 179 | ``` 180 | 181 | ## 集群 182 | 183 | 逻辑地址最高字段位表示集群 184 | 185 | 
集群内的节点可以建立通信连接直接通信(harbor不受这个限制),跨集群节点通信请看下一小节。 186 | 187 | 默认情况下,集群A的节点无法通过GetAddrByType获取到集群B中的节点地址(如果已经获得对端的逻辑地址,则不受此限制)。如果希望将节点暴露到集群外,可以将节点的Export字段设置为true。 188 | 189 | 例如对于一个分服,开房间的游戏。 190 | 191 | 每个服属于一个集群,房间服务器作为公共服务,属于另外的集群。此时需要将战斗节点的Export字段设置为true。游戏服中的节点便可以通过GetAddrByType获取到可用的战斗节点。 192 | 193 | 194 | 195 | ### 跨集群通信 196 | 197 | 如果通信目标与自己不在同一集群内,则无法与目标建立直接通信连接,为了跨集群通信,需要启动harbor节点,消息路由如下: 198 | 199 | 本机 -> 本集群内harbor节点 -> 目标所在集群harbor节点 -> 目标节点 200 | 201 | 多集群部署示意图: 202 | 203 | ![Alt text](cluster.png) 204 | 205 | 如上图所示,有A,B两个集群,集群各自的harbor节点分别连接本集群的center以及harbors center。 206 | 当节点B向节点A发消息时,发现A不在本集群内无法建立直接通信,因此将消息发往harbor B,harbor B接收到路由请求后,根据目标地址 207 | 2.1.1将请求转发给harbor A(harbor A和harbor B连接了同一个harbors center,因此他们之间可以建立直接通信连接)。harbor A 208 | 接收到之后发现目标A可以直达,于是将消息发送给A。 209 | 210 | 211 | ## Stream(在单个连接上建立多个流) 212 | 213 | clustergo支持在同一cluster内的节点之间建立stream。典型的使用方式由gateway接受客户端连接,并且为每一个连接建立一个到gameserver的stream。在gameserver看来,每个stream代表一个客户端连接。 214 | 215 | gameserver.go 216 | 217 | ```go 218 | 219 | package main 220 | 221 | import ( 222 | "github.com/sniperHW/clustergo" 223 | "github.com/sniperHW/clustergo/addr" 224 | "github.com/sniperHW/clustergo/example/discovery" 225 | "github.com/sniperHW/clustergo/logger/zap" 226 | "github.com/xtaci/smux" 227 | ) 228 | 229 | func main() { 230 | l := zap.NewZapLogger("1.1.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 231 | clustergo.InitLogger(l.Sugar()) 232 | localaddr, _ := addr.MakeLogicAddr("1.1.1") 233 | clustergo.Start(discovery.NewClient("127.0.0.1:8110"), localaddr) 234 | clustergo.OnNewStream(func(s *smux.Stream) { 235 | //处理stream 236 | go func() { 237 | buff := make([]byte, 64) 238 | for { 239 | n, err := s.Read(buff) 240 | if err != nil { 241 | break 242 | } 243 | n, err = s.Write(buff[:n]) 244 | if err != nil { 245 | break 246 | } 247 | } 248 | s.Close() 249 | }() 250 | }) 251 | clustergo.Wait() 252 | } 253 | 254 | 255 | ``` 256 | 257 | gateserver.go 258 | 259 | ```go 260 | package main 261 | 
262 | import ( 263 | "io" 264 | "net" 265 | "sync" 266 | 267 | "github.com/sniperHW/netgo" 268 | "github.com/sniperHW/clustergo" 269 | "github.com/sniperHW/clustergo/addr" 270 | "github.com/sniperHW/clustergo/example/discovery" 271 | "github.com/sniperHW/clustergo/logger/zap" 272 | ) 273 | 274 | func main() { 275 | l := zap.NewZapLogger("1.2.1.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 276 | clustergo.InitLogger(l.Sugar()) 277 | localaddr, _ := addr.MakeLogicAddr("1.2.1") 278 | clustergo.Start(discovery.NewClient("127.0.0.1:8110"), localaddr) 279 | 280 | gameAddr, _ := clustergo.GetAddrByType(1) 281 | 282 | _, serve, _ := netgo.ListenTCP("tcp", "127.0.0.1:8113", func(conn *net.TCPConn) { 283 | go func() { 284 | //客户端连接到达,建立到1.1.1的stream 285 | cliStream, err := clustergo.OpenStream(gameAddr) 286 | if err != nil { 287 | conn.Close() 288 | return 289 | } 290 | 291 | defer func() { 292 | conn.Close() 293 | cliStream.Close() 294 | }() 295 | 296 | var wait sync.WaitGroup 297 | wait.Add(2) 298 | 299 | //将来自客户端的数据通过stream透传到1.1.1 300 | go func() { 301 | io.Copy(cliStream, conn) 302 | wait.Done() 303 | }() 304 | 305 | //将来自1.1.1的stream数据透传回客户端 306 | go func() { 307 | io.Copy(conn, cliStream) 308 | wait.Done() 309 | }() 310 | wait.Wait() 311 | }() 312 | }) 313 | go serve() 314 | 315 | clustergo.Wait() 316 | } 317 | ``` 318 | 319 | ## Membership 320 | 321 | clustergo是为游戏服务端设计的分布式框架,游戏服务通常由一个个服务节点组成,每个节点提供一组内聚的功能服务。 322 | 323 | Membership服务保存了所有成员的如下信息: 324 | 325 | ```go 326 | type Node struct { 327 | Addr addr.Addr 328 | Export bool //是否将节点暴露到cluster外部 329 | Available bool //是否可用, 330 | } 331 | ``` 332 | 333 | 节点启动时,首先连接Membership获取配置信息,之后通过配置的逻辑地址从配置中查询自身节点信息。如果找到则使用配置中的网络地址启动服务,否则启动失败。 334 | 335 | 336 | ### 节点发现 337 | 338 | 普通节点只会将配置中Cluster相同或Export为true的节点保存到本地。可以通过`GetAddrByType`来获取某个类型的可用节点(Available=true),如果有多个节点可用`GetAddrByType`将随机返回一个节点。 339 | 340 | 341 | 342 | 343 | -------------------------------------------------------------------------------- 
/membership/etcd/subscribe.go: -------------------------------------------------------------------------------- 1 | package etcd 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "log" 8 | "strings" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | 13 | "github.com/sniperHW/clustergo" 14 | "github.com/sniperHW/clustergo/membership" 15 | clientv3 "go.etcd.io/etcd/client/v3" 16 | ) 17 | 18 | type Subscribe struct { 19 | members map[string]*membership.Node //配置中的节点 20 | alive map[string]struct{} //活动节点 21 | once sync.Once 22 | Cfg clientv3.Config 23 | PrefixConfig string 24 | PrefixAlive string 25 | Logger clustergo.Logger 26 | TTL time.Duration 27 | rversionConfig int64 28 | rversionAlive int64 29 | cb func(membership.MemberInfo) 30 | watchConfig clientv3.WatchChan 31 | watchAlive clientv3.WatchChan 32 | closeFunc context.CancelFunc 33 | closed atomic.Bool 34 | } 35 | 36 | func (ectd *Subscribe) fetchLogicAddr(str string) string { 37 | v := strings.Split(str, "/") 38 | if len(v) > 0 { 39 | return v[len(v)-1] 40 | } else { 41 | return "" 42 | } 43 | } 44 | 45 | func (etcd *Subscribe) fetchMembers(ctx context.Context, cli *clientv3.Client) error { 46 | etcd.members = map[string]*membership.Node{} 47 | resp, err := cli.Get(ctx, etcd.PrefixConfig, clientv3.WithPrefix()) 48 | if err != nil { 49 | return err 50 | } 51 | for _, v := range resp.Kvs { 52 | var n membership.Node 53 | if err := json.Unmarshal(v.Value, &n); err == nil && n.Addr.LogicAddr().String() == etcd.fetchLogicAddr(string(v.Key)) { 54 | nn := membership.Node{ 55 | Addr: n.Addr, 56 | Available: n.Available, 57 | Export: n.Export, 58 | } 59 | etcd.members[nn.Addr.LogicAddr().String()] = &nn 60 | } else if err != nil { 61 | etcd.errorf("json.Unmarshal error:%v key:%s value:%s", err, string(v.Key), string(v.Value)) 62 | } 63 | } 64 | 65 | etcd.rversionConfig = resp.Header.GetRevision() 66 | return nil 67 | } 68 | 69 | func (etcd *Subscribe) fetchAlive(ctx context.Context, cli *clientv3.Client) 
error { 70 | etcd.alive = map[string]struct{}{} 71 | resp, err := cli.Get(ctx, etcd.PrefixAlive, clientv3.WithPrefix()) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | for _, v := range resp.Kvs { 77 | key := etcd.fetchLogicAddr(string(v.Key)) 78 | etcd.alive[key] = struct{}{} 79 | } 80 | 81 | etcd.rversionAlive = resp.Header.GetRevision() 82 | 83 | return nil 84 | } 85 | 86 | func (etcd *Subscribe) watch(ctx context.Context, cli *clientv3.Client) (err error) { 87 | 88 | /*if etcd.LogicAddr == "" { 89 | etcd.leaseCh = make(chan *clientv3.LeaseKeepAliveResponse) 90 | } else { 91 | if etcd.leaseCh == nil { 92 | etcd.leaseCh, err = cli.Lease.KeepAlive(ctx, etcd.leaseID) 93 | if err != nil { 94 | return err 95 | } 96 | } 97 | }*/ 98 | 99 | if etcd.watchConfig == nil { 100 | etcd.watchConfig = cli.Watch(ctx, etcd.PrefixConfig, clientv3.WithPrefix(), clientv3.WithRev(etcd.rversionConfig+1)) 101 | } 102 | 103 | if etcd.watchAlive == nil { 104 | etcd.watchAlive = cli.Watch(ctx, etcd.PrefixAlive, clientv3.WithPrefix(), clientv3.WithRev(etcd.rversionAlive+1)) 105 | } 106 | 107 | for { 108 | select { 109 | /*case _, ok := <-etcd.leaseCh: 110 | if !ok { 111 | if respLeaseGrant, err := cli.Lease.Grant(ctx, int64(etcd.TTL/time.Second)); err != nil { 112 | return err 113 | } else { 114 | etcd.leaseID = respLeaseGrant.ID 115 | _, err = cli.Put(ctx, fmt.Sprintf("%s%s", etcd.PrefixAlive, etcd.LogicAddr), fmt.Sprintf("%x", respLeaseGrant.ID), clientv3.WithLease(respLeaseGrant.ID)) 116 | if err != nil { 117 | return err 118 | } 119 | etcd.leaseCh, err = cli.Lease.KeepAlive(ctx, etcd.leaseID) 120 | if err != nil { 121 | return err 122 | } 123 | } 124 | }*/ 125 | case v := <-etcd.watchConfig: 126 | if v.Canceled { 127 | etcd.watchConfig = nil 128 | return v.Err() 129 | } 130 | etcd.rversionConfig = v.Header.GetRevision() 131 | for _, e := range v.Events { 132 | var nodeinfo membership.MemberInfo 133 | key := etcd.fetchLogicAddr(string(e.Kv.Key)) 134 | switch e.Type { 135 | case 
clientv3.EventTypePut: 136 | var n membership.Node 137 | if err := json.Unmarshal(e.Kv.Value, &n); err == nil && n.Addr.LogicAddr().String() == key { 138 | 139 | nn := membership.Node{ 140 | Addr: n.Addr, 141 | Export: n.Export, 142 | } 143 | 144 | if _, ok := etcd.alive[key]; ok && n.Available { 145 | nn.Available = true 146 | } 147 | 148 | if _, ok := etcd.members[key]; ok { 149 | nodeinfo.Update = append(nodeinfo.Update, nn) 150 | } else { 151 | nodeinfo.Add = append(nodeinfo.Add, nn) 152 | } 153 | 154 | etcd.members[key] = &nn 155 | 156 | etcd.cb(nodeinfo) 157 | 158 | } else if err != nil { 159 | etcd.errorf("json.Unmarshal error:%v key:%s value:%s", err, string(e.Kv.Key), string(e.Kv.Value)) 160 | } 161 | case clientv3.EventTypeDelete: 162 | if n, ok := etcd.members[key]; ok { 163 | delete(etcd.members, key) 164 | 165 | nn := membership.Node{ 166 | Addr: n.Addr, 167 | Export: n.Export, 168 | } 169 | 170 | if _, ok := etcd.alive[key]; ok && n.Available { 171 | nn.Available = true 172 | } 173 | 174 | nodeinfo.Remove = append(nodeinfo.Remove, nn) 175 | etcd.cb(nodeinfo) 176 | } 177 | } 178 | } 179 | case v := <-etcd.watchAlive: 180 | if v.Canceled { 181 | etcd.watchAlive = nil 182 | return v.Err() 183 | } 184 | etcd.rversionAlive = v.Header.GetRevision() 185 | for _, e := range v.Events { 186 | var nodeinfo membership.MemberInfo 187 | key := etcd.fetchLogicAddr(string(e.Kv.Key)) 188 | switch e.Type { 189 | case clientv3.EventTypePut: 190 | etcd.alive[key] = struct{}{} 191 | if n, ok := etcd.members[key]; ok && n.Available { 192 | nodeinfo.Update = append(nodeinfo.Update, *n) 193 | etcd.cb(nodeinfo) 194 | } 195 | case clientv3.EventTypeDelete: 196 | delete(etcd.alive, key) 197 | if n, ok := etcd.members[key]; ok { 198 | nn := *n 199 | nn.Available = false 200 | nodeinfo.Update = append(nodeinfo.Update, nn) 201 | etcd.cb(nodeinfo) 202 | } 203 | } 204 | } 205 | } 206 | } 207 | } 208 | 209 | func (etcd *Subscribe) subscribe(ctx context.Context, cli *clientv3.Client) 
{ 210 | var err error 211 | if etcd.rversionConfig == 0 { 212 | ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*5) 213 | defer cancel() 214 | //获取配置 215 | if err = etcd.fetchMembers(ctxWithTimeout, cli); err != nil { 216 | etcd.errorf("fetchMembers() error:%v", err) 217 | return 218 | } 219 | 220 | //获取alive信息 221 | if err = etcd.fetchAlive(ctxWithTimeout, cli); err != nil { 222 | etcd.errorf("fetchAlive() error:%v", err) 223 | return 224 | } 225 | 226 | /*if etcd.LogicAddr != "" && etcd.leaseID == 0 { 227 | //创建lease 228 | if respLeaseGrant, err := cli.Lease.Grant(ctxWithTimeout, int64(etcd.TTL/time.Second)); err != nil { 229 | etcd.errorf("Lease.Grant() error:%v", err) 230 | return 231 | } else { 232 | etcd.leaseID = respLeaseGrant.ID 233 | _, err = cli.Put(ctxWithTimeout, fmt.Sprintf("%s%s", etcd.PrefixAlive, etcd.LogicAddr), fmt.Sprintf("%x", etcd.leaseID), clientv3.WithLease(etcd.leaseID)) 234 | if err != nil { 235 | etcd.errorf("alive put error:%v", err) 236 | return 237 | } 238 | } 239 | }*/ 240 | 241 | var nodeinfo membership.MemberInfo 242 | for k, v := range etcd.members { 243 | if _, ok := etcd.alive[k]; ok && v.Available { 244 | v.Available = true 245 | } else { 246 | v.Available = false 247 | } 248 | nodeinfo.Add = append(nodeinfo.Add, *v) 249 | } 250 | 251 | etcd.cb(nodeinfo) 252 | } 253 | 254 | if err = etcd.watch(ctx, cli); err != nil { 255 | etcd.errorf("watch err:%v", err) 256 | } else { 257 | log.Println("watch break with no error") 258 | } 259 | } 260 | 261 | func (etcd *Subscribe) Close() { 262 | if etcd.closed.CompareAndSwap(false, true) { 263 | if etcd.closeFunc != nil { 264 | etcd.closeFunc() 265 | } 266 | } 267 | } 268 | 269 | func (etcd *Subscribe) Subscribe(cb func(membership.MemberInfo)) error { 270 | 271 | once := false 272 | 273 | etcd.once.Do(func() { 274 | once = true 275 | }) 276 | 277 | if once { 278 | if etcd.closed.Load() { 279 | return errors.New("closed") 280 | } 281 | 282 | etcd.cb = cb 283 | if etcd.TTL == 0 { 
284 | etcd.TTL = time.Second * 10 285 | } 286 | 287 | cli, err := clientv3.New(etcd.Cfg) 288 | if err != nil { 289 | etcd.errorf("clientv3.New() error:%v", err) 290 | return err 291 | } 292 | 293 | ctx, cancel := context.WithCancel(context.Background()) 294 | 295 | etcd.closeFunc = cancel 296 | 297 | done := ctx.Done() 298 | 299 | go func() { 300 | for { 301 | etcd.subscribe(ctx, cli) 302 | select { 303 | case <-done: 304 | cli.Close() 305 | return 306 | default: 307 | time.Sleep(time.Millisecond * 100) 308 | } 309 | } 310 | }() 311 | } 312 | 313 | return nil 314 | } 315 | 316 | func (etcd *Subscribe) errorf(format string, v ...any) { 317 | if etcd.Logger != nil { 318 | etcd.Logger.Errorf(format, v...) 319 | } else { 320 | log.Printf(format, v...) 321 | } 322 | } 323 | -------------------------------------------------------------------------------- /example/membership/membership.go: -------------------------------------------------------------------------------- 1 | package membership 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "net" 10 | "sort" 11 | "sync" 12 | "time" 13 | 14 | "log" 15 | 16 | "github.com/sniperHW/clustergo/addr" 17 | "github.com/sniperHW/clustergo/codec/buffer" 18 | "github.com/sniperHW/clustergo/membership" 19 | "github.com/sniperHW/netgo" 20 | ) 21 | 22 | type Node struct { 23 | LogicAddr string 24 | NetAddr string 25 | Export bool 26 | Available bool 27 | } 28 | 29 | const ( 30 | addNode = 1 31 | remNode = 2 32 | modNode = 3 33 | nodeInfo = 4 34 | subscribe = 5 35 | ) 36 | 37 | type AddNode struct { 38 | Node Node 39 | } 40 | 41 | type RemNode struct { 42 | LogicAddr string 43 | } 44 | 45 | type ModNode struct { 46 | Node Node 47 | } 48 | 49 | type NodeInfo struct { 50 | Nodes []Node 51 | } 52 | 53 | type Subscribe struct { 54 | } 55 | 56 | type codec struct { 57 | buff []byte 58 | w int 59 | r int 60 | reader buffer.BufferReader 61 | writer buffer.BufferWriter 62 | } 63 | 64 | func (cc 
*codec) Encode(buffs net.Buffers, o interface{}) (net.Buffers, int) { 65 | if buff, err := json.Marshal(o); err != nil { 66 | log.Println("json.Marshal error:", err) 67 | return buffs, 0 68 | } else { 69 | b := make([]byte, 0, 8) 70 | b = cc.writer.AppendUint32(b, uint32(len(buff)+4)) 71 | switch o.(type) { 72 | case *AddNode: 73 | b = cc.writer.AppendUint32(b, addNode) 74 | case *RemNode: 75 | b = cc.writer.AppendUint32(b, remNode) 76 | case *ModNode: 77 | b = cc.writer.AppendUint32(b, modNode) 78 | case *NodeInfo: 79 | b = cc.writer.AppendUint32(b, nodeInfo) 80 | case *Subscribe: 81 | b = cc.writer.AppendUint32(b, subscribe) 82 | default: 83 | log.Println("invaild packet") 84 | return buffs, 0 85 | } 86 | return append(buffs, b, buff), len(b) + len(buff) 87 | } 88 | } 89 | 90 | func (cc *codec) read(readable netgo.ReadAble, deadline time.Time) (int, error) { 91 | if err := readable.SetReadDeadline(deadline); err != nil { 92 | return 0, err 93 | } else { 94 | return readable.Read(cc.buff[cc.w:]) 95 | } 96 | } 97 | 98 | func (cc *codec) Recv(readable netgo.ReadAble, deadline time.Time) (pkt []byte, err error) { 99 | sizeLen := 4 100 | for { 101 | unpackSize := cc.w - cc.r 102 | if unpackSize >= sizeLen { 103 | cc.reader.Reset(cc.buff[cc.r:cc.w]) 104 | payload := int(cc.reader.GetUint32()) 105 | 106 | if payload == 0 { 107 | return nil, fmt.Errorf("zero payload") 108 | } 109 | 110 | totalSize := payload + sizeLen 111 | 112 | if totalSize <= unpackSize { 113 | cc.r += sizeLen 114 | pkt := cc.buff[cc.r : cc.r+payload] 115 | cc.r += payload 116 | if cc.r == cc.w { 117 | cc.r = 0 118 | cc.w = 0 119 | } 120 | return pkt, nil 121 | } else { 122 | if totalSize > cap(cc.buff) { 123 | buff := make([]byte, totalSize) 124 | copy(buff, cc.buff[cc.r:cc.w]) 125 | cc.buff = buff 126 | } else { 127 | //空间足够容纳下一个包, 128 | copy(cc.buff, cc.buff[cc.r:cc.w]) 129 | } 130 | cc.w = cc.w - cc.r 131 | cc.r = 0 132 | } 133 | } else if cc.r > 0 { 134 | copy(cc.buff, cc.buff[cc.r:cc.w]) 135 | 
cc.w = cc.w - cc.r 136 | cc.r = 0 137 | } 138 | 139 | var n int 140 | n, err = cc.read(readable, deadline) 141 | //log.Println(n, err) 142 | if n > 0 { 143 | cc.w += n 144 | } 145 | if nil != err { 146 | return 147 | } 148 | } 149 | } 150 | 151 | func (cc *codec) Decode(payload []byte) (interface{}, error) { 152 | cc.reader.Reset(payload) 153 | cmd := cc.reader.GetUint32() 154 | //log.Println("Decode", cmd, len(payload)) 155 | switch cmd { 156 | case addNode: 157 | o := &AddNode{} 158 | err := json.Unmarshal(payload[4:], o) 159 | return o, err 160 | case remNode: 161 | o := &RemNode{} 162 | err := json.Unmarshal(payload[4:], o) 163 | return o, err 164 | case modNode: 165 | o := &ModNode{} 166 | err := json.Unmarshal(payload[4:], o) 167 | return o, err 168 | case nodeInfo: 169 | o := &NodeInfo{} 170 | err := json.Unmarshal(payload[4:], o) 171 | return o, err 172 | case subscribe: 173 | o := &Subscribe{} 174 | err := json.Unmarshal(payload[4:], o) 175 | return o, err 176 | } 177 | return nil, errors.New("invaild object") 178 | } 179 | 180 | type memberShipSvr struct { 181 | sync.Mutex 182 | nodes map[string]*Node 183 | clients map[*netgo.AsynSocket]struct{} 184 | } 185 | 186 | func NewServer() *memberShipSvr { 187 | return &memberShipSvr{ 188 | nodes: map[string]*Node{}, 189 | clients: map[*netgo.AsynSocket]struct{}{}, 190 | } 191 | } 192 | 193 | func (svr *memberShipSvr) pub(socket *netgo.AsynSocket) { 194 | var nodeinfo NodeInfo 195 | for _, v := range svr.nodes { 196 | nodeinfo.Nodes = append(nodeinfo.Nodes, *v) 197 | } 198 | 199 | if socket != nil { 200 | for s, _ := range svr.clients { 201 | s.Send(&nodeinfo) 202 | } 203 | } else { 204 | for s, _ := range svr.clients { 205 | if s == socket { 206 | s.Send(&nodeinfo) 207 | } 208 | } 209 | } 210 | } 211 | 212 | func (svr *memberShipSvr) Start(service string, config []*Node) error { 213 | for _, v := range config { 214 | svr.nodes[v.LogicAddr] = v 215 | } 216 | 217 | _, serve, err := netgo.ListenTCP("tcp", service, 
func(conn *net.TCPConn) { 218 | log.Println("new client") 219 | cc := &codec{ 220 | buff: make([]byte, 65535), 221 | reader: buffer.NewReader(binary.BigEndian, nil), 222 | writer: buffer.NeWriter(binary.BigEndian), 223 | } 224 | netgo.NewAsynSocket(netgo.NewTcpSocket(conn, cc), 225 | netgo.AsynSocketOption{ 226 | Codec: cc, 227 | AutoRecv: true, 228 | }).SetCloseCallback(func(s *netgo.AsynSocket, _ error) { 229 | svr.Lock() 230 | delete(svr.clients, s) 231 | svr.Unlock() 232 | }).SetPacketHandler(func(_ context.Context, as *netgo.AsynSocket, packet interface{}) error { 233 | //log.Println("on packet", packet) 234 | switch packet := packet.(type) { 235 | case *AddNode: 236 | svr.Lock() 237 | defer svr.Unlock() 238 | if _, ok := svr.nodes[packet.Node.LogicAddr]; !ok { 239 | svr.nodes[packet.Node.LogicAddr] = &packet.Node 240 | svr.pub(nil) 241 | } 242 | case *RemNode: 243 | svr.Lock() 244 | defer svr.Unlock() 245 | if _, ok := svr.nodes[packet.LogicAddr]; ok { 246 | delete(svr.nodes, packet.LogicAddr) 247 | svr.pub(nil) 248 | } 249 | case *ModNode: 250 | svr.Lock() 251 | defer svr.Unlock() 252 | if _, ok := svr.nodes[packet.Node.LogicAddr]; ok { 253 | svr.nodes[packet.Node.LogicAddr] = &packet.Node 254 | svr.pub(nil) 255 | } 256 | case *Subscribe: 257 | //log.Println("on Subscribe") 258 | svr.Lock() 259 | defer svr.Unlock() 260 | svr.clients[as] = struct{}{} 261 | svr.pub(as) 262 | 263 | } 264 | return nil 265 | }).Recv() 266 | 267 | }) 268 | 269 | if err != nil { 270 | return err 271 | } else { 272 | serve() 273 | return nil 274 | } 275 | } 276 | 277 | type memberShipCli struct { 278 | svrService string 279 | nodes []membership.Node 280 | } 281 | 282 | func NewClient(svr string) *memberShipCli { 283 | return &memberShipCli{ 284 | svrService: svr, 285 | } 286 | } 287 | 288 | func (c *memberShipCli) Close() { 289 | 290 | } 291 | 292 | // 订阅变更 293 | func (c *memberShipCli) Subscribe(updateCB func(membership.MemberInfo)) error { 294 | dialer := &net.Dialer{} 295 | for { 
296 | if conn, err := dialer.Dial("tcp", c.svrService); err == nil { 297 | cc := &codec{ 298 | buff: make([]byte, 65535), 299 | reader: buffer.NewReader(binary.BigEndian, nil), 300 | writer: buffer.NeWriter(binary.BigEndian), 301 | } 302 | as := netgo.NewAsynSocket(netgo.NewTcpSocket(conn.(*net.TCPConn), cc), 303 | netgo.AsynSocketOption{ 304 | Codec: cc, 305 | AutoRecv: true, 306 | }).SetCloseCallback(func(_ *netgo.AsynSocket, _ error) { 307 | go c.Subscribe(updateCB) 308 | }).SetPacketHandler(func(_ context.Context, as *netgo.AsynSocket, packet interface{}) error { 309 | locals := c.nodes 310 | updates := []membership.Node{} 311 | for _, v := range packet.(*NodeInfo).Nodes { 312 | if address, err := addr.MakeAddr(v.LogicAddr, v.NetAddr); err == nil { 313 | updates = append(updates, membership.Node{ 314 | Export: v.Export, 315 | Available: v.Available, 316 | Addr: address, 317 | }) 318 | } 319 | } 320 | 321 | sort.Slice(updates, func(l int, r int) bool { 322 | return updates[l].Addr.LogicAddr() < updates[r].Addr.LogicAddr() 323 | }) 324 | 325 | var nodeInfo membership.MemberInfo 326 | 327 | i := 0 328 | j := 0 329 | 330 | for i < len(updates) && j < len(locals) { 331 | nodej := locals[j] 332 | nodei := updates[i] 333 | 334 | if nodei.Addr.LogicAddr() == nodej.Addr.LogicAddr() { 335 | if nodei.Addr.NetAddr().String() != nodej.Addr.NetAddr().String() || 336 | nodei.Available != nodej.Available { 337 | nodeInfo.Update = append(nodeInfo.Update, nodei) 338 | } 339 | i++ 340 | j++ 341 | } else if nodei.Addr.LogicAddr() > nodej.Addr.LogicAddr() { 342 | //local 1 2 3 4 5 6 343 | //update 1 2 4 5 6 344 | //移除节点 345 | nodeInfo.Remove = append(nodeInfo.Remove, nodej) 346 | j++ 347 | } else { 348 | //local 1 2 4 5 6 349 | //update 1 2 3 4 5 6 350 | //添加节点 351 | nodeInfo.Add = append(nodeInfo.Add, nodei) 352 | i++ 353 | } 354 | } 355 | 356 | nodeInfo.Add = append(nodeInfo.Add, updates[i:]...) 357 | 358 | nodeInfo.Remove = append(nodeInfo.Remove, locals[j:]...) 
359 | 360 | c.nodes = updates 361 | updateCB(nodeInfo) 362 | return nil 363 | }).Recv() 364 | as.Send(&Subscribe{}) 365 | return nil 366 | } else { 367 | time.Sleep(time.Second) 368 | } 369 | } 370 | } 371 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 2 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 3 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 4 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 5 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 6 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 7 | github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= 8 | github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 9 | github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= 10 | github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 11 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 13 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 14 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 15 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 16 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 17 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 18 | 
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 19 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 20 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 21 | github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 22 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 23 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 24 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 25 | github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= 26 | github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 27 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 28 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 29 | github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 30 | github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= 31 | github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= 32 | github.com/klauspost/reedsolomon v1.9.9 h1:qCL7LZlv17xMixl55nq2/Oa1Y86nfO8EqDfv2GHND54= 33 | github.com/klauspost/reedsolomon v1.9.9/go.mod h1:O7yFFHiQwDR6b2t63KPUpccPtNdp5ADgh1gg4fd12wo= 34 | github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 h1:ULR/QWMgcgRiZLUjSSJMU+fW+RDMstRdmnDWj9Q+AsA= 35 | github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls= 36 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 37 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 38 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 39 | 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 40 | github.com/redis/go-redis/v9 v9.5.2 h1:L0L3fcSNReTRGyZ6AqAEN0K56wYeYAwapBIhkvh0f3E= 41 | github.com/redis/go-redis/v9 v9.5.2/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= 42 | github.com/sniperHW/netgo v0.0.0-20231214060736-2b26e624b5f6 h1:dcUm3Yzg3Y8KVerhyh2VtYk6DfaVHnGnMr/tA5xal68= 43 | github.com/sniperHW/netgo v0.0.0-20231214060736-2b26e624b5f6/go.mod h1:5XP+qSGWoPip7z6RNZQxr3oDJqclD/4zx57dxaSa5Zc= 44 | github.com/sniperHW/rpcgo v0.0.0-20250501095528-a98841003660 h1:eZSObHC+UEsmFfG27T4NXGcD2un3ryt7TWrngvveVUc= 45 | github.com/sniperHW/rpcgo v0.0.0-20250501095528-a98841003660/go.mod h1:fCF3WWEs8aTTNZd4vd/9KaWTpwqFO9W4xbKTo7DqUvU= 46 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 47 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 48 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 49 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 50 | github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk= 51 | github.com/templexxx/cpu v0.0.7 h1:pUEZn8JBy/w5yzdYWgx+0m0xL9uk6j4K91C5kOViAzo= 52 | github.com/templexxx/cpu v0.0.7/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk= 53 | github.com/templexxx/xorsimd v0.4.1 h1:iUZcywbOYDRAZUasAs2eSCUW8eobuZDy0I9FJiORkVg= 54 | github.com/templexxx/xorsimd v0.4.1/go.mod h1:W+ffZz8jJMH2SXwuKu9WhygqBMbFnp14G2fqEr8qaNo= 55 | github.com/tjfoc/gmsm v1.3.2 h1:7JVkAn5bvUJ7HtU08iW6UiD+UTmJTIToHCfeFzkcCxM= 56 | github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= 57 | github.com/xtaci/kcp-go/v5 v5.6.1 h1:Pwn0aoeNSPF9dTS7IgiPXn0HEtaIlVb6y5UKWPsx8bI= 58 | github.com/xtaci/kcp-go/v5 v5.6.1/go.mod h1:W3kVPyNYwZ06p79dNwFWQOVFrdcBpDBsdyvK8moQrYo= 59 | github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae 
h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM= 60 | github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= 61 | github.com/xtaci/smux v1.5.24 h1:77emW9dtnOxxOQ5ltR+8BbsX1kzcOxQ5gB+aaV9hXOY= 62 | github.com/xtaci/smux v1.5.24/go.mod h1:OMlQbT5vcgl2gb49mFkYo6SMf+zP3rcjcwQz7ZU7IGY= 63 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 64 | github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 65 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 66 | go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= 67 | go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= 68 | go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= 69 | go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= 70 | go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= 71 | go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= 72 | go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= 73 | go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= 74 | go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= 75 | go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 76 | go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= 77 | go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= 78 | golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= 79 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 80 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 81 | golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 82 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 83 | golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 84 | golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= 85 | golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= 86 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 87 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 88 | golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= 89 | golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 90 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 91 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 92 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 93 | golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 94 | golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 95 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 96 | golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= 97 | golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= 98 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 99 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 100 | golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 101 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 102 | golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= 103 | golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= 104 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 105 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 106 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 107 | golang.org/x/sys v0.0.0-20200808120158-1030fc2bf1d9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 108 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 109 | golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= 110 | golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 111 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 112 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 113 | golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= 114 | golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 115 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 116 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 117 | golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 118 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 119 | golang.org/x/tools v0.0.0-20200808161706-5bf02b21f123/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= 120 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 121 | golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= 122 | golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= 123 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 124 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 125 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 126 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 127 | google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= 128 | google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= 129 | google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= 130 | google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= 131 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= 132 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= 133 | google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= 134 | google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= 135 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 136 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 137 | google.golang.org/protobuf v1.31.0 
h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 138 | google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 139 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 140 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 141 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= 142 | gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= 143 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 144 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 145 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 146 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 147 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 148 | rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= 149 | -------------------------------------------------------------------------------- /node.go: -------------------------------------------------------------------------------- 1 | package clustergo 2 | 3 | import ( 4 | "container/list" 5 | "context" 6 | "encoding/binary" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "io" 11 | "math/rand" 12 | "net" 13 | "sync" 14 | "time" 15 | 16 | "github.com/sniperHW/clustergo/addr" 17 | "github.com/sniperHW/clustergo/codec/ss" 18 | "github.com/sniperHW/clustergo/membership" 19 | "github.com/sniperHW/clustergo/pkg/crypto" 20 | "github.com/sniperHW/netgo" 21 | "github.com/sniperHW/rpcgo" 22 | "github.com/xtaci/smux" 23 | "google.golang.org/protobuf/proto" 24 | ) 25 | 26 | // 从配置系统获得的node视图缓存 27 | type nodeCache struct { 28 | sync.RWMutex 29 | localAddr addr.LogicAddr 30 | allnodes map[addr.LogicAddr]*node //所有的节点 31 | nodes map[uint32][]*node //普通avalabale节点,按type分组 32 
| harbors map[uint32][]*node //harbor avalabale节点,按cluster分组 33 | initOnce sync.Once 34 | initC chan struct{} 35 | } 36 | 37 | func (cache *nodeCache) close() { 38 | cache.RLock() 39 | for _, v := range cache.allnodes { 40 | v.closeSocket() 41 | } 42 | cache.RUnlock() 43 | } 44 | 45 | func (cache *nodeCache) waitInit() { 46 | <-cache.initC 47 | } 48 | 49 | func (cache *nodeCache) addNode(n *node) { 50 | if nodeArray, ok := cache.nodes[n.addr.LogicAddr().Type()]; !ok { 51 | cache.nodes[n.addr.LogicAddr().Type()] = []*node{n} 52 | } else { 53 | cache.nodes[n.addr.LogicAddr().Type()] = append(nodeArray, n) 54 | } 55 | } 56 | 57 | func (cache *nodeCache) removeNode(n *node) { 58 | if nodeArray, ok := cache.nodes[n.addr.LogicAddr().Type()]; ok { 59 | for k, v := range nodeArray { 60 | if v == n { 61 | nodeArray[k] = nodeArray[len(nodeArray)-1] 62 | cache.nodes[n.addr.LogicAddr().Type()] = nodeArray[:len(nodeArray)-1] 63 | break 64 | } 65 | } 66 | } 67 | } 68 | 69 | func (cache *nodeCache) addHarbor(harbor *node) { 70 | if harborArray, ok := cache.harbors[harbor.addr.LogicAddr().Cluster()]; !ok { 71 | cache.harbors[harbor.addr.LogicAddr().Cluster()] = []*node{harbor} 72 | } else { 73 | cache.harbors[harbor.addr.LogicAddr().Cluster()] = append(harborArray, harbor) 74 | } 75 | } 76 | 77 | func (cache *nodeCache) removeHarbor(harbor *node) { 78 | if harborArray, ok := cache.harbors[harbor.addr.LogicAddr().Cluster()]; ok { 79 | for k, v := range harborArray { 80 | if v == harbor { 81 | harborArray[k] = harborArray[len(harborArray)-1] 82 | cache.harbors[harbor.addr.LogicAddr().Cluster()] = harborArray[:len(harborArray)-1] 83 | break 84 | } 85 | } 86 | } 87 | } 88 | 89 | func (cache *nodeCache) onNodeInfoUpdate(self *Node, nodeinfo membership.MemberInfo) { 90 | 91 | defer cache.initOnce.Do(func() { 92 | logger.Debug("init ok") 93 | close(cache.initC) 94 | }) 95 | 96 | cache.Lock() 97 | defer cache.Unlock() 98 | 99 | for _, v := range nodeinfo.Add { 100 | if v.Export || 
v.Addr.LogicAddr().Cluster() == cache.localAddr.Cluster() || 101 | (cache.localAddr.Type() == addr.HarbarType && v.Addr.LogicAddr().Type() == addr.HarbarType) { 102 | n := &node{ 103 | addr: v.Addr, 104 | pendingMsg: list.New(), 105 | available: v.Available, 106 | } 107 | cache.allnodes[n.addr.LogicAddr()] = n 108 | if n.available { 109 | if n.addr.LogicAddr().Type() != addr.HarbarType { 110 | cache.addNode(n) 111 | } else { 112 | cache.addHarbor(n) 113 | } 114 | } 115 | } 116 | } 117 | 118 | for _, v := range nodeinfo.Remove { 119 | if v.Export || v.Addr.LogicAddr().Cluster() == cache.localAddr.Cluster() || 120 | (cache.localAddr.Type() == addr.HarbarType && v.Addr.LogicAddr().Type() == addr.HarbarType) { 121 | if v.Addr.LogicAddr() == cache.localAddr { 122 | go self.Stop() 123 | return 124 | } else { 125 | n := cache.allnodes[v.Addr.LogicAddr()] 126 | delete(cache.allnodes, n.addr.LogicAddr()) 127 | if n.available { 128 | if n.addr.LogicAddr().Type() != addr.HarbarType { 129 | cache.removeNode(n) 130 | } else { 131 | cache.removeHarbor(n) 132 | } 133 | } 134 | n.closeSocket() 135 | } 136 | } 137 | } 138 | 139 | for _, v := range nodeinfo.Update { 140 | if v.Export || v.Addr.LogicAddr().Cluster() == cache.localAddr.Cluster() || 141 | (cache.localAddr.Type() == addr.HarbarType && v.Addr.LogicAddr().Type() == addr.HarbarType) { 142 | n := cache.allnodes[v.Addr.LogicAddr()] 143 | 144 | if v.Addr.NetAddr().String() != n.addr.NetAddr().String() { 145 | //网络地址发生变更 146 | if n.addr.LogicAddr() == cache.localAddr { 147 | go self.Stop() 148 | return 149 | } else { 150 | n.closeSocket() 151 | n.addr.UpdateNetAddr(v.Addr.NetAddr()) 152 | } 153 | } 154 | 155 | if v.Available { 156 | if !n.available { 157 | n.available = true 158 | if n.addr.LogicAddr().Type() != addr.HarbarType { 159 | cache.addNode(n) 160 | } else { 161 | cache.addHarbor(n) 162 | } 163 | } 164 | } else { 165 | if n.available { 166 | n.available = false 167 | if n.addr.LogicAddr().Type() != addr.HarbarType { 
168 | cache.removeNode(n) 169 | } else { 170 | cache.removeHarbor(n) 171 | } 172 | } 173 | } 174 | } 175 | } 176 | } 177 | 178 | func (cache *nodeCache) getHarbor(cluster uint32, m addr.LogicAddr) *node { 179 | cache.RLock() 180 | defer cache.RUnlock() 181 | if harbors, ok := cache.harbors[cluster]; !ok || len(harbors) == 0 { 182 | return nil 183 | } else { 184 | return harbors[int(m)%len(harbors)] 185 | } 186 | } 187 | 188 | func (cache *nodeCache) getNormalNode(tt uint32, n int) *node { 189 | cache.RLock() 190 | defer cache.RUnlock() 191 | if nodeArray, ok := cache.nodes[tt]; !ok || len(nodeArray) == 0 { 192 | return nil 193 | } else { 194 | if n == 0 { 195 | return nodeArray[int(rand.Int31())%len(nodeArray)] 196 | } else { 197 | return nodeArray[n%len(nodeArray)] 198 | } 199 | } 200 | } 201 | 202 | func (cache *nodeCache) getNodeByLogicAddr(logicAddr addr.LogicAddr) *node { 203 | cache.RLock() 204 | defer cache.RUnlock() 205 | return cache.allnodes[logicAddr] 206 | } 207 | 208 | type pendingMessage struct { 209 | message interface{} 210 | deadline time.Time 211 | ctx context.Context 212 | } 213 | 214 | type streamClient struct { 215 | sync.Mutex 216 | session *smux.Session 217 | } 218 | 219 | type node struct { 220 | sync.Mutex 221 | addr addr.Addr 222 | socket *netgo.AsynSocket 223 | pendingMsg *list.List 224 | available bool 225 | streamCli streamClient 226 | } 227 | 228 | func (n *node) handshake(self *Node, conn *net.TCPConn, isStream bool) error { 229 | 230 | j, err := json.Marshal(&Handshake{ 231 | LogicAddr: uint32(self.localAddr.LogicAddr()), 232 | NetAddr: self.localAddr.NetAddr().String(), 233 | IsStream: isStream, 234 | }) 235 | 236 | if nil != err { 237 | return err 238 | } 239 | 240 | if j, err = crypto.AESCBCEncrypt(cecret_key, j); nil != err { 241 | return err 242 | } 243 | 244 | b := make([]byte, 4+len(j)) 245 | binary.BigEndian.PutUint32(b, uint32(len(j))) 246 | copy(b[4:], j) 247 | 248 | conn.SetWriteDeadline(time.Now().Add(time.Second)) 249 | 
_, err = conn.Write(b) 250 | conn.SetWriteDeadline(time.Time{}) 251 | 252 | if nil != err { 253 | return err 254 | } else { 255 | buffer := make([]byte, 4) 256 | conn.SetReadDeadline(time.Now().Add(time.Second)) 257 | _, err = io.ReadFull(conn, buffer) 258 | conn.SetReadDeadline(time.Time{}) 259 | if nil != err { 260 | return err 261 | } 262 | } 263 | return nil 264 | } 265 | 266 | func (n *node) onRelayMessage(self *Node, message *ss.RelayMessage) { 267 | logger.Debugf("onRelayMessage self:%s %s->%s", self.localAddr.LogicAddr().String(), message.From().String(), message.To().String()) 268 | if nextNode := self.getNodeByLogicAddr(message.To()); nextNode != nil { 269 | logger.Debugf("nextNode %s", nextNode.addr.LogicAddr()) 270 | nextNode.sendMessage(self, message, time.Now().Add(time.Second)) 271 | } else if rpcReq := message.GetRpcRequest(); rpcReq != nil && !rpcReq.Oneway { 272 | //对于无法路由的rpc请求,返回错误响应 273 | if nextNode = self.getNodeByLogicAddr(message.From()); nextNode != nil { 274 | logger.Debugf(fmt.Sprintf("route message to target:%s failed", message.To().String())) 275 | nextNode.sendMessage(self, ss.NewMessage(message.From(), self.localAddr.LogicAddr(), &rpcgo.ResponseMsg{ 276 | Seq: rpcReq.Seq, 277 | Err: rpcgo.NewError(rpcgo.ErrOther, fmt.Sprintf("route message to target:%s failed", message.To().String())), 278 | }), time.Now().Add(time.Second)) 279 | } 280 | } 281 | } 282 | 283 | func (n *node) onMessage(ctx context.Context, self *Node, msg interface{}) { 284 | switch msg := msg.(type) { 285 | case *ss.Message: 286 | switch m := msg.Payload().(type) { 287 | case proto.Message: 288 | self.msgManager.dispatchProto(ctx, msg.From(), msg.Cmd(), m) 289 | case []byte: 290 | self.msgManager.dispatchBinary(ctx, msg.From(), msg.Cmd(), m) 291 | case *rpcgo.RequestMsg: 292 | self.rpcSvr.svr.OnMessage(ctx, &rpcChannel{peer: msg.From(), node: n, self: self}, m) 293 | case *rpcgo.ResponseMsg: 294 | self.rpcCli.cli.OnMessage(nil, m) 295 | } 296 | case *ss.RelayMessage: 
297 | n.onRelayMessage(self, msg) 298 | } 299 | } 300 | 301 | func (n *node) onEstablish(self *Node, conn *net.TCPConn) { 302 | codec := ss.NewCodec(self.localAddr.LogicAddr()) 303 | n.socket = netgo.NewAsynSocket(netgo.NewTcpSocket(conn, codec), netgo.AsynSocketOption{ 304 | SendChanSize: SendChanSize, 305 | Codec: codec, 306 | AutoRecv: true, 307 | Context: context.TODO(), 308 | }) 309 | 310 | n.socket.SetPacketHandler(func(ctx context.Context, as *netgo.AsynSocket, packet interface{}) error { 311 | if self.getNodeByLogicAddr(n.addr.LogicAddr()) != n { 312 | return ErrInvaildNode 313 | } else { 314 | n.onMessage(ctx, self, packet) 315 | return nil 316 | } 317 | }) 318 | 319 | n.socket.SetCloseCallback(func(as *netgo.AsynSocket, err error) { 320 | n.Lock() 321 | n.socket = nil 322 | n.Unlock() 323 | }).Recv() 324 | 325 | now := time.Now() 326 | 327 | for e := n.pendingMsg.Front(); e != nil; e = n.pendingMsg.Front() { 328 | msg := n.pendingMsg.Remove(e).(*pendingMessage) 329 | if msg.ctx != nil { 330 | select { 331 | case <-msg.ctx.Done(): 332 | default: 333 | n.socket.SendWithContext(msg.ctx, msg.message) 334 | } 335 | } else if msg.deadline.IsZero() || now.Before(msg.deadline) { 336 | n.socket.Send(msg.message, msg.deadline) 337 | } 338 | } 339 | } 340 | 341 | // 移除ctx.Done或到达deadline的消息 342 | func (n *node) removeFailedMsg(removeZero bool) { 343 | now := time.Now() 344 | e := n.pendingMsg.Front() 345 | for e != nil { 346 | m := e.Value.(*pendingMessage) 347 | remove := false 348 | 349 | if m.ctx != nil { 350 | select { 351 | case <-m.ctx.Done(): 352 | remove = true 353 | default: 354 | } 355 | } else if (m.deadline.IsZero() && removeZero) || now.After(m.deadline) { 356 | remove = true 357 | } 358 | 359 | if remove { 360 | next := e.Next() 361 | n.pendingMsg.Remove(e) 362 | e = next 363 | } else { 364 | e = e.Next() 365 | } 366 | } 367 | } 368 | 369 | func (n *node) dialError(self *Node) { 370 | n.Lock() 371 | defer n.Unlock() 372 | if nil == n.socket { 373 | 
n.removeFailedMsg(false) 374 | if n.pendingMsg.Len() > 0 { 375 | time.AfterFunc(time.Second, func() { 376 | n.dial(self) 377 | }) 378 | } 379 | } 380 | } 381 | 382 | func (n *node) dialOK(self *Node, conn *net.TCPConn) { 383 | n.Lock() 384 | defer n.Unlock() 385 | if n.socket != nil { 386 | //两段同时建立连接 387 | conn.Close() 388 | } else { 389 | n.onEstablish(self, conn) 390 | } 391 | } 392 | 393 | func (n *node) dial(self *Node) { 394 | dialer := &net.Dialer{} 395 | logger.Debugf("%s dial %s", self.localAddr.LogicAddr().String(), n.addr.LogicAddr().String()) 396 | ok := false 397 | conn, err := dialer.Dial("tcp", n.addr.NetAddr().String()) 398 | if err == nil { 399 | if err = n.handshake(self, conn.(*net.TCPConn), false); err != nil { 400 | conn.Close() 401 | conn = nil 402 | } else { 403 | ok = true 404 | } 405 | } 406 | 407 | select { 408 | case <-self.die: 409 | if conn != nil { 410 | conn.Close() 411 | } 412 | default: 413 | if ok { 414 | n.dialOK(self, conn.(*net.TCPConn)) 415 | } else { 416 | n.dialError(self) 417 | } 418 | } 419 | } 420 | 421 | func (n *node) openStream(self *Node) (*smux.Stream, error) { 422 | n.streamCli.Lock() 423 | defer n.streamCli.Unlock() 424 | for { 425 | if n.streamCli.session != nil { 426 | if stream, err := n.streamCli.session.OpenStream(); err == nil { 427 | return stream, nil 428 | } else if err == smux.ErrGoAway { 429 | //stream id overflows, should start a new connection 430 | return nil, err 431 | } else { 432 | n.streamCli.session.Close() 433 | n.streamCli.session = nil 434 | } 435 | } else { 436 | dialer := &net.Dialer{} 437 | if conn, err := dialer.Dial("tcp", n.addr.NetAddr().String()); err != nil { 438 | return nil, err 439 | } else if err = n.handshake(self, conn.(*net.TCPConn), true); err != nil { 440 | conn.Close() 441 | return nil, err 442 | } else if session, err := smux.Client(conn, nil); err != nil { 443 | conn.Close() 444 | return nil, err 445 | } else { 446 | select { 447 | case <-self.die: 448 | session.Close() 449 
| conn.Close() 450 | return nil, errors.New("server die") 451 | default: 452 | } 453 | n.streamCli.session = session 454 | } 455 | } 456 | } 457 | } 458 | 459 | func (n *node) sendMessage(self *Node, msg interface{}, deadline time.Time) (err error) { 460 | n.Lock() 461 | socket := n.socket 462 | if socket != nil { 463 | n.Unlock() 464 | err = socket.Send(msg, deadline) 465 | } else { 466 | if n.pendingMsg.Len() >= MaxPendingMsgSize { 467 | if deadline.IsZero() { 468 | //deadline.IsZero的包优先级最低,直接丢弃 469 | return ErrPendingQueueFull 470 | } else { 471 | //尝试移除已经失效或deadline.IsZero的msg 472 | n.removeFailedMsg(true) 473 | if n.pendingMsg.Len() >= MaxPendingMsgSize { 474 | return ErrPendingQueueFull 475 | } 476 | } 477 | } 478 | 479 | n.pendingMsg.PushBack(&pendingMessage{ 480 | message: msg, 481 | deadline: deadline, 482 | }) 483 | //尝试与对端建立连接 484 | if n.pendingMsg.Len() == 1 { 485 | self.Go(func() { 486 | n.dial(self) 487 | }) 488 | } 489 | n.Unlock() 490 | } 491 | return err 492 | } 493 | 494 | func (n *node) sendMessageWithContext(ctx context.Context, self *Node, msg interface{}) (err error) { 495 | n.Lock() 496 | socket := n.socket 497 | if socket != nil { 498 | n.Unlock() 499 | err = socket.SendWithContext(ctx, msg) 500 | } else { 501 | if n.pendingMsg.Len() >= MaxPendingMsgSize { 502 | //尝试移除已经失效或deadline.IsZero的msg 503 | n.removeFailedMsg(true) 504 | if n.pendingMsg.Len() >= MaxPendingMsgSize { 505 | return ErrPendingQueueFull 506 | } 507 | } 508 | n.pendingMsg.PushBack(&pendingMessage{ 509 | message: msg, 510 | ctx: ctx, 511 | }) 512 | //尝试与对端建立连接 513 | if n.pendingMsg.Len() == 1 { 514 | self.Go(func() { 515 | n.dial(self) 516 | }) 517 | } 518 | n.Unlock() 519 | } 520 | return err 521 | } 522 | 523 | func (n *node) closeSocket() { 524 | n.Lock() 525 | if n.socket != nil { 526 | n.socket.Close(nil) 527 | n.socket = nil 528 | } 529 | n.Unlock() 530 | 531 | n.streamCli.Lock() 532 | if n.streamCli.session != nil { 533 | n.streamCli.session.Close() 534 | 
n.streamCli.session = nil 535 | } 536 | n.streamCli.Unlock() 537 | } 538 | -------------------------------------------------------------------------------- /cluster.go: -------------------------------------------------------------------------------- 1 | package clustergo 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "net" 11 | "reflect" 12 | "runtime/debug" 13 | "sync" 14 | "sync/atomic" 15 | "time" 16 | 17 | "github.com/sniperHW/clustergo/addr" 18 | "github.com/sniperHW/clustergo/codec/pb" 19 | "github.com/sniperHW/clustergo/codec/ss" 20 | "github.com/sniperHW/clustergo/membership" 21 | "github.com/sniperHW/clustergo/pkg/crypto" 22 | "github.com/sniperHW/netgo" 23 | "github.com/sniperHW/rpcgo" 24 | "github.com/xtaci/smux" 25 | "google.golang.org/protobuf/proto" 26 | ) 27 | 28 | var ( 29 | ErrInvaildNode = errors.New("invaild node") 30 | ErrDuplicateConn = errors.New("duplicate node connection") 31 | ErrNetAddrMismatch = errors.New("net addr mismatch") 32 | ErrPendingQueueFull = errors.New("pending queue full") 33 | ErrBusy = errors.New("busy") 34 | ) 35 | 36 | var ( 37 | SendChanSize int = 256 //socket异步发送chan的大小 38 | DefaultSendTimeout time.Duration = time.Millisecond * 200 // 39 | MaxPendingMsgSize int = 1024 //连接建立前待发送消息缓冲区的大小,一旦缓冲区满,发送将返回ErrPendingQueueFull 40 | ) 41 | 42 | var RPCCodec rpcgo.Codec = PbCodec{} 43 | 44 | var cecret_key []byte = []byte("sanguo_2022") 45 | 46 | type Handshake struct { 47 | LogicAddr uint32 `json:"LogicAddr,omitempty"` 48 | NetAddr string `json:"NetAddr,omitempty"` 49 | IsStream bool `json:"IsStream,omitempty"` 50 | } 51 | 52 | type ProtoMsgHandler func(context.Context, addr.LogicAddr, proto.Message) 53 | type BinaryMsgHandler func(context.Context, addr.LogicAddr, uint16, []byte) 54 | 55 | type msgManager struct { 56 | sync.RWMutex 57 | protoMsgHandlers map[uint16]ProtoMsgHandler 58 | binaryMsgHandlers map[uint16]BinaryMsgHandler 59 | } 60 | 61 | func (m 
ProtoMsgHandler) call(ctx context.Context, from addr.LogicAddr, cmd uint16, msg proto.Message) { 62 | defer func() { 63 | if r := recover(); r != nil { 64 | logger.Errorf("error on Dispatch:%d\nstack:%v,%s\n", cmd, r, debug.Stack()) 65 | } 66 | }() 67 | m(ctx, from, msg) 68 | } 69 | 70 | func (m BinaryMsgHandler) call(ctx context.Context, from addr.LogicAddr, cmd uint16, msg []byte) { 71 | defer func() { 72 | if r := recover(); r != nil { 73 | logger.Errorf("error on Dispatch:%d\nstack:%v,%s\n", cmd, r, debug.Stack()) 74 | } 75 | }() 76 | m(ctx, from, cmd, msg) 77 | } 78 | 79 | func (m *msgManager) registerProto(cmd uint16, handler ProtoMsgHandler) { 80 | m.Lock() 81 | defer m.Unlock() 82 | _, ok := m.protoMsgHandlers[cmd] 83 | if ok { 84 | logger.Panicf("Register proto handler %d failed: duplicate handler\n", cmd) 85 | return 86 | } 87 | 88 | m.protoMsgHandlers[cmd] = handler 89 | } 90 | 91 | func (m *msgManager) registerBinary(cmd uint16, handler BinaryMsgHandler) { 92 | m.Lock() 93 | defer m.Unlock() 94 | _, ok := m.binaryMsgHandlers[cmd] 95 | if ok { 96 | logger.Panicf("Register binary handler %d failed: duplicate handler\n", cmd) 97 | return 98 | } 99 | 100 | m.binaryMsgHandlers[cmd] = handler 101 | } 102 | 103 | func (m *msgManager) dispatchProto(ctx context.Context, from addr.LogicAddr, cmd uint16, msg proto.Message) { 104 | m.RLock() 105 | handler, ok := m.protoMsgHandlers[cmd] 106 | m.RUnlock() 107 | if ok { 108 | if handler == nil { 109 | logger.Errorf("cmd:%d pb handler is nil\n", cmd) 110 | } else { 111 | handler.call(ctx, from, cmd, msg) 112 | } 113 | } else { 114 | logger.Errorf("unkonw cmd:%d\n", cmd) 115 | } 116 | } 117 | 118 | func (m *msgManager) dispatchBinary(ctx context.Context, from addr.LogicAddr, cmd uint16, msg []byte) { 119 | m.RLock() 120 | handler, ok := m.binaryMsgHandlers[cmd] 121 | m.RUnlock() 122 | if ok { 123 | if handler == nil { 124 | logger.Errorf("cmd:%d pb handler is nil\n", cmd) 125 | } else { 126 | handler.call(ctx, from, 
// pool_goroutine_size is the number of resident worker goroutines started by
// Node.Start.
const pool_goroutine_size = 16

// gopool is a minimal goroutine pool: tasks are handed to resident workers
// through taskqueue, and overflow tasks run on a freshly spawned goroutine so
// Go never blocks the caller.
type gopool struct {
	taskqueue chan func()
	die       chan struct{}
}

// loop is the body of one worker goroutine; it executes queued tasks until
// die is closed.
func (p *gopool) loop() {
	for {
		select {
		case task := <-p.taskqueue:
			task()
		case <-p.die:
			return
		}
	}
}

// Go schedules fn on the pool. When the queue is full (or no worker is
// ready on an unbuffered queue) fn runs on its own goroutine instead.
func (p *gopool) Go(fn func()) {
	select {
	case p.taskqueue <- fn:
	default:
		go fn()
	}
}
errors.New("server die") 210 | case <-s.started: 211 | default: 212 | return nil, errors.New("server not start") 213 | } 214 | 215 | if peer == s.localAddr.LogicAddr() { 216 | return nil, errors.New("cant't open stream to self") 217 | } else if n := s.getNodeByLogicAddr(peer); n != nil { 218 | return n.openStream(s) 219 | } else { 220 | return nil, errors.New("invaild peer") 221 | } 222 | } 223 | 224 | func (s *Node) GetAddrByType(tt uint32, n ...int) (addr addr.LogicAddr, err error) { 225 | select { 226 | case <-s.die: 227 | err = errors.New("server die") 228 | return 229 | case <-s.started: 230 | default: 231 | err = errors.New("server not start") 232 | return 233 | } 234 | 235 | var num int 236 | if len(n) > 0 { 237 | num = n[0] 238 | } 239 | 240 | if node := s.nodeCache.getNormalNode(tt, num); node != nil { 241 | addr = node.addr.LogicAddr() 242 | } else { 243 | err = errors.New("no available node") 244 | } 245 | return addr, err 246 | } 247 | 248 | func (s *Node) RegisterProtoHandler(msg proto.Message, handler func(context.Context, addr.LogicAddr, proto.Message)) *Node { 249 | if handler == nil { 250 | logger.Panicf("RegisterBinrayHandler %s handler == nil", reflect.TypeOf(msg).Name()) 251 | } 252 | if cmd := pb.GetCmd(ss.Namespace, msg); cmd != 0 { 253 | s.msgManager.registerProto(uint16(cmd), handler) 254 | } 255 | return s 256 | } 257 | 258 | func (s *Node) RegisterBinaryHandler(cmd uint16, handler func(context.Context, addr.LogicAddr, uint16, []byte)) *Node { 259 | if handler == nil { 260 | logger.Panicf("RegisterBinrayHandler %d handler == nil", cmd) 261 | } 262 | s.msgManager.registerBinary(uint16(cmd), handler) 263 | return s 264 | } 265 | 266 | func (s *Node) SendBinMessageWithContext(ctx context.Context, to addr.LogicAddr, cmd uint16, msg []byte) error { 267 | select { 268 | case <-s.die: 269 | return errors.New("server die") 270 | case <-s.started: 271 | default: 272 | return errors.New("server not start") 273 | } 274 | if to == 
s.localAddr.LogicAddr() { 275 | s.Go(func() { 276 | s.msgManager.dispatchBinary(context.TODO(), to, cmd, msg) 277 | }) 278 | } else { 279 | if n := s.getNodeByLogicAddr(to); n != nil { 280 | n.sendMessageWithContext(ctx, s, ss.NewMessage(to, s.localAddr.LogicAddr(), msg, cmd)) 281 | } else { 282 | return ErrInvaildNode 283 | } 284 | } 285 | return nil 286 | } 287 | 288 | /* 289 | * SendMessage系列函数的deadline参数选择 290 | * 291 | * 发送操作存在两个对外隐藏的缓冲区 分别为: 292 | * pendingMsgQueue: 连接建立之前缓存待发送消息,如果pendingMsgQueue满SendMessage返回ErrPendingQueueFull 293 | * sendQueue: 异步发送缓冲区,如果满行为取决于deadline,如果deadline==0返回netgo.ErrSendQueueFull,否则等到deadline超时返回ErrPushToSendQueueTimeout 294 | * 295 | * 当 deadline != 0, deadline包含连接建立的时间,如果deadline到达时连接尚未建立 msg将被丢弃。 296 | * 连接建立后将检查msg的deadline,如果到达msg将被丢弃。 297 | * 298 | */ 299 | 300 | func (s *Node) SendBinMessage(to addr.LogicAddr, cmd uint16, msg []byte, deadline ...time.Time) error { 301 | select { 302 | case <-s.die: 303 | return errors.New("server die") 304 | case <-s.started: 305 | default: 306 | return errors.New("server not start") 307 | } 308 | if to == s.localAddr.LogicAddr() { 309 | s.Go(func() { 310 | s.msgManager.dispatchBinary(context.TODO(), to, cmd, msg) 311 | }) 312 | } else { 313 | if n := s.getNodeByLogicAddr(to); n != nil { 314 | d := time.Now().Add(DefaultSendTimeout) 315 | if len(deadline) > 0 { 316 | d = deadline[0] 317 | } 318 | n.sendMessage(s, ss.NewMessage(to, s.localAddr.LogicAddr(), msg, cmd), d) 319 | } else { 320 | return ErrInvaildNode 321 | } 322 | } 323 | return nil 324 | } 325 | 326 | func (s *Node) SendPbMessage(to addr.LogicAddr, msg proto.Message, deadline ...time.Time) error { 327 | select { 328 | case <-s.die: 329 | return errors.New("server die") 330 | case <-s.started: 331 | default: 332 | return errors.New("server not start") 333 | } 334 | if to == s.localAddr.LogicAddr() { 335 | s.Go(func() { 336 | s.msgManager.dispatchProto(context.TODO(), to, uint16(pb.GetCmd(ss.Namespace, msg)), msg) 337 | }) 338 | 
} else { 339 | if n := s.getNodeByLogicAddr(to); n != nil { 340 | d := time.Now().Add(DefaultSendTimeout) 341 | if len(deadline) > 0 { 342 | d = deadline[0] 343 | } 344 | n.sendMessage(s, ss.NewMessage(to, s.localAddr.LogicAddr(), msg), d) 345 | } else { 346 | return ErrInvaildNode 347 | } 348 | } 349 | return nil 350 | } 351 | 352 | func (s *Node) Stop() error { 353 | select { 354 | case <-s.started: 355 | default: 356 | return errors.New("server not start") 357 | } 358 | 359 | once := false 360 | s.stopOnce.Do(func() { 361 | once = true 362 | }) 363 | if once { 364 | go func() { 365 | s.listener.Close() 366 | //rpcSvr不再接收新的请求 367 | s.rpcSvr.svr.Stop() 368 | //等待所有rpc请求都返回,重构rpc拦截器,使用拦截器实现计数 369 | for s.rpcSvr.pendingRespCount.Load() > 0 { 370 | time.Sleep(time.Millisecond * 10) 371 | } 372 | s.nodeCache.close() 373 | s.smuxSessions.Range(func(key, _ interface{}) bool { 374 | key.(*smux.Session).Close() 375 | return true 376 | }) 377 | close(s.die) 378 | }() 379 | return nil 380 | } else { 381 | return errors.New("stoped") 382 | } 383 | } 384 | 385 | func (s *Node) Start(MemberShip membership.Subscribe, localAddr addr.LogicAddr) (err error) { 386 | once := false 387 | s.startOnce.Do(func() { 388 | once = true 389 | }) 390 | if once { 391 | s.nodeCache.localAddr = localAddr 392 | 393 | if err = MemberShip.Subscribe(func(nodeinfo membership.MemberInfo) { 394 | s.nodeCache.onNodeInfoUpdate(s, nodeinfo) 395 | }); err != nil { 396 | return err 397 | } 398 | 399 | s.nodeCache.waitInit() 400 | 401 | if n := s.nodeCache.getNodeByLogicAddr(localAddr); n == nil { 402 | //当前节点在配置中找不到 403 | err = fmt.Errorf("%s not in config", localAddr.String()) 404 | } else { 405 | s.localAddr = n.addr 406 | var serve func() 407 | s.listener, serve, err = netgo.ListenTCP("tcp", s.localAddr.NetAddr().String(), func(conn *net.TCPConn) { 408 | go func() { 409 | if err := s.onNewConnection(conn); nil != err { 410 | logger.Infof("auth error %s self %s", err.Error(), localAddr.String()) 411 | 
conn.Close() 412 | } 413 | }() 414 | }) 415 | if err == nil { 416 | for i := 0; i < pool_goroutine_size; i++ { 417 | go s.loop() 418 | } 419 | logger.Debugf("%s serve on:%s", localAddr.String(), s.localAddr.NetAddr().String()) 420 | go serve() 421 | close(s.started) 422 | } 423 | } 424 | } else { 425 | err = errors.New("started") 426 | } 427 | return err 428 | } 429 | 430 | func (s *Node) Wait() error { 431 | select { 432 | case <-s.started: 433 | default: 434 | return errors.New("server not start") 435 | } 436 | <-s.die 437 | return nil 438 | } 439 | 440 | func (s *Node) listenStream(session *smux.Session, onNewStream func(*smux.Stream)) { 441 | defer func() { 442 | session.Close() 443 | s.smuxSessions.Delete(session) 444 | }() 445 | s.smuxSessions.Store(session, struct{}{}) 446 | for { 447 | if stream, err := session.AcceptStream(); err == nil { 448 | onNewStream(stream) 449 | } else { 450 | return 451 | } 452 | } 453 | } 454 | 455 | func (s *Node) onNewConnection(conn net.Conn) (err error) { 456 | buff := make([]byte, 4) 457 | conn.SetReadDeadline(time.Now().Add(time.Second)) 458 | defer conn.SetReadDeadline(time.Time{}) 459 | 460 | _, err = io.ReadFull(conn, buff) 461 | if nil != err { 462 | return err 463 | } 464 | 465 | datasize := int(binary.BigEndian.Uint32(buff)) 466 | 467 | buff = make([]byte, datasize) 468 | 469 | _, err = io.ReadFull(conn, buff) 470 | if nil != err { 471 | return err 472 | } 473 | 474 | if buff, err = crypto.AESCBCDecrypter(cecret_key, buff); nil != err { 475 | return err 476 | } 477 | 478 | var handshake Handshake 479 | 480 | if err = json.Unmarshal(buff, &handshake); nil != err { 481 | return err 482 | } 483 | 484 | node := s.nodeCache.getNodeByLogicAddr(addr.LogicAddr(handshake.LogicAddr)) 485 | if node == nil { 486 | return ErrInvaildNode 487 | } else if node.addr.NetAddr().String() != handshake.NetAddr { 488 | return ErrNetAddrMismatch 489 | } 490 | 491 | resp := []byte{0, 0, 0, 0} 492 | binary.BigEndian.PutUint32(resp, 0) 493 | 
494 | if handshake.IsStream { 495 | if onNewStream, ok := s.onNewStream.Load().(func(*smux.Stream)); !ok { 496 | return errors.New("onNewStream not set") 497 | } else { 498 | conn.SetWriteDeadline(time.Now().Add(time.Millisecond * 5)) 499 | defer conn.SetWriteDeadline(time.Time{}) 500 | 501 | if _, err = conn.Write(resp); err != nil { 502 | return err 503 | } else { 504 | if streamSvr, err := smux.Server(conn, nil); err != nil { 505 | return err 506 | } else { 507 | go s.listenStream(streamSvr, onNewStream) 508 | return nil 509 | } 510 | } 511 | } 512 | } else { 513 | node.Lock() 514 | defer node.Unlock() 515 | if node.pendingMsg.Len() != 0 { 516 | //当前节点同时正在向对端dialing,逻辑地址小的一方放弃接受连接 517 | if s.localAddr.LogicAddr() < node.addr.LogicAddr() { 518 | logger.Errorf("(self:%v) (other:%v) connectting simultaneously", s.localAddr.LogicAddr(), node.addr.LogicAddr()) 519 | return errors.New("connectting simultaneously") 520 | } 521 | } else if nil != node.socket { 522 | return ErrDuplicateConn 523 | } 524 | 525 | conn.SetWriteDeadline(time.Now().Add(time.Millisecond * 5)) 526 | defer conn.SetWriteDeadline(time.Time{}) 527 | 528 | if _, err = conn.Write(resp); err != nil { 529 | return err 530 | } else { 531 | node.onEstablish(s, conn.(*net.TCPConn)) 532 | return nil 533 | } 534 | } 535 | } 536 | 537 | func (s *Node) GetRPCClient() *RPCClient { 538 | return s.rpcCli 539 | } 540 | 541 | func (s *Node) GetRPCServer() *RPCServer { 542 | return s.rpcSvr 543 | } 544 | 545 | func NewClusterNode(rpccodec rpcgo.Codec) *Node { 546 | n := &Node{ 547 | gopool: gopool{ 548 | die: make(chan struct{}), 549 | taskqueue: make(chan func(), 256), 550 | }, 551 | nodeCache: nodeCache{ 552 | allnodes: map[addr.LogicAddr]*node{}, 553 | nodes: map[uint32][]*node{}, 554 | harbors: map[uint32][]*node{}, 555 | initC: make(chan struct{}), 556 | }, 557 | msgManager: msgManager{ 558 | protoMsgHandlers: map[uint16]ProtoMsgHandler{}, 559 | binaryMsgHandlers: map[uint16]BinaryMsgHandler{}, 560 | }, 561 | 
started: make(chan struct{}), 562 | } 563 | n.rpcCli = &RPCClient{ 564 | n: n, 565 | cli: rpcgo.NewClient(rpccodec), 566 | } 567 | n.rpcSvr = &RPCServer{ 568 | svr: rpcgo.NewServer(rpccodec), 569 | } 570 | n.rpcSvr.SetInInterceptor([]func(*rpcgo.Replyer, *rpcgo.RequestMsg) bool{}) 571 | return n 572 | } 573 | 574 | var defaultInstance *Node 575 | var defaultOnce sync.Once 576 | 577 | func GetDefaultNode() *Node { 578 | defaultOnce.Do(func() { 579 | defaultInstance = NewClusterNode(RPCCodec) 580 | }) 581 | return defaultInstance 582 | } 583 | 584 | func Start(MemberShip membership.Subscribe, localAddr addr.LogicAddr) (err error) { 585 | return GetDefaultNode().Start(MemberShip, localAddr) 586 | } 587 | 588 | func GetAddrByType(tt uint32, n ...int) (addr addr.LogicAddr, err error) { 589 | return GetDefaultNode().GetAddrByType(tt, n...) 590 | } 591 | 592 | func Stop() error { 593 | return GetDefaultNode().Stop() 594 | } 595 | 596 | func Wait() error { 597 | return GetDefaultNode().Wait() 598 | } 599 | 600 | func RegisterProtoHandler(msg proto.Message, handler func(context.Context, addr.LogicAddr, proto.Message)) *Node { 601 | return GetDefaultNode().RegisterProtoHandler(msg, handler) 602 | } 603 | 604 | func RegisterBinaryHandler(cmd uint16, handler func(context.Context, addr.LogicAddr, uint16, []byte)) *Node { 605 | return GetDefaultNode().RegisterBinaryHandler(cmd, handler) 606 | } 607 | 608 | func GetRPCClient() *RPCClient { 609 | return GetDefaultNode().GetRPCClient() 610 | } 611 | 612 | func GetRPCServer() *RPCServer { 613 | return GetDefaultNode().GetRPCServer() 614 | } 615 | 616 | func SendPbMessage(to addr.LogicAddr, msg proto.Message, deadline ...time.Time) error { 617 | return GetDefaultNode().SendPbMessage(to, msg, deadline...) 618 | } 619 | 620 | func SendBinMessage(to addr.LogicAddr, cmd uint16, msg []byte, deadline ...time.Time) error { 621 | return GetDefaultNode().SendBinMessage(to, cmd, msg, deadline...) 
622 | } 623 | 624 | func SendBinMessageWithContext(ctx context.Context, to addr.LogicAddr, cmd uint16, msg []byte) error { 625 | return GetDefaultNode().SendBinMessageWithContext(ctx, to, cmd, msg) 626 | } 627 | 628 | func StartSmuxServer(onNewStream func(*smux.Stream)) { 629 | GetDefaultNode().StartSmuxServer(onNewStream) 630 | } 631 | 632 | func OpenStream(peer addr.LogicAddr) (*smux.Stream, error) { 633 | return GetDefaultNode().OpenStream(peer) 634 | } 635 | 636 | func RegisterService[Arg any](name string, method func(context.Context, *rpcgo.Replyer, *Arg)) error { 637 | return rpcgo.Register(GetDefaultNode().rpcSvr.svr, name, method) 638 | } 639 | 640 | func Call[Arg any, Ret any](ctx context.Context, to addr.LogicAddr, method string, arg Arg) (ret *Ret, err error) { 641 | r := new(Ret) 642 | if err = GetRPCClient().Call(ctx, to, method, arg, r); err == nil { 643 | ret = r 644 | } 645 | return 646 | } 647 | 648 | func Post[Arg any](ctx context.Context, to addr.LogicAddr, method string, arg Arg) (err error) { 649 | err = GetRPCClient().Call(ctx, to, method, arg, nil) 650 | return 651 | } 652 | 653 | func CallWithTimeout[Arg any, Ret any](to addr.LogicAddr, method string, arg Arg, d time.Duration) (ret *Ret, err error) { 654 | r := new(Ret) 655 | if err = GetRPCClient().CallWithTimeout(to, method, arg, r, d); err == nil { 656 | ret = r 657 | } 658 | return 659 | } 660 | 661 | func AsyncCall[Arg any, Ret any](to addr.LogicAddr, method string, arg Arg, deadline time.Time, callback func(*Ret, error)) error { 662 | return GetRPCClient().AsyncCall(to, method, arg, new(Ret), deadline, func(r interface{}, err error) { 663 | callback(r.(*Ret), err) 664 | }) 665 | } 666 | 667 | // for unit test only 668 | func registerService[Arg any](node *Node, name string, method func(context.Context, *rpcgo.Replyer, *Arg)) error { 669 | return rpcgo.Register(node.rpcSvr.svr, name, method) 670 | } 671 | 672 | func call[Arg any, Ret any](ctx context.Context, node *Node, to 
addr.LogicAddr, method string, arg Arg) (ret *Ret, err error) { 673 | r := new(Ret) 674 | if err = node.GetRPCClient().Call(ctx, to, method, arg, r); err == nil { 675 | ret = r 676 | } 677 | return 678 | } 679 | 680 | func post[Arg any](ctx context.Context, node *Node, to addr.LogicAddr, method string, arg Arg) (err error) { 681 | err = node.GetRPCClient().Call(ctx, to, method, arg, nil) 682 | return 683 | } 684 | 685 | func callWithTimeout[Arg any, Ret any](node *Node, to addr.LogicAddr, method string, arg Arg, d time.Duration) (ret *Ret, err error) { 686 | r := new(Ret) 687 | if err = node.GetRPCClient().CallWithTimeout(to, method, arg, r, d); err == nil { 688 | ret = r 689 | } 690 | return 691 | } 692 | 693 | func asyncCall[Arg any, Ret any](ctx context.Context, node *Node, to addr.LogicAddr, method string, arg Arg, deadline time.Time, callback func(*Ret, error)) error { 694 | return node.GetRPCClient().AsyncCall(to, method, arg, new(Ret), deadline, func(r interface{}, err error) { 695 | callback(r.(*Ret), err) 696 | }) 697 | } 698 | -------------------------------------------------------------------------------- /cluster_test.go: -------------------------------------------------------------------------------- 1 | package clustergo 2 | 3 | //go test -race -covermode=atomic -v -coverprofile=coverage.out -run=. 
4 | //go tool cover -html=coverage.out 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | "runtime" 10 | "sync" 11 | "sync/atomic" 12 | "testing" 13 | "time" 14 | 15 | "github.com/sniperHW/clustergo/addr" 16 | "github.com/sniperHW/clustergo/codec/pb" 17 | "github.com/sniperHW/clustergo/codec/ss" 18 | "github.com/sniperHW/clustergo/logger/zap" 19 | "github.com/sniperHW/clustergo/membership" 20 | "github.com/sniperHW/rpcgo" 21 | "github.com/stretchr/testify/assert" 22 | "github.com/xtaci/smux" 23 | "google.golang.org/protobuf/proto" 24 | ) 25 | 26 | type localMemberShip struct { 27 | nodes map[addr.LogicAddr]*membership.Node 28 | subscribes []func(membership.MemberInfo) 29 | } 30 | 31 | // 订阅变更 32 | func (d *localMemberShip) Subscribe(updateCB func(membership.MemberInfo)) error { 33 | d.subscribes = append(d.subscribes, updateCB) 34 | i := membership.MemberInfo{} 35 | for _, v := range d.nodes { 36 | i.Add = append(i.Add, *v) 37 | } 38 | updateCB(i) 39 | return nil 40 | } 41 | 42 | func (d *localMemberShip) AddNode(n *membership.Node) { 43 | d.nodes[n.Addr.LogicAddr()] = n 44 | add := membership.MemberInfo{ 45 | Add: []membership.Node{*n}, 46 | } 47 | for _, v := range d.subscribes { 48 | v(add) 49 | } 50 | } 51 | 52 | func (d *localMemberShip) RemoveNode(logicAddr addr.LogicAddr) { 53 | if n := d.nodes[logicAddr]; n != nil { 54 | delete(d.nodes, logicAddr) 55 | remove := membership.MemberInfo{ 56 | Remove: []membership.Node{*n}, 57 | } 58 | for _, v := range d.subscribes { 59 | v(remove) 60 | } 61 | } 62 | } 63 | 64 | func (d *localMemberShip) ModifyNode(modify *membership.Node) { 65 | if n, ok := d.nodes[modify.Addr.LogicAddr()]; ok { 66 | if n.Available != modify.Available || n.Addr.NetAddr() != modify.Addr.NetAddr() { 67 | logger.Debug("modify") 68 | d.nodes[modify.Addr.LogicAddr()] = modify 69 | //nodes := d.LoadNodeInfo() 70 | update := membership.MemberInfo{ 71 | Update: []membership.Node{*modify}, 72 | } 73 | 74 | for _, v := range d.subscribes { 75 | v(update) 76 
| } 77 | } 78 | } 79 | } 80 | 81 | func (d *localMemberShip) Close() { 82 | 83 | } 84 | 85 | func init() { 86 | pb.Register(ss.Namespace, &ss.Echo{}, 1) 87 | l := zap.NewZapLogger("sanguo_test.log", "./logfile", "debug", 1024*1024*100, 14, 28, true) 88 | InitLogger(l.Sugar()) 89 | } 90 | 91 | func TestBenchmarkRPCAsync(t *testing.T) { 92 | localDiscovery := &localMemberShip{ 93 | nodes: map[addr.LogicAddr]*membership.Node{}, 94 | } 95 | 96 | node1Addr, _ := addr.MakeAddr("1.1.1", "localhost:18110") 97 | node2Addr, _ := addr.MakeAddr("1.2.1", "localhost:18111") 98 | 99 | localDiscovery.AddNode(&membership.Node{ 100 | Addr: node1Addr, 101 | Available: true, 102 | }) 103 | 104 | localDiscovery.AddNode(&membership.Node{ 105 | Addr: node2Addr, 106 | Available: true, 107 | }) 108 | 109 | node1 := NewClusterNode(JsonCodec{}) 110 | node1.RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, _ addr.LogicAddr, msg proto.Message) { 111 | logger.Debug(msg.(*ss.Echo).Msg) 112 | }) 113 | 114 | registerService(node1, "hello", func(_ context.Context, replyer *rpcgo.Replyer, arg *string) { 115 | replyer.Reply(fmt.Sprintf("hello world:%s", *arg)) 116 | }) 117 | 118 | node2 := NewClusterNode(JsonCodec{}) 119 | err := node2.Start(localDiscovery, node2Addr.LogicAddr()) 120 | assert.Nil(t, err) 121 | 122 | logger.Debug("Start OK") 123 | 124 | var resp string 125 | 126 | err = node1.Start(localDiscovery, node1Addr.LogicAddr()) 127 | assert.Nil(t, err) 128 | 129 | node2.GetRPCClient().Call(context.TODO(), node1Addr.LogicAddr(), "hello", "sniperHW", &resp) 130 | 131 | begtime := time.Now() 132 | 133 | counter := int32(0) 134 | concurrent := 500 135 | var wait sync.WaitGroup 136 | var callback func(resp interface{}, e error) 137 | callback = func(resp interface{}, e error) { 138 | if c := atomic.AddInt32(&counter, 1); c <= 100000 { 139 | for { 140 | err := node2.GetRPCClient().AsyncCall(node1Addr.LogicAddr(), "hello", "sniperHW", resp, time.Now().Add(time.Second*5), callback) 141 | if 
err != ErrBusy { 142 | break 143 | } else { 144 | runtime.Gosched() 145 | } 146 | } 147 | } else { 148 | wait.Done() 149 | } 150 | } 151 | 152 | for i := 0; i < concurrent; i++ { 153 | wait.Add(1) 154 | var resp string 155 | for { 156 | err := node2.GetRPCClient().AsyncCall(node1Addr.LogicAddr(), "hello", "sniperHW", &resp, time.Now().Add(time.Second*5), callback) 157 | if err != ErrBusy { 158 | break 159 | } else { 160 | runtime.Gosched() 161 | } 162 | } 163 | } 164 | 165 | wait.Wait() 166 | 167 | fmt.Println("10W call,use:", time.Since(begtime)) 168 | 169 | localDiscovery.RemoveNode(node1Addr.LogicAddr()) 170 | node1.Wait() 171 | 172 | _, err = node2.GetAddrByType(1) 173 | assert.NotNil(t, err) 174 | 175 | localDiscovery.RemoveNode(node2Addr.LogicAddr()) 176 | node2.Wait() 177 | } 178 | 179 | func TestBenchmarkRPCSync(t *testing.T) { 180 | localDiscovery := &localMemberShip{ 181 | nodes: map[addr.LogicAddr]*membership.Node{}, 182 | } 183 | 184 | node1Addr, _ := addr.MakeAddr("1.1.1", "localhost:18110") 185 | node2Addr, _ := addr.MakeAddr("1.2.1", "localhost:18111") 186 | 187 | localDiscovery.AddNode(&membership.Node{ 188 | Addr: node1Addr, 189 | Available: true, 190 | }) 191 | 192 | localDiscovery.AddNode(&membership.Node{ 193 | Addr: node2Addr, 194 | Available: true, 195 | }) 196 | 197 | node1 := NewClusterNode(JsonCodec{}) 198 | node1.RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, _ addr.LogicAddr, msg proto.Message) { 199 | logger.Debug(msg.(*ss.Echo).Msg) 200 | }) 201 | 202 | registerService(node1, "hello", func(_ context.Context, replyer *rpcgo.Replyer, arg *string) { 203 | replyer.Reply(fmt.Sprintf("hello world:%s", *arg)) 204 | }) 205 | 206 | node2 := NewClusterNode(JsonCodec{}) 207 | err := node2.Start(localDiscovery, node2Addr.LogicAddr()) 208 | assert.Nil(t, err) 209 | 210 | logger.Debug("Start OK") 211 | 212 | err = node1.Start(localDiscovery, node1Addr.LogicAddr()) 213 | assert.Nil(t, err) 214 | 215 | _, err = call[string, 
string](context.TODO(), node2, node1Addr.LogicAddr(), "hello", "sniperHW")

	begtime := time.Now()

	counter := int32(0)

	var wait sync.WaitGroup

	// 25 workers share an atomic counter until 100000 calls have been issued.
	for i := 0; i < 25; i++ {
		wait.Add(1)
		go func() {
			for atomic.AddInt32(&counter, 1) <= 100000 {
				_, err = call[string, string](context.TODO(), node2, node1Addr.LogicAddr(), "hello", "sniperHW")
			}
			wait.Done()
		}()
	}

	wait.Wait()

	fmt.Println("10W call,use:", time.Since(begtime))

	localDiscovery.RemoveNode(node1Addr.LogicAddr())
	node1.Wait()

	// node1 has been removed from membership: calls to it must now fail.
	_, err = call[string, string](context.TODO(), node2, node1Addr.LogicAddr(), "hello", "sniperHW")
	assert.NotNil(t, err)

	localDiscovery.RemoveNode(node2Addr.LogicAddr())
	node2.Wait()
}

// TestSingleNode verifies that a node can send messages and issue RPC calls
// to itself, and that Stop/Wait/Send all fail before Start is called.
func TestSingleNode(t *testing.T) {
	localDiscovery := &localMemberShip{
		nodes: map[addr.LogicAddr]*membership.Node{},
	}

	localAddr, _ := addr.MakeAddr("1.1.1", "localhost:18110")

	localDiscovery.AddNode(&membership.Node{
		Addr:      localAddr,
		Available: true,
	})

	s := NewClusterNode(JsonCodec{})

	// All of these must return an error on a node that has not been started.
	assert.NotNil(t, s.Stop())
	assert.NotNil(t, s.Wait())
	assert.NotNil(t, s.SendPbMessage(localAddr.LogicAddr(), &ss.Echo{
		Msg: "hello",
	}))

	s.RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, _ addr.LogicAddr, msg proto.Message) {
		logger.Debug(msg.(*ss.Echo).Msg)
	})

	s.GetRPCServer().SetInInterceptor(append([]func(replyer *rpcgo.Replyer, req *rpcgo.RequestMsg) bool{}, func(replyer *rpcgo.Replyer, req *rpcgo.RequestMsg) bool {
		beg := time.Now()
		// Hook invoked when the Replyer sends its response; logs the call duration.
		replyer.AppendOutInterceptor(func(req *rpcgo.RequestMsg, ret interface{}, err error) {
			if err == nil {
				// time.Since(beg) replaces time.Now().Sub(beg) (staticcheck S1012).
				logger.Debugf("call %s(\"%v\") use:%v", req.Method, *req.GetArg().(*string), time.Since(beg))
			} else {
				logger.Debugf("call %s(\"%v\") with error:%v", req.Method, *req.GetArg().(*string), err)
			}
		})
		return true
	}))

	registerService(s, "hello", func(_ context.Context, replyer *rpcgo.Replyer, arg *string) {
		logger.Debugf("on hello call,channel:%s", replyer.Channel().Name())
		replyer.Reply(fmt.Sprintf("hello world:%s", *arg))
	})

	err := s.Start(localDiscovery, localAddr.LogicAddr())
	assert.Nil(t, err)

	// Send a message to self.
	s.SendPbMessage(localAddr.LogicAddr(), &ss.Echo{
		Msg: "hello",
	})

	// Call self with a timeout.
	resp, err := callWithTimeout[string, string](s, localAddr.LogicAddr(), "hello", "sniperHW", time.Second)
	assert.Nil(t, err)
	assert.Equal(t, *resp, "hello world:sniperHW")

	localDiscovery.RemoveNode(localAddr.LogicAddr())
	s.Wait()

}

// TestTwoNode verifies messaging and RPC between two nodes, including calls
// issued before the target node starts, and graceful shutdown while an RPC
// (Delay) is still in flight.
func TestTwoNode(t *testing.T) {
	localDiscovery := &localMemberShip{
		nodes: map[addr.LogicAddr]*membership.Node{},
	}

	node1Addr, _ := addr.MakeAddr("1.1.1", "localhost:18110")
	node2Addr, _ := addr.MakeAddr("1.2.1", "localhost:18111")

	localDiscovery.AddNode(&membership.Node{
		Addr:      node1Addr,
		Available: true,
	})

	localDiscovery.AddNode(&membership.Node{
		Addr:      node2Addr,
		Available: true,
	})

	node1 := NewClusterNode(JsonCodec{})
	node1.RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, _ addr.LogicAddr, msg proto.Message) {
		logger.Debug(msg.(*ss.Echo).Msg)
	})

	registerService(node1, "hello", func(_ context.Context, replyer *rpcgo.Replyer, arg *string) {
		logger.Debugf("on hello call,channel:%s", replyer.Channel().Name())
		replyer.Reply(fmt.Sprintf("hello world:%s", *arg))
	})

	// Slow service used to prove that Wait() blocks until in-flight calls finish.
	registerService(node1, "Delay", func(_ context.Context, replyer *rpcgo.Replyer, arg *string) {
		logger.Debugf("on Delay")
		time.Sleep(time.Second * 5)
		replyer.Reply(fmt.Sprintf("hello world:%s", *arg))
	})

	node2 := NewClusterNode(JsonCodec{})
	err := node2.Start(localDiscovery, node2Addr.LogicAddr())
	assert.Nil(t, err)

	logger.Debug("Start OK")

	// node1 is not started yet, so this call is expected to time out.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
	_, err = call[string, string](ctx, node2, node1Addr.LogicAddr(), "hello", "sniperHW")
	cancel()
	logger.Debug(err)

	err = node1.Start(localDiscovery, node1Addr.LogicAddr())
	assert.Nil(t, err)

	node2.SendPbMessage(node1Addr.LogicAddr(), &ss.Echo{
		Msg: "hello",
	})

	// NOTE(review): ctx was already cancelled above; this reuse looks
	// unintentional — confirm whether call honors ctx cancellation here.
	resp, err := call[string, string](ctx, node2, node1Addr.LogicAddr(), "hello", "sniperHW")
	assert.Nil(t, err)
	assert.Equal(t, *resp, "hello world:sniperHW")

	go func() {
		_, err = call[string, string](ctx, node2, node1Addr.LogicAddr(), "Delay", "sniperHW")
		assert.Nil(t, err)
	}()

	time.Sleep(time.Second)

	beg := time.Now()
	localDiscovery.RemoveNode(node1Addr.LogicAddr())
	logger.Debugf("waitting...")
	node1.Wait() // Wait returns only after the in-flight Delay call has replied.
	// time.Since(beg) replaces time.Now().Sub(beg) (staticcheck S1012).
	logger.Debugf("wait:%v", time.Since(beg))

	_, err = node2.GetAddrByType(1)
	assert.NotNil(t, err)

	localDiscovery.RemoveNode(node2Addr.LogicAddr())
	node2.Wait()

}

// TestHarbor verifies cross-cluster routing: node1 (cluster 1) is exported and
// reached from node2 (cluster 2) through the harbor nodes. It also checks that
// routing fails once the target node or its harbor is removed.
func TestHarbor(t *testing.T) {
	localDiscovery := &localMemberShip{
		nodes: map[addr.LogicAddr]*membership.Node{},
	}

	// cluster:1
	node1Addr, _ := addr.MakeAddr("1.1.1", "localhost:18110")
	harbor1Addr, _ := addr.MakeHarborAddr("1.255.1", "localhost:19110")

	// cluster:2
	node2Addr, _ := addr.MakeAddr("2.2.1", "localhost:18111")
	harbor2Addr, _ := addr.MakeHarborAddr("2.255.1", "localhost:19111")

	localDiscovery.AddNode(&membership.Node{
		Addr:      node1Addr,
		Available: true,
		Export:    true, // expose this node outside its cluster
	})

	localDiscovery.AddNode(&membership.Node{
		Addr:      harbor1Addr,
		Available: true,
	})

	localDiscovery.AddNode(&membership.Node{
		Addr:      node2Addr,
		Available: true,
	})

	localDiscovery.AddNode(&membership.Node{
		Addr:      harbor2Addr,
		Available: true,
	})

	node1 := NewClusterNode(&JsonCodec{})
	node1.RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, _ addr.LogicAddr, msg proto.Message) {
		logger.Debug(msg.(*ss.Echo).Msg)
	})

	registerService(node1, "hello", func(_ context.Context, replyer *rpcgo.Replyer, arg *string) {
		logger.Debugf("on hello call,channel:%s", replyer.Channel().Name())
		replyer.Reply(fmt.Sprintf("hello world:%s", *arg))
	})

	err := node1.Start(localDiscovery, node1Addr.LogicAddr())
	assert.Nil(t, err)

	node2 := NewClusterNode(JsonCodec{})
	err = node2.Start(localDiscovery, node2Addr.LogicAddr())
	assert.Nil(t, err)

	harbor1 := NewClusterNode(JsonCodec{})
	err = harbor1.Start(localDiscovery, harbor1Addr.LogicAddr())
	assert.Nil(t, err)

	harbor2 := NewClusterNode(JsonCodec{})
	err = harbor2.Start(localDiscovery, harbor2Addr.LogicAddr())
	assert.Nil(t, err)

	var type1Addr addr.LogicAddr

	// Poll until the exported node of type 1 becomes visible across clusters.
	for {
		if type1Addr, err = node2.GetAddrByType(1); err == nil {
			break
		}
		time.Sleep(time.Millisecond) // avoid hot-spinning while route info propagates
	}

	node1.SendPbMessage(type1Addr, &ss.Echo{
		Msg: "hello",
	})

	resp, err := call[string, string](context.TODO(), node2, type1Addr, "hello", "sniperHW")
	assert.Nil(t, err)
	assert.Equal(t, *resp, "hello world:sniperHW")

	// Remove 1.1.1: routing to it must fail.
	localDiscovery.RemoveNode(node1Addr.LogicAddr())
	_, err = call[string, string](context.TODO(), node2, type1Addr, "hello", "sniperHW")
	assert.Equal(t, "route message to target:1.1.1 failed", err.Error())
	logger.Debug(err)

	// Remove harbor1: routing must keep failing.
	localDiscovery.RemoveNode(harbor1Addr.LogicAddr())

	_, err = call[string, string](context.TODO(), node2, type1Addr, "hello", "sniperHW")
	assert.Equal(t, "route message to target:1.1.1 failed", err.Error())
	logger.Debug(err)

	node1.Stop()
	node2.Stop()
	harbor1.Stop()
	harbor2.Stop()

	node1.Wait()
	node2.Wait()
	harbor1.Wait()
	harbor2.Wait()
}

// TestStream verifies smux stream support: node1 echoes whatever it reads on
// each accepted stream, and node2 opens two streams in sequence.
func TestStream(t *testing.T) {
	localDiscovery := &localMemberShip{
		nodes: map[addr.LogicAddr]*membership.Node{},
	}

	node1Addr, _ := addr.MakeAddr("1.1.1", "localhost:18110")
	node2Addr, _ := addr.MakeAddr("1.2.1", "localhost:18111")

	localDiscovery.AddNode(&membership.Node{
		Addr:      node1Addr,
		Available: true,
	})

	localDiscovery.AddNode(&membership.Node{
		Addr:      node2Addr,
		Available: true,
	})

	node1 := NewClusterNode(JsonCodec{})

	// Echo server: read once, write the same bytes back, then close the stream.
	node1.StartSmuxServer(func(s *smux.Stream) {
		go func() {
			buff := make([]byte, 64)
			n, _ := s.Read(buff)
			s.Write(buff[:n])
			s.Close()
		}()
	})

	err := node1.Start(localDiscovery, node1Addr.LogicAddr())
	assert.Nil(t, err)

	node2 := NewClusterNode(JsonCodec{})
	err = node2.Start(localDiscovery, node2Addr.LogicAddr())
	assert.Nil(t, err)

	logger.Debug("Start OK")
	{
		ss, err := node2.OpenStream(node1Addr.LogicAddr())
		if err != nil {
			panic(err)
		}

		ss.Write([]byte("hello"))
		buff := make([]byte, 64)
		n, _ := ss.Read(buff)
		assert.Equal(t, "hello", string(buff[:n]))
		ss.Close()
	}

	{
		ss, err := node2.OpenStream(node1Addr.LogicAddr())
		if err != nil {
			panic(err)
		}

		ss.Write([]byte("hello"))
		buff := make([]byte, 64)
		n, _ := ss.Read(buff)
		assert.Equal(t, "hello", string(buff[:n]))
		ss.Close()
	}

	localDiscovery.RemoveNode(node1Addr.LogicAddr())
	node1.Wait()

	_, err = node2.GetAddrByType(1)
	assert.NotNil(t, err)

	localDiscovery.RemoveNode(node2Addr.LogicAddr())
	node2.Wait()

}

// TestBiDirectionDial verifies that when two nodes dial each other
// simultaneously, exactly one connection wins and both messages are delivered.
func TestBiDirectionDial(t *testing.T) {
	localDiscovery := &localMemberShip{
		nodes: map[addr.LogicAddr]*membership.Node{},
	}

	node1Addr, _ := addr.MakeAddr("1.1.1", "localhost:18110")
	node2Addr, _ := addr.MakeAddr("1.2.1", "localhost:18111")

	localDiscovery.AddNode(&membership.Node{
		Addr:      node1Addr,
		Available: true,
	})

	localDiscovery.AddNode(&membership.Node{
		Addr:      node2Addr,
		Available: true,
	})

	node1 := NewClusterNode(JsonCodec{})

	var wait sync.WaitGroup

	wait.Add(2)

	node1.RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, from addr.LogicAddr, msg proto.Message) {
		logger.Debug("message from ", from.String())
		wait.Done()
	})

	node2 := NewClusterNode(JsonCodec{})

	node2.RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, from addr.LogicAddr, msg proto.Message) {
		logger.Debug("message from ", from.String())
		wait.Done()
	})

	err := node1.Start(localDiscovery, node1Addr.LogicAddr())
	assert.Nil(t, err)

	err = node2.Start(localDiscovery, node2Addr.LogicAddr())
	assert.Nil(t, err)

	logger.Debug("Start OK")

	// Both sides send to each other at the same time, so both dial concurrently;
	// in the end only one communication connection should be established.
	go func() {
		node2.SendPbMessage(node1Addr.LogicAddr(), &ss.Echo{
			Msg: "hello",
		})
	}()

	go func() {
		node1.SendPbMessage(node2Addr.LogicAddr(), &ss.Echo{
			Msg: "hello",
		})
	}()

	wait.Wait()

	localDiscovery.RemoveNode(node1Addr.LogicAddr())
	node1.Wait()

	localDiscovery.RemoveNode(node2Addr.LogicAddr())
	node2.Wait()

}

// TestDefault exercises the package-level default node API (Start, Call,
// SendPbMessage, SendBinMessage, OpenStream, Wait) plus membership
// add/modify/remove churn around a second, explicit node.
func TestDefault(t *testing.T) {

	RPCCodec = JsonCodec{}

	localDiscovery := &localMemberShip{
		nodes: map[addr.LogicAddr]*membership.Node{},
	}

	node1Addr, _ := addr.MakeAddr("1.1.1", "localhost:18110")
	node2Addr, _ := addr.MakeAddr("1.2.3", "localhost:18111")

	localDiscovery.AddNode(&membership.Node{
		Addr:      node1Addr,
		Available: true,
	})

	localDiscovery.AddNode(&membership.Node{
		Addr:      node2Addr,
		Available: true,
	})

	RegisterProtoHandler(&ss.Echo{}, func(_ context.Context, _ addr.LogicAddr, msg proto.Message) {
		logger.Debug(msg.(*ss.Echo).Msg)
	})

	RegisterService("hello", func(_ context.Context, replyer *rpcgo.Replyer, arg *string) {
		logger.Debugf("on hello call,channel:%s", replyer.Channel().Name())
		replyer.Reply(fmt.Sprintf("hello world:%s", *arg))
	})

	RegisterBinaryHandler(1, func(_ context.Context, from addr.LogicAddr, cmd uint16, msg []byte) {
		logger.Debugf("from:%v,cmd:%d,msg:%v", from.String(), cmd, string(msg))
	})

	err := Start(localDiscovery, node1Addr.LogicAddr())
	assert.Nil(t, err)

	// Send a message to self.
	SendPbMessage(node1Addr.LogicAddr(), &ss.Echo{Msg: "hello"})

	SendBinMessage(node1Addr.LogicAddr(), 1, []byte("binMessage"))

	// Call own "hello" service.
	resp, err := Call[string, string](context.TODO(), node1Addr.LogicAddr(), "hello", "sniperHW")
	assert.Nil(t, err)
	assert.Equal(t, *resp, "hello world:sniperHW")

	_, err = OpenStream(node1Addr.LogicAddr())
	assert.Equal(t, err.Error(), "cant't open stream to self")

	node2 := NewClusterNode(JsonCodec{})
	err = node2.Start(localDiscovery, node2Addr.LogicAddr())
	assert.Nil(t, err)

	Log().Debug("Start OK")

	node2.SendPbMessage(node1Addr.LogicAddr(), &ss.Echo{
		Msg: "hello",
	})

	logger.Debug("send bin")
	node2.SendBinMessage(node1Addr.LogicAddr(), 1, []byte("binMessage"))

	node3Addr, _ := addr.MakeAddr("1.2.1", "localhost:18113")

	localDiscovery.AddNode(&membership.Node{
		Addr:      node3Addr,
		Available: true,
	})

	time.Sleep(time.Second)

	localDiscovery.ModifyNode(&membership.Node{
		Addr:      node3Addr,
		Available: false,
	})

	time.Sleep(time.Second)
	// Change the network address.
	node3Addr, _ = addr.MakeAddr("1.2.1", "localhost:18114")
	localDiscovery.ModifyNode(&membership.Node{
		Addr:      node3Addr,
		Available: false,
	})

	// Restore availability.
	time.Sleep(time.Second)
	localDiscovery.ModifyNode(&membership.Node{
		Addr:      node3Addr,
		Available: true,
	})

	time.Sleep(time.Second)
	// Remove node3.
	localDiscovery.RemoveNode(node3Addr.LogicAddr())

	time.Sleep(time.Second)

	node4Addr, _ := addr.MakeAddr("1.2.5", "localhost:18115")
	localDiscovery.AddNode(&membership.Node{
		Addr:      node4Addr,
		Available: true,
	})

	time.Sleep(time.Second)
	localDiscovery.RemoveNode(node4Addr.LogicAddr())

	// resp is a *string (from Call above); the RPC client decodes into it via
	// &resp, so the assertion must compare the dereferenced value — comparing
	// the pointer itself against a string could never succeed.
	err = node2.GetRPCClient().Call(context.TODO(), node1Addr.LogicAddr(), "hello", "sniperHW", &resp)
	assert.Nil(t, err)
	assert.Equal(t, *resp, "hello world:sniperHW")

	localDiscovery.RemoveNode(node1Addr.LogicAddr())
	Wait()

	_, err = node2.GetAddrByType(1)
	assert.NotNil(t, err)

	localDiscovery.RemoveNode(node2Addr.LogicAddr())
	node2.Wait()
}
--------------------------------------------------------------------------------