├── .gitignore
├── Dockerfile
├── README.md
├── build.sh
├── conf
│   ├── .DS_Store
│   ├── cluster.toml
│   ├── log.xml
│   └── registry_demo.yaml
├── doc
│   ├── communicate_protocol.png
│   ├── kiteq.001.png
│   ├── kiteq_arch.graffle
│   ├── kiteq_arch.png
│   ├── kiteq_dep.png
│   └── logo.jpg
├── exchange
│   ├── bind_exchanger.go
│   └── bind_exchanger_test.go
├── go.mod
├── go.sum
├── handler
│   ├── accept_event.go
│   ├── access_event.go
│   ├── check_msg.go
│   ├── deliver_pre.go
│   ├── deliver_qos.go
│   ├── deliver_result.go
│   ├── delivery_registry.go
│   ├── delivery_registry_test.go
│   ├── handler.go
│   ├── heartbeat.go
│   ├── packet_decode.go
│   ├── persistent_msg.go
│   ├── pipe_events.go
│   ├── remote_future.go
│   ├── tx_ack.go
│   └── validate_event.go
├── kiteq.go
├── kiteq.sh
├── server
│   ├── kite_server_config.go
│   ├── kite_server_config_test.go
│   ├── kiteq_server.go
│   ├── kiteq_server_monitor.go
│   ├── recover_manager.go
│   └── recover_manager_test.go
├── store
│   ├── file
│   │   ├── kite_file_store.go
│   │   ├── kite_file_store_test.go
│   │   ├── kite_message_store.go
│   │   ├── kite_message_store_test.go
│   │   ├── kite_segment.go
│   │   └── kite_segment_log.go
│   ├── kite_mock_store.go
│   ├── kite_store.go
│   ├── memory
│   │   └── kite_memory_store.go
│   ├── mysql
│   │   ├── kite_mysql.go
│   │   ├── kite_mysql_batch.go
│   │   ├── kite_mysql_convertor.go
│   │   ├── kite_mysql_convertor_test.go
│   │   ├── kite_mysql_shard.go
│   │   ├── kite_mysql_shard_test.go
│   │   ├── kite_mysql_test.go
│   │   ├── kite_sql_wrapper.go
│   │   ├── stmt_pool.go
│   │   ├── stmt_pool_test.go
│   │   └── table.sh
│   ├── parser
│   │   ├── kite_store_parser.go
│   │   └── kite_store_parser_test.go
│   └── rocksdb
│       ├── rocksdb_store.go
│       └── rocksdb_store_test.go
└── tools
    └── kite_store_tools.go
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Object files, Static and Dynamic libs (Shared Objects)
2 | *.o
3 | *.a
4 | *.so
5 |
6 | # Folders
7 | _obj
8 | _test
9 |
10 | # Architecture specific extensions/prefixes
11 | *.[568vq]
12 | [568vq].out
13 |
14 | *.cgo1.go
15 | *.cgo2.c
16 | _cgo_defun.c
17 | _cgo_gotypes.go
18 | _cgo_export.*
19 |
20 | _testmain.go
21 |
22 | *.exe
23 | *.test
24 | *.prof
25 |
26 | kite_benchmark_consumer
27 | kite_benchmark_producer
28 | kiteq
29 |
30 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Note: our source code and working directory both live in /go/src/app/
2 | # the executable binary is always /go/src/app/bootstrap
3 | # configuration files always live under /go/src/app/conf
4 |
5 |
6 | # build stage 1: compile the executable binary
7 | FROM golang:1.16.4 AS builder
8 | LABEL maintainer="blackbeans.zc@gmail.com"
9 |
10 | # set up the ssh key
11 | ARG SSH_PRIVATE_KEY=""
12 | RUN mkdir /root/.ssh
13 | RUN echo "${SSH_PRIVATE_KEY}" > /root/.ssh/id_rsa
14 | RUN chmod 600 /root/.ssh/id_rsa
15 | RUN touch /root/.ssh/known_hosts
16 | RUN ssh-keyscan github.com >> /root/.ssh/known_hosts
17 |
18 |
19 | # configure goproxy
20 | RUN go env -w GO111MODULE=on
21 | RUN go env -w GOPROXY=https://mirrors.aliyun.com/goproxy/,direct
22 |
23 | # set go build flags
24 | # with CGO_ENABLED=0 the build output is a fully static binary independent of the OS; otherwise minimal images such as scratch cannot run it
25 | RUN go env -w CGO_ENABLED=0
26 |
27 | COPY . /go/src/app/
28 | WORKDIR /go/src/app/
29 |
30 | # build the executable with go, reusing earlier build caches
31 | RUN --mount=type=cache,target=/go/pkg/mod \
32 | --mount=type=cache,target=/root/.cache/go-build \
33 | go build -v -o /go/src/app/bootstrap /go/src/app/kiteq.go
34 |
35 | RUN rm /root/.ssh/id_rsa
36 |
37 | # build stage 2: copy the binary and configuration files into the base image
38 | FROM scratch
39 |
40 | COPY --from=builder /go/src/app/bootstrap /go/src/app/bootstrap
41 | COPY --from=builder /go/src/app/conf /go/src/app/conf
42 |
43 |
44 | WORKDIR /go/src/app/
45 |
46 | # the final command docker runs
47 | ENTRYPOINT ["./bootstrap"]
48 |
49 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | KiteQ 
2 | =======
3 |
4 | An MQ framework implemented in Go + Protobuf, supporting multiple persistence backends
5 |
6 | #### WIKI (https://github.com/blackbeans/kiteq/wiki)
7 |
8 | #### Overview
9 | * Maintains producer, subscriber and broker relationships via zk/etcd, supporting both horizontal and vertical scaling
10 | * Subscriptions are based on topic plus a second-level messageType
11 | * Multiple persistence layers for message storage: mysql and file-based backends (file, rocksdb)
12 | * Guarantees reliable asynchronous delivery
13 | * Supports two-phase-commit distributed transactions
14 | * Custom per-group, Topic-level flow control to protect subscribers
15 | * Traffic protection for the kiteserver itself
16 | * Clients report a warm-up period when connecting; push volume ramps up gradually over that period
17 | * Client Supported: Java, Go, CPP, PHP
18 |
19 | #### Project layout
20 | kiteq/
21 | ├── README.md
22 | ├── conf      configuration
23 | ├── log       log4go configuration
24 | ├── build.sh  build script
25 | ├── doc       documentation
26 | ├── handler   the handlers KiteQ needs
27 | ├── kiteq.go  KiteQ startup entry point
28 | └── server    components that assemble the KiteQ server
29 |
30 | ##### Concepts:
31 |
32 | * Binding: a subscription relationship; the data structure describing a subscription to a kind of message
33 | * Consumer : the message consumer
34 | * Producer : the message producer
35 | * Topic: the message subject, e.g. Trade; usually corresponds to a business domain
36 | * MessageType: the second-level message type, e.g. pay-succ-200 under Trade for successful payments
37 |
38 | #### Zookeeper data layout
39 |
40 | KiteServer : /kiteq/server/${topic}/ip:port
41 | Producer : /kiteq/pub/${topic}/${groupId}/ip:port
42 | Consumer : /kiteq/sub/${topic}/${groupId}-bind/#$data(bind)
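For example, a broker at 192.168.1.10:13800 (illustrative address) serving topic "trade" registers itself as /kiteq/server/trade/192.168.1.10:13800, and the bindings of subscriber group "s-trade-001" for that topic live under /kiteq/sub/trade/s-trade-001-bind (the node name used in exchange/bind_exchanger_test.go).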
43 |
44 | ##### Subscription modes:
45 |
46 | Direct (direct subscription): subscribe with an exact Topic + MessageType
47 | Regx (regex subscription): within a Topic, subscribe to MessageTypes matching a regular expression
48 | Fanout (broadcast subscription): within a Topic, subscribe to every MessageType. A sketch of declaring bindings follows.
49 |
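A minimal sketch of declaring and publishing such bindings, based on the API exercised in exchange/bind_exchanger_test.go (the registry address and group/topic names are illustrative):

```go
package main

import (
	"context"

	"github.com/blackbeans/kiteq-common/registry"
)

func main() {
	// connect to the registry (zookeeper here; file:// also works for local dev)
	center := registry.NewRegistryCenter(context.TODO(), "zk://localhost:2181")

	// a Direct binding for trade/trade-succ-200 and a Regx binding for feed/feed-geo-*;
	// -1 leaves the watermark at its default, true marks the subscription persistent
	bindings := []*registry.Binding{
		registry.Bind_Direct("s-trade-001", "trade", "trade-succ-200", -1, true),
		registry.Bind_Regx("s-trade-001", "feed", "feed-geo-*", -1, true),
	}

	// publish the group's bindings under /kiteq/sub
	if err := center.PublishBindings("s-trade-001", bindings); err != nil {
		panic(err)
	}
}
```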
50 | ##### Persistent and non-persistent subscriptions
51 |
52 | Persistent subscription: even if every machine in the subscriber group goes offline, messages are not discarded; KiteQ pushes them once the group is back online
53 |
54 | Non-persistent subscription: once every machine in the subscriber group is offline, messages are not retained and are discarded immediately
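In conf/registry_demo.yaml this is the per-binding "persistent: true" flag; the trailing boolean in registry.Bind_Direct(...) appears to set the same property.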
55 |
56 | #### Module diagram
57 | 
58 |
59 | - Description:
60 |
61 | [KiteQ-Common](https://github.com/blackbeans/kiteq-common) provides the storage layer, the wire protocol, and zk management
62 |
63 | [KiteQ-OPS](https://github.com/blackbeans/kiteq-ops) provides graphical monitoring of KiteQ clusters
64 |
65 | [Turbo](https://github.com/blackbeans/turbo) provides the generic network-layer abstraction
66 |
67 | #### Architecture diagram
68 | 
69 |
70 | - Flow description:
71 |
72 | KiteQ:
73 |
74 | 1.1 On startup, KiteQ publishes the topics it can accept and deliver to the registry's (zk/etcd) /kiteq/server node, and watches for changes to the bindings of the groups subscribed to those topics
75 |
76 | 1.2 When a subscription changes, the registry notifies KiteQ through its watch mechanism; KiteQ updates its local bindings and can start delivering messages to newly added groups
77 |
78 | Producer:
79 |
80 | 2.1 On startup, the Producer publishes the topic list it sends to the registry's /kiteq/pub node (this is only used for centralized management of publishing groups)
81 |
82 | 2.2 Using its configured topic list, it looks up the IP:Port of the matching KiteQ nodes in the registry and watches those nodes, updating its local KiteQ address list on changes
83 |
84 | 3.1 It opens long-lived TCP connections to the resolved IP:Port addresses and keeps a fixed-interval heartbeat
85 |
86 | 3.2 Once KiteQ has authenticated the connection, it acknowledges that the connection is established; from then on the Producer can publish messages.
87 |
88 | Consumer:
89 |
90 | 2.1 On startup, the Consumer publishes its group's subscription Bindings to the registry's /kiteq/sub node.
91 |
92 | 2.2 Using the topics from its local Bindings, it looks up the IP:Port of the matching KiteQ nodes in the registry and watches those nodes, updating its local KiteQ address list on changes
93 |
94 | 3.1, 3.2 Same as for the Producer; after 3.2 completes, KiteQ pushes each matching message to one random Consumer within the group
95 |
96 | KiteQ-OPS:
97 |
98 | Collects system data from the registry and from the HTTP interface KiteQ exposes, and presents KiteQ's current state as charts
99 |
100 | - Note:
101 | Publisher and subscriber are not exclusive roles: the same group can publish messages and subscribe to them at the same time.
102 |
103 | ##### Two-phase commit:
104 |
105 | Because delivery is asynchronous, some scenarios require that a message only become consumable once a local transaction has committed successfully.
106 | For example:
107 | when a user pays for a membership, we must debit the user's balance in the local Mysql account table and tell the membership system to extend the user's membership.
108 | The membership system may only receive the extension message if the mysql operation succeeded.
109 |
110 | KiteQ handles this as follows:
111 | 1. Send an UnCommit message to KiteQ; KiteQ does not deliver UnCommit messages
112 | 2. KiteQ periodically sends the Producer TxAck inquiries about UnCommit messages
113 | 3. This repeats until the Producer explicitly answers Commit or Rollback for the message
114 | 4. Commit follows the normal delivery flow; Rollback rolls the message back, i.e. deletes it. A producer-side sketch follows.
115 |
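A minimal producer-side sketch of this flow. The real client API lives in kiteq-client-go and is not part of this repository, so every type and method below (`Client`, `SendUnCommitted`, `Commit`, `Rollback`) is a hypothetical placeholder; only the commit/rollback decision logic is the point:

```go
package producer

import "database/sql"

// Client and TxMessage are hypothetical placeholders for the
// kiteq-client-go types; they are NOT the real client API.
type TxMessage struct {
	Topic, MessageType string
	Body               []byte
}

type Client struct{ /* connection state elided */ }

func (c *Client) SendUnCommitted(m *TxMessage) error { return nil } // send as UnCommit
func (c *Client) Commit(m *TxMessage) error          { return nil } // normal delivery proceeds
func (c *Client) Rollback(m *TxMessage) error        { return nil } // message is deleted

// publishAfterLocalTx sends the message as UnCommit, runs the local
// transaction, and answers Commit or Rollback depending on the outcome.
// If our answer is lost, KiteQ re-asks via its periodic TxAck inquiry.
func publishAfterLocalTx(c *Client, db *sql.DB, m *TxMessage) error {
	if err := c.SendUnCommitted(m); err != nil {
		return err
	}
	tx, err := db.Begin()
	if err != nil {
		return c.Rollback(m)
	}
	// e.g. debit the user's local account balance
	if _, err := tx.Exec("UPDATE account SET balance = balance - ? WHERE id = ?", 100, 1); err != nil {
		tx.Rollback()
		return c.Rollback(m)
	}
	if err := tx.Commit(); err != nil {
		return c.Rollback(m)
	}
	// local transaction succeeded: the message may now be delivered
	return c.Commit(m)
}
```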
116 | ##### Quickstart
117 |
118 | DockerHub
119 |
120 | ```shell
121 |
122 | #install zookeeper
123 | docker pull zookeeper
124 | #start zookeeper
125 | docker run --name zk001 --network=host -t zookeeper
126 | #start kiteq server
127 | docker run --name kiteq001 --network=host -d hubean/kiteq:v1.0.0 -clusterName=rocksdb_dev -configPath=./conf/cluster.toml -pport=13801 -bind=:13800
128 |
129 | #start your client
130 |
131 | # code with kiteq-client-go (https://github.com/blackbeans/kiteq-client-go)
132 | ```
133 |
134 |
135 | * Note :
136 |
137 | KiteQ's config file is conf/cluster.toml
138 |
139 | You can start KiteQ with 'sh kiteq.sh -clusterName=${cluster_name} -configPath=${toml_path}'
140 |
141 | We recommend using supervisor to manage kiteq.
142 |
143 | * Registry of KiteQ (these URIs are what the BindExchanger consumes; see the sketch below):
144 |
145 | zk: zk://localhost:2181,localhost:2181 (stable)
146 |
147 | etcd: etcd://http://localhost:2379,http://localhost:2379 (unstable)
148 |
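On the broker side these URIs are handed to the BindExchanger (exchange/bind_exchanger.go), which registers the broker and watches bindings. A minimal sketch with illustrative addresses:

```go
package main

import (
	"context"

	"kiteq/exchange"
)

func main() {
	// zk://... selects the zookeeper registry; the second argument is this broker's address
	ex := exchange.NewBindExchanger(context.TODO(), "zk://localhost:2181", "127.0.0.1:13800")

	// announce the topics this broker accepts and delivers,
	// and start watching their subscription bindings
	if !ex.PushQServer("localhost:13800", []string{"trade", "feed"}) {
		panic("PushQServer failed")
	}
	defer ex.Shutdown()
}
```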
149 | #### KiteQ Start Mode
150 |
151 | - How to use Rocksdb for kiteq's message storage
152 |
153 | ```shell
154 |
155 | go run kiteq.go -clusterName=rocksdb_dev -configPath=./conf/cluster.toml -pport=13801 -bind=:13800
156 |
157 | ```
158 |
159 | - How to use memory for kiteq's message storage
160 |
161 | ```shell
162 |
163 | go run kiteq.go -clusterName=memory_dev -configPath=./conf/cluster.toml -pport=13801 -bind=:13800
164 |
165 | ```
166 |
167 | - How to use RDS/Mysql for kiteq's message storage
168 |
169 | ```shell
170 |
171 | # install mysql & initialize the message table by running store/mysql/table.sh
172 |
173 | go run kiteq.go -clusterName=mysql_dev -configPath=./conf/cluster.toml -pport=13801 -bind=:13800
174 |
175 | ```
176 |
177 | ##### Client Supported:
178 |
179 | [Go-Client](https://github.com/blackbeans/kiteq-client-go)
180 |
181 | [Java-Client](https://github.com/blackbeans/kiteq-client-java)
182 |
183 | [PHP-Client](https://github.com/blackbeans/kiteq-client-php) (Deprecated)
184 |
185 | [CPP-Client](https://github.com/quguangjie/kiteq-client-cpp) (Deprecated)
186 |
187 | #### Contact us
188 |
189 | QQ Group: 139685004 (KiteQ users group)
190 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #protoc --go_out=. ./protocol/*.proto
4 |
5 | ##############
6 | echo "------------ components installation finished! -------------"
7 |
8 | PROJ=`pwd | awk -F'/' '{print $(NF)}'`
9 | #VERSION=$1
10 | go build -o ./$PROJ $PROJ.go
11 |
12 |
13 | #CGO_ENABLED=0 GOOS=linux GOARCH=386 go build -a -o ./$PROJ $PROJ.go
14 | #GOOS=darwin GOARCH=386 go build -a -o ./$PROJ $PROJ.go
15 |
16 | #tar -zcvf kiteq-1.0.2-linux-386.tar.gz $PROJ log/log.xml conf/*.toml
17 | #tar -zcvf kiteq-1.0.2-darwin-386.tar.gz $PROJ log/log.xml conf/*.toml
18 | tar -zcvf kiteq.tar.gz $PROJ conf/*.toml
19 |
--------------------------------------------------------------------------------
/conf/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackbeans/kiteq/8cc04e2c1fd54908a3a6fc18a02751f4af4923a4/conf/.DS_Store
--------------------------------------------------------------------------------
/conf/cluster.toml:
--------------------------------------------------------------------------------
1 | # registry management
2 | [registry]
3 | [registry.dev]
4 | # hosts="zk://localhost:2181"
5 | hosts="file://./conf/registry_demo.yaml"
6 | [registry.online]
7 | hosts="zk://vm-bibi-zk-mq001.vm:2181,vm-bibi-zk-mq002.vm:2181,vm-bibi-zk-mq003.vm:2181"
8 |
9 | # kiteq broker clusters
10 | [clusters]
11 | [clusters.rocksdb_dev]
12 | env="dev" #env=dev/online
13 | topics=["relation","message","user-profile"]
14 | dlqExecHour=2
15 | deliveryFirst=false
16 | logxml="./conf/log.xml"
17 | db="rocksdb://./db/"
18 | deliverySeconds=5
19 | maxDeliverWorkers=10000
20 | recoverSeconds=10
21 | recievePermitsPerSecond=20000
22 | [clusters.rocksdb_dev_2]
23 | env="dev" #env=dev/online
24 | topics=["relation","message","user-profile"]
25 | dlqExecHour=2
26 | deliveryFirst=false
27 | logxml="./conf/log.xml"
28 | db="rocksdb://./db2/"
29 | deliverySeconds=5
30 | maxDeliverWorkers=8000
31 | recoverSeconds=10
32 | recievePermitsPerSecond=20000
33 | [clusters.file_dev]
34 | env="dev" #env=dev/online
35 | topics=["relation","message","user-profile"]
36 | dlqExecHour=2
37 | deliveryFirst=false
38 | logxml="./conf/log.yaml"
39 | db="file://.?cap=10000000&checkSeconds=10"
40 | deliverySeconds=5
41 | maxDeliverWorkers=8000
42 | recoverSeconds=1
43 | recievePermitsPerSecond=20000
44 |
45 | [clusters.mysql_dev]
46 | env="dev" #env=dev/online
47 | topics=["relation","message","user-profile"]
48 | dlqExecHour=2
49 | deliveryFirst=false
50 | logxml="./conf/log.xml"
51 | db="mysql://localhost:3306?db=kite&username=root&batchUpdateSize=2000&batchDelSize=10000&flushSeconds=1&maxConn=20"
52 | deliverySeconds=5
53 | maxDeliverWorkers=8000
54 | recoverSeconds=10
55 | recievePermitsPerSecond=20000
56 |
57 | [clusters.memory_dev]
58 | env="dev" #env=dev/online
59 | topics=["relation","message","user-profile"]
60 | dlqExecHour=2
61 | deliveryFirst=false
62 | logxml="./conf/log.xml"
63 | db="memory://.?initcap=10000000&maxcap=2000000"
64 | deliverySeconds=5
65 | maxDeliverWorkers=8000
66 | recoverSeconds=10
67 | recievePermitsPerSecond=20000
68 |
69 |
--------------------------------------------------------------------------------
/conf/log.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | kiteq_server
4 | file
5 | INFO
6 | 100M
7 | 10M
8 |
9 |
10 | kiteq_store
11 | file
12 | INFO
13 | 100M
14 | 10M
15 |
16 |
17 | kiteq_handler
18 | file
19 | INFO
20 | 100M
21 | 10M
22 |
23 |
24 | kiteq_deliver
25 | file
26 | INFO
27 | 100M
28 | 10M
29 |
30 |
31 |
--------------------------------------------------------------------------------
/conf/registry_demo.yaml:
--------------------------------------------------------------------------------
1 | brokers:
2 | - address: "localhost:13800" # broker IP address
3 | topics: [ "relation","message","user-profile"]
4 | env: "dev"
5 | - address: "localhost:13900" # broker IP address
6 | topics: [ "relation","message","user-profile" ]
7 | env: "pre"
8 |
9 | # binding relationships
10 | bindings:
11 | # one group of bindings
12 | - groupIds: ["s-user-profile"]
13 | topic: "user-profile"
14 | messageType: "profile-update"
15 | bindType: "direct"
16 | watermark: 1000
17 | persistent: true
18 |
--------------------------------------------------------------------------------
/doc/communicate_protocol.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackbeans/kiteq/8cc04e2c1fd54908a3a6fc18a02751f4af4923a4/doc/communicate_protocol.png
--------------------------------------------------------------------------------
/doc/kiteq.001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackbeans/kiteq/8cc04e2c1fd54908a3a6fc18a02751f4af4923a4/doc/kiteq.001.png
--------------------------------------------------------------------------------
/doc/kiteq_arch.graffle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackbeans/kiteq/8cc04e2c1fd54908a3a6fc18a02751f4af4923a4/doc/kiteq_arch.graffle
--------------------------------------------------------------------------------
/doc/kiteq_arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackbeans/kiteq/8cc04e2c1fd54908a3a6fc18a02751f4af4923a4/doc/kiteq_arch.png
--------------------------------------------------------------------------------
/doc/kiteq_dep.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackbeans/kiteq/8cc04e2c1fd54908a3a6fc18a02751f4af4923a4/doc/kiteq_dep.png
--------------------------------------------------------------------------------
/doc/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackbeans/kiteq/8cc04e2c1fd54908a3a6fc18a02751f4af4923a4/doc/logo.jpg
--------------------------------------------------------------------------------
/exchange/bind_exchanger.go:
--------------------------------------------------------------------------------
1 | package exchange
2 |
3 | import (
4 | "context"
5 | "github.com/blackbeans/kiteq-common/registry"
6 | "github.com/blackbeans/logx"
7 | "github.com/blackbeans/turbo"
8 | "math"
9 | "sort"
10 | "sync"
11 | "time"
12 | )
13 |
14 | const (
15 | DEFAULT_WARTER_MARK = int32(6000)
16 | )
17 |
18 | var log = logx.GetLogger("kiteq_server")
19 |
20 | // BindExchanger manages subscription bindings and reacts to zookeeper binding changes
21 | type BindExchanger struct {
22 | exchanger map[string] /*topic*/ map[string] /*groupId*/ []*registry.Binding // stored subscription bindings
23 | limiters map[string] /*topic*/ map[string] /*groupId*/ *turbo.BurstyLimiter //topic->group->limiter
24 | topics []string // topics this broker can deliver
25 | lock sync.RWMutex
26 | registryCenter *registry.RegistryCenter
27 | kiteqserver string
28 | defaultLimiter *turbo.BurstyLimiter
29 | }
30 |
31 | func NewBindExchanger(parent context.Context, registryUri string,
32 | kiteQServer string) *BindExchanger {
33 | ex := &BindExchanger{
34 | exchanger: make(map[string]map[string][]*registry.Binding, 100),
35 | limiters: make(map[string]map[string]*turbo.BurstyLimiter, 100),
36 | topics: make([]string, 0, 50)}
37 | center := registry.NewRegistryCenter(parent, registryUri)
38 | center.RegisterWatcher(ex)
39 |
40 | //center.RegisterWatcher(PATH_SERVER, ex)
41 | //center.RegisterWatcher(PATH_SUB, ex)
42 | ex.registryCenter = center
43 | ex.kiteqserver = kiteQServer
44 | limiter, err := turbo.NewBurstyLimiter(int(DEFAULT_WARTER_MARK/2), int(DEFAULT_WARTER_MARK))
45 | if nil != err {
46 | panic(err)
47 | }
48 | ex.defaultLimiter = limiter
49 | return ex
50 | }
51 |
52 | //topics limiter
53 | func (self *BindExchanger) Topic2Limiters() map[string]map[string][]int {
54 | wrapper := make(map[string]map[string][]int, 2)
55 | self.lock.RLock()
56 | defer self.lock.RUnlock()
57 | for t, m := range self.limiters {
58 | wrapper[t] = make(map[string][]int, 2)
59 | for g, l := range m {
60 | val := make([]int, 0, 2)
61 | acquired, total := l.LimiterInfo()
62 | val = append(val, acquired)
63 | val = append(val, total)
64 | wrapper[t][g] = val
65 | }
66 | }
67 | return wrapper
68 | }
69 |
70 | // Topic2Groups returns the current topic -> groups mapping
71 | func (self *BindExchanger) Topic2Groups() map[string][]string {
72 | binds := make(map[string][]string, 10)
73 | for topic, groups := range self.exchanger {
74 | v, ok := binds[topic]
75 | if !ok {
76 | v = make([]string, 0, len(groups))
77 | }
78 |
79 | for g, _ := range groups {
80 | v = append(v, g)
81 | }
82 | binds[topic] = v
83 | }
84 | return binds
85 | }
86 |
87 | // PushQServer publishes this QServer and its topics to the registry
88 | func (self *BindExchanger) PushQServer(hostport string, topics []string) bool {
89 | err := self.registryCenter.PublishQServer(hostport, topics)
90 | if nil != err {
91 | log.Errorf("BindExchanger|PushQServer|FAIL|%s|%s|%s", err, hostport, topics)
92 | return false
93 | }
94 |
95 | // remove topics that are no longer served
96 | delTopics := make([]string, 0, 2)
97 | for _, t := range self.topics {
98 | exist := false
99 | for _, v := range topics {
100 | if v == t {
101 | exist = true
102 | break
103 | }
104 | }
105 | // topic was removed
106 | if !exist {
107 | delTopics = append(delTopics, t)
108 | }
109 | }
110 | // unpublish removed topics, if any
111 | if len(delTopics) > 0 {
112 | self.registryCenter.UnPublishQServer(hostport, delTopics)
113 | func() {
114 | self.lock.Lock()
115 | defer self.lock.Unlock()
116 | for _, t := range delTopics {
117 | // drop the corresponding topic
118 | delete(self.exchanger, t)
119 | }
120 | }()
121 | log.Infof("BindExchanger|UnpublishQServer|SUCC|%s|%s", hostport, delTopics)
122 | }
123 |
124 | // handle newly added topics
125 | addedTopics := make([]string, 0, 2)
126 | for _, t := range topics {
127 | exist := false
128 | for _, v := range self.topics {
129 | if v == t {
130 | exist = true
131 | break
132 | }
133 | }
134 | // not present before, so it is newly added
135 | if !exist {
136 | addedTopics = append(addedTopics, t)
137 | }
138 | }
139 | sort.Strings(topics)
140 | func() {
141 | self.lock.Lock()
142 | defer self.lock.Unlock()
143 | self.topics = topics
144 | }()
145 | // subscribe to binding changes
146 | succ := self.subscribeBinds(addedTopics)
147 | log.Infof("BindExchanger|PushQServer|SUCC|%s|%s", hostport, topics)
148 | return succ
149 | }
150 |
151 | // subscribeBinds watches binding changes for the given topics
152 | func (self *BindExchanger) subscribeBinds(topics []string) bool {
153 | self.lock.Lock()
154 | defer self.lock.Unlock()
155 | for _, topic := range topics {
156 | binds, err := self.registryCenter.GetBindAndWatch(topic)
157 | if nil != err {
158 | log.Errorf("BindExchanger|SubscribeBinds|FAIL|%s|%s", err, topic)
159 | return false
160 | } else {
161 | for groupId, bs := range binds {
162 | self.OnBindChanged(topic, groupId, bs)
163 | log.Infof("BindExchanger|SubscribeBinds|SUCC|%s|%s", topic, binds)
164 | }
165 | }
166 | }
167 |
168 | return true
169 | }
170 |
171 | // FindBinds returns the bindings (and their limiters) matching topic and messageType
172 | func (self *BindExchanger) FindBinds(topic string, messageType string, filter func(b *registry.Binding) bool) ([]*registry.Binding, map[string]*turbo.BurstyLimiter) {
173 | self.lock.RLock()
174 | defer self.lock.RUnlock()
175 | groups, ok := self.exchanger[topic]
176 | if !ok {
177 | return []*registry.Binding{}, nil
178 | }
179 |
180 | topicLimiters, ok := self.limiters[topic]
181 | limiters := make(map[string]*turbo.BurstyLimiter, 10)
182 | // bindings that match
183 | validBinds := make([]*registry.Binding, 0, 10)
184 | for _, binds := range groups {
185 | for _, b := range binds {
186 | // matches and is not filtered out
187 | if b.Matches(topic, messageType) && !filter(b) {
188 | validBinds = append(validBinds, b)
189 | if ok {
190 | limiter, gok := topicLimiters[b.GroupId]
191 | if gok {
192 | limiters[b.GroupId] = limiter
193 | } else {
194 | // no limiter for this group: inconsistent state (bug)
195 | }
196 | } else {
197 | // no limiters for this topic: inconsistent state (bug)
198 | }
199 | }
200 | }
201 | }
202 |
203 | return validBinds, limiters
204 | }
205 |
206 | // OnBindChanged is invoked when subscription bindings change
207 | func (self *BindExchanger) OnBindChanged(topic, groupId string, newbinds []*registry.Binding) {
208 |
209 | if len(groupId) <= 0 {
210 | delete(self.exchanger, topic)
211 | return
212 | }
213 |
214 | // if the topic is not handled by this server, drop the bindings
215 | if idx := sort.SearchStrings(self.topics, topic); idx == len(self.topics) || self.topics[idx] != topic {
216 | log.Warnf("BindExchanger|onBindChanged|UnAccept Bindings|%s|%s|%s", topic, self.topics, newbinds)
217 | return
218 | }
219 |
220 | v, ok := self.exchanger[topic]
221 |
222 | if !ok {
223 | v = make(map[string][]*registry.Binding, 10)
224 | self.exchanger[topic] = v
225 | }
226 |
227 | limiter, lok := self.limiters[topic]
228 | if !lok {
229 | limiter = make(map[string]*turbo.BurstyLimiter, 10)
230 | self.limiters[topic] = limiter
231 | }
232 |
233 | if len(newbinds) > 0 {
234 | v[groupId] = newbinds
235 |
236 | //create limiter for topic group
237 | waterMark := newbinds[0].Watermark
238 | if waterMark <= 0 {
239 | waterMark = DEFAULT_WARTER_MARK
240 | }
241 |
242 | waterMark = int32(math.Min(float64(waterMark), float64(DEFAULT_WARTER_MARK)))
243 |
244 | li, liok := limiter[groupId]
245 | if !liok || ((int32)(li.PermitsPerSecond()) != waterMark) {
246 | lim, err := turbo.NewBurstyLimiter(int(waterMark/2), int(waterMark))
247 | if nil != err {
248 | log.Errorf("BindExchanger|onBindChanged|NewBurstyLimiter|FAIL|%v|%v|%v|%v", err, topic, groupId, waterMark)
249 | lim = self.defaultLimiter
250 | }
251 | limiter[groupId] = lim
252 | }
253 | } else {
254 | delete(v, groupId)
255 | delete(limiter, groupId)
256 |
257 | }
258 | }
259 |
260 | // OnQServerChanged is invoked when QServer nodes change
261 | func (self *BindExchanger) OnQServerChanged(topic string, hosts []string) {
262 |
263 | }
264 |
265 | // OnSessionExpired is invoked when the zk session expires
266 | func (self *BindExchanger) OnSessionExpired() {
267 | err := self.registryCenter.PublishQServer(self.kiteqserver, self.topics)
268 | if nil != err {
269 | log.Errorf("BindExchanger|OnSessionExpired|PushQServer|FAIL|%s|%s|%s", err, self.kiteqserver, self.topics)
270 | return
271 | }
272 |
273 | // re-subscribe to binding changes
274 | succ := self.subscribeBinds(self.topics)
275 | log.Infof("BindExchanger|OnSessionExpired|SUCC|subscribeBinds|%v|%s|%s", succ, self.kiteqserver, self.topics)
276 |
277 | }
278 |
279 | // Shutdown closes the exchanger
280 | func (self *BindExchanger) Shutdown() {
281 | // unpublish this QServer
282 | self.registryCenter.UnPublishQServer(self.kiteqserver, self.topics)
283 | time.Sleep(10 * time.Second)
284 | self.registryCenter.Close()
285 | log.Infof("BindExchanger|Shutdown...")
286 | }
287 |
--------------------------------------------------------------------------------
/exchange/bind_exchanger_test.go:
--------------------------------------------------------------------------------
1 | package exchange
2 |
3 | import (
4 | "context"
5 | "github.com/blackbeans/go-zookeeper/zk"
6 | "github.com/blackbeans/kiteq-common/registry"
7 |
8 | "log"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func filter(b *registry.Binding) bool {
14 |
15 | return false
16 | }
17 |
18 | func TestSubscribeBindings(t *testing.T) {
19 | conn, _, _ := zk.Connect([]string{"localhost:2181"}, 10*time.Second)
20 | cleanUp(t, conn, "/kiteq")
21 |
22 | exchanger := NewBindExchanger(context.TODO(), "zk://localhost:2181", "127.0.0.1:13800")
23 | // publish the topics this server can serve
24 | topics := []string{"trade", "feed"}
25 | succ := exchanger.PushQServer("localhost:13800", topics)
26 | if !succ {
27 | t.Fail()
28 | t.Logf("PushQServer|FAIL|%v", succ)
29 | return
30 | }
31 |
32 | t.Log("TestSubscribeBindings|PushQServer|SUCC|.....")
33 |
34 | bindings := []*registry.Binding{registry.Bind_Direct("s-trade-001", "trade", "trade-succ-200", -1, true),
35 | registry.Bind_Regx("s-trade-001", "feed", "feed-geo-*", -1, true)}
36 |
37 | err := exchanger.registryCenter.PublishBindings("s-trade-001", bindings)
38 | if nil != err {
39 | t.Logf("TestSubscribeBindings|FAIL|%s|%s", err, "s-trade-001")
40 | return
41 | }
42 |
43 | time.Sleep(10 * time.Second)
44 |
45 | tradeBind, _ := exchanger.FindBinds("trade", "trade-succ-200", filter)
46 | t.Logf("trade trade-succ-200|%v", tradeBind)
47 | if len(tradeBind) != 1 {
48 | t.Fail()
49 | return
50 | }
51 |
52 | if !tradeBind[0].Matches("trade", "trade-succ-200") {
53 | t.Fail()
54 | return
55 | }
56 |
57 | feedBindU, _ := exchanger.FindBinds("feed", "feed-geo-update", filter)
58 |
59 | if len(feedBindU) != 1 {
60 | t.Fail()
61 | return
62 | }
63 |
64 | t.Logf("feed feed-geo-update|%v", feedBindU)
65 |
66 | if !feedBindU[0].Matches("feed", "feed-geo-update") {
67 | t.Fail()
68 | return
69 | }
70 |
71 | feedBindD, _ := exchanger.FindBinds("feed", "feed-geo-delete", filter)
72 | if len(feedBindD) != 1 {
73 | t.Fail()
74 | return
75 | }
76 |
77 | t.Logf("feed feed-geo-delete|%v", feedBindD)
78 |
79 | if !feedBindD[0].Matches("feed", "feed-geo-delete") {
80 | t.Fail()
81 | return
82 | }
83 |
84 | log.Println("start delete trade/s-trade-001-bind ....")
85 |
86 | time.Sleep(5 * time.Second)
87 | //"s-trade-001", "trade"
88 | // delete the topic+groupId binding node
89 | path := registry.KITEQ_SUB + "/trade/s-trade-001-bind"
90 | conn.Delete(path, -1)
91 | nodes, _, _ := conn.Children(registry.KITEQ_SUB + "/trade")
92 | t.Logf("trade trade-succ-200|delete|s-trade-001-bind|%v", nodes)
93 | time.Sleep(5 * time.Second)
94 |
95 | tradeBind, _ = exchanger.FindBinds("trade", "trade-succ-200", filter)
96 | t.Logf("trade trade-succ-200|no binding |%v", tradeBind)
97 | if len(tradeBind) != 0 {
98 | t.Fail()
99 | return
100 | }
101 | cleanUp(t, conn, "/kiteq")
102 | conn.Close()
103 | exchanger.Shutdown()
104 | }
105 |
106 | func cleanUp(t *testing.T, conn *zk.Conn, path string) {
107 |
108 | children, _, _ := conn.Children(path)
109 |
110 | // walk the children recursively and delete them
111 | for _, v := range children {
112 | tchildren, _, _ := conn.Children(path + "/" + v)
113 | if len(tchildren) <= 0 {
114 | // delete the leaf node
115 | conn.Delete(path+"/"+v, -1)
116 | time.Sleep(2 * time.Second)
117 | t.Logf("cleanUp|%s", path+"/"+v)
118 | } else {
119 | cleanUp(t, conn, path+"/"+v)
120 | }
121 | }
122 |
123 | // delete the current node
124 | conn.Delete(path, -1)
125 | }
126 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module kiteq
2 |
3 | go 1.16
4 |
5 | require (
6 | github.com/blackbeans/go-uuid v0.0.0-20160524092444-daf034455212
7 | github.com/blackbeans/go-zookeeper v0.0.0-20160315041820-e9ca0f2da2a7
8 | github.com/blackbeans/kiteq-common v0.0.0-20230518154807-df4546f9f853
9 | github.com/blackbeans/logx v0.0.0-20230518151533-7059fbb3d603
10 | github.com/blackbeans/turbo v0.0.0-20230518151841-580b848285bf
11 | github.com/cockroachdb/pebble v0.0.0-20210526183633-dd2a545f5d75
12 | github.com/go-sql-driver/mysql v1.7.1-0.20230308081629-af380e92cd24
13 | github.com/gogo/protobuf v1.3.2 // indirect
14 | github.com/golang/protobuf v1.5.3
15 | github.com/naoina/go-stringutil v0.1.0 // indirect
16 | github.com/naoina/toml v0.1.0
17 | github.com/valyala/bytebufferpool v1.0.0 // indirect
18 | )
19 |
--------------------------------------------------------------------------------
/handler/accept_event.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "github.com/blackbeans/kiteq-common/protocol"
7 | "github.com/blackbeans/kiteq-common/stat"
8 | "github.com/blackbeans/turbo"
9 | "kiteq/store"
10 | "os"
11 | "time"
12 | )
13 |
14 | //-------------------- concrete processing handlers below
15 | type AcceptHandler struct {
16 | turbo.BaseForwardHandler
17 | topics []string
18 | kiteserver string
19 | flowstat *stat.FlowStat
20 | limiter *turbo.BurstyLimiter
21 | }
22 |
23 | func NewAcceptHandler(name string, limiter *turbo.BurstyLimiter, flowstat *stat.FlowStat) *AcceptHandler {
24 | ahandler := &AcceptHandler{}
25 | ahandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, ahandler)
26 | hn, _ := os.Hostname()
27 | ahandler.kiteserver = hn
28 | ahandler.flowstat = flowstat
29 | ahandler.limiter = limiter
30 | return ahandler
31 | }
32 |
33 | func (self *AcceptHandler) TypeAssert(event turbo.IEvent) bool {
34 | _, ok := self.cast(event)
35 | return ok
36 | }
37 |
38 | func (self *AcceptHandler) cast(event turbo.IEvent) (val *acceptEvent, ok bool) {
39 | val, ok = event.(*acceptEvent)
40 | return
41 | }
42 |
43 | var INVALID_MSG_TYPE_ERROR = errors.New("INVALID MSG TYPE !")
44 |
45 | func (self *AcceptHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
46 | // log.Debug("AcceptHandler|Process|%s|%t", self.GetName(), event)
47 |
48 | ae, ok := self.cast(event)
49 | if !ok {
50 | return turbo.ERROR_INVALID_EVENT_TYPE
51 | }
52 |
53 | // validate the accepted event
54 | var msg *store.MessageEntity
55 | switch ae.msgType {
56 | case protocol.CMD_DELIVER_ACK:
57 | // delivery result received: attach the response directly
58 | // log.Debugf( "AcceptHandler|DELIVER_ACK|%s|%t", ae.opaque, ae.msg)
59 | ae.client.Attach(ae.opaque, ae.msg)
60 | return nil
61 | case protocol.CMD_HEARTBEAT:
62 | hb := ae.msg.(*protocol.HeartBeat)
63 | event = turbo.NewHeartbeatEvent(ae.client, ae.opaque, hb.GetVersion())
64 | ctx.SendForward(event)
65 | return nil
66 |
67 | case protocol.CMD_BYTES_MESSAGE:
68 | msg = store.NewMessageEntity(protocol.NewQMessage(ae.msg.(*protocol.BytesMessage)))
69 | case protocol.CMD_STRING_MESSAGE:
70 | msg = store.NewMessageEntity(protocol.NewQMessage(ae.msg.(*protocol.StringMessage)))
71 | default:
72 | // unsupported message type: this indicates a client bug
73 | log.Warnf("AcceptHandler|Process|%s|%t", INVALID_MSG_TYPE_ERROR, ae.msg)
74 | }
75 |
76 | // if we cannot acquire a rate-limit permit, reject the message
77 | if nil != msg && !self.limiter.Acquire() {
78 | remoteEvent := turbo.NewRemotingEvent(storeAck(ae.opaque,
79 | msg.Header.GetMessageId(), false,
80 | fmt.Sprintf("Store Result KiteQ OverFlow [%s]", ae.client.LocalAddr())),
81 | []string{ae.client.RemoteAddr()})
82 | ctx.SendForward(remoteEvent)
83 | return nil
84 | }
85 |
86 | if nil != msg {
87 | msg.PublishTime = time.Now().Unix()
88 | msg.KiteServer = self.kiteserver
89 | deliver := newPersistentEvent(msg, ae.client, ae.opaque)
90 |
91 | // statistics for received messages
92 | self.flowstat.IncrTopicReceiveFlow(msg.Topic, 1)
93 | self.flowstat.RecieveFlow.Incr(1)
94 | ctx.SendForward(deliver)
95 | return nil
96 | }
97 | return INVALID_MSG_TYPE_ERROR
98 | }
99 |
--------------------------------------------------------------------------------
/handler/access_event.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "github.com/blackbeans/kiteq-common/protocol"
5 | "github.com/blackbeans/turbo"
6 | )
7 |
8 | //---------------- auth handler
9 | type AccessHandler struct {
10 | turbo.BaseForwardHandler
11 | clientManager *turbo.ClientManager
12 | }
13 |
14 | //------ create the auth handler
15 | func NewAccessHandler(name string, clientManager *turbo.ClientManager) *AccessHandler {
16 | ahandler := &AccessHandler{}
17 | ahandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, ahandler)
18 | ahandler.clientManager = clientManager
19 | return ahandler
20 | }
21 |
22 | func (self *AccessHandler) TypeAssert(event turbo.IEvent) bool {
23 | _, ok := self.cast(event)
24 | return ok
25 | }
26 |
27 | func (self *AccessHandler) cast(event turbo.IEvent) (val *accessEvent, ok bool) {
28 | val, ok = event.(*accessEvent)
29 | return
30 | }
31 |
32 | func (self *AccessHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
33 |
34 | // log.Debug("accessEvent|Process|%s|%t", self.GetName(), event)
35 |
36 | aevent, ok := self.cast(event)
37 | if !ok {
38 | return turbo.ERROR_INVALID_EVENT_TYPE
39 | }
40 |
41 | // auth passed: register with the clientManager
42 | auth := turbo.NewGroupAuth(aevent.connMeta.GetGroupId(), aevent.connMeta.GetSecretKey())
43 | // record the reported warm-up period
44 | auth.WarmingupSec = int(aevent.connMeta.GetWarmingupSec())
45 | self.clientManager.Auth(auth, aevent.remoteClient)
46 | cmd := protocol.MarshalConnAuthAck(true, "Auth Succ")
47 | // response packet
48 | packet := turbo.NewRespPacket(aevent.opaque, protocol.CMD_CONN_AUTH, cmd)
49 |
50 | // write the packet back to this connection
51 | remoteEvent := turbo.NewRemotingEvent(packet, []string{aevent.remoteClient.RemoteAddr()})
52 |
53 | // forward for network transmission
54 | ctx.SendForward(remoteEvent)
55 | return nil
56 |
57 | }
58 |
--------------------------------------------------------------------------------
/handler/check_msg.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "regexp"
5 | "sort"
6 | "time"
7 |
8 | "sync"
9 |
10 | "github.com/blackbeans/kiteq-common/protocol"
11 | "github.com/blackbeans/turbo"
12 | )
13 |
14 | const (
15 | MAX_EXPIRED_TIME = 7 * 24 * 3600 * time.Second
16 | MAX_DELIVER_LIMIT = 100
17 | )
18 |
19 | var rc *regexp.Regexp
20 |
21 | func init() {
22 | rc = regexp.MustCompile("[0-9a-fA-F]{32}")
23 | }
24 |
25 | //---------------- message-check handler
26 | type CheckMessageHandler struct {
27 | turbo.BaseForwardHandler
28 | topicNotify chan []string
29 | topics []string
30 | sync.RWMutex
31 | }
32 |
33 | //------ create the check handler
34 | func NewCheckMessageHandler(name string, topicNotify chan []string) *CheckMessageHandler {
35 | phandler := &CheckMessageHandler{}
36 | phandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, phandler)
37 | phandler.topicNotify = topicNotify
38 | topics := <-topicNotify
39 | go func() {
40 | for {
41 | tmp := <-phandler.topicNotify
42 | func() {
43 | phandler.Lock()
44 | defer phandler.Unlock()
45 | phandler.topics = tmp
46 | }()
47 | }
48 | }()
49 | phandler.topics = topics
50 | return phandler
51 | }
52 |
53 | func (self *CheckMessageHandler) TypeAssert(event turbo.IEvent) bool {
54 | _, ok := self.cast(event)
55 | return ok
56 | }
57 |
58 | func (self *CheckMessageHandler) cast(event turbo.IEvent) (val *persistentEvent, ok bool) {
59 | val, ok = event.(*persistentEvent)
60 | return
61 | }
62 |
63 | func (self *CheckMessageHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
64 |
65 | pevent, ok := self.cast(event)
66 | if !ok {
67 | return turbo.ERROR_INVALID_EVENT_TYPE
68 | }
69 |
70 | if nil != pevent.entity {
71 |
72 | // first check whether the topic is one this server handles
73 | self.RLock()
74 |
75 | idx := sort.SearchStrings(self.topics, pevent.entity.Header.GetTopic())
76 | unsupportedTopic := idx == len(self.topics) || self.topics[idx] != pevent.entity.Header.GetTopic()
77 | self.RUnlock()
78 |
79 | if unsupportedTopic {
80 | // topic not handled here: report store failure immediately
81 | remoteEvent := turbo.NewRemotingEvent(
82 | storeAck(pevent.opaque,
83 | pevent.entity.Header.GetMessageId(), false, "UnSupport Topic Message!"),
84 | []string{pevent.remoteClient.RemoteAddr()})
85 | ctx.SendForward(remoteEvent)
86 | } else if !isUUID(pevent.entity.Header.GetMessageId()) {
87 | // invalid messageId: report store failure immediately
88 | remoteEvent := turbo.NewRemotingEvent(storeAck(pevent.opaque,
89 | pevent.entity.Header.GetMessageId(), false, "Invalid MessageId For UUID!"),
90 | []string{pevent.remoteClient.RemoteAddr()})
91 | ctx.SendForward(remoteEvent)
92 | } else {
93 | // validate and normalize header fields
94 | h := pevent.entity.Header
95 |
96 | //check createTime
97 | if h.GetCreateTime() <= 0 {
98 | h.CreateTime = protocol.MarshalInt64(time.Now().Unix())
99 | }
100 |
101 | if h.GetDeliverLimit() <= 0 || h.GetDeliverLimit() > MAX_DELIVER_LIMIT {
102 | h.DeliverLimit = protocol.MarshalInt32(MAX_DELIVER_LIMIT)
103 | //config entity value
104 | pevent.entity.DeliverLimit = MAX_DELIVER_LIMIT
105 | }
106 | if h.GetExpiredTime() <= 0 || h.GetExpiredTime() > time.Now().Add(MAX_EXPIRED_TIME).Unix() {
107 | et := time.Now().Add(MAX_EXPIRED_TIME).Unix()
108 | h.ExpiredTime = protocol.MarshalInt64(et)
109 | //config entity value
110 | pevent.entity.ExpiredTime = et
111 | } else if h.GetExpiredTime() > 0 && h.GetExpiredTime() <= time.Now().Unix() {
112 | // message already expired: report store failure immediately
113 | remoteEvent := turbo.NewRemotingEvent(storeAck(pevent.opaque,
114 | pevent.entity.Header.GetMessageId(), false, "Expired Message!"),
115 | []string{pevent.remoteClient.RemoteAddr()})
116 | ctx.SendForward(remoteEvent)
117 | return nil
118 | }
119 | // forward for persistence
120 | ctx.SendForward(pevent)
121 | }
122 | }
123 |
124 | return nil
125 | }
126 |
127 | func isUUID(id string) bool {
128 |
129 | if len(id) > 32 || !rc.MatchString(id) {
130 | return false
131 | }
132 | return true
133 | }
134 |
135 | func storeAck(opaque uint32, messageid string, succ bool, feedback string) *turbo.Packet {
136 |
137 | storeAck := protocol.MarshalMessageStoreAck(messageid, succ, feedback)
138 | // response packet
139 | return turbo.NewRespPacket(opaque, protocol.CMD_MESSAGE_STORE_ACK, storeAck)
140 | }
141 |
--------------------------------------------------------------------------------
/handler/deliver_pre.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "github.com/blackbeans/kiteq-common/protocol"
5 | "github.com/blackbeans/kiteq-common/registry"
6 | "github.com/blackbeans/kiteq-common/stat"
7 | "github.com/blackbeans/turbo"
8 | "kiteq/exchange"
9 | "kiteq/store"
10 | "time"
11 | )
12 |
13 | //---------------- pre-delivery handler
14 | type DeliverPreHandler struct {
15 | turbo.BaseForwardHandler
16 | kitestore store.IKiteStore
17 | exchanger *exchange.BindExchanger
18 | maxDeliverNum chan interface{}
19 | deliverTimeout time.Duration
20 | flowstat *stat.FlowStat
21 | deliveryRegistry *DeliveryRegistry
22 | }
23 |
24 | //------ create the pre-delivery handler
25 | func NewDeliverPreHandler(name string, kitestore store.IKiteStore,
26 | exchanger *exchange.BindExchanger, flowstat *stat.FlowStat,
27 | maxDeliverWorker int, deliveryRegistry *DeliveryRegistry) *DeliverPreHandler {
28 | phandler := &DeliverPreHandler{}
29 | phandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, phandler)
30 | phandler.kitestore = kitestore
31 | phandler.exchanger = exchanger
32 | phandler.maxDeliverNum = make(chan interface{}, maxDeliverWorker)
33 | phandler.flowstat = flowstat
34 | phandler.deliveryRegistry = deliveryRegistry
35 |
36 | return phandler
37 | }
38 |
39 | func (self *DeliverPreHandler) TypeAssert(event turbo.IEvent) bool {
40 | _, ok := self.cast(event)
41 | return ok
42 | }
43 |
44 | func (self *DeliverPreHandler) cast(event turbo.IEvent) (val *deliverPreEvent, ok bool) {
45 | val, ok = event.(*deliverPreEvent)
46 | return
47 | }
48 |
49 | func (self *DeliverPreHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
50 |
51 | pevent, ok := self.cast(event)
52 | if !ok {
53 | return turbo.ERROR_INVALID_EVENT_TYPE
54 | }
55 |
56 | // try to register this message's delivery event;
57 | // if that fails, skip this delivery attempt
58 | // (deliverResult unregisters it so delivery can resume later)
59 | succ := self.deliveryRegistry.Register(pevent.messageId, ExpiredSecond)
60 | if !succ {
61 | return nil
62 | }
63 |
64 | self.maxDeliverNum <- nil
65 | self.flowstat.DeliverGo.Incr(1)
66 | go func() {
67 | defer func() {
68 | <-self.maxDeliverNum
69 | self.flowstat.DeliverGo.Incr(-1)
70 | }()
71 | // start delivery
72 | self.send0(ctx, pevent)
73 | self.flowstat.DeliverFlow.Incr(1)
74 | }()
75 |
76 | return nil
77 | }
78 |
79 | //check entity need to deliver
80 | func (self *DeliverPreHandler) checkValid(entity *store.MessageEntity) bool {
81 | // check the delivery count against the limit and that the message has not expired
82 | return entity.DeliverCount < entity.Header.GetDeliverLimit() &&
83 | entity.ExpiredTime > time.Now().Unix()
84 | }
85 |
86 | // internal delivery
87 | func (self *DeliverPreHandler) send0(ctx *turbo.DefaultPipelineContext, pevent *deliverPreEvent) {
88 |
89 | // if no entity was attached, query the store directly
90 | entity := pevent.entity
91 | if nil == entity {
92 | // look up the message
93 | entity = self.kitestore.Query(pevent.header.GetTopic(), pevent.messageId)
94 | if nil == entity {
95 | self.kitestore.Expired(pevent.header.GetTopic(), pevent.messageId)
96 | //log.Error("DeliverPreHandler|send0|Query|FAIL|%s", pevent.messageId)
97 | return
98 | }
99 | }
100 |
101 | //check entity need to deliver
102 | if !self.checkValid(entity) {
103 | self.kitestore.Expired(pevent.header.GetTopic(), entity.MessageId)
104 | return
105 | }
106 |
107 | // log.Debug("DeliverPreHandler|send0|Query|%s", entity.Header)
108 | data := protocol.MarshalMessage(entity.Header, entity.MsgType, entity.GetBody())
109 |
110 | // build the deliverEvent
111 | deliverEvent := newDeliverEvent(pevent.header, pevent.attemptDeliver)
112 |
113 | // build the packet for the message type
114 | switch entity.MsgType {
115 | case protocol.CMD_BYTES_MESSAGE:
116 | deliverEvent.packet = turbo.NewPacket(protocol.CMD_BYTES_MESSAGE, data)
117 | case protocol.CMD_STRING_MESSAGE:
118 | deliverEvent.packet = turbo.NewPacket(protocol.CMD_STRING_MESSAGE, data)
119 | }
120 |
121 | // fill in the subscriber groups
122 | self.fillGroupIds(deliverEvent, entity)
123 | self.fillDeliverExt(deliverEvent, entity)
124 |
125 | //向后投递发送
126 | ctx.SendForward(deliverEvent)
127 | }
128 |
129 | // fillGroupIds fills in the groups to deliver to
130 | func (self *DeliverPreHandler) fillGroupIds(pevent *deliverEvent, entity *store.MessageEntity) {
131 | binds, limiters := self.exchanger.FindBinds(entity.Header.GetTopic(), entity.Header.GetMessageType(),
132 | func(b *registry.Binding) bool {
133 | // log.Printf("DeliverPreHandler|fillGroupIds|Filter Bind |%s|", b)
134 | // filter out groups that already received this message
135 | for _, sg := range entity.SuccGroups {
136 | if sg == b.GroupId {
137 | return true
138 | }
139 | }
140 | return false
141 | })
142 |
143 | // groups to deliver to in this round
144 | groupIds := make([]string, 0, 10)
145 | groupBinds := make(map[string]registry.Binding, 10)
146 | // merge by groupId
147 | for _, bind := range binds {
148 | // collect the group for delivery
149 | groupIds = append(groupIds, bind.GroupId)
150 |
151 | _, ok := groupBinds[bind.GroupId]
152 | if !ok {
153 | groupBinds[bind.GroupId] = *bind
154 | }
155 | }
156 | pevent.groupBinds = groupBinds
157 | pevent.limiters = limiters
158 | pevent.deliverGroups = groupIds
159 | }
160 |
161 | // fillDeliverExt fills in additional delivery info
162 | func (self *DeliverPreHandler) fillDeliverExt(pevent *deliverEvent, entity *store.MessageEntity) {
163 | pevent.header = entity.Header
164 | pevent.deliverLimit = entity.DeliverLimit
165 | pevent.deliverCount = entity.DeliverCount
166 | pevent.succGroups = entity.SuccGroups
167 | }
168 |
--------------------------------------------------------------------------------
/handler/deliver_qos.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "context"
5 | "github.com/blackbeans/kiteq-common/stat"
6 | "github.com/blackbeans/turbo"
7 | "sort"
8 | "time"
9 | )
10 |
11 | const (
12 | ExpiredSecond = 10 * time.Second
13 | )
14 |
15 | //---------------- delivery QoS handler
16 | type DeliverQosHandler struct {
17 | turbo.BaseDoubleSidedHandler
18 | flowstat *stat.FlowStat
19 | ctx context.Context
20 | }
21 |
22 | //------ create the QoS handler
23 | func NewDeliverQosHandler(parent context.Context, name string, flowstat *stat.FlowStat) *DeliverQosHandler {
24 |
25 | phandler := &DeliverQosHandler{}
26 | phandler.BaseDoubleSidedHandler = turbo.NewBaseDoubleSidedHandler(name, phandler)
27 | phandler.flowstat = flowstat
28 | phandler.ctx = parent
29 | return phandler
30 | }
31 |
32 | func (self *DeliverQosHandler) TypeAssert(event turbo.IEvent) bool {
33 | _, ok := self.cast(event)
34 | return ok
35 | }
36 |
37 | func (self *DeliverQosHandler) cast(event turbo.IEvent) (val *deliverEvent, ok bool) {
38 | val, ok = event.(*deliverEvent)
39 | return
40 | }
41 |
42 | func (self *DeliverQosHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
43 | pevent, ok := self.cast(event)
44 | if !ok {
45 | return turbo.ERROR_INVALID_EVENT_TYPE
46 | }
47 |
48 | // no groups to deliver to: report the result immediately
49 | if len(pevent.deliverGroups) <= 0 {
50 | // report as successfully delivered
51 | resultEvent := newDeliverResultEvent(pevent, turbo.EMPTY_FUTURE)
52 | ctx.SendForward(resultEvent)
53 | return nil
54 | }
55 |
56 | groups := make([]string, 0, 10)
57 | overflow := make(map[string]*turbo.Future, 2)
58 | //sort deliver groups
59 | sort.Strings(pevent.deliverGroups)
60 | //check flows
61 | for g, limiter := range pevent.limiters {
62 | //matches valid limiter
63 | idx := sort.SearchStrings(pevent.deliverGroups, g)
64 | if idx >= len(pevent.deliverGroups) || pevent.deliverGroups[idx] != g {
65 | //not found
66 | continue
67 | }
68 |
69 | succ := limiter.Acquire()
70 | if succ {
71 | //allow deliver
72 | groups = append(groups, g)
73 | } else {
74 | //too fast overflow
75 | overflow[g] = turbo.NewErrFuture(0, g, turbo.ERR_OVER_FLOW, self.ctx)
76 | }
77 | }
78 |
79 | // set the groups that will be delivered to
80 | pevent.deliverGroups = groups
81 |
82 | // delivery statistics
83 | self.flowstat.IncrTopicDeliverFlow(pevent.header.GetTopic(), int32(len(groups)))
84 |
85 | // increment the delivery count
86 | pevent.deliverCount++
87 |
88 | // build the remoting event
89 | revent := turbo.NewRemotingEvent(pevent.packet, nil, groups...)
90 | revent.AttachEvent(pevent)
91 | revent.AttachErrFutures(overflow)
92 | // send the network request
93 | ctx.SendForward(revent)
94 | return nil
95 |
96 | }
97 |
--------------------------------------------------------------------------------
/handler/deliver_result.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "fmt"
5 | "kiteq/store"
6 | "sort"
7 | "time"
8 |
9 | "github.com/blackbeans/kiteq-common/protocol"
10 | "github.com/blackbeans/turbo"
11 | )
12 |
13 | type redeliveryWindows []RedeliveryWindow
14 |
15 | // RedeliveryWindow determines the redelivery delay based on the delivery count
16 | type RedeliveryWindow struct {
17 | minDeliveryCount int32
18 | maxDeliveryCount int32
19 | delaySeconds int64 // delay in seconds
20 | }
21 |
22 | func NewRedeliveryWindow(minDeliveryCount, maxDeliveryCount int32, delaySeconds int32) RedeliveryWindow {
23 | return RedeliveryWindow{
24 | minDeliveryCount: minDeliveryCount,
25 | maxDeliveryCount: maxDeliveryCount,
26 | delaySeconds: int64(delaySeconds)}
27 | }
28 |
29 | func (self redeliveryWindows) Len() int { return len(self) }
30 | func (self redeliveryWindows) Swap(i, j int) {
31 | self[i], self[j] = self[j], self[i]
32 | }
33 | func (self redeliveryWindows) Less(i, j int) bool {
34 | return (self[i].maxDeliveryCount <= self[j].minDeliveryCount &&
35 | self[i].maxDeliveryCount < self[j].maxDeliveryCount) &&
36 | self[i].maxDeliveryCount >= 0
37 | }
38 |
39 | func (self redeliveryWindows) String() string {
40 | str := ""
41 | for _, v := range self {
42 | str += fmt.Sprintf("[min:%d,max:%d,sec:%d]->", v.minDeliveryCount, v.maxDeliveryCount, v.delaySeconds)
43 | }
44 | return str
45 | }
46 |
47 | //------- delivery-result recording handler
48 | type DeliverResultHandler struct {
49 | turbo.BaseForwardHandler
50 | kitestore store.IKiteStore
51 | rw redeliveryWindows // redelivery windows
52 | deliverTimeout time.Duration
53 | updateChan chan store.MessageEntity
54 | deleteChan chan string
55 | deliveryRegistry *DeliveryRegistry
56 | }
57 |
58 | //------ create the delivery-result handler
59 | func NewDeliverResultHandler(name string, deliverTimeout time.Duration, kitestore store.IKiteStore,
60 | rw []RedeliveryWindow, deliveryRegistry *DeliveryRegistry) *DeliverResultHandler {
61 | dhandler := &DeliverResultHandler{}
62 | dhandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, dhandler)
63 | dhandler.kitestore = kitestore
64 | dhandler.deliverTimeout = deliverTimeout
65 | dhandler.rw = redeliveryWindows(rw)
66 | dhandler.deliveryRegistry = deliveryRegistry
67 | // sort the windows
68 | sort.Sort(dhandler.rw)
69 | log.Infof("RedeliveryWindows|%s", dhandler.rw)
70 | return dhandler
71 | }
72 |
73 | func (self *DeliverResultHandler) TypeAssert(event turbo.IEvent) bool {
74 | _, ok := self.cast(event)
75 | return ok
76 | }
77 |
78 | func (self *DeliverResultHandler) cast(event turbo.IEvent) (val *deliverResultEvent, ok bool) {
79 | val, ok = event.(*deliverResultEvent)
80 | return
81 | }
82 |
83 | func (self *DeliverResultHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
84 |
85 | fevent, ok := self.cast(event)
86 | if !ok {
87 | return turbo.ERROR_INVALID_EVENT_TYPE
88 | }
89 |
90 | if len(fevent.futures) > 0 {
91 | // wait for the callback results
92 | fevent.wait(self.deliverTimeout, fevent.deliverEvent.groupBinds)
93 | }
94 |
95 | // record the groups that were delivered successfully
96 | if len(fevent.deliverSuccGroups) > 0 {
97 | // drop limiters for groups that succeeded
98 | for _, g := range fevent.deliverSuccGroups {
99 | delete(fevent.limiters, g)
100 | }
101 | fevent.succGroups = append(fevent.succGroups, fevent.deliverSuccGroups...)
102 |
103 | }
104 |
105 | attemptDeliver := nil != fevent.attemptDeliver
106 | // for an attempt delivery, report the failed groups immediately
107 | if attemptDeliver {
108 | fevent.attemptDeliver <- fevent.deliverFailGroups
109 | close(fevent.attemptDeliver)
110 | }
111 |
112 | //sb := strings.Builder{}
113 | //sb.WriteString(self.GetName())
114 | //sb.WriteString("|Process|SEND RESULT:")
115 | //sb.WriteString("\nHeader:")
116 | //sb.WriteString(fevent.header.String())
117 | //sb.WriteString("\nDeliverCount:")
118 | //sb.WriteString(strconv.FormatInt(int64(fevent.deliverCount), 10))
119 | //sb.WriteString("\nNextDeliverTime:")
120 | //sb.WriteString(strconv.FormatInt(int64(self.nextDeliveryTime(fevent.deliverCount)), 10))
121 | //sb.WriteString("\nDeliverGroups:")
122 | //raw, _ := json.Marshal(fevent.deliverGroups)
123 | //sb.Write(raw)
124 | //sb.WriteString("\nSUCCGROUPS:")
125 | //raw, _ = json.Marshal(fevent.succGroups)
126 | //sb.Write(raw)
127 | //sb.WriteString("\nDeliverSUCCGROUPS:")
128 | //raw, _ = json.Marshal(fevent.succGroupFuture)
129 | //sb.Write(raw)
130 | //sb.WriteString("\nDeliverFAILGROUPS:")
131 | //raw, _ = json.Marshal(fevent.failGroupFuture)
132 | //sb.Write(raw)
133 | //logx.GetLogger("deliver_result").Info(sb.String())
134 | //sb.Reset()
135 |
136 | // every group was delivered successfully
137 | if len(fevent.deliverFailGroups) <= 0 {
138 | if !fevent.header.GetFly() && !attemptDeliver {
139 | //async batch remove
140 | self.kitestore.AsyncDelete(fevent.header.GetTopic(), fevent.header.GetMessageId())
141 | }
142 | } else {
143 | // redelivery policy:
144 | // a regular (non-attempt) delivery that meets the redelivery conditions is retried
145 | if !attemptDeliver && self.checkRedelivery(fevent) {
146 | // unregister this message's delivery event
147 | self.deliveryRegistry.UnRegister(fevent.header.GetMessageId())
148 | // trigger redelivery
149 | ctx.SendBackward(fevent.deliverEvent)
150 | }
151 | }
152 |
153 | return nil
154 |
155 | }
156 |
157 | func (self *DeliverResultHandler) checkRedelivery(fevent *deliverResultEvent) bool {
158 |
159 | // if the message has expired or reached its delivery limit, do not redeliver
160 | if fevent.header.GetExpiredTime() <= time.Now().Unix() || (fevent.deliverLimit <= fevent.deliverCount &&
161 | fevent.deliverLimit > 0) {
162 | // just record this attempt; no redelivery
163 |
164 | } else if fevent.deliverCount < 3 {
165 | // only the first three deliveries are retried immediately on failure
166 | fevent.deliverGroups = fevent.deliverFailGroups
167 | fevent.packet.Reset()
168 | return true
169 | } else {
170 | // after three failed deliveries, persist the result and wait for the recover process to redeliver
171 | //log deliver fail
172 | // log.Debugf( "DeliverResultHandler|checkRedelivery|messageId:%s|Topic:%s|MessageType:%s|DeliverCount:%d|SUCCGROUPS:%s|FAILGROUPS:%s|",
173 | // fevent.deliverEvent.messageId, fevent.deliverEvent.topic, fevent.deliverEvent.messageType,
174 | // fevent.deliverCount, fevent.deliverEvent.succGroups, fevent.deliverFailGroups)
175 | }
176 |
177 | // a non-fly message whose delivery count has reached 3
178 | // must have its delivery result persisted
179 | if !fevent.header.GetFly() && fevent.deliverCount >= 3 {
180 | // save the delivery result
181 | self.saveDeliverResult(fevent.header, fevent.deliverCount,
182 | fevent.succGroups, fevent.deliverFailGroups)
183 | }
184 |
185 | return false
186 | }
187 |
188 | // saveDeliverResult persists the delivery result
189 | func (self *DeliverResultHandler) saveDeliverResult(h *protocol.Header, deliverCount int32, succGroups []string, failGroups []string) {
190 |
191 | entity := &store.MessageEntity{
192 | Header: h,
193 | Topic: h.GetTopic(),
194 | MessageType: h.GetMessageType(),
195 | MessageId: h.GetMessageId(),
196 | DeliverCount: deliverCount,
197 | SuccGroups: succGroups,
198 | FailGroups: failGroups,
199 | // set the next delivery time
200 | NextDeliverTime: self.nextDeliveryTime(deliverCount)}
201 | // asynchronously update this message's state
202 | self.kitestore.AsyncUpdateDeliverResult(entity)
203 | }
204 |
205 | func (self *DeliverResultHandler) nextDeliveryTime(deliverCount int32) int64 {
206 | delayTime := self.rw[0].delaySeconds
207 | for _, w := range self.rw {
208 | if deliverCount >= w.minDeliveryCount &&
209 | w.maxDeliveryCount > deliverCount ||
210 | (w.maxDeliveryCount < 0 && deliverCount >= w.minDeliveryCount) {
211 | delayTime = w.delaySeconds
212 | }
213 | }
214 |
215 | // log.Infof( "DeliverResultHandler|nextDeliveryTime|%d|%d", deliverCount, delayTime)
216 | // some window should always match; otherwise it's a bug
217 |
218 | // next delivery time = now + delay
219 | return time.Now().Unix() + delayTime
220 | }
221 |
--------------------------------------------------------------------------------
/handler/delivery_registry.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "context"
5 | "time"
6 |
7 | "github.com/blackbeans/turbo"
8 | )
9 |
10 | // DeliveryRegistry tracks in-flight delivery events
11 | type DeliveryRegistry struct {
12 | registry *turbo.LRUCache // key: messageId -> value: expiry time
13 | }
14 |
15 | func NewDeliveryRegistry(ctx context.Context, tw *turbo.TimerWheel, capacity int) *DeliveryRegistry {
16 | registry := turbo.NewLRUCache(ctx, capacity, tw, nil)
17 | return &DeliveryRegistry{registry: registry}
18 | }
19 |
20 | /*
21 | * Register registers a delivery event for the message with the given expiry
22 | **/
23 | func (self DeliveryRegistry) Register(messageId string, exp time.Duration) bool {
24 | now := time.Now()
25 | // overwrite if absent or already expired
26 | expiredTime := now.Add(exp)
27 | exist, ok := self.registry.Get(messageId)
28 | if !ok || time.Time(exist.(time.Time)).Before(now) {
29 | self.registry.Put(messageId, expiredTime, exp)
30 | return true
31 | }
32 |
33 | return false
34 | }
35 |
36 | // UnRegister removes the delivery registration
37 | func (self DeliveryRegistry) UnRegister(messageId string) {
38 | self.registry.Remove(messageId)
39 | }
40 |
--------------------------------------------------------------------------------
/handler/delivery_registry_test.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "context"
5 | "kiteq/store"
6 | "testing"
7 | "time"
8 |
9 | "github.com/blackbeans/turbo"
10 | )
11 |
12 | func BenchmarkDeliveryRegistry(t *testing.B) {
13 | t.StopTimer()
14 | tw := turbo.NewTimerWheel(100 * time.Millisecond)
15 | registry := NewDeliveryRegistry(context.TODO(), tw, 10*10000)
16 |
17 | t.SetParallelism(8)
18 | t.StartTimer()
19 | t.RunParallel(func(pb *testing.PB) {
20 | for pb.Next() {
21 | msgId := store.MessageId()
22 | succ := registry.Register(msgId, 5*time.Second)
23 | if !succ {
24 | t.Fail()
25 | }
26 | }
27 | })
28 | }
29 |
30 | func TestDeliveryRegistry(t *testing.T) {
31 | tw := turbo.NewTimerWheel(100 * time.Millisecond)
32 | registry := NewDeliveryRegistry(context.TODO(), tw, 10*10000)
33 |
34 | msgId := store.MessageId()
35 | succ := registry.Register(msgId, 5*time.Second)
36 | if !succ {
37 | t.Fail()
38 | t.Logf("TestDeliveryRegistry|FirstRegist|FAIL|%s", msgId)
39 | }
40 |
41 | succ = registry.Register(msgId, 5*time.Second)
42 | if succ {
43 | t.Fail()
44 | t.Logf("TestDeliveryRegistry|SecondRegist|FAIL|%s", msgId)
45 | }
46 |
47 | time.Sleep(5 * time.Second)
48 | succ = registry.Register(msgId, 5*time.Second)
49 | if !succ {
50 | t.Fail()
51 | t.Logf("TestDeliveryRegistry|ThirdRegist|FAIL|%s", msgId)
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/handler/handler.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import "github.com/blackbeans/logx"
4 |
5 | var log = logx.GetLogger("kiteq_handler")
6 |
--------------------------------------------------------------------------------
/handler/heartbeat.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "github.com/blackbeans/kiteq-common/protocol"
5 | "github.com/blackbeans/turbo"
6 | // "github.com/blackbeans/logx"
7 | )
8 |
9 | type HeartbeatHandler struct {
10 | turbo.BaseForwardHandler
11 | }
12 |
13 | //------ create the heartbeat handler
14 | func NewHeartbeatHandler(name string) *HeartbeatHandler {
15 | phandler := &HeartbeatHandler{}
16 | phandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, phandler)
17 | return phandler
18 | }
19 |
20 | func (self *HeartbeatHandler) TypeAssert(event turbo.IEvent) bool {
21 | _, ok := self.cast(event)
22 | return ok
23 | }
24 |
25 | func (self *HeartbeatHandler) cast(event turbo.IEvent) (val *turbo.HeartbeatEvent, ok bool) {
26 | val, ok = event.(*turbo.HeartbeatEvent)
27 | return
28 | }
29 |
30 | func (self *HeartbeatHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
31 |
32 | hevent, ok := self.cast(event)
33 | if !ok {
34 | return turbo.ERROR_INVALID_EVENT_TYPE
35 | }
36 |
37 | //handle the local pong
38 | hevent.RemoteClient.Pong(hevent.Opaque, hevent.Version)
39 |
40 | //build the response for the ping
41 | packet := turbo.NewRespPacket(hevent.Opaque, protocol.CMD_HEARTBEAT, protocol.MarshalHeartbeatPacket(hevent.Version))
42 | //fire a network request
43 | remoteEvent := turbo.NewRemotingEvent(packet, []string{hevent.RemoteClient.RemoteAddr()})
44 |
45 | // log.Infof( "HeartbeatHandler|%s|Process|Recieve|Ping|%s|%d", self.GetName(), hevent.RemoteClient.RemoteAddr(), hevent.Version)
46 | ctx.SendForward(remoteEvent)
47 | return nil
48 | }
49 |
--------------------------------------------------------------------------------
/handler/packet_decode.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "errors"
5 | "github.com/blackbeans/kiteq-common/protocol"
6 | "github.com/blackbeans/turbo"
7 | )
8 |
9 | //PacketHandler for remote operations
10 |
11 | type PacketHandler struct {
12 | turbo.BaseForwardHandler
13 | }
14 |
15 | func NewPacketHandler(name string) *PacketHandler {
16 | packetHandler := &PacketHandler{}
17 | packetHandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, packetHandler)
18 | return packetHandler
19 |
20 | }
21 |
22 | func (self *PacketHandler) TypeAssert(event turbo.IEvent) bool {
23 | _, ok := self.cast(event)
24 | return ok
25 | }
26 |
27 | func (self *PacketHandler) cast(event turbo.IEvent) (val *turbo.PacketEvent, ok bool) {
28 | val, ok = event.(*turbo.PacketEvent)
29 | return
30 | }
31 |
32 | var INVALID_PACKET_ERROR = errors.New("INVALID PACKET ERROR")
33 |
34 | func (self *PacketHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
35 |
36 | // log.Debugf( "PacketHandler|Process|%s|%t", self.GetName(), event)
37 |
38 | pevent, ok := self.cast(event)
39 | if !ok {
40 | return turbo.ERROR_INVALID_EVENT_TYPE
41 | }
42 |
43 | cevent, err := self.handlePacket(pevent)
44 | if nil != err {
45 | return err
46 | }
47 |
48 | ctx.SendForward(cevent)
49 | return nil
50 | }
51 |
52 | var sunkEvent = &turbo.SunkEvent{}
53 |
54 | //decode request events
55 | func (self *PacketHandler) handlePacket(pevent *turbo.PacketEvent) (turbo.IEvent, error) {
56 | var err error
57 | var event turbo.IEvent
58 |
59 | packet := pevent.Packet
60 | //unmarshal the packet according to its command type
61 | switch packet.Header.CmdType {
62 | //connection metadata
63 | case protocol.CMD_CONN_META:
64 | var connMeta protocol.ConnMeta
65 | err = protocol.UnmarshalPbMessage(packet.Data, &connMeta)
66 | if nil == err {
67 | event = newAccessEvent(connMeta, pevent.RemoteClient, packet.Header.Opaque)
68 | }
69 |
70 | //heartbeat
71 | case protocol.CMD_HEARTBEAT:
72 | var heartbeat protocol.HeartBeat
73 | err = protocol.UnmarshalPbMessage(packet.Data, &heartbeat)
74 | if nil == err {
75 | event = newAcceptEvent(protocol.CMD_HEARTBEAT, &heartbeat, pevent.RemoteClient, packet.Header.Opaque)
76 | }
77 | //delivery result ack
78 | case protocol.CMD_DELIVER_ACK:
79 | var delAck protocol.DeliverAck
80 | err = protocol.UnmarshalPbMessage(packet.Data, &delAck)
81 | if nil == err {
82 | event = newAcceptEvent(protocol.CMD_DELIVER_ACK, &delAck, pevent.RemoteClient, packet.Header.Opaque)
83 | }
84 |
85 | case protocol.CMD_TX_ACK:
86 | var txAck protocol.TxACKPacket
87 | err = protocol.UnmarshalPbMessage(packet.Data, &txAck)
88 | if nil == err {
89 | event = newTxAckEvent(&txAck, packet.Header.Opaque, pevent.RemoteClient)
90 | }
91 |
92 | //payload is a BytesMessage
93 | case protocol.CMD_BYTES_MESSAGE:
94 | var msg protocol.BytesMessage
95 | err = protocol.UnmarshalPbMessage(packet.Data, &msg)
96 | if nil == err {
97 | event = newAcceptEvent(protocol.CMD_BYTES_MESSAGE, &msg, pevent.RemoteClient, packet.Header.Opaque)
98 | }
99 | //payload is a StringMessage
100 | case protocol.CMD_STRING_MESSAGE:
101 | var msg protocol.StringMessage
102 | err = protocol.UnmarshalPbMessage(packet.Data, &msg)
103 | if nil == err {
104 | event = newAcceptEvent(protocol.CMD_STRING_MESSAGE, &msg, pevent.RemoteClient, packet.Header.Opaque)
105 | }
106 | default:
107 | //unknown command type: surface an error instead of forwarding a nil event
108 | err = INVALID_PACKET_ERROR
109 | }
110 |
111 | return event, err
112 |
113 | }
114 |
--------------------------------------------------------------------------------
/handler/persistent_msg.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "github.com/blackbeans/turbo"
7 | "kiteq/store"
8 | "time"
9 | )
10 |
11 | var ERROR_PERSISTENT = errors.New("persistent msg error!")
12 |
13 | //----------------persistence handler
14 | type PersistentHandler struct {
15 | turbo.BaseForwardHandler
16 | kitestore store.IKiteStore
17 | deliverTimeout time.Duration
18 | deliveryFirst bool //attempt delivery before persisting?
19 | }
20 |
21 | //------create the persistent handler
22 | func NewPersistentHandler(name string, deliverTimeout time.Duration,
23 | kitestore store.IKiteStore, deliveryFirst bool) *PersistentHandler {
24 | phandler := &PersistentHandler{}
25 | phandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, phandler)
26 | phandler.kitestore = kitestore
27 | phandler.deliverTimeout = deliverTimeout
28 | phandler.deliveryFirst = deliveryFirst
29 | return phandler
30 | }
31 |
32 | func (self *PersistentHandler) TypeAssert(event turbo.IEvent) bool {
33 | _, ok := self.cast(event)
34 | return ok
35 | }
36 |
37 | func (self *PersistentHandler) cast(event turbo.IEvent) (val *persistentEvent, ok bool) {
38 | val, ok = event.(*persistentEvent)
39 | return
40 | }
41 |
42 | func (self *PersistentHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
43 |
44 | pevent, ok := self.cast(event)
45 | if !ok {
46 | return turbo.ERROR_INVALID_EVENT_TYPE
47 | }
48 |
49 | if nil != pevent.entity {
50 | //fly-mode messages are not persisted
51 | if pevent.entity.Header.GetFly() {
52 | if pevent.entity.Header.GetCommit() {
53 | //a committed fly message needs no storage, only an ack command
54 | //send the store-result ack
55 | remoteEvent := turbo.NewRemotingEvent(storeAck(pevent.opaque,
56 | pevent.entity.Header.GetMessageId(), true, "FLY NO NEED SAVE"), []string{pevent.remoteClient.RemoteAddr()})
57 | ctx.SendForward(remoteEvent)
58 |
59 | self.send(ctx, pevent, nil)
60 | } else {
61 | remoteEvent := turbo.NewRemotingEvent(storeAck(pevent.opaque,
62 | pevent.entity.Header.GetMessageId(), false, "FLY MUST BE COMMITTED !"), []string{pevent.remoteClient.RemoteAddr()})
63 | ctx.SendForward(remoteEvent)
64 | }
65 |
66 | } else {
67 | self.sendUnFlyMessage(ctx, pevent)
68 | }
69 |
70 | }
71 |
72 | return nil
73 | }
74 |
75 | //deliver a non-fly message
76 | func (self *PersistentHandler) sendUnFlyMessage(ctx *turbo.DefaultPipelineContext, pevent *persistentEvent) {
77 | saveSucc := false
78 |
79 | // log.Debugf( "PersistentHandler|sendUnFlyMessage|%s", pevent.entity)
80 | var storeCostMs int64
81 | //committed and delivery-first optimization enabled
82 | if self.deliveryFirst &&
83 | pevent.entity.Header.GetCommit() {
84 | //attempt one delivery first, then persist according to the result
85 | ch := make(chan []string, 1) //receives the groups that failed the attempt
86 | self.send(ctx, pevent, ch)
87 | /*if the attempt succeeds, just ack the storage as successful;
88 |  *if it fails, the message must be persisted
89 |  */
90 | failGroups := <-ch
91 | //persist the groups that failed or timed out
92 | if len(failGroups) > 0 {
93 | pevent.entity.DeliverCount += 1
94 | if nil != failGroups {
95 | pevent.entity.FailGroups = failGroups
96 | }
97 |
98 | //time the write to the persistent store
99 | now := time.Now().UnixNano()
100 | //persist, then redeliver
101 | saveSucc = self.kitestore.Save(pevent.entity)
102 | storeCostMs = (time.Now().UnixNano() - now) / (1000 * 1000)
103 | if storeCostMs >= 200 {
104 | log.Warnf("PersistentHandler|Save Too Long|cost:%d ms|%v", storeCostMs, pevent.entity.Header.String())
105 | }
106 |
107 | //redeliver
108 | self.send(ctx, pevent, nil)
109 | } else {
110 | log.Debugf("PersistentHandler|sendUnFlyMessage|FLY|%s", pevent.entity)
111 | }
112 |
113 | } else {
114 | now := time.Now().UnixNano()
115 | //persist first, then deliver
116 | saveSucc = self.kitestore.Save(pevent.entity)
117 | storeCostMs = (time.Now().UnixNano() - now) / (1000 * 1000)
118 | if storeCostMs >= 200 {
119 | log.Warnf("PersistentHandler|Save Too Long|cost:%d ms|%v", storeCostMs, pevent.entity.Header.String())
120 | }
121 |
122 | if saveSucc && pevent.entity.Commit {
123 | self.send(ctx, pevent, nil)
124 | }
125 |
126 | }
127 |
128 | //send the store-result ack
129 | remoteEvent := turbo.NewRemotingEvent(storeAck(pevent.opaque,
130 | pevent.entity.Header.GetMessageId(), saveSucc, fmt.Sprintf("Store Result %t Cost:%d", saveSucc, storeCostMs)),
131 | []string{pevent.remoteClient.RemoteAddr()})
132 | ctx.SendForward(remoteEvent)
133 |
134 | }
135 |
136 | func (self *PersistentHandler) send(ctx *turbo.DefaultPipelineContext, pevent *persistentEvent, ch chan []string) {
137 |
138 | //kick off delivery (it will naturally be retried up to 3 times)
139 | preDeliver := NewDeliverPreEvent(
140 | pevent.entity.Header.GetMessageId(),
141 | pevent.entity.Header,
142 | pevent.entity)
143 | preDeliver.attemptDeliver = ch
144 | ctx.SendForward(preDeliver)
145 |
146 | // log.Debugf( "PersistentHandler|send|FULL|TRY SEND BY CURRENT GO ....")
147 | }
148 |
--------------------------------------------------------------------------------
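
The delivery-first branch above rests on a one-shot channel handshake: the handler forwards the message together with a buffered result channel, blocks until the attempt reports back, and pays for persistence only when some groups failed. A standalone sketch of that pattern under illustrative names (attemptDeliver is not part of kiteq):

    package main

    import "fmt"

    // attemptDeliver stands in for the downstream pipeline reporting which
    // subscriber groups did not ack the trial delivery.
    func attemptDeliver(msg string, result chan<- []string) {
        // ...try to push msg to every subscriber group...
        result <- []string{"group-b"} // the groups that failed or timed out
    }

    func main() {
        ch := make(chan []string, 1) // buffered so the deliverer never blocks
        go attemptDeliver("hello", ch)

        if failGroups := <-ch; len(failGroups) > 0 {
            // only now write to the persistent store, then redeliver to failGroups
            fmt.Println("persist and retry:", failGroups)
        }
    }
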
/handler/pipe_events.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "fmt"
5 | "kiteq/store"
6 | "time"
7 |
8 | "github.com/blackbeans/kiteq-common/protocol"
9 | "github.com/blackbeans/kiteq-common/registry"
10 | "github.com/blackbeans/turbo"
11 | )
12 |
13 | type iauth interface {
14 | turbo.IForwardEvent
15 | getClient() *turbo.TClient
16 | }
17 |
18 | type accessEvent struct {
19 | iauth
20 | connMeta protocol.ConnMeta
21 | opaque uint32
22 | remoteClient *turbo.TClient
23 | }
24 |
25 | func (self *accessEvent) getClient() *turbo.TClient {
26 | return self.remoteClient
27 | }
28 |
29 | func newAccessEvent(connMeta protocol.ConnMeta, remoteClient *turbo.TClient, opaque uint32) *accessEvent {
30 | access := &accessEvent{
31 | connMeta: connMeta,
32 | opaque: opaque,
33 | remoteClient: remoteClient}
34 | return access
35 | }
36 |
37 | //message-accept event
38 | type acceptEvent struct {
39 | iauth
40 | msgType uint8
41 | msg interface{} //attached message payload
42 | opaque uint32
43 | client *turbo.TClient
44 | }
45 |
46 | func (self *acceptEvent) getClient() *turbo.TClient {
47 | return self.client
48 | }
49 |
50 | func newAcceptEvent(msgType uint8, msg interface{},
51 | remoteClient *turbo.TClient, opaque uint32) *acceptEvent {
52 | ae := &acceptEvent{
53 | msgType: msgType,
54 | msg: msg,
55 | opaque: opaque,
56 | client: remoteClient}
57 | return ae
58 | }
59 |
60 | type txAckEvent struct {
61 | iauth
62 | txPacket *protocol.TxACKPacket
63 | opaque uint32
64 | remoteClient *turbo.TClient
65 | }
66 |
67 | func (self *txAckEvent) getClient() *turbo.TClient {
68 | return self.remoteClient
69 | }
70 |
71 | func newTxAckEvent(txPacket *protocol.TxACKPacket, opaque uint32, remoteClient *turbo.TClient) *txAckEvent {
72 | tx := &txAckEvent{
73 | txPacket: txPacket,
74 | opaque: opaque,
75 | remoteClient: remoteClient}
76 | return tx
77 | }
78 |
79 | //persistence event
80 | type persistentEvent struct {
81 | turbo.IForwardEvent
82 | entity *store.MessageEntity
83 | remoteClient *turbo.TClient
84 | opaque uint32
85 | }
86 |
87 | func newPersistentEvent(entity *store.MessageEntity, remoteClient *turbo.TClient, opaque uint32) *persistentEvent {
88 | return &persistentEvent{entity: entity, remoteClient: remoteClient, opaque: opaque}
89 |
90 | }
91 |
92 | //delivery-preparation event
93 | type deliverPreEvent struct {
94 | turbo.IForwardEvent
95 | messageId string
96 | header *protocol.Header
97 | entity *store.MessageEntity
98 | attemptDeliver chan []string
99 | }
100 |
101 | func NewDeliverPreEvent(messageId string, header *protocol.Header,
102 | entity *store.MessageEntity) *deliverPreEvent {
103 | return &deliverPreEvent{
104 | messageId: messageId,
105 | header: header,
106 | entity: entity}
107 | }
108 |
109 | //delivery event
110 | type deliverEvent struct {
111 | turbo.IForwardEvent
112 | turbo.IBackwardEvent
113 | header *protocol.Header
114 | // fly bool //whether this is a fly-mode message
115 | packet *turbo.Packet //message packet
116 | succGroups []string //groups already delivered successfully
117 | deliverGroups []string //groups still to be delivered
118 | deliverLimit int32
119 | deliverCount int32 //deliveries attempted so far
120 | attemptDeliver chan []string
121 | limiters map[string]*turbo.BurstyLimiter
122 | groupBinds map[string]registry.Binding //subscription bindings for this delivery
123 | }
124 |
125 | //create a delivery event
126 | func newDeliverEvent(header *protocol.Header, attemptDeliver chan []string) *deliverEvent {
127 | return &deliverEvent{
128 | header: header,
129 | attemptDeliver: attemptDeliver}
130 | }
131 |
132 | type GroupFuture struct {
133 | *turbo.Future
134 | resp interface{}
135 | groupId string
136 | }
137 |
138 | func (self GroupFuture) String() string {
139 | ack, ok := self.resp.(*protocol.DeliverAck)
140 | if ok {
141 | return fmt.Sprintf("[%s@%s,resp:(status:%v,feedback:%s),err:%v]", self.TargetHost, self.groupId, ack.GetStatus(), ack.GetFeedback(), self.Err)
142 | }
143 | return fmt.Sprintf("[%s@%s,resp:%v,err:%v]", self.TargetHost, self.groupId, self.resp, self.Err)
144 | }
145 |
146 | //aggregates delivery results and decides whether to redeliver
147 | type deliverResultEvent struct {
148 | *deliverEvent
149 | turbo.IBackwardEvent
150 | futures map[string]*turbo.Future
151 | failGroupFuture []GroupFuture
152 | succGroupFuture []GroupFuture
153 | deliverFailGroups []string
154 | deliverSuccGroups []string
155 | }
156 |
157 | func newDeliverResultEvent(deliverEvent *deliverEvent, futures map[string]*turbo.Future) *deliverResultEvent {
158 | re := &deliverResultEvent{}
159 | re.deliverEvent = deliverEvent
160 | re.futures = futures
161 | re.succGroupFuture = make([]GroupFuture, 0, 5)
162 | re.failGroupFuture = make([]GroupFuture, 0, 5)
163 |
164 | return re
165 | }
166 |
167 | //wait for the responses
168 | func (self *deliverResultEvent) wait(timeout time.Duration, groupBinds map[string]registry.Binding) bool {
169 | istimeout := false
170 | latch := make(chan time.Time, 1)
171 | t := time.AfterFunc(timeout, func() {
172 | close(latch)
173 | })
174 |
175 | defer t.Stop()
176 | tch := (<-chan time.Time)(latch)
177 |
178 | //wait for the callback results
179 | for g, f := range self.futures {
180 |
181 | resp, err := f.Get(tch)
182 |
183 | if err == turbo.ERR_TIMEOUT {
184 | istimeout = true
185 | } else if nil != resp {
186 | ack, ok := resp.(*protocol.DeliverAck)
187 | if !ok || !ack.GetStatus() {
188 | self.failGroupFuture = append(self.failGroupFuture, GroupFuture{f, resp, g})
189 | } else {
190 | self.succGroupFuture = append(self.succGroupFuture, GroupFuture{f, resp, g})
191 | }
192 | }
193 |
194 | if nil != err {
195 | //if no client is alive and the current group's subscription
196 | //is non-persistent, the delivery is treated as successful
197 | if err == turbo.ERR_NO_HOSTS {
198 | b, ok := groupBinds[g]
199 | if ok && !b.Persistent {
200 | f.Err = fmt.Errorf("All Clients Offline ! Bind[%v]", b)
201 | self.succGroupFuture = append(self.succGroupFuture, GroupFuture{f, resp, g})
202 | continue
203 | }
204 | }
205 | //the delivery-failure case
206 | {
207 | gf := GroupFuture{f, resp, g}
208 | gf.Err = err
209 | self.failGroupFuture = append(self.failGroupFuture, gf)
210 | }
211 | }
212 | }
213 |
214 | fg := make([]string, 0, len(self.failGroupFuture))
215 | for _, g := range self.failGroupFuture {
216 | fg = append(fg, g.groupId)
217 | }
218 | self.deliverFailGroups = fg
219 |
220 | sg := make([]string, 0, len(self.succGroupFuture))
221 | for _, g := range self.succGroupFuture {
222 | sg = append(sg, g.groupId)
223 | }
224 | self.deliverSuccGroups = sg
225 | return istimeout
226 | }
227 |
--------------------------------------------------------------------------------
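
wait above shares one deadline across every future: time.AfterFunc closes a latch channel exactly once, and each blocking Get selects against the same read-only view of it. A compact, self-contained sketch of the idiom, with simulated work standing in for Future.Get:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        latch := make(chan time.Time, 1)
        t := time.AfterFunc(100*time.Millisecond, func() { close(latch) })
        defer t.Stop()

        tch := (<-chan time.Time)(latch) // read-only view shared by all waiters

        for i := 0; i < 3; i++ {
            select {
            case <-time.After(time.Duration(75*i) * time.Millisecond): // simulated work
                fmt.Println("future", i, "answered in time")
            case <-tch: // once closed, this fires immediately for every remaining waiter
                fmt.Println("future", i, "timed out")
            }
        }
    }
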
/handler/remote_future.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "github.com/blackbeans/turbo"
5 | // "github.com/blackbeans/logx"
6 | )
7 |
8 | //future handler for remoting calls
9 | type RemoteFutureHandler struct {
10 | turbo.BaseForwardHandler
11 | }
12 |
13 | //------create the remote-future handler
14 | func NewRemotingFutureHandler(name string) *RemoteFutureHandler {
15 | phandler := &RemoteFutureHandler{}
16 | phandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, phandler)
17 | return phandler
18 | }
19 |
20 | func (self *RemoteFutureHandler) TypeAssert(event turbo.IEvent) bool {
21 | re, ok := self.cast(event)
22 | if ok {
23 | _, ok := re.Event.(*deliverEvent)
24 | return ok
25 | }
26 | return ok
27 | }
28 |
29 | func (self *RemoteFutureHandler) cast(event turbo.IEvent) (val *turbo.RemoteFutureEvent, ok bool) {
30 | val, ok = event.(*turbo.RemoteFutureEvent)
31 | return val, ok
32 | }
33 |
34 | func (self *RemoteFutureHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
35 | pevent, ok := self.cast(event)
36 | if !ok {
37 | return turbo.ERROR_INVALID_EVENT_TYPE
38 | }
39 |
40 | futures := pevent.Wait()
41 | devent := pevent.RemotingEvent.Event.(*deliverEvent)
42 | //create a delivery-result event
43 | resultEvent := newDeliverResultEvent(devent, futures)
44 | ctx.SendForward(resultEvent)
45 | return nil
46 | }
47 |
--------------------------------------------------------------------------------
/handler/tx_ack.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "github.com/blackbeans/kiteq-common/protocol"
5 | "github.com/blackbeans/turbo"
6 | "kiteq/store"
7 | )
8 |
9 | //----------------tx-ack handler
10 | type TxAckHandler struct {
11 | turbo.BaseForwardHandler
12 | kitestore store.IKiteStore
13 | }
14 |
15 | //------create the tx-ack handler
16 | func NewTxAckHandler(name string, kitestore store.IKiteStore) *TxAckHandler {
17 | phandler := &TxAckHandler{}
18 | phandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, phandler)
19 | phandler.kitestore = kitestore
20 | return phandler
21 | }
22 |
23 | func (self *TxAckHandler) TypeAssert(event turbo.IEvent) bool {
24 | _, ok := self.cast(event)
25 | return ok
26 | }
27 |
28 | func (self *TxAckHandler) cast(event turbo.IEvent) (val *txAckEvent, ok bool) {
29 | val, ok = event.(*txAckEvent)
30 | return
31 | }
32 |
33 | func (self *TxAckHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
34 |
35 | // log.Debugf( "TxAckHandler|Process|%s|%t", self.GetName(), event)
36 |
37 | pevent, ok := self.cast(event)
38 | if !ok {
39 | return turbo.ERROR_INVALID_EVENT_TYPE
40 | }
41 |
42 | h := pevent.txPacket.GetHeader()
43 | //commit or rollback
44 | if pevent.txPacket.GetStatus() == int32(protocol.TX_COMMIT) {
45 |
46 | succ := self.kitestore.Commit(h.GetTopic(), h.GetMessageId())
47 |
48 | if succ {
49 | //fire the delivery event
50 | //the dispatch logic runs on an async goroutine
51 | preevent := NewDeliverPreEvent(h.GetMessageId(), h, nil)
52 | ctx.SendForward(preevent)
53 |
54 | } else {
55 | //on failure, wait for the next recover pass to ask again
56 | // log.Debugf( "TxAckHandler|%s|Process|Commit|FAIL|%s|%s", self.GetName(), h.GetMessageId(), succ)
57 | }
58 |
59 | } else if pevent.txPacket.GetStatus() == int32(protocol.TX_ROLLBACK) {
60 | succ := self.kitestore.Rollback(h.GetTopic(), h.GetMessageId())
61 | if !succ {
62 | log.Warnf("TxAckHandler|%s|Process|Rollback|FAIL|%s|%s|%s",
63 | self.GetName(), h.GetMessageId(), pevent.txPacket.GetFeedback(), succ)
64 | }
65 |
66 | } else {
67 | //UNKNOWN and anything else: ignore
68 |
69 | }
70 | ctx.SendForward(&turbo.SunkEvent{})
71 | return nil
72 | }
73 |
--------------------------------------------------------------------------------
/handler/validate_event.go:
--------------------------------------------------------------------------------
1 | package handler
2 |
3 | import (
4 | "github.com/blackbeans/kiteq-common/protocol"
5 | "github.com/blackbeans/turbo"
6 | )
7 |
8 | //----------------auth handler
9 | type ValidateHandler struct {
10 | turbo.BaseForwardHandler
11 | clientManager *turbo.ClientManager
12 | }
13 |
14 | //------create the auth handler
15 | func NewValidateHandler(name string, clientManager *turbo.ClientManager) *ValidateHandler {
16 | ahandler := &ValidateHandler{}
17 | ahandler.BaseForwardHandler = turbo.NewBaseForwardHandler(name, ahandler)
18 | ahandler.clientManager = clientManager
19 | return ahandler
20 | }
21 |
22 | func (self *ValidateHandler) TypeAssert(event turbo.IEvent) bool {
23 | _, ok := self.cast(event)
24 | return ok
25 | }
26 |
27 | func (self *ValidateHandler) cast(event turbo.IEvent) (val iauth, ok bool) {
28 | val, ok = event.(iauth)
29 | return val, ok
30 | }
31 |
32 | func (self *ValidateHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
33 |
34 | aevent, ok := self.cast(event)
35 | if !ok {
36 | return turbo.ERROR_INVALID_EVENT_TYPE
37 | }
38 |
39 | c := aevent.getClient()
40 | //perform the access check
41 | isAuth := self.clientManager.Validate(c)
42 | // log.Debugf( "ValidateHandler|CONNETION|%s|%s", client.RemoteAddr(), isAuth)
43 | if isAuth {
44 | ctx.SendForward(event)
45 | } else {
46 | log.Warnf("ValidateHandler|UnAuth CONNETION|%s", c.RemoteAddr())
47 | cmd := protocol.MarshalConnAuthAck(false, "Unauthorized,Connection broken!")
48 | //response packet
49 | p := turbo.NewPacket(protocol.CMD_CONN_AUTH, cmd)
50 |
51 | //write the auth failure straight back
52 | c.Write(*p)
53 | //close the connection
54 | c.Shutdown()
55 | }
56 |
57 | return nil
58 |
59 | }
60 |
--------------------------------------------------------------------------------
/kiteq.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "kiteq/server"
7 | _ "net/http/pprof"
8 | "os"
9 | "os/signal"
10 | "runtime"
11 | "runtime/debug"
12 | "syscall"
13 | "time"
14 |
15 | "github.com/blackbeans/turbo"
16 | )
17 |
18 | func main() {
19 |
20 | //parse the startup options
21 | so := server.Parse()
22 | runtime.GOMAXPROCS(runtime.NumCPU()*2 + 1)
23 |
24 | rc := turbo.NewTConfig(
25 | "remoting",
26 | 1000, 16*1024,
27 | 16*1024, 10000, 10000,
28 | 10*time.Second,
29 | 100*10000)
30 |
31 | kc := server.NewKiteQConfig(so, rc)
32 | ctx, cancel := context.WithCancel(context.Background())
33 | qserver := server.NewKiteQServer(ctx, kc)
34 | qserver.Start()
35 |
36 | var s = make(chan os.Signal, 1)
37 | signal.Notify(s, syscall.SIGKILL, syscall.SIGUSR1, syscall.SIGTERM)
38 | //wait for a termination signal (note: SIGKILL cannot actually be caught)
39 | for {
40 | cmd := <-s
41 | if cmd == syscall.SIGKILL || cmd == syscall.SIGTERM {
42 | break
43 | } else if cmd == syscall.SIGUSR1 {
44 | //on SIGUSR1, dump the heap
45 | unixtime := time.Now().Unix()
46 | path := fmt.Sprintf("./heapdump-kiteq-%d", unixtime)
47 | f, err := os.Create(path)
48 | if nil != err {
49 | continue
50 | } else {
51 | debug.WriteHeapDump(f.Fd())
52 | }
53 | }
54 | }
55 | qserver.Shutdown()
56 | cancel()
57 |
58 | }
59 |
--------------------------------------------------------------------------------
/kiteq.sh:
--------------------------------------------------------------------------------
1 | go run kiteq.go -clusterName=rocksdb_dev -configPath=./conf/cluster.toml -pport=13801 -bind=:13800
2 | #./kiteq -clusterName=memory_dev -configPath=./conf/cluster.toml -pport=13801 -bind=:13800
3 | #./kiteq -clusterName=mysql_dev -configPath=./conf/cluster.toml -pport=13801 -bind=:13800
4 |
5 |
--------------------------------------------------------------------------------
/server/kite_server_config.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "errors"
5 | "flag"
6 | "github.com/blackbeans/kiteq-common/stat"
7 | "github.com/blackbeans/logx"
8 | "github.com/blackbeans/turbo"
9 | "github.com/naoina/toml"
10 | "io/ioutil"
11 | "os"
12 | "time"
13 | )
14 |
15 | type KiteQConfig struct {
16 | so ServerOption
17 | flowstat *stat.FlowStat
18 | rc *turbo.TConfig
19 | }
20 |
21 | func NewKiteQConfig(so ServerOption, rc *turbo.TConfig) KiteQConfig {
22 | flowstat := stat.NewFlowStat()
23 | return KiteQConfig{
24 | flowstat: flowstat,
25 | rc: rc,
26 | so: so}
27 | }
28 |
29 | const (
30 | DEFAULT_APP = "default"
31 | )
32 |
33 | type HostPort struct {
34 | Hosts string
35 | }
36 |
37 | //configuration
38 | type Option struct {
39 | Registry map[string]HostPort //registry endpoints per environment
40 | Clusters map[string]Cluster //per-cluster configuration
41 | }
42 |
43 | //----------------------------------------
44 | //cluster configuration
45 | type Cluster struct {
46 | Env string //current environment: dev or online
47 | Topics []string //topics this cluster can handle
48 | DlqExecHour int //hour of day (0-23) at which expired messages are purged
49 | DeliveryFirst bool //delivery-first or store-first
50 | Logxml string //log config path
51 | Db string //data store
52 | DeliverySeconds int64 //delivery timeout, in seconds
53 | MaxDeliverWorkers int //max delivery goroutines
54 | RecoverSeconds int64 //recover period, in seconds
55 | RecievePermitsPerSecond int //max messages accepted per second
56 | }
57 |
58 | type ServerOption struct {
59 | clusterName string //cluster name
60 | configPath string //config file path
61 | registryUri string //registry address
62 | bindHost string //IP and port to bind
63 | pprofPort int //pprof port
64 | topics []string //topics this cluster can handle
65 | dlqExecHour int //hour of day (0-23) at which expired messages are purged
66 | deliveryFirst bool //whether the server delivers first; default false (store-first)
67 | logxml string //log config path
68 | db string //underlying storage backend
69 | deliveryTimeout time.Duration //delivery timeout
70 | maxDeliverWorkers int //max delivery goroutines
71 | recoverPeriod time.Duration //recover period
72 | recievePermitsPerSecond int //max messages accepted per second
73 |
74 | }
75 |
76 | //only for test
77 | func MockServerOption() ServerOption {
78 | so := ServerOption{}
79 | so.registryUri = "zk://localhost:2181"
80 | so.bindHost = "localhost:13800"
81 | so.pprofPort = -1
82 | so.topics = []string{"trade"}
83 | so.deliveryFirst = false
84 | so.dlqExecHour = 2
85 | so.db = "memory://"
86 | so.clusterName = DEFAULT_APP
87 | so.deliveryTimeout = 5 * time.Second
88 | so.maxDeliverWorkers = 10
89 | so.recoverPeriod = 60 * time.Second
90 | so.recievePermitsPerSecond = 8000
91 | return so
92 | }
93 |
94 | var log = logx.GetLogger("kiteq_server")
95 |
96 | func Parse() ServerOption {
97 | //both configuration styles are supported
98 | pprofPort := flag.Int("pport", -1, "pprof port default value is -1 ")
99 | bindAddr := flag.String("bind", "localhost:13800", "-bind=localhost:13800")
100 | clusterName := flag.String("clusterName", "default_dev", "-clusterName=default_dev")
101 | configPath := flag.String("configPath", "", "-configPath=conf/cluster.toml kiteq配置的toml文件")
102 | logsPath := flag.String("logPath", "./logs", "-logPath=./logs logs输出路径")
103 | flag.Parse()
104 |
105 | logPath := "./logs"
106 | if nil != logsPath && len(*logsPath) > 0 {
107 | logPath = *logsPath
108 | }
109 |
110 | so := ServerOption{}
111 |
112 | //load from the config file when a path was given
113 | if nil != configPath && len(*configPath) > 0 {
114 | //parse it
115 | err := loadTomlConf(*configPath, *clusterName, *bindAddr, *pprofPort, &so)
116 | if nil != err {
117 | panic("loadTomlConf|FAIL|" + err.Error())
118 | }
119 | }
120 | //load the logging configuration
121 | if err := logx.InitLogger(logPath, so.logxml); nil != err {
122 | panic(err)
123 | }
124 | log = logx.GetLogger("kiteq_server")
125 | return so
126 | }
127 |
128 | func loadTomlConf(path, clusterName, bindAddr string, pprofPort int, so *ServerOption) error {
129 | f, err := os.Open(path)
130 | if err != nil {
131 | return err
132 | }
133 | defer f.Close()
134 | buff, rerr := ioutil.ReadAll(f)
135 | if nil != rerr {
136 | return rerr
137 | }
138 | //parse the configuration
139 | var option Option
140 | err = toml.Unmarshal(buff, &option)
141 | if nil != err {
142 | return err
143 | }
144 |
145 | cluster, ok := option.Clusters[clusterName]
146 | if !ok {
147 | return errors.New("no cluster config for " + clusterName)
148 | }
149 |
150 | registry, exist := option.Registry[cluster.Env]
151 | if !exist {
152 | return errors.New("no zk for " + clusterName + ":" + cluster.Env)
153 | }
154 |
155 | //populate the server option
156 | so.registryUri = registry.Hosts
157 | so.topics = cluster.Topics
158 | so.deliveryFirst = cluster.DeliveryFirst
159 | so.dlqExecHour = cluster.DlqExecHour
160 | so.logxml = cluster.Logxml
161 | so.db = cluster.Db
162 | so.deliveryTimeout = time.Duration(cluster.DeliverySeconds * int64(time.Second))
163 | so.maxDeliverWorkers = cluster.MaxDeliverWorkers
164 | so.recoverPeriod = time.Duration(cluster.RecoverSeconds * int64(time.Second))
165 | so.recievePermitsPerSecond = cluster.RecievePermitsPerSecond
166 | so.bindHost = bindAddr
167 | so.pprofPort = pprofPort
168 | so.clusterName = clusterName
169 | so.configPath = path
170 | return nil
171 | }
172 |
--------------------------------------------------------------------------------
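
To make the Option/Cluster/HostPort mapping above concrete, here is a hedged sketch of the shape loadTomlConf expects; the TOML fragment and its key casing are hypothetical (the file under conf/ is authoritative), and the structs are trimmed to a few fields:

    package main

    import (
        "fmt"

        "github.com/naoina/toml"
    )

    type HostPort struct{ Hosts string }

    type Cluster struct {
        Env             string
        Topics          []string
        Db              string
        DeliverySeconds int64
    }

    type Option struct {
        Registry map[string]HostPort
        Clusters map[string]Cluster
    }

    // a hypothetical cluster.toml fragment
    var sample = []byte(`
    [registry.dev]
    hosts = "zk://localhost:2181"

    [clusters.memory_dev]
    env = "dev"
    topics = ["trade"]
    db = "memory://"
    deliverySeconds = 5
    `)

    func main() {
        var opt Option
        if err := toml.Unmarshal(sample, &opt); err != nil {
            panic(err)
        }
        c := opt.Clusters["memory_dev"]
        fmt.Println(opt.Registry[c.Env].Hosts, c.Topics, c.Db)
    }

The cluster is selected by -clusterName, and its Env picks the registry entry, exactly as in loadTomlConf.
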
/server/kite_server_config_test.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestLoadToml(t *testing.T) {
8 | so := &ServerOption{}
9 | err := loadTomlConf("../conf/cluster.toml", "default", ":13000", 13001, so)
10 | if nil != err {
11 | t.Fail()
12 |
13 | }
14 | t.Log(so)
15 |
16 | }
17 |
--------------------------------------------------------------------------------
/server/kiteq_server.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "kiteq/handler"
7 | "net"
8 | "net/http"
9 | "os"
10 | "strconv"
11 | "time"
12 |
13 | "kiteq/exchange"
14 | "kiteq/store"
15 | "kiteq/store/parser"
16 |
17 | "github.com/blackbeans/kiteq-common/protocol"
18 | "github.com/blackbeans/turbo"
19 | )
20 |
21 | type KiteQServer struct {
22 | ctx context.Context
23 | reconnManager *turbo.ReconnectManager
24 | clientManager *turbo.ClientManager
25 | exchanger *exchange.BindExchanger
26 | remotingServer *turbo.TServer
27 | pipeline *turbo.DefaultPipeline
28 | recoverManager *RecoverManager
29 | kc KiteQConfig
30 | kitedb store.IKiteStore
31 | stop bool
32 | lastNetstat []turbo.NetworkStat
33 | lastKiteStat []kiteqstat
34 | limiter *turbo.BurstyLimiter
35 | topicNotify chan []string
36 | }
37 |
38 | //handshake packet
39 | func handshake(ga *turbo.GroupAuth, remoteClient *turbo.TClient) (bool, error) {
40 | return false, nil
41 | }
42 |
43 | func NewKiteQServer(ctx context.Context, kc KiteQConfig) *KiteQServer {
44 |
45 | kiteqName, _ := os.Hostname()
46 |
47 | kitedb := parser.ParseDB(ctx, kc.so.db, kiteqName)
48 |
49 | //reconnect manager
50 | reconnManager := turbo.NewReconnectManager(false, -1, -1, handshake)
51 |
52 | //client connection manager
53 | clientManager := turbo.NewClientManager(reconnManager)
54 |
55 | // the BindExchanger is created here for now
56 | exchanger := exchange.NewBindExchanger(ctx, kc.so.registryUri, kc.so.bindHost)
57 |
58 | //create the message delivery registry
59 | registry := handler.NewDeliveryRegistry(ctx, kc.rc.TW, 50*10000)
60 |
61 | //redelivery policy
62 | rw := make([]handler.RedeliveryWindow, 0, 10)
63 | rw = append(rw, handler.NewRedeliveryWindow(0, 3, 0))
64 | rw = append(rw, handler.NewRedeliveryWindow(4, 10, 5))
65 | rw = append(rw, handler.NewRedeliveryWindow(10, 20, 10))
66 | rw = append(rw, handler.NewRedeliveryWindow(20, 30, 2*10))
67 | rw = append(rw, handler.NewRedeliveryWindow(30, 40, 4*10))
68 | rw = append(rw, handler.NewRedeliveryWindow(40, 50, 8*10))
69 | rw = append(rw, handler.NewRedeliveryWindow(50, -1, 16*10))
70 |
71 | //create the KiteQServer rate limiter
72 | limiter, _ := turbo.NewBurstyLimiter(kc.so.recievePermitsPerSecond/2, kc.so.recievePermitsPerSecond)
73 |
74 | topicNotify := make(chan []string, 10)
75 | topicNotify <- kc.so.topics
76 | //initialize the pipeline
77 | pipeline := turbo.NewDefaultPipeline()
78 | pipeline.RegisteHandler("packet", handler.NewPacketHandler("packet"))
79 | pipeline.RegisteHandler("access", handler.NewAccessHandler("access", clientManager))
80 | pipeline.RegisteHandler("validate", handler.NewValidateHandler("validate", clientManager))
81 | pipeline.RegisteHandler("accept", handler.NewAcceptHandler("accept", limiter, kc.flowstat))
82 | pipeline.RegisteHandler("heartbeat", handler.NewHeartbeatHandler("heartbeat"))
83 | pipeline.RegisteHandler("check_message", handler.NewCheckMessageHandler("check_message", topicNotify))
84 | pipeline.RegisteHandler("persistent", handler.NewPersistentHandler("persistent", kc.so.deliveryTimeout, kitedb, kc.so.deliveryFirst))
85 | pipeline.RegisteHandler("txAck", handler.NewTxAckHandler("txAck", kitedb))
86 | pipeline.RegisteHandler("deliverpre", handler.NewDeliverPreHandler("deliverpre", kitedb, exchanger, kc.flowstat, kc.so.maxDeliverWorkers, registry))
87 | pipeline.RegisteHandler("deliver", handler.NewDeliverQosHandler(ctx, "deliver", kc.flowstat))
88 | pipeline.RegisteHandler("remoting", turbo.NewRemotingHandler("remoting", clientManager))
89 | pipeline.RegisteHandler("remote-future", handler.NewRemotingFutureHandler("remote-future"))
90 | pipeline.RegisteHandler("deliver-result", handler.NewDeliverResultHandler("deliver-result", kc.so.deliveryTimeout, kitedb, rw, registry))
91 | //delivery-result replies flow back from the remoting end through future-->result-->record
92 |
93 | recoverManager := NewRecoverManager(kiteqName, kc.so.recoverPeriod, pipeline, kitedb)
94 |
95 | return &KiteQServer{
96 | ctx: ctx,
97 | reconnManager: reconnManager,
98 | clientManager: clientManager,
99 | exchanger: exchanger,
100 | pipeline: pipeline,
101 | recoverManager: recoverManager,
102 | kc: kc,
103 | kitedb: kitedb,
104 | stop: false,
105 | lastNetstat: make([]turbo.NetworkStat, 2),
106 | lastKiteStat: make([]kiteqstat, 2),
107 | limiter: limiter,
108 | topicNotify: topicNotify}
109 |
110 | }
111 |
112 | func (self *KiteQServer) Start() {
113 |
114 | self.kitedb.Start()
115 |
116 | codec := protocol.KiteQBytesCodec{MaxFrameLength: turbo.MAX_PACKET_BYTES}
117 | self.remotingServer = turbo.NewTServerWithCodec(self.kc.so.bindHost, self.kc.rc,
118 | func() turbo.ICodec {
119 | return codec
120 | },
121 | func(ctx *turbo.TContext) error {
122 | c := ctx.Client
123 | p := ctx.Message
124 | event := turbo.NewPacketEvent(c, p)
125 | err := self.pipeline.FireWork(event)
126 | if nil != err {
127 | log.Errorf("RemotingServer|onPacketReceive|FAIL|%s", err)
128 |
129 | } else {
130 | // log.Debug("RemotingServer|onPacketRecieve|SUCC|%s|%t", rclient.RemoteAddr(), packet)
131 | }
132 | return err
133 | })
134 |
135 | err := self.remotingServer.ListenAndServer()
136 | if nil != err {
137 | log.Fatalf("KiteQServer|RemotingServer|START|FAIL|%s|%s", err, self.kc.so.bindHost)
138 | } else {
139 | log.Infof("KiteQServer|RemotingServer|START|SUCC|%s", self.kc.so.bindHost)
140 | }
141 | //publish the list of sendable topics and fetch their subscription bindings
142 | succ := self.exchanger.PushQServer(self.kc.so.bindHost, self.kc.so.topics)
143 | if !succ {
144 | log.Fatalf("KiteQServer|PushQServer|FAIL|%s|%s", err, self.kc.so.topics)
145 | } else {
146 | log.Infof("KiteQServer|PushQServer|SUCC|%s", self.kc.so.topics)
147 | }
148 |
149 | //start flow statistics
150 | self.startFlow()
151 |
152 | //start the recover manager
153 | self.recoverManager.Start()
154 |
155 | //schedule the DLQ job
156 | self.startDLQ()
157 |
158 | http.HandleFunc("/stat", self.HandleStat)
159 | http.HandleFunc("/binds", self.HandleBindings)
160 | http.HandleFunc("/reload", self.HandleReloadConf)
161 | //start pprof
162 | host, _, _ := net.SplitHostPort(self.kc.so.bindHost)
163 | go func() {
164 | if self.kc.so.pprofPort > 0 {
165 | log.Error(http.ListenAndServe(host+":"+strconv.Itoa(self.kc.so.pprofPort), nil))
166 | }
167 | }()
168 | }
169 |
170 | func (self *KiteQServer) startDLQ() {
171 | go func() {
172 | now := time.Now()
173 | next := now.Add(time.Hour * 24)
174 | next = time.Date(next.Year(), next.Month(), next.Day(), self.kc.so.dlqExecHour, 0, 0, 0, next.Location())
175 | time.Sleep(next.Sub(now))
176 | do := func() {
177 | defer func() {
178 | if err := recover(); nil != err {
179 | log.Errorf("KiteQServer|startDLQ|FAIL|%s|%s", err, time.Now())
180 | }
181 | }()
182 | //migrate expired messages
183 | self.kitedb.MoveExpired()
184 | log.Infof("KiteQServer|startDLQ|SUCC|%s", time.Now())
185 | }
186 |
187 | t := time.NewTicker(24 * time.Hour)
188 | for {
189 | select {
190 | case <-self.ctx.Done():
191 | t.Stop()
192 | return
193 | case <-t.C:
194 | do()
195 | }
196 | }
197 | }()
198 | log.Infof("KiteQServer|startDLQ|SUCC|%s", time.Now())
199 | }
200 |
201 | //handle a config reload
202 | func (self *KiteQServer) HandleReloadConf(resp http.ResponseWriter, req *http.Request) {
203 | so := ServerOption{}
204 | err := loadTomlConf(self.kc.so.configPath, self.kc.so.clusterName, self.kc.so.bindHost, self.kc.so.pprofPort, &so)
205 | if nil != err {
206 | log.Errorf("KiteQServer|HandleReloadConf|FAIL|%s", err)
207 | }
208 |
209 | //topics added or removed (note: only the count is compared)
210 | if len(so.topics) != len(self.kc.so.topics) {
211 | //publish the list of sendable topics and fetch their subscription bindings
212 | succ := self.exchanger.PushQServer(self.kc.so.bindHost, so.topics)
213 | if !succ {
214 | log.Errorf("KiteQServer|HandleReloadConf|PushQServer|FAIL|%s|%s", err, so.topics)
215 | } else {
216 | log.Infof("KiteQServer|HandleReloadConf|PushQServer|SUCC|%s", so.topics)
217 | }
218 | //replace the config
219 | self.kc.so = so
220 | //propagate the changed topics
221 | self.topicNotify <- so.topics
222 | }
223 |
224 | var result struct {
225 | Status int `json:"status"`
226 | Topics []string `json:"topics"`
227 | }
228 | result.Status = http.StatusOK
229 | result.Topics = so.topics
230 |
231 | rawJson, _ := json.Marshal(result)
232 | resp.Header().Set("content-type", "text/json")
233 | resp.WriteHeader(http.StatusOK)
234 | resp.Write(rawJson)
235 | }
236 |
237 | func (self *KiteQServer) Shutdown() {
238 | self.stop = true
239 | //shut down the exchanger first so clients stop sending data
240 | self.exchanger.Shutdown()
241 | self.recoverManager.Stop()
242 | self.kitedb.Stop()
243 | self.clientManager.Shutdown()
244 | self.remotingServer.Shutdown()
245 | log.Infof("KiteQServer|Shutdown...")
246 |
247 | }
248 |
--------------------------------------------------------------------------------
/server/kiteq_server_monitor.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "net/http"
7 | "runtime"
8 | "time"
9 | )
10 |
11 | type kiteqstat struct {
12 | Goroutine int32 `json:"goroutine"`
13 | DeliverGo int32 `json:"deliver_go"`
14 | DeliverCount int32 `json:"deliver_count"`
15 | RecieveCount int32 `json:"recieve_count"`
16 | MessageCount map[string]int `json:"message_count"`
17 | TopicsDeliver map[string] /*topicId*/ int32 `json:"topics_deliver"`
18 | TopicsRecieve map[string] /*topicId*/ int32 `json:"topics_recieve"`
19 | Groups map[string][]string `json:"groups"`
20 | KiteServerLimiter []int `json:"accept_limiter"`
21 | }
22 |
23 | //handler monitor
24 | func (self *KiteQServer) HandleStat(resp http.ResponseWriter, req *http.Request) {
25 |
26 | defer func() {
27 | if err := recover(); nil != err {
28 | //do nothing
29 | }
30 | }()
31 |
32 | idx := (time.Now().Unix() - 1) % 2
33 |
34 | //network
35 | rstat := self.lastNetstat[idx]
36 | rstat.Connections = self.clientManager.ConnNum()
37 |
38 | //per-topic message statistics
39 | //kiteq
40 | ks := self.lastKiteStat[idx]
41 | ks.Groups = self.clientManager.CloneGroups()
42 |
43 | result := make(map[string]interface{}, 2)
44 | result["kiteq"] = ks
45 | result["network"] = rstat
46 |
47 | data, _ := json.Marshal(result)
48 |
49 | resp.Header().Set("content-type", "text/json")
50 | resp.WriteHeader(http.StatusOK)
51 | //write monitor
52 | resp.Write(data)
53 | }
54 |
55 | type BindInfo struct {
56 | Topic2Groups map[string][]string `json:"topic_2_groups"`
57 | Topics2Limiters map[string]map[string][]int `json:"topic_limiters"`
58 | }
59 |
60 | //handler monitor
61 | func (self *KiteQServer) HandleBindings(resp http.ResponseWriter, req *http.Request) {
62 |
63 | binds := self.exchanger.Topic2Groups()
64 | limiters := self.exchanger.Topic2Limiters()
65 | bi := BindInfo{
66 | Topic2Groups: binds,
67 | Topics2Limiters: limiters}
68 | data, _ := json.Marshal(bi)
69 |
70 | resp.Header().Set("content-type", "text/json")
71 | resp.WriteHeader(http.StatusOK)
72 | resp.Write(data)
73 | }
74 |
75 | func (self *KiteQServer) startFlow() {
76 |
77 | go func() {
78 | t := time.NewTicker(1 * time.Second)
79 | count := 0
80 | for !self.stop {
81 |
82 | ns := self.remotingServer.NetworkStat()
83 |
84 | //per-topic deliver/receive counters
85 | topicsdeliver, topicsrecieve := self.kc.flowstat.TopicFlowSnapshot()
86 |
87 | //message backlog sizes
88 | var msgMap map[string]int
89 | if nil != self.kitedb {
90 | msgMap = self.kitedb.Length()
91 | if nil == msgMap {
92 | msgMap = make(map[string]int, 20)
93 | }
94 | }
95 |
96 | for _, topic := range self.kc.so.topics {
97 | _, ok := msgMap[topic]
98 | if !ok {
99 | msgMap[topic] = 0
100 | }
101 |
102 | _, ok = topicsdeliver[topic]
103 | if !ok {
104 | topicsdeliver[topic] = 0
105 | }
106 |
107 | _, ok = topicsrecieve[topic]
108 | if !ok {
109 | topicsrecieve[topic] = 0
110 | }
111 | }
112 |
113 | used, total := self.limiter.LimiterInfo()
114 | //kiteq
115 | ks := kiteqstat{
116 | Goroutine: int32(runtime.NumGoroutine()),
117 | DeliverGo: self.kc.flowstat.DeliverGo.Count(),
118 | DeliverCount: self.kc.flowstat.DeliverFlow.Changes(),
119 | RecieveCount: self.kc.flowstat.RecieveFlow.Changes(),
120 | MessageCount: msgMap,
121 | TopicsDeliver: topicsdeliver,
122 | TopicsRecieve: topicsrecieve,
123 | KiteServerLimiter: []int{used, total}}
124 |
125 | line := fmt.Sprintf("\nRemoting: \tread:%d/%d\twrite:%d/%d\tdispatcher_go:%d\tconnections:%d\t", ns.ReadBytes, ns.ReadCount,
126 | ns.WriteBytes, ns.WriteCount, ns.DisPoolSize, self.clientManager.ConnNum())
127 |
128 | line = fmt.Sprintf("%sKiteQ:\tdeliver:%d\tdeliver-go:%d", line, ks.DeliverCount,
129 | ks.DeliverGo)
130 | if nil != self.kitedb {
131 | line = fmt.Sprintf("%s\nKiteStore:%s", line, self.kitedb.Monitor())
132 |
133 | }
134 | log.Info(line)
135 | self.lastNetstat[count%2] = ns
136 | self.lastKiteStat[count%2] = ks
137 | count++
138 | <-t.C
139 | }
140 | t.Stop()
141 | }()
142 | }
143 |
--------------------------------------------------------------------------------
/server/recover_manager.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "fmt"
5 | "kiteq/handler"
6 | "kiteq/store"
7 | "time"
8 |
9 | "github.com/blackbeans/kiteq-common/protocol"
10 | "github.com/blackbeans/turbo"
11 | )
12 |
13 | //-----------recover manager
14 | type RecoverManager struct {
15 | pipeline *turbo.DefaultPipeline
16 | serverName string
17 | isClose bool
18 | kitestore store.IKiteStore
19 | recoverPeriod time.Duration
20 | recoverLimiter *turbo.BurstyLimiter
21 | }
22 |
23 | //------create the recover manager
24 | func NewRecoverManager(serverName string, recoverPeriod time.Duration,
25 | pipeline *turbo.DefaultPipeline, kitestore store.IKiteStore) *RecoverManager {
26 |
27 | limiter, _ := turbo.NewBurstyLimiter(2000, 2000)
28 | rm := &RecoverManager{
29 | serverName: serverName,
30 | kitestore: kitestore,
31 | isClose: false,
32 | pipeline: pipeline,
33 | recoverPeriod: recoverPeriod,
34 | recoverLimiter: limiter}
35 | return rm
36 | }
37 |
38 | //start the recovery tasks
39 | func (self *RecoverManager) Start() {
40 | for i := 0; i < self.kitestore.RecoverNum(); i++ {
41 | go self.startRecoverTask(fmt.Sprintf("%x", i))
42 | }
43 | log.Infof("RecoverManager|Start|SUCC....")
44 | }
45 |
46 | func (self *RecoverManager) startRecoverTask(hashKey string) {
47 | // log.Info("RecoverManager|startRecoverTask|SUCC|%s....", hashKey)
48 | for !self.isClose {
49 | //run one pass
50 | count := self.redeliverMsg(hashKey, time.Now())
51 | log.Infof("RecoverManager|endRecoverTask|%s|count:%d....", hashKey, count)
52 | time.Sleep(self.recoverPeriod)
53 | }
54 |
55 | }
56 |
57 | func (self *RecoverManager) Stop() {
58 | self.isClose = true
59 | }
60 |
61 | func (self *RecoverManager) redeliverMsg(hashKey string, now time.Time) int {
62 | startIdx := 0
63 | preTimestamp := now.Unix()
64 | //page through the message entities that are due for redelivery
65 | for !self.isClose {
66 | _, entities := self.kitestore.PageQueryEntity(hashKey, self.serverName,
67 | preTimestamp, startIdx, self.kitestore.RecoverLimit())
68 |
69 | if len(entities) <= 0 {
70 | break
71 | }
72 | // d, _ := json.Marshal(entities[0].Header)
73 | // log.Infof( "RecoverManager|redeliverMsg|%d|%s", now.Unix(), string(d))
74 |
75 | //kick off redelivery
76 | for _, entity := range entities {
77 | //uncommitted messages need a transaction-check probe
78 | if !entity.Commit {
79 | self.txAck(entity)
80 | } else {
81 | //fire the delivery event
82 | self.delivery(entity)
83 | }
84 | }
85 |
86 | startIdx += len(entities)
87 | // hasMore = more
88 | preTimestamp = entities[len(entities)-1].NextDeliverTime
89 | }
90 | return startIdx
91 | }
92 |
93 | //fire a delivery event
94 | func (self *RecoverManager) delivery(entity *store.MessageEntity) {
95 | deliver := handler.NewDeliverPreEvent(
96 | entity.MessageId,
97 | entity.Header,
98 | nil)
99 | //the pre-handler fills in the remaining fields first
100 | self.pipeline.FireWork(deliver)
101 | }
102 |
103 | //send the transaction-check ack
104 | func (self *RecoverManager) txAck(entity *store.MessageEntity) {
105 |
106 | txack := protocol.MarshalTxACKPacket(entity.Header, protocol.TX_UNKNOWN, "Server Check")
107 | p := turbo.NewPacket(protocol.CMD_TX_ACK, txack)
108 | //send the tx-ack to the publisher group from the header
109 | groupId := entity.PublishGroup
110 | event := turbo.NewRemotingEvent(p, nil, groupId)
111 | self.pipeline.FireWork(event)
112 | }
113 |
--------------------------------------------------------------------------------
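
redeliverMsg above is a cursor-style page scan: each page is capped by RecoverLimit, and the cursor advances to the last entity's NextDeliverTime, so one recover pass never re-reads the whole backlog. A standalone sketch of the loop shape (pageQuery is a stand-in, not the store API):

    package main

    import "fmt"

    type entity struct {
        id              string
        nextDeliverTime int64
    }

    // pageQuery stands in for IKiteStore.PageQueryEntity: return up to limit
    // entities due for redelivery, starting at offset startIdx.
    func pageQuery(since int64, startIdx, limit int) []entity {
        return nil // a real implementation reads from the store
    }

    func recoverScan(now int64, limit int) int {
        startIdx, since := 0, now
        for {
            page := pageQuery(since, startIdx, limit)
            if len(page) == 0 {
                break
            }
            for _, e := range page {
                fmt.Println("redeliver", e.id) // txAck or delivery in the real code
            }
            startIdx += len(page)
            since = page[len(page)-1].nextDeliverTime // advance the cursor
        }
        return startIdx
    }

    func main() { fmt.Println(recoverScan(1700000000, 100)) }
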
/server/recover_manager_test.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "context"
5 | "github.com/blackbeans/kiteq-common/protocol"
6 | "github.com/blackbeans/kiteq-common/stat"
7 | "github.com/blackbeans/turbo"
8 | "github.com/golang/protobuf/proto"
9 | "kiteq/exchange"
10 | "kiteq/handler"
11 | "kiteq/store"
12 | "kiteq/store/memory"
13 | "os"
14 | "testing"
15 | "time"
16 | )
17 |
18 | func buildStringMessage(id string) *protocol.StringMessage {
19 | //build the message
20 | entity := &protocol.StringMessage{}
21 | mid := store.MessageId()
22 | mid = mid[:len(mid)-1] + id
23 | // mid[len(mid)-1] = id[0]
24 | entity.Header = &protocol.Header{
25 | MessageId: proto.String(mid),
26 | Topic: proto.String("trade"),
27 | MessageType: proto.String("pay-succ"),
28 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
29 | DeliverLimit: proto.Int32(100),
30 | GroupId: proto.String("go-kite-test"),
31 | Commit: proto.Bool(true),
32 | Fly: proto.Bool(false)}
33 | entity.Body = proto.String("hello go-kite")
34 |
35 | return entity
36 | }
37 |
38 | type mockDeliverHandler struct {
39 | turbo.BaseDoubleSidedHandler
40 | ch chan bool
41 | }
42 |
43 | func newmockDeliverHandler(name string, ch chan bool) *mockDeliverHandler {
44 |
45 | phandler := &mockDeliverHandler{}
46 | phandler.BaseDoubleSidedHandler = turbo.NewBaseDoubleSidedHandler(name, phandler)
47 | phandler.ch = ch
48 | return phandler
49 | }
50 |
51 | func (self *mockDeliverHandler) TypeAssert(event turbo.IEvent) bool {
52 | return true
53 | }
54 |
55 | func (self *mockDeliverHandler) Process(ctx *turbo.DefaultPipelineContext, event turbo.IEvent) error {
56 | log.Printf("TestRecoverManager|-------------------%s", event)
57 | self.ch <- true
58 | return nil
59 |
60 | }
61 |
62 | func TestRecoverManager(t *testing.T) {
63 |
64 | pipeline := turbo.NewDefaultPipeline()
65 |
66 | kitedb := memory.NewKiteMemoryStore(context.Background(), 100, 100)
67 |
68 | messageid := store.MessageId()
69 | t.Logf("messageid:%s\b", messageid)
70 | entity := store.NewMessageEntity(protocol.NewQMessage(buildStringMessage(messageid)))
71 | kitedb.Save(entity)
72 | go func() {
73 | for {
74 | log.Println(kitedb.Monitor())
75 | time.Sleep(1 * time.Second)
76 | }
77 | }()
78 |
79 | fs := stat.NewFlowStat()
80 | ch := make(chan bool, 1)
81 |
82 | // the BindExchanger is created here for now
83 | exchanger := exchange.NewBindExchanger(context.Background(), "zk://localhost:2181", "127.0.0.1:13800")
84 |
85 | tw := turbo.NewTimerWheel(100 * time.Millisecond)
86 |
87 | deliveryRegistry := handler.NewDeliveryRegistry(context.Background(), tw, 10)
88 | pipeline.RegisteHandler("deliverpre", handler.NewDeliverPreHandler("deliverpre", kitedb, exchanger, fs, 100, deliveryRegistry))
89 | pipeline.RegisteHandler("deliver", newmockDeliverHandler("deliver", ch))
90 | hostname, _ := os.Hostname()
91 | rm := NewRecoverManager(hostname, 16*time.Second, pipeline, kitedb)
92 | rm.Start()
93 | select {
94 | case succ := <-ch:
95 | log.Printf("--------------recover %s", succ)
96 | case <-time.After(5 * time.Second):
97 | t.Fail()
98 | log.Println("waite recover deliver timeout")
99 | }
100 |
101 | }
102 |
--------------------------------------------------------------------------------
/store/file/kite_file_store_test.go:
--------------------------------------------------------------------------------
1 | package file
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/blackbeans/kiteq-common/protocol"
7 | "github.com/golang/protobuf/proto"
8 | "kiteq/store"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func TestFileStoreQuery(t *testing.T) {
14 | cleanSnapshot("./snapshot/")
15 | fs := NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
16 | fs.Start()
17 |
18 | for i := 0; i < 100; i++ {
19 | //build the message
20 | msg := &protocol.BytesMessage{}
21 | msg.Header = &protocol.Header{
22 | MessageId: proto.String(fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"),
23 | Topic: proto.String("trade"),
24 | MessageType: proto.String("pay-succ"),
25 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
26 | DeliverLimit: proto.Int32(100),
27 | GroupId: proto.String("go-kite-test"),
28 | Commit: proto.Bool(false),
29 | Fly: proto.Bool(false)}
30 | msg.Body = []byte("hello world")
31 |
32 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
33 |
34 | succ := fs.Save(entity)
35 | if !succ {
36 | t.Fail()
37 | }
38 | }
39 |
40 | for i := 0; i < 100; i++ {
41 | id := fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"
42 | entity := fs.Query("trade", id)
43 | if nil == entity {
44 | t.Fail()
45 | log.Printf("FAIL|%v", entity)
46 | } else {
47 | // log.Println(entity)
48 | }
49 | }
50 | fs.Stop()
51 | cleanSnapshot("./snapshot/")
52 | }
53 |
54 | func BenchmarkTestFileAppend(t *testing.B) {
55 | t.StopTimer()
56 | cleanSnapshot("./snapshot/")
57 | fs := NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
58 | fs.Start()
59 | t.StartTimer()
60 | for i := 0; i < t.N; i++ {
61 | //build the message
62 | msg := &protocol.BytesMessage{}
63 | msg.Header = &protocol.Header{
64 | MessageId: proto.String(fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"),
65 | Topic: proto.String("trade"),
66 | MessageType: proto.String("pay-succ"),
67 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
68 | DeliverLimit: proto.Int32(100),
69 | GroupId: proto.String("go-kite-test"),
70 | Commit: proto.Bool(false),
71 | Fly: proto.Bool(false)}
72 | msg.Body = []byte("hello world")
73 |
74 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
75 | succ := fs.Save(entity)
76 | if !succ {
77 | t.Fail()
78 | }
79 | }
80 |
81 | }
82 |
83 | func TestFileDuplicateAppend(t *testing.T) {
84 | cleanSnapshot("./snapshot/")
85 | fs := NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
86 | fs.Start()
87 | for i := 0; i < 10; i++ {
88 | //build the message
89 | msg := &protocol.BytesMessage{}
90 | msg.Header = &protocol.Header{
91 | MessageId: proto.String("26c03f00665862591f696a980b5ac"),
92 | Topic: proto.String("trade"),
93 | MessageType: proto.String("pay-succ"),
94 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
95 | DeliverLimit: proto.Int32(100),
96 | GroupId: proto.String("go-kite-test"),
97 | Commit: proto.Bool(false),
98 | Fly: proto.Bool(false)}
99 | msg.Body = []byte("hello world")
100 |
101 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
102 | succ := fs.Save(entity)
103 | if i == 0 && !succ {
104 | t.Fail()
105 | return
106 | }
107 |
108 | if i > 0 && succ {
109 | t.Fail() //a duplicate append must not succeed
110 | }
111 | }
112 |
113 | }
114 |
115 | func TestFileStoreCommit(t *testing.T) {
116 | cleanSnapshot("./snapshot/")
117 | fs := NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
118 | fs.Start()
119 |
120 | for i := 0; i < 100; i++ {
121 | //build the message
122 | msg := &protocol.BytesMessage{}
123 | msg.Header = &protocol.Header{
124 | MessageId: proto.String(fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"),
125 | Topic: proto.String("trade"),
126 | MessageType: proto.String("pay-succ"),
127 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
128 | DeliverLimit: proto.Int32(100),
129 | GroupId: proto.String("go-kite-test"),
130 | Commit: proto.Bool(false),
131 | Fly: proto.Bool(false)}
132 | msg.Body = []byte("hello world")
133 |
134 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
135 |
136 | succ := fs.Save(entity)
137 | if !succ {
138 | t.Fail()
139 | }
140 | }
141 |
142 | //commit and check
143 | for i := 0; i < 100; i++ {
144 | id := fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"
145 | fs.Commit("trade", id)
146 |
147 | entity := fs.Query("trade", id)
148 | if nil == entity {
149 | t.Fail()
150 | } else if !entity.Commit {
151 | t.Fail()
152 | // log.Println(entity)
153 | }
154 | }
155 | fs.Stop()
156 | cleanSnapshot("./snapshot/")
157 | }
158 |
159 | func TestFileStoreUpdate(t *testing.T) {
160 | cleanSnapshot("./snapshot/")
161 | fs := NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
162 | fs.Start()
163 |
164 | for i := 0; i < 100; i++ {
165 | //build the message
166 | msg := &protocol.BytesMessage{}
167 | msg.Header = &protocol.Header{
168 | MessageId: proto.String(fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"),
169 | Topic: proto.String("trade"),
170 | MessageType: proto.String("pay-succ"),
171 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
172 | DeliverLimit: proto.Int32(100),
173 | GroupId: proto.String("go-kite-test"),
174 | Commit: proto.Bool(true),
175 | Fly: proto.Bool(false)}
176 | msg.Body = []byte("hello world")
177 |
178 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
179 | // log.Printf("------------%s", entity.Header)
180 | succ := fs.Save(entity)
181 | if !succ {
182 | t.Fail()
183 | }
184 | }
185 |
186 | //update and check
187 | for i := 0; i < 100; i++ {
188 | id := fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"
189 |
190 | //build the update entity
191 | msg := &store.MessageEntity{
192 | MessageId: id,
193 | DeliverCount: 1,
194 | SuccGroups: []string{},
195 | FailGroups: []string{"s-mts-test"}}
196 |
197 | succ := fs.UpdateEntity(msg)
198 | if !succ {
199 | t.Fail()
200 | }
201 | //check entity
202 | entity := fs.Query("trade", id)
203 | // log.Printf("++++++++++++++|%s|%s", entity.Header, string(entity.GetBody().([]byte)))
204 | if nil == entity {
205 | t.Fail()
206 | } else if !entity.Commit && entity.DeliverCount != 1 &&
207 | entity.FailGroups[0] != "s-mts-test" {
208 | t.Fail()
209 | // log.Println(entity)
210 | }
211 | }
212 | fs.Stop()
213 | // cleanSnapshot("./snapshot/")
214 | }
215 |
216 | func TestFileStoreDelete(t *testing.T) {
217 |
218 | cleanSnapshot("./snapshot/")
219 | fs := NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
220 | fs.Start()
221 |
222 | for i := 0; i < 100; i++ {
223 | //build the message
224 | msg := &protocol.BytesMessage{}
225 | msg.Header = &protocol.Header{
226 | MessageId: proto.String(fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"),
227 | Topic: proto.String("trade"),
228 | MessageType: proto.String("pay-succ"),
229 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
230 | DeliverLimit: proto.Int32(100),
231 | GroupId: proto.String("go-kite-test"),
232 | Commit: proto.Bool(true),
233 | Fly: proto.Bool(false)}
234 | msg.Body = []byte("hello world")
235 |
236 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
237 |
238 | succ := fs.Save(entity)
239 | if !succ {
240 | t.Fail()
241 | }
242 | }
243 |
244 | //delete all messages
245 | for i := 0; i < 100; i++ {
246 | id := fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"
247 |
248 | //delete
249 | fs.Delete("trade", id)
250 |
251 | }
252 |
253 | time.Sleep(5 * time.Second)
254 | for i := 0; i < 100; i++ {
255 | id := fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"
256 | //check entity
257 | entity := fs.Query("trade", id)
258 | if nil != entity {
259 | t.Fail()
260 | }
261 | }
262 |
263 | fs.Stop()
264 | cleanSnapshot("./snapshot/")
265 | }
266 |
267 | func TestFileStoreInit(t *testing.T) {
268 |
269 | cleanSnapshot("./snapshot/")
270 | fs := NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
271 | fs.Start()
272 |
273 | for i := 0; i < 100; i++ {
274 | //build the message
275 | msg := &protocol.BytesMessage{}
276 | msg.Header = &protocol.Header{
277 | MessageId: proto.String(fmt.Sprint(i) + "26c03f00665862591f696a980b5ac"),
278 | Topic: proto.String("trade"),
279 | MessageType: proto.String("pay-succ"),
280 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
281 | DeliverLimit: proto.Int32(100),
282 | GroupId: proto.String("go-kite-test"),
283 | Commit: proto.Bool(true),
284 | Fly: proto.Bool(false)}
285 | msg.Body = []byte("hello world")
286 |
287 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
288 |
289 | succ := fs.Save(entity)
290 | if !succ {
291 | t.Fail()
292 | }
293 |
294 | if i < 50 {
295 | fs.AsyncDelete(entity.Topic, entity.MessageId)
296 | }
297 | }
298 |
299 | time.Sleep(10 * time.Second)
300 | fs.Stop()
301 |
302 | log.Println("-------------------Query")
303 | fs = NewKiteFileStore(context.TODO(), ".", 1000, 5000000, 1*time.Second)
304 | fs.Start()
305 |
306 | // for _, v := range fs.oplogs {
307 | // for _, e := range v {
308 | // ob := e.Value.(*opBody)
309 | // log.Printf("TestFileStoreInit|Check|%d|%s", ob.Id, ob.MessageId)
310 | // }
311 | // }
312 | log.Printf("TestFileStoreInit|Check|SUCC|")
313 | //check the surviving messages still exist
314 | for i := 50; i < 100; i++ {
315 | id := fmt.Sprint(i) + "26c03f00665862591f696a980b5ac"
316 |
317 | //check entity
318 | entity := fs.Query("trade", id)
319 | if nil == entity || !entity.Commit {
320 | // log.Printf("TestFileStoreInit|Exist|FAIL|%s|%s", id, entity)
321 | t.Fail()
322 | return
323 | }
324 | log.Printf("TestFileStoreInit|Exist|SUCC|%d|%s|%v", i, id, entity)
325 | }
326 |
327 | log.Printf("TestFileStoreInit|Exist|")
328 |
329 | //check the deleted messages are gone
330 | for i := 0; i < 50; i++ {
331 | id := fmt.Sprint(i) + "26c03f00665862591f696a980b5ac"
332 |
333 | //check entity
334 | entity := fs.Query("trade", id)
335 | if nil != entity {
336 | log.Printf("TestFileStoreInit|Delete|FAIL|%s", id)
337 | t.Fail()
338 | }
339 | }
340 |
341 | log.Printf("TestFileStoreInit|Delete")
342 | fs.Stop()
343 | cleanSnapshot("./snapshot/")
344 | }
345 |
--------------------------------------------------------------------------------
/store/file/kite_message_store_test.go:
--------------------------------------------------------------------------------
1 | package file
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "os"
7 | "path/filepath"
8 | "strings"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func traverse(oplog *oplog) {
14 | log.Printf("------%v", oplog)
15 | }
16 |
17 | func TestSingle(t *testing.T) {
18 | cleanSnapshot("./snapshot/")
19 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
20 | snapshot.Start()
21 |
22 | for i := 0; i < 2; i++ {
23 | cmd := NewCommand(-1, fmt.Sprintln(i), []byte{0}, []byte{1})
24 | <-snapshot.Append(cmd)
25 |
26 | }
27 |
28 | log.Printf("snapshot|%s", snapshot)
29 |
30 | for i := 0; i < 2; i++ {
31 | snapshot.Delete(NewCommand(int64(i), fmt.Sprintln(i), []byte{0}, []byte{1}))
32 | }
33 | time.Sleep(2 * time.Second)
34 | snapshot.Destory()
35 |
36 | snapshot = NewMessageStore("./snapshot/", 1, 10, traverse)
37 | snapshot.Start()
38 |
39 | cleanSnapshot("./snapshot/")
40 | }
41 |
42 | func TestAppend(t *testing.T) {
43 | cleanSnapshot("./snapshot/")
44 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
45 | snapshot.Start()
46 | run := true
47 | i := 0
48 | last := 0
49 |
50 | go func() {
51 | for ; i < 20; i++ {
52 | cmd := NewCommand(-1, fmt.Sprint(i), []byte(fmt.Sprintf("hello snapshot|%d", i)), nil)
53 | <-snapshot.Append(cmd)
54 | }
55 | run = false
56 | }()
57 |
58 | for run {
59 | log.Printf("tps:%d", (i - last))
60 | last = i
61 | time.Sleep(1 * time.Second)
62 | }
63 | time.Sleep(10 * time.Second)
64 | log.Printf("snapshot|%s", snapshot)
65 |
66 | if snapshot.chunkId != 20-1 {
67 | t.Fail()
68 | }
69 | snapshot.Destory()
70 | cleanSnapshot("./snapshot/")
71 | }
72 |
73 | func cleanSnapshot(path string) {
74 |
75 | err := os.RemoveAll(path)
76 | if nil != err {
77 | log.Printf("Remove|FAIL|%s", path)
78 | } else {
79 | log.Printf("Remove|SUCC|%s", path)
80 | }
81 |
82 | }
83 |
84 | //test delete then restart
85 | func TestDeleteAndStart(t *testing.T) {
86 | cleanSnapshot("./snapshot/")
87 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
88 | snapshot.Start()
89 | for j := 0; j < 1000; j++ {
90 | d := []byte(fmt.Sprintln(j))
91 | cmd := NewCommand(-1, fmt.Sprintln(j), d, nil)
92 | <-snapshot.Append(cmd)
93 | // log.Printf("TestDelete|Append|%d|...", j)
94 | }
95 | log.Printf("TestDeleteAndStart|Delete|Start...")
96 |
97 | if snapshot.chunkId != 999 {
98 | t.Fail()
99 | }
100 |
101 | i := 0
102 | last := 0
103 | run := true
104 | go func() {
105 | for run {
106 | log.Printf("tps:%d", (i - last))
107 | last = i
108 | time.Sleep(1 * time.Second)
109 | }
110 | }()
111 |
112 | for j := 0; j < 1000; j++ {
113 | id := int64(j)
114 | var str string
115 | data, err := snapshot.Query(id)
116 | if nil != err {
117 | log.Printf("TestDeleteAndStart|Query|%s", err)
118 | }
119 |
120 | str = string(data)
121 | if str != fmt.Sprintln(j) {
122 | log.Printf("TestDeleteAndStart|Query|FAIL|%s", str)
123 | t.Fail()
124 | continue
125 | }
126 |
127 | c := NewCommand(id, "", nil, nil)
128 | snapshot.Delete(c)
129 | i++
130 | _, err = snapshot.Query(id)
131 | if nil == err {
132 | t.Fail()
133 | log.Printf("TestDeleteAndStart|DELETE-QUERY|FAIL|%s", str)
134 | continue
135 | }
136 | }
137 | run = false
138 | snapshot.Destory()
139 |
140 | log.Printf("TestDeleteAndStart|Start...")
141 | snapshot = NewMessageStore("./snapshot/", 1, 10, traverse)
142 | snapshot.Start()
143 |
144 | fcount := 0
145 | //count empty segment data files
146 | filepath.Walk("./snapshot/", func(path string, f os.FileInfo, err error) error {
147 | // log.Info("MessageStore|Walk|%s", path)
148 | if nil != f && !f.IsDir() &&
149 | strings.HasSuffix(f.Name(), ".data") && f.Size() == 0 {
150 | fmt.Println(f.Name())
151 | fcount++
152 | }
153 | return nil
154 | })
155 |
156 | log.Printf("TestDeleteAndStart|Start|Lstat|%d", fcount)
157 | if fcount != 1 {
158 | t.Fail()
159 | }
160 | log.Printf("TestDeleteAndStart|Start|ChunkId|%d", snapshot.chunkId)
161 | if snapshot.chunkId != -1 {
162 | t.Fail()
163 | }
164 | snapshot.Destory()
165 | }
166 |
167 | //test delete
168 | func TestDelete(t *testing.T) {
169 | cleanSnapshot("./snapshot/")
170 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
171 | snapshot.Start()
172 | for j := 0; j < 1000; j++ {
173 | d := []byte(fmt.Sprintln(j))
174 | cmd := NewCommand(-1, fmt.Sprintln(j), d, nil)
175 | <-snapshot.Append(cmd)
176 | // log.Printf("TestDelete|Append|%d|...", j)
177 | }
178 | snapshot.Destory()
179 |
180 | // time.Sleep(5 * time.Second)
181 |
182 | log.Printf("TestDelete|Append|Complete...")
183 | //reload
184 | nsnapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
185 | nsnapshot.Start()
186 |
187 | log.Printf("TestDelete|Delete|Start...")
188 |
189 | i := 0
190 | last := 0
191 | run := true
192 | go func() {
193 | for run {
194 | log.Printf("tps:%d", (i - last))
195 | last = i
196 | time.Sleep(1 * time.Second)
197 | }
198 | }()
199 |
200 | // for _, s := range nsnapshot.segments {
201 | // for _, c := range s.chunks {
202 | // log.Printf("nsnapshot|------%d", c.id)
203 | // }
204 | // }
205 | for j := 0; j < 1000; j++ {
206 | id := int64(j)
207 | var str string
208 | data, err := nsnapshot.Query(id)
209 | if nil != err {
210 | log.Printf("TestDelete|Query|%s", err)
211 | }
212 |
213 | str = string(data)
214 | if str != fmt.Sprintln(j) {
215 | log.Printf("TestDelete|Query|FAIL|%s", str)
216 | t.Fail()
217 | continue
218 | }
219 |
220 | c := NewCommand(id, "", nil, nil)
221 | nsnapshot.Delete(c)
222 | i++
223 | _, err = nsnapshot.Query(id)
224 | if nil == err {
225 | t.Fail()
226 | log.Printf("TestDelete|DELETE-QUERY|FAIL|%s", str)
227 | continue
228 | }
229 | }
230 | run = false
231 | nsnapshot.Destory()
232 | cleanSnapshot("./snapshot/")
233 | }
234 |
235 | func TestQuery(t *testing.T) {
236 |
237 | cleanSnapshot("./snapshot/")
238 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
239 | snapshot.Start()
240 | var data [512]byte
241 | for j := 0; j < 20; j++ {
242 | d := append(data[:512], []byte{
243 | byte((j >> 24) & 0xFF),
244 | byte((j >> 16) & 0xFF),
245 | byte((j >> 8) & 0xFF),
246 | byte(j & 0xFF)}...)
247 |
248 | cmd := NewCommand(-1, fmt.Sprint(j), d, nil)
249 | <-snapshot.Append(cmd)
250 | }
251 |
252 | time.Sleep(10 * time.Second)
253 |
254 | run := true
255 | i := 0
256 | j := 0
257 | last := 0
258 |
259 | go func() {
260 | for run {
261 | log.Printf("qps:%d", (j - last))
262 | last = j
263 | time.Sleep(1 * time.Second)
264 | }
265 |
266 | }()
267 |
268 | for ; i < 20; i++ {
269 | id := int64(rand.Intn(20))
270 | _, err := snapshot.Query(id)
271 | if nil != err {
272 | log.Printf("Query|%s|%d", err, id)
273 | t.Fail()
274 | break
275 |
276 | } else {
277 | // log.Printf("Query|SUCC|%d", id)
278 | j++
279 | }
280 | }
281 |
282 | _, err := snapshot.Query(19)
283 | if nil != err {
284 | log.Printf("Query|%s|%d", err, 19)
285 | t.Fail()
286 |
287 | }
288 |
289 | _, err = snapshot.Query(0)
290 | if nil != err {
291 | log.Printf("Query|%s|%d", err, 0)
292 | t.Fail()
293 | }
294 |
295 | run = false
296 |
297 | log.Printf("snapshot|%s|%d", snapshot, j)
298 |
299 | snapshot.Destory()
300 | cleanSnapshot("./snapshot/")
301 | }
302 |
303 | func BenchmarkDelete(t *testing.B) {
304 | t.Logf("BenchmarkDelete|Delete|Start...")
305 | t.StopTimer()
306 | cleanSnapshot("./snapshot/")
307 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
308 | snapshot.Start()
309 |
310 | for j := 0; j < 20; j++ {
311 | d := []byte(fmt.Sprintf("%d|hello snapshot", j))
312 | cmd := NewCommand(-1, fmt.Sprint(j), d, nil)
313 | <-snapshot.Append(cmd)
314 | }
315 |
316 | time.Sleep(2 * time.Second)
317 | t.StartTimer()
318 |
319 | i := 0
320 | for ; i < t.N; i++ {
321 | id := int64(rand.Intn(20))
322 | cmd := NewCommand(id, "", nil, nil)
323 | snapshot.Delete(cmd)
324 |
325 | }
326 |
327 | t.StopTimer()
328 | snapshot.Destory()
329 | cleanSnapshot("./snapshot/")
330 | t.StartTimer()
331 | t.Logf("BenchmarkDelete|Delete|END...")
332 | }
333 |
334 | func BenchmarkQuery(t *testing.B) {
335 |
336 | log.Printf("BenchmarkQuery|Query|Start...")
337 | t.StopTimer()
338 | cleanSnapshot("./snapshot/")
339 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
340 | snapshot.Start()
341 | for j := 0; j < 20; j++ {
342 | d := []byte(fmt.Sprintf("%d|hello snapshot", j))
343 | cmd := NewCommand(-1, fmt.Sprint(j), d, nil)
344 | snapshot.Append(cmd)
345 | }
346 |
347 | time.Sleep(2 * time.Second)
348 | t.StartTimer()
349 |
350 | i := 0
351 | for ; i < t.N; i++ {
352 | id := int64(rand.Intn(20))
353 | _, err := snapshot.Query(id)
354 | if nil != err {
355 | log.Printf("Query|%s|%d", err, id)
356 | t.Fail()
357 | break
358 | }
359 | }
360 |
361 | t.StopTimer()
362 | snapshot.Destory()
363 | cleanSnapshot("./snapshot/")
364 | t.StartTimer()
365 |
366 | }
367 |
368 | func BenchmarkAppend(t *testing.B) {
369 | t.StopTimer()
370 | cleanSnapshot("./snapshot/")
371 | snapshot := NewMessageStore("./snapshot/", 1, 10, traverse)
372 | snapshot.Start()
373 | t.StartTimer()
374 |
375 | for i := 0; i < t.N; i++ {
376 | d := []byte(fmt.Sprintf("hello snapshot-%d", i))
377 | cmd := NewCommand(-1, fmt.Sprint(i), d, nil)
378 | <-snapshot.Append(cmd)
379 | }
380 |
381 | t.StopTimer()
382 | time.Sleep(5 * time.Second)
383 | snapshot.Destory()
384 | cleanSnapshot("./snapshot/")
385 | t.StartTimer()
386 |
387 | }
388 |
--------------------------------------------------------------------------------
/store/file/kite_segment_log.go:
--------------------------------------------------------------------------------
1 | package file
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "encoding/binary"
7 | "encoding/gob"
8 | "errors"
9 | "fmt"
10 | "io"
11 | "os"
12 | "sync"
13 | "sync/atomic"
14 | "time"
15 | )
16 |
17 | const (
18 | OP_C = 'c' //create
19 | OP_U = 'u' //update
20 | OP_D = 'd' //delete
21 | OP_E = 'e' //expired
22 | )
23 |
24 | type SegmentLog struct {
25 | offset int64 // log offset
26 | path string
27 | rf *os.File //read fd
28 | wf *os.File //append-mode write fd
29 | bw *bufio.Writer //buffered writer over wf
30 | br *bufio.Reader //buffered reader over rf, used for oplog replay
31 | sync.RWMutex
32 | isOpen int32
33 | }
34 |
35 | func newSegmentLog(path string) *SegmentLog {
36 | return &SegmentLog{
37 | path: path}
38 |
39 | }
40 |
41 | func (self *SegmentLog) Open() error {
42 | var rf *os.File
43 | var wf *os.File
44 | if atomic.CompareAndSwapInt32(&self.isOpen, 0, 1) {
45 | //create the log file if it does not exist yet
46 | _, err := os.Stat(self.path)
47 | if os.IsNotExist(err) {
48 | _, err := os.Create(self.path)
49 | if nil != err {
50 | log.Errorf("SegmentLog|Create|FAIL|%s|%s", err, self.path)
51 | return err
52 | }
53 | }
54 |
55 | //open an append-mode fd for writes
56 | wf, err = os.OpenFile(self.path, os.O_RDWR|os.O_APPEND, os.ModePerm)
57 | if nil != err {
58 | log.Errorf("SegmentLog|Open|FAIL|%s|%s", err, self.path)
59 | return err
60 | }
61 |
62 | rf, err = os.OpenFile(self.path, os.O_RDWR, os.ModePerm)
63 | if nil != err {
64 | log.Errorf("SegmentLog|Open|FAIL|%s|%s", err, self.path)
65 | return err
66 | }
67 |
68 | self.rf = rf
69 | self.wf = wf
70 | //buffer
71 | self.br = bufio.NewReader(rf)
72 | self.bw = bufio.NewWriter(wf)
73 | log.Infof("SegmentLog|Open|SUCC|%s", self.path)
74 | }
75 | return nil
76 | }
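// Design note (from a reading of Open above): the log keeps two descriptors
// on the same file. wf is opened with O_APPEND behind a bufio.Writer for
// writes, while rf sits behind a bufio.Reader for reads, so Replay can rescan
// from offset 0 without disturbing the append position.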
77 |
78 | //Replay traverses the oplog and applies do to each entry
79 | func (self *SegmentLog) Replay(do func(l *oplog)) {
80 |
81 | self.Open()
82 | offset := int64(0)
83 | tmp := make([]byte, 1024)
84 | //seek to head
85 | self.rf.Seek(0, 0)
86 | self.br.Reset(self.rf)
87 |
88 | for {
89 | var length int32
90 | err := binary.Read(self.br, binary.BigEndian, &length)
91 | if nil != err {
92 | if err == io.EOF {
93 | self.br.Reset(self.rf)
94 | break
95 | }
96 | log.Warnf("SegmentLog|Replay|LEN|%s|Abort Replay...", err)
97 | break
98 | }
99 |
100 | // log.Debug("SegmentLog|Replay|LEN|%d", length)
101 |
102 | if int(length) > cap(tmp) {
103 | grow := make([]byte, int(length)-cap(tmp))
104 | tmp = append(tmp[0:cap(tmp)], grow...)
105 | }
106 |
107 | err = binary.Read(self.br, binary.BigEndian, tmp[:int(length)-4])
108 | if nil != err {
109 | self.br.Reset(self.rf)
110 | log.Errorf("SegmentLog|Replay|Data|%s", err)
111 | break
112 | }
113 |
114 | var ol oplog
115 | r := bytes.NewReader(tmp[:int(length)-4])
116 | deco := gob.NewDecoder(r)
117 | err = deco.Decode(&ol)
118 | if nil != err {
119 | log.Errorf("SegmentLog|Replay|unmarshal|oplog|FAIL|%s", err)
120 | continue
121 | }
122 | // log.Debug("SegmentLog|Replay|oplog|%s", ol)
123 | do(&ol)
124 | //advance past this record
125 | offset += int64(length)
126 |
127 | }
128 | self.offset = offset
129 | }
130 |
131 | //append a batch of oplogs
132 | func (self *SegmentLog) Appends(logs []*oplog) error {
133 |
134 | //reject writes once closed
135 | if atomic.LoadInt32(&self.isOpen) == 0 {
136 | return errors.New(fmt.Sprintf("SegmentLog Is Closed!|%s", self.path))
137 | }
138 |
139 | length := int64(0)
140 | for _, lo := range logs {
141 | tmp := lo.marshal()
142 | for {
143 | l, err := self.bw.Write(tmp)
144 | length += int64(l)
145 | if nil != err && err != io.ErrShortWrite {
146 | log.Errorf("SegmentLog|Append|FAIL|%s|%d/%d", err, l, len(tmp))
147 | return err
148 | } else if nil == err {
149 | break
150 | } else {
151 | self.bw.Reset(self.wf)
152 | log.Errorf("SegmentLog|Append|FAIL|%s", err)
153 | }
154 | tmp = tmp[l:]
155 |
156 | }
157 | }
158 |
159 | //flush
160 | self.bw.Flush()
161 |
162 | //move offset
163 | atomic.AddInt64(&self.offset, int64(length))
164 | return nil
165 | }
166 |
167 | //append a single oplog
168 | func (self *SegmentLog) Append(ol *oplog) error {
169 |
170 | //reject writes once closed
171 | if atomic.LoadInt32(&self.isOpen) == 0 {
172 | return errors.New(fmt.Sprintf("SegmentLog Is Closed!|%s", self.path))
173 | }
174 |
175 | buff := ol.marshal()
176 | tmp := buff
177 | for {
178 | l, err := self.bw.Write(tmp)
179 | if nil != err && err != io.ErrShortWrite {
180 | log.Errorf("SegmentLog|Append|FAIL|%s|%d/%d", err, l, len(tmp))
181 | return err
182 | } else if nil == err {
183 | break
184 | } else {
185 | self.bw.Reset(self.wf)
186 | }
187 | tmp = tmp[l:]
188 | }
189 | self.bw.Flush()
190 |
191 | //advance the log offset
192 | atomic.AddInt64(&self.offset, int64(len(buff)))
193 | return nil
194 | }
195 |
196 | func (self *SegmentLog) Close() error {
197 | if atomic.CompareAndSwapInt32(&self.isOpen, 1, 0) {
198 | err := self.bw.Flush()
199 | if nil != err {
200 | log.Errorf("SegmentLog|Close|Writer|FLUSH|FAIL|%s|%s", err, self.path)
201 | }
202 |
203 | err = self.wf.Close()
204 | if nil != err {
205 | log.Errorf("SegmentLog|Close|Write FD|FAIL|%s|%s", err, self.path)
206 | return err
207 | }
208 |
209 | err = self.rf.Close()
210 | if nil != err {
211 | log.Errorf("SegmentLog|Close|Read FD|FAIL|%s|%s", err, self.path)
212 | }
213 | return err
214 |
215 |
216 | }
217 |
218 | return nil
219 | }
220 |
221 | //data operation log
222 | type oplog struct {
223 | Time int64 `json:"time"`
224 | Op byte `json:"op"`
225 | ChunkId int64 `json:"chunk_id"`
226 | LogicId string `json:"logic_id"`
227 | Body []byte `json:"body"`
228 | }
229 |
230 | func newOplog(op byte, logicId string, chunkid int64, body []byte) *oplog {
231 | return &oplog{
232 | Time: time.Now().Unix(),
233 | Op: op,
234 | ChunkId: chunkid,
235 | LogicId: logicId,
236 | Body: body}
237 | }
238 |
239 | //marshal oplog
240 | func (self *oplog) marshal() []byte {
241 |
242 | buff := new(bytes.Buffer)
243 |
244 | binary.Write(buff, binary.BigEndian, int32(0))
245 | encoder := gob.NewEncoder(buff)
246 | err := encoder.Encode(self)
247 | if nil != err {
248 | log.Errorf("oplog|marshal|fail|%s|%v", err, self)
249 | return nil
250 | }
251 | b := buff.Bytes()
252 | binary.BigEndian.PutUint32(b, uint32(buff.Len()))
253 | return b
254 | }
255 |
256 | //unmarshal data
257 | func (self *oplog) unmarshal(data []byte) error {
258 | r := bytes.NewReader(data)
259 | dec := gob.NewDecoder(r)
260 | return dec.Decode(self)
261 |
262 | }
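// A minimal round-trip sketch of the framing above (illustrative only; this
// helper is not part of the store): marshal() emits a 4-byte big-endian total
// length followed by the gob body, and the length includes its own 4 header
// bytes, which is why Replay() reads an int32 and then consumes length-4
// payload bytes.
func exampleOplogRoundTrip() (*oplog, error) {
ol := newOplog(OP_C, "logic-0", 1, []byte("body"))
framed := ol.marshal()
//the header equals len(framed); strip it before decoding
total := binary.BigEndian.Uint32(framed[:4])
var decoded oplog
err := decoded.unmarshal(framed[4:total])
return &decoded, err
}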
263 |
--------------------------------------------------------------------------------
/store/kite_mock_store.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "github.com/blackbeans/kiteq-common/protocol"
5 | "github.com/golang/protobuf/proto"
6 | "time"
7 | )
8 |
9 | type MockKiteStore struct {
10 | }
11 |
12 | func NewMockKiteStore() *MockKiteStore {
13 | return &MockKiteStore{}
14 | }
15 |
16 | func (self *MockKiteStore) Start() {}
17 | func (self *MockKiteStore) Stop() {}
18 | func (self *MockKiteStore) Monitor() string { return "mock" }
19 |
20 | func (self *MockKiteStore) RecoverNum() int {
21 | return 0
22 | }
23 |
24 | func (self *MockKiteStore) RecoverLimit() int {
25 | return 0
26 | }
27 |
28 | func (self *MockKiteStore) Length() map[string] /*topic*/ int {
29 | //TODO count of unexpired messages (mirrors the mysql store)
30 |
31 | return make(map[string] /*topic*/ int, 1)
32 | }
33 |
34 | func (self *MockKiteStore) AsyncUpdateDeliverResult(entity *MessageEntity) bool { return true }
35 | func (self *MockKiteStore) AsyncDelete(topic, messageId string) bool { return true }
36 | func (self *MockKiteStore) AsyncCommit(topic, messageId string) bool { return true }
37 | func (self *MockKiteStore) Expired(topic, messageId string) bool { return true }
38 |
39 | func (self *MockKiteStore) Query(topic, messageId string) *MessageEntity {
40 | entity := NewMessageEntity(protocol.NewQMessage(buildBytesMessage(messageId)))
41 | return entity
42 |
43 | }
44 | func (self *MockKiteStore) Save(entity *MessageEntity) bool {
45 | return true
46 | }
47 | func (self *MockKiteStore) Commit(topic, messageId string) bool {
48 | return true
49 | }
50 |
51 | func (self *MockKiteStore) Delete(topic, messageId string) bool {
52 | return true
53 | }
54 | func (self *MockKiteStore) BatchDelete(topic, messageId []string) bool {
55 | return true
56 | }
57 | func (self *MockKiteStore) Rollback(topic, messageId string) bool {
58 | return true
59 | }
60 |
61 | func (self *MockKiteStore) BatchUpdate(entity []*MessageEntity) bool {
62 | return true
63 | }
64 |
65 | func (self *MockKiteStore) MoveExpired() {
66 |
67 | }
68 |
69 | func (self *MockKiteStore) PageQueryEntity(hashKey string, kiteServer string, nextDeliveryTime int64, startIdx, limit int) (bool, []*MessageEntity) {
70 | recoverMessage := buildStringMessage(MessageId())
71 | entity := NewMessageEntity(protocol.NewQMessage(recoverMessage))
72 | entity.DeliverCount = 10
73 | entity.SuccGroups = []string{"a", "b"}
74 | entity.FailGroups = []string{"c", "d"}
75 | return false, []*MessageEntity{entity}
76 | }
77 |
78 | func buildStringMessage(id string) *protocol.StringMessage {
79 | //create the message
80 | entity := &protocol.StringMessage{}
81 | entity.Header = &protocol.Header{
82 | MessageId: proto.String(id),
83 | Topic: proto.String("trade"),
84 | MessageType: proto.String("pay-succ"),
85 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
86 | DeliverLimit: proto.Int32(100),
87 | GroupId: proto.String("go-kite-test"),
88 | Commit: proto.Bool(true),
89 | Fly: proto.Bool(false)}
90 | entity.Body = proto.String("hello go-kite")
91 |
92 | return entity
93 | }
94 |
95 | func buildBytesMessage(id string) *protocol.BytesMessage {
96 | //create the message
97 | entity := &protocol.BytesMessage{}
98 | entity.Header = &protocol.Header{
99 | MessageId: proto.String(id),
100 | Topic: proto.String("trade"),
101 | MessageType: proto.String("pay-succ"),
102 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
103 | DeliverLimit: proto.Int32(100),
104 | GroupId: proto.String("go-kite-test"),
105 | Commit: proto.Bool(true),
106 | Fly: proto.Bool(false)}
107 | entity.Body = []byte("hello go-kite")
108 |
109 | return entity
110 | }
111 |
--------------------------------------------------------------------------------
/store/kite_store.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "fmt"
5 | "github.com/blackbeans/go-uuid"
6 | "github.com/blackbeans/kiteq-common/protocol"
7 | )
8 |
9 | //MessageId generates a uuid-based message id
10 | func MessageId() string {
11 | id := uuid.NewRandom()
12 | if id == nil || len(id) != 16 {
13 | return ""
14 | }
15 | b := []byte(id)
16 | return fmt.Sprintf("%08x%04x%04x%04x%012x",
17 | b[:4], b[4:6], b[6:8], b[8:10], b[10:])
18 | }
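// Example output (illustrative): MessageId() yields 32 lowercase hex
// characters, e.g. "26c03f00665862591f696a980b5a6c41"; the stores later hash
// on the trailing hex character to pick a bucket or shard.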
19 |
20 | //MessageEntity is the persisted form of a message
21 | type MessageEntity struct {
22 | Id int32 `kiteq:"id" db:"transient"`
23 | MessageId string `kiteq:"messageId" db:"message_id,pk"`
24 | Header *protocol.Header `kiteq:"header" db:"header"`
25 | Body interface{} `kiteq:"body" db:"body"` //serialized message body
26 | //-----------------
27 | MsgType uint8 `kiteq:"msg_type" db:"msg_type"` //message type
28 | Topic string `kiteq:"topic" db:"topic"` //Topic
29 | MessageType string `kiteq:"messageType" db:"message_type"` //MessageType
30 | PublishGroup string `kiteq:"publish_group" db:"publish_group"` //publisher groupId
31 | Commit bool `kiteq:"commit" db:"commit"` //committed or not
32 | PublishTime int64 `kiteq:"publish_time" db:"publish_time"`
33 | ExpiredTime int64 `kiteq:"expiredTime" db:"expired_time"` //expire time
34 | DeliverCount int32 `kiteq:"deliver_count" db:"deliver_count"` //delivery count
35 | DeliverLimit int32 `kiteq:"deliver_limit" db:"deliver_limit"` //delivery count upper limit
36 | KiteServer string `kiteq:"kite_server" db:"kite_server"` //address of the kiteq server currently handling this message
37 | FailGroups []string `kiteq:"failGroups,omitempty" db:"fail_groups"` //groups that failed delivery
38 | SuccGroups []string `kiteq:"succGroups,omitempty" db:"succ_groups"` //groups delivered successfully
39 | NextDeliverTime int64 `kiteq:"next_deliver_time" db:"next_deliver_time"` //next delivery time
40 |
41 | }
42 |
43 | // func (self *MessageEntity) String() string {
44 | // return fmt.Sprintf("id:%s Topic:%s Commit:%t Body:%s", self.MessageId, self.Topic, self.Commit, self.Body)
45 | // }
46 |
47 | func (self *MessageEntity) GetBody() interface{} {
48 | return self.Body
49 | }
50 |
51 | //NewMessageEntity builds a MessageEntity from a QMessage
52 | func NewMessageEntity(msg *protocol.QMessage) *MessageEntity {
53 | entity := &MessageEntity{
54 | Header: msg.GetHeader(),
55 | Body: msg.GetBody(),
56 | MessageId: msg.GetHeader().GetMessageId(),
57 | Topic: msg.GetHeader().GetTopic(),
58 | MessageType: msg.GetHeader().GetMessageType(),
59 | PublishGroup: msg.GetHeader().GetGroupId(),
60 | Commit: msg.GetHeader().GetCommit(),
61 | ExpiredTime: msg.GetHeader().GetExpiredTime(),
62 | DeliverCount: 0,
63 | DeliverLimit: msg.GetHeader().GetDeliverLimit(),
64 |
65 | //message kind
66 | MsgType: msg.GetMsgType()}
67 | return entity
68 |
69 | }
70 |
71 | //IKiteStore is the kiteq storage interface
72 | type IKiteStore interface {
73 | Start()
74 | Stop()
75 | Monitor() string
76 | Length() map[string] /*topic*/ int //count of backlogged messages per topic
77 |
78 | MoveExpired() //migrate expired messages away
79 |
80 | //number of recover partitions
81 | RecoverNum() int
82 |
83 | //batch size used during recover
84 | RecoverLimit() int
85 |
86 | //asynchronously update delivery results in batches
87 | AsyncUpdateDeliverResult(entity *MessageEntity) bool
88 |
89 | AsyncDelete(topic, messageId string) bool
90 | AsyncCommit(topic, messageId string) bool
91 |
92 | Query(topic, messageId string) *MessageEntity
93 | Save(entity *MessageEntity) bool
94 | Commit(topic, messageId string) bool
95 | Rollback(topic, messageId string) bool
96 | Delete(topic, messageId string) bool
97 | Expired(topic, messageId string) bool
98 |
99 | //query the messages that need redelivery for the given kiteServer; returns whether more remain and the current page of results
100 | PageQueryEntity(hashKey string, kiteServer string, nextDeliverySeconds int64, startIdx, limit int) (bool, []*MessageEntity)
101 | }
102 |
--------------------------------------------------------------------------------
/store/memory/kite_memory_store.go:
--------------------------------------------------------------------------------
1 | package memory
2 |
3 | import (
4 | "container/list"
5 | "context"
6 | "fmt"
7 | "github.com/blackbeans/logx"
8 | . "kiteq/store"
9 | "strconv"
10 | "sync"
11 | )
12 |
13 | const (
14 | CONCURRENT_LEVEL = 16
15 | )
16 |
17 | var log = logx.GetLogger("kiteq_store")
18 |
19 | type KiteMemoryStore struct {
20 | datalinks []*list.List //per-bucket LRU lists
21 | stores []map[string] /*messageId*/ *list.Element //per-bucket messageId -> element index
22 | locks []*sync.RWMutex
23 | maxcap int
24 | }
25 |
26 | func NewKiteMemoryStore(ctx context.Context, initcap, maxcap int) *KiteMemoryStore {
27 |
28 | //initialize the per-bucket holders
29 | datalinks := make([]*list.List, 0, CONCURRENT_LEVEL)
30 | stores := make([]map[string] /*messageId*/ *list.Element, 0, CONCURRENT_LEVEL)
31 | locks := make([]*sync.RWMutex, 0, CONCURRENT_LEVEL)
32 | for i := 0; i < CONCURRENT_LEVEL; i++ {
33 | splitMap := make(map[string] /*messageId*/ *list.Element, maxcap/CONCURRENT_LEVEL)
34 | stores = append(stores, splitMap)
35 | locks = append(locks, &sync.RWMutex{})
36 | datalinks = append(datalinks, list.New())
37 | }
38 |
39 | return &KiteMemoryStore{
40 | datalinks: datalinks,
41 | stores: stores,
42 | locks: locks,
43 | maxcap: maxcap / CONCURRENT_LEVEL}
44 | }
45 |
46 | func (self *KiteMemoryStore) Start() {}
47 | func (self *KiteMemoryStore) Stop() {}
48 |
49 | func (self *KiteMemoryStore) RecoverNum() int {
50 | return CONCURRENT_LEVEL
51 | }
52 |
53 | //batch size used during recover
54 | func (self *KiteMemoryStore) RecoverLimit() int {
55 | return 200
56 | }
57 |
58 | func (self *KiteMemoryStore) Length() map[string] /*topic*/ int {
59 | defer func() {
60 | if err := recover(); nil != err {
61 | log.Errorf("KiteMemoryStore|Length|recover|%v", err)
62 | }
63 | }()
64 | stat := make(map[string]int, 10)
65 | for i := 0; i < CONCURRENT_LEVEL; i++ {
66 | _, _, dl := self.hash(fmt.Sprintf("%x", i))
67 | for e := dl.Back(); nil != e; e = e.Prev() {
68 | entity := e.Value.(*MessageEntity)
69 | v, ok := stat[entity.Topic]
70 | if !ok {
71 | v = 0
72 | }
73 | stat[entity.Topic] = (v + 1)
74 | }
75 | }
76 |
77 | return stat
78 | }
79 |
80 | func (self *KiteMemoryStore) Monitor() string {
81 | return fmt.Sprintf("memory-length:%v", self.Length())
82 | }
83 |
84 | func (self *KiteMemoryStore) AsyncUpdateDeliverResult(entity *MessageEntity) bool {
85 | return self.UpdateEntity(entity)
86 | }
87 | func (self *KiteMemoryStore) AsyncDelete(topic, messageId string) bool {
88 | return self.Delete(topic, messageId)
89 | }
90 | func (self *KiteMemoryStore) AsyncCommit(topic, messageId string) bool {
91 | return self.Commit(topic, messageId)
92 | }
93 |
94 | //hash picks the lock/index/list bucket for a messageId
95 | func (self *KiteMemoryStore) hash(messageid string) (l *sync.RWMutex, e map[string]*list.Element, lt *list.List) {
96 | id := string(messageid[len(messageid)-1])
97 | i, err := strconv.ParseInt(id, CONCURRENT_LEVEL, 8)
98 | hashId := int(i)
99 | if nil != err {
100 | log.Errorf("KiteMemoryStore|hash|INVALID MESSAGEID|%s", messageid)
101 | hashId = 0
102 | } else {
103 | hashId = hashId % CONCURRENT_LEVEL
104 | }
105 |
106 | // log.Debug("KiteMemoryStore|hash|%s|%d", messageid, hashId)
107 |
108 | //hash part
109 | l = self.locks[hashId]
110 | e = self.stores[hashId]
111 | lt = self.datalinks[hashId]
112 | return
113 | }
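// Hypothetical helper (not used by the store) that spells out the bucketing
// rule above: the trailing hex character of a messageId, parsed base-16,
// selects one of the CONCURRENT_LEVEL buckets; a non-hex suffix falls back to
// bucket 0.
func exampleBucketOf(messageId string) int {
i, err := strconv.ParseInt(string(messageId[len(messageId)-1]), 16, 8)
if nil != err {
return 0
}
return int(i) % CONCURRENT_LEVEL
}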
114 |
115 | func (self *KiteMemoryStore) Query(topic, messageId string) *MessageEntity {
116 | lock, el, _ := self.hash(messageId)
117 | lock.RLock()
118 | defer lock.RUnlock()
119 | e, ok := el[messageId]
120 | if !ok {
121 | return nil
122 | }
123 | //return the entity (note: the element is not promoted to the front here)
124 | return e.Value.(*MessageEntity)
125 | }
126 |
127 | func (self *KiteMemoryStore) Save(entity *MessageEntity) bool {
128 | lock, el, dl := self.hash(entity.MessageId)
129 | lock.Lock()
130 | defer lock.Unlock()
131 |
132 | //check whether this bucket's datalink has reached its capacity
133 | cl := dl.Len()
134 | if cl >= self.maxcap {
135 | // log.Warn("KiteMemoryStore|SAVE|OVERFLOW|%d/%d", cl, self.maxcap)
136 | //evict the oldest entry
137 | back := dl.Back()
138 | b := dl.Remove(back).(*MessageEntity)
139 | delete(el, b.MessageId)
140 |
141 | }
142 | front := dl.PushFront(entity)
143 | el[entity.MessageId] = front
144 | return true
145 | }
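// LRU note (from a reading of Save and Query above): Save pushes new entities
// to the front and, at capacity, evicts from the back, so eviction order is
// effectively insertion order because Query does not promote elements.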
146 | func (self *KiteMemoryStore) Commit(topic, messageId string) bool {
147 | lock, el, _ := self.hash(messageId)
148 | lock.Lock()
149 | defer lock.Unlock()
150 | e, ok := el[messageId]
151 | if !ok {
152 | return false
153 | }
154 | entity := e.Value.(*MessageEntity)
155 | entity.Commit = true
156 | return true
157 | }
158 | func (self *KiteMemoryStore) Rollback(topic, messageId string) bool {
159 | return self.Delete(topic, messageId)
160 | }
161 | func (self *KiteMemoryStore) UpdateEntity(entity *MessageEntity) bool {
162 | lock, el, _ := self.hash(entity.MessageId)
163 | lock.Lock()
164 | defer lock.Unlock()
165 | v, ok := el[entity.MessageId]
166 | if !ok {
167 | return true
168 | }
169 |
170 | e := v.Value.(*MessageEntity)
171 | e.DeliverCount = entity.DeliverCount
172 | e.NextDeliverTime = entity.NextDeliverTime
173 | e.SuccGroups = entity.SuccGroups
174 | e.FailGroups = entity.FailGroups
175 | return true
176 | }
177 | func (self *KiteMemoryStore) Delete(topic, messageId string) bool {
178 | lock, el, dl := self.hash(messageId)
179 | lock.Lock()
180 | defer lock.Unlock()
181 | self.innerDelete(messageId, el, dl)
182 | return true
183 |
184 | }
185 |
186 | func (self *KiteMemoryStore) innerDelete(messageId string,
187 | el map[string]*list.Element, dl *list.List) {
188 | e, ok := el[messageId]
189 | if !ok {
190 | return
191 | }
192 | delete(el, messageId)
193 | dl.Remove(e)
194 | e = nil
195 | // log.Info("KiteMemoryStore|innerDelete|%s", messageId)
196 | }
197 |
198 | func (self *KiteMemoryStore) Expired(topic, messageId string) bool {
199 | succ := self.Delete(topic, messageId)
200 | return succ
201 |
202 | }
203 |
204 | func (self *KiteMemoryStore) MoveExpired() {
205 | //do nothing
206 | }
207 |
208 | //query the messages that need redelivery for the given kiteServer; returns whether more remain and the current page of results
209 | func (self *KiteMemoryStore) PageQueryEntity(hashKey string, kiteServer string, nextDeliveryTime int64, startIdx, limit int) (bool, []*MessageEntity) {
210 |
211 | pe := make([]*MessageEntity, 0, limit+1)
212 | var delMessage []string
213 |
214 | lock, el, dl := self.hash(hashKey)
215 | lock.RLock()
216 |
217 | i := 0
218 | for e := dl.Back(); nil != e; e = e.Prev() {
219 | entity := e.Value.(*MessageEntity)
220 | if entity.NextDeliverTime <= nextDeliveryTime &&
221 | entity.DeliverCount < entity.Header.GetDeliverLimit() &&
222 | entity.ExpiredTime > nextDeliveryTime {
223 | if startIdx <= i {
224 | pe = append(pe, entity)
225 | }
226 |
227 | i++
228 | if len(pe) > limit {
229 | break
230 | }
231 | } else if entity.DeliverCount >= entity.Header.GetDeliverLimit() ||
232 | entity.ExpiredTime <= nextDeliveryTime {
233 | if nil == delMessage {
234 | delMessage = make([]string, 0, 10)
235 | }
236 | delMessage = append(delMessage, entity.MessageId)
237 | }
238 | }
239 |
240 | lock.RUnlock()
241 |
242 | //delete the expired or over-limit messages
243 | if nil != delMessage {
244 | lock.Lock()
245 | for _, v := range delMessage {
246 | self.innerDelete(v, el, dl)
247 | }
248 | lock.Unlock()
249 | }
250 |
251 | if len(pe) > limit {
252 | return true, pe[:limit]
253 | } else {
254 | return false, pe
255 | }
256 |
257 | }
258 |
--------------------------------------------------------------------------------
/store/mysql/kite_mysql.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | "github.com/blackbeans/logx"
8 | . "kiteq/store"
9 | "strings"
10 | "sync"
11 | "time"
12 |
13 | "github.com/blackbeans/kiteq-common/protocol"
14 | )
15 |
16 | var log = logx.GetLogger("kiteq_store")
17 |
18 | //MysqlOptions holds the mysql store parameters
19 | type MysqlOptions struct {
20 | ShardNum int //number of database shards
21 | Addr string
22 | SlaveAddr string
23 | DB string
24 | Username, Password string
25 | BatchUpSize, BatchDelSize int
26 | FlushPeriod time.Duration
27 | MaxIdleConn int
28 | MaxOpenConn int
29 | }
30 |
31 | type KiteMysqlStore struct {
32 | convertor convertor
33 | sqlwrapper *sqlwrapper
34 | dbshard DbShard
35 | batchUpChan []chan *MessageEntity
36 | batchDelChan []chan string
37 | batchComChan []chan string
38 | batchUpSize int
39 | batchDelSize int
40 | flushPeriod time.Duration
41 | stmtPools map[batchType][][]*sql.Stmt //first dimension: db level; second: table level
42 | stop bool
43 | serverName string
44 | }
45 |
46 | func NewKiteMysql(ctx context.Context, options MysqlOptions, serverName string) *KiteMysqlStore {
47 |
48 | shard := newDbShard(options)
49 |
50 | sqlwrapper := newSqlwrapper("kite_msg", shard, MessageEntity{})
51 |
52 | ins := &KiteMysqlStore{
53 | dbshard: shard,
54 | convertor: convertor{columns: sqlwrapper.columns},
55 | sqlwrapper: sqlwrapper,
56 | batchUpSize: options.BatchUpSize,
57 | batchDelSize: options.BatchDelSize,
58 | flushPeriod: options.FlushPeriod,
59 | serverName: serverName,
60 | stop: false}
61 |
62 | log.Infof("NewKiteMysql|KiteMysqlStore|SUCC|%s|%s...", options.Addr, options.SlaveAddr)
63 | return ins
64 | }
65 |
66 | func (self *KiteMysqlStore) RecoverNum() int {
67 | return self.dbshard.ShardNum() * self.dbshard.HashNum()
68 | }
69 |
70 | //batch size used during recover
71 | func (self *KiteMysqlStore) RecoverLimit() int {
72 | return 200
73 | }
74 |
75 | var filternothing = func(colname string) bool {
76 | return false
77 | }
78 |
79 | func (self *KiteMysqlStore) Length() map[string] /*topic*/ int {
80 | //TODO count of unexpired messages in mysql
81 | defer func() {
82 | if err := recover(); nil != err {
83 | log.Errorf("KiteMysqlStore|Length|recover|%v", err)
84 | }
85 | }()
86 | stat := make(map[string]int, 10)
87 | //query mysql for the backlogged message counts
88 | for i := 0; i < self.RecoverNum(); i++ {
89 | func() error {
90 | hashKey := fmt.Sprintf("%x", i)
91 | s := self.sqlwrapper.hashMessageStatSQL(hashKey)
92 | // log.Println(s)
93 | rows, err := self.dbshard.FindSlave(hashKey).Query(s, self.serverName, time.Now().Unix())
94 | if err != nil {
95 | log.Errorf("KiteMysqlStore|Length|Query|FAIL|%s|%s|%s", err, hashKey, s)
96 | return err
97 | }
98 | defer rows.Close()
99 | if rows.Next() {
100 | topic := ""
101 | num := 0
102 | err = rows.Scan(&topic, &num)
103 | if nil != err {
104 | log.Errorf("KiteMysqlStore|Length|Scan|FAIL|%s|%s|%s", err, hashKey, s)
105 | return err
106 | } else {
107 | v, ok := stat[topic]
108 | if !ok {
109 | v = 0
110 | }
111 | stat[topic] = v + num
112 | }
113 | }
114 | return nil
115 | }()
116 | }
117 |
118 | return stat
119 | }
120 |
121 | func (self *KiteMysqlStore) Monitor() string {
122 | line := "KiteMysqlStore:\t"
123 | for _, r := range self.dbshard.shardranges {
124 | line += fmt.Sprintf("[master:%d,slave:%d]@[%d,%d]\t", r.master.Stats().OpenConnections,
125 | r.slave.Stats().OpenConnections, r.min, r.max)
126 | }
127 | return line
128 | }
129 |
130 | func (self *KiteMysqlStore) Query(topic, messageId string) *MessageEntity {
131 |
132 | var entity *MessageEntity
133 | s := self.sqlwrapper.hashQuerySQL(messageId)
134 | rows, err := self.dbshard.FindSlave(messageId).Query(s, messageId)
135 | if nil != err {
136 | log.Errorf("KiteMysqlStore|Query|FAIL|%s|%s", err, messageId)
137 | return nil
138 | }
139 | defer rows.Close()
140 |
141 | if rows.Next() {
142 |
143 | entity = &MessageEntity{}
144 | fc := self.convertor.convertFields(entity, filternothing)
145 | err := rows.Scan(fc...)
146 | if nil != err {
147 | log.Errorf("KiteMysqlStore|Query|SCAN|FAIL|%s|%s", err, messageId)
148 | return nil
149 | }
150 | self.convertor.Convert2Entity(fc, entity, filternothing)
151 | switch entity.MsgType {
152 | case protocol.CMD_BYTES_MESSAGE:
153 | //do nothing
154 | case protocol.CMD_STRING_MESSAGE:
155 | entity.Body = string(entity.GetBody().([]byte))
156 | }
157 | }
158 |
159 | return entity
160 | }
161 |
162 | func (self *KiteMysqlStore) Save(entity *MessageEntity) bool {
163 | fvs := self.convertor.Convert2Params(entity)
164 | s := self.sqlwrapper.hashSaveSQL(entity.MessageId)
165 | result, err := self.dbshard.FindMaster(entity.MessageId).Exec(s, fvs...)
166 | if err != nil {
167 | log.Errorf("KiteMysqlStore|SAVE|FAIL|%s|%s", err, entity.MessageId)
168 | return false
169 | }
170 |
171 | num, _ := result.RowsAffected()
172 | return num == 1
173 | }
174 |
175 | func (self *KiteMysqlStore) Commit(topic, messageId string) bool {
176 | return self.AsyncCommit(topic, messageId)
177 | }
178 |
179 | func (self *KiteMysqlStore) Rollback(topic, messageId string) bool {
180 | return self.Delete(topic, messageId)
181 | }
182 |
183 | func (self *KiteMysqlStore) Delete(topic, messageId string) bool {
184 | return self.AsyncDelete(topic, messageId)
185 | }
186 |
187 | func (self *KiteMysqlStore) Expired(topic, messageId string) bool { return true }
188 |
189 | func (self *KiteMysqlStore) MoveExpired() {
190 | wg := sync.WaitGroup{}
191 |
192 | now := time.Now().Unix()
193 | //migrate expired messages for every recover partition
194 | for i := 0; i < self.RecoverNum(); i++ {
195 | hashKey := fmt.Sprintf("%x", i)
196 | wg.Add(1)
197 | go func() {
198 | defer func() {
199 | wg.Done()
200 | }()
201 | self.migrateMessage(now, hashKey)
202 | }()
203 | }
204 | wg.Wait()
205 | }
206 |
207 | //migrate expired messages into the DLQ
208 | func (self *KiteMysqlStore) migrateMessage(now int64, hashKey string) {
209 |
210 | log.Infof("KiteMysqlStore|MoveExpired|START|%s", hashKey)
211 |
212 | //select the expired messages that must move to the DLQ
213 | sql := self.sqlwrapper.hashDLQSQL(DLQ_MOVE_QUERY, hashKey)
214 | //insert the selected ids into the DLQ table
215 | isql := self.sqlwrapper.hashDLQSQL(DLQ_MOVE_INSERT, hashKey)
216 | //delete the migrated rows from the source table
217 | dsql := self.sqlwrapper.hashDLQSQL(DLQ_MOVE_DELETE, hashKey)
218 |
219 | start := 0
220 | limit := 50
221 | for {
222 | messageIds := make([]interface{}, 1, 50) //slot 0 is reserved for serverName in the delete below
223 | err := func() error {
224 | rows, err := self.dbshard.FindSlave(hashKey).Query(sql, self.serverName, now, start, limit)
225 | if err != nil {
226 | log.Errorf("KiteMysqlStore|migrateMessage|Query|FAIL|%s|%s|%s", err, hashKey, sql)
227 | return err
228 | }
229 | defer rows.Close()
230 | for rows.Next() {
231 | var id int
232 | var messageId string
233 | err = rows.Scan(&id, &messageId)
234 | if nil != err {
235 | log.Errorf("KiteMysqlStore|MoveExpired|Scan|FAIL|%s|%s|%s", err, hashKey, sql)
236 | } else {
237 | start = id
238 | messageIds = append(messageIds, messageId)
239 | }
240 | }
241 | return nil
242 | }()
243 |
244 | //nothing left to migrate (or the query failed): move on to the next partition
245 | if nil != err || len(messageIds[1:]) <= 0 {
246 | log.Warnf("KiteMysqlStore|MoveExpired|SUCC|%s|%d|%v", hashKey, start, err)
247 | break
248 | }
249 |
250 | in := strings.Repeat("?,", len(messageIds[1:]))
251 | in = in[:len(in)-1]
252 | isqlTmp := strings.Replace(isql, "{ids}", in, 1)
253 | _, err = self.dbshard.FindMaster(hashKey).Exec(isqlTmp, messageIds[1:]...)
254 | if err != nil {
255 | log.Errorf("KiteMysqlStore|MoveExpired|Insert|FAIL|%s|%s", err, isqlTmp, messageIds)
256 | break
257 | }
258 |
259 | dsqlTmp := strings.Replace(dsql, "{ids}", in, 1)
260 | messageIds[0] = self.serverName
261 | _, err = self.dbshard.FindMaster(hashKey).Exec(dsqlTmp, messageIds...)
262 | if err != nil {
263 | log.Errorf("KiteMysqlStore|MoveExpired|DELETE|FAIL|%s|%s|%s|%s", err, dsql, dsqlTmp, messageIds)
264 | break
265 | }
266 | }
267 | }
268 |
269 | var filterbody = func(colname string) bool {
270 | //the body column is not queried
271 | return colname == "body"
272 | }
273 |
274 | //page-query entities without their body
275 | func (self *KiteMysqlStore) PageQueryEntity(hashKey string, kiteServer string, nextDeliverySeconds int64, startIdx, limit int) (bool, []*MessageEntity) {
276 |
277 | s := self.sqlwrapper.hashPQSQL(hashKey)
278 | // log.Println(s)
279 | rows, err := self.dbshard.FindSlave(hashKey).
280 | Query(s, kiteServer, time.Now().Unix(), nextDeliverySeconds, startIdx, limit+1)
281 | if err != nil {
282 | log.Errorf("KiteMysqlStore|Query|FAIL|%s|%s", err, hashKey)
283 | return false, nil
284 | }
285 | defer rows.Close()
286 |
287 | results := make([]*MessageEntity, 0, limit)
288 | for rows.Next() {
289 |
290 | entity := &MessageEntity{}
291 | fc := self.convertor.convertFields(entity, filterbody)
292 | err := rows.Scan(fc...)
293 | if err != nil {
294 | log.Errorf("KiteMysqlStore|PageQueryEntity|FAIL|%s|%s|%d|%d", err, kiteServer, nextDeliverySeconds, startIdx)
295 | } else {
296 |
297 | self.convertor.Convert2Entity(fc, entity, filterbody)
298 | results = append(results, entity)
299 | }
300 | }
301 |
302 | if len(results) > limit {
303 | return true, results[:limit]
304 | } else {
305 | return false, results
306 | }
307 | }
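// Paging note: the query above deliberately over-fetches limit+1 rows;
// getting more than limit back is the "has more" signal and the extra row is
// trimmed before returning. The memory store's PageQueryEntity follows the
// same contract.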
308 |
--------------------------------------------------------------------------------
/store/mysql/kite_mysql_batch.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "database/sql"
5 | "encoding/json"
6 | . "kiteq/store"
7 | "time"
8 | )
9 |
10 | func (self *KiteMysqlStore) Start() {
11 |
12 | count := SHARD_SEED
13 | //create one channel per hash partition
14 | batchDelChan := make([]chan string, 0, count)
15 | batchUpChan := make([]chan *MessageEntity, 0, count)
16 | batchComChan := make([]chan string, 0, count)
17 | for i := 0; i < count; i++ {
18 | batchUpChan = append(batchUpChan, make(chan *MessageEntity, self.batchUpSize*2))
19 | batchDelChan = append(batchDelChan, make(chan string, self.batchDelSize*2))
20 | batchComChan = append(batchComChan, make(chan string, self.batchUpSize*2))
21 | }
22 |
23 | //batching channels
24 | self.batchUpChan = batchUpChan
25 | self.batchDelChan = batchDelChan
26 | self.batchComChan = batchComChan
27 |
28 | //create prepared statements for each batch type
29 | stmts := make(map[batchType][][]*sql.Stmt, 4)
30 | for k, v := range self.sqlwrapper.batchSQL {
31 | btype := k
32 | pool := make([][]*sql.Stmt, 0, self.dbshard.ShardNum())
33 | //build a stmt pool for every shard
34 | for i := 0; i < self.dbshard.ShardNum(); i++ {
35 | innerPool := make([]*sql.Stmt, 0, self.dbshard.HashNum())
36 | for j, s := range v {
37 | psql := s
38 | db := self.dbshard.FindShardById(i*self.dbshard.HashNum() + j).master
39 | err, stmt := func() (error, *sql.Stmt) {
40 |
41 | stmt, err := db.Prepare(psql)
42 | if nil != err {
43 | log.Errorf("StmtPool|Create Stmt|FAIL|%s|%s", err, psql)
44 | return err, nil
45 | }
46 | return nil, stmt
47 | }()
48 | if nil != err {
49 | log.Errorf("NewKiteMysql|NewStmtPool|FAIL|%s", err)
50 | panic(err)
51 | }
52 | innerPool = append(innerPool, stmt)
53 |
54 | }
55 |
56 | pool = append(pool, innerPool)
57 | }
58 | stmts[btype] = pool
59 | }
60 |
61 | self.stmtPools = stmts
62 |
63 | for i := 0; i < count; i++ {
64 | // log.Printf("KiteMysqlStore|start|SQL|%s\n|%s", sqlu, sqld)
65 | self.startBatch(i, self.batchUpChan[i],
66 | self.batchDelChan[i], self.batchComChan[i])
67 | }
68 | log.Infof("KiteMysqlStore|Start...")
69 | }
70 |
71 | //start the batch workers (update/delete/commit)
72 | func (self *KiteMysqlStore) startBatch(hash int,
73 | chu chan *MessageEntity, chd, chcommit chan string) {
74 |
75 | //goroutine that batches entity updates
76 | go func(hashId int, ch chan *MessageEntity, batchSize int,
77 | do func(sql int, d []*MessageEntity) bool) {
78 |
79 | //pool of reusable batch slices
80 | batchPool := make(chan []*MessageEntity, 8)
81 | for i := 0; i < 8; i++ {
82 | batchPool <- make([]*MessageEntity, 0, batchSize)
83 | }
84 | data := <-batchPool
85 |
86 | timer := time.NewTimer(self.flushPeriod)
87 | flush := false
88 | for !self.stop {
89 | select {
90 | case mid := <-ch:
91 | data = append(data, mid)
92 | case <-timer.C:
93 | flush = true
94 | }
95 | //flush: the batch threshold was reached or the flush timer fired
96 | if len(data) >= batchSize || flush {
97 | tmp := data
98 | go func() {
99 | defer func() {
100 | batchPool <- tmp[:0]
101 | }()
102 | do(hashId, tmp)
103 | }()
104 |
105 | //take a fresh slice from the pool
106 | data = <-batchPool
107 | flush = false
108 | timer.Reset(self.flushPeriod)
109 | }
110 | }
111 | timer.Stop()
112 | }(hash, chu, self.batchUpSize, self.batchUpdate)
113 |
114 | batchFun := func(hashid int, ch chan string, batchSize int,
115 | do func(hashid int, d []string) bool) {
116 |
117 | //pool of reusable batch slices
118 | batchPool := make(chan []string, 8)
119 | for i := 0; i < 8; i++ {
120 | batchPool <- make([]string, 0, batchSize)
121 | }
122 | data := <-batchPool //take the initial slice from the pool, mirroring the update loop above
123 |
124 | timer := time.NewTimer(self.flushPeriod)
125 | flush := false
126 | for !self.stop {
127 | select {
128 | case mid := <-ch:
129 | data = append(data, mid)
130 | case <-timer.C:
131 | flush = true
132 | }
133 | //flush: the batch threshold was reached or the flush timer fired
134 | if len(data) >= batchSize || flush {
135 |
136 | tmp := data
137 | go func() {
138 | defer func() {
139 | batchPool <- tmp[:0]
140 | }()
141 | do(hashid, tmp)
142 | }()
143 |
144 | //take a fresh slice from the pool
145 | data = <-batchPool
146 | flush = false
147 | timer.Reset(self.flushPeriod)
148 | }
149 | }
150 | timer.Stop()
151 | }
152 |
153 | //start the batch-delete worker
154 | go batchFun(hash, chd, self.batchDelSize, self.batchDelete)
155 | //start the batch-commit worker
156 | go batchFun(hash, chcommit, self.batchUpSize, self.batchCommit)
157 |
158 | }
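// Distilled sketch of the batching pattern above (illustrative only; this
// helper is not wired in): items accumulate from a channel into a pooled
// slice and are handed to do() either when the batch fills or when the flush
// timer fires; slices are recycled through a buffered channel so steady-state
// allocations stay bounded.
func exampleBatchLoop(ch chan string, batchSize int, period time.Duration,
stop *bool, do func([]string)) {
pool := make(chan []string, 8)
for i := 0; i < 8; i++ {
pool <- make([]string, 0, batchSize)
}
data := <-pool
timer := time.NewTimer(period)
flush := false
for !*stop {
select {
case v := <-ch:
data = append(data, v)
case <-timer.C:
flush = true
}
if len(data) >= batchSize || flush {
tmp := data
go func() {
do(tmp)
//recycle the slice for reuse
pool <- tmp[:0]
}()
data = <-pool
flush = false
timer.Reset(period)
}
}
timer.Stop()
}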
159 |
160 | func (self *KiteMysqlStore) AsyncCommit(topic, messageid string) bool {
161 | idx := self.dbshard.HashId(messageid)
162 | self.batchComChan[idx] <- messageid
163 | return true
164 |
165 | }
166 |
167 | func (self *KiteMysqlStore) AsyncUpdateDeliverResult(entity *MessageEntity) bool {
168 | idx := self.dbshard.HashId(entity.MessageId)
169 | self.batchUpChan[idx] <- entity
170 | return true
171 |
172 | }
173 | func (self *KiteMysqlStore) AsyncDelete(topic, messageid string) bool {
174 | idx := self.dbshard.HashId(messageid)
175 | self.batchDelChan[idx] <- messageid
176 | return true
177 | }
178 |
179 | func (self *KiteMysqlStore) stmtPool(bt batchType, hash string) *sql.Stmt {
180 | shard := self.dbshard.FindForShard(hash)
181 | id := self.dbshard.FindForKey(hash)
182 | return self.stmtPools[bt][shard.shardId][id]
183 | }
184 |
185 | func (self *KiteMysqlStore) batchCommit(hashId int, messageId []string) bool {
186 |
187 | if len(messageId) <= 0 {
188 | return true
189 | }
190 | // log.Printf("KiteMysqlStore|batchCommit|%s|%s", prepareSQL, messageId)
191 | stmt := self.stmtPool(COMMIT, messageId[0])
192 | var err error
193 | for _, v := range messageId {
194 | _, err = stmt.Exec(true, v)
195 | if nil != err {
196 | log.Errorf("KiteMysqlStore|batchCommit|FAIL|%s|%s", err, v)
197 | }
198 | }
199 | return nil == err
200 | }
201 |
202 | func (self *KiteMysqlStore) batchDelete(hashId int, messageId []string) bool {
203 |
204 | if len(messageId) <= 0 {
205 | return true
206 | }
207 |
208 | stmt := self.stmtPool(DELETE, messageId[0])
209 | var err error
210 | for _, v := range messageId {
211 | _, err = stmt.Exec(v)
212 | if nil != err {
213 | log.Errorf("KiteMysqlStore|batchDelete|FAIL|%s|%s", err, v)
214 | }
215 | }
216 | return nil == err
217 | }
218 |
219 | func (self *KiteMysqlStore) batchUpdate(hashId int, entity []*MessageEntity) bool {
220 |
221 | if len(entity) <= 0 {
222 | return true
223 | }
224 |
225 | stmt := self.stmtPool(UPDATE, entity[0].MessageId)
226 | args := make([]interface{}, 0, 5)
227 | var errs error
228 | for _, e := range entity {
229 |
230 | args = args[:0]
231 |
232 | sg, err := json.Marshal(e.SuccGroups)
233 | if nil != err {
234 | log.Errorf("KiteMysqlStore|batchUpdate|SUCC GROUP|MARSHAL|FAIL|%s|%s|%s", err, e.MessageId, e.SuccGroups)
235 | errs = err
236 | continue
237 | }
238 |
239 | args = append(args, sg)
240 |
241 | fg, err := json.Marshal(e.FailGroups)
242 | if nil != err {
243 | log.Errorf("KiteMysqlStore|batchUpdate|FAIL GROUP|MARSHAL|FAIL|%s|%s|%s", err, e.MessageId, e.FailGroups)
244 | errs = err
245 | continue
246 | }
247 |
248 | args = append(args, fg)
249 |
250 | //set the next delivery time
251 | args = append(args, e.NextDeliverTime)
252 |
253 | args = append(args, e.DeliverCount)
254 |
255 | args = append(args, e.MessageId)
256 |
257 | _, err = stmt.Exec(args...)
258 | if nil != err {
259 | log.Errorf("KiteMysqlStore|batchUpdate|FAIL|%s|%s", err, e)
260 | errs = err
261 | }
262 |
263 | }
264 | return nil == errs
265 | }
266 |
267 | func (self *KiteMysqlStore) Stop() {
268 | self.stop = true
269 | for k, v := range self.stmtPools {
270 | for _, s := range v {
271 | for _, p := range s {
272 | p.Close()
273 | }
274 | }
275 | log.Infof("KiteMysqlStore|Stop|Stmt|%s", k)
276 | }
277 | self.dbshard.Stop()
278 | }
279 |
--------------------------------------------------------------------------------
/store/mysql/kite_mysql_convertor.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "encoding/json"
5 |
6 | "github.com/blackbeans/kiteq-common/protocol"
7 | "kiteq/store"
8 | "reflect"
9 | )
10 |
11 | type convertor struct {
12 | columns []column
13 | }
14 |
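// convertFields builds the sql.Rows scan destinations (a reading of the code
// below): slice and array typed columns scan into a string because they are
// stored as JSON, header and body scan into raw []byte, and every other
// column scans directly into the corresponding entity field.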
15 | func (self convertor) convertFields(entity *store.MessageEntity, filter func(colname string) bool) []interface{} {
16 | dest := make([]interface{}, 0, len(self.columns))
17 | rv := reflect.ValueOf(entity)
18 | elem := rv.Elem()
19 | for _, c := range self.columns {
20 | if filter(c.columnName) {
21 | continue
22 | }
23 | fb := elem.FieldByName(c.fieldName).Addr().Interface()
24 | if c.fieldKind == reflect.Slice || c.fieldKind == reflect.Array {
25 | var a string
26 | dest = append(dest, &a)
27 | } else if c.columnName == "header" || c.columnName == "body" {
28 | var h []byte
29 | dest = append(dest, &h)
30 | } else {
31 | dest = append(dest, fb)
32 | }
33 | }
34 | return dest
35 |
36 | }
37 |
38 | func (self convertor) Convert2Entity(fv []interface{}, entity *store.MessageEntity, filter func(colname string) bool) {
39 | val := reflect.ValueOf(entity)
40 | elem := val.Elem()
41 | //filtered columns shift the scan positions, so track the offset to read the right slot
42 | j := 0
43 | for i, c := range self.columns {
44 | if filter(c.columnName) {
45 | j++
46 | continue
47 | }
48 | i = i - j
49 | v := fv[i]
50 | rval := reflect.ValueOf(v)
51 | rv := rval.Elem().Interface()
52 | fn := elem.FieldByName(c.fieldName)
53 |
54 | k := fn.Kind()
55 |
56 | // log.Debug("convertor|Convert2Entity|%s|%s", c.fieldName, rv)
57 |
58 | switch k {
59 | case reflect.Int8:
60 | fn.Set(reflect.ValueOf(rv.(int8)))
61 | case reflect.Int:
62 | fn.Set(reflect.ValueOf(rv.(int)))
63 | case reflect.Int16:
64 | fn.Set(reflect.ValueOf(rv.(int16)))
65 | case reflect.Int32:
66 | fn.Set(reflect.ValueOf(rv.(int32)))
67 | case reflect.Int64:
68 | fn.SetInt((rv.(int64)))
69 | case reflect.Uint8:
70 | fn.Set(reflect.ValueOf(rv.(uint8)))
71 | case reflect.Uint:
72 | fn.Set(reflect.ValueOf(rv.(uint)))
73 | case reflect.Uint16:
74 | fn.Set(reflect.ValueOf(rv.(uint16)))
75 | case reflect.Uint32:
76 | fn.Set(reflect.ValueOf(rv.(uint32)))
77 | case reflect.Uint64:
78 | fn.SetUint((rv.(uint64)))
79 | case reflect.Bool:
80 | fn.SetBool(rv.(bool))
81 | case reflect.String:
82 | fn.SetString((rv.(string)))
83 | case reflect.Ptr:
84 | {
85 | hd, hok := rv.([]byte)
86 | _, ok := fn.Interface().(*protocol.Header)
87 | if ok && hok {
88 | var header protocol.Header
89 | if hok {
90 | //deserialize the header with protobuf
91 | err := protocol.UnmarshalPbMessage(hd, &header)
92 | if nil != err {
93 | log.Errorf("convertor|Convert2Entity|Unmarshal Header|FAIL|%s|%s", err, c.fieldName)
94 | }
95 | }
96 | fn.Set(reflect.ValueOf(&header))
97 | } else if hok {
98 | fn.SetBytes(hd)
99 | } else {
100 | log.Errorf("convertor|Convert2Entity|FAIL|UnSupport Ptr DataType|%s|%t|%s|%s", c.fieldName, rv, hok, ok)
101 | return
102 | }
103 | }
104 | case reflect.Array, reflect.Slice:
105 | k := fn.Type().Elem().Kind()
106 | if k == reflect.String {
107 | var data []string
108 | err := json.Unmarshal([]byte(rv.(string)), &data)
109 | if nil != err {
110 | log.Errorf("convertor|Convert2Entity|FAIL|UnSupport SLICE|%s|%s", c.fieldName, rv)
111 | }
112 | fn.Set(reflect.ValueOf(data))
113 | } else if k == reflect.Uint8 {
114 | fn.SetBytes(rv.([]byte))
115 | } else {
116 | log.Errorf("convertor|Convert2Entity|FAIL|UnSupport SLICE DataType|%s|%s", c.columnName, fn.Elem().Kind())
117 | return
118 | }
119 | default:
120 | if c.columnName == "body" {
121 | _, ok := rv.([]byte)
122 | if ok {
123 | fn.Set(rval.Elem())
124 | } else {
125 | log.Errorf("convertor|Convert2Entity|FAIL|UnSupport BodyType |REQ:[]byte|%s|%T", c.fieldName, rv)
126 | return
127 | }
128 | } else {
129 | log.Errorf("convertor|Convert2Entity|FAIL|UnSupport DataType|%s|%s", c.fieldName, rv)
130 | }
131 | }
132 | }
133 | }
134 |
135 | func (self convertor) Convert2Params(entity *store.MessageEntity) []interface{} {
136 |
137 | val := reflect.ValueOf(*entity)
138 | fvs := make([]interface{}, 0, len(self.columns))
139 | for _, v := range self.columns {
140 |
141 | var fv interface{}
142 | if v.columnName == "body" {
143 | if entity.MsgType == protocol.CMD_STRING_MESSAGE {
144 | fv = []byte(entity.GetBody().(string))
145 | } else if entity.MsgType == protocol.CMD_BYTES_MESSAGE {
146 | fv = entity.GetBody().([]byte)
147 | } else {
148 | log.Errorf("convertor|Convert2Params|UnSupport MESSAGE TYPE|%s", entity.MsgType)
149 | }
150 | } else {
151 | f := val.FieldByName(v.fieldName)
152 |
153 | // log.Debug("convertor|Convert2Params|%s|%s", v.fieldName, f)
154 |
155 | switch f.Kind() {
156 | case reflect.Ptr:
157 | header, ok := f.Interface().(*protocol.Header)
158 | if ok {
159 | //serialize the header with protobuf
160 | data, err := protocol.MarshalPbMessage(header)
161 | if err != nil {
162 | log.Errorf("convertor|Convert2Params|Marshal|HEAD|FAIL|%s|%s", err, f.Addr().Interface())
163 | return nil
164 | }
165 | fv = data
166 | } else {
167 | log.Errorf("convertor|Convert2Params|Not protocol.Header PRT |FAIL|%s", f.Addr())
168 | return nil
169 | }
170 |
171 | case reflect.Slice, reflect.Array:
172 |
173 | if f.Type().Elem().Kind() == reflect.String {
174 | data, err := json.Marshal(f.Interface())
175 | if nil != err {
176 | log.Errorf("convertor|Convert2Params|Marshal|Slice|FAIL||%s", err)
177 | return nil
178 | }
179 | fv = string(data)
180 | } else {
181 | fv = f.Interface()
182 | }
183 |
184 | default:
185 | fv = f.Interface()
186 | }
187 | }
188 | fvs = append(fvs, &fv)
189 | }
190 |
191 | return fvs
192 |
193 | }
194 |
--------------------------------------------------------------------------------
/store/mysql/kite_mysql_convertor_test.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "github.com/blackbeans/kiteq-common/protocol"
5 | "github.com/golang/protobuf/proto"
6 | "kiteq/store"
7 | "os"
8 | "reflect"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func TestConvertFields(t *testing.T) {
14 |
15 | options := MysqlOptions{
16 | Addr: "localhost:3306",
17 | Username: "root",
18 | Password: "",
19 | ShardNum: 4,
20 | BatchUpSize: 1000,
21 | BatchDelSize: 1000,
22 | FlushPeriod: 1 * time.Minute,
23 | MaxIdleConn: 2,
24 | MaxOpenConn: 4}
25 |
26 | hs := newDbShard(options)
27 |
28 | c := convertor{}
29 | sqlwrapper := newSqlwrapper("kite_msg", hs, store.MessageEntity{})
30 | sqlwrapper.initSQL()
31 | c.columns = sqlwrapper.columns
32 |
33 | entity := &store.MessageEntity{}
34 | fs := c.convertFields(entity, func(colname string) bool {
35 | return false
36 | })
37 | t.Logf("%d|%s", len(c.columns), fs)
38 | if len(fs) != len(c.columns) {
39 | t.Fail()
40 | }
41 | }
42 |
43 | func TestConvert2Entity(t *testing.T) {
44 |
45 | options := MysqlOptions{
46 | Addr: "localhost:3306",
47 | Username: "root",
48 | Password: "",
49 | ShardNum: 4,
50 | BatchUpSize: 1000,
51 | BatchDelSize: 1000,
52 | FlushPeriod: 1 * time.Minute,
53 | MaxIdleConn: 2,
54 | MaxOpenConn: 4}
55 |
56 | hs := newDbShard(options)
57 |
58 | c := convertor{}
59 | sqlwrapper := newSqlwrapper("kite_msg", hs, store.MessageEntity{})
60 | sqlwrapper.initSQL()
61 | c.columns = sqlwrapper.columns
62 |
63 | //create the message
64 | msg := &protocol.BytesMessage{}
65 | msg.Header = &protocol.Header{
66 | MessageId: proto.String("26c03f00665462591f696a980b5a6c4"),
67 | Topic: proto.String("trade"),
68 | MessageType: proto.String("pay-succ"),
69 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
70 | DeliverLimit: proto.Int32(100),
71 | GroupId: proto.String("go-kite-test"),
72 | Commit: proto.Bool(false),
73 | Fly: proto.Bool(false)}
74 | msg.Body = []byte("hello world")
75 |
76 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
77 | entity.SuccGroups = []string{"go-kite-test"}
78 | hn, _ := os.Hostname()
79 | entity.KiteServer = hn
80 |
81 | params := c.Convert2Params(entity)
82 | t.Logf("TestConvert2Entity|Convert2Params|%s", params)
83 | econ := &store.MessageEntity{}
84 | c.Convert2Entity(params, econ, func(colname string) bool {
85 | return false
86 | })
87 | t.Logf("TestConvert2Entity|Convert2Entity|%s", econ)
88 | if econ.MessageId != entity.MessageId {
89 | t.Fail()
90 | }
91 |
92 | if econ.ExpiredTime != entity.ExpiredTime {
93 | t.Fail()
94 | }
95 |
96 | }
97 |
98 | func TestConvert2Params(t *testing.T) {
99 |
100 | options := MysqlOptions{
101 | Addr: "localhost:3306",
102 | Username: "root",
103 | Password: "",
104 | ShardNum: 4,
105 | BatchUpSize: 1000,
106 | BatchDelSize: 1000,
107 | FlushPeriod: 1 * time.Minute,
108 | MaxIdleConn: 2,
109 | MaxOpenConn: 4}
110 |
111 | hs := newDbShard(options)
112 |
113 | c := convertor{}
114 | sqlwrapper := newSqlwrapper("kite_msg", hs, store.MessageEntity{})
115 | sqlwrapper.initSQL()
116 | c.columns = sqlwrapper.columns
117 |
118 | //build a test message
119 | msg := &protocol.BytesMessage{}
120 | msg.Header = &protocol.Header{
121 | MessageId: proto.String("26c03f00665862591f696a980b5a6c4"),
122 | Topic: proto.String("trade"),
123 | MessageType: proto.String("pay-succ"),
124 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
125 | DeliverLimit: proto.Int32(100),
126 | GroupId: proto.String("go-kite-test"),
127 | Commit: proto.Bool(false),
128 | Fly: proto.Bool(false)}
129 | msg.Body = []byte("hello world")
130 |
131 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
132 | entity.SuccGroups = []string{"go-kite-test"}
133 | hn, _ := os.Hostname()
134 | entity.KiteServer = hn
135 |
136 | params := c.Convert2Params(entity)
137 |
138 | if nil != params {
139 | for i, col := range c.columns {
140 | cv := params[i]
141 | t.Logf("TestConvert2Params|FIELD|%s|%s", col.fieldName, cv)
142 | if col.fieldName == "MessageId" {
143 | rv := reflect.ValueOf(cv)
144 | s := rv.Elem().Interface()
145 | if s.(string) != entity.MessageId {
146 | t.Fail()
147 | }
148 | } else if col.columnName == "body" {
149 | rv := reflect.ValueOf(cv)
150 | s := rv.Elem().Interface()
151 | if string(s.([]byte)) != string(entity.GetBody().([]byte)) {
152 | t.Fail()
153 | }
154 | }
155 | }
156 | } else {
157 | t.Fail()
158 | }
159 | }
160 |
--------------------------------------------------------------------------------
/store/mysql/kite_mysql_shard.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "database/sql"
5 | _ "github.com/go-sql-driver/mysql"
6 | "strconv"
7 | // "time"
8 | )
9 |
10 | const (
11 | SHARD_SEED = 16
12 | )
13 |
14 | type shardrange struct {
15 | min int
16 | max int
17 | shardId int
18 | master *sql.DB
19 | slave *sql.DB
20 | }
21 |
22 | type DbShard struct {
23 | shardNum int
24 | hashNum int
25 | shardranges []shardrange
26 | }
27 |
28 | func newDbShard(options MysqlOptions) DbShard {
29 | hash := SHARD_SEED / options.ShardNum
30 |
31 | shardranges := make([]shardrange, 0, options.ShardNum)
32 | for i := 0; i < options.ShardNum; i++ {
33 |
34 | //open this shard's master (and optional slave) database
35 | master := openDb(
36 | options.Username+":"+options.Password+"@tcp("+options.Addr+")/"+options.DB,
37 | i,
38 | options.MaxIdleConn, options.MaxOpenConn)
39 | slave := master
40 | if len(options.SlaveAddr) > 0 {
41 | slave = openDb(
42 | options.Username+":"+options.Password+"@tcp("+options.SlaveAddr+")/"+options.DB,
43 | i,
44 | options.MaxIdleConn, options.MaxOpenConn)
45 | }
46 | shardranges = append(shardranges, shardrange{i * hash, (i + 1) * hash, i, master, slave})
47 | }
48 |
49 | return DbShard{options.ShardNum, hash, shardranges}
50 | }
51 |
52 | func openDb(addr string, shardId int, idleConn, maxConn int) *sql.DB {
53 | db, err := sql.Open("mysql", addr+"_"+strconv.Itoa(shardId)+"?timeout=30s&readTimeout=30s")
54 | if err != nil {
55 | log.Errorf("NewKiteMysql|CONNECT FAIL|%s|%s", err, addr)
56 | panic(err)
57 | }
58 | db.SetMaxIdleConns(idleConn)
59 | db.SetMaxOpenConns(maxConn)
60 | // db.SetConnMaxLifetime(5 * time.Minute)
61 | return db
62 | }
63 |
64 | func (s DbShard) FindForShard(key string) shardrange {
65 |
66 | i := s.HashId(key)
67 | for _, v := range s.shardranges {
68 | if v.min <= i && v.max > i {
69 | return v
70 | }
71 | }
72 | return s.shardranges[0]
73 |
74 | }
75 |
76 | func (s DbShard) FindForKey(key string) int {
77 | return s.HashId(key) % s.hashNum
78 | }
79 |
80 | func (s DbShard) FindSlave(key string) *sql.DB {
81 | return s.FindForShard(key).slave
82 | }
83 |
84 | func (s DbShard) FindMaster(key string) *sql.DB {
85 | return s.FindForShard(key).master
86 | }
87 |
88 | func (s DbShard) FindShardById(id int) shardrange {
89 | for _, v := range s.shardranges {
90 | if v.min <= id && v.max > id {
91 | return v
92 | }
93 | }
94 | return s.shardranges[0]
95 | }
96 |
97 | func (s DbShard) HashId(key string) int {
98 | num := key
99 | if len(key) > 1 {
100 | num = string(key[len(key)-1:])
101 | }
102 |
103 | i, err := strconv.ParseInt(num, 16, 16)
104 | if nil != err {
105 | log.Errorf("DbShard|HashId|INVALID HASHKEY|%s|%s", key, err)
106 | return 0
107 | }
108 | return int(i)
109 | }
110 |
111 | func (s DbShard) ShardNum() int {
112 | return s.shardNum
113 | }
114 |
115 | func (s DbShard) HashNum() int {
116 | return s.hashNum
117 | }
118 |
119 | func (s DbShard) Stop() {
120 | for _, v := range s.shardranges {
121 | v.master.Close()
122 | v.slave.Close()
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
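A minimal sketch of the routing above (illustrative, not a repo file): with SHARD_SEED=16 and the default ShardNum=4, hashNum is 4; HashId maps the trailing hex character of a message id to 0..15, FindForShard buckets that value into a database, and FindForKey picks the table suffix. The expected values below match the shard test that follows.

    package main

    import (
    	"fmt"
    	"strconv"
    )

    // Mirrors DbShard's routing for ShardNum=4 (hashNum = 16/4 = 4).
    func main() {
    	id := "26c03f00665862591f696a980b5a6c4f"
    	// HashId: parse the trailing hex character ('f' -> 15)
    	h, _ := strconv.ParseInt(id[len(id)-1:], 16, 16)
    	hashNum := 16 / 4
    	fmt.Println("database:", int(h)/hashNum) // 3 -> kite_3
    	fmt.Println("table:   ", int(h)%hashNum) // 3 -> kite_msg_3
    }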
/store/mysql/kite_mysql_shard_test.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "regexp"
5 | "testing"
6 | "time"
7 | )
8 |
9 | func TestHash(t *testing.T) {
10 |
11 | options := MysqlOptions{
12 | Addr: "localhost:3306",
13 | Username: "root",
14 | Password: "",
15 | ShardNum: 4,
16 | BatchUpSize: 1000,
17 | BatchDelSize: 1000,
18 | FlushPeriod: 1 * time.Minute,
19 | MaxIdleConn: 2,
20 | MaxOpenConn: 4}
21 |
22 | hs := newDbShard(options)
23 |
24 | fk := hs.FindForShard("26c03f00665862591f696a980b5a6c4f")
25 | if fk.shardId != 3 {
26 | t.Fail()
27 | t.Logf("FAIL|FindForShard|26c03f00665862591f696a980b5a6c4f|%d", fk.shardId)
28 | return
29 | }
30 |
31 | hash := hs.FindForKey("26c03f00665862591f696a980b5a6c4f")
32 | if hash != 3 {
33 | t.Fail()
34 | t.Logf("FAIL|FindForKey|26c03f00665862591f696a980b5a6c4f|%d", hash)
35 | }
36 |
37 | // !regexp.MatchString(, id)
38 | rc := regexp.MustCompile("[0-9a-f]{32}")
39 | match := rc.MatchString("26c03f00665862591f696a980b5a6c4f")
40 | if !match {
41 | t.Fail()
42 | t.Log("MatchString|26c03f00665862591f696a980b5a6c4f|FAIL")
43 | }
44 |
45 | match = rc.MatchString("26c03f006-65862591f696a980b5a6c4")
46 | if match {
47 | t.Fail()
48 | t.Log("MatchString|26c03f006-65862591f696a980b5a6c4|FAIL")
49 | }
50 |
51 | t.Logf("FindForShard|%d", fk)
52 |
53 | sc := hs.ShardNum()
54 | if sc != 4 {
55 | t.Fail()
56 | }
57 |
58 | t.Logf("ShardNum|%d", sc)
59 | }
60 |
--------------------------------------------------------------------------------
/store/mysql/kite_mysql_test.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/blackbeans/kiteq-common/protocol"
7 | "github.com/golang/protobuf/proto"
8 | "kiteq/store"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func TestPageQuery(t *testing.T) {
14 |
15 | options := MysqlOptions{
16 | Addr: "localhost:3306",
17 | DB: "kite",
18 | Username: "root",
19 | Password: "",
20 | ShardNum: 4,
21 | BatchUpSize: 100,
22 | BatchDelSize: 100,
23 | FlushPeriod: 10 * time.Millisecond,
24 | MaxIdleConn: 10,
25 | MaxOpenConn: 10}
26 |
27 | kiteMysql := NewKiteMysql(context.TODO(), options, "localhost")
28 | truncate(kiteMysql)
29 | hn := "localhost"
30 | for i := 0; i < 10; i++ {
31 | //build a message
32 | msg := &protocol.BytesMessage{}
33 | msg.Header = &protocol.Header{
34 | MessageId: proto.String(fmt.Sprintf("%x", i) + "26c03f00665862591f696a980b5ac"),
35 | Topic: proto.String("trade"),
36 | MessageType: proto.String("pay-succ"),
37 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
38 | DeliverLimit: proto.Int32(100),
39 | GroupId: proto.String("go-kite-test"),
40 | Commit: proto.Bool(false),
41 | Fly: proto.Bool(false)}
42 | msg.Body = []byte("hello world")
43 |
44 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
45 |
46 | entity.KiteServer = hn
47 | entity.PublishTime = time.Now().Unix()
48 | kiteMysql.Save(entity)
49 | }
50 |
51 | startIdx := 0
52 | hasMore := true
53 | count := 0
54 | //page through the unexpired message entities
55 | for hasMore {
56 | more, entities := kiteMysql.PageQueryEntity("c", hn,
57 | time.Now().Unix(), 0, 1)
58 | if len(entities) <= 0 {
59 | break
60 | }
61 |
62 | //kick off redelivery
63 | for _, entity := range entities {
64 | count++
65 | t.Logf("TestPageQuery|PageQueryEntity|%s", entity.MessageId)
66 | msg := &store.MessageEntity{
67 | MessageId: entity.MessageId,
68 | DeliverCount: 1,
69 | SuccGroups: []string{},
70 | FailGroups: []string{"s-mts-test"},
71 | //set the next deliver time
72 | NextDeliverTime: time.Now().Add(1 * time.Minute).Unix()}
73 | kiteMysql.AsyncUpdateDeliverResult(msg)
74 |
75 | }
76 |
77 | time.Sleep(1 * time.Second)
78 | hasMore = more
79 | startIdx += len(entities)
80 | }
81 | if count != 10 {
82 | t.Fail()
83 | t.Logf("TestPageQuery|IDX|FAIL|%d", count)
84 | return
85 | }
86 |
87 | startIdx = 0
88 | hasMore = true
89 | //page through the unexpired message entities
90 | for hasMore {
91 | more, entities := kiteMysql.PageQueryEntity("6c", hn,
92 | time.Now().Add(8*time.Minute).Unix(), startIdx, 1)
93 | if len(entities) <= 0 {
94 | t.Logf("TestPageQuery|CHECK|NO DATA|%s", entities)
95 | break
96 | }
97 |
98 | //kick off redelivery
99 | for _, entity := range entities {
100 | if entity.DeliverCount != 1 || entity.FailGroups[0] != "s-mts-test" {
101 | t.Fail()
102 | }
103 | t.Logf("TestPageQuery|PageQueryEntity|CHECK|%s", entity.MessageId)
104 | }
105 | startIdx += len(entities)
106 | hasMore = more
107 | }
108 |
109 | t.Logf("TestPageQuery|CHECK|%d", startIdx)
110 | if startIdx != 10 {
111 | t.Fail()
112 | }
113 |
114 | truncate(kiteMysql)
115 |
116 | }
117 |
118 | func TestBatch(t *testing.T) {
119 | options := MysqlOptions{
120 | Addr: "localhost:3306",
121 | DB: "kite",
122 | Username: "root",
123 | Password: "",
124 | ShardNum: 4,
125 | BatchUpSize: 100,
126 | BatchDelSize: 100,
127 | FlushPeriod: 10 * time.Millisecond,
128 | MaxIdleConn: 10,
129 | MaxOpenConn: 10}
130 |
131 | kiteMysql := NewKiteMysql(context.TODO(), options, "localhost")
132 |
133 | truncate(kiteMysql)
134 |
135 | mids := make([]string, 0, 16)
136 | for i := 0; i < 16; i++ {
137 | //build a message
138 | msg := &protocol.BytesMessage{}
139 | msg.Header = &protocol.Header{
140 | MessageId: proto.String("26c03f00665862591f696a980b5a6" + fmt.Sprintf("%x", i)),
141 | Topic: proto.String("trade"),
142 | MessageType: proto.String("pay-succ"),
143 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
144 | DeliverLimit: proto.Int32(100),
145 | GroupId: proto.String("go-kite-test"),
146 | Commit: proto.Bool(false),
147 | Fly: proto.Bool(false)}
148 | msg.Body = []byte("hello world")
149 |
150 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
151 | entity.SuccGroups = []string{"go-kite-test"}
152 | hn := "localhost"
153 | entity.KiteServer = hn
154 | entity.PublishTime = time.Now().Unix()
155 | kiteMysql.Save(entity)
156 | mids = append(mids, entity.MessageId)
157 | }
158 |
159 | for _, v := range mids {
160 |
161 | msg := &store.MessageEntity{
162 | MessageId: v,
163 | DeliverCount: 1,
164 | SuccGroups: []string{"s-mts-test"},
165 | FailGroups: []string{},
166 | //set the next deliver time
167 | NextDeliverTime: time.Now().Unix()}
168 | kiteMysql.AsyncUpdateDeliverResult(msg)
169 | }
170 |
171 | time.Sleep(5 * time.Second)
172 | for _, v := range mids {
173 | e := kiteMysql.Query("trade", v)
174 | if nil == e || len(e.SuccGroups) < 1 {
175 | t.Fatalf("TestBatch|Update FAIL|%s|%s", e, v)
176 | t.Fail()
177 | return
178 | }
179 | t.Logf("Query|%s", e)
180 | }
181 |
182 | //test async batch delete
183 | for _, v := range mids {
184 | kiteMysql.AsyncDelete("trade", v)
185 | }
186 | time.Sleep(5 * time.Second)
187 | for _, v := range mids {
188 | entity := kiteMysql.Query("trade", v)
189 | if nil != entity {
190 | t.Fatalf("TestBatch|AysncDelete FAIL|%s", entity)
191 | t.Fail()
192 |
193 | }
194 | }
195 |
196 | truncate(kiteMysql)
197 | }
198 |
199 | func truncate(k *KiteMysqlStore) {
200 | for i := 0; i < 4; i++ {
201 | for j := 0; j < 4; j++ {
202 | m := k.dbshard.FindShardById(i*4 + j).master
203 | _, err := m.Exec(fmt.Sprintf("truncate table kite_msg_%d", j))
204 | if nil != err {
205 | log.Printf("ERROR|truncate table kite_msg_%d.%s|%s", i, j, err)
206 | } else {
207 | // log.Printf("SUCC|truncate table kite_msg_%d.%s|%s", i, j, err)
208 | }
209 | _, err = m.Exec(fmt.Sprintf("truncate table kite_msg_dlq"))
210 | if nil != err {
211 | log.Printf("ERROR|truncate table kite_msg_%d.kite_msg_dlq|%s", i, j, err)
212 | } else {
213 | // log.Printf("SUCC|truncate table kite_msg_%d.%s|%s", i, j, err)
214 | }
215 | }
216 | }
217 | }
218 |
219 | func TestStringSave(t *testing.T) {
220 |
221 | options := MysqlOptions{
222 | Addr: "localhost:3306",
223 | DB: "kite",
224 | Username: "root",
225 | Password: "",
226 | ShardNum: 4,
227 | BatchUpSize: 100,
228 | BatchDelSize: 100,
229 | FlushPeriod: 10 * time.Millisecond,
230 | MaxIdleConn: 10,
231 | MaxOpenConn: 10}
232 |
233 | kiteMysql := NewKiteMysql(context.TODO(), options, "localhost")
234 | truncate(kiteMysql)
235 | for i := 0; i < 16; i++ {
236 | //build a message
237 | msg := &protocol.StringMessage{}
238 | msg.Header = &protocol.Header{
239 | MessageId: proto.String("26c03f00665862591f696a980b5a6" + fmt.Sprintf("%x", i)),
240 | Topic: proto.String("trade"),
241 | MessageType: proto.String("pay-succ"),
242 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
243 | DeliverLimit: proto.Int32(100),
244 | GroupId: proto.String("go-kite-test"),
245 | Commit: proto.Bool(false),
246 | Fly: proto.Bool(false)}
247 |
248 | msg.Body = proto.String("hello world")
249 | innerT(kiteMysql, msg, msg.GetHeader().GetMessageId(), t)
250 | }
251 | kiteMysql.Stop()
252 | }
253 |
254 | func TestBytesSave(t *testing.T) {
255 |
256 | options := MysqlOptions{
257 | Addr: "localhost:3306",
258 | DB: "kite",
259 | Username: "root",
260 | Password: "",
261 | ShardNum: 4,
262 | BatchUpSize: 100,
263 | BatchDelSize: 100,
264 | FlushPeriod: 10 * time.Millisecond,
265 | MaxIdleConn: 10,
266 | MaxOpenConn: 10}
267 |
268 | kiteMysql := NewKiteMysql(context.TODO(), options, "localhost")
269 | truncate(kiteMysql)
270 | for i := 0; i < 16; i++ {
271 | //build a message
272 | msg := &protocol.BytesMessage{}
273 | msg.Header = &protocol.Header{
274 | MessageId: proto.String("26c03f00665862591f696a980b5a6" + fmt.Sprintf("%x", i)),
275 | Topic: proto.String("trade"),
276 | MessageType: proto.String("pay-succ"),
277 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
278 | DeliverLimit: proto.Int32(100),
279 | GroupId: proto.String("go-kite-test"),
280 | Commit: proto.Bool(false),
281 | Fly: proto.Bool(false)}
282 |
283 | msg.Body = []byte("hello world")
284 | innerT(kiteMysql, msg, msg.GetHeader().GetMessageId(), t)
285 | }
286 |
287 | truncate(kiteMysql)
288 |
289 | kiteMysql.Stop()
290 | }
291 |
292 | //expired messages are migrated to the DLQ
293 | func TestExpiredDLQ(t *testing.T) {
294 |
295 | options := MysqlOptions{
296 | Addr: "localhost:3306",
297 | DB: "kite",
298 | Username: "root",
299 | Password: "",
300 | ShardNum: 4,
301 | BatchUpSize: 100,
302 | BatchDelSize: 100,
303 | FlushPeriod: 10 * time.Millisecond,
304 | MaxIdleConn: 10,
305 | MaxOpenConn: 10}
306 |
307 | kiteMysql := NewKiteMysql(context.TODO(), options, "localhost")
308 | truncate(kiteMysql)
309 | messageIds := make([]string, 0, 10)
310 | for i := 0; i < 256; i++ {
311 | //build a message
312 | msg := &protocol.BytesMessage{}
313 | msg.Header = &protocol.Header{
314 | MessageId: proto.String("26c03f00665862591f696a980b5a6" + fmt.Sprintf("%x", i)),
315 | Topic: proto.String("trade"),
316 | MessageType: proto.String("pay-succ"),
317 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Second).Unix()),
318 | DeliverLimit: proto.Int32(100),
319 | GroupId: proto.String("go-kite-test"),
320 | Commit: proto.Bool(false),
321 | Fly: proto.Bool(false)}
322 |
323 | msg.Body = []byte("hello world")
324 | qm := protocol.NewQMessage(msg)
325 | entity := store.NewMessageEntity(qm)
326 | entity.SuccGroups = []string{"go-kite-test"}
327 | hn := "localhost"
328 | entity.KiteServer = hn
329 | entity.PublishTime = time.Now().Unix()
330 | entity.DeliverCount = 101
331 |
332 | succ := kiteMysql.Save(entity)
333 | if !succ {
334 | t.Fail()
335 | } else {
336 | // fmt.Printf("SAVE|SUCC|%s", entity)
337 | }
338 | messageIds = append(messageIds, msg.GetHeader().GetMessageId())
339 | }
340 |
341 | //trigger the expired-message move
342 | kiteMysql.MoveExpired()
343 |
344 | for _, messageId := range messageIds {
345 |
346 | entity := kiteMysql.Query("trade", messageId)
347 | if nil != entity {
348 | t.Fail()
349 | fmt.Println("MoveExpired|FAIL|" + messageId)
350 | }
351 | }
352 |
353 | total := 0
354 | for i := 0; i < 4; i++ {
355 | db := kiteMysql.dbshard.FindSlave(fmt.Sprintf("%x", i))
356 | rows, _ := db.Query("select count(*) from kite_msg_dlq")
357 | if rows.Next() {
358 | var count int
359 | rows.Scan(&count)
360 | t.Logf("TestExpiredDLQ|COUNT|%s|%d", fmt.Sprintf("%x", i), total)
361 | total += count
362 | }
363 | }
364 |
365 | if total != 256 {
366 | t.Fail()
367 | t.Logf("TestExpiredDLQ|TOTAL NOT EQUAL|%d", total)
368 | }
369 |
370 | truncate(kiteMysql)
371 | kiteMysql.Stop()
372 | }
373 |
374 | func innerT(kiteMysql *KiteMysqlStore, msg interface{}, msgid string, t *testing.T) {
375 |
376 | qm := protocol.NewQMessage(msg)
377 | entity := store.NewMessageEntity(qm)
378 | entity.SuccGroups = []string{"go-kite-test"}
379 | hn := "localhost"
380 | entity.KiteServer = hn
381 | entity.PublishTime = time.Now().Unix()
382 |
383 | succ := kiteMysql.Save(entity)
384 | if !succ {
385 | t.Fail()
386 | } else {
387 | t.Logf("SAVE|SUCC|%s", entity)
388 | }
389 |
390 | ret := kiteMysql.Query("trade", msgid)
391 | t.Logf("Query|%s|%s", msgid, ret)
392 | if nil == ret {
393 | t.Fail()
394 | return
395 | }
396 |
397 | bb, ok := qm.GetBody().([]byte)
398 | if ok {
399 | rb, _ := ret.GetBody().([]byte)
400 | if string(rb) != string(bb) {
401 | t.Fail()
402 | } else {
403 | t.Logf("Query|SUCC|%s", ret)
404 | }
405 | } else {
406 | bs, _ := qm.GetBody().(string)
407 | rs, _ := ret.GetBody().(string)
408 | if bs != rs {
409 | t.Fail()
410 | } else {
411 | t.Logf("Query|SUCC|%s", ret)
412 | }
413 | }
414 |
415 | t.Logf("Commint BEGIN")
416 | commit := kiteMysql.Commit("trade", msgid)
417 | if !commit {
418 | t.Logf("Commint FAIL")
419 | t.Fail()
420 | }
421 | t.Logf("Commint END")
422 | time.Sleep(200 * time.Millisecond)
423 | ret = kiteMysql.Query("trade", msgid)
424 | t.Logf("PageQueryEntity|COMMIT RESULT|%s", ret)
425 | if !ret.Commit {
426 | t.Logf("Commit|FAIL|%s", ret)
427 | t.Fail()
428 | }
429 |
430 | hasNext, entities := kiteMysql.PageQueryEntity(msgid, hn, time.Now().Unix(), 0, 10)
431 | t.Logf("PageQueryEntity|%s", entities)
432 | if hasNext {
433 | t.Logf("PageQueryEntity|FAIL|HasNext|%s", entities)
434 | t.Fail()
435 | } else {
436 | if len(entities) != 1 {
437 | t.Logf("PageQueryEntity|FAIL|%s", entities)
438 | t.Fail()
439 | } else {
440 | if entities[0].Header.GetMessageId() != qm.GetHeader().GetMessageId() {
441 | t.Fail()
442 | }
443 | }
444 | }
445 | }
446 |
--------------------------------------------------------------------------------
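These tests assume a local MySQL on localhost:3306 with a passwordless root account and the sharded kite schema (see store/mysql/table.sh below). A small precondition check, sketched under those assumptions:

    package main

    import (
    	"database/sql"
    	"log"

    	_ "github.com/go-sql-driver/mysql"
    )

    // Fails fast if the sharded test schema is missing.
    func main() {
    	db, err := sql.Open("mysql", "root:@tcp(localhost:3306)/kite_0?timeout=5s")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()
    	if err := db.Ping(); err != nil {
    		log.Fatal("kite_0 unreachable; run store/mysql/table.sh first: ", err)
    	}
    }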
/store/mysql/kite_sql_wrapper.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "bytes"
5 | "reflect"
6 | "strconv"
7 | "strings"
8 | )
9 |
10 | type batchType uint8
11 |
12 | func (s batchType) String() string {
13 | switch s {
14 | case 1:
15 | return "Stmt-Commit"
16 | case 2:
17 | return "Stmt-Upate"
18 | case 3:
19 | return "Stmt-Delete"
20 | case 4:
21 | return "Stmt-DLQ_MOVE_QUERY"
22 | case 5:
23 | return "Stmt-DLQ_MOVE_INSERT"
24 | case 6:
25 | return "Stmt-DLQ_MOVE_DELETE"
26 | }
27 | return "Stmt-Unknown"
28 | }
29 |
30 | type batchTypes []batchType
31 |
32 | func (s batchTypes) Len() int {
33 | return len(s)
34 | }
35 |
36 | func (s batchTypes) Swap(i, j int) {
37 | s[i], s[j] = s[j], s[i]
38 | }
39 | func (s batchTypes) Less(i, j int) bool {
40 | return s[i] <= s[j]
41 | }
42 |
43 | const (
44 | COMMIT batchType = 1
45 | UPDATE batchType = 2
46 | DELETE batchType = 3
47 | DLQ_MOVE_QUERY batchType = 4
48 | DLQ_MOVE_INSERT batchType = 5
49 | DLQ_MOVE_DELETE batchType = 6
50 | )
51 |
52 | type column struct {
53 | columnName string
54 | fieldName string
55 | isPK bool
56 | isHashKey bool
57 | fieldKind reflect.Kind
58 | }
59 |
60 | type sqlwrapper struct {
61 | tablename string
62 | columns []column
63 | batchSQL map[batchType][]string
64 | queryPrepareSQL []string
65 | pageQuerySQL []string
66 | savePrepareSQL []string
67 | msgStatSQL []string
68 | dlqMoveSQL map[batchType][]string
69 | dbshard DbShard
70 | }
71 |
72 | func newSqlwrapper(tablename string, dbshard DbShard, i interface{}) *sqlwrapper {
73 |
74 | columns := make([]column, 0, 10)
75 | //reflect over the struct fields to build the field->column mapping
76 | r := reflect.TypeOf(i)
77 | for i := 0; i < r.NumField(); i++ {
78 | f := r.Field(i)
79 | tag := f.Tag.Get("db")
80 | c := column{}
81 | c.fieldName = f.Name
82 | c.fieldKind = f.Type.Kind()
83 | //no db tag: use the field name as the column name
84 | if len(tag) <= 0 {
85 | c.columnName = f.Name
86 | } else if tag != "transient" {
87 | tags := strings.Split(tag, ",")
88 | c.columnName = tags[0] //column
89 | if len(tags) > 1 && tags[1] == "pk" {
90 | c.isPK = true
91 | c.isHashKey = true
92 | }
93 | } else if tag == "transient" {
94 | continue
95 | }
96 | columns = append(columns, c)
97 | }
98 |
99 | sw := &sqlwrapper{columns: columns, dbshard: dbshard, tablename: tablename}
100 | sw.initSQL()
101 | return sw
102 | }
103 |
104 | func (self *sqlwrapper) hashQuerySQL(hashkey string) string {
105 | return self.queryPrepareSQL[self.dbshard.FindForKey(hashkey)]
106 | }
107 | func (self *sqlwrapper) hashSaveSQL(hashkey string) string {
108 | return self.savePrepareSQL[self.dbshard.FindForKey(hashkey)]
109 | }
110 | func (self *sqlwrapper) hashCommitSQL(hashkey string) string {
111 | return self.batchSQL[COMMIT][self.dbshard.FindForKey(hashkey)]
112 | }
113 | func (self *sqlwrapper) hashDeleteSQL(hashkey string) string {
114 | return self.batchSQL[DELETE][self.dbshard.FindForKey(hashkey)]
115 | }
116 | func (self *sqlwrapper) hashPQSQL(hashkey string) string {
117 | return self.pageQuerySQL[self.dbshard.FindForKey(hashkey)]
118 | }
119 |
120 | func (self *sqlwrapper) hashMessageStatSQL(hashkey string) string {
121 | return self.msgStatSQL[self.dbshard.FindForKey(hashkey)]
122 | }
123 |
124 | func (self *sqlwrapper) hashDLQSQL(bt batchType, hashkey string) string {
125 | return self.dlqMoveSQL[bt][self.dbshard.FindForKey(hashkey)]
126 | }
127 |
128 | func (self *sqlwrapper) initSQL() {
129 |
130 | //query
131 | buff := make([]byte, 0, 128)
132 | s := bytes.NewBuffer(buff)
133 | s.WriteString("select ")
134 | for i, v := range self.columns {
135 | s.WriteString(v.columnName)
136 | if i < len(self.columns)-1 {
137 | s.WriteString(",")
138 | }
139 | }
140 |
141 | s.WriteString(" from ")
142 | s.WriteString(self.tablename)
143 | s.WriteString("_{} ")
144 | s.WriteString(" where message_id=?")
145 | sql := s.String()
146 |
147 | self.queryPrepareSQL = make([]string, 0, self.dbshard.HashNum())
148 | for i := 0; i < self.dbshard.HashNum(); i++ {
149 | st := strconv.Itoa(i)
150 | self.queryPrepareSQL = append(self.queryPrepareSQL, strings.Replace(sql, "{}", st, -1))
151 | }
152 |
153 | //save
154 | s.Reset()
155 | s.WriteString("insert into ")
156 | s.WriteString(self.tablename)
157 | s.WriteString("_{} (")
158 | for i, v := range self.columns {
159 | s.WriteString(v.columnName)
160 | if i < len(self.columns)-1 {
161 | s.WriteString(",")
162 | }
163 | }
164 | s.WriteString(") ")
165 |
166 | s.WriteString(" values (")
167 | for i := range self.columns {
168 | s.WriteString("?")
169 | if i < len(self.columns)-1 {
170 | s.WriteString(",")
171 | }
172 |
173 | }
174 | s.WriteString(" )")
175 |
176 | sql = s.String()
177 |
178 | self.savePrepareSQL = make([]string, 0, self.dbshard.HashNum())
179 | for i := 0; i < self.dbshard.HashNum(); i++ {
180 | st := strconv.Itoa(i)
181 | self.savePrepareSQL = append(self.savePrepareSQL, strings.Replace(sql, "{}", st, -1))
182 | }
183 |
184 | //page query
185 |
186 | // select
187 | // a.message_id,a.header,a.msg_type,a.topic,a.message_type,
188 | // a.publish_group,a.commit,a.publish_time,a.expired_time,
189 | // a.deliver_count,a.deliver_limit,a.kite_server,a.fail_groups,a.succ_groups,
190 | // a.next_deliver_time
191 | // from kite_msg_3 a
192 | // inner join (
193 | // select message_id
194 | // from kite_msg_3 force index(idx_recover)
195 | // where
196 | // kite_server='vm-golang001.vm.momo.com' and deliver_count< deliver_limit
197 | // and expired_time>=1428056089 and next_deliver_time<=1428055512
198 | // order by next_deliver_time asc limit 28500,51) b using (message_id);
199 |
200 | s.Reset()
201 | s.WriteString("select ")
202 | for i, v := range self.columns {
203 | //the body column is excluded from page queries
204 | if v.columnName == "body" {
205 | continue
206 | }
207 | s.WriteString("a.")
208 | s.WriteString(v.columnName)
209 | if i < len(self.columns)-1 {
210 | s.WriteString(",")
211 | }
212 | }
213 | s.WriteString(" from ")
214 | s.WriteString(self.tablename)
215 | s.WriteString("_{} a ")
216 | s.WriteString(" inner join ") //强制使用idx_recover索引
217 | s.WriteString(" ( select message_id from ")
218 | s.WriteString(self.tablename)
219 | s.WriteString("_{} ")
220 | s.WriteString(" force index(idx_recover) ")
221 | s.WriteString(" where kite_server=? and deliver_count=? and next_deliver_time<=? ")
222 | s.WriteString(" order by next_deliver_time asc limit ?,?) b")
223 | s.WriteString(" using (message_id) ")
224 |
225 | sql = s.String()
226 |
227 | self.pageQuerySQL = make([]string, 0, self.dbshard.HashNum())
228 | for i := 0; i < self.dbshard.HashNum(); i++ {
229 | st := strconv.Itoa(i)
230 | self.pageQuerySQL = append(self.pageQuerySQL, strings.Replace(sql, "{}", st, -1))
231 | }
232 |
233 | //--------------batchOps
234 |
235 | self.batchSQL = make(map[batchType][]string, 4)
236 | //commit
237 | s.Reset()
238 | s.WriteString("update ")
239 | s.WriteString(self.tablename)
240 | s.WriteString("_{} ")
241 | s.WriteString(" set commit=? ")
242 | s.WriteString(" where message_id=?")
243 |
244 | sql = s.String()
245 |
246 | self.batchSQL[COMMIT] = make([]string, 0, self.dbshard.HashNum())
247 | for i := 0; i < self.dbshard.HashNum(); i++ {
248 | st := strconv.Itoa(i)
249 | self.batchSQL[COMMIT] = append(self.batchSQL[COMMIT], strings.Replace(sql, "{}", st, -1))
250 | }
251 |
252 | //delete
253 | s.Reset()
254 | s.WriteString("delete from ")
255 | s.WriteString(self.tablename)
256 | s.WriteString("_{} ")
257 | s.WriteString(" where message_id=?")
258 |
259 | sql = s.String()
260 |
261 | self.batchSQL[DELETE] = make([]string, 0, self.dbshard.HashNum())
262 | for i := 0; i < self.dbshard.HashNum(); i++ {
263 | st := strconv.Itoa(i)
264 | self.batchSQL[DELETE] = append(self.batchSQL[DELETE], strings.Replace(sql, "{}", st, -1))
265 | }
266 |
267 | //batch update
268 | s.Reset()
269 | s.WriteString("update ")
270 | s.WriteString(self.tablename)
271 | s.WriteString("_{} ")
272 | s.WriteString(" set succ_groups=?,fail_groups=?,next_deliver_time=?,deliver_count=? ")
273 | s.WriteString(" where message_id=?")
274 |
275 | sql = s.String()
276 |
277 | self.batchSQL[UPDATE] = make([]string, 0, self.dbshard.HashNum())
278 | for i := 0; i < self.dbshard.HashNum(); i++ {
279 | st := strconv.Itoa(i)
280 | self.batchSQL[UPDATE] = append(self.batchSQL[UPDATE], strings.Replace(sql, "{}", st, -1))
281 | }
282 |
283 | //----------- count the message backlog on this server
284 |
285 | // select
286 | // topic,count(message_id) total
287 | // from kite_msg_3
288 | // where
289 | // kite_server='vm-golang001.vm.momo.com' and deliver_count=? group by topic;
290 |
291 | s.Reset()
292 | s.WriteString("select topic,count(message_id) total ")
293 | s.WriteString(" from ")
294 | s.WriteString(self.tablename)
295 | s.WriteString("_{} ")
296 | s.WriteString(" where kite_server=? and deliver_count=? group by topic")
297 |
298 | sql = s.String()
299 |
300 | self.msgStatSQL = make([]string, 0, self.dbshard.HashNum())
301 | for i := 0; i < self.dbshard.HashNum(); i++ {
302 | st := strconv.Itoa(i)
303 | self.msgStatSQL = append(self.msgStatSQL, strings.Replace(sql, "{}", st, -1))
304 | }
305 |
306 | //----------- page expired / over-limit message ids by id cursor (DLQ-move candidates)
307 | s.Reset()
308 | s.WriteString("select id,message_id from ")
309 | s.WriteString(self.tablename)
310 | s.WriteString("_{} ")
311 | s.WriteString(" where kite_server=? and (deliver_count>=deliver_limit or expired_time<=?) and id > ? order by id asc limit ? ")
312 | sql = s.String()
313 |
314 | self.dlqMoveSQL = make(map[batchType][]string, 3)
315 | self.dlqMoveSQL[DLQ_MOVE_QUERY] = make([]string, self.dbshard.hashNum)
316 | for i := 0; i < self.dbshard.HashNum(); i++ {
317 | st := strconv.Itoa(i)
318 | self.dlqMoveSQL[DLQ_MOVE_QUERY][i] = strings.Replace(sql, "{}", st, -1)
319 | }
320 |
321 | s.Reset()
322 | //------------ batch-copy rows into this server's DLQ table
323 | s.WriteString("insert into ")
324 | s.WriteString(self.tablename)
325 | s.WriteString("_dlq( ")
326 | for i, v := range self.columns {
327 | s.WriteString(v.columnName)
328 | if i < len(self.columns)-1 {
329 | s.WriteString(",")
330 | }
331 | }
332 | s.WriteString(") select ")
333 | for i, v := range self.columns {
334 | s.WriteString(v.columnName)
335 | if i < len(self.columns)-1 {
336 | s.WriteString(",")
337 | }
338 | }
339 | s.WriteString(" from ")
340 | s.WriteString(self.tablename)
341 | s.WriteString("_{} ")
342 | s.WriteString(" where message_id in({ids}) ")
343 |
344 | sql = s.String()
345 | self.dlqMoveSQL[DLQ_MOVE_INSERT] = make([]string, self.dbshard.hashNum)
346 | for i := 0; i < self.dbshard.HashNum(); i++ {
347 | st := strconv.Itoa(i)
348 | self.dlqMoveSQL[DLQ_MOVE_INSERT][i] = strings.Replace(sql, "{}", st, -1)
349 | }
350 |
351 | //--------------- delete expired rows already copied to the DLQ
352 | s.Reset()
353 | s.WriteString("delete from ")
354 | s.WriteString(self.tablename)
355 | s.WriteString("_{} ")
356 | s.WriteString(" where kite_server=? and message_id in ({ids})")
357 | sql = s.String()
358 | self.dlqMoveSQL[DLQ_MOVE_DELETE] = make([]string, self.dbshard.hashNum)
359 | for i := 0; i < self.dbshard.HashNum(); i++ {
360 | st := strconv.Itoa(i)
361 | self.dlqMoveSQL[DLQ_MOVE_DELETE][i] = strings.Replace(sql, "{}", st, -1)
362 | }
363 | }
364 |
--------------------------------------------------------------------------------
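For orientation, initSQL expands the `{}` placeholder into each table suffix 0..hashNum-1. Representative output for suffix 0, reconstructed from the builders above (column lists elided, whitespace simplified; illustrative, not verbatim output):

    package mysql

    // Reconstructed samples of the generated statements (illustrative only).
    const (
    	sampleQuery  = "select message_id,header,... from kite_msg_0 where message_id=?"
    	sampleSave   = "insert into kite_msg_0 (message_id,header,...) values (?,?,...)"
    	sampleCommit = "update kite_msg_0 set commit=? where message_id=?"
    	sampleDelete = "delete from kite_msg_0 where message_id=?"
    	sampleUpdate = "update kite_msg_0 set succ_groups=?,fail_groups=?,next_deliver_time=?,deliver_count=? where message_id=?"
    )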
/store/mysql/stmt_pool.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "container/list"
5 | "database/sql"
6 | "errors"
7 | "sync"
8 | "time"
9 | )
10 |
11 | //prepared-statement factory
12 | type IStmtFactory interface {
13 | Get() (error, *sql.Stmt) //fetch a statement
14 | Release(stmt *sql.Stmt) error //return a statement to the pool
15 | ReleaseBroken(stmt *sql.Stmt) error //discard a broken statement
16 | Shutdown() //shut down the pool
17 | MonitorPool() (int, int, int)
18 | }
19 |
20 | //StmtPool: a pool of prepared statements
21 | type StmtPool struct {
22 | dialFunc func() (error, *sql.Stmt)
23 | maxPoolSize int //maximum pool size
24 | minPoolSize int //minimum pool size
25 | corepoolSize int //core pool size
26 | numActive int //statements currently alive
27 | numWork int //statements currently in use
28 | idletime time.Duration //idle timeout
29 |
30 | idlePool *list.List //idle statements
31 |
32 | running bool
33 |
34 | mutex sync.Mutex //global lock
35 | }
36 |
37 | type IdleStmt struct {
38 | stmt *sql.Stmt
39 | expiredTime time.Time
40 | }
41 |
42 | func NewStmtPool(minPoolSize, corepoolSize,
43 | maxPoolSize int, idletime time.Duration,
44 | dialFunc func() (error, *sql.Stmt)) (error, *StmtPool) {
45 |
46 | idlePool := list.New()
47 | pool := &StmtPool{
48 | maxPoolSize: maxPoolSize,
49 | corepoolSize: corepoolSize,
50 | minPoolSize: minPoolSize,
51 | idletime: idletime,
52 | idlePool: idlePool,
53 | dialFunc: dialFunc,
54 | running: true}
55 |
56 | err := pool.enhancedPool(pool.minPoolSize)
57 | if nil != err {
58 | return err, nil
59 | }
60 |
61 | //start the idle-eviction loop
62 | go pool.evict()
63 |
64 | return nil, pool
65 | }
66 |
67 | func (self *StmtPool) enhancedPool(size int) error {
68 |
69 | //pre-create the minimum pool size and push into the idle pool
70 | for i := 0; i < size; i++ {
71 | j := 0
72 | var err error
73 | var stmt *sql.Stmt
74 | for ; j < 3; j++ {
75 | err, stmt = self.dialFunc()
76 | if nil != err {
77 | log.Errorf("POOL_FACTORY|CREATE STMT|INIT|FAIL|%s", err)
78 |
79 | } else {
80 | break
81 | }
82 | }
83 |
84 | if j >= 3 {
85 | return errors.New("POOL_FACTORY|CREATE STMT|INIT|FAIL|" + err.Error())
86 | }
87 |
88 | idlestmt := &IdleStmt{stmt: stmt, expiredTime: (time.Now().Add(self.idletime))}
89 | self.idlePool.PushFront(idlestmt)
90 | self.numActive++
91 | }
92 |
93 | return nil
94 | }
95 |
96 | func (self *StmtPool) evict() {
97 | for self.running {
98 |
99 | select {
100 | case <-time.After(self.idletime):
101 | self.mutex.Lock()
102 | for e := self.idlePool.Back(); nil != e; e = e.Prev() {
103 | idlestmt := e.Value.(*IdleStmt)
104 | //close the stmt if it has expired and the active count still exceeds corepoolSize
105 | isExpired := idlestmt.expiredTime.Before(time.Now())
106 | if isExpired &&
107 | self.numActive >= self.corepoolSize {
108 | idlestmt.stmt.Close()
109 | idlestmt = nil
110 | self.idlePool.Remove(e)
111 | //and decrement the active count
112 | self.numActive--
113 | } else if isExpired {
114 | //expired, but already at/below corepoolSize: just renew the deadline
115 | idlestmt.expiredTime = time.Now().Add(self.idletime)
116 | } else {
117 | //still alive: renew the deadline
118 | idlestmt.expiredTime = time.Now().Add(self.idletime)
119 | }
120 | }
121 | self.mutex.Unlock()
122 | }
123 | }
124 | }
125 |
126 | func (self *StmtPool) MonitorPool() (int, int, int) {
127 | return self.numWork, self.numActive, self.idlePool.Len()
128 | }
129 |
130 | func (self *StmtPool) Get() (error, *sql.Stmt) {
131 | self.mutex.Lock()
132 | defer self.mutex.Unlock()
133 | if !self.running {
134 | return errors.New("POOL_FACTORY|POOL IS SHUTDOWN"), nil
135 | }
136 |
137 | var stmt *sql.Stmt
138 | var err error
139 | //take one from the idle pool first if available
140 | if self.idlePool.Len() > 0 {
141 | e := self.idlePool.Back()
142 | idle := e.Value.(*IdleStmt)
143 | self.idlePool.Remove(e)
144 | stmt = idle.stmt
145 | }
146 |
147 | //still no statement from the idle pool
148 | if nil == stmt {
149 | //create a new one only while below maxPoolSize
150 | if self.numActive < self.maxPoolSize {
151 | //dial a fresh statement
152 | err, stmt = self.dialFunc()
153 | if nil != err {
154 | stmt = nil
155 | } else {
156 | self.numActive++
157 | }
158 | } else {
159 | return errors.New("POOLFACTORY|POOL|FULL!"), nil
160 | }
161 | }
162 |
163 | if nil != stmt {
164 | self.numWork++
165 | }
166 |
167 | return err, stmt
168 | }
169 |
170 | //discard a broken statement
171 | func (self *StmtPool) ReleaseBroken(conn *sql.Stmt) error {
172 | self.mutex.Lock()
173 | defer self.mutex.Unlock()
174 |
175 | if nil != conn {
176 | conn.Close()
177 | conn = nil
178 | }
179 |
180 | var err error
181 | //only decrement while both the active and working counts are positive
182 | if self.numActive > 0 && self.numWork > 0 {
183 | self.numWork--
184 | self.numActive--
185 |
186 | } else {
187 | err = errors.New("POOL|RELEASE BROKEN|INVALID CONN")
188 | }
189 |
190 | //top the pool back up to the minimum size if needed
191 | incrCount := self.minPoolSize - self.numActive
192 | if incrCount > 0 {
193 | //below the minimum: create the missing statements
194 | err = self.enhancedPool(incrCount)
195 | }
196 |
197 | return err
198 | }
199 |
200 | /**
201 |  * Return the statement to the pool.
202 |  **/
203 | func (self *StmtPool) Release(stmt *sql.Stmt) error {
204 |
205 | idleStmt := &IdleStmt{stmt: stmt, expiredTime: (time.Now().Add(self.idletime))}
206 |
207 | self.mutex.Lock()
208 | defer self.mutex.Unlock()
209 |
210 | if self.numWork > 0 {
211 | //push it back into the idle pool
212 | self.idlePool.PushFront(idleStmt)
213 | //one fewer working statement
214 | self.numWork--
215 | return nil
216 | } else {
217 | stmt.Close()
218 | stmt = nil
219 | log.Errorf("POOL|RELEASE|FAIL|%d", self.numActive)
220 | return errors.New("POOL|RELEASE|INVALID CONN")
221 | }
222 |
223 | }
224 |
225 | func (self *StmtPool) Shutdown() {
226 | self.mutex.Lock()
227 | defer self.mutex.Unlock()
228 | self.running = false
229 | //wait briefly for in-flight work to finish
230 | time.Sleep(1 * time.Second)
231 | for i := 0; i < 3; {
232 |
233 | if self.numWork <= 0 {
234 | break
235 | }
236 |
237 | log.Infof("Statment Pool|CLOSEING|WORK POOL SIZE|:%d", self.numWork)
238 | i++
239 | }
240 |
241 | var idleStmt *IdleStmt
242 | //close the idle statements
243 | for e := self.idlePool.Front(); e != nil; e = e.Next() {
244 | idleStmt = e.Value.(*IdleStmt)
245 | idleStmt.stmt.Close()
246 | self.idlePool.Remove(e)
247 | idleStmt = nil
248 | }
249 |
250 | log.Infof("Statment Pool|SHUTDOWN")
251 | }
252 |
--------------------------------------------------------------------------------
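A minimal usage sketch for StmtPool, assuming it lives in this package and that the statement below targets an existing kite_msg_0 table (both are assumptions):

    package mysql

    import (
    	"database/sql"
    	"time"
    )

    // Pools prepared statements for one UPDATE; Release hands healthy
    // statements back, ReleaseBroken discards failed ones and tops the pool up.
    func exampleStmtPool(db *sql.DB) error {
    	err, pool := NewStmtPool(5, 10, 20, 10*time.Second, func() (error, *sql.Stmt) {
    		stmt, err := db.Prepare("update kite_msg_0 set commit=? where message_id=?")
    		return err, stmt
    	})
    	if nil != err {
    		return err
    	}
    	defer pool.Shutdown()

    	err, stmt := pool.Get()
    	if nil != err {
    		return err
    	}
    	if _, err := stmt.Exec(true, "26c03f00665862591f696a980b5a6c4f"); nil != err {
    		pool.ReleaseBroken(stmt)
    		return err
    	}
    	return pool.Release(stmt)
    }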
/store/mysql/stmt_pool_test.go:
--------------------------------------------------------------------------------
1 | package mysql
2 |
3 | import (
4 | "database/sql"
5 | "testing"
6 | "time"
7 | )
8 |
9 | func TestStmtPool(t *testing.T) {
10 |
11 | // dropsql := "drop database if exists `test`;"
12 | // createsql :=
13 | // "create table `test_a` ( " +
14 | // "id int(10) primary key auto_increment ," +
15 | // "username char(20)" +
16 | // ");"
17 |
18 | // db, _ := dbcreate("root:@tcp(localhost:3306)/test")
19 | // _, err := db.Exec(dropsql)
20 | // t.Errorf("%s", err)
21 |
22 | // db.Exec("create database test;")
23 | // db.Exec(createsql)
24 | // db.Close()
25 |
26 | db, _ := dbcreate("root:@tcp(localhost:3306)/test")
27 | //seed a row to update
28 | db.Exec("insert `test_a`(username) values('a')")
29 |
30 | preSql := "update `test_a` set username=? where id=?"
31 |
32 | err, p := NewStmtPool(10, 10, 20, 10*time.Second, func() (error, *sql.Stmt) {
33 | prepare, err := db.Prepare(preSql)
34 | return err, prepare
35 | })
36 |
37 | if nil != err {
38 | t.Fail()
39 | t.Logf("NewStmtPool|FAIL|%s", err)
40 | return
41 | }
42 |
43 | row, err := db.Query("select * from test_a where username=?", "a")
44 | if nil != err {
45 | t.Logf("Query DB |FAIL|%s", err)
46 | t.Fail()
47 | return
48 | }
49 |
50 | if row.Next() {
51 | var id int
52 | var username string
53 |
54 | err := row.Scan(&id, &username)
55 | if nil != err {
56 | t.Fail()
57 | t.Logf("db|QUERY|Scan |FAIL|%s", err)
58 | return
59 | }
60 |
61 | if username != "a" {
62 | t.Fail()
63 | t.Logf("db|QUERY|username is not a |FAIL")
64 | return
65 | }
66 | } else {
67 | t.Fail()
68 | t.Logf("db|QUERY|FAIL|%s", err)
69 | }
70 |
71 | err, stmt := p.Get()
72 | if nil != err {
73 | t.Fail()
74 | t.Logf("Get Stmt |FAIL|%s", err)
75 | return
76 | }
77 |
78 | stmt.Exec("b", 1)
79 | p.Release(stmt)
80 |
81 | row, err = db.Query("select * from test_a where username=?", "b")
82 | if nil != err {
83 | t.Logf("Query Stmt |FAIL|%s", err)
84 | t.Fail()
85 | return
86 | }
87 |
88 | if row.Next() {
89 | var id int
90 | var username string
91 |
92 | err := row.Scan(&id, &username)
93 | if nil != err {
94 | t.Fail()
95 | t.Logf("Query Stmt|Scan |FAIL|%s", err)
96 | return
97 | }
98 |
99 | if username != "b" || id != 1 {
100 | t.Fail()
101 | t.Logf("Query Stmt|username is not a |FAIL")
102 | return
103 | }
104 | } else {
105 | t.Fail()
106 | t.Logf("Query Stmt|username is not a |FAIL")
107 | }
108 |
109 | p.Shutdown()
110 | }
111 |
112 | func dbcreate(addr string) (*sql.DB, error) {
113 | db, err := sql.Open("mysql", addr)
114 | if err != nil {
115 | log.Panicf("NewKiteMysql|CONNECT FAIL|%s|%s", err, addr)
116 | }
117 |
118 | db.SetMaxIdleConns(10)
119 | db.SetMaxOpenConns(100)
120 | return db, nil
121 | }
122 |
--------------------------------------------------------------------------------
/store/mysql/table.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # "db 初始化kiteq脚本 单库多表"
4 |
5 |
6 |
7 |
8 | shardNum=4
9 |
10 | hashNum=`expr 16 / $shardNum`
11 |
12 |
13 | echo "------------分库:"$shardNum"\t每个表"$hashNum"张表------------------"
14 |
15 | DB_NAME="kite"
16 | TB_NAME="kite_msg"
17 | for db_index in $(seq 0 $((shardNum - 1))); do
18 | mysql -u root -e "
19 | drop database if exists ${DB_NAME}_${db_index};
20 | create database ${DB_NAME}_${db_index};"
21 |
22 | for tb_index in $(seq 0 $((hashNum - 1))); do
23 | mysql -u root -e "
24 | use ${DB_NAME}_${db_index};
25 | DROP TABLE IF EXISTS ${TB_NAME}_${tb_index};
26 | create table if not exists ${TB_NAME}_${tb_index} (
27 | id int NOT NULL AUTO_INCREMENT,
28 | header mediumblob,
29 | body mediumblob,
30 | msg_type tinyint(3) unsigned DEFAULT NULL,
31 | message_id varchar(32) NOT NULL,
32 | topic varchar(255) DEFAULT NULL,
33 | message_type varchar(255) DEFAULT NULL,
34 | publish_group varchar(255) DEFAULT NULL,
35 | commit tinyint(1) DEFAULT NULL,
36 | expired_time bigint(13) DEFAULT NULL,
37 | publish_time bigint(13) DEFAULT NULL,
38 | deliver_count int(11) DEFAULT NULL,
39 | deliver_limit int(11) DEFAULT NULL,
40 | kite_server varchar(255) DEFAULT NULL,
41 | fail_groups varchar(255) DEFAULT NULL,
42 | succ_groups varchar(255) DEFAULT NULL,
43 | next_deliver_time bigint(13) DEFAULT NULL,
44 | PRIMARY KEY (id),
45 | UNIQUE KEY idx_message_id (message_id),
46 | KEY idx_commit (commit),
47 | KEY idx_kite_server (kite_server),
48 | KEY idx_expired_time (expired_time),
49 | KEY idx_recover_a (next_deliver_time,kite_server,expired_time,deliver_count,deliver_limit)
50 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
51 | "
52 | echo "create table "${TB_NAME}_${tb_index}" succ!"
53 | done;
54 | #create the DLQ table
55 | mysql -u root -e "
56 | use ${DB_NAME}_${db_index};
57 | DROP TABLE IF EXISTS ${TB_NAME}_dlq;
58 | create table if not exists ${TB_NAME}_dlq (
59 | id int NOT NULL AUTO_INCREMENT,
60 | header mediumblob,
61 | body mediumblob,
62 | msg_type tinyint(3) unsigned DEFAULT NULL,
63 | message_id varchar(32) NOT NULL,
64 | topic varchar(255) DEFAULT NULL,
65 | message_type varchar(255) DEFAULT NULL,
66 | publish_group varchar(255) DEFAULT NULL,
67 | commit tinyint(1) DEFAULT NULL,
68 | expired_time bigint(13) DEFAULT NULL,
69 | publish_time bigint(13) DEFAULT NULL,
70 | deliver_count int(11) DEFAULT NULL,
71 | deliver_limit int(11) DEFAULT NULL,
72 | kite_server varchar(255) DEFAULT NULL,
73 | fail_groups varchar(255) DEFAULT NULL,
74 | succ_groups varchar(255) DEFAULT NULL,
75 | next_deliver_time bigint(13) DEFAULT NULL,
76 | PRIMARY KEY (id),
77 | UNIQUE KEY idx_message_id (message_id),
78 | KEY idx_commit (commit),
79 | KEY idx_kite_server (kite_server),
80 | KEY idx_expired_time (expired_time),
81 | KEY idx_recover_a (next_deliver_time,kite_server,expired_time,deliver_count,deliver_limit)
82 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
83 | "
84 | echo "create table "${TB_NAME}_dlq" succ!"
85 |
86 | done;
--------------------------------------------------------------------------------
/store/parser/kite_store_parser.go:
--------------------------------------------------------------------------------
1 | package parser
2 |
3 | import (
4 | "context"
5 | "github.com/blackbeans/logx"
6 | "kiteq/store"
7 | smf "kiteq/store/file"
8 | sm "kiteq/store/memory"
9 | smq "kiteq/store/mysql"
10 | "kiteq/store/rocksdb"
11 | "net/url"
12 | "strconv"
13 | "strings"
14 | "time"
15 | )
16 |
17 | var log = logx.GetLogger("kiteq_store")
18 |
19 | // storage schema
20 | // mock mock://
21 | // memory memory://?initcap=1000&maxcap=2000
22 | // mysql mysql://master:3306,slave:3306?db=kite&username=root&password=root&maxConn=500&batchUpdateSize=1000&batchDelSize=1000&flushSeconds=1
23 | // file file:///path?cap=10000000&checkSeconds=60&flushBatchSize=1000
24 | // rocksdb rocksdb://path?
25 |
26 | func ParseDB(ctx context.Context, db string, serverName string) store.IKiteStore {
27 | var kitedb store.IKiteStore
28 | parsed, err := url.Parse(db)
29 | if nil != err {
30 | panic(err)
31 | }
32 | switch parsed.Scheme {
33 | case "mock":
34 | kitedb = &store.MockKiteStore{}
35 | case "memory":
36 | params := parsed.Query()
37 | initval := 10 * 10000
38 |
39 | if initcap := params.Get("initcap"); len(initcap) > 0 {
40 | v, e := strconv.ParseInt(initcap, 10, 32)
41 | if nil != e {
42 | log.Fatalf("NewKiteQServer|INVALID|INIT CAP|%s", db)
43 | }
44 | initval = int(v)
45 | }
46 | max := 50 * 10000
47 | if maxcap := params.Get("maxcap"); len(maxcap) > 0 {
48 | v, e := strconv.ParseInt(maxcap, 10, 32)
49 | if nil != e {
50 | log.Fatalf("NewKiteQServer|INVALID|MAX CAP|%s", db)
51 | }
52 | max = int(v)
53 | }
54 | kitedb = sm.NewKiteMemoryStore(ctx, initval, max)
55 | case "mysql":
56 | params := parsed.Query()
57 | bus := 100
58 | if batchUpdateSize := params.Get("batchUpdateSize"); len(batchUpdateSize) > 0 {
59 | v, e := strconv.ParseInt(batchUpdateSize, 10, 32)
60 | if nil != e {
61 | log.Fatalf("NewKiteQServer|INVALID|batchUpdateSize|%s", db)
62 | }
63 | bus = int(v)
64 | }
65 |
66 | bds := 100
67 | if batchDelSize := params.Get("batchDelSize"); len(batchDelSize) > 0 {
68 | v, e := strconv.ParseInt(batchDelSize, 10, 32)
69 | if nil != e {
70 | log.Fatalf("NewKiteQServer|INVALID|batchDelSize|%s", db)
71 | }
72 | bds = int(v)
73 | }
74 |
75 | flushPeriod := 1 * time.Second
76 | if flushSeconds := params.Get("flushSeconds"); len(flushSeconds) > 0 {
77 | v, e := strconv.ParseInt(flushSeconds, 10, 32)
78 | if nil != e {
79 | log.Fatalf("NewKiteQServer|INVALID|batchDelSize|%s", db)
80 | }
81 | flushPeriod = time.Duration(v * int64(flushPeriod))
82 | }
83 |
84 | maxConn := 20
85 | if mc := params.Get("maxConn"); len(mc) > 0 {
86 | v, e := strconv.ParseInt(mc, 10, 32)
87 | if nil != e {
88 | log.Fatalf("NewKiteQServer|INVALID|batchDelSize|%s", db)
89 | }
90 | maxConn = int(v)
91 | }
92 |
93 | //parse the mysql hosts (master[,slave])
94 | master := parsed.Host
95 | slave := ""
96 | mysqlHosts := strings.Split(master, ",")
97 | if len(mysqlHosts) > 1 {
98 | master = mysqlHosts[0]
99 | slave = mysqlHosts[1]
100 | }
101 |
102 | //parse username/password
103 | username := params.Get("username")
104 | password := params.Get("password")
105 |
106 | //number of shards
107 | shardnum := 4
108 | if sd := params.Get("shardnum"); len(sd) > 0 {
109 | v, e := strconv.ParseInt(sd, 10, 32)
110 | if nil != e {
111 | log.Fatalf("NewKiteQServer|INVALID|ShardNum|%s", db)
112 | }
113 | shardnum = int(v)
114 | }
115 |
116 | options := smq.MysqlOptions{
117 | Addr: master,
118 | SlaveAddr: slave,
119 | ShardNum: shardnum,
120 | DB: params.Get("db"),
121 | Username: username,
122 | Password: password,
123 | BatchUpSize: bus,
124 | BatchDelSize: bds,
125 | FlushPeriod: flushPeriod,
126 | MaxIdleConn: maxConn / 2,
127 | MaxOpenConn: maxConn}
128 | kitedb = smq.NewKiteMysql(ctx, options, serverName)
129 | case "file":
130 | params := parsed.Query()
131 | //max message capacity
132 | maxcap := 100 * 10000
133 | if d := params.Get("cap"); len(d) > 0 {
134 | v, e := strconv.ParseInt(d, 10, 32)
135 | if nil != e {
136 | log.Fatalf("NewKiteQServer|INVALID|cap|%s", db)
137 | }
138 | maxcap = int(v)
139 | }
140 |
141 | //file-expiry check period
142 | checkPeriod := 1 * time.Second
143 | if cs := params.Get("checkSeconds"); len(cs) > 0 {
144 | v, e := strconv.ParseInt(cs, 10, 32)
145 | if nil != e {
146 | log.Fatalf("NewKiteQServer|INVALID|checkPeriod|%s", db)
147 | }
148 | checkPeriod = time.Duration(v * int64(checkPeriod))
149 | }
150 |
151 | //batch flush size
152 | fbsize := 1000
153 | if fbs := params.Get("flushBatchSize"); len(fbs) > 0 {
154 | v, e := strconv.ParseInt(fbs, 10, 32)
155 | if nil != e {
156 | log.Fatalf("NewKiteQServer|INVALID|checkPeriod|%s", db)
157 | }
158 | fbsize = int(v)
159 | }
160 |
161 | kitedb = smf.NewKiteFileStore(ctx, parsed.Host+parsed.Path, fbsize, maxcap, checkPeriod)
162 | log.Infof("NewKiteQServer|FileStore|%s|%d|%d", parsed.Host+parsed.Path, maxcap, int(checkPeriod.Seconds()))
163 | case "rocksdb":
164 | options := make(map[string]string, len(parsed.Query()))
165 | for k, v := range parsed.Query() {
166 | if len(v) > 0 {
167 | options[k] = v[0]
168 | }
169 | }
170 | //the storage path comes from host+path
171 | kitedb = rocksdb.NewRocksDbStore(ctx, parsed.Host+parsed.Path, options)
172 | log.Infof("NewKiteQServer|RocksDb|%s|%v", parsed.Host+parsed.Path, options)
173 |
174 | default:
175 | log.Fatalf("NewKiteQServer|UNSUPPORT DB PROTOCOL|%s", db)
176 | }
177 | return kitedb
178 | }
179 |
--------------------------------------------------------------------------------
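A minimal sketch of ParseDB in use, with the memory:// URL from the schema comment above (Start/Stop follow the usage seen in the tests and tools elsewhere in this repo):

    package main

    import (
    	"context"

    	"kiteq/store/parser"
    )

    func main() {
    	kitedb := parser.ParseDB(context.TODO(), "memory://?initcap=1000&maxcap=2000", "kiteq001")
    	kitedb.Start()
    	defer kitedb.Stop()
    }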
/store/parser/kite_store_parser_test.go:
--------------------------------------------------------------------------------
1 | package parser
2 |
3 | import (
4 | "context"
5 | "testing"
6 | )
7 |
8 | func TestParse(t *testing.T) {
9 |
10 | storeUrls := []string{
11 | "mock://",
12 | "memory://?initcap=1000&maxcap=2000",
13 | "mysql://master:3306,slave:3306?db=kite&username=root&password=root&maxConn=500&batchUpdateSize=1000&batchDelSize=1000&flushSeconds=1",
14 | "file:///path?cap=10000000&checkSeconds=60&flushBatchSize=1000",
15 | "rocksdb://./data/rocksdb/",
16 | }
17 | ctx := context.TODO()
18 | for _, store := range storeUrls {
19 | ParseDB(ctx, store, "kiteq001")
20 | }
21 |
22 | }
23 |
--------------------------------------------------------------------------------
/store/rocksdb/rocksdb_store_test.go:
--------------------------------------------------------------------------------
1 | package rocksdb
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "github.com/blackbeans/go-uuid"
7 | "kiteq/store"
8 | "testing"
9 | "time"
10 |
11 | "github.com/blackbeans/kiteq-common/protocol"
12 | "github.com/golang/protobuf/proto"
13 | )
14 |
15 | var rocksstore *RocksDbStore
16 |
17 | func init() {
18 | rocksstore = NewRocksDbStore(context.TODO(), ".", map[string]string{})
19 | rocksstore.Start()
20 | }
21 |
22 | func BenchmarkRocksDbStore_Save(b *testing.B) {
23 | for i := 0; i < b.N; i++ {
24 | //build a message
25 |
26 | msg := &protocol.BytesMessage{}
27 | msg.Header = &protocol.Header{
28 | MessageId: proto.String(uuid.New()),
29 | Topic: proto.String("trade"),
30 | MessageType: proto.String("pay-succ"),
31 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
32 | DeliverLimit: proto.Int32(100),
33 | GroupId: proto.String("go-kite-test"),
34 | Commit: proto.Bool(false),
35 | Fly: proto.Bool(false)}
36 | msg.Body = []byte("hello world")
37 |
38 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
39 | if succ := rocksstore.Save(entity); !succ {
40 | b.FailNow()
41 | }
42 | }
43 |
44 | }
45 |
46 | func TestRocksDbStore_Save(t *testing.T) {
47 |
48 | //build a message
49 | msg := &protocol.BytesMessage{}
50 | msg.Header = &protocol.Header{
51 | MessageId: proto.String("26c03f00665862591f696a980b5ac"),
52 | Topic: proto.String("trade"),
53 | MessageType: proto.String("pay-succ"),
54 | ExpiredTime: proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
55 | DeliverLimit: proto.Int32(100),
56 | GroupId: proto.String("go-kite-test"),
57 | Commit: proto.Bool(false),
58 | Fly: proto.Bool(false)}
59 | msg.Body = []byte("hello world")
60 |
61 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
62 | if succ := rocksstore.Save(entity); !succ || *msg.Header.MessageId != "26c03f00665862591f696a980b5ac" {
63 | t.FailNow()
64 | }
65 |
66 | //query the message back
67 | entity = rocksstore.Query("trade", "26c03f00665862591f696a980b5ac")
68 | if nil == entity || entity.MessageId != "26c03f00665862591f696a980b5ac" {
69 | t.FailNow()
70 | }
71 | t.Logf("%+v", entity.MessageId)
72 | }
73 |
74 | func TestRocksDbStore_AsyncCommit(t *testing.T) {
75 | TestRocksDbStore_Save(t)
76 |
77 | //commit it
78 | if commited := rocksstore.Commit("trade", "26c03f00665862591f696a980b5ac"); !commited {
79 | t.FailNow()
80 | }
81 |
82 | //query the message back
83 | entity := rocksstore.Query("trade", "26c03f00665862591f696a980b5ac")
84 | if nil == entity || entity.MessageId != "26c03f00665862591f696a980b5ac" || !entity.Commit {
85 | t.FailNow()
86 | }
87 | }
88 |
89 | //move to dlq
90 | func TestRocksDbStore_Expired(t *testing.T) {
91 | TestRocksDbStore_Save(t)
92 | if succ := rocksstore.Expired("trade", "26c03f00665862591f696a980b5ac"); !succ {
93 | t.FailNow()
94 | }
95 |
96 | //query the message back
97 | entity := rocksstore.Query("trade", "26c03f00665862591f696a980b5ac")
98 | if nil != entity {
99 | t.FailNow()
100 | }
101 | }
102 |
103 | //update the message
104 | func TestRocksDbStore_AsyncUpdate(t *testing.T) {
105 | TestRocksDbStore_AsyncCommit(t)
106 | msg := &protocol.BytesMessage{}
107 | msg.Header = &protocol.Header{
108 | MessageId: proto.String("26c03f00665862591f696a980b5ac"),
109 | Topic: proto.String("trade"),
110 | MessageType: proto.String("pay-succ"),
111 | ExpiredTime: proto.Int64(time.Now().Add(24 * time.Hour).Unix()),
112 | DeliverLimit: proto.Int32(100),
113 | }
114 |
115 | now := time.Now().Add(20 * time.Minute).Unix()
116 | entity := store.NewMessageEntity(protocol.NewQMessage(msg))
117 | entity.FailGroups = []string{"s-vip-service"}
118 | entity.SuccGroups = []string{"s-profile-service", "s-group-service"}
119 | entity.DeliverCount = 11
120 | entity.NextDeliverTime = now
121 |
122 | //did the update fail?
123 | if succ := rocksstore.AsyncUpdateDeliverResult(entity); !succ {
124 | t.Logf("TestRocksDbStore_AsyncUpdate.AsyncUpdateDeliverResult|Fail|%v", entity)
125 | t.FailNow()
126 | }
127 |
128 | //query the message back
129 | entity = rocksstore.Query("trade", "26c03f00665862591f696a980b5ac")
130 | if nil == entity {
131 | t.Logf("TestRocksDbStore_AsyncUpdate.Query|Fail|26c03f00665862591f696a980b5ac")
132 | t.FailNow()
133 | }
134 | if entity.NextDeliverTime != now ||
135 | entity.FailGroups[0] != "s-vip-service" ||
136 | entity.SuccGroups[0] != "s-profile-service" || entity.SuccGroups[1] != "s-group-service" {
137 | t.Logf("TestRocksDbStore_AsyncUpdate.Query|Fail|%v", entity)
138 | t.FailNow()
139 | }
140 | }
141 |
142 | //
143 | func TestRocksDbStore_PageQueryEntity(t *testing.T) {
144 | TestRocksDbStore_AsyncUpdate(t)
145 |
146 | hasmore, entities := rocksstore.PageQueryEntity("", "", time.Now().Add(30*time.Minute).Unix(), 0, 10)
147 | if hasmore || len(entities) != 1 {
148 | t.Logf("TestRocksDbStore_PageQueryEntity|FAIL|%d", len(entities))
149 | t.FailNow()
150 | }
151 |
152 | rawJson, _ := json.Marshal(entities)
153 | t.Logf("TestRocksDbStore_PageQueryEntity:%s", string(rawJson))
154 |
155 | }
156 |
157 | func TestLoadLocal(t *testing.T) {
158 | _, entities := rocksstore.PageQueryEntity("0", "", time.Now().Add(-30*time.Minute).Unix(), 0, 1000)
159 | t.Logf("%d\n", len(entities))
160 | for _, entity := range entities {
161 | entity := rocksstore.Query(entity.Header.GetTopic(), entity.Header.GetMessageId())
162 | rawJson, _ := json.Marshal(entity)
163 | t.Logf("%s", string(rawJson))
164 | }
165 |
166 | }
167 |
--------------------------------------------------------------------------------
/tools/kite_store_tools.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "flag"
6 | "fmt"
7 | "kiteq/store/parser"
8 | "time"
9 | )
10 |
11 | func main() {
12 | db := flag.String("db", "", "-db")
13 | serverTag := flag.String("serverTag", "", "default")
14 | flag.Parse()
15 | store := parser.ParseDB(context.TODO(), *db, *serverTag)
16 | store.Start()
17 | count := 0
18 | for i := 0; i < 16; i++ {
19 | _, entities := store.PageQueryEntity(fmt.Sprintf("%x", i), *serverTag, time.Now().Unix(), 0, 1000)
20 | count += len(entities)
21 | //fmt.Printf("%s,%v", len(entities), string(entities[0].Body.([]byte)))
22 |
23 | }
24 |
25 | time.Sleep(5 * time.Minute)
26 |
27 | fmt.Printf("----------end|%d", count)
28 | }
29 |
--------------------------------------------------------------------------------