├── .github
│   └── workflows
│       └── coverall.yml
├── .gitignore
├── .travis.yml
├── .vscode
│   └── launch.json
├── LICENSE
├── README.md
├── README_CN.md
├── delayqueue.go
├── delayqueue_test.go
├── events.go
├── example
│   ├── getstarted
│   │   └── main.go
│   └── monitor
│       └── main.go
├── go.mod
├── go.sum
├── monitor.go
├── monitor_test.go
├── publisher.go
├── publisher_test.go
└── wrapper.go

/.github/workflows/coverall.yml:
--------------------------------------------------------------------------------
1 | # This workflow will build a golang project
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
3 | 
4 | name: Go
5 | 
6 | on:
7 |   push:
8 |     branches: [ "master", "github-actions" ]
9 |   pull_request:
10 |     branches: [ "master" ]
11 |   workflow_dispatch:
12 | 
13 | 
14 | jobs:
15 | 
16 |   build:
17 |     runs-on: ubuntu-latest
18 |     steps:
19 |     - uses: actions/checkout@v3
20 | 
21 |     - name: install redis cluster service
22 |       uses: pfapi/redis-cluster-service@v1
23 | 
24 |     - name: start redis cluster
25 |       run: sudo systemctl start redis-cluster
26 | 
27 |     - uses: actions/setup-go@v3
28 |       with:
29 |         go-version: '1.19'
30 |     - run: go test -v -coverprofile=profile.cov ./...
31 | 
32 |     - uses: shogo82148/actions-goveralls@v1
33 |       with:
34 |         path-to-profile: profile.cov
35 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 | 
8 | # Test binary, built with `go test -c`
9 | *.test
10 | 
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 | 
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 | .idea
17 | unack2retry.lua
18 | exmaple
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 | 
3 | go:
4 |   - 1.16.x
5 | before_install:
6 |   - sudo apt-get install redis-server; redis-server &
7 |   - go install github.com/mattn/goveralls@latest
8 | script:
9 |   - $GOPATH/bin/goveralls -service=travis-ci
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 |     // Use IntelliSense to learn about possible attributes.
3 |     // Hover to view descriptions of existing attributes.
4 |     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 |     "version": "0.2.0",
6 |     "configurations": [
7 |         {
8 |             "name": "Launch Package",
9 |             "type": "go",
10 |             "request": "launch",
11 |             "mode": "auto",
12 |             "program": "${fileDirname}"
13 |         },
14 |         {
15 |             "name": "Run Example",
16 |             "type": "go",
17 |             "request": "launch",
18 |             "mode": "auto",
19 |             "program": "${workspaceFolder}/example/getstarted"
20 |         }
21 |     ]
22 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 
9 |       "License" shall mean the terms and conditions for use, reproduction,
10 |       and distribution as defined by Sections 1 through 9 of this document.
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DelayQueue
2 | 
3 | ![license](https://img.shields.io/github/license/HDT3213/delayqueue)
4 | ![Build Status](https://github.com/hdt3213/delayqueue/actions/workflows/coverall.yml/badge.svg)
5 | [![Coverage Status](https://coveralls.io/repos/github/HDT3213/delayqueue/badge.svg?branch=master)](https://coveralls.io/github/HDT3213/delayqueue?branch=master)
6 | [![Go Report Card](https://goreportcard.com/badge/github.com/HDT3213/delayqueue)](https://goreportcard.com/report/github.com/HDT3213/delayqueue)
7 | [![Go Reference](https://pkg.go.dev/badge/github.com/hdt3213/delayqueue.svg)](https://pkg.go.dev/github.com/hdt3213/delayqueue)
8 | 
9 | [中文版](https://github.com/HDT3213/delayqueue/blob/master/README_CN.md)
10 | 
11 | DelayQueue is a message queue supporting delayed/scheduled delivery based on Redis. It is designed to be reliable, scalable and easy to get started with.
12 | 
13 | Core advantages:
14 | 
15 | - Guaranteed at-least-once consumption
16 | - Automatic retry of failed messages
17 | - Works out of the box: nothing to configure, nothing to deploy, a Redis instance is all you need
18 | - Natively adapted to distributed environments; messages are processed concurrently on multiple machines.
19 |   Workers can be added, removed or migrated at any time
20 | - Supports Redis Cluster and the cluster offerings of most cloud service providers, see chapter [Cluster](./README.md#Cluster)
21 | - Easy-to-use monitoring data exporter, see [Monitoring](./README.md#Monitoring)
22 | 
23 | ## Install
24 | 
25 | DelayQueue requires a Go version with module support. Run the following command in your project with go.mod:
26 | 
27 | ```bash
28 | go get github.com/hdt3213/delayqueue
29 | ```
30 | 
31 | > If you are using `github.com/go-redis/redis/v8` please use `go get github.com/hdt3213/delayqueue@redisv8`
32 | 
33 | ## Get Started
34 | 
35 | ```go
36 | package main
37 | 
38 | import (
39 |     "github.com/redis/go-redis/v9"
40 |     "github.com/hdt3213/delayqueue"
41 |     "strconv"
42 |     "time"
43 | )
44 | 
45 | func main() {
46 |     redisCli := redis.NewClient(&redis.Options{
47 |         Addr: "127.0.0.1:6379",
48 |     })
49 |     queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool {
50 |         // callback returns true to confirm successful consumption.
51 |         // If callback returns false or does not return within maxConsumeDuration, DelayQueue will re-deliver this message
52 |         return true
53 |     }).WithConcurrent(4) // set the number of concurrent consumers
54 |     // send delay message
55 |     for i := 0; i < 10; i++ {
56 |         _, err := queue.SendDelayMsgV2(strconv.Itoa(i), time.Hour, delayqueue.WithRetryCount(3))
57 |         if err != nil {
58 |             panic(err)
59 |         }
60 |     }
61 |     // send schedule message
62 |     for i := 0; i < 10; i++ {
63 |         _, err := queue.SendScheduleMsgV2(strconv.Itoa(i), time.Now().Add(time.Hour))
64 |         if err != nil {
65 |             panic(err)
66 |         }
67 |     }
68 |     // start consume
69 |     done := queue.StartConsume()
70 |     <-done
71 | }
72 | ```
73 | 
74 | > `SendScheduleMsgV2` (`SendDelayMsgV2`) is fully compatible with `SendScheduleMsg` (`SendDelayMsg`)
75 | 
76 | > Please note that redis/v8 is not compatible with redis cluster 7.x. [detail](https://github.com/redis/go-redis/issues/2085)
77 | 
78 | > If you are using a redis client other than go-redis, you could wrap your redis client into the [RedisCli](https://pkg.go.dev/github.com/hdt3213/delayqueue#RedisCli) interface
79 | 
80 | > If you don't want to set the callback during initialization, you can use func `WithCallback`.
81 | 
82 | ## Producer-Consumer Distributed Deployment
83 | 
84 | By default, delayqueue instances can be both producers and consumers.
85 | 
86 | If your program only needs producers and the consumers are deployed elsewhere, `delayqueue.NewPublisher` is a good option for you.
87 | 
88 | ```go
89 | func consumer() {
90 |     queue := NewQueue("test", redisCli, cb)
91 |     queue.StartConsume()
92 | }
93 | 
94 | func producer() {
95 |     publisher := NewPublisher("test", redisCli)
96 |     publisher.SendDelayMsg(strconv.Itoa(i), 0)
97 | }
98 | ```
99 | 
100 | ## Intercept/Delete Messages
101 | 
102 | ```go
103 | msg, err := queue.SendScheduleMsgV2(strconv.Itoa(i), time.Now().Add(time.Second))
104 | if err != nil {
105 |     panic(err)
106 | }
107 | result, err := queue.TryIntercept(msg)
108 | if err != nil {
109 |     panic(err)
110 | }
111 | if result.Intercepted {
112 |     println("interception success!")
113 | } else {
114 |     println("interception failed, message has been consumed!")
115 | }
116 | ```
117 | 
118 | `SendScheduleMsgV2` and `SendDelayMsgV2` return a structure containing message tracking information. Pass it to `TryIntercept` to try to intercept the consumption of the message.
119 | 
120 | If the message is pending or waiting to be consumed, the interception will succeed. If the message has been consumed or is awaiting retry, the interception will fail, but TryIntercept will prevent subsequent retries.
121 | 
122 | TryIntercept returns an InterceptResult, whose Intercepted field indicates whether the interception succeeded.
123 | 
124 | ## Options
125 | 
126 | ### Consume Function
127 | 
128 | ```go
129 | func (q *DelayQueue)WithCallback(callback CallbackFunc) *DelayQueue
130 | ```
131 | 
132 | WithCallback sets the callback used by the queue to receive and consume messages.
133 | The callback returns true to confirm successful consumption, false to re-deliver this message.
134 | 
135 | If there is no callback set, StartConsume will panic.
136 | 
137 | ```go
138 | queue := NewQueue("test", redisCli)
139 | queue.WithCallback(func(payload string) bool {
140 |     return true
141 | })
142 | ```
143 | 
144 | ### Logger
145 | 
146 | ```go
147 | func (q *DelayQueue)WithLogger(logger Logger) *DelayQueue
148 | ```
149 | 
150 | WithLogger customizes the logger for the queue. The logger should implement the following interface:
151 | 
152 | ```go
153 | type Logger interface {
154 |     Printf(format string, v ...interface{})
155 | }
156 | ```
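Any type with a matching `Printf` method satisfies this interface (the standard library's `*log.Logger` does). A minimal sketch of a custom implementation — the `prefixLogger` type below is illustrative, not part of the library:

```go
// prefixLogger is a hypothetical Logger implementation that tags every line.
type prefixLogger struct {
    inner *log.Logger
}

func (l *prefixLogger) Printf(format string, v ...interface{}) {
    l.inner.Printf("[delayqueue] "+format, v...)
}

// usage:
// queue.WithLogger(&prefixLogger{inner: log.New(os.Stderr, "", log.LstdFlags)})
```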
157 | 
158 | ### Concurrent
159 | 
160 | ```go
161 | func (q *DelayQueue)WithConcurrent(c uint) *DelayQueue
162 | ```
163 | 
164 | WithConcurrent sets the number of concurrent consumers.
165 | 
166 | ### Polling Interval
167 | 
168 | ```go
169 | func (q *DelayQueue)WithFetchInterval(d time.Duration) *DelayQueue
170 | ```
171 | 
172 | WithFetchInterval customizes the interval at which consumers fetch messages from redis.
173 | 
174 | ### Timeout
175 | 
176 | ```go
177 | func (q *DelayQueue)WithMaxConsumeDuration(d time.Duration) *DelayQueue
178 | ```
179 | 
180 | WithMaxConsumeDuration customizes the max consume duration.
181 | 
182 | If no acknowledgement is received within maxConsumeDuration after message delivery, DelayQueue will try to deliver this
183 | message again.
184 | 
185 | ### Max Processing Limit
186 | 
187 | ```go
188 | func (q *DelayQueue)WithFetchLimit(limit uint) *DelayQueue
189 | ```
190 | 
191 | WithFetchLimit limits the max number of unacked (processing) messages.
192 | 
193 | ### Hash Tag
194 | 
195 | ```go
196 | UseHashTagKey()
197 | ```
198 | 
199 | UseHashTagKey adds hash tags to redis keys to ensure all keys of this queue are allocated in the same hash slot.
200 | 
201 | If you are using Codis/AliyunRedisCluster/TencentCloudRedisCluster, you should add this option to NewQueue: `NewQueue("test", redisCli, cb, UseHashTagKey())`. This option cannot be changed after the DelayQueue has been created.
202 | 
203 | WARNING! CHANGING (adding or removing) this option will cause DelayQueue to fail to read existing data in redis.
204 | 
205 | > see more: https://redis.io/docs/reference/cluster-spec/#hash-tags
206 | 
207 | ### Default Retry Count
208 | 
209 | ```go
210 | WithDefaultRetryCount(count uint) *DelayQueue
211 | ```
212 | 
213 | WithDefaultRetryCount customizes the default max number of retries; it affects all messages in this queue.
214 | 
215 | Use WithRetryCount during DelayQueue.SendScheduleMsg or DelayQueue.SendDelayMsg to specify the retry count of a particular message:
216 | 
217 | ```go
218 | queue.SendDelayMsg(msg, time.Hour, delayqueue.WithRetryCount(3))
219 | ```
220 | 
221 | ### Nack Redelivery Delay
222 | 
223 | ```go
224 | WithNackRedeliveryDelay(d time.Duration) *DelayQueue
225 | ```
226 | 
227 | WithNackRedeliveryDelay customizes the interval between a nack (callback returning false) and redelivery.
228 | However, if consumption exceeds the deadline, the message will be redelivered immediately.
229 | 
230 | ### Script Preload
231 | 
232 | ```go
233 | (q *DelayQueue) WithScriptPreload(flag bool) *DelayQueue
234 | ```
235 | 
236 | WithScriptPreload(true) makes DelayQueue preload scripts and call them using EvalSha to reduce communication costs. WithScriptPreload(false) makes DelayQueue run scripts with the Eval command. Preloading with EvalSha is the default.
237 | 
238 | ### Customize Prefix
239 | 
240 | ```go
241 | queue := delayqueue.NewQueue("example", redisCli, callback, UseCustomPrefix("MyPrefix"))
242 | ```
243 | 
244 | All keys of delayqueue share the same prefix, `dp` by default. If you want to modify the prefix, you could use `UseCustomPrefix`.
245 | 
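The option methods return the queue itself, so they can be chained. A minimal sketch combining several of the options above (the values are illustrative, not recommendations):

```go
queue := delayqueue.NewQueue("example", redisCli, callback).
    WithConcurrent(4).
    WithFetchInterval(500 * time.Millisecond).
    WithMaxConsumeDuration(10 * time.Second).
    WithFetchLimit(100).
    WithDefaultRetryCount(5).
    WithNackRedeliveryDelay(time.Minute)
```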
246 | 
247 | ## Monitoring
248 | 
249 | We provide a Monitor to observe the queue's running status.
250 | 
251 | ```go
252 | monitor := delayqueue.NewMonitor("example", redisCli)
253 | ```
254 | 
255 | Monitor.ListenEvent can register a listener that receives all internal events, so you can use it to implement customized data reporting and metrics.
256 | 
257 | The monitor can receive events from all workers, even if they are running on another server.
258 | 
259 | ```go
260 | type EventListener interface {
261 |     OnEvent(*Event)
262 | }
263 | 
264 | // returns: close function, error
265 | func (m *Monitor) ListenEvent(listener EventListener) (func(), error)
266 | ```
267 | 
268 | The definition of Event can be found in [events.go](./events.go).
269 | 
270 | Besides, we provide a demo that uses EventListener to monitor the production and consumption amounts per minute.
271 | 
272 | The complete demo code can be found in [example/monitor](./example/monitor/main.go).
273 | 
274 | ```go
275 | type MyProfiler struct {
276 |     List  []*Metrics
277 |     Start int64
278 | }
279 | 
280 | func (p *MyProfiler) OnEvent(event *delayqueue.Event) {
281 |     sinceUptime := event.Timestamp - p.Start
282 |     upMinutes := sinceUptime / 60
283 |     if len(p.List) <= int(upMinutes) {
284 |         p.List = append(p.List, &Metrics{})
285 |     }
286 |     current := p.List[upMinutes]
287 |     switch event.Code {
288 |     case delayqueue.NewMessageEvent:
289 |         current.ProduceCount += event.MsgCount
290 |     case delayqueue.DeliveredEvent:
291 |         current.DeliverCount += event.MsgCount
292 |     case delayqueue.AckEvent:
293 |         current.ConsumeCount += event.MsgCount
294 |     case delayqueue.RetryEvent:
295 |         current.RetryCount += event.MsgCount
296 |     case delayqueue.FinalFailedEvent:
297 |         current.FailCount += event.MsgCount
298 |     }
299 | }
300 | 
301 | func main() {
302 |     queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool {
303 |         return true
304 |     })
305 |     start := time.Now()
306 |     // IMPORTANT: EnableReport must be called so the monitor can do its work
307 |     queue.EnableReport()
308 | 
309 |     // setup monitor
310 |     monitor := delayqueue.NewMonitor("example", redisCli)
311 |     listener := &MyProfiler{
312 |         Start: start.Unix(),
313 |     }
314 |     monitor.ListenEvent(listener)
315 | 
316 |     // print metrics every minute
317 |     tick := time.Tick(time.Minute)
318 |     go func() {
319 |         for range tick {
320 |             minutes := len(listener.List) - 1
321 |             fmt.Printf("%d: %#v\n", minutes, listener.List[minutes])
322 |         }
323 |     }()
324 | }
325 | ```
326 | 
327 | The monitor uses redis pub/sub to collect data, so it is important to call `DelayQueue.EnableReport` on all workers to enable event reporting for the monitor.
328 | 
329 | If you do not want to use redis pub/sub, you can use `DelayQueue.ListenEvent` to collect data yourself.
330 | 
331 | Please be advised, `DelayQueue.ListenEvent` can only receive events from the current instance, while the monitor can receive events from all instances in the queue.
332 | 
333 | Once `DelayQueue.ListenEvent` is called, the monitor's listener will be overwritten, unless EnableReport is called again to re-enable the monitor.
334 | 
335 | ### Get Status
336 | 
337 | You can get the pending count, ready count and processing count from the monitor:
338 | 
339 | ```go
340 | func (m *Monitor) GetPendingCount() (int64, error)
341 | ```
342 | 
343 | GetPendingCount returns the number of messages whose delivery time has not yet arrived.
344 | 
345 | ```go
346 | func (m *Monitor) GetReadyCount() (int64, error)
347 | ```
348 | 
349 | GetReadyCount returns the number of messages which have reached their delivery time but have not been delivered yet.
350 | 
351 | ```go
352 | func (m *Monitor) GetProcessingCount() (int64, error)
353 | ```
354 | 
355 | GetProcessingCount returns the number of messages which are being processed.
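For example, these counters could be polled on a fixed schedule and exported to your metrics system; a minimal sketch (the `log.Printf` call is a stand-in for real reporting, and errors are ignored for brevity):

```go
monitor := delayqueue.NewMonitor("example", redisCli)
for range time.Tick(30 * time.Second) {
    pending, _ := monitor.GetPendingCount()
    ready, _ := monitor.GetReadyCount()
    processing, _ := monitor.GetProcessingCount()
    log.Printf("pending=%d ready=%d processing=%d", pending, ready, processing)
}
```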
356 | 
357 | 
358 | ## Cluster
359 | 
360 | If you are using Redis Cluster, please use `NewQueueOnCluster`:
361 | 
362 | ```go
363 | redisCli := redis.NewClusterClient(&redis.ClusterOptions{
364 |     Addrs: []string{
365 |         "127.0.0.1:7000",
366 |         "127.0.0.1:7001",
367 |         "127.0.0.1:7002",
368 |     },
369 | })
370 | callback := func(s string) bool {
371 |     return true
372 | }
373 | queue := NewQueueOnCluster("test", redisCli, callback)
374 | ```
375 | 
376 | If you are using a transparent cluster, such as codis, twemproxy, or the cluster-architecture redis of aliyun or tencentcloud,
377 | just use `NewQueue` and enable hash tags:
378 | 
379 | ```go
380 | redisCli := redis.NewClient(&redis.Options{
381 |     Addr: "127.0.0.1:6379",
382 | })
383 | callback := func(s string) bool {
384 |     return true
385 | }
386 | queue := delayqueue.NewQueue("example", redisCli, callback, UseHashTagKey())
387 | ```
388 | 
389 | ## More Details
390 | 
391 | Here is the complete flowchart:
392 | 
393 | ![](https://s2.loli.net/2022/09/10/tziHmcAX4sFJPN6.png)
394 | 
395 | - pending: A sorted set of messages pending for delivery. `member` is the message id, `score` is the delivery unix timestamp.
396 | - ready: A list of messages ready to be delivered. Workers fetch messages from here.
397 | - unack: A sorted set of messages waiting for ack (successful consumption confirmation), which means the messages here are being processed. `member` is the message id, `score` is the unix timestamp of the processing deadline.
398 | - retry: A list of messages whose processing exceeded the deadline and which are waiting for retry.
399 | - garbage: A set of messages that reached the max retry count and are waiting for cleanup.
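As a concrete illustration of the key layout: a queue created with `NewQueue("example", redisCli, cb)` under the default `dp` prefix stores these structures at `dp:example:pending`, `dp:example:ready`, `dp:example:unack`, `dp:example:retry`, `dp:example:retry:cnt` and `dp:example:garbage`, plus one `dp:example:msg:<uuid>` string key per message; with `UseHashTagKey()` the shared prefix is wrapped in `{...}` so that all of these keys fall into the same hash slot.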
--------------------------------------------------------------------------------
/README_CN.md:
--------------------------------------------------------------------------------
1 | 
2 | ![license](https://img.shields.io/github/license/HDT3213/delayqueue)
3 | ![Build Status](https://github.com/hdt3213/delayqueue/actions/workflows/coverall.yml/badge.svg)
4 | [![Coverage Status](https://coveralls.io/repos/github/HDT3213/delayqueue/badge.svg?branch=master)](https://coveralls.io/github/HDT3213/delayqueue?branch=master)
5 | [![Go Report Card](https://goreportcard.com/badge/github.com/HDT3213/delayqueue)](https://goreportcard.com/report/github.com/HDT3213/delayqueue)
6 | [![Go Reference](https://pkg.go.dev/badge/github.com/hdt3213/delayqueue.svg)](https://pkg.go.dev/github.com/hdt3213/delayqueue)
7 | 
8 | DelayQueue 是使用 Go 语言基于 Redis 实现的支持延时/定时投递的消息队列。
9 | 
10 | DelayQueue 的主要优势:
11 | - 保证至少消费一次(At-Least-Once)
12 | - 自动重试处理失败的消息
13 | - 开箱即用, 无需部署或安装中间件, 只需要一个 Redis 即可工作
14 | - 原生适配分布式环境, 可在多台机器上并发地处理消息. 可以随时增加、减少或迁移 Worker
15 | - 支持各类 Redis 集群, 详见[集群](./README_CN.md#集群)
16 | - 简单易用的监控数据导出,详见[监控](./README_CN.md#监控)
17 | 
18 | ## 安装
19 | 
20 | 在启用了 go mod 的项目中运行下列命令即可完成安装:
21 | 
22 | ```shell
23 | go get github.com/hdt3213/delayqueue
24 | ```
25 | 
26 | > 如果您仍在使用 `github.com/go-redis/redis/v8` 请安装 `go get github.com/hdt3213/delayqueue@redisv8`
27 | 
28 | ## 开始使用
29 | 
30 | ```go
31 | package main
32 | 
33 | import (
34 |     "github.com/redis/go-redis/v9"
35 |     "github.com/hdt3213/delayqueue"
36 |     "strconv"
37 |     "time"
38 | )
39 | 
40 | func main() {
41 |     redisCli := redis.NewClient(&redis.Options{
42 |         Addr: "127.0.0.1:6379",
43 |     })
44 |     queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool {
45 |         // return true 表示成功消费
46 |         // 如果返回了 false 或者在 maxConsumeDuration 时限内没有返回则视为消费失败,DelayQueue 会重新投递消息
47 |         return true
48 |     }).WithConcurrent(4) // 设置消费者并发数
49 |     // 发送延时投递消息
50 |     for i := 0; i < 10; i++ {
51 |         _, err := queue.SendDelayMsgV2(strconv.Itoa(i), time.Hour, delayqueue.WithRetryCount(3))
52 |         if err != nil {
53 |             panic(err)
54 |         }
55 |     }
56 |     // 发送定时投递消息
57 |     for i := 0; i < 10; i++ {
58 |         _, err := queue.SendScheduleMsgV2(strconv.Itoa(i), time.Now().Add(time.Hour))
59 |         if err != nil {
60 |             panic(err)
61 |         }
62 |     }
63 |     // 开始消费
64 |     done := queue.StartConsume()
65 |     <-done // 如需等待消费者关闭,监听 done 即可
66 | }
67 | ```
68 | 
69 | > 如果您仍在使用 redis/v8 请使用 redisv8 分支: `go get github.com/hdt3213/delayqueue@redisv8`
70 | 
71 | > 请注意 redis/v8 无法兼容 redis cluster 7.x; [详情](https://github.com/redis/go-redis/issues/2085)
72 | 
73 | > 如果您在使用其他的 redis 客户端, 可以将其包装到 [RedisCli](https://pkg.go.dev/github.com/hdt3213/delayqueue#RedisCli) 接口中
74 | 
75 | > 如果您不想在初始化时设置 callback, 您可以使用 WithCallback 函数
76 | 
77 | ## 分开部署生产者和消费者
78 | 
79 | 默认情况下 delayqueue 实例既可以做生产者也可以做消费者。如果某些程序只需要发送消息,消费者部署在其它程序中,那么可以使用 `delayqueue.NewPublisher`.
80 | 
81 | ```go
82 | func consumer() {
83 |     queue := NewQueue("test", redisCli, cb)
84 |     queue.StartConsume()
85 | }
86 | 
87 | func producer() {
88 |     publisher := NewPublisher("test", redisCli)
89 |     publisher.SendDelayMsg(strconv.Itoa(i), 0)
90 | }
91 | ```
92 | 
93 | ## 拦截消息/删除消息
94 | 
95 | ```go
96 | msg, err := queue.SendScheduleMsgV2(strconv.Itoa(i), time.Now().Add(time.Second))
97 | if err != nil {
98 |     panic(err)
99 | }
100 | result, err := queue.TryIntercept(msg)
101 | if err != nil {
102 |     panic(err)
103 | }
104 | if result.Intercepted {
105 |     println("拦截成功!")
106 | } else {
107 |     println("拦截失败,消息已被消费!")
108 | }
109 | ```
110 | 
111 | `SendScheduleMsgV2` 和 `SendDelayMsgV2` 返回一个可以跟踪消息的结构体。将其传递给 `TryIntercept` 就可以尝试拦截消息的消费。
112 | 
113 | 如果消息处于待处理状态(pending)或等待消费(ready),则可以成功拦截。如果消息已被消费或正在等待重试,则无法拦截,但 TryIntercept 将阻止后续重试。
114 | 
115 | TryIntercept 返回一个 InterceptResult,其中的 Intercepted 字段表示拦截是否成功。
116 | 
117 | ## 选项
118 | 
119 | ### 回调函数
120 | 
121 | ```go
122 | func (q *DelayQueue)WithCallback(callback CallbackFunc) *DelayQueue
123 | ```
124 | 
125 | callback 函数负责接收并消费消息。callback 返回 true 确认已成功消费,返回 false 表示处理失败,需要重试。
126 | 
127 | 如果没有设置 callback, 调用 StartConsume 时会 panic。
128 | 
129 | ```go
130 | queue := NewQueue("test", redisCli)
131 | queue.WithCallback(func(payload string) bool {
132 |     return true
133 | })
134 | ```
135 | 
136 | ### 日志
137 | 
138 | ```go
139 | func (q *DelayQueue)WithLogger(logger Logger) *DelayQueue
140 | ```
141 | 
142 | 为 DelayQueue 设置 logger, logger 需要实现下面的接口:
143 | 
144 | ```go
145 | type Logger interface {
146 |     Printf(format string, v ...interface{})
147 | }
148 | ```
149 | 
150 | ### 并发数
151 | 
152 | ```go
153 | func (q *DelayQueue)WithConcurrent(c uint) *DelayQueue
154 | ```
155 | 
156 | 设置消费者并发数
157 | 
158 | ### 轮询间隔
159 | 
160 | ```go
161 | func (q *DelayQueue)WithFetchInterval(d time.Duration) *DelayQueue
162 | ```
163 | 
164 | 设置消费者从 Redis 拉取消息的时间间隔
165 | 
166 | ### 消费超时
167 | 
168 | ```go
169 | func (q *DelayQueue)WithMaxConsumeDuration(d time.Duration) *DelayQueue
170 | ```
171 | 
172 | 设置最长消费时间。若拉取消息后超出 MaxConsumeDuration 时限仍未返回 ACK 则认为消费失败,DelayQueue 会重新投递此消息。
173 | 
174 | ### 最大处理中消息数
175 | 
176 | ```go
177 | func (q *DelayQueue)WithFetchLimit(limit uint) *DelayQueue
178 | ```
179 | 
180 | 单个消费者正在处理中的消息数不会超过 FetchLimit
181 | 
182 | ### 启用 HashTag
183 | 
184 | ```go
185 | UseHashTagKey()
186 | ```
187 | 
188 | UseHashTagKey() 会在 Redis Key 上添加 hash tag 确保同一个队列的所有 Key 分布在同一个哈希槽中。
189 | 
190 | 如果您正在使用 Codis/阿里云/腾讯云等 Redis 集群,请在 NewQueue 时添加这个选项:`NewQueue("test", redisCli, cb, UseHashTagKey())`。UseHashTagKey 选项在队列创建后禁止修改。
191 | 
192 | **注意:** 修改(添加或移除)此选项会导致无法访问 Redis 中已有的数据。
193 | 
194 | see more: https://redis.io/docs/reference/cluster-spec/#hash-tags
195 | 
196 | ### 设置默认重试次数
197 | 
198 | ```go
199 | WithDefaultRetryCount(count uint)
200 | ```
201 | 
202 | 设置队列中消息的默认重试次数。
203 | 
204 | 在调用 DelayQueue.SendScheduleMsg 或 DelayQueue.SendDelayMsg 发送消息时,可以调用 WithRetryCount 为这条消息单独指定重试次数。
205 | 
206 | ```go
207 | queue.SendDelayMsg(msg, time.Hour, delayqueue.WithRetryCount(3))
208 | ```
209 | 
210 | ### 设置 nack 后重试间隔
211 | 
212 | ```go
213 | WithNackRedeliveryDelay(d time.Duration) *DelayQueue
214 | ```
215 | 
216 | WithNackRedeliveryDelay 可以设置 nack (callback 函数返回 false) 之后到重新投递的间隔。
217 | 但是如果消费超时,消息会被立即重新投递。
218 | 
219 | ### 预加载脚本
220 | 
221 | ```go
222 | (q *DelayQueue) WithScriptPreload(flag bool) *DelayQueue
223 | ```
224 | 
225 | WithScriptPreload(true) 会让 delayqueue 预上传脚本并使用 EvalSha 命令调用脚本,WithScriptPreload(false) 会让 delayqueue 使用 Eval 命令运行脚本。
226 | 
227 | ScriptPreload 默认值为 true。
228 | 
229 | ### 自定义前缀
230 | 
231 | ```go
232 | queue := delayqueue.NewQueue("example", redisCli, callback, UseCustomPrefix("MyPrefix"))
233 | ```
234 | 
235 | delayqueue 中所有的 key 都有相同的前缀,默认情况下前缀为 `dp`。如果你需要自定义前缀可以使用 UseCustomPrefix 函数。
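上述选项方法均返回队列自身,因此可以链式调用。下面是一个组合使用多个选项的示意写法(参数值仅作演示):

```go
queue := delayqueue.NewQueue("example", redisCli, callback).
    WithConcurrent(4).                         // 4 个并发消费者
    WithFetchInterval(500 * time.Millisecond). // 每 500ms 拉取一次消息
    WithMaxConsumeDuration(10 * time.Second).  // 最长消费时间 10s
    WithDefaultRetryCount(5).                  // 默认重试 5 次
    WithNackRedeliveryDelay(time.Minute)       // nack 后 1 分钟重新投递
```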
236 | 
237 | ## 监控
238 | 
239 | 我们提供了 `Monitor` 来监控运行数据:
240 | 
241 | ```go
242 | monitor := delayqueue.NewMonitor("example", redisCli)
243 | ```
244 | 
245 | 我们可以使用 `Monitor.ListenEvent` 注册一个可以收到队列中所有事件的监听器, 从而实现自定义的事件上报和指标监控。
246 | 
247 | Monitor 可以收到所有 Worker 的事件, 包括运行在其它服务器上的 Worker。
248 | 
249 | ```go
250 | type EventListener interface {
251 |     OnEvent(*Event)
252 | }
253 | 
254 | // returns: close function, error
255 | func (m *Monitor) ListenEvent(listener EventListener) (func(), error)
256 | ```
257 | 
258 | Event 的定义在 [events.go](./events.go) 中。
259 | 
260 | 此外,我们提供了一个 Demo,它会每分钟显示一次队列中产生和处理的消息数量。
261 | 
262 | Demo 完整代码在 [example/monitor](./example/monitor/main.go) 中。
263 | 
264 | ```go
265 | type MyProfiler struct {
266 |     List  []*Metrics
267 |     Start int64
268 | }
269 | 
270 | func (p *MyProfiler) OnEvent(event *delayqueue.Event) {
271 |     sinceUptime := event.Timestamp - p.Start
272 |     upMinutes := sinceUptime / 60
273 |     if len(p.List) <= int(upMinutes) {
274 |         p.List = append(p.List, &Metrics{})
275 |     }
276 |     current := p.List[upMinutes]
277 |     switch event.Code {
278 |     case delayqueue.NewMessageEvent:
279 |         current.ProduceCount += event.MsgCount
280 |     case delayqueue.DeliveredEvent:
281 |         current.DeliverCount += event.MsgCount
282 |     case delayqueue.AckEvent:
283 |         current.ConsumeCount += event.MsgCount
284 |     case delayqueue.RetryEvent:
285 |         current.RetryCount += event.MsgCount
286 |     case delayqueue.FinalFailedEvent:
287 |         current.FailCount += event.MsgCount
288 |     }
289 | }
290 | 
291 | func main() {
292 |     queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool {
293 |         return true
294 |     })
295 |     start := time.Now()
296 |     // 注意: 使用 Monitor 前必须调用 EnableReport
297 |     queue.EnableReport()
298 | 
299 |     // setup monitor
300 |     monitor := delayqueue.NewMonitor("example", redisCli)
301 |     listener := &MyProfiler{
302 |         Start: start.Unix(),
303 |     }
304 |     monitor.ListenEvent(listener)
305 | 
306 |     // 每分钟打印一次报告
307 |     tick := time.Tick(time.Minute)
308 |     go func() {
309 |         for range tick {
310 |             minutes := len(listener.List) - 1
311 |             fmt.Printf("%d: %#v\n", minutes, listener.List[minutes])
312 |         }
313 |     }()
314 | }
315 | ```
316 | 
317 | Monitor 使用 redis 的发布订阅功能来收集数据,使用 Monitor 前必须在所有 Worker 处调用 `EnableReport` 来启用上报。
318 | 
319 | 如果你不想使用 redis pub/sub, 可以调用 `DelayQueue.ListenEvent` 来直接收集数据。请注意,`DelayQueue.ListenEvent` 只能收到当前 Worker 的事件, 而 Monitor 可以收到所有 Worker 的事件。
320 | 
321 | 另外,`DelayQueue.ListenEvent` 会覆盖掉 Monitor 的监听器,再次调用 `EnableReport` 后 Monitor 才能恢复工作。
322 | 
323 | ### 获得状态信息
324 | 
325 | Monitor 也可以直接获得一些队列的状态信息。
326 | 
327 | ```go
328 | func (m *Monitor) GetPendingCount() (int64, error)
329 | ```
330 | 
331 | 返回未到投递时间的消息数。
332 | 
333 | ```go
334 | func (m *Monitor) GetReadyCount() (int64, error)
335 | ```
336 | 
337 | 返回已到投递时间但尚未发给 Worker 的消息数。
338 | 
339 | ```go
340 | func (m *Monitor) GetProcessingCount() (int64, error)
341 | ```
342 | 
343 | 返回 Worker 正在处理中的消息数。
344 | 
345 | ## 集群
346 | 
347 | 如果需要在 Redis Cluster 上工作, 请使用 `NewQueueOnCluster`:
348 | 
349 | ```go
350 | redisCli := redis.NewClusterClient(&redis.ClusterOptions{
351 |     Addrs: []string{
352 |         "127.0.0.1:7000",
353 |         "127.0.0.1:7001",
354 |         "127.0.0.1:7002",
355 |     },
356 | })
357 | callback := func(s string) bool {
358 |     return true
359 | }
360 | queue := NewQueueOnCluster("test", redisCli, callback)
361 | ```
362 | 
363 | 如果是阿里云,腾讯云的 Redis 集群版或 codis, twemproxy 这类透明式的集群, 使用 `NewQueue` 并启用 UseHashTagKey() 即可:
364 | 
365 | ```go
366 | redisCli := redis.NewClient(&redis.Options{
367 |     Addr: "127.0.0.1:6379",
368 | })
369 | callback := func(s string) bool {
370 |     return true
371 | }
372 | queue := delayqueue.NewQueue("example", redisCli, callback, UseHashTagKey())
373 | ```
374 | 
375 | ## 更多细节
376 | 
377 | 完整流程如图所示:
378 | 
379 | ![](https://s2.loli.net/2022/09/10/tziHmcAX4sFJPN6.png)
380 | 
381 | 
382 | 整个消息队列中一共有 7 个 Redis 数据结构:
383 | 
384 | - pending: 有序集合类型,存储未到投递时间的消息。member 为消息 ID、score 为投递时间。
385 | - ready: 列表类型,存储已到投递时间的消息。element 为消息 ID。
386 | - unack: 有序集合类型, 存储已投递但未确认成功消费的消息 ID。member 为消息 ID、score 为处理超时时间, 超出这个时间还未 ack 的消息会被重试。
387 | - retry: 列表类型,存储处理超时后等待重试的消息 ID。element 为消息 ID。
388 | - garbage: 集合类型,用于暂存已达重试上限的消息 ID。后面介绍 unack2retry 操作时会说明为什么需要 garbage 结构。
389 | - msgKey: 为了避免两条内容完全相同的消息造成意外的影响,我们将每条消息放到一个字符串类型的键中,并分配一个 UUID 作为它的唯一标识。其它数据结构中只存储 UUID 而不存储完整的消息内容。每个 msg 拥有一个独立的 key 而不是将所有消息放到一个哈希表中是为了利用 TTL 机制避免泄漏。
390 | - retryCountKey: 哈希表类型,键为消息 ID, 值为剩余的重试次数。
391 | 
392 | 如上图所示整个消息队列中一共涉及 6 个操作:
393 | 
394 | - send: 发送一条新消息。首先存储消息内容和重试次数,并将消息 ID 放入 pending 中。
395 | - pending2ready: 将已到投递时间的消息从 pending 移动到 ready 中
396 | - ready2unack: 将一条等待投递的消息从 ready (或 retry) 移动到 unack 中,并把消息发送给消费者。
397 | - unack2retry: 将 unack 中未到重试次数上限的消息转移到 retry 中,已到重试次数上限的转移到 garbage 中等待后续清理。
398 | - ack: 从 unack 中删除处理成功的消息并清理它的 msgKey 和 retryCount 数据。
399 | - garbageCollect: 清理已到最大重试次数的消息。
400 | 
401 | 
402 | 
403 | 
--------------------------------------------------------------------------------
/delayqueue.go:
--------------------------------------------------------------------------------
1 | package delayqueue
2 | 
3 | import (
4 |     "errors"
5 |     "fmt"
6 |     "log"
7 |     "strconv"
8 |     "strings"
9 |     "sync"
10 |     "sync/atomic"
11 |     "time"
12 | 
13 |     "github.com/google/uuid"
14 | )
15 | 
16 | // DelayQueue is a message queue supporting delayed/scheduled delivery based on redis
17 | type DelayQueue struct {
18 |     // name for this Queue. Make sure the name is unique in the redis database
19 |     name          string
20 |     redisCli      RedisCli
21 |     cb            func(string) bool
22 |     pendingKey    string // sorted set: message id -> delivery time
23 |     readyKey      string // list
24 |     unAckKey      string // sorted set: message id -> retry time
25 |     retryKey      string // list
26 |     retryCountKey string // hash: message id -> remain retry count
27 |     garbageKey    string // set: message id
28 |     useHashTag    bool
29 |     ticker        *time.Ticker
30 |     logger        Logger
31 |     close         chan struct{}
32 |     running       int32
33 |     maxConsumeDuration time.Duration // default 5 seconds
34 |     msgTTL             time.Duration // default 1 hour
35 |     defaultRetryCount  uint          // default 3
36 |     fetchInterval      time.Duration // default 1 second
37 |     fetchLimit         uint          // default no limit
38 |     fetchCount         int32         // actually running task number
39 |     concurrent         uint          // default 1, executed serially
40 |     sha1map            map[string]string
41 |     sha1mapMu          *sync.RWMutex
42 |     scriptPreload      bool
43 |     // for batch consume
44 |     consumeBuffer chan string
45 | 
46 |     eventListener       EventListener
47 |     nackRedeliveryDelay time.Duration
48 | }
49 | 
50 | // NilErr represents redis nil
51 | var NilErr = errors.New("nil")
52 | 
53 | // RedisCli is an abstraction of the redis client, covering only the required commands, not all commands
54 | type RedisCli interface {
55 |     // Eval sends a lua script to redis
56 |     // args should be string, integer or float
57 |     // returns string, int64, []interface{} (elements can be string or int64)
58 |     Eval(script string, keys []string, args []interface{}) (interface{}, error)
59 |     Set(key string, value string, expiration time.Duration) error
60 |     // Get represents redis command GET
61 |     // please return NilErr when there is no such key in redis
62 |     Get(key string) (string, error)
63 |     Del(keys []string) error
64 |     HSet(key string, field string, value string) error
65 |     HDel(key string, fields []string) error
66 |     SMembers(key string) ([]string, error)
67 |     SRem(key string, members []string) error
68 |     ZAdd(key string, values map[string]float64) error
69 |     ZRem(key string, fields []string) (int64, error)
70 |     ZCard(key string) (int64, error)
71 |     ZScore(key string, member string) (float64, error)
72 |     LLen(key string) (int64, error)
73 |     LRem(key string, count int64, value string) (int64, error)
74 | 
75 |     // Publish used for monitor only
76 |     Publish(channel string, payload string) error
77 |     // Subscribe used for monitor only
78 |     // returns: payload channel, subscription closer, error; the subscription closer should close the payload channel as well
79 |     Subscribe(channel string) (payloads <-chan string, close func(), err error)
80 | 
81 |     // ScriptLoad calls the `script load` command
82 |     ScriptLoad(script string) (string, error)
83 |     // EvalSha runs preloaded scripts
84 |     // If the script has not been preloaded, please return an error whose message starts with "NOSCRIPT"
85 |     EvalSha(sha1 string, keys []string, args []interface{}) (interface{}, error)
86 | }
87 | 
88 | // Logger is an abstraction of the logging system
89 | type Logger interface {
90 |     Printf(format string, v ...interface{})
91 | }
92 | 
93 | type hashTagKeyOpt int
94 | type prefixOpt string
95 | 
96 | // CallbackFunc receives and consumes messages
97 | // returns true to confirm successfully consumed, false to re-deliver this message
98 | type CallbackFunc = func(string) bool
99 | 
100 | // UseHashTagKey adds hash tags to redis keys to ensure all keys of this queue are allocated in the same hash slot.
101 | // If you are using Codis/AliyunRedisCluster/TencentCloudRedisCluster, add this option to NewQueue
102 | // WARNING! Changing (adding or removing) this option will cause DelayQueue to fail to read existing data in redis
103 | // see more: https://redis.io/docs/reference/cluster-spec/#hash-tags
104 | func UseHashTagKey() interface{} {
105 |     return hashTagKeyOpt(1)
106 | }
107 | 
108 | // UseCustomPrefix customizes the key prefix used instead of the default prefix "dp"
109 | func UseCustomPrefix(prefix string) interface{} {
110 |     return prefixOpt(prefix)
111 | }
112 | 
113 | // NewQueue0 creates a new queue, use DelayQueue.StartConsume to consume or DelayQueue.SendScheduleMsg to publish message
114 | // callback returns true to confirm successful consumption. If callback returns false or does not return within maxConsumeDuration, DelayQueue will re-deliver this message
115 | func NewQueue0(name string, cli RedisCli, opts ...interface{}) *DelayQueue {
116 |     if name == "" {
117 |         panic("name is required")
118 |     }
119 |     if cli == nil {
120 |         panic("cli is required")
121 |     }
122 |     prefix := "dp"
123 |     useHashTag := false
124 |     var callback CallbackFunc = nil
125 |     for _, opt := range opts {
126 |         switch o := opt.(type) {
127 |         case hashTagKeyOpt:
128 |             useHashTag = true
129 |         case prefixOpt:
130 |             prefix = string(o)
131 |         case CallbackFunc:
132 |             callback = o
133 |         }
134 |     }
135 |     keyPrefix := prefix + ":" + name
136 |     if useHashTag {
137 |         keyPrefix = "{" + keyPrefix + "}"
138 |     }
139 |     return &DelayQueue{
140 |         name:          name,
141 |         redisCli:      cli,
142 |         cb:            callback,
143 |         pendingKey:    keyPrefix + ":pending",
144 |         readyKey:      keyPrefix + ":ready",
145 |         unAckKey:      keyPrefix + ":unack",
146 |         retryKey:      keyPrefix + ":retry",
147 |         retryCountKey: keyPrefix + ":retry:cnt",
148 |         garbageKey:    keyPrefix + ":garbage",
149 |         useHashTag:    useHashTag,
150 |         close:         nil,
151 |         maxConsumeDuration: 5 * time.Second,
152 |         msgTTL:             time.Hour,
153 |         logger:             log.Default(),
154 |         defaultRetryCount:  3,
155 |         fetchInterval:      time.Second,
156 |         concurrent:         1,
157 |         sha1map:            make(map[string]string),
158 |         sha1mapMu:          &sync.RWMutex{},
159 |         scriptPreload:      true,
160 |     }
161 | }
162 | 
163 | // WithCallback sets the callback for the queue to receive and consume messages
164 | // callback returns true to confirm successfully consumed, false to re-deliver this message
165 | func (q *DelayQueue) WithCallback(callback CallbackFunc) *DelayQueue {
166 |     q.cb = callback
167 |     return q
168 | }
169 | 
170 | // WithLogger customizes the logger for the queue
171 | func (q *DelayQueue) WithLogger(logger Logger) *DelayQueue {
172 |     q.logger = logger
173 |     return q
174 | }
175 | 
176 | // WithFetchInterval customizes the interval at which the consumer fetches messages from redis
177 | func (q *DelayQueue) WithFetchInterval(d time.Duration) *DelayQueue {
178 |     q.fetchInterval = d
179 |     return q
180 | }
181 | 
182 | // WithScriptPreload uses the `script load` command to preload scripts into redis
183 | func (q *DelayQueue) WithScriptPreload(flag bool) *DelayQueue {
184 |     q.scriptPreload = flag
185 |     return q
186 | }
187 | 
188 | // WithMaxConsumeDuration customizes the max consume duration
189 | // If no acknowledgement is received within maxConsumeDuration after message delivery, DelayQueue will try to deliver this message again
190 | func (q *DelayQueue) WithMaxConsumeDuration(d time.Duration) *DelayQueue {
191 |     q.maxConsumeDuration = d
192 |     return q
193 | }
194 | 
195 | // WithFetchLimit limits the max number of processing messages, 0 means no limit
196 | func (q *DelayQueue) WithFetchLimit(limit uint) *DelayQueue {
197 |     q.fetchLimit = limit
198 |     return q
199 | }
200 | 
201 | // WithConcurrent sets the number of concurrent consumers
202 | func (q *DelayQueue) WithConcurrent(c uint) *DelayQueue {
203 |     if c == 0 {
204 |         panic("concurrent cannot be 0")
205 |     }
206 |     q.assertNotRunning()
207 |     q.concurrent = c
208 |     return q
209 | }
210 | 
211 | // WithDefaultRetryCount customizes the default max number of retries; it affects all messages in this queue
212 | // use WithRetryCount during DelayQueue.SendScheduleMsg or DelayQueue.SendDelayMsg to specify the retry count of a particular message
213 | func (q *DelayQueue) WithDefaultRetryCount(count uint) *DelayQueue {
214 |     q.defaultRetryCount = count
215 |     return q
216 | }
217 | 
218 | // WithNackRedeliveryDelay customizes the interval between a nack (callback returns false) and redelivery
219 | // If consumption exceeded the deadline, the message will be redelivered immediately
220 | func (q *DelayQueue) WithNackRedeliveryDelay(d time.Duration) *DelayQueue {
221 |     q.nackRedeliveryDelay = d
222 |     return q
223 | }
224 | 
225 | func (q *DelayQueue) genMsgKey(idStr string) string {
226 |     if q.useHashTag {
227 |         return "{dp:" + q.name + "}" + ":msg:" + idStr
228 |     }
229 |     return "dp:" + q.name + ":msg:" + idStr
230 | }
231 | 
232 | type retryCountOpt int
233 | 
234 | // WithRetryCount sets the retry count for a msg
235 | // example: queue.SendDelayMsg(payload, duration, delayqueue.WithRetryCount(3))
236 | func WithRetryCount(count int) interface{} {
237 |     return retryCountOpt(count)
238 | }
239 | 
240 | type msgTTLOpt time.Duration
241 | 
242 | // WithMsgTTL sets the ttl for a msg
243 | // example: queue.SendDelayMsg(payload, duration, delayqueue.WithMsgTTL(time.Hour))
244 | func WithMsgTTL(d time.Duration) interface{} {
245 |     return msgTTLOpt(d)
246 | }
247 | 
248 | // MessageInfo stores information to trace a message
249 | type MessageInfo struct {
250 |     id string
251 | }
252 | 
253 | func (msg *MessageInfo) ID() string {
254 |     return msg.id
255 | }
256 | 
257 | const (
258 |     StatePending    = "pending"
259 |     StateReady      = "ready"
260 |     StateReadyRetry = "ready_to_retry"
261 |     StateConsuming  = "consuming"
262 |     StateUnknown    = "unknown"
263 | )
264 | 
265 | // SendScheduleMsgV2 submits a message delivered at given time
266 | func (q *DelayQueue) SendScheduleMsgV2(payload string, t time.Time, opts ...interface{}) (*MessageInfo, error) {
267 |     // parse options
268 |     retryCount := q.defaultRetryCount
269 |     for _, opt := range opts {
270 |         switch o := opt.(type) {
271 |         case retryCountOpt:
272 |             retryCount = uint(o)
273 |         case msgTTLOpt:
274 |             q.msgTTL = time.Duration(o)
275 |         }
276 |     }
277 |     // generate id
278 |     idStr := uuid.Must(uuid.NewRandom()).String()
279 |     now := time.Now()
280 |     // store msg
281 |     msgTTL := t.Sub(now) + q.msgTTL // delivery + q.msgTTL
282 |     err := q.redisCli.Set(q.genMsgKey(idStr), payload, msgTTL)
283 |     if err != nil {
284 |         return nil, fmt.Errorf("store msg failed: %v", err)
285 |     }
286 |     // store retry count
287 |     err = q.redisCli.HSet(q.retryCountKey, idStr, strconv.Itoa(int(retryCount)))
288 |     if err != nil {
289 |         return nil, fmt.Errorf("store retry count failed: %v", err)
290 |     }
291 |     // put to pending
292 |     err = q.redisCli.ZAdd(q.pendingKey, map[string]float64{idStr: float64(t.Unix())})
293 |     if err != nil {
294 |         return nil, fmt.Errorf("push to pending failed: %v", err)
295 |     }
296 |     q.reportEvent(NewMessageEvent, 1)
297 |     return &MessageInfo{
298 |         id: idStr,
299 |     }, nil
300 | }
301 | 
302 | // SendDelayMsgV2 submits a message delivered after the given duration
303 | func (q *DelayQueue) SendDelayMsgV2(payload string, duration time.Duration, opts ...interface{}) (*MessageInfo, error) {
304 |     t := time.Now().Add(duration)
305 |     return q.SendScheduleMsgV2(payload, t, opts...)
306 | }
307 | 
308 | // SendScheduleMsg submits a message delivered at given time
309 | // It is compatible with SendScheduleMsgV2, but does not return MessageInfo
310 | func (q *DelayQueue) SendScheduleMsg(payload string, t time.Time, opts ...interface{}) error {
311 |     _, err := q.SendScheduleMsgV2(payload, t, opts...)
312 |     return err
313 | }
314 | 
315 | // SendDelayMsg submits a message delivered after given duration
316 | // It is compatible with SendDelayMsgV2, but does not return MessageInfo
317 | func (q *DelayQueue) SendDelayMsg(payload string, duration time.Duration, opts ...interface{}) error {
318 |     t := time.Now().Add(duration)
319 |     return q.SendScheduleMsg(payload, t, opts...)
320 | }
321 | 
322 | type InterceptResult struct {
323 |     Intercepted bool
324 |     State       string
325 | }
326 | 
327 | // TryIntercept tries to intercept a message
328 | func (q *DelayQueue) TryIntercept(msg *MessageInfo) (*InterceptResult, error) {
329 |     id := msg.ID()
330 |     // try to intercept at ready
331 |     removed, err := q.redisCli.LRem(q.readyKey, 0, id)
332 |     if err != nil {
333 |         q.logger.Printf("intercept %s from ready failed: %v", id, err)
334 |     }
335 |     if removed > 0 {
336 |         _ = q.redisCli.Del([]string{q.genMsgKey(id)})
337 |         _ = q.redisCli.HDel(q.retryCountKey, []string{id})
338 |         return &InterceptResult{
339 |             Intercepted: true,
340 |             State:       StateReady,
341 |         }, nil
342 |     }
343 |     // try to intercept at pending
344 |     removed, err = q.redisCli.ZRem(q.pendingKey, []string{id})
345 |     if err != nil {
346 |         q.logger.Printf("intercept %s from pending failed: %v", id, err)
347 |     }
348 |     if removed > 0 {
349 |         _ = q.redisCli.Del([]string{q.genMsgKey(id)})
350 |         _ = q.redisCli.HDel(q.retryCountKey, []string{id})
351 |         return &InterceptResult{
352 |             Intercepted: true,
353 |             State:       StatePending,
354 |         }, nil
355 |     }
356 |     // the message may be being consumed or may have been successfully consumed
357 |     // if the message has been successfully consumed, the following actions will cause nothing
358 |     // if the message is being consumed, the following actions will prevent it from being retried
359 |     q.redisCli.HDel(q.retryCountKey, []string{id})
360 |     q.redisCli.LRem(q.retryKey, 0, id)
361 | 
362 |     return &InterceptResult{
363 |         Intercepted: false,
364 |         State:       StateUnknown,
365 |     }, nil
366 | }
367 | 
368 | func (q *DelayQueue) loadScript(script string) (string, error) {
369 |     sha1, err := q.redisCli.ScriptLoad(script)
370 |     if err != nil {
371 |         return "", err
372 |     }
373 |     q.sha1mapMu.Lock()
374 |     q.sha1map[script] = sha1
375 |     q.sha1mapMu.Unlock()
376 |     return sha1, nil
377 | }
378 | 
379 | func (q *DelayQueue) eval(script string, keys []string, args []interface{}) (interface{}, error) {
380 |     if !q.scriptPreload {
381 |         return q.redisCli.Eval(script, keys, args)
382 |     }
383 |     var err error
384 |     q.sha1mapMu.RLock()
385 |     sha1, ok := q.sha1map[script]
386 |     q.sha1mapMu.RUnlock()
387 |     if !ok {
388 |         sha1, err = q.loadScript(script)
389 |         if err != nil {
390 |             return nil, err
391 |         }
392 |     }
393 |     result, err := q.redisCli.EvalSha(sha1, keys, args)
394 |     if err == nil {
395 |         return result, err
396 |     }
397 |     // script not loaded, reload it
398 |     // It is possible to access a node in the cluster that has no pre-loaded scripts.
399 |     if strings.HasPrefix(err.Error(), "NOSCRIPT") {
400 |         sha1, err = q.loadScript(script)
401 |         if err != nil {
402 |             return nil, err
403 |         }
404 |         // try again
405 |         result, err = q.redisCli.EvalSha(sha1, keys, args)
406 |     }
407 |     return result, err
408 | }
409 | 
410 | // pending2ReadyScript atomically moves messages from pending to ready
411 | // keys: pendingKey, readyKey
412 | // argv: currentTime
413 | // returns: ready message number
414 | const pending2ReadyScript = `
415 | local msgs = redis.call('ZRangeByScore', KEYS[1], '0', ARGV[1]) -- get ready msg
416 | if (#msgs == 0) then return end
417 | local args2 = {} -- keys to push into ready
418 | for _,v in ipairs(msgs) do
419 |     table.insert(args2, v)
420 |     if (#args2 == 4000) then
421 |         redis.call('LPush', KEYS[2], unpack(args2))
422 |         args2 = {}
423 |     end
424 | end
425 | if (#args2 > 0) then
426 |     redis.call('LPush', KEYS[2], unpack(args2))
427 | end
428 | redis.call('ZRemRangeByScore', KEYS[1], '0', ARGV[1]) -- remove msgs from pending
429 | return #msgs
430 | `
431 | 
432 | func (q *DelayQueue) pending2Ready() error {
433 |     now := time.Now().Unix()
434 |     keys := []string{q.pendingKey, q.readyKey}
435 |     raw, err := q.eval(pending2ReadyScript, keys, []interface{}{now})
436 |     if err != nil && err != NilErr {
437 |         return fmt.Errorf("pending2ReadyScript failed: %v", err)
438 |     }
439 |     count, ok := raw.(int64)
440 |     if ok {
441 |         q.reportEvent(ReadyEvent, int(count))
442 |     }
443 |     return nil
444 | }
445 | 
446 | // ready2UnackScript atomically moves messages from ready to unack
447 | // keys: readyKey/retryKey, unackKey
448 | // argv: retryTime
449 | const ready2UnackScript = `
450 | local msg = redis.call('RPop', KEYS[1])
451 | if (not msg) then return end
452 | redis.call('ZAdd', KEYS[2], ARGV[1], msg)
453 | return msg
454 | `
455 | 
456 | func (q *DelayQueue) ready2Unack() (string, error) {
457 |     retryTime := time.Now().Add(q.maxConsumeDuration).Unix()
458 |     keys := []string{q.readyKey, q.unAckKey}
459 |     ret, err := q.eval(ready2UnackScript, keys, []interface{}{retryTime})
460 |     if err == NilErr {
461 |         return "", err
462 |     }
463 |     if err != nil {
464 |         return "", fmt.Errorf("ready2UnackScript failed: %v", err)
465 |     }
466 |     str, ok := ret.(string)
467 |     if !ok {
468 |         return "", fmt.Errorf("illegal result: %#v", ret)
469 |     }
470 |     q.reportEvent(DeliveredEvent, 1)
471 |     return str, nil
472 | }
473 | 
474 | func (q *DelayQueue) retry2Unack() (string, error) {
475 |     retryTime := time.Now().Add(q.maxConsumeDuration).Unix()
476 |     keys := []string{q.retryKey, q.unAckKey}
477 |     ret, err := q.eval(ready2UnackScript, keys, []interface{}{retryTime}) // the script only reads ARGV[1]
478 |     if err == NilErr {
479 |         return "", NilErr
480 |     }
481 |     if err != nil {
482 |         return "", fmt.Errorf("ready2UnackScript failed: %v", err)
483 |     }
484 |     str, ok := ret.(string)
485 |     if !ok {
486 |         return "", fmt.Errorf("illegal result: %#v", ret)
487 |     }
488 |     return str, nil
489 | }
490 | 
491 | func (q *DelayQueue) callback(idStr string) error {
492 |     payload, err := q.redisCli.Get(q.genMsgKey(idStr))
493 |     if err == NilErr {
494 |         return nil
495 |     }
496 |     if err != nil {
497 |         // may be an IO error, surface it to the caller
498 |         return fmt.Errorf("get message payload failed: %v", err)
499 |     }
500 |     ack := q.cb(payload)
501 |     if ack {
502 |         err = q.ack(idStr)
503 |     } else {
504 |         err = q.nack(idStr)
505 |     }
506 |     return err
507 | }
508 | 
509 | func (q *DelayQueue) ack(idStr string) error {
510 |     atomic.AddInt32(&q.fetchCount, -1)
511 |     _, err := q.redisCli.ZRem(q.unAckKey, []string{idStr})
512 |     if err != nil {
513 |         return fmt.Errorf("remove from unack failed: %v", err)
514 |     }
515 |     // msg key has ttl, ignore result of delete
516 |     _ = q.redisCli.Del([]string{q.genMsgKey(idStr)})
517 |     _ = q.redisCli.HDel(q.retryCountKey, []string{idStr})
518 |     q.reportEvent(AckEvent, 1)
519 |     return nil
520 | }
521 | 
522 | // updateZSetScoreScript updates the score of a zset member only if the member still exists
523 | // KEYS[1]: zset
524 | // ARGV[1]: score
525 | // ARGV[2]: member
526 | const updateZSetScoreScript = `
527 | if redis.call('zrank', KEYS[1], ARGV[2]) ~= false then -- a nil reply converts to false in Lua, not nil
528 |     return redis.call('zadd', KEYS[1], ARGV[1], ARGV[2])
529 | else
530 |     return 0
531 | end
532 | `
533 | 
534 | func (q *DelayQueue) updateZSetScore(key string, score float64, member string) error {
535 |     scoreStr := strconv.FormatFloat(score, 'f', -1, 64)
536 |     _, err := q.eval(updateZSetScoreScript, []string{key}, []interface{}{scoreStr, member})
537 |     return err
538 | }
539 | 
540 | func (q *DelayQueue) nack(idStr string) error {
541 |     atomic.AddInt32(&q.fetchCount, -1)
542 |     retryTime := float64(time.Now().Add(q.nackRedeliveryDelay).Unix())
543 |     // if the message has not reached its consumption deadline (it is still in unAckKey), update its redelivery time
544 |     err := q.updateZSetScore(q.unAckKey, retryTime, idStr)
545 |     if err != nil {
546 |         return fmt.Errorf("negative ack failed: %v", err)
547 |     }
548 |     q.reportEvent(NackEvent, 1)
549 |     return nil
550 | }
551 | 
552 | // unack2RetryScript atomically moves messages from unack to retry if their remaining retry count is greater than 0,
553 | // and moves messages from unack to garbage if their retry count has reached 0
554 | // Because DelayQueue cannot determine which messages are garbage before evaluating unack2RetryScript, it cannot pass their keys to redisCli.Eval
555 | // Therefore unack2RetryScript moves garbage messages to garbageKey instead of deleting them directly
556 | // keys: unackKey, retryCountKey, retryKey, garbageKey
557 | // argv: currentTime
558 | // returns: {retryMsgs, failMsgs}
559 | const unack2RetryScript = `
560 | local unack2retry = function(msgs)
561 |     local retryCounts = redis.call('HMGet', KEYS[2], unpack(msgs)) -- get retry count
562 |     local retryMsgs = 0
563 |     local failMsgs = 0
564 |     for i,v in ipairs(retryCounts) do
565 |         local k = msgs[i]
566 |         if v ~= false and v ~= nil and v ~= '' and tonumber(v) > 0 then
567 |             redis.call("HIncrBy", KEYS[2], k, -1) -- reduce retry count
568 |             redis.call("LPush", KEYS[3], k) -- add to retry
569 |             retryMsgs = retryMsgs + 1
570 |         else
571 |             redis.call("HDel", KEYS[2], k) -- del retry count
572 |             redis.call("SAdd", KEYS[4], k) -- add to garbage
573 |             failMsgs = failMsgs + 1
574 |         end
575 |     end
576 |     return retryMsgs, failMsgs
577 | end
578 | 
579 | local retryMsgs = 0
580 | local failMsgs = 0
581 | local msgs = redis.call('ZRangeByScore', KEYS[1], '0', ARGV[1]) -- get retry msg
582 | if (#msgs == 0) then return end
583 | if #msgs < 4000 then
584 |     local d1, d2 = unack2retry(msgs)
585 |     retryMsgs = retryMsgs + d1
586 |     failMsgs = failMsgs + d2
587 | else
588 |     local buf = {}
589 |     for _,v in ipairs(msgs) do
590 |         table.insert(buf, v)
591 |         if #buf == 4000 then
592 |             local d1, d2 = unack2retry(buf)
593 | 
retryMsgs = retryMsgs + d1 594 | failMsgs = failMsgs + d2 595 | buf = {} 596 | end 597 | end 598 | if (#buf > 0) then 599 | local d1, d2 = unack2retry(buf) 600 | retryMsgs = retryMsgs + d1 601 | failMsgs = failMsgs + d2 602 | end 603 | end 604 | redis.call('ZRemRangeByScore', KEYS[1], '0', ARGV[1]) -- remove msgs from unack 605 | return {retryMsgs, failMsgs} 606 | ` 607 | 608 | func (q *DelayQueue) unack2Retry() error { 609 | keys := []string{q.unAckKey, q.retryCountKey, q.retryKey, q.garbageKey} 610 | now := time.Now() 611 | raw, err := q.eval(unack2RetryScript, keys, []interface{}{now.Unix()}) 612 | if err != nil && err != NilErr { 613 | return fmt.Errorf("unack to retry script failed: %v", err) 614 | } 615 | infos, ok := raw.([]interface{}) 616 | if ok && len(infos) == 2 { 617 | retryCount, ok := infos[0].(int64) 618 | if ok { 619 | q.reportEvent(RetryEvent, int(retryCount)) 620 | } 621 | failCount, ok := infos[1].(int64) 622 | if ok { 623 | q.reportEvent(FinalFailedEvent, int(failCount)) 624 | } 625 | } 626 | return nil 627 | } 628 | 629 | func (q *DelayQueue) garbageCollect() error { 630 | msgIds, err := q.redisCli.SMembers(q.garbageKey) 631 | if err != nil { 632 | return fmt.Errorf("smembers failed: %v", err) 633 | } 634 | if len(msgIds) == 0 { 635 | return nil 636 | } 637 | // allow concurrent clean 638 | msgKeys := make([]string, 0, len(msgIds)) 639 | for _, idStr := range msgIds { 640 | msgKeys = append(msgKeys, q.genMsgKey(idStr)) 641 | } 642 | err = q.redisCli.Del(msgKeys) 643 | if err != nil && err != NilErr { 644 | return fmt.Errorf("del msgs failed: %v", err) 645 | } 646 | err = q.redisCli.SRem(q.garbageKey, msgIds) 647 | if err != nil && err != NilErr { 648 | return fmt.Errorf("remove from garbage key failed: %v", err) 649 | } 650 | return nil 651 | } 652 | 653 | func (q *DelayQueue) beforeConsume() ([]string, error) { 654 | // pending to ready 655 | err := q.pending2Ready() 656 | if err != nil { 657 | return nil, err 658 | } 659 | // ready2Unack 660 | // prioritize new message consumption to avoid avalanches 661 | ids := make([]string, 0, q.fetchLimit) 662 | var fetchCount int32 663 | for { 664 | fetchCount = atomic.LoadInt32(&q.fetchCount) 665 | if q.fetchLimit > 0 && fetchCount >= int32(q.fetchLimit) { 666 | break 667 | } 668 | idStr, err := q.ready2Unack() 669 | if err == NilErr { // consumed all 670 | break 671 | } 672 | if err != nil { 673 | return nil, err 674 | } 675 | ids = append(ids, idStr) 676 | atomic.AddInt32(&q.fetchCount, 1) 677 | } 678 | // retry2Unack 679 | if fetchCount < int32(q.fetchLimit) || q.fetchLimit == 0 { 680 | for { 681 | fetchCount = atomic.LoadInt32(&q.fetchCount) 682 | if q.fetchLimit > 0 && fetchCount >= int32(q.fetchLimit) { 683 | break 684 | } 685 | idStr, err := q.retry2Unack() 686 | if err == NilErr { // consumed all 687 | break 688 | } 689 | if err != nil { 690 | return nil, err 691 | } 692 | ids = append(ids, idStr) 693 | atomic.AddInt32(&q.fetchCount, 1) 694 | } 695 | } 696 | return ids, nil 697 | } 698 | 699 | func (q *DelayQueue) afterConsume() error { 700 | // unack to retry 701 | err := q.unack2Retry() 702 | if err != nil { 703 | return err 704 | } 705 | err = q.garbageCollect() 706 | if err != nil { 707 | return err 708 | } 709 | return nil 710 | } 711 | 712 | func (q *DelayQueue) setRunning() { 713 | atomic.StoreInt32(&q.running, 1) 714 | } 715 | 716 | func (q *DelayQueue) setNotRunning() { 717 | atomic.StoreInt32(&q.running, 0) 718 | } 719 | 720 | func (q *DelayQueue) assertNotRunning() { 721 | running := 
atomic.LoadInt32(&q.running)
722 |     if running > 0 {
723 |         panic("operation cannot be performed while the queue is running")
724 |     }
725 | }
726 | 
727 | func (q *DelayQueue) goWithRecover(fn func()) {
728 |     go func() {
729 |         defer func() {
730 |             if err := recover(); err != nil {
731 |                 q.logger.Printf("panic: %v\n", err)
732 |             }
733 |         }()
734 |         fn()
735 |     }()
736 | }
737 | 
738 | // StartConsume creates a goroutine to consume messages from DelayQueue
739 | // use `<-done` to wait for the consumer to stop
740 | // If there is no callback set, StartConsume will panic
741 | func (q *DelayQueue) StartConsume() (done <-chan struct{}) {
742 |     if q.cb == nil {
743 |         panic("this instance has no callback")
744 |     }
745 |     q.close = make(chan struct{}, 1)
746 |     q.setRunning()
747 |     q.ticker = time.NewTicker(q.fetchInterval)
748 |     q.consumeBuffer = make(chan string, q.fetchLimit)
749 |     done0 := make(chan struct{})
750 |     // start workers
751 |     for i := 0; i < int(q.concurrent); i++ {
752 |         q.goWithRecover(func() {
753 |             for id := range q.consumeBuffer {
754 |                 q.callback(id)
755 |                 q.afterConsume()
756 |             }
757 |         })
758 |     }
759 |     // start main loop
760 |     go func() {
761 |     tickerLoop:
762 |         for {
763 |             select {
764 |             case <-q.ticker.C:
765 |                 ids, err := q.beforeConsume()
766 |                 if err != nil {
767 |                     q.logger.Printf("consume error: %v", err)
768 |                 }
769 |                 q.goWithRecover(func() {
770 |                     for _, id := range ids {
771 |                         q.consumeBuffer <- id
772 |                     }
773 |                 })
774 |             case <-q.close:
775 |                 break tickerLoop
776 |             }
777 |         }
778 |         close(done0)
779 |     }()
780 |     return done0
781 | }
782 | 
783 | // StopConsume stops the consumer goroutine
784 | func (q *DelayQueue) StopConsume() {
785 |     close(q.close)
786 |     q.setNotRunning()
787 |     if q.ticker != nil {
788 |         q.ticker.Stop()
789 |     }
790 | }
791 | 
792 | // GetPendingCount returns the number of pending messages
793 | func (q *DelayQueue) GetPendingCount() (int64, error) {
794 |     return q.redisCli.ZCard(q.pendingKey)
795 | }
796 | 
797 | // GetReadyCount returns the number of messages whose delivery time has arrived but which have not been delivered yet
798 | func (q *DelayQueue) GetReadyCount() (int64, error) {
799 |     return q.redisCli.LLen(q.readyKey)
800 | }
801 | 
802 | // GetProcessingCount returns the number of messages which are being processed
803 | func (q *DelayQueue) GetProcessingCount() (int64, error) {
804 |     return q.redisCli.ZCard(q.unAckKey)
805 | }
806 | 
807 | // EventListener is called when events occur
808 | // This Listener can be used to monitor running status
809 | type EventListener interface {
810 |     // OnEvent will be called when events occur
811 |     OnEvent(*Event)
812 | }
813 | 
814 | // ListenEvent registers a listener which will be called when events occur,
815 | // so it can be used to monitor running status
816 | //
817 | // But it can ONLY receive events from the CURRENT INSTANCE;
818 | // if you want to listen to all events of a queue, use Monitor.ListenEvent instead
819 | //
820 | // There can be AT MOST ONE EventListener in a DelayQueue instance.
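//
// An illustrative registration sketch (MyListener is a hypothetical type
// implementing EventListener):
//
//     queue.ListenEvent(&MyListener{})
//     defer queue.DisableListener()
//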
821 | // If you are using a customized listener, Monitor will stop working
822 | func (q *DelayQueue) ListenEvent(listener EventListener) {
823 |     q.eventListener = listener
824 | }
825 | 
826 | // DisableListener stops reporting events to the EventListener
827 | func (q *DelayQueue) DisableListener() {
828 |     q.eventListener = nil
829 | }
830 | 
831 | func (q *DelayQueue) reportEvent(code int, count int) {
832 |     listener := q.eventListener // eventListener may be changed during running
833 |     if listener != nil && count > 0 {
834 |         event := &Event{
835 |             Code:      code,
836 |             Timestamp: time.Now().Unix(),
837 |             MsgCount:  count,
838 |         }
839 |         listener.OnEvent(event)
840 |     }
841 | }
842 | 
843 | // pubsubListener receives events and reports them through redis pubsub for monitoring
844 | type pubsubListener struct {
845 |     redisCli   RedisCli
846 |     reportChan string
847 | }
848 | 
849 | func genReportChannel(name string) string {
850 |     return "dq:" + name + ":reportEvents"
851 | }
852 | 
853 | // EnableReport enables reporting to monitor
854 | func (q *DelayQueue) EnableReport() {
855 |     reportChan := genReportChannel(q.name)
856 |     q.ListenEvent(&pubsubListener{
857 |         redisCli:   q.redisCli,
858 |         reportChan: reportChan,
859 |     })
860 | }
861 | 
862 | // DisableReport stops reporting to monitor
863 | func (q *DelayQueue) DisableReport() {
864 |     q.DisableListener()
865 | }
866 | 
867 | func (l *pubsubListener) OnEvent(event *Event) {
868 |     payload := encodeEvent(event)
869 |     l.redisCli.Publish(l.reportChan, payload)
870 | }
871 | 
--------------------------------------------------------------------------------
/delayqueue_test.go:
--------------------------------------------------------------------------------
1 | package delayqueue
2 | 
3 | import (
4 |     "context"
5 |     "log"
6 |     "os"
7 |     "strconv"
8 |     "strings"
9 |     "sync"
10 |     "testing"
11 |     "time"
12 | 
13 |     "github.com/redis/go-redis/v9"
14 | )
15 | 
16 | func TestDelayQueue_consume(t *testing.T) {
17 |     redisCli := redis.NewClient(&redis.Options{
18 |         Addr: "127.0.0.1:6379",
19 |     })
20 |     redisCli.FlushDB(context.Background())
21 |     size := 1000
22 |     retryCount := 3
23 |     deliveryCount := make(map[string]int)
24 |     cb := func(s string) bool {
25 |         deliveryCount[s]++
26 |         i, _ := strconv.ParseInt(s, 10, 64)
27 |         return i%2 == 0
28 |     }
29 |     queue := NewQueue("test", redisCli, UseHashTagKey()).
30 |         WithCallback(cb).
31 |         WithFetchInterval(time.Millisecond * 50).
32 |         WithMaxConsumeDuration(0).
33 |         WithLogger(log.New(os.Stderr, "[DelayQueue]", log.LstdFlags)).
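        // (added note, not in the original test: WithFetchLimit bounds how many
        // unacked messages this instance may hold at once; see beforeConsume)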
34 | WithFetchLimit(2) 35 | 36 | for i := 0; i < size; i++ { 37 | err := queue.SendDelayMsg(strconv.Itoa(i), 0, WithRetryCount(retryCount), WithMsgTTL(time.Hour)) 38 | if err != nil { 39 | t.Error(err) 40 | } 41 | } 42 | for i := 0; i < 10*size; i++ { 43 | ids, err := queue.beforeConsume() 44 | if err != nil { 45 | t.Errorf("consume error: %v", err) 46 | return 47 | } 48 | for _, id := range ids { 49 | queue.callback(id) 50 | } 51 | queue.afterConsume() 52 | } 53 | for k, v := range deliveryCount { 54 | i, _ := strconv.ParseInt(k, 10, 64) 55 | if i%2 == 0 { 56 | if v != 1 { 57 | t.Errorf("expect 1 delivery, actual %d", v) 58 | } 59 | } else { 60 | if v != retryCount+1 { 61 | t.Errorf("expect %d delivery, actual %d", retryCount+1, v) 62 | } 63 | } 64 | } 65 | } 66 | 67 | func TestDelayQueueOnCluster(t *testing.T) { 68 | redisCli := redis.NewClusterClient(&redis.ClusterOptions{ 69 | Addrs: []string{ 70 | "127.0.0.1:7000", 71 | "127.0.0.1:7001", 72 | "127.0.0.1:7002", 73 | }, 74 | }) 75 | redisCli.FlushDB(context.Background()) 76 | size := 1000 77 | succeed := 0 78 | cb := func(s string) bool { 79 | succeed++ 80 | return true 81 | } 82 | queue := NewQueueOnCluster("test", redisCli, cb). 83 | WithFetchInterval(time.Millisecond * 50). 84 | WithMaxConsumeDuration(0). 85 | WithLogger(log.New(os.Stderr, "[DelayQueue]", log.LstdFlags)). 86 | WithFetchLimit(2). 87 | WithConcurrent(1) 88 | 89 | for i := 0; i < size; i++ { 90 | err := queue.SendDelayMsg(strconv.Itoa(i), 0) 91 | if err != nil { 92 | t.Error(err) 93 | } 94 | } 95 | for i := 0; i < 10*size; i++ { 96 | ids, err := queue.beforeConsume() 97 | if err != nil { 98 | t.Errorf("consume error: %v", err) 99 | return 100 | } 101 | for _, id := range ids { 102 | queue.callback(id) 103 | } 104 | queue.afterConsume() 105 | } 106 | queue.garbageCollect() 107 | if succeed != size { 108 | t.Error("msg not consumed") 109 | } 110 | } 111 | 112 | func TestDelayQueue_ConcurrentConsume(t *testing.T) { 113 | redisCli := redis.NewClient(&redis.Options{ 114 | Addr: "127.0.0.1:6379", 115 | }) 116 | redisCli.FlushDB(context.Background()) 117 | size := 101 // use a prime number may found some hidden bugs ^_^ 118 | retryCount := 3 119 | mu := sync.Mutex{} 120 | deliveryCount := make(map[string]int) 121 | cb := func(s string) bool { 122 | mu.Lock() 123 | deliveryCount[s]++ 124 | mu.Unlock() 125 | return true 126 | } 127 | queue := NewQueue("test", redisCli, cb). 128 | WithFetchInterval(time.Millisecond * 50). 129 | WithMaxConsumeDuration(0). 130 | WithLogger(log.New(os.Stderr, "[DelayQueue]", log.LstdFlags)). 131 | WithConcurrent(4). 132 | WithScriptPreload(false) 133 | 134 | for i := 0; i < size; i++ { 135 | err := queue.SendDelayMsg(strconv.Itoa(i), 0, WithRetryCount(retryCount), WithMsgTTL(time.Hour)) 136 | if err != nil { 137 | t.Error(err) 138 | } 139 | } 140 | for i := 0; i < 2*size; i++ { 141 | ids, err := queue.beforeConsume() 142 | if err != nil { 143 | t.Errorf("consume error: %v", err) 144 | return 145 | } 146 | for _, id := range ids { 147 | queue.callback(id) 148 | } 149 | queue.afterConsume() 150 | } 151 | for k, v := range deliveryCount { 152 | if v != 1 { 153 | t.Errorf("expect 1 delivery, actual %d. 
key: %s", v, k) 154 | } 155 | } 156 | } 157 | 158 | func TestDelayQueue_StopConsume(t *testing.T) { 159 | size := 10 160 | redisCli := redis.NewClient(&redis.Options{ 161 | Addr: "127.0.0.1:6379", 162 | }) 163 | redisCli.FlushDB(context.Background()) 164 | var queue *DelayQueue 165 | var received int 166 | queue = NewQueue("test", redisCli, func(s string) bool { 167 | received++ 168 | if received == size { 169 | queue.StopConsume() 170 | t.Log("send stop signal") 171 | } 172 | return true 173 | }).WithDefaultRetryCount(1) 174 | for i := 0; i < size; i++ { 175 | err := queue.SendDelayMsg(strconv.Itoa(i), 0) 176 | if err != nil { 177 | t.Errorf("send message failed: %v", err) 178 | } 179 | } 180 | done := queue.StartConsume() 181 | <-done 182 | } 183 | 184 | func TestDelayQueue_AsyncConsume(t *testing.T) { 185 | size := 10 186 | redisCli := redis.NewClient(&redis.Options{ 187 | Addr: "127.0.0.1:6379", 188 | }) 189 | redisCli.FlushDB(context.Background()) 190 | var queue *DelayQueue 191 | var received int 192 | queue = NewQueue("exampleAsync", redisCli, func(payload string) bool { 193 | println(payload) 194 | received++ 195 | if received == size { 196 | queue.StopConsume() 197 | t.Log("send stop signal") 198 | } 199 | return true 200 | }).WithDefaultRetryCount(1) 201 | 202 | // send schedule message 203 | go func() { 204 | for { 205 | time.Sleep(time.Millisecond * 500) 206 | err := queue.SendScheduleMsg(time.Now().String(), time.Now().Add(time.Second*1)) 207 | if err != nil { 208 | panic(err) 209 | } 210 | } 211 | }() 212 | // start consume 213 | done := queue.StartConsume() 214 | <-done 215 | } 216 | 217 | func TestDelayQueue_Massive_Backlog(t *testing.T) { 218 | redisCli := redis.NewClient(&redis.Options{ 219 | Addr: "127.0.0.1:6379", 220 | }) 221 | redisCli.FlushDB(context.Background()) 222 | size := 20000 223 | retryCount := 3 224 | cb := func(s string) bool { 225 | return false 226 | } 227 | q := NewQueue("test", redisCli, cb). 228 | WithFetchInterval(time.Millisecond * 50). 229 | WithMaxConsumeDuration(0). 230 | WithLogger(log.New(os.Stderr, "[DelayQueue]", log.LstdFlags)). 
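        // (added note, not in the original test: WithFetchLimit(0) disables the
        // in-flight cap, so the whole backlog can be fetched in one cycle)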
231 |         WithFetchLimit(0)
232 | 
233 |     for i := 0; i < size; i++ {
234 |         err := q.SendDelayMsg(strconv.Itoa(i), 0, WithRetryCount(retryCount))
235 |         if err != nil {
236 |             t.Error(err)
237 |         }
238 |     }
239 |     err := q.pending2Ready()
240 |     if err != nil {
241 |         t.Error(err)
242 |         return
243 |     }
244 |     // consume
245 |     ids := make([]string, 0, q.fetchLimit)
246 |     for {
247 |         idStr, err := q.ready2Unack()
248 |         if err == NilErr { // consumed all
249 |             break
250 |         }
251 |         if err != nil {
252 |             t.Error(err)
253 |             return
254 |         }
255 |         ids = append(ids, idStr)
256 |         if q.fetchLimit > 0 && len(ids) >= int(q.fetchLimit) {
257 |             break
258 |         }
259 |     }
260 |     err = q.unack2Retry()
261 |     if err != nil {
262 |         t.Error(err)
263 |         return
264 |     }
265 |     unackCard, err := redisCli.ZCard(context.Background(), q.unAckKey).Result()
266 |     if err != nil {
267 |         t.Error(err)
268 |         return
269 |     }
270 |     if unackCard != 0 {
271 |         t.Error("unack card should be 0")
272 |         return
273 |     }
274 |     retryLen, err := redisCli.LLen(context.Background(), q.retryKey).Result()
275 |     if err != nil {
276 |         t.Error(err)
277 |         return
278 |     }
279 |     if int(retryLen) != size {
280 |         t.Errorf("retry length should be %d", size)
281 |         return
282 |     }
283 | }
284 | 
285 | // consumption should stop after the actual fetch count hits the fetch limit
286 | func TestDelayQueue_FetchLimit(t *testing.T) {
287 |     redisCli := redis.NewClient(&redis.Options{
288 |         Addr: "127.0.0.1:6379",
289 |     })
290 |     redisCli.FlushDB(context.Background())
291 |     fetchLimit := 10
292 |     cb := func(s string) bool {
293 |         return true
294 |     }
295 |     queue := NewQueue("test", redisCli, UseHashTagKey()).
296 |         WithCallback(cb).
297 |         WithFetchInterval(time.Millisecond * 50).
298 |         WithMaxConsumeDuration(0).
299 |         WithLogger(log.New(os.Stderr, "[DelayQueue]", log.LstdFlags)).
300 |         WithFetchLimit(uint(fetchLimit))
301 | 
302 |     for i := 0; i < fetchLimit; i++ {
303 |         err := queue.SendDelayMsg(strconv.Itoa(i), 0, WithMsgTTL(time.Hour))
304 |         if err != nil {
305 |             t.Error(err)
306 |         }
307 |     }
308 |     // fetch but do not consume
309 |     ids1, err := queue.beforeConsume()
310 |     if err != nil {
311 |         t.Errorf("consume error: %v", err)
312 |         return
313 |     }
314 |     // send new messages
315 |     for i := 0; i < fetchLimit; i++ {
316 |         err := queue.SendDelayMsg(strconv.Itoa(i), 0, WithMsgTTL(time.Hour))
317 |         if err != nil {
318 |             t.Error(err)
319 |         }
320 |     }
321 |     ids2, err := queue.beforeConsume()
322 |     if err != nil {
323 |         t.Errorf("consume error: %v", err)
324 |         return
325 |     }
326 |     if len(ids2) > 0 {
327 |         t.Error("should get 0 messages after hitting the fetch limit")
328 |     }
329 | 
330 |     // consume
331 |     for _, id := range ids1 {
332 |         queue.callback(id)
333 |     }
334 |     queue.afterConsume()
335 | 
336 |     // resume
337 |     ids3, err := queue.beforeConsume()
338 |     if err != nil {
339 |         t.Errorf("consume error: %v", err)
340 |         return
341 |     }
342 |     if len(ids3) == 0 {
343 |         t.Error("should get some messages after consumption")
344 |     }
345 | }
346 | 
347 | func TestDelayQueue_NackRedeliveryDelay(t *testing.T) {
348 |     redisCli := redis.NewClient(&redis.Options{
349 |         Addr: "127.0.0.1:6379",
350 |     })
351 |     redisCli.FlushDB(context.Background())
352 |     cb := func(s string) bool {
353 |         return false
354 |     }
355 |     redeliveryDelay := time.Second
356 |     queue := NewQueue("test", redisCli, UseHashTagKey()).
357 |         WithCallback(cb).
358 |         WithFetchInterval(time.Millisecond * 50).
359 |         WithLogger(log.New(os.Stderr, "[DelayQueue]", log.LstdFlags)).
360 |         WithDefaultRetryCount(3).
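        // (added note, not in the original test: a nack pushes the message's
        // unack deadline forward by redeliveryDelay instead of retrying at once)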
361 |         WithNackRedeliveryDelay(redeliveryDelay)
362 | 
363 |     err := queue.SendScheduleMsg("foobar", time.Now().Add(-time.Minute))
364 |     if err != nil {
365 |         t.Error(err)
366 |     }
367 |     // first consumption; the callback will fail
368 |     ids, err := queue.beforeConsume()
369 |     if err != nil {
370 |         t.Errorf("consume error: %v", err)
371 |         return
372 |     }
373 |     for _, id := range ids {
374 |         queue.callback(id)
375 |     }
376 |     queue.afterConsume()
377 | 
378 |     // must not be redelivered immediately
379 |     ids, err = queue.beforeConsume()
380 |     if err != nil {
381 |         t.Errorf("consume error: %v", err)
382 |         return
383 |     }
384 |     if len(ids) != 0 {
385 |         t.Errorf("should not redeliver immediately")
386 |         return
387 |     }
388 | 
389 |     time.Sleep(redeliveryDelay)
390 |     queue.afterConsume()
391 |     ids, err = queue.beforeConsume()
392 |     if err != nil {
393 |         t.Errorf("consume error: %v", err)
394 |         return
395 |     }
396 |     if len(ids) != 1 {
397 |         t.Errorf("should redeliver after the delay")
398 |         return
399 |     }
400 | }
401 | 
402 | func TestDelayQueue_TryIntercept(t *testing.T) {
403 |     redisCli := redis.NewClient(&redis.Options{
404 |         Addr: "127.0.0.1:6379",
405 |     })
406 |     redisCli.FlushDB(context.Background())
407 |     cb := func(s string) bool {
408 |         return false
409 |     }
410 |     queue := NewQueue("test", redisCli, cb).
411 |         WithDefaultRetryCount(3).
412 |         WithNackRedeliveryDelay(time.Minute)
413 | 
414 |     // intercept pending message
415 |     msg, err := queue.SendDelayMsgV2("foobar", time.Minute)
416 |     if err != nil {
417 |         t.Error(err)
418 |         return
419 |     }
420 |     result, err := queue.TryIntercept(msg)
421 |     if err != nil {
422 |         t.Error(err)
423 |         return
424 |     }
425 |     if !result.Intercepted {
426 |         t.Error("expect intercepted")
427 |     }
428 | 
429 |     // intercept ready message
430 |     msg, err = queue.SendScheduleMsgV2("foobar2", time.Now().Add(-time.Minute))
431 |     if err != nil {
432 |         t.Error(err)
433 |         return
434 |     }
435 |     err = queue.pending2Ready()
436 |     if err != nil {
437 |         t.Error(err)
438 |         return
439 |     }
440 |     result, err = queue.TryIntercept(msg)
441 |     if err != nil {
442 |         t.Error(err)
443 |         return
444 |     }
445 |     if !result.Intercepted {
446 |         t.Error("expect intercepted")
447 |     }
448 | 
449 |     // prevent from retry
450 |     msg, err = queue.SendScheduleMsgV2("foobar3", time.Now().Add(-time.Minute))
451 |     if err != nil {
452 |         t.Error(err)
453 |         return
454 |     }
455 |     ids, err := queue.beforeConsume()
456 |     if err != nil {
457 |         t.Errorf("consume error: %v", err)
458 |         return
459 |     }
460 |     for _, id := range ids {
461 |         queue.nack(id)
462 |     }
463 |     queue.afterConsume()
464 |     result, err = queue.TryIntercept(msg)
465 |     if err != nil {
466 |         t.Error(err)
467 |         return
468 |     }
469 |     if result.Intercepted {
470 |         t.Error("expect not intercepted")
471 |         return
472 |     }
473 |     ids, err = queue.beforeConsume()
474 |     if err != nil {
475 |         t.Errorf("consume error: %v", err)
476 |         return
477 |     }
478 |     if len(ids) > 0 {
479 |         t.Error("expect empty messages")
480 |     }
481 | }
482 | 
483 | func TestUseCustomPrefix(t *testing.T) {
484 |     redisCli := redis.NewClient(&redis.Options{
485 |         Addr: "127.0.0.1:6379",
486 |     })
487 |     cb := func(s string) bool {
488 |         return false
489 |     }
490 |     prefix := "MYQUEUE"
491 |     dp := NewQueue("test", redisCli, cb, UseCustomPrefix(prefix))
492 |     if !strings.HasPrefix(dp.pendingKey, prefix) {
493 |         t.Error("wrong prefix")
494 |     }
495 |     if !strings.HasPrefix(dp.readyKey, prefix) {
496 |         t.Error("wrong prefix")
497 |     }
498 |     if !strings.HasPrefix(dp.unAckKey, prefix) {
499 |         t.Error("wrong prefix")
500 |     }
501 |     if !strings.HasPrefix(dp.retryKey, prefix) {
502 |         t.Error("wrong prefix")
503 |     }
504 |     if !strings.HasPrefix(dp.retryCountKey, prefix) {
505 |         t.Error("wrong prefix")
506 |     }
507 |     if !strings.HasPrefix(dp.garbageKey, prefix) {
508 |         t.Error("wrong prefix")
509 |     }
510 | }
--------------------------------------------------------------------------------
/events.go:
--------------------------------------------------------------------------------
1 | package delayqueue
2 | 
3 | import (
4 |     "errors"
5 |     "strconv"
6 |     "strings"
7 | )
8 | 
9 | const (
10 |     // NewMessageEvent is emitted when a message is sent
11 |     NewMessageEvent = iota + 1
12 |     // ReadyEvent is emitted when messages have reached their delivery time
13 |     ReadyEvent
14 |     // DeliveredEvent is emitted when messages have been delivered to a consumer
15 |     DeliveredEvent
16 |     // AckEvent is emitted when a successful-consumption callback is received
17 |     AckEvent
18 |     // NackEvent is emitted when a consumption-failure callback is received
19 |     NackEvent
20 |     // RetryEvent is emitted when a message is re-delivered to a consumer
21 |     RetryEvent
22 |     // FinalFailedEvent is emitted when a message reaches its max retry attempts
23 |     FinalFailedEvent
24 | )
25 | 
26 | // Event contains internal event information during the queue operation and can be used to monitor the queue status.
27 | type Event struct {
28 |     // Code represents event type, such as NewMessageEvent, ReadyEvent
29 |     Code int
30 |     // Timestamp is the event time
31 |     Timestamp int64
32 |     // MsgCount represents the number of messages related to the event
33 |     MsgCount int
34 | }
35 | 
36 | func encodeEvent(e *Event) string {
37 |     return strconv.Itoa(e.Code) +
38 |         " " + strconv.FormatInt(e.Timestamp, 10) +
39 |         " " + strconv.Itoa(e.MsgCount)
40 | }
41 | 
42 | func decodeEvent(payload string) (*Event, error) {
43 |     items := strings.Split(payload, " ")
44 |     if len(items) != 3 {
45 |         return nil, errors.New("decode event error! wrong item count, payload: " + payload)
46 |     }
47 |     code, err := strconv.Atoi(items[0])
48 |     if err != nil {
49 |         return nil, errors.New("decode event error! wrong event code, payload: " + payload)
50 |     }
51 |     timestamp, err := strconv.ParseInt(items[1], 10, 64)
52 |     if err != nil {
53 |         return nil, errors.New("decode event error! wrong timestamp, payload: " + payload)
54 |     }
55 |     count, err := strconv.Atoi(items[2])
56 |     if err != nil {
57 |         return nil, errors.New("decode event error! wrong msg count, payload: " + payload)
58 |     }
59 |     return &Event{
60 |         Code:      code,
61 |         Timestamp: timestamp,
62 |         MsgCount:  count,
63 |     }, nil
64 | }
65 | 
--------------------------------------------------------------------------------
/example/getstarted/main.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 |     "github.com/hdt3213/delayqueue"
5 |     "github.com/redis/go-redis/v9"
6 |     "strconv"
7 |     "time"
8 | )
9 | 
10 | func main() {
11 |     redisCli := redis.NewClient(&redis.Options{
12 |         Addr: "127.0.0.1:6379",
13 |     })
14 |     queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool {
15 |         // callback returns true to confirm successful consumption.
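        // (added caution, not in the original example: delivery is at-least-once,
        // so callbacks should be idempotent)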
16 |         // If callback returns false or does not return within maxConsumeDuration, DelayQueue will re-deliver this message
17 |         println(payload)
18 |         return true
19 |     }).WithConcurrent(4)
20 |     // send delay message
21 |     for i := 0; i < 10; i++ {
22 |         _, err := queue.SendDelayMsgV2(strconv.Itoa(i), time.Second, delayqueue.WithRetryCount(3))
23 |         if err != nil {
24 |             panic(err)
25 |         }
26 |     }
27 |     // send schedule message
28 |     for i := 0; i < 10; i++ {
29 |         _, err := queue.SendScheduleMsgV2(strconv.Itoa(i), time.Now().Add(time.Second))
30 |         if err != nil {
31 |             panic(err)
32 |         }
33 |     }
34 |     // start consume
35 |     done := queue.StartConsume()
36 |     <-done
37 | }
38 | 
--------------------------------------------------------------------------------
/example/monitor/main.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 |     "fmt"
5 |     "strconv"
6 |     "time"
7 | 
8 |     "github.com/hdt3213/delayqueue"
9 |     "github.com/redis/go-redis/v9"
10 | )
11 | 
12 | type Metrics struct {
13 |     ProduceCount int
14 |     DeliverCount int
15 |     ConsumeCount int
16 |     RetryCount   int
17 |     FailCount    int
18 | }
19 | 
20 | type MyProfiler struct {
21 |     List  []*Metrics
22 |     Start int64
23 | }
24 | 
25 | func (p *MyProfiler) OnEvent(event *delayqueue.Event) {
26 |     sinceUptime := event.Timestamp - p.Start
27 |     upMinutes := sinceUptime / 60
28 |     for len(p.List) <= int(upMinutes) { // fill any skipped minutes
29 |         p.List = append(p.List, &Metrics{})
30 |     }
31 |     current := p.List[upMinutes]
32 |     switch event.Code {
33 |     case delayqueue.NewMessageEvent:
34 |         current.ProduceCount += event.MsgCount
35 |     case delayqueue.DeliveredEvent:
36 |         current.DeliverCount += event.MsgCount
37 |     case delayqueue.AckEvent:
38 |         current.ConsumeCount += event.MsgCount
39 |     case delayqueue.RetryEvent:
40 |         current.RetryCount += event.MsgCount
41 |     case delayqueue.FinalFailedEvent:
42 |         current.FailCount += event.MsgCount
43 |     }
44 | }
45 | 
46 | func main() {
47 |     redisCli := redis.NewClient(&redis.Options{
48 |         Addr: "127.0.0.1:6379",
49 |     })
50 |     queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool {
51 |         return true
52 |     })
53 |     start := time.Now()
54 |     queue.EnableReport()
55 | 
56 |     // setup monitor
57 |     monitor := delayqueue.NewMonitor("example", redisCli)
58 |     listener := &MyProfiler{
59 |         Start: start.Unix(),
60 |     }
61 |     monitor.ListenEvent(listener)
62 | 
63 |     // print metrics every minute
64 |     tick := time.Tick(time.Minute)
65 |     go func() {
66 |         for range tick {
67 |             minutes := len(listener.List) - 1
68 |             fmt.Printf("%d: %#v\n", minutes, listener.List[minutes])
69 |         }
70 |     }()
71 | 
72 |     // start test
73 |     for i := 0; i < 10; i++ {
74 |         err := queue.SendDelayMsg(strconv.Itoa(i), 0, delayqueue.WithRetryCount(3))
75 |         if err != nil {
76 |             panic(err)
77 |         }
78 |     }
79 |     done := queue.StartConsume()
80 |     <-done
81 | }
82 | 
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/hdt3213/delayqueue
2 | 
3 | go 1.16
4 | 
5 | require (
6 |     github.com/google/uuid v1.3.0
7 |     github.com/redis/go-redis/v9 v9.0.5
8 | )
9 | 
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao=
2 | github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w=
3 | github.com/bsm/gomega 
v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= 4 | github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 5 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 6 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 7 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 8 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 9 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 10 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 11 | github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl5o= 12 | github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= 13 | -------------------------------------------------------------------------------- /monitor.go: -------------------------------------------------------------------------------- 1 | package delayqueue 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/redis/go-redis/v9" 7 | ) 8 | 9 | // Monitor can get running status and events of DelayQueue 10 | type Monitor struct { 11 | inner *DelayQueue 12 | } 13 | 14 | // NewMonitor0 creates a new Monitor by a RedisCli instance 15 | func NewMonitor0(name string, cli RedisCli, opts ...interface{}) *Monitor { 16 | return &Monitor{ 17 | inner: NewQueue0(name, cli, opts...), 18 | } 19 | } 20 | 21 | // NewMonitor creates a new Monitor by a *redis.Client 22 | func NewMonitor(name string, cli *redis.Client, opts ...interface{}) *Monitor { 23 | rc := &redisV9Wrapper{ 24 | inner: cli, 25 | } 26 | return NewMonitor0(name, rc, opts...) 27 | } 28 | 29 | // NewMonitor creates a new Monitor by a *redis.ClusterClient 30 | func NewMonitorOnCluster(name string, cli *redis.ClusterClient, opts ...interface{}) *Monitor { 31 | rc := &redisClusterWrapper{ 32 | inner: cli, 33 | } 34 | return NewMonitor0(name, rc, opts...) 
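    // (illustrative note, not in the original source: a Monitor wraps a
    // callback-less DelayQueue and only reads its keys or subscribes to its
    // report channel, so it never consumes messages itself)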
35 | }
36 | 
37 | // WithLogger customizes logger for queue
38 | func (m *Monitor) WithLogger(logger *log.Logger) *Monitor {
39 |     m.inner.logger = logger
40 |     return m
41 | }
42 | 
43 | // GetPendingCount returns the number of messages whose delivery time has not arrived yet
44 | func (m *Monitor) GetPendingCount() (int64, error) {
45 |     return m.inner.GetPendingCount()
46 | }
47 | 
48 | // GetReadyCount returns the number of messages which have reached their delivery time but have not been delivered yet
49 | func (m *Monitor) GetReadyCount() (int64, error) {
50 |     return m.inner.GetReadyCount()
51 | }
52 | 
53 | // GetProcessingCount returns the number of messages which are being processed
54 | func (m *Monitor) GetProcessingCount() (int64, error) {
55 |     return m.inner.GetProcessingCount()
56 | }
57 | 
58 | // ListenEvent registers a listener which will be called when events occur in this queue,
59 | // so it can be used to monitor running status
60 | // returns: close function, error
61 | func (m *Monitor) ListenEvent(listener EventListener) (func(), error) {
62 |     reportChan := genReportChannel(m.inner.name)
63 |     sub, closer, err := m.inner.redisCli.Subscribe(reportChan)
64 |     if err != nil {
65 |         return nil, err
66 |     }
67 |     go func() {
68 |         for payload := range sub {
69 |             event, err := decodeEvent(payload)
70 |             if err != nil {
71 |                 m.inner.logger.Printf("[listen event] decode error: %v\n", err)
72 |             } else {
73 |                 listener.OnEvent(event)
74 |             }
75 |         }
76 |     }()
77 |     return closer, nil
78 | }
79 | 
80 | 
--------------------------------------------------------------------------------
/monitor_test.go:
--------------------------------------------------------------------------------
1 | package delayqueue
2 | 
3 | import (
4 |     "context"
5 |     "log"
6 |     "os"
7 |     "strconv"
8 |     "testing"
9 | 
10 |     "github.com/redis/go-redis/v9"
11 | )
12 | 
13 | func TestMonitor_GetStatus(t *testing.T) {
14 |     redisCli := redis.NewClient(&redis.Options{
15 |         Addr: "127.0.0.1:6379",
16 |     })
17 |     redisCli.FlushDB(context.Background())
18 |     size := 1000
19 |     cb := func(s string) bool {
20 |         return true
21 |     }
22 |     logger := log.New(os.Stderr, "[DelayQueue]", log.LstdFlags)
23 |     queue := NewQueue("test", redisCli, cb)
24 |     monitor := NewMonitor("test", redisCli).WithLogger(logger)
25 | 
26 |     for i := 0; i < size; i++ {
27 |         err := queue.SendDelayMsg(strconv.Itoa(i), 0)
28 |         if err != nil {
29 |             t.Error(err)
30 |         }
31 |     }
32 | 
33 |     // test pending count
34 |     pending, err := monitor.GetPendingCount()
35 |     if err != nil {
36 |         t.Error(err)
37 |         return
38 |     }
39 |     if int(pending) != size {
40 |         t.Errorf("expecting %d, got %d", size, int(pending))
41 |         return
42 |     }
43 | 
44 |     // test ready count
45 |     err = queue.pending2Ready()
46 |     if err != nil {
47 |         t.Errorf("consume error: %v", err)
48 |         return
49 |     }
50 |     ready, err := monitor.GetReadyCount()
51 |     if err != nil {
52 |         t.Error(err)
53 |         return
54 |     }
55 |     if int(ready) != size {
56 |         t.Errorf("expecting %d, got %d", size, int(ready))
57 |         return
58 |     }
59 | 
60 |     // test processing count
61 |     for i := 0; i < size/2; i++ {
62 |         _, _ = queue.ready2Unack()
63 |     }
64 |     processing, err := monitor.GetProcessingCount()
65 |     if err != nil {
66 |         t.Error(err)
67 |         return
68 |     }
69 |     if int(processing) != size/2 {
70 |         t.Errorf("expecting %d, got %d", size/2, int(processing))
71 |         return
72 |     }
73 | }
74 | 
75 | func TestMonitor_Cluster_GetStatus(t *testing.T) {
76 |     redisCli := redis.NewClusterClient(&redis.ClusterOptions{
77 |         Addrs: []string{
78 |             "127.0.0.1:7000",
79 |             "127.0.0.1:7001",
80 |             "127.0.0.1:7002",
81 | }, 82 | }) 83 | redisCli.FlushDB(context.Background()) 84 | size := 1000 85 | cb := func(s string) bool { 86 | return true 87 | } 88 | logger := log.New(os.Stderr, "[DelayQueue]", log.LstdFlags) 89 | queue := NewQueueOnCluster("test", redisCli, cb) 90 | monitor := NewMonitorOnCluster("test", redisCli).WithLogger(logger) 91 | 92 | for i := 0; i < size; i++ { 93 | err := queue.SendDelayMsg(strconv.Itoa(i), 0) 94 | if err != nil { 95 | t.Error(err) 96 | } 97 | } 98 | 99 | // test pengding count 100 | _, err := monitor.GetPendingCount() 101 | if err != nil { 102 | t.Error(err) 103 | return 104 | } 105 | // if int(pending) != size { 106 | // t.Errorf("execting %d, got %d", int(pending), size) 107 | // return 108 | // } 109 | 110 | // test ready count 111 | err = queue.pending2Ready() 112 | if err != nil { 113 | t.Errorf("consume error: %v", err) 114 | return 115 | } 116 | _, err = monitor.GetReadyCount() 117 | if err != nil { 118 | t.Error(err) 119 | return 120 | } 121 | // if int(ready) != size { 122 | // t.Errorf("execting %d, got %d", int(pending), size) 123 | // return 124 | // } 125 | 126 | // test processing count 127 | for i := 0; i < size/2; i++ { 128 | _, _ = queue.ready2Unack() 129 | } 130 | _, err = monitor.GetProcessingCount() 131 | if err != nil { 132 | t.Error(err) 133 | return 134 | } 135 | // if int(processing) != size/2 { 136 | // t.Errorf("execting %d, got %d", int(pending), size/2) 137 | // return 138 | // } 139 | } 140 | 141 | type MyProfiler struct { 142 | ProduceCount int 143 | DeliverCount int 144 | ConsumeCount int 145 | RetryCount int 146 | FailCount int 147 | } 148 | 149 | func (p *MyProfiler) OnEvent(event *Event) { 150 | switch event.Code { 151 | case NewMessageEvent: 152 | p.ProduceCount += event.MsgCount 153 | case DeliveredEvent: 154 | p.DeliverCount += event.MsgCount 155 | case AckEvent: 156 | p.ConsumeCount += event.MsgCount 157 | case RetryEvent: 158 | p.RetryCount += event.MsgCount 159 | case FinalFailedEvent: 160 | p.FailCount += event.MsgCount 161 | } 162 | } 163 | 164 | func TestMonitor_listener1(t *testing.T) { 165 | redisCli := redis.NewClient(&redis.Options{ 166 | Addr: "127.0.0.1:6379", 167 | }) 168 | redisCli.FlushDB(context.Background()) 169 | size := 1000 170 | cb := func(s string) bool { 171 | return true 172 | } 173 | queue := NewQueue("test", redisCli, cb) 174 | queue.EnableReport() 175 | monitor := NewMonitor("test", redisCli) 176 | profile := &MyProfiler{} 177 | monitor.ListenEvent(profile) 178 | 179 | for i := 0; i < size; i++ { 180 | err := queue.SendDelayMsg(strconv.Itoa(i), 0) 181 | if err != nil { 182 | t.Error(err) 183 | } 184 | } 185 | ids, err := queue.beforeConsume() 186 | if err != nil { 187 | t.Errorf("consume error: %v", err) 188 | return 189 | } 190 | for _, id := range ids { 191 | queue.callback(id) 192 | } 193 | queue.afterConsume() 194 | 195 | if profile.ProduceCount != size { 196 | t.Error("wrong produce count") 197 | } 198 | if profile.DeliverCount != size { 199 | t.Error("wrong deliver count") 200 | } 201 | if profile.ConsumeCount != size { 202 | t.Error("wrong consume count") 203 | } 204 | } 205 | 206 | func TestMonitor_Cluster_listener1(t *testing.T) { 207 | redisCli := redis.NewClusterClient(&redis.ClusterOptions{ 208 | Addrs: []string{ 209 | "127.0.0.1:7000", 210 | "127.0.0.1:7001", 211 | "127.0.0.1:7002", 212 | }, 213 | }) 214 | redisCli.FlushDB(context.Background()) 215 | size := 1000 216 | cb := func(s string) bool { 217 | return true 218 | } 219 | queue := NewQueueOnCluster("test", redisCli, cb) 220 | 
queue.EnableReport()
221 |     monitor := NewMonitorOnCluster("test", redisCli)
222 |     profile := &MyProfiler{}
223 |     monitor.ListenEvent(profile)
224 | 
225 |     for i := 0; i < size; i++ {
226 |         err := queue.SendDelayMsg(strconv.Itoa(i), 0)
227 |         if err != nil {
228 |             t.Error(err)
229 |         }
230 |     }
231 |     ids, err := queue.beforeConsume()
232 |     if err != nil {
233 |         t.Errorf("consume error: %v", err)
234 |         return
235 |     }
236 |     for _, id := range ids {
237 |         queue.callback(id)
238 |     }
239 |     queue.afterConsume()
240 | 
241 |     // if profile.ProduceCount != size {
242 |     //     t.Error("wrong produce count")
243 |     // }
244 |     // if profile.DeliverCount != size {
245 |     //     t.Error("wrong deliver count")
246 |     // }
247 |     // if profile.ConsumeCount != size {
248 |     //     t.Error("wrong consume count")
249 |     // }
250 | }
251 | 
252 | func TestMonitor_listener2(t *testing.T) {
253 |     redisCli := redis.NewClient(&redis.Options{
254 |         Addr: "127.0.0.1:6379",
255 |     })
256 |     redisCli.FlushDB(context.Background())
257 |     size := 1000
258 |     cb := func(s string) bool {
259 |         return false
260 |     }
261 |     queue := NewQueue("test", redisCli, cb).WithDefaultRetryCount(1)
262 |     queue.EnableReport()
263 |     monitor := NewMonitor("test", redisCli)
264 |     profile := &MyProfiler{}
265 |     monitor.ListenEvent(profile)
266 | 
267 |     for i := 0; i < size; i++ {
268 |         err := queue.SendDelayMsg(strconv.Itoa(i), 0)
269 |         if err != nil {
270 |             t.Error(err)
271 |         }
272 |     }
273 |     for i := 0; i < 3; i++ {
274 |         ids, err := queue.beforeConsume()
275 |         if err != nil {
276 |             t.Errorf("consume error: %v", err)
277 |             return
278 |         }
279 |         for _, id := range ids {
280 |             queue.callback(id)
281 |         }
282 |         queue.afterConsume()
283 |     }
284 | 
285 |     if profile.RetryCount != size {
286 |         t.Error("wrong retry count")
287 |     }
288 |     if profile.FailCount != size {
289 |         t.Error("wrong fail count")
290 |     }
291 | }
292 | 
--------------------------------------------------------------------------------
/publisher.go:
--------------------------------------------------------------------------------
1 | package delayqueue
2 | 
3 | import (
4 |     "log"
5 |     "time"
6 | 
7 |     "github.com/redis/go-redis/v9"
8 | )
9 | 
10 | // Publisher only publishes messages to a delay queue; it is an encapsulation of DelayQueue
11 | type Publisher struct {
12 |     inner *DelayQueue
13 | }
14 | 
15 | // NewPublisher0 creates a new Publisher by a RedisCli instance
16 | func NewPublisher0(name string, cli RedisCli, opts ...interface{}) *Publisher {
17 |     return &Publisher{
18 |         inner: NewQueue0(name, cli, opts...),
19 |     }
20 | }
21 | 
22 | // NewPublisher creates a new Publisher by a *redis.Client
23 | func NewPublisher(name string, cli *redis.Client, opts ...interface{}) *Publisher {
24 |     rc := &redisV9Wrapper{
25 |         inner: cli,
26 |     }
27 |     return NewPublisher0(name, rc, opts...)
28 | }
29 | 
30 | // WithLogger customizes logger for queue
31 | func (p *Publisher) WithLogger(logger *log.Logger) *Publisher {
32 |     p.inner.logger = logger
33 |     return p
34 | }
35 | 
36 | // SendScheduleMsg submits a message delivered at the given time
37 | func (p *Publisher) SendScheduleMsg(payload string, t time.Time, opts ...interface{}) error {
38 |     return p.inner.SendScheduleMsg(payload, t, opts...)
39 | }
40 | 
41 | // SendDelayMsg submits a message delivered after the given duration
42 | func (p *Publisher) SendDelayMsg(payload string, duration time.Duration, opts ...interface{}) error {
43 |     return p.inner.SendDelayMsg(payload, duration, opts...)
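    // An illustrative usage sketch (names are examples, not from the original docs):
    //
    //     p := NewPublisher("orders", redisCli)
    //     _ = p.SendDelayMsg("task-payload", time.Minute, WithRetryCount(3))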
44 | } 45 | -------------------------------------------------------------------------------- /publisher_test.go: -------------------------------------------------------------------------------- 1 | package delayqueue 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | "strconv" 8 | "testing" 9 | "time" 10 | 11 | "github.com/redis/go-redis/v9" 12 | ) 13 | 14 | func TestPublisher(t *testing.T) { 15 | redisCli := redis.NewClient(&redis.Options{ 16 | Addr: "127.0.0.1:6379", 17 | }) 18 | redisCli.FlushDB(context.Background()) 19 | size := 1000 20 | retryCount := 3 21 | deliveryCount := make(map[string]int) 22 | cb := func(s string) bool { 23 | deliveryCount[s]++ 24 | i, _ := strconv.ParseInt(s, 10, 64) 25 | return i%2 == 0 26 | } 27 | logger := log.New(os.Stderr, "[DelayQueue]", log.LstdFlags) 28 | queue := NewQueue("test", redisCli, cb).WithLogger(logger) 29 | publisher := NewPublisher("test", redisCli).WithLogger(logger) 30 | 31 | for i := 0; i < size; i++ { 32 | err := publisher.SendDelayMsg(strconv.Itoa(i), 0, WithRetryCount(retryCount), WithMsgTTL(time.Hour)) 33 | if err != nil { 34 | t.Error(err) 35 | } 36 | } 37 | for i := 0; i < 10*size; i++ { 38 | ids, err := queue.beforeConsume() 39 | if err != nil { 40 | t.Errorf("consume error: %v", err) 41 | return 42 | } 43 | for _, id := range ids { 44 | queue.callback(id) 45 | } 46 | queue.afterConsume() 47 | } 48 | for k, v := range deliveryCount { 49 | i, _ := strconv.ParseInt(k, 10, 64) 50 | if i%2 == 0 { 51 | if v != 1 { 52 | t.Errorf("expect 1 delivery, actual %d", v) 53 | } 54 | } else { 55 | if v != retryCount+1 { 56 | t.Errorf("expect %d delivery, actual %d", retryCount+1, v) 57 | } 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /wrapper.go: -------------------------------------------------------------------------------- 1 | package delayqueue 2 | 3 | import ( 4 | "context" 5 | "github.com/redis/go-redis/v9" 6 | "time" 7 | ) 8 | 9 | 10 | // NewQueue creates a new queue, use DelayQueue.StartConsume to consume or DelayQueue.SendScheduleMsg to publish message 11 | // 12 | // queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool { 13 | // // callback returns true to confirm successful consumption. 14 | // // If callback returns false or not return within maxConsumeDuration, DelayQueue will re-deliver this message 15 | // return true 16 | // }) 17 | // 18 | func NewQueue(name string, cli *redis.Client, opts ...interface{}) *DelayQueue { 19 | rc := &redisV9Wrapper{ 20 | inner: cli, 21 | } 22 | return NewQueue0(name, rc, opts...) 
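    // (illustrative note, not in the original source: NewQueue0 accepts any
    // RedisCli implementation, so a hypothetical custom client can be wired the
    // same way:
    //
    //     q := NewQueue0("example", myCustomRedisCli, cb)
    //
    // where myCustomRedisCli implements RedisCli and cb is a func(string) bool)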
23 | } 24 | 25 | func wrapErr(err error) error { 26 | if err == redis.Nil { 27 | return NilErr 28 | } 29 | return err 30 | } 31 | 32 | type redisV9Wrapper struct { 33 | inner *redis.Client 34 | } 35 | 36 | func (r *redisV9Wrapper) Eval(script string, keys []string, args []interface{}) (interface{}, error) { 37 | ctx := context.Background() 38 | ret, err := r.inner.Eval(ctx, script, keys, args...).Result() 39 | return ret, wrapErr(err) 40 | } 41 | 42 | func (r *redisV9Wrapper) Set(key string, value string, expiration time.Duration) error { 43 | ctx := context.Background() 44 | return wrapErr(r.inner.Set(ctx, key, value, expiration).Err()) 45 | } 46 | 47 | func (r *redisV9Wrapper) LRem(key string, count int64, value string) (int64, error) { 48 | ctx := context.Background() 49 | count, err := r.inner.LRem(ctx, key, count, value).Result() 50 | if err != nil { 51 | return 0, wrapErr(err) 52 | } 53 | return count, nil 54 | } 55 | 56 | func (r *redisV9Wrapper) Get(key string) (string, error) { 57 | ctx := context.Background() 58 | ret, err := r.inner.Get(ctx, key).Result() 59 | return ret, wrapErr(err) 60 | } 61 | 62 | func (r *redisV9Wrapper) Del(keys []string) error { 63 | ctx := context.Background() 64 | return wrapErr(r.inner.Del(ctx, keys...).Err()) 65 | } 66 | 67 | func (r *redisV9Wrapper) HSet(key string, field string, value string) error { 68 | ctx := context.Background() 69 | return wrapErr(r.inner.HSet(ctx, key, field, value).Err()) 70 | } 71 | 72 | func (r *redisV9Wrapper) HDel(key string, fields []string) error { 73 | ctx := context.Background() 74 | return wrapErr(r.inner.HDel(ctx, key, fields...).Err()) 75 | } 76 | 77 | func (r *redisV9Wrapper) SMembers(key string) ([]string, error) { 78 | ctx := context.Background() 79 | ret, err := r.inner.SMembers(ctx, key).Result() 80 | return ret, wrapErr(err) 81 | } 82 | 83 | func (r *redisV9Wrapper) SRem(key string, members []string) error { 84 | ctx := context.Background() 85 | members2 := make([]interface{}, len(members)) 86 | for i, v := range members { 87 | members2[i] = v 88 | } 89 | return wrapErr(r.inner.SRem(ctx, key, members2...).Err()) 90 | } 91 | 92 | func (r *redisV9Wrapper) ZAdd(key string, values map[string]float64) error { 93 | ctx := context.Background() 94 | var zs []redis.Z 95 | for member, score := range values { 96 | zs = append(zs, redis.Z{ 97 | Score: score, 98 | Member: member, 99 | }) 100 | } 101 | return wrapErr(r.inner.ZAdd(ctx, key, zs...).Err()) 102 | } 103 | 104 | func (r *redisV9Wrapper) ZRem(key string, members []string) (int64, error) { 105 | ctx := context.Background() 106 | members2 := make([]interface{}, len(members)) 107 | for i, v := range members { 108 | members2[i] = v 109 | } 110 | removed, err := r.inner.ZRem(ctx, key, members2...).Result() 111 | if err != nil { 112 | return 0, wrapErr(err) 113 | } 114 | return removed, nil 115 | } 116 | 117 | func (r *redisV9Wrapper) ZCard(key string) (int64, error) { 118 | ctx := context.Background() 119 | return r.inner.ZCard(ctx, key).Result() 120 | } 121 | 122 | func (r *redisV9Wrapper) ZScore(key string, member string) (float64, error) { 123 | ctx := context.Background() 124 | v, err := r.inner.ZScore(ctx, key, member).Result() 125 | if err != nil { 126 | return 0, wrapErr(err) 127 | } 128 | return v, nil 129 | } 130 | 131 | func (r *redisV9Wrapper) LLen(key string) (int64, error) { 132 | ctx := context.Background() 133 | return r.inner.LLen(ctx, key).Result() 134 | } 135 | 136 | func (r *redisV9Wrapper) Publish(channel string, payload string) error { 137 | ctx 
:= context.Background() 138 | return r.inner.Publish(ctx, channel, payload).Err() 139 | } 140 | 141 | func (r *redisV9Wrapper) Subscribe(channel string) (<-chan string, func(), error) { 142 | ctx := context.Background() 143 | sub := r.inner.Subscribe(ctx, channel) 144 | close := func() { 145 | _ = sub.Close() 146 | } 147 | resultChan := make(chan string) // sub.Channel() has its own buffer 148 | go func() { 149 | for msg := range sub.Channel() { 150 | resultChan <- msg.Payload 151 | } 152 | }() 153 | 154 | return resultChan, close, nil 155 | } 156 | 157 | func (r *redisV9Wrapper) EvalSha(sha1 string, keys []string, args []interface{}) (interface{}, error) { 158 | ctx := context.Background() 159 | ret, err := r.inner.EvalSha(ctx, sha1, keys, args...).Result() 160 | return ret, wrapErr(err) 161 | } 162 | 163 | func (r *redisV9Wrapper) ScriptLoad(script string) (string, error) { 164 | ctx := context.Background() 165 | sha1, err := r.inner.ScriptLoad(ctx, script).Result() 166 | return sha1, wrapErr(err) 167 | } 168 | 169 | type redisClusterWrapper struct { 170 | inner *redis.ClusterClient 171 | } 172 | 173 | func (r *redisClusterWrapper) Eval(script string, keys []string, args []interface{}) (interface{}, error) { 174 | ctx := context.Background() 175 | ret, err := r.inner.Eval(ctx, script, keys, args...).Result() 176 | return ret, wrapErr(err) 177 | } 178 | 179 | func (r *redisClusterWrapper) Set(key string, value string, expiration time.Duration) error { 180 | ctx := context.Background() 181 | return wrapErr(r.inner.Set(ctx, key, value, expiration).Err()) 182 | } 183 | 184 | func (r *redisClusterWrapper) Get(key string) (string, error) { 185 | ctx := context.Background() 186 | ret, err := r.inner.Get(ctx, key).Result() 187 | return ret, wrapErr(err) 188 | } 189 | 190 | func (r *redisClusterWrapper) Del(keys []string) error { 191 | ctx := context.Background() 192 | return wrapErr(r.inner.Del(ctx, keys...).Err()) 193 | } 194 | 195 | func (r *redisClusterWrapper) HSet(key string, field string, value string) error { 196 | ctx := context.Background() 197 | return wrapErr(r.inner.HSet(ctx, key, field, value).Err()) 198 | } 199 | 200 | func (r *redisClusterWrapper) HDel(key string, fields []string) error { 201 | ctx := context.Background() 202 | return wrapErr(r.inner.HDel(ctx, key, fields...).Err()) 203 | } 204 | 205 | func (r *redisClusterWrapper) SMembers(key string) ([]string, error) { 206 | ctx := context.Background() 207 | ret, err := r.inner.SMembers(ctx, key).Result() 208 | return ret, wrapErr(err) 209 | } 210 | 211 | func (r *redisClusterWrapper) SRem(key string, members []string) error { 212 | ctx := context.Background() 213 | members2 := make([]interface{}, len(members)) 214 | for i, v := range members { 215 | members2[i] = v 216 | } 217 | return wrapErr(r.inner.SRem(ctx, key, members2...).Err()) 218 | } 219 | 220 | func (r *redisClusterWrapper) ZAdd(key string, values map[string]float64) error { 221 | ctx := context.Background() 222 | var zs []redis.Z 223 | for member, score := range values { 224 | zs = append(zs, redis.Z{ 225 | Score: score, 226 | Member: member, 227 | }) 228 | } 229 | return wrapErr(r.inner.ZAdd(ctx, key, zs...).Err()) 230 | } 231 | 232 | func (r *redisClusterWrapper) ZRem(key string, members []string) (int64, error) { 233 | ctx := context.Background() 234 | members2 := make([]interface{}, len(members)) 235 | for i, v := range members { 236 | members2[i] = v 237 | } 238 | removed, err := r.inner.ZRem(ctx, key, members2...).Result() 239 | if err != nil { 240 | return 
0, wrapErr(err) 241 | } 242 | return removed, nil 243 | } 244 | 245 | func (r *redisClusterWrapper) ZCard(key string) (int64, error) { 246 | ctx := context.Background() 247 | return r.inner.ZCard(ctx, key).Result() 248 | } 249 | 250 | func (r *redisClusterWrapper) ZScore(key string, member string) (float64, error) { 251 | ctx := context.Background() 252 | v, err := r.inner.ZScore(ctx, key, member).Result() 253 | if err != nil { 254 | return 0, wrapErr(err) 255 | } 256 | return v, nil 257 | } 258 | 259 | func (r *redisClusterWrapper) LLen(key string) (int64, error) { 260 | ctx := context.Background() 261 | return r.inner.LLen(ctx, key).Result() 262 | } 263 | 264 | func (r *redisClusterWrapper) LRem(key string, count int64, value string) (int64, error) { 265 | ctx := context.Background() 266 | count, err := r.inner.LRem(ctx, key, count, value).Result() 267 | if err != nil { 268 | return 0, wrapErr(err) 269 | } 270 | return count, nil 271 | } 272 | 273 | func (r *redisClusterWrapper) Publish(channel string, payload string) error { 274 | ctx := context.Background() 275 | return r.inner.Publish(ctx, channel, payload).Err() 276 | } 277 | 278 | func (r *redisClusterWrapper) Subscribe(channel string) (<-chan string, func(), error) { 279 | ctx := context.Background() 280 | sub := r.inner.Subscribe(ctx, channel) 281 | close := func() { 282 | _ = sub.Close() 283 | } 284 | resultChan := make(chan string) // sub.Channel() has its own buffer 285 | go func() { 286 | for msg := range sub.Channel() { 287 | resultChan <- msg.Payload 288 | } 289 | }() 290 | 291 | return resultChan, close, nil 292 | } 293 | 294 | func (r *redisClusterWrapper) EvalSha(sha1 string, keys []string, args []interface{}) (interface{}, error) { 295 | ctx := context.Background() 296 | ret, err := r.inner.EvalSha(ctx, sha1, keys, args...).Result() 297 | return ret, wrapErr(err) 298 | } 299 | 300 | func (r *redisClusterWrapper) ScriptLoad(script string) (string, error) { 301 | ctx := context.Background() 302 | sha1, err := r.inner.ScriptLoad(ctx, script).Result() 303 | return sha1, wrapErr(err) 304 | } 305 | 306 | func NewQueueOnCluster(name string, cli *redis.ClusterClient, opts ...interface{}) *DelayQueue { 307 | rc := &redisClusterWrapper{ 308 | inner: cli, 309 | } 310 | opts = append(opts, UseHashTagKey()) 311 | return NewQueue0(name, rc, opts...) 312 | } 313 | --------------------------------------------------------------------------------