├── .github
│   └── workflows
│       └── go.yml
├── .gitignore
├── LICENSE
├── README.md
├── go.mod
├── go.sum
├── queue.go
└── queue_test.go
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
 1 | name: Go
 2 |
 3 | on:
 4 |   push:
 5 |     branches: [ master ]
 6 |   pull_request:
 7 |     branches: [ master ]
 8 |
 9 | jobs:
10 |
11 |   build:
12 |     runs-on: ubuntu-latest
13 |
14 |     # Service containers to run with the `build` job
15 |     services:
16 |       # Label used to access the service container
17 |       redis:
18 |         # Docker Hub image
19 |         image: redis
20 |         #
21 |         ports:
22 |           # Opens tcp port 6379 on the host and service container
23 |           - 6379:6379
24 |
25 |     steps:
26 |     - uses: actions/checkout@v2
27 |
28 |     - name: Set up Go
29 |       uses: actions/setup-go@v2
30 |       with:
31 |         go-version: 1.18
32 |
33 |     - name: Build
34 |       run: go build -v ./...
35 |
36 |     - name: Test
37 |       run: go test -v -coverprofile=coverage.out ./...
38 |
39 |     - name: Convert coverage to lcov
40 |       uses: jandelgado/gcov2lcov-action@v1.0.5
41 |
42 |     - name: Coveralls
43 |       uses: coverallsapp/github-action@master
44 |       with:
45 |         path-to-lcov: coverage.lcov
46 |         github-token: ${{ secrets.GITHUB_TOKEN }}
47 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects)
 2 | *.o
 3 | *.a
 4 | *.so
 5 |
 6 | # Folders
 7 | _obj
 8 | _test
 9 |
10 | # Architecture specific extensions/prefixes
11 | *.[568vq]
12 | [568vq].out
13 |
14 | *.cgo1.go
15 | *.cgo2.c
16 | _cgo_defun.c
17 | _cgo_gotypes.go
18 | _cgo_export.*
19 |
20 | _testmain.go
21 |
22 | *.exe
23 | *.test
24 | *.prof
25 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | The MIT License (MIT)
 2 |
 3 | Copyright (c) 2016 Kaveh Mousavi Zamani
 4 |
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | Queue
 2 | =========
 3 | [![Go Lang](http://kavehmz.github.io/static/gopher/gopher-front.svg)](https://golang.org/)
 4 | [![GoDoc](https://godoc.org/github.com/kavehmz/queue?status.svg)](https://godoc.org/github.com/kavehmz/queue)
 5 | ![Build Status](https://github.com/kavehmz/queue/actions/workflows/go.yml/badge.svg?branch=master)
 6 | [![Coverage Status](https://coveralls.io/repos/kavehmz/queue/badge.svg?branch=master&service=github)](https://coveralls.io/github/kavehmz/queue?branch=master)
 7 | [![Go Report Card](https://goreportcard.com/badge/github.com/kavehmz/queue)](https://goreportcard.com/report/github.com/kavehmz/queue)
 8 | [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/kavehmz/queue)
 9 |
10 | A [Go](http://golang.org) library for managing queues on top of Redis.
11 | It started as a hiring exercise, but I later found it useful in a custom task-processing project,
12 | so I thought it might be useful in general.
13 |
14 |
15 | ## Installation
16 |
17 | ```bash
18 | $ go get github.com/kavehmz/queue
19 | ```
20 |
21 | ## Usage
22 |
23 | ```go
24 | package main
25 |
26 | import (
27 | 	"fmt"
28 | 	"time"
29 |
30 | 	"github.com/kavehmz/queue"
31 | )
32 |
33 | func main() {
34 | 	var q queue.Queue
35 | 	q.Urls([]string{"redis://localhost:6379"})
36 | 	q.AddTask(1, "start")
37 | 	q.AddTask(2, "start")
38 | 	q.AddTask(1, "stop")
39 | 	q.AddTask(2, "stop")
40 | 	analyzer := func(id int, task chan string, success chan bool) {
41 | 		for {
42 | 			select {
43 | 			case msg := <-task:
44 | 				fmt.Println(id, msg)
45 | 				if msg == "stop" {
46 | 					success <- true
47 | 					return
48 | 				}
49 | 			case <-time.After(2 * time.Second):
50 | 				fmt.Println("no new events for 2 seconds for ID", id)
51 | 				success <- false
52 | 				return
53 | 			}
54 | 		}
55 | 	}
56 | 	exitOnEmpty := func() bool {
57 | 		return true
58 | 	}
59 | 	q.AnalysePool(1, exitOnEmpty, analyzer)
60 | }
61 | ```
62 |
63 | ## Approach
64 |
65 | The focus of this design is mainly horizontal scalability via concurrency, partitioning and fault detection.
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/kavehmz/queue
2 |
3 | go 1.18
4 |
5 | require github.com/garyburd/redigo v1.6.2
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/garyburd/redigo v1.6.2 h1:yE/pwKCrbLpLpQICzYTeZ7JsTA/C53wFTJHaEtRqniM=
2 | github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
3 |
--------------------------------------------------------------------------------
/queue.go:
--------------------------------------------------------------------------------
  1 | /*
  2 | Package queue is a simple queue system written in Go that uses Redis.
  3 | The focus of this design is mainly horizontal scalability via concurrency, partitioning and fault detection.
  4 | Queues can be partitioned into more than one Redis server if necessary.
  5 |
  6 | The number of Redis partitions is set by calling the Urls function with a slice of Redis connection URLs.
  7 | Redis partitioning is required in cases where one Redis server cannot handle the load because of IO, memory or, in rare situations, CPU limitations.
  8 |
  9 | In case of a crash, a record of all incomplete tasks is kept in Redis as keys with this format:
 10 | 	QUEUE::0::PENDING::ID
 11 | where ID indicates the ID of the failed task.
 12 |
 13 | To use this library you need the Queue struct.
 14 |
 15 | 	var q Queue
 16 | 	q.Urls([]string{"redis://localhost:6379"})
 17 |
 18 | Adding tasks is done by calling AddTask. This function accepts an ID and the task itself as a string.
 19 |
 20 | 	q.AddTask(1, "task1")
 21 | 	q.AddTask(2, "task2")
 22 |
 23 | ID can be used in a special way. If the IDs of two tasks are the same, AnalysePool will send them to the same analyzer goroutine while processing, as long as the analyzer waits long enough.
 24 |
 25 | 	q.AddTask(2, "start")
 26 | 	q.AddTask(1, "load")
 27 | 	q.AddTask(2, "load")
 28 | 	q.AddTask(1, "stop")
 29 | 	q.AddTask(2, "stop")
 30 |
 31 | This feature can be used in the analyzer to process a set of related tasks one after another.
 32 | If you are adding ID-related tasks and you need to spin up more than one AnalysePool to fetch and distribute tasks, you need to insert the tasks into separate queues or separate Redis servers.
 33 | To have separate queues you can set the Queues number in the Queue structure.
 34 |
 35 | 	whichQueue = id % q.Queues
 36 |
 37 | AnalysePool accepts three parameters. The first, analyzerID, identifies which Redis server this AnalysePool will connect to:
 38 |
 39 | 	whichRedis = (analyzerID / q.Queues) % len(q.urls)
 40 |
 41 | AnalysePool needs two closures, analyzer and exitOnEmpty. The format of those closures is as follows:
 42 |
 43 | 	analyzer := func(id int, task chan string, success chan bool) {
 44 | 		for {
 45 | 			select {
 46 | 			case msg := <-task:
 47 | 				// process the task
 48 | 				if msg == "stop_indicator" {
 49 | 					success <- true
 50 | 					return
 51 | 				}
 52 | 			}
 53 | 		}
 54 | 	}
 55 | 	exitOnEmpty := func() bool {
 56 | 		return true
 57 | 	}
 58 | 	q.AnalysePool(1, exitOnEmpty, analyzer)
 59 | */
 60 | package queue
 61 |
 62 | import (
 63 | 	"regexp"
 64 | 	"runtime"
 65 | 	"strconv"
 66 | 	"time"
 67 |
 68 | 	"github.com/garyburd/redigo/redis"
 69 | )
 70 |
 71 | // Queue is the structure that encompasses the queue settings and methods.
 72 | type Queue struct {
 73 | 	// AnalyzerBuff sets the number of concurrently running analyzers. It defaults to the number of CPUs if not set.
 74 | 	AnalyzerBuff int
 75 | 	// QueueName sets the name used in the underlying system for the queue. Default is "QUEUE".
 76 | 	QueueName string
 77 | 	// Queues is the number of queues in each Redis server. This is useful if you have ID-related tasks and you need more than one AnalysePool. Default is 1.
 78 | 	Queues int
 79 | 	urls   []string
 80 | 	pool   []redis.Conn
 81 | }
 82 |
 83 | func (q *Queue) queues() int {
 84 | 	if q.Queues != 0 {
 85 | 		return q.Queues
 86 | 	}
 87 | 	return 1
 88 | }
 89 |
 90 | func (q *Queue) pendingKeyName(id int) string {
 91 | 	return q.queueName(id) + "::PENDING::" + strconv.Itoa(id)
 92 | }
 93 |
 94 | func (q *Queue) redisID(id int) int {
 95 | 	return (id / q.queues()) % len(q.urls)
 96 | }
 97 |
 98 | func (q *Queue) queueName(id int) string {
 99 | 	if q.QueueName != "" {
100 | 		return q.QueueName + "::" + strconv.Itoa(id%q.queues())
101 | 	}
102 | 	return "QUEUE" + "::" + strconv.Itoa(id%q.queues())
103 | }
104 |
105 | func (q *Queue) analyzerBuff() int {
106 | 	if q.AnalyzerBuff != 0 {
107 | 		return q.AnalyzerBuff
108 | 	}
109 | 	return runtime.NumCPU()
110 | }
111 |
112 | // Urls accepts a slice of Redis connection URLs. It sets up the connections and also determines how many Redis partitions will be used.
113 | // Setting more than one Redis is useful in cases where a single Redis server can't handle the queue load, either because of IO and memory restrictions or, possibly, CPU.
114 | func (q *Queue) Urls(urls []string) {
115 | 	q.urls = q.urls[:0]
116 | 	q.pool = q.pool[:0]
117 | 	for _, v := range urls {
118 | 		c, e := redis.DialURL(v)
119 | 		checkErr(e)
120 | 		q.urls = append(q.urls, v)
121 | 		q.pool = append(q.pool, c)
122 | 	}
123 | }
124 |
125 | // AddTask will add a task to the queue. It accepts an ID and a string.
126 | // If more than one task is added with the same ID, the queue will make sure they are sent
127 | // to the same analyzer, as long as the analyzer does not return before the next ID is popped from the queue.
128 | func (q *Queue) AddTask(id int, task string) {
129 | 	task = strconv.Itoa(id) + ";" + task
130 | 	_, e := q.pool[q.redisID(id)].Do("RPUSH", q.queueName(id), task)
131 | 	checkErr(e)
132 | }
133 |
134 | func (q *Queue) waitforSuccess(id int, success chan bool, pool map[int]chan string, next chan bool) {
135 | 	redisdb, _ := redis.DialURL(q.urls[q.redisID(id)])
136 | 	redisdb.Do("SET", q.pendingKeyName(id), 1)
137 | 	r := <-success
138 | 	if r {
139 | 		delete(pool, id)
140 | 		redisdb.Do("DEL", q.pendingKeyName(id))
141 | 	}
142 | 	<-next
143 | }
144 |
145 | func (q *Queue) removeTask(redisdb redis.Conn, queueName string) (int, string) {
146 | 	r, e := redisdb.Do("LPOP", queueName)
147 | 	checkErr(e)
148 | 	if r != nil {
149 | 		s, _ := redis.String(r, e)
150 | 		m := regexp.MustCompile(`(\d+);(.*)$`).FindStringSubmatch(s)
151 | 		id, _ := strconv.Atoi(m[1])
152 | 		redisdb.Do("SET", q.pendingKeyName(id), 1)
153 | 		return id, m[2]
154 | 	}
155 | 	return 0, ""
156 | }
157 |
158 | /*
159 | AnalysePool can be called to process the Redis queue(s).
160 | analyzerID sets which Redis server AnalysePool will connect to (whichRedis = (analyzerID / q.Queues) % len(q.urls)).
161 |
162 | exitOnEmpty is a closure function which controls the inner loop of AnalysePool when the queue is empty.
163 | 	exitOnEmpty := func() bool {
164 | 		return true
165 | 	}
166 | analyzer is a closure function which will be called to process the tasks popped from the queue.
167 | 	analyzer := func(id int, task chan string, success chan bool) {
168 | 		for {
169 | 			select {
170 | 			case msg := <-task:
171 | 				if id == 2 {
172 | 					time.Sleep(20 * time.Millisecond)
173 | 				}
174 | 				fmt.Println(id, msg)
175 | 				if msg == "stop" {
176 | 					success <- true
177 | 					return
178 | 				}
179 | 			case <-time.After(2 * time.Second):
180 | 				fmt.Println("no new event for 2 seconds for ID", id)
181 | 				success <- false
182 | 				return
183 | 			}
184 | 		}
185 | 	}
186 | The analyzer closure must be able to accept new tasks without delay and, if needed, process them concurrently. A delay in accepting a new task will block AnalysePool.
187 | */
188 | func (q *Queue) AnalysePool(analyzerID int, exitOnEmpty func() bool, analyzer func(int, chan string, chan bool)) {
189 | 	redisdb, _ := redis.DialURL(q.urls[q.redisID(analyzerID)])
190 |
191 | 	next := make(chan bool, q.analyzerBuff())
192 | 	pool := make(map[int]chan string)
193 | 	for {
194 | 		id, task := q.removeTask(redisdb, q.queueName(analyzerID))
195 | 		if task == "" {
196 | 			if exitOnEmpty() {
197 | 				break
198 | 			} else {
199 | 				time.Sleep(100 * time.Millisecond)
200 | 			}
201 | 		} else {
202 | 			if pool[id] == nil {
203 | 				pool[id] = make(chan string)
204 | 				success := make(chan bool)
205 | 				go analyzer(id, pool[id], success)
206 | 				go q.waitforSuccess(id, success, pool, next)
207 | 				pool[id] <- task
208 | 				next <- true
209 | 			} else {
210 | 				pool[id] <- task
211 | 			}
212 | 		}
213 | 	}
214 |
215 | 	for i := 0; i < q.analyzerBuff(); i++ {
216 | 		next <- true
217 | 	}
218 | }
219 |
220 | func checkErr(e error) {
221 | 	if e != nil {
222 | 		panic(e)
223 | 	}
224 | }
225 |
--------------------------------------------------------------------------------
/queue_test.go:
--------------------------------------------------------------------------------
  1 | package queue
  2 |
  3 | import (
  4 | 	"fmt"
  5 | 	"testing"
  6 | 	"time"
  7 |
  8 | 	"github.com/garyburd/redigo/redis"
  9 | )
 10 |
 11 | var testRedis = "redis://localhost:6379"
 12 |
 13 | func TestQueue_Urls(t *testing.T) {
 14 | 	var q Queue
 15 | 	q.Urls([]string{testRedis})
 16 | 	_, err := q.pool[0].Do("PING")
 17 | 	if err != nil {
 18 | 		t.Error("Urls did not set up the redis connection pool correctly")
 19 | 	}
 20 | }
 21 |
 22 | func TestBadURL(t *testing.T) {
 23 | 	panicked := false
 24 | 	defer func() {
 25 | 		e := recover()
 26 | 		if e != nil {
 27 | 			panicked = true
 28 | 		}
 29 | 	}()
 30 | 	var q Queue
 31 | 	q.Urls([]string{"redis://127.0.0.1:0"})
 32 | 	if !panicked {
 33 | 		t.Error("Accepted a bad URL without panic")
 34 | 	}
 35 | }
 36 |
 37 | func TestQueue_AddTask(t *testing.T) {
 38 | 	var q Queue
 39 | 	q.Urls([]string{testRedis})
 40 | 	redisdb := q.pool[0]
 41 | 	redisdb.Do("FLUSHALL")
 42 | 	q.AddTask(4, "test")
 43 | 	r, e := redisdb.Do("RPOP", "QUEUE::0")
 44 | 	s, e := redis.String(r, e)
 45 | 	if s != "4;test" {
 46 | 		t.Error("Task is stored incorrectly: ", s)
 47 | 	}
 48 | }
 49 |
 50 | func TestQueue_QueueName(t *testing.T) {
 51 | 	var q Queue
 52 | 	if q.queueName(5) != "QUEUE::0" {
 53 | 		t.Error("Queue name is wrong for id 5 and queues default: ", q.queueName(5))
 54 | 	}
 55 |
 56 | 	q.Queues = 1
 57 | 	if q.queueName(5) != "QUEUE::0" {
 58 | 		t.Error("Queue name is wrong for id 5 and queues 1: ", q.queueName(5))
 59 | 	}
 60 |
 61 | 	q.Queues = 2
 62 | 	if q.queueName(5) != "QUEUE::1" {
 63 | 		t.Error("Queue name is wrong for id 5 and queues 2: ", q.queueName(5))
 64 | 	}
 65 | }
 66 |
 67 | func TestQueue_AnalysePool(t *testing.T) {
 68 | 	var q Queue
 69 | 	q.Urls([]string{testRedis})
 70 | 	redisdb := q.pool[0]
 71 | 	redisdb.Do("FLUSHALL")
 72 | 	q.QueueName = "CUSTOM"
 73 | 	q.AddTask(1, "start")
 74 | 	q.AddTask(2, "start")
 75 | 	q.AddTask(1, "stop")
 76 | 	q.AddTask(2, "stop")
 77 | 	analyzer := func(id int, msg_channel chan string, success chan bool) {
 78 | 		for {
 79 | 			select {
 80 | 			case msg := <-msg_channel:
 81 | 				if msg == "stop" {
 82 | 					success <- true
 83 | 					return
 84 | 				}
 85 | 			}
 86 | 		}
 87 | 	}
 88 | 	exitOnEmpty := func() bool {
 89 | 		return true
 90 | 	}
 91 | 	q.AnalysePool(1, exitOnEmpty, analyzer)
 92 | 	r, e := redisdb.Do("LLEN", "QUEUE::0")
 93 | 	s, e := redis.Int64(r, e)
 94 | 	if s != 0 {
 95 | 		t.Error("Queue is not empty after processing tasks: ", s)
 96 | 	}
 97 |
 98 | }
 99 |
100 | func TestAnalysePoolFailurePending(t *testing.T) {
101 | 	var q Queue
102 | 	q.Urls([]string{testRedis})
103 | 	redisdb := q.pool[0]
104 | 	redisdb.Do("FLUSHALL")
105 | 	q.AddTask(1, "start")
106 | 	q.AddTask(2, "start")
107 | 	q.AddTask(1, "stop")
108 | 	analyzer := func(id int, msg_channel chan string, success chan bool) {
109 | 		for {
110 | 			select {
111 | 			case msg := <-msg_channel:
112 | 				if msg == "stop" {
113 | 					success <- true
114 | 					return
115 | 				}
116 | 			case <-time.After(1 * time.Second):
117 | 				fmt.Println("no new event for 1 second for ID", id)
118 | 				success <- false
119 | 				return
120 | 			}
121 | 		}
122 | 	}
123 | 	exitOnEmpty := func() bool {
124 | 		return true
125 | 	}
126 | 	q.AnalysePool(1, exitOnEmpty, analyzer)
127 | 	r, e := redisdb.Do("GET", "QUEUE::0::PENDING::2")
128 | 	s, e := redis.Int(r, e)
129 | 	if s != 1 {
130 | 		t.Error("Task id 2 is not pending: ", s)
131 | 	}
132 |
133 | }
134 |
135 | func TestAnalysePoolCheckingWaiting(t *testing.T) {
136 | 	var q Queue
137 | 	q.AnalyzerBuff = 2
138 | 	q.Urls([]string{testRedis})
139 | 	redisdb := q.pool[0]
140 | 	redisdb.Do("FLUSHALL")
141 | 	q.AddTask(1, "start")
142 | 	q.AddTask(2, "start")
143 | 	q.AddTask(1, "stop")
144 | 	analyzer := func(id int, msg_channel chan string, success chan bool) {
145 | 		for {
146 | 			select {
147 | 			case msg := <-msg_channel:
148 | 				if msg == "stop" {
149 | 					success <- true
150 | 					return
151 | 				}
152 | 			}
153 | 		}
154 | 	}
155 | 	exit := false
156 | 	exitOnEmpty := func() bool {
157 | 		return exit
158 | 	}
159 | 	go q.AnalysePool(1, exitOnEmpty, analyzer)
160 | 	time.Sleep(100 * time.Millisecond)
161 | 	r, e := redisdb.Do("GET", "QUEUE::0::PENDING::2")
162 | 	s, e := redis.Int(r, e)
163 | 	if s != 1 {
164 | 		t.Error("Task id 2 is not pending after queue is empty: ", s)
165 | 	}
166 | 	q.AddTask(2, "stop")
167 | 	time.Sleep(200 * time.Millisecond)
168 | 	r, e = redisdb.Do("GET", "QUEUE::0::PENDING::2")
169 | 	s, e = redis.Int(r, e)
170 | 	if s != 0 {
171 | 		t.Error("Task 2 did not clear: ", s)
172 | 	}
173 | 	exit = true
174 | 	time.Sleep(200 * time.Millisecond)
175 | }
176 |
177 | func BenchmarkQueue_AddTask(b *testing.B) {
178 | 	var q Queue
179 | 	q.Urls([]string{testRedis})
180 | 	q.pool[0].Do("FLUSHALL")
181 | 	b.ResetTimer()
182 | 	for i := 0; i < b.N; i++ {
183 | 		q.AddTask(i, "stop")
184 | 	}
185 | }
186 |
187 | func BenchmarkRemoveTask(b *testing.B) {
188 | 	var q Queue
189 | 	q.Urls([]string{testRedis})
190 | 	b.ResetTimer()
191 | 	for i := 0; i < b.N; i++ {
192 | 		_, s := q.removeTask(q.pool[0], q.queueName(1))
193 | 		if s == "" {
194 | 			panic("Reached an empty queue. Benchmark is not valid:")
195 | 		}
196 | 	}
197 | }
198 |
199 | // This will act both as a test and as an example in the documentation.
200 | func ExampleQueue_AnalysePool() {
201 | 	var q Queue
202 | 	q.Urls([]string{testRedis})
203 | 	q.pool[0].Do("FLUSHALL")
204 | 	q.AddTask(1, "start")
205 | 	q.AddTask(2, "start")
206 | 	q.AddTask(1, "stop")
207 | 	q.AddTask(2, "stop")
208 | 	analyzer := func(id int, msg_channel chan string, success chan bool) {
209 | 		for {
210 | 			select {
211 | 			case msg := <-msg_channel:
212 | 				if id == 2 {
213 | 					time.Sleep(20 * time.Millisecond)
214 | 				}
215 | 				fmt.Println(id, msg)
216 | 				if msg == "stop" {
217 | 					success <- true
218 | 					return
219 | 				}
220 | 			case <-time.After(2 * time.Second):
221 | 				fmt.Println("no new event for 2 seconds for ID", id)
222 | 				success <- false
223 | 				return
224 | 			}
225 | 		}
226 | 	}
227 | 	exitOnEmpty := func() bool {
228 | 		return true
229 | 	}
230 | 	q.AnalysePool(1, exitOnEmpty, analyzer)
231 | 	// Output:
232 | 	// 1 start
233 | 	// 1 stop
234 | 	// 2 start
235 | 	// 2 stop
236 |
237 | }
238 |
--------------------------------------------------------------------------------