├── .github └── workflows │ ├── bench.yml │ ├── cloc.yml │ ├── gorelease.yml │ └── test.yml ├── .gitignore ├── LICENSE ├── README.md ├── batch_consumer.go ├── cleaner.go ├── cleaner_test.go ├── connection.go ├── consumer.go ├── deliveries.go ├── delivery.go ├── errors.go ├── example ├── batch_consumer │ └── main.go ├── cleaner │ └── main.go ├── consumer │ └── main.go ├── handler │ └── main.go ├── producer │ └── main.go ├── purger │ └── main.go └── returner │ └── main.go ├── go.mod ├── go.sum ├── header.go ├── header_test.go ├── queue.go ├── queue_cluster_test.go ├── queue_test.go ├── rand.go ├── redis_client.go ├── redis_keys.go ├── redis_wrapper.go ├── state.go ├── state_string.go ├── stats.go ├── stats_test.go ├── test_batch_consumer.go ├── test_connection.go ├── test_connection_test.go ├── test_consumer.go ├── test_delivery.go ├── test_delivery_test.go ├── test_queue.go ├── test_redis_client.go ├── test_redis_client_test.go ├── test_util.go └── testdata └── create-cluster.sh /.github/workflows/bench.yml: -------------------------------------------------------------------------------- 1 | name: bench 2 | on: 3 | pull_request: 4 | workflow_dispatch: 5 | inputs: 6 | old: 7 | description: 'Old Ref' 8 | required: false 9 | default: 'master' 10 | new: 11 | description: 'New Ref' 12 | required: true 13 | 14 | # Cancel the workflow in progress in newer build is about to start. 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 17 | cancel-in-progress: true 18 | 19 | env: 20 | GO111MODULE: "on" 21 | CACHE_BENCHMARK: "off" # Enables benchmark result reuse between runs, may skew latency results. 22 | RUN_BASE_BENCHMARK: "on" # Runs benchmark for PR base in case benchmark result is missing. 23 | GO_VERSION: 1.19.x 24 | BENCH_COUNT: 6 25 | jobs: 26 | bench: 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: Install Go stable 30 | if: env.GO_VERSION != 'tip' 31 | uses: actions/setup-go@v3 32 | with: 33 | go-version: ${{ env.GO_VERSION }} 34 | 35 | - name: Install Go tip 36 | if: env.GO_VERSION == 'tip' 37 | run: | 38 | curl -sL https://storage.googleapis.com/go-build-snap/go/linux-amd64/$(git ls-remote https://github.com/golang/go.git HEAD | awk '{print $1;}').tar.gz -o gotip.tar.gz 39 | ls -lah gotip.tar.gz 40 | mkdir -p ~/sdk/gotip 41 | tar -C ~/sdk/gotip -xzf gotip.tar.gz 42 | ~/sdk/gotip/bin/go version 43 | echo "PATH=$HOME/go/bin:$HOME/sdk/gotip/bin/:$PATH" >> $GITHUB_ENV 44 | 45 | - name: Checkout code 46 | uses: actions/checkout@v2 47 | with: 48 | ref: ${{ (github.event.inputs.new != '') && github.event.inputs.new || github.event.ref }} 49 | 50 | - name: Go cache 51 | uses: actions/cache@v3 52 | with: 53 | # In order: 54 | # * Module download cache 55 | # * Build cache (Linux) 56 | path: | 57 | ~/go/pkg/mod 58 | ~/.cache/go-build 59 | key: ${{ runner.os }}-go-cache-${{ hashFiles('**/go.sum') }} 60 | restore-keys: | 61 | ${{ runner.os }}-go-cache 62 | 63 | - name: Restore benchstat 64 | uses: actions/cache@v3 65 | with: 66 | path: ~/go/bin/benchstat 67 | key: ${{ runner.os }}-benchstat-legacy 68 | 69 | - name: Restore base benchmark result 70 | id: base-benchmark 71 | if: env.CACHE_BENCHMARK == 'on' 72 | uses: actions/cache@v3 73 | with: 74 | path: | 75 | bench-master.txt 76 | # Use base sha for PR or new commit hash for master/main push in benchmark result key. 
77 | key: ${{ runner.os }}-bench-${{ (github.event.pull_request.base.sha != github.event.after) && github.event.pull_request.base.sha || github.event.after }} 78 | 79 | - name: Run benchmark 80 | run: | 81 | test -s ~/go/bin/benchstat || GOFLAGS= GOBIN=~/go/bin go install golang.org/x/perf/cmd/benchstat@a1b99499bab64a73929f3ed7c2103c28d30e9ac2 82 | export REF_NAME=new 83 | go test -bench=. -benchmem -count=${BENCH_COUNT} -run=^a ./... | tee bench-${REF_NAME}.txt 84 | 85 | - name: Run benchmark for base code 86 | if: env.RUN_BASE_BENCHMARK == 'on' && steps.base-benchmark.outputs.cache-hit != 'true' && (github.event.pull_request.base.sha != '' || github.event.inputs.old != '') 87 | run: | 88 | git fetch origin master ${{ github.event.pull_request.base.sha }} 89 | HEAD=$(git rev-parse HEAD) 90 | git reset --hard ${{ github.event.pull_request.base.sha }} 91 | export REF_NAME=master 92 | go test -bench=. -benchmem -count=$BENCH_COUNT -run=^a ./... | tee bench-$REF_NAME.txt 93 | ~/go/bin/benchstat bench-$REF_NAME.txt 94 | git reset --hard $HEAD 95 | 96 | - name: Benchmark stats 97 | id: bench 98 | run: | 99 | export REF_NAME=new 100 | OUTPUT=$(test -e bench-master.txt && ~/go/bin/benchstat bench-master.txt bench-$REF_NAME.txt || echo "Missing base report.") 101 | echo "${OUTPUT}" 102 | echo "diff<> $GITHUB_OUTPUT && echo "$OUTPUT" >> $GITHUB_OUTPUT && echo "EOF" >> $GITHUB_OUTPUT 103 | OUTPUT=$(~/go/bin/benchstat bench-$REF_NAME.txt) 104 | echo "${OUTPUT}" 105 | echo "result<> $GITHUB_OUTPUT && echo "$OUTPUT" >> $GITHUB_OUTPUT && echo "EOF" >> $GITHUB_OUTPUT 106 | 107 | - name: Comment benchmark result 108 | continue-on-error: true 109 | uses: marocchino/sticky-pull-request-comment@v2 110 | with: 111 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 112 | header: bench 113 | message: | 114 | ### Benchmark Result 115 |
<details><summary>Benchmark diff with base branch</summary> 116 | 117 | ``` 118 | ${{ steps.bench.outputs.diff }} 119 | ``` 120 | </details>
121 | 122 |
<details><summary>Benchmark result</summary> 123 | 124 | ``` 125 | ${{ steps.bench.outputs.result }} 126 | ``` 127 | </details>
128 | -------------------------------------------------------------------------------- /.github/workflows/cloc.yml: -------------------------------------------------------------------------------- 1 | name: cloc 2 | on: 3 | pull_request: 4 | 5 | # Cancel the workflow in progress in newer build is about to start. 6 | concurrency: 7 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 8 | cancel-in-progress: true 9 | 10 | jobs: 11 | cloc: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@v2 16 | with: 17 | path: pr 18 | - name: Checkout base code 19 | uses: actions/checkout@v2 20 | with: 21 | ref: ${{ github.event.pull_request.base.sha }} 22 | path: base 23 | - name: Count lines of code 24 | id: loc 25 | run: | 26 | curl -sLO https://github.com/vearutop/sccdiff/releases/download/v1.0.3/linux_amd64.tar.gz && tar xf linux_amd64.tar.gz 27 | sccdiff_hash=$(git hash-object ./sccdiff) 28 | [ "$sccdiff_hash" == "ae8a07b687bd3dba60861584efe724351aa7ff63" ] || (echo "::error::unexpected hash for sccdiff, possible tampering: $sccdiff_hash" && exit 1) 29 | OUTPUT=$(cd pr && ../sccdiff -basedir ../base) 30 | echo "${OUTPUT}" 31 | echo "diff<> $GITHUB_OUTPUT && echo "$OUTPUT" >> $GITHUB_OUTPUT && echo "EOF" >> $GITHUB_OUTPUT 32 | 33 | - name: Comment lines of code 34 | continue-on-error: true 35 | uses: marocchino/sticky-pull-request-comment@v2 36 | with: 37 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 38 | header: LOC 39 | message: | 40 | ### Lines Of Code 41 | 42 | ${{ steps.loc.outputs.diff }} 43 | -------------------------------------------------------------------------------- /.github/workflows/gorelease.yml: -------------------------------------------------------------------------------- 1 | name: gorelease 2 | on: 3 | pull_request: 4 | 5 | # Cancel the workflow in progress in newer build is about to start. 6 | concurrency: 7 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 8 | cancel-in-progress: true 9 | 10 | env: 11 | GO_VERSION: stable 12 | jobs: 13 | gorelease: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Install Go stable 17 | if: env.GO_VERSION != 'tip' 18 | uses: actions/setup-go@v3 19 | with: 20 | go-version: ${{ env.GO_VERSION }} 21 | - name: Install Go tip 22 | if: env.GO_VERSION == 'tip' 23 | run: | 24 | curl -sL https://storage.googleapis.com/go-build-snap/go/linux-amd64/$(git ls-remote https://github.com/golang/go.git HEAD | awk '{print $1;}').tar.gz -o gotip.tar.gz 25 | ls -lah gotip.tar.gz 26 | mkdir -p ~/sdk/gotip 27 | tar -C ~/sdk/gotip -xzf gotip.tar.gz 28 | ~/sdk/gotip/bin/go version 29 | echo "PATH=$HOME/go/bin:$HOME/sdk/gotip/bin/:$PATH" >> $GITHUB_ENV 30 | - name: Checkout code 31 | uses: actions/checkout@v2 32 | - name: Gorelease cache 33 | uses: actions/cache@v3 34 | with: 35 | path: | 36 | ~/go/bin/gorelease 37 | key: ${{ runner.os }}-gorelease-generic 38 | - name: Gorelease 39 | id: gorelease 40 | run: | 41 | test -e ~/go/bin/gorelease || go install golang.org/x/exp/cmd/gorelease@latest 42 | OUTPUT=$(gorelease 2>&1 || exit 0) 43 | echo "${OUTPUT}" 44 | echo "report<> $GITHUB_OUTPUT && echo "$OUTPUT" >> $GITHUB_OUTPUT && echo "EOF" >> $GITHUB_OUTPUT 45 | - name: Comment report 46 | continue-on-error: true 47 | uses: marocchino/sticky-pull-request-comment@v2 48 | with: 49 | header: gorelease 50 | message: | 51 | ### Go API Changes 52 | 53 |
 54 |             <pre>${{ steps.gorelease.outputs.report }}
 55 |             </pre>
56 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | on: 3 | push: 4 | branches: 5 | - master 6 | - main 7 | pull_request: 8 | 9 | # Cancel the workflow in progress in newer build is about to start. 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 12 | cancel-in-progress: true 13 | 14 | env: 15 | GO111MODULE: "on" 16 | RUN_BASE_COVERAGE: "on" # Runs test for PR base in case base test coverage is missing. 17 | COV_GO_VERSION: 1.19.x # Version of Go to collect coverage 18 | TARGET_DELTA_COV: 90 # Target coverage of changed lines, in percents 19 | REDIS_ADDR: "localhost:6379" 20 | REDIS_CLUSTER_ADDR: "localhost:30001,localhost:30002,localhost:30003,localhost:30004,localhost:30005,localhost:30006" 21 | jobs: 22 | test: 23 | strategy: 24 | matrix: 25 | go-version: [ 1.14.x, 1.15.x, 1.16.x, 1.17.x, 1.18.x, 1.19.x ] 26 | runs-on: ubuntu-latest 27 | 28 | steps: 29 | - name: Install Go 30 | if: matrix.go-version != 'tip' 31 | uses: actions/setup-go@v3 32 | with: 33 | go-version: ${{ matrix.go-version }} 34 | 35 | - name: Checkout code 36 | uses: actions/checkout@v2 37 | 38 | - name: Go cache 39 | uses: actions/cache@v3 40 | with: 41 | # In order: 42 | # * Module download cache 43 | # * Build cache (Linux) 44 | path: | 45 | ~/go/pkg/mod 46 | ~/.cache/go-build 47 | key: ${{ runner.os }}-go-cache-${{ hashFiles('**/go.sum') }} 48 | restore-keys: | 49 | ${{ runner.os }}-go-cache 50 | 51 | - name: Restore base test coverage 52 | id: base-coverage 53 | if: matrix.go-version == env.COV_GO_VERSION && github.event.pull_request.base.sha != '' 54 | uses: actions/cache@v3 55 | with: 56 | path: | 57 | unit-base.txt 58 | # Use base sha for PR or new commit hash for master/main push in test result key. 59 | key: ${{ runner.os }}-unit-test-coverage-${{ (github.event.pull_request.base.sha != github.event.after) && github.event.pull_request.base.sha || github.event.after }} 60 | 61 | - name: Prepare Redis 62 | run: | 63 | sudo apt-get update && sudo apt-get install -y lsb-release curl gpg 64 | curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg 65 | echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list 66 | sudo apt-get update && sudo apt-get install -y redis 67 | ./testdata/create-cluster.sh start 68 | yes yes | ./testdata/create-cluster.sh create 69 | sleep 5 70 | 71 | - name: Run test for base code 72 | if: matrix.go-version == env.COV_GO_VERSION && env.RUN_BASE_COVERAGE == 'on' && steps.base-coverage.outputs.cache-hit != 'true' && github.event.pull_request.base.sha != '' 73 | run: | 74 | git fetch origin master ${{ github.event.pull_request.base.sha }} 75 | HEAD=$(git rev-parse HEAD) 76 | git reset --hard ${{ github.event.pull_request.base.sha }} 77 | go test -coverprofile=unit.coverprofile -covermode=atomic ./... && go tool cover -func=./unit.coverprofile > unit-base.txt 78 | git reset --hard $HEAD 79 | 80 | - name: Test 81 | id: test 82 | run: | 83 | go test -race -coverprofile=unit.coverprofile -covermode=atomic ./... 
84 | go tool cover -func=./unit.coverprofile > unit.txt 85 | TOTAL=$(grep 'total:' unit.txt) 86 | echo "${TOTAL}" 87 | echo "total=$TOTAL" >> $GITHUB_OUTPUT 88 | 89 | - name: Annotate missing test coverage 90 | id: annotate 91 | if: matrix.go-version == env.COV_GO_VERSION && github.event.pull_request.base.sha != '' 92 | run: | 93 | curl -sLO https://github.com/vearutop/gocovdiff/releases/download/v1.3.6/linux_amd64.tar.gz && tar xf linux_amd64.tar.gz 94 | gocovdiff_hash=$(git hash-object ./gocovdiff) 95 | [ "$gocovdiff_hash" == "8e507e0d671d4d6dfb3612309b72b163492f28eb" ] || (echo "::error::unexpected hash for gocovdiff, possible tampering: $gocovdiff_hash" && exit 1) 96 | git fetch origin master ${{ github.event.pull_request.base.sha }} 97 | REP=$(./gocovdiff -cov unit.coverprofile -gha-annotations gha-unit.txt -delta-cov-file delta-cov-unit.txt -target-delta-cov ${TARGET_DELTA_COV}) 98 | echo "${REP}" 99 | cat gha-unit.txt 100 | DIFF=$(test -e unit-base.txt && ./gocovdiff -func-cov unit.txt -func-base-cov unit-base.txt || echo "Missing base coverage file.") 101 | TOTAL=$(cat delta-cov-unit.txt) 102 | echo "rep<> $GITHUB_OUTPUT && echo "$REP" >> $GITHUB_OUTPUT && echo "EOF" >> $GITHUB_OUTPUT 103 | echo "diff<> $GITHUB_OUTPUT && echo "$DIFF" >> $GITHUB_OUTPUT && echo "EOF" >> $GITHUB_OUTPUT 104 | echo "total<> $GITHUB_OUTPUT && echo "$TOTAL" >> $GITHUB_OUTPUT && echo "EOF" >> $GITHUB_OUTPUT 105 | 106 | - name: Comment test coverage 107 | continue-on-error: true 108 | if: matrix.go-version == env.COV_GO_VERSION && github.event.pull_request.base.sha != '' 109 | uses: marocchino/sticky-pull-request-comment@v2 110 | with: 111 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 112 | header: unit-test 113 | message: | 114 | ### Unit Test Coverage 115 | ${{ steps.test.outputs.total }} 116 | ${{ steps.annotate.outputs.total }} 117 |
<details><summary>Coverage of changed lines</summary> 118 | 119 | ${{ steps.annotate.outputs.rep }} 120 | 121 | </details>
122 | 123 |
<details><summary>Coverage diff with base branch</summary> 124 | 125 | ${{ steps.annotate.outputs.diff }} 126 | 127 | </details>
128 | 129 | - name: Store base coverage 130 | if: ${{ github.ref == 'refs/heads/master' || github.ref == 'refs/heads/main' }} 131 | run: cp unit.txt unit-base.txt 132 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | /vendor 10 | 11 | # Architecture specific extensions/prefixes 12 | *.[568vq] 13 | [568vq].out 14 | 15 | *.cgo1.go 16 | *.cgo2.c 17 | _cgo_defun.c 18 | _cgo_gotypes.go 19 | _cgo_export.* 20 | 21 | _testmain.go 22 | 23 | *.exe 24 | *.test 25 | *.prof 26 | 27 | .idea -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 adjust 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://github.com/adjust/rmq/workflows/test/badge.svg)](https://github.com/adjust/rmq/actions?query=branch%3Amaster+workflow%3Atest) 2 | [![GoDoc](https://pkg.go.dev/badge/github.com/adjust/rmq)](https://pkg.go.dev/github.com/adjust/rmq) 3 | 4 | ## Overview 5 | 6 | rmq is short for Redis message queue. It's a message queue system written in Go 7 | and backed by Redis. 8 | 9 | ## Basic Usage 10 | 11 | Let's take a look at how to use rmq. 12 | 13 | ### Import 14 | 15 | Of course you need to import rmq wherever you want to use it. 16 | 17 | ```go 18 | import "github.com/adjust/rmq/v5" 19 | ``` 20 | 21 | ### Connection 22 | 23 | Before we get to queues, we first need to establish a connection. Each rmq 24 | connection has a name (used in statistics) and Redis connection details 25 | including which Redis database to use. 
The most basic Redis connection uses a 26 | TCP connection to a given host and a port: 27 | 28 | ```go 29 | connection, err := rmq.OpenConnection("my service", "tcp", "localhost:6379", 1, errChan) 30 | ``` 31 | 32 | It's also possible to access a Redis listening on a Unix socket: 33 | 34 | ```go 35 | connection, err := rmq.OpenConnection("my service", "unix", "/tmp/redis.sock", 1, errChan) 36 | ``` 37 | 38 | For more flexible setup you can pass Redis options or create your own Redis client: 39 | 40 | ```go 41 | connection, err := OpenConnectionWithRedisOptions("my service", redisOptions, errChan) 42 | ``` 43 | 44 | ```go 45 | connection, err := OpenConnectionWithRedisClient("my service", redisClient, errChan) 46 | ``` 47 | 48 | If the Redis instance can't be reached you will receive an error indicating this. 49 | 50 | Please also note the `errChan` parameter. There is some rmq logic running in 51 | the background which can run into Redis errors. If you pass an error channel to 52 | the `OpenConnection()` functions rmq will send those background errors to this 53 | channel so you can handle them asynchronously. For more details about this and 54 | handling suggestions see the section about handling background errors below. 55 | 56 | #### Connecting to a Redis cluster 57 | 58 | In order to connect to a Redis cluster please use `OpenClusterConnection()`: 59 | 60 | ```go 61 | redisClusterOptions := &redis.ClusterOptions{ /* ... */ } 62 | redisClusterClient := redis.NewClusterClient(redisClusterOptions) 63 | connection, err := OpenClusterConnection("my service", redisClusterClient, errChan) 64 | ``` 65 | 66 | Note that such an rmq cluster connection uses different Redis than rmq connections 67 | opened by `OpenConnection()` or similar. If you have used a Redis instance 68 | with `OpenConnection()` then it is NOT SAFE to reuse that rmq system by connecting 69 | to it via `OpenClusterConnection()`. The cluster state won't be compatible and 70 | this will likely lead to data loss. 71 | 72 | If you've previously used `OpenConnection()` or similar you should only consider 73 | using `OpenClusterConnection()` with a fresh Redis cluster. 74 | 75 | ### Queues 76 | 77 | Once we have a connection we can use it to finally access queues. Each queue 78 | must have a unique name by which we address it. Queues are created once they 79 | are accessed. There is no need to declare them in advance. Here we open a queue 80 | named "tasks": 81 | 82 | ```go 83 | taskQueue, err := connection.OpenQueue("tasks") 84 | ``` 85 | 86 | Again, possibly Redis errors might be returned. 87 | 88 | ### Producers 89 | 90 | An empty queue is boring, let's add some deliveries! Internally all deliveries 91 | are saved to Redis lists as strings. This is how you can publish a string 92 | payload to a queue: 93 | 94 | ```go 95 | delivery := "task payload" 96 | err := taskQueue.Publish(delivery) 97 | ``` 98 | 99 | In practice, however, it's more common to have instances of some struct that we 100 | want to publish to a queue. Assuming `task` is of some type like `Task`, this 101 | is how to publish the JSON representation of that task: 102 | 103 | ```go 104 | // create task 105 | taskBytes, err := json.Marshal(task) 106 | if err != nil { 107 | // handle error 108 | } 109 | 110 | err = taskQueue.PublishBytes(taskBytes) 111 | ``` 112 | 113 | For a full example see [`example/producer`][producer.go]. 
114 | 115 | [producer.go]: example/producer/main.go 116 | 117 | ### Consumers 118 | 119 | Now that our queue starts filling, let's add a consumer. After opening the 120 | queue as before, we need it to start consuming before we can add consumers. 121 | 122 | ```go 123 | err := taskQueue.StartConsuming(10, time.Second) 124 | ``` 125 | 126 | This sets the prefetch limit to 10 and the poll duration to one second. This 127 | means the queue will fetch up to 10 deliveries at a time before giving them to 128 | the consumers. To avoid idling consumers while the queues are full, the 129 | prefetch limit should always be greater than the number of consumers you are 130 | going to add. If the queue gets empty, the poll duration sets how long rmq will 131 | wait before checking for new deliveries in Redis. 132 | 133 | Once this is set up, we can actually add consumers to the consuming queue. 134 | 135 | ```go 136 | taskConsumer := &TaskConsumer{} 137 | name, err := taskQueue.AddConsumer("task-consumer", taskConsumer) 138 | ``` 139 | 140 | To uniquely identify each consumer internally rmq creates a random name with 141 | the given prefix. For example in this case `name` might be 142 | `task-consumer-WB1zaq`. This name is only used in statistics. 143 | 144 | In our example above the injected `taskConsumer` (of type `*TaskConsumer`) must 145 | implement the `rmq.Consumer` interface. For example: 146 | 147 | ```go 148 | func (consumer *TaskConsumer) Consume(delivery rmq.Delivery) { 149 | var task Task 150 | if err = json.Unmarshal([]byte(delivery.Payload()), &task); err != nil { 151 | // handle json error 152 | if err := delivery.Reject(); err != nil { 153 | // handle reject error 154 | } 155 | return 156 | } 157 | 158 | // perform task 159 | log.Printf("performing task %s", task) 160 | if err := delivery.Ack(); err != nil { 161 | // handle ack error 162 | } 163 | } 164 | ``` 165 | 166 | First we unmarshal the JSON package found in the delivery payload. If this 167 | fails we reject the delivery. Otherwise we perform the task and ack the 168 | delivery. 169 | 170 | If you don't actually need a consumer struct you can use `AddConsumerFunc` 171 | instead and pass a consumer function which handles an `rmq.Delivery`: 172 | 173 | ```go 174 | name, err := taskQueue.AddConsumerFunc(func(delivery rmq.Delivery) { 175 | // handle delivery and call Ack() or Reject() on it 176 | }) 177 | ``` 178 | 179 | Please note that `delivery.Ack()` and similar functions have a built-in retry 180 | mechanism which will block your consumers in some cases. This is because 181 | failing to acknowledge a delivery is potentially dangerous. For details 182 | see the section about background errors below. 183 | 184 | For a full example see [`example/consumer`][consumer.go]. 185 | 186 | #### Consumer Lifecycle 187 | 188 | As described above you can add consumers to a queue. For each consumer rmq 189 | takes one of the prefetched unacked deliveries from the delivery channel and 190 | passes it to the consumer's `Consume()` function. The next delivery will only 191 | be passed to the same consumer once the prior `Consume()` call returns. So each 192 | consumer will only be consuming a single delivery at any given time. 193 | 194 | Furthermore each `Consume()` call is expected to call either `delivery.Ack()`, 195 | `delivery.Reject()` or `delivery.Push()` (see below). If that's not the case 196 | these deliveries will remain unacked and the prefetch goroutine won't make 197 | progress after a while. 
So make sure you always call exactly one of those 198 | functions in your `Consume()` implementations. 199 | 200 | [consumer.go]: example/consumer/main.go 201 | 202 | ## Background Errors 203 | 204 | It's recommended to inject an error channel into the `OpenConnection()` 205 | functions. This section describes its purpose and how you might use it to 206 | monitor rmq background Redis errors. 207 | 208 | There are three sources of background errors which rmq detects (and handles 209 | internally): 210 | 211 | 1. The `OpenConnection()` functions spawn a goroutine which keeps a heartbeat 212 | Redis key alive. This is important so that the cleaner (see below) can tell 213 | which connections are still alive and must not be cleaned yet. If the 214 | heartbeat goroutine fails to update the heartbeat Redis key repeatedly for 215 | too long the cleaner might clean up the connection prematurely. To avoid 216 | this the connection will automatically stop all consumers after 45 217 | consecutive heartbeat errors. This magic number is based on the details of 218 | the heartbeat key: The heartbeat tries to update the key every second with a 219 | TTL of one minute. So only after 60 failed attempts would the heartbeat key 220 | be dead. 221 | 222 | Every time this goroutine runs into a Redis error it gets sent to the error 223 | channel as `HeartbeatError`. 224 | 225 | 2. The `StartConsuming()` function spawns a goroutine which is responsible for 226 | prefetching deliveries from the Redis `ready` list and moving them into a 227 | delivery channel. This delivery channel feeds into your consumers' 228 | `Consume()` functions. If the prefetch goroutine runs into Redis errors this 229 | basically means that there won't be new deliveries being sent to your 230 | consumers until it can fetch new ones. So these Redis errors are not 231 | dangerous, it just means that your consumers will start idling until the 232 | Redis connection recovers. 233 | 234 | Every time this goroutine runs into a Redis error it gets sent to the error 235 | channel as `ConsumeError`. 236 | 237 | 3. The delivery functions `Ack()`, `Reject()` and `Push()` have a built-in 238 | retry mechanism. This is because failing to acknowledge a delivery 239 | is potentially dangerous. The consumer has already handled the delivery, so 240 | if it can't ack it the cleaner might end up moving it back to the ready list 241 | so another consumer might end up consuming it again in the future, leading 242 | to double delivery. 243 | 244 | So if a delivery failed to be acked because of a Redis error the `Ack()` 245 | call will block and retry once a second until it either succeeds or until 246 | consuming gets stopped (see below). In the latter case the `Ack()` call will 247 | return `rmq.ErrorConsumingStopped` which you should handle in your consume 248 | function. For example you might want to log the delivery so you can 249 | manually remove it from the unacked or ready list before you start new 250 | consumers. Or at least you can know which deliveries might end up being 251 | consumed twice. 252 | 253 | Every time these functions run into a Redis error it gets sent to the error 254 | channel as `DeliveryError`. 255 | 256 | Each of those error types has a field `Count` which tells you how often the 257 | operation failed consecutively. This indicates for how long the affected Redis 258 | instance has been unavailable.
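To make this concrete, here is a minimal sketch of one way to drain such an error channel. The buffer size, the plain logging and the assumption that the errors arrive as the pointer types named above are illustrative choices; adapt the handling to your service:

```go
errChan := make(chan error, 10)
connection, err := rmq.OpenConnection("my service", "tcp", "localhost:6379", 1, errChan)
if err != nil {
	// handle error
}

go func() {
	for err := range errChan {
		switch err := err.(type) {
		case *rmq.HeartbeatError:
			if err.Count == rmq.HeartbeatErrorLimit {
				// consuming on this connection has stopped, see the advice below
				log.Print("heartbeat error limit reached: ", err)
			} else {
				log.Print("heartbeat error: ", err)
			}
		case *rmq.ConsumeError:
			log.Print("consume error: ", err)
		case *rmq.DeliveryError:
			log.Print("delivery error: ", err)
		default:
			log.Print("other error: ", err)
		}
	}
}()
```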
One general way of using this information might 259 | be to have metrics about the error types including the error count so you can 260 | keep track of how stable your Redis instances and connections are. By 261 | monitoring this you might learn about instabilities before they affect your 262 | services in significant ways. 263 | 264 | Below is some more specific advice on handling the different error cases 265 | outlined above. Keep in mind though that all of those errors are likely to 266 | happen at the same time, as Redis tends to be up or down completely. But if 267 | you're using multi Redis instance setup like [nutcracker][nutcracker] you might 268 | see some of them in isolation from the others. 269 | 270 | 1. `HeartbeatErrors`: Once `err.Count` equals `HeartbeatErrorLimit` you should 271 | know that the consumers of this connection will stop consuming. And they 272 | won't restart consuming on their own. This is a condition you should closely 273 | monitor because this means you will have to restart your service in order to 274 | resume consuming. Before restarting you should check your Redis instance. 275 | 276 | 2. `ConsumeError`: These are mostly informational. As long as those errors keep 277 | happening the consumers will effectively be paused. But once these 278 | operations start succeeding again the consumers will resume consumers on 279 | their own. 280 | 281 | 3. `DeliveryError`: When you see deliveries failing to ack repeatedly this also 282 | means your consumers won't make progress as they will keep retrying to ack 283 | pending deliveries before starting to consume new ones. As long as this 284 | keeps happening you should avoid stopping the service if you can. That is 285 | because the already consumed by not yet unacked deliveries will be returned 286 | to `ready` be the cleaner afterwards, which leads to double delivery. So 287 | ideally you try to get Redis connection up again as long as the deliveries 288 | are still trying to ack. Once acking works again it's safe to restart again. 289 | 290 | More realistically, if you still need to stop the service when Redis is 291 | down, keep in mind that calling `StopConsuming()` will make the blocking 292 | `Ack()` calls return with `ErrorConsumingStopped`, so you can handle that 293 | case to make an attempt to either avoid the double delivery or at least 294 | track it for future investigation. 295 | 296 | [nutcracker]: https://github.com/twitter/twemproxy 297 | 298 | ## Advanced Usage 299 | 300 | ### Batch Consumers 301 | 302 | Sometimes it's useful to have consumers work on batches of deliveries instead 303 | of individual ones. For example for bulk database inserts. In those cases you 304 | can use `AddBatchConsumer()`: 305 | 306 | ```go 307 | batchConsumer := &MyBatchConsumer{} 308 | name, err := taskQueue.AddBatchConsumer("my-consumer", 100, time.Second, batchConsumer) 309 | ``` 310 | 311 | In this example we create a batch consumer which will receive batches of up to 312 | 100 deliveries. We set the `batchTimeout` to one second, so if there are less 313 | than 100 deliveries per second we will still consume at least one batch per 314 | second (which would contain less than 100 deliveries). 315 | 316 | The `rmq.BatchConsumer` interface is very similar to `rmq.Consumer`. 
317 | 318 | ```go 319 | func (consumer *MyBatchConsumer) Consume(batch rmq.Deliveries) { 320 | payloads := batch.Payloads() 321 | // handle payloads 322 | if errors := batch.Ack(); len(errors) > 0 { 323 | // handle ack errors 324 | } 325 | } 326 | ``` 327 | 328 | Note that `batch.Ack()` acknowledges all deliveries in the batch. It's also 329 | possible to ack some of the deliveries and reject the rest. It uses the same 330 | retry mechanism per delivery as discussed above. If some of the deliveries 331 | continue to fail to ack when consuming gets stopped (see below), then 332 | `batch.Ack()` will return an error map `map[int]error`. For each entry in this 333 | map the key will be the index of the delivery which failed to ack and the value 334 | will be the error it ran into. That way you can map the errors back to the 335 | deliveries to know which deliveries are at risk of being consumed again in the 336 | future as discussed above. 337 | 338 | For a full example see [`example/batch_consumer`][batch_consumer.go]. 339 | 340 | [batch_consumer.go]: example/batch_consumer/main.go 341 | 342 | ### Push Queues 343 | 344 | Another thing which can be useful is a mechanism for retries. Let's say you 345 | have tasks which can fail for external reasons but you'd like to retry them a 346 | few times after a while before you give up. In that case you can set up a chain 347 | of push queues like this: 348 | 349 | ``` 350 | incomingQ -> pushQ1 -> pushQ2 351 | ``` 352 | 353 | In the queue setup code it would look like this (error handling omitted for 354 | brevity): 355 | 356 | ```go 357 | incomingQ, err := connection.OpenQueue("incomingQ") 358 | pushQ1, err := connection.OpenQueue("pushQ1") 359 | pushQ2, err := connection.OpenQueue("pushQ2") 360 | incomingQ.SetPushQueue(pushQ1) 361 | pushQ1.SetPushQueue(pushQ2) 362 | _, err := incomingQ.AddConsumer("incomingQ", NewConsumer()) 363 | _, err := pushQ1.AddConsumer("pushQ1", NewConsumer()) 364 | _, err := pushQ2.AddConsumer("pushQ2", NewConsumer()) 365 | ``` 366 | 367 | If you have set up your queues like this, you can now call `delivery.Push()` in 368 | your `Consume()` function to push the delivery from the consuming queue to the 369 | associated push queue. So if consumption fails on `incomingQ`, then the 370 | delivery would be moved to `pushQ1` and so on. If you have the consumers wait 371 | until the deliveries have a certain age you can use this pattern to retry after 372 | certain durations. 373 | 374 | Note that `delivery.Push()` has the same affect as `delivery.Reject()` if the 375 | queue has no push queue set up. So in our example above, if the delivery fails 376 | in the consumer on `pushQ2`, then the `Push()` call will reject the delivery. 377 | 378 | ### Stop Consuming 379 | 380 | If you want to stop consuming from the queue, you can call `StopConsuming()`: 381 | 382 | ```go 383 | finishedChan := taskQueue.StopConsuming() 384 | ``` 385 | 386 | When `StopConsuming()` is called, it will immediately stop fetching more 387 | deliveries from Redis and won't send any more of the already prefetched 388 | deliveries to consumers. 389 | 390 | In the background it will make pending `Ack()` calls return 391 | `rmq.ErrorConsumingStopped` if they still run into Redis errors (see above) and 392 | wait for all consumers to finish consuming their current delivery before 393 | closing the returned `finishedChan`. 
So while `StopConsuming()` returns 394 | immediately, you can wait on the returned channel until all consumers are done: 395 | 396 | ```go 397 | <-finishedChan 398 | ``` 399 | 400 | You can also stop consuming on all queues in your connection: 401 | 402 | ```go 403 | finishedChan := connection.StopAllConsuming() 404 | ``` 405 | 406 | Wait on the `finishedChan` to wait for all consumers on all queues to finish. 407 | 408 | This is useful to implement a graceful shutdown of a consumer service. Please 409 | note that after calling `StopConsuming()` the queue might not be in a state 410 | where you can add consumers and call `StartConsuming()` again. If you have a 411 | use case where you actually need that sort of flexibility, please let us know. 412 | Currently for each queue you are only supposed to call `StartConsuming()` and 413 | `StopConsuming()` at most once. 414 | 415 | Also note that `StopAllConsuming()` will stop the heartbeat for this connection. 416 | It's advised to also not publish to any queue opened by this connection anymore. 417 | 418 | ### Return Rejected Deliveries 419 | 420 | Even if you don't have a push queue setup there are cases where you need to 421 | consume previously failed deliveries again. For example an external dependency 422 | might have an issue or you might have deployed a broken consumer service which 423 | rejects all deliveries for some reason. 424 | 425 | In those cases you would wait for the external party to recover or fix your 426 | mistake to get ready to reprocess the deliveries again. Now you can return the 427 | deliveries by opening affected queue and call `ReturnRejected()`: 428 | 429 | ```go 430 | returned, err := queue.ReturnRejected(10000) 431 | ``` 432 | 433 | In this case we ask rmq to return up to 10k deliveries from the `rejected` list 434 | to the `ready` list. To return all of them you can pass `math.MaxInt64`. 435 | 436 | If there was no error it returns the number of deliveries that were moved. 437 | 438 | If you find yourself doing this regularly on some queues consider setting up a 439 | push queue to automatically retry failed deliveries regularly. 440 | 441 | See [`example/returner`][returner.go] 442 | 443 | [returner.go]: example/returner/main.go 444 | 445 | ### Purge Rejected Deliveries 446 | 447 | You might run into the case where you have rejected deliveries which you don't 448 | intend to retry again for one reason or another. In those cases you can clear 449 | the full `rejected` list by calling `PurgeRejected()`: 450 | 451 | ```go 452 | count, err := queue.PurgeRejected() 453 | ``` 454 | 455 | It returns the number of purged deliveries. 456 | 457 | Similarly, there's a function to clear the `ready` list of deliveries: 458 | 459 | ```go 460 | count, err := queue.PurgeReady() 461 | ``` 462 | 463 | See [`example/purger`][purger.go]. 464 | 465 | [purger.go]: example/purger/main.go 466 | 467 | ### Cleaner 468 | 469 | You should regularly run a queue cleaner to make sure no unacked deliveries are 470 | stuck in the queue system. The background is that a consumer service prefetches 471 | deliveries by moving them from the `ready` list to an `unacked` list associated 472 | with the queue connection. If the consumer dies by crashing or even by being 473 | gracefully shut down by calling `StopConsuming()`, the unacked deliveries will 474 | remain in that Redis list. 
 475 | 476 | If you run a queue cleaner regularly it will detect queue connections whose 477 | heartbeat expired and will clean up all their consumer queues by moving their 478 | unacked deliveries back to the `ready` list. 479 | 480 | Although it should be safe to run multiple cleaners, it's recommended to run 481 | exactly one instance per queue system and have it trigger the cleaning process 482 | regularly, like once a minute. 483 | 484 | See [`example/cleaner`][cleaner.go]. 485 | 486 | [cleaner.go]: example/cleaner/main.go 487 | 488 | ### Header 489 | 490 | The Redis protocol does not define a specific way to pass additional data like headers. 491 | However, there is often a need to pass them (for example for trace propagation). 492 | 493 | This implementation injects optional header values marked with a signature into 494 | the payload body during publishing. When a message is consumed and the signature is present, 495 | the header and the original payload are extracted from the augmented payload. 496 | 497 | The header is defined as `http.Header` for better interoperability with existing libraries, 498 | for example with [`propagation.HeaderCarrier`](https://pkg.go.dev/go.opentelemetry.io/otel/propagation#HeaderCarrier). 499 | 500 | ```go 501 | // .... 502 | 503 | h := make(http.Header) 504 | h.Set("X-Baz", "quux") 505 | 506 | // You can add a header to your payload during publish. 507 | _ = pub.Publish(rmq.PayloadWithHeader(`{"foo":"bar"}`, h)) 508 | 509 | // .... 510 | 511 | _, _ = con.AddConsumerFunc("tag", func(delivery rmq.Delivery) { 512 | // And receive the header back in the consumer. 513 | delivery.(rmq.WithHeader).Header().Get("X-Baz") // "quux" 514 | 515 | // .... 516 | }) 517 | ``` 518 | 519 | Adding a header is an explicit opt-in operation and so it does not affect the library's 520 | backwards compatibility by default (when not used). 521 | 522 | Please note that adding a header may lead to compatibility issues if: 523 | * a consumer is built with an older version of `rmq` while the publisher has already 524 | started using headers; this can be avoided by upgrading consumers before publishers; 525 | * a consumer is not using `rmq` (other libs, low-level tools like `redis-cli`) and is 526 | not aware of the payload format extension. 527 | 528 | ## Testing Included 529 | 530 | To simplify testing of queue producers and consumers we include test mocks. 531 | 532 | ### Test Connection 533 | 534 | As before, we first need a queue connection, but this time we use a 535 | `rmq.TestConnection` that doesn't need any connection settings. 536 | 537 | ```go 538 | testConn := rmq.NewTestConnection() 539 | ``` 540 | 541 | If you are using a testing framework that uses test suites, you can reuse that 542 | test connection by setting it up once for the suite and resetting it with 543 | `testConn.Reset()` before each test. 544 | 545 | ### Producer Tests 546 | 547 | Now let's say we want to test the function `publishTask()` that creates a task 548 | and publishes it to a queue from that connection. 549 | 550 | ```go 551 | // call the function that should publish a task 552 | publishTask(testConn) 553 | 554 | // check that the task is published 555 | assert.Equal(t, "task payload", suite.testConn.GetDelivery("tasks", 0)) 556 | ``` 557 | 558 | The `assert.Equal` part is from [testify][testify], but it will look similar 559 | for other testing frameworks. Given a `rmq.TestConnection`, we can check the 560 | deliveries that were published to its queues (since the last `Reset()` call) 561 | with `GetDelivery(queueName, index)`. In this case we want to extract the first
In this case we want to extract the first 562 | (and possibly only) delivery that was published to queue `tasks` and just check 563 | the payload string. 564 | 565 | If the payload is JSON again, the unmarshalling and check might look like this: 566 | 567 | ```go 568 | var task Task 569 | err := json.Unmarshal([]byte(suite.testConn.GetDelivery("tasks", 0)), &task) 570 | assert.NoError(t, err) 571 | assert.NotNil(t, task) 572 | assert.Equal(t, "value", task.Property) 573 | ``` 574 | 575 | If you expect a producer to create multiple deliveries you can use different 576 | indexes to access them all. 577 | 578 | ```go 579 | assert.Equal(t, "task1", suite.testConn.GetDelivery("tasks", 0)) 580 | assert.Equal(t, "task2", suite.testConn.GetDelivery("tasks", 1)) 581 | ``` 582 | 583 | For convenience there's also a function `GetDeliveries` that returns all 584 | published deliveries to a queue as string array. 585 | 586 | ```go 587 | assert.Equal(t, []string{"task1", "task2"}, suite.testConn.GetDeliveries("tasks")) 588 | ``` 589 | 590 | These examples assume that you inject the `rmq.Connection` into your testable 591 | functions. If you inject instances of `rmq.Queue` instead, you can use 592 | `rmq.TestQueue` instances in tests and access their `LastDeliveries` (since 593 | `Reset()`) directly. 594 | 595 | [testify]: https://github.com/stretchr/testify 596 | 597 | ### Consumer Tests 598 | 599 | Testing consumers is a bit easier because consumers must implement the 600 | `rmq.Consumer` interface. In the tests just create an `rmq.TestDelivery` and 601 | pass it to your `Consume()` function. This example creates a test delivery from 602 | a string and then checks that the delivery was acked. 603 | 604 | ```go 605 | consumer := &TaskConsumer{} 606 | delivery := rmq.NewTestDeliveryString("task payload") 607 | 608 | consumer.Consume(delivery) 609 | 610 | assert.Equal(t, rmq.Acked, delivery.State) 611 | ``` 612 | 613 | The `State` field will always be one of these values: 614 | 615 | - `rmq.Acked`: The delivery was acked 616 | - `rmq.Rejected`: The delivery was rejected 617 | - `rmq.Pushed`: The delivery was pushed (see below) 618 | - `rmq.Unacked`: Nothing of the above 619 | 620 | If your packages are JSON marshalled objects, then you can create test 621 | deliveries out of those like this: 622 | 623 | ```go 624 | task := Task{Property: "bad value"} 625 | delivery := rmq.NewTestDelivery(task) 626 | ``` 627 | 628 | ### Integration Tests 629 | 630 | If you want to write integration tests which exercise both producers and 631 | consumers at the same time, you can use the 632 | `rmq.OpenConnectionWithTestRedisClient` constructor. It returns a real 633 | `rmq.Connection` instance which is backed by an in-memory Redis client 634 | implementation. That way it behaves exactly as in production, just without the 635 | durability of a real Redis client. Don't use this in production! 636 | 637 | ## Statistics 638 | 639 | Given a connection, you can call `connection.CollectStats()` to receive 640 | `rmq.Stats` about all open queues, connections and consumers. If you run 641 | [`example/handler`][handler.go] you can see what's available: 642 | 643 | 644 | 645 | In this example you see 5 connections consuming `task_kind1`, each with 5 646 | consumers. They have a total of 1007 packages unacked. Below the marker you see 647 | connections which are not consuming. One of the handler connections died 648 | because I stopped the handler. Running the cleaner would clean that up (see 649 | below). 
650 | 651 | [handler.go]: example/handler/main.go 652 | 653 | ### Prometheus 654 | 655 | If you are using Prometheus, [rmqprom](https://github.com/pffreitas/rmqprom) 656 | collects statistics about all open queues and exposes them as Prometheus 657 | metrics. 658 | -------------------------------------------------------------------------------- /batch_consumer.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | type BatchConsumer interface { 4 | Consume(batch Deliveries) 5 | } 6 | 7 | type BatchConsumerFunc func(Deliveries) 8 | 9 | func (batchConsumerFunc BatchConsumerFunc) Consume(batch Deliveries) { 10 | batchConsumerFunc(batch) 11 | } 12 | -------------------------------------------------------------------------------- /cleaner.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import "math" 4 | 5 | type Cleaner struct { 6 | connection Connection 7 | } 8 | 9 | func NewCleaner(connection Connection) *Cleaner { 10 | return &Cleaner{connection: connection} 11 | } 12 | 13 | // Clean cleans the connection of the cleaner. This is useful to make sure no 14 | // deliveries get lost. The main use case is if your consumers get restarted 15 | // there will be unacked deliveries assigned to the connection. Once the 16 | // heartbeat of that connection dies the cleaner can recognize that and remove 17 | // those unacked deliveries back to the ready list. If there was no error it 18 | // returns the number of deliveries which have been returned from unacked lists 19 | // to ready lists across all cleaned connections and queues. 20 | func (cleaner *Cleaner) Clean() (returned int64, err error) { 21 | connectionNames, err := cleaner.connection.getConnections() 22 | if err != nil { 23 | return 0, err 24 | } 25 | 26 | for _, connectionName := range connectionNames { 27 | hijackedConnection := cleaner.connection.hijackConnection(connectionName) 28 | switch err := hijackedConnection.checkHeartbeat(); err { 29 | case nil: // active connection 30 | continue 31 | case ErrorNotFound: 32 | n, err := cleanStaleConnection(hijackedConnection) 33 | if err != nil { 34 | return 0, err 35 | } 36 | returned += n 37 | default: 38 | return 0, err 39 | } 40 | } 41 | 42 | return returned, nil 43 | } 44 | 45 | func cleanStaleConnection(staleConnection Connection) (returned int64, err error) { 46 | queueNames, err := staleConnection.getConsumingQueues() 47 | if err != nil { 48 | return 0, err 49 | } 50 | 51 | for _, queueName := range queueNames { 52 | queue, err := staleConnection.OpenQueue(queueName) 53 | if err != nil { 54 | return 0, err 55 | } 56 | 57 | n, err := cleanQueue(queue) 58 | if err != nil { 59 | return 0, err 60 | } 61 | 62 | returned += n 63 | } 64 | 65 | if err := staleConnection.closeStaleConnection(); err != nil { 66 | return 0, err 67 | } 68 | 69 | // log.Printf("rmq cleaner cleaned connection %s", staleConnection) 70 | return returned, nil 71 | } 72 | 73 | func cleanQueue(queue Queue) (returned int64, err error) { 74 | returned, err = queue.ReturnUnacked(math.MaxInt64) 75 | if err != nil { 76 | return 0, err 77 | } 78 | if err := queue.closeInStaleConnection(); err != nil { 79 | return 0, err 80 | } 81 | // log.Printf("rmq cleaner cleaned queue %s %d", queue, returned) 82 | return returned, nil 83 | } 84 | -------------------------------------------------------------------------------- /cleaner_test.go: -------------------------------------------------------------------------------- 1 | 
package rmq 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestCleaner(t *testing.T) { 12 | redisOptions, closer := testRedis(t) 13 | defer closer() 14 | 15 | flushConn, err := OpenConnectionWithRedisOptions("cleaner-flush", redisOptions, nil) 16 | assert.NoError(t, err) 17 | assert.NoError(t, flushConn.stopHeartbeat()) 18 | assert.NoError(t, flushConn.flushDb()) 19 | 20 | conn, err := OpenConnectionWithRedisOptions("cleaner-conn1", redisOptions, nil) 21 | assert.NoError(t, err) 22 | queues, err := conn.GetOpenQueues() 23 | assert.NoError(t, err) 24 | assert.Len(t, queues, 0) 25 | queue, err := conn.OpenQueue("q1") 26 | assert.NoError(t, err) 27 | queues, err = conn.GetOpenQueues() 28 | assert.NoError(t, err) 29 | assert.Len(t, queues, 1) 30 | _, err = conn.OpenQueue("q2") 31 | assert.NoError(t, err) 32 | queues, err = conn.GetOpenQueues() 33 | assert.NoError(t, err) 34 | assert.Len(t, queues, 2) 35 | 36 | eventuallyReady(t, queue, 0) 37 | assert.NoError(t, queue.Publish("del1")) 38 | eventuallyReady(t, queue, 1) 39 | assert.NoError(t, queue.Publish("del2")) 40 | eventuallyReady(t, queue, 2) 41 | assert.NoError(t, queue.Publish("del3")) 42 | eventuallyReady(t, queue, 3) 43 | assert.NoError(t, queue.Publish("del4")) 44 | eventuallyReady(t, queue, 4) 45 | assert.NoError(t, queue.Publish("del5")) 46 | eventuallyReady(t, queue, 5) 47 | assert.NoError(t, queue.Publish("del6")) 48 | eventuallyReady(t, queue, 6) 49 | 50 | eventuallyUnacked(t, queue, 0) 51 | assert.NoError(t, queue.StartConsuming(2, time.Millisecond)) 52 | eventuallyUnacked(t, queue, 2) 53 | eventuallyReady(t, queue, 4) 54 | 55 | consumer := NewTestConsumer("c-A") 56 | consumer.AutoFinish = false 57 | consumer.AutoAck = false 58 | 59 | _, err = queue.AddConsumer("consumer1", consumer) 60 | assert.NoError(t, err) 61 | time.Sleep(10 * time.Millisecond) 62 | eventuallyUnacked(t, queue, 2) 63 | eventuallyReady(t, queue, 4) 64 | 65 | require.NotNil(t, consumer.Last()) 66 | assert.Equal(t, "del1", consumer.Last().Payload()) 67 | assert.NoError(t, consumer.Last().Ack()) 68 | eventuallyUnacked(t, queue, 2) 69 | eventuallyReady(t, queue, 3) 70 | 71 | consumer.Finish() 72 | time.Sleep(10 * time.Millisecond) 73 | eventuallyUnacked(t, queue, 2) 74 | eventuallyReady(t, queue, 3) 75 | assert.Equal(t, "del2", consumer.Last().Payload()) 76 | 77 | queue.StopConsuming() 78 | assert.NoError(t, conn.stopHeartbeat()) 79 | time.Sleep(time.Millisecond) 80 | 81 | conn, err = OpenConnectionWithRedisOptions("cleaner-conn1", redisOptions, nil) 82 | assert.NoError(t, err) 83 | queue, err = conn.OpenQueue("q1") 84 | assert.NoError(t, err) 85 | 86 | assert.NoError(t, queue.Publish("del7")) 87 | eventuallyReady(t, queue, 4) 88 | assert.NoError(t, queue.Publish("del8")) 89 | eventuallyReady(t, queue, 5) 90 | assert.NoError(t, queue.Publish("del9")) 91 | eventuallyReady(t, queue, 6) 92 | assert.NoError(t, queue.Publish("del10")) 93 | eventuallyReady(t, queue, 7) 94 | assert.NoError(t, queue.Publish("del11")) 95 | eventuallyReady(t, queue, 8) 96 | 97 | eventuallyUnacked(t, queue, 0) 98 | assert.NoError(t, queue.StartConsuming(2, time.Millisecond)) 99 | eventuallyUnacked(t, queue, 2) 100 | eventuallyReady(t, queue, 6) 101 | 102 | consumer = NewTestConsumer("c-B") 103 | consumer.AutoFinish = false 104 | consumer.AutoAck = false 105 | 106 | _, err = queue.AddConsumer("consumer2", consumer) 107 | assert.NoError(t, err) 108 | time.Sleep(10 * time.Millisecond) 109 | 
eventuallyUnacked(t, queue, 2) 110 | eventuallyReady(t, queue, 6) 111 | assert.Equal(t, "del4", consumer.Last().Payload()) 112 | 113 | consumer.Finish() // unacked 114 | time.Sleep(10 * time.Millisecond) 115 | eventuallyUnacked(t, queue, 2) 116 | eventuallyReady(t, queue, 6) 117 | 118 | assert.Equal(t, "del5", consumer.Last().Payload()) 119 | assert.NoError(t, consumer.Last().Ack()) 120 | time.Sleep(10 * time.Millisecond) 121 | eventuallyUnacked(t, queue, 2) 122 | eventuallyReady(t, queue, 5) 123 | 124 | queue.StopConsuming() 125 | assert.NoError(t, conn.stopHeartbeat()) 126 | time.Sleep(time.Millisecond) 127 | 128 | cleanerConn, err := OpenConnectionWithRedisOptions("cleaner-conn", redisOptions, nil) 129 | assert.NoError(t, err) 130 | cleaner := NewCleaner(cleanerConn) 131 | returned, err := cleaner.Clean() 132 | assert.NoError(t, err) 133 | assert.Equal(t, int64(4), returned) 134 | eventuallyReady(t, queue, 9) // 2 of 11 were acked above 135 | queues, err = conn.GetOpenQueues() 136 | assert.NoError(t, err) 137 | assert.Len(t, queues, 2) 138 | 139 | conn, err = OpenConnectionWithRedisOptions("cleaner-conn1", redisOptions, nil) 140 | assert.NoError(t, err) 141 | queue, err = conn.OpenQueue("q1") 142 | assert.NoError(t, err) 143 | assert.NoError(t, queue.StartConsuming(10, time.Millisecond)) 144 | consumer = NewTestConsumer("c-C") 145 | 146 | _, err = queue.AddConsumer("consumer3", consumer) 147 | assert.NoError(t, err) 148 | time.Sleep(10 * time.Millisecond) 149 | assert.Eventually(t, func() bool { 150 | return len(consumer.Deliveries()) == 9 151 | }, 10*time.Second, 2*time.Millisecond) 152 | 153 | queue.StopConsuming() 154 | assert.NoError(t, conn.stopHeartbeat()) 155 | time.Sleep(time.Millisecond) 156 | 157 | returned, err = cleaner.Clean() 158 | assert.NoError(t, err) 159 | assert.Equal(t, int64(0), returned) 160 | assert.NoError(t, cleanerConn.stopHeartbeat()) 161 | } 162 | -------------------------------------------------------------------------------- /connection.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "sync" 7 | "time" 8 | 9 | "github.com/redis/go-redis/v9" 10 | ) 11 | 12 | const ( 13 | // NOTE: Be careful when changing any of these values. 14 | // Currently we update the heartbeat every second with a TTL of a minute. 15 | // This means that if we fail to update the heartbeat 60 times in a row 16 | // the connection might get cleaned up by a cleaner. So we want to set the 17 | // error limit to a value lower like this (like 45) to make sure we stop 18 | // all consuming before that happens. 
19 | heartbeatDuration = time.Minute // TTL of heartbeat key 20 | heartbeatInterval = time.Second // how often we update the heartbeat key 21 | HeartbeatErrorLimit = 45 // stop consuming after this many heartbeat errors 22 | ) 23 | 24 | // Connection is an interface that can be used to test publishing 25 | type Connection interface { 26 | OpenQueue(name string) (Queue, error) 27 | CollectStats(queueList []string) (Stats, error) 28 | GetOpenQueues() ([]string, error) 29 | StopAllConsuming() <-chan struct{} 30 | 31 | // internals 32 | // used in cleaner 33 | checkHeartbeat() error 34 | getConnections() ([]string, error) 35 | hijackConnection(name string) Connection 36 | closeStaleConnection() error 37 | getConsumingQueues() ([]string, error) 38 | // used for stats 39 | openQueue(name string) Queue 40 | // used in tests 41 | stopHeartbeat() error 42 | flushDb() error 43 | unlistAllQueues() error 44 | } 45 | 46 | // Connection is the entry point. Use a connection to access queues, consumers and deliveries 47 | // Each connection has a single heartbeat shared among all consumers 48 | type redisConnection struct { 49 | Name string 50 | heartbeatKey string // key to keep alive 51 | queuesKey string // key to list of queues consumed by this connection 52 | 53 | consumersTemplate string 54 | unackedTemplate string 55 | readyTemplate string 56 | rejectedTemplate string 57 | 58 | redisClient RedisClient 59 | errChan chan<- error 60 | heartbeatStop chan chan struct{} // used to stop heartbeat() in stopHeartbeat(), nil once stopped 61 | 62 | lock sync.Mutex 63 | stopped bool 64 | // list of all queues that have been opened in this connection 65 | // this is used to handle heartbeat errors without relying on the redis connection 66 | openQueues []Queue 67 | } 68 | 69 | // OpenConnection opens and returns a new connection 70 | func OpenConnection(tag string, network string, address string, db int, errChan chan<- error) (Connection, error) { 71 | return OpenConnectionWithRedisOptions(tag, &redis.Options{Network: network, Addr: address, DB: db}, errChan) 72 | } 73 | 74 | // OpenConnectionWithRedisOptions allows you to pass more flexible options 75 | func OpenConnectionWithRedisOptions(tag string, redisOption *redis.Options, errChan chan<- error) (Connection, error) { 76 | return OpenConnectionWithRedisClient(tag, redis.NewClient(redisOption), errChan) 77 | } 78 | 79 | // OpenConnectionWithRedisClient opens and returns a new connection 80 | // This can be used to passa redis.ClusterClient. 81 | func OpenConnectionWithRedisClient(tag string, redisClient redis.Cmdable, errChan chan<- error) (Connection, error) { 82 | return OpenConnectionWithRmqRedisClient(tag, RedisWrapper{redisClient}, errChan) 83 | } 84 | 85 | // OpenConnectionWithTestRedisClient opens and returns a new connection which 86 | // uses a test redis client internally. This is useful in integration tests. 
87 | func OpenConnectionWithTestRedisClient(tag string, errChan chan<- error) (Connection, error) { 88 | return OpenConnectionWithRmqRedisClient(tag, NewTestRedisClient(), errChan) 89 | } 90 | 91 | // OpenConnectionWithRmqRedisClient: If you would like to use a redis client other than the ones 92 | // supported in the constructors above, you can implement the RedisClient interface yourself 93 | func OpenConnectionWithRmqRedisClient(tag string, redisClient RedisClient, errChan chan<- error) (Connection, error) { 94 | return openConnection(tag, redisClient, false, errChan) 95 | } 96 | 97 | // OpenClusterConnection: Same as OpenConnectionWithRedisClient, but using Redis hash tags {} instead of []. 98 | func OpenClusterConnection(tag string, redisClient redis.Cmdable, errChan chan<- error) (Connection, error) { 99 | return openConnection(tag, RedisWrapper{redisClient}, true, errChan) 100 | } 101 | 102 | func openConnection(tag string, redisClient RedisClient, useRedisHashTags bool, errChan chan<- error) (Connection, error) { 103 | name := fmt.Sprintf("%s-%s", tag, RandomString(6)) 104 | 105 | connection := &redisConnection{ 106 | Name: name, 107 | heartbeatKey: strings.Replace(connectionHeartbeatTemplate, phConnection, name, 1), 108 | queuesKey: strings.Replace(connectionQueuesTemplate, phConnection, name, 1), 109 | consumersTemplate: getTemplate(connectionQueueConsumersBaseTemplate, useRedisHashTags), 110 | unackedTemplate: getTemplate(connectionQueueUnackedBaseTemplate, useRedisHashTags), 111 | readyTemplate: getTemplate(queueReadyBaseTemplate, useRedisHashTags), 112 | rejectedTemplate: getTemplate(queueRejectedBaseTemplate, useRedisHashTags), 113 | redisClient: redisClient, 114 | errChan: errChan, 115 | heartbeatStop: make(chan chan struct{}, 1), // mark heartbeat as active, can be stopped 116 | } 117 | 118 | if err := connection.updateHeartbeat(); err != nil { // checks the connection 119 | return nil, err 120 | } 121 | 122 | // add to connection set after setting heartbeat to avoid race with cleaner 123 | if _, err := redisClient.SAdd(connectionsKey, name); err != nil { 124 | return nil, err 125 | } 126 | 127 | go connection.heartbeat(errChan) 128 | // log.Printf("rmq connection connected to %s %s:%s %d", name, network, address, db) 129 | return connection, nil 130 | } 131 | 132 | func (connection *redisConnection) updateHeartbeat() error { 133 | return connection.redisClient.Set(connection.heartbeatKey, "1", heartbeatDuration) 134 | } 135 | 136 | // heartbeat keeps the heartbeat key alive 137 | func (connection *redisConnection) heartbeat(errChan chan<- error) { 138 | errorCount := 0 // number of consecutive errors 139 | 140 | ticker := time.NewTicker(heartbeatInterval) 141 | defer ticker.Stop() 142 | 143 | for { 144 | select { 145 | case <-ticker.C: 146 | // continue below 147 | case c := <-connection.heartbeatStop: // stopHeartbeat() has been called 148 | close(c) // confirm to stopHeartbeat() that the heartbeat is stopped 149 | return // stop updating the heartbeat 150 | } 151 | 152 | err := connection.updateHeartbeat() 153 | if err == nil { // success 154 | errorCount = 0 155 | continue 156 | } 157 | // unexpected redis error 158 | 159 | errorCount++ 160 | 161 | if errorCount >= HeartbeatErrorLimit { 162 | // reached error limit 163 | 164 | // To avoid using this connection while we're not able to maintain its heartbeat we stop all 165 | // consumers. 
This in turn will call stopHeartbeat() and the responsibility of heartbeat() to 166 | // confirm that the heartbeat is stopped, so we do that here too. 167 | connection.StopAllConsuming() 168 | close(<-connection.heartbeatStop) // wait for stopHeartbeat() and confirm heartbeat is stopped 169 | 170 | // Clients reading from errChan need to see this error 171 | // This allows them to shut themselves down 172 | // Therefore we block adding it to errChan to ensure delivery 173 | errChan <- &HeartbeatError{RedisErr: err, Count: errorCount} 174 | return 175 | } else { 176 | select { // try to add error to channel, but don't block 177 | case errChan <- &HeartbeatError{RedisErr: err, Count: errorCount}: 178 | default: 179 | } 180 | } 181 | // keep trying until we hit the limit 182 | } 183 | } 184 | 185 | func (connection *redisConnection) String() string { 186 | return connection.Name 187 | } 188 | 189 | // OpenQueue opens and returns the queue with a given name 190 | func (connection *redisConnection) OpenQueue(name string) (Queue, error) { 191 | connection.lock.Lock() 192 | defer connection.lock.Unlock() 193 | 194 | if connection.stopped { 195 | return nil, ErrorConsumingStopped 196 | } 197 | 198 | if _, err := connection.redisClient.SAdd(queuesKey, name); err != nil { 199 | return nil, err 200 | } 201 | 202 | queue := connection.openQueue(name) 203 | connection.openQueues = append(connection.openQueues, queue) 204 | 205 | return queue, nil 206 | } 207 | 208 | // CollectStats collects and returns stats 209 | func (connection *redisConnection) CollectStats(queueList []string) (Stats, error) { 210 | return CollectStats(queueList, connection) 211 | } 212 | 213 | // GetOpenQueues returns a list of all open queues 214 | func (connection *redisConnection) GetOpenQueues() ([]string, error) { 215 | return connection.redisClient.SMembers(queuesKey) 216 | } 217 | 218 | // StopAllConsuming stops consuming on all queues opened in this connection. 219 | // It returns a channel which can be used to wait for all active consumers to 220 | // finish their current Consume() call. This is useful to implement graceful 221 | // shutdown. 222 | func (connection *redisConnection) StopAllConsuming() <-chan struct{} { 223 | connection.lock.Lock() 224 | defer func() { 225 | // regardless of how we exit this method, the connection is always stopped when we return 226 | connection.stopped = true 227 | connection.lock.Unlock() 228 | }() 229 | 230 | finishedChan := make(chan struct{}) 231 | 232 | // If we are already stopped then there is nothing to do 233 | if connection.stopped { 234 | close(finishedChan) 235 | return finishedChan 236 | } 237 | 238 | // If there are no open queues we still want to stop the heartbeat 239 | if len(connection.openQueues) == 0 { 240 | connection.stopHeartbeat() 241 | close(finishedChan) 242 | return finishedChan 243 | } 244 | 245 | chans := make([]<-chan struct{}, 0, len(connection.openQueues)) 246 | for _, queue := range connection.openQueues { 247 | chans = append(chans, queue.StopConsuming()) 248 | } 249 | 250 | go func() { 251 | // wait for all channels to be closed 252 | for _, c := range chans { 253 | <-c 254 | } 255 | 256 | // All consuming has been stopped. Now we can stop the heartbeat to avoid a goroutine leak. 
257 | connection.stopHeartbeat() 258 | 259 | close(finishedChan) // signal all done 260 | }() 261 | 262 | return finishedChan 263 | } 264 | 265 | // checkHeartbeat returns nil if the connection is currently active in terms of heartbeat 266 | func (connection *redisConnection) checkHeartbeat() error { 267 | heartbeatKey := strings.Replace(connectionHeartbeatTemplate, phConnection, connection.Name, 1) 268 | ttl, err := connection.redisClient.TTL(heartbeatKey) 269 | if err != nil { 270 | return err 271 | } 272 | if ttl <= 0 { 273 | return ErrorNotFound 274 | } 275 | return nil 276 | } 277 | 278 | // getConnections returns a list of all open connections 279 | func (connection *redisConnection) getConnections() ([]string, error) { 280 | return connection.redisClient.SMembers(connectionsKey) 281 | } 282 | 283 | // hijackConnection reopens an existing connection for inspection purposes without starting a heartbeat 284 | func (connection *redisConnection) hijackConnection(name string) Connection { 285 | return &redisConnection{ 286 | Name: name, 287 | heartbeatKey: strings.Replace(connectionHeartbeatTemplate, phConnection, name, 1), 288 | queuesKey: strings.Replace(connectionQueuesTemplate, phConnection, name, 1), 289 | consumersTemplate: connection.consumersTemplate, 290 | unackedTemplate: connection.unackedTemplate, 291 | readyTemplate: connection.readyTemplate, 292 | rejectedTemplate: connection.rejectedTemplate, 293 | redisClient: connection.redisClient, 294 | } 295 | } 296 | 297 | // closes a stale connection. not to be called on an active connection 298 | func (connection *redisConnection) closeStaleConnection() error { 299 | count, err := connection.redisClient.SRem(connectionsKey, connection.Name) 300 | if err != nil { 301 | return err 302 | } 303 | if count == 0 { 304 | return ErrorNotFound 305 | } 306 | 307 | // NOTE: we're not checking count here because a stale connection might not 308 | // have been consuming from any queue, in which case this key doesn't exist 309 | if _, err = connection.redisClient.Del(connection.queuesKey); err != nil { 310 | return err 311 | } 312 | 313 | return nil 314 | } 315 | 316 | // getConsumingQueues returns a list of all queues consumed by this connection 317 | func (connection *redisConnection) getConsumingQueues() ([]string, error) { 318 | return connection.redisClient.SMembers(connection.queuesKey) 319 | } 320 | 321 | // openQueue opens a queue without adding it to the set of queues 322 | func (connection *redisConnection) openQueue(name string) Queue { 323 | return newQueue( 324 | name, 325 | connection.Name, 326 | connection.queuesKey, 327 | connection.consumersTemplate, 328 | connection.unackedTemplate, 329 | connection.readyTemplate, 330 | connection.rejectedTemplate, 331 | connection.redisClient, 332 | connection.errChan, 333 | ) 334 | } 335 | 336 | // stopHeartbeat stops the heartbeat of the connection. 337 | // It does not remove it from the list of connections so it can later be found by the cleaner. 338 | // Returns ErrorNotFound if the heartbeat was already stopped. 339 | // Note that this function itself is not threadsafe; it's important not to call it multiple times 340 | // at the same time. Currently it's only called in StopAllConsuming() where it's linearized by 341 | // connection.lock.
342 | func (connection *redisConnection) stopHeartbeat() error { 343 | if connection.heartbeatStop == nil { // already stopped 344 | return ErrorNotFound 345 | } 346 | 347 | heartbeatStopped := make(chan struct{}) 348 | connection.heartbeatStop <- heartbeatStopped 349 | <-heartbeatStopped // wait for heartbeat() to confirm it's stopped 350 | connection.heartbeatStop = nil // mark heartbeat as stopped 351 | 352 | // Delete heartbeat key to immediately make the connection appear inactive to the cleaner, 353 | // instead of waiting for the heartbeat key to run into its TTL. 354 | count, err := connection.redisClient.Del(connection.heartbeatKey) 355 | if err != nil { // redis error 356 | return err 357 | } 358 | if count == 0 { // heartbeat key didn't exist 359 | return ErrorNotFound 360 | } 361 | return nil 362 | } 363 | 364 | // flushDb flushes the redis database to reset everything, used in tests 365 | func (connection *redisConnection) flushDb() error { 366 | return connection.redisClient.FlushDb() 367 | } 368 | 369 | // unlistAllQueues closes all queues by removing them from the global list 370 | func (connection *redisConnection) unlistAllQueues() error { 371 | _, err := connection.redisClient.Del(queuesKey) 372 | return err 373 | } 374 | -------------------------------------------------------------------------------- /consumer.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | type Consumer interface { 4 | Consume(delivery Delivery) 5 | } 6 | 7 | type ConsumerFunc func(Delivery) 8 | 9 | func (consumerFunc ConsumerFunc) Consume(delivery Delivery) { 10 | consumerFunc(delivery) 11 | } 12 | -------------------------------------------------------------------------------- /deliveries.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | type Deliveries []Delivery 4 | 5 | func (deliveries Deliveries) Payloads() []string { 6 | payloads := make([]string, len(deliveries)) 7 | for i, delivery := range deliveries { 8 | payloads[i] = delivery.Payload() 9 | } 10 | return payloads 11 | } 12 | 13 | // NOTE: The returned error map maps delivery indexes to errors. So if the 14 | // error map is non empty you can use the indexes in the map to look up which 15 | // of the deliveries ran into the corresponding error. See 16 | // example/batch_consumer. 
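//
// For example, a batch consumer could ack a whole batch and log any per-delivery failures like this (a sketch; the logging call is only an illustration):
//
//	errMap := batch.Ack()
//	for i, err := range errMap {
//		log.Printf("failed to ack %q: %v", batch[i].Payload(), err)
//	}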
17 | 18 | // functions with retry, see comments in delivery.go (recommended) 19 | 20 | func (deliveries Deliveries) Ack() (errMap map[int]error) { 21 | return deliveries.each(Delivery.Ack) 22 | } 23 | 24 | func (deliveries Deliveries) Reject() (errMap map[int]error) { 25 | return deliveries.each(Delivery.Reject) 26 | } 27 | 28 | func (deliveries Deliveries) Push() (errMap map[int]error) { 29 | return deliveries.each(Delivery.Push) 30 | } 31 | 32 | // helper functions 33 | 34 | func (deliveries Deliveries) each( 35 | f func(Delivery) error, 36 | ) (errMap map[int]error) { 37 | for i, delivery := range deliveries { 38 | if err := f(delivery); err != nil { 39 | if errMap == nil { // create error map lazily on demand 40 | errMap = map[int]error{} 41 | } 42 | errMap[i] = err 43 | } 44 | } 45 | return errMap 46 | } 47 | -------------------------------------------------------------------------------- /delivery.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "time" 8 | ) 9 | 10 | type Delivery interface { 11 | Payload() string 12 | 13 | Ack() error 14 | Reject() error 15 | Push() error 16 | } 17 | 18 | var ( 19 | _ Delivery = &redisDelivery{} 20 | _ WithHeader = &redisDelivery{} 21 | ) 22 | 23 | type redisDelivery struct { 24 | ctx context.Context 25 | payload string 26 | clearPayload string 27 | header http.Header 28 | unackedKey string 29 | rejectedKey string 30 | pushKey string 31 | redisClient RedisClient 32 | errChan chan<- error 33 | } 34 | 35 | func (delivery *redisDelivery) Header() http.Header { 36 | return delivery.header 37 | } 38 | 39 | func (delivery *redisDelivery) String() string { 40 | return fmt.Sprintf("[%s %s]", delivery.clearPayload, delivery.unackedKey) 41 | } 42 | 43 | func (delivery *redisDelivery) Payload() string { 44 | return delivery.clearPayload 45 | } 46 | 47 | // blocking versions of the functions below with the following behavior: 48 | // 1. return immediately if the operation succeeded or failed with ErrorNotFound 49 | // 2. in case of other redis errors, send them to the errors chan and retry after a sleep 50 | // 3. 
if redis errors occur after StopConsuming() has been called, ErrorConsumingStopped will be returned 51 | 52 | func (delivery *redisDelivery) Ack() error { 53 | errorCount := 0 54 | for { 55 | count, err := delivery.redisClient.LRem(delivery.unackedKey, 1, delivery.payload) 56 | if err == nil { // no redis error 57 | if count == 0 { 58 | return ErrorNotFound 59 | } 60 | return nil 61 | } 62 | 63 | // redis error 64 | 65 | errorCount++ 66 | 67 | select { // try to add error to channel, but don't block 68 | case delivery.errChan <- &DeliveryError{Delivery: delivery, RedisErr: err, Count: errorCount}: 69 | default: 70 | } 71 | 72 | if err := delivery.ctx.Err(); err != nil { 73 | return ErrorConsumingStopped 74 | } 75 | 76 | time.Sleep(time.Second) 77 | } 78 | } 79 | 80 | func (delivery *redisDelivery) Reject() error { 81 | return delivery.move(delivery.rejectedKey) 82 | } 83 | 84 | func (delivery *redisDelivery) Push() error { 85 | if delivery.pushKey == "" { 86 | return delivery.Reject() // fall back to rejecting 87 | } 88 | 89 | return delivery.move(delivery.pushKey) 90 | } 91 | 92 | func (delivery *redisDelivery) move(key string) error { 93 | errorCount := 0 94 | for { 95 | _, err := delivery.redisClient.LPush(key, delivery.payload) 96 | if err == nil { // success 97 | break 98 | } 99 | // error 100 | 101 | errorCount++ 102 | 103 | select { // try to add error to channel, but don't block 104 | case delivery.errChan <- &DeliveryError{Delivery: delivery, RedisErr: err, Count: errorCount}: 105 | default: 106 | } 107 | 108 | if err := delivery.ctx.Err(); err != nil { 109 | return ErrorConsumingStopped 110 | } 111 | 112 | time.Sleep(time.Second) 113 | } 114 | 115 | return delivery.Ack() 116 | } 117 | 118 | // lower level functions which don't retry but just return the first error 119 | -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | var ( 9 | ErrorNotFound = errors.New("entity not found") // entity being connection/queue/delivery/heartbeat 10 | ErrorAlreadyConsuming = errors.New("must not call StartConsuming() multiple times") 11 | ErrorNotConsuming = errors.New("must call StartConsuming() before adding consumers") 12 | ErrorConsumingStopped = errors.New("consuming stopped") 13 | ) 14 | 15 | type ConsumeError struct { 16 | RedisErr error 17 | Count int // number of consecutive errors 18 | } 19 | 20 | func (e *ConsumeError) Error() string { 21 | return fmt.Sprintf("rmq.ConsumeError (%d): %s", e.Count, e.RedisErr.Error()) 22 | } 23 | 24 | func (e *ConsumeError) Unwrap() error { 25 | return e.RedisErr 26 | } 27 | 28 | type HeartbeatError struct { 29 | RedisErr error 30 | Count int // number of consecutive errors 31 | } 32 | 33 | func (e *HeartbeatError) Error() string { 34 | return fmt.Sprintf("rmq.HeartbeatError (%d): %s", e.Count, e.RedisErr.Error()) 35 | } 36 | 37 | func (e *HeartbeatError) Unwrap() error { 38 | return e.RedisErr 39 | } 40 | 41 | type DeliveryError struct { 42 | Delivery Delivery 43 | RedisErr error 44 | Count int // number of consecutive errors 45 | } 46 | 47 | func (e *DeliveryError) Error() string { 48 | return fmt.Sprintf("rmq.DeliveryError (%d): %s", e.Count, e.RedisErr.Error()) 49 | } 50 | 51 | func (e *DeliveryError) Unwrap() error { 52 | return e.RedisErr 53 | } 54 | -------------------------------------------------------------------------------- /example/batch_consumer/main.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "os/signal" 7 | "syscall" 8 | "time" 9 | 10 | "github.com/adjust/rmq/v5" 11 | ) 12 | 13 | const ( 14 | prefetchLimit = 1000 15 | pollDuration = 100 * time.Millisecond 16 | batchSize = 111 17 | batchTimeout = time.Second 18 | 19 | consumeDuration = time.Millisecond 20 | shouldLog = false 21 | ) 22 | 23 | func main() { 24 | errChan := make(chan error, 10) 25 | go logErrors(errChan) 26 | 27 | connection, err := rmq.OpenConnection("consumer", "tcp", "localhost:6379", 2, errChan) 28 | if err != nil { 29 | panic(err) 30 | } 31 | 32 | for _, queueName := range []string{ 33 | "things", 34 | "foobars", 35 | } { 36 | queue, err := connection.OpenQueue(queueName) 37 | if err != nil { 38 | panic(err) 39 | } 40 | if err := queue.StartConsuming(prefetchLimit, pollDuration); err != nil { 41 | panic(err) 42 | } 43 | if _, err := queue.AddBatchConsumer(queueName, batchSize, batchTimeout, NewBatchConsumer(queueName)); err != nil { 44 | panic(err) 45 | } 46 | } 47 | 48 | signals := make(chan os.Signal, 1) 49 | signal.Notify(signals, syscall.SIGINT) 50 | defer signal.Stop(signals) 51 | 52 | <-signals // wait for signal 53 | go func() { 54 | <-signals // hard exit on second signal (in case shutdown gets stuck) 55 | os.Exit(1) 56 | }() 57 | 58 | <-connection.StopAllConsuming() // wait for all Consume() calls to finish 59 | } 60 | 61 | type BatchConsumer struct { 62 | tag string 63 | } 64 | 65 | func NewBatchConsumer(tag string) *BatchConsumer { 66 | return &BatchConsumer{tag: tag} 67 | } 68 | 69 | func (consumer *BatchConsumer) Consume(batch rmq.Deliveries) { 70 | payloads := batch.Payloads() 71 | debugf("start consume %q", payloads) 72 | time.Sleep(consumeDuration) 73 | 74 | log.Printf("%s consumed %d: %s", consumer.tag, len(batch), batch[0]) 75 | errors := batch.Ack() 76 | if len(errors) == 0 { 77 | debugf("acked %q", payloads) 78 | return 79 | } 80 | 81 | for i, err := range errors { 82 | debugf("failed to ack %q: %q", batch[i].Payload(), err) 83 | } 84 | } 85 | 86 | func logErrors(errChan <-chan error) { 87 | for err := range errChan { 88 | switch err := err.(type) { 89 | case *rmq.HeartbeatError: 90 | if err.Count == rmq.HeartbeatErrorLimit { 91 | log.Print("heartbeat error (limit): ", err) 92 | } else { 93 | log.Print("heartbeat error: ", err) 94 | } 95 | case *rmq.ConsumeError: 96 | log.Print("consume error: ", err) 97 | case *rmq.DeliveryError: 98 | log.Print("delivery error: ", err.Delivery, err) 99 | default: 100 | log.Print("other error: ", err) 101 | } 102 | } 103 | } 104 | 105 | func debugf(format string, args ...interface{}) { 106 | if shouldLog { 107 | log.Printf(format, args...) 
108 | } 109 | } 110 | -------------------------------------------------------------------------------- /example/cleaner/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "time" 6 | 7 | "github.com/adjust/rmq/v5" 8 | ) 9 | 10 | func main() { 11 | connection, err := rmq.OpenConnection("cleaner", "tcp", "localhost:6379", 2, nil) 12 | if err != nil { 13 | panic(err) 14 | } 15 | 16 | cleaner := rmq.NewCleaner(connection) 17 | 18 | for range time.Tick(time.Second) { 19 | returned, err := cleaner.Clean() 20 | if err != nil { 21 | log.Printf("failed to clean: %s", err) 22 | continue 23 | } 24 | log.Printf("cleaned %d", returned) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /example/consumer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "os/signal" 8 | "syscall" 9 | "time" 10 | 11 | "github.com/adjust/rmq/v5" 12 | ) 13 | 14 | const ( 15 | prefetchLimit = 1000 16 | pollDuration = 100 * time.Millisecond 17 | numConsumers = 5 18 | 19 | reportBatchSize = 10000 20 | consumeDuration = time.Millisecond 21 | shouldLog = false 22 | ) 23 | 24 | func main() { 25 | errChan := make(chan error, 10) 26 | go logErrors(errChan) 27 | 28 | connection, err := rmq.OpenConnection("consumer", "tcp", "localhost:6379", 2, errChan) 29 | if err != nil { 30 | panic(err) 31 | } 32 | 33 | queue, err := connection.OpenQueue("things") 34 | if err != nil { 35 | panic(err) 36 | } 37 | 38 | if err := queue.StartConsuming(prefetchLimit, pollDuration); err != nil { 39 | panic(err) 40 | } 41 | 42 | for i := 0; i < numConsumers; i++ { 43 | name := fmt.Sprintf("consumer %d", i) 44 | if _, err := queue.AddConsumer(name, NewConsumer(i)); err != nil { 45 | panic(err) 46 | } 47 | } 48 | 49 | signals := make(chan os.Signal, 1) 50 | signal.Notify(signals, syscall.SIGINT) 51 | defer signal.Stop(signals) 52 | 53 | <-signals // wait for signal 54 | go func() { 55 | <-signals // hard exit on second signal (in case shutdown gets stuck) 56 | os.Exit(1) 57 | }() 58 | 59 | <-connection.StopAllConsuming() // wait for all Consume() calls to finish 60 | } 61 | 62 | type Consumer struct { 63 | name string 64 | count int 65 | before time.Time 66 | } 67 | 68 | func NewConsumer(tag int) *Consumer { 69 | return &Consumer{ 70 | name: fmt.Sprintf("consumer%d", tag), 71 | count: 0, 72 | before: time.Now(), 73 | } 74 | } 75 | 76 | func (consumer *Consumer) Consume(delivery rmq.Delivery) { 77 | payload := delivery.Payload() 78 | debugf("start consume %s", payload) 79 | time.Sleep(consumeDuration) 80 | 81 | consumer.count++ 82 | if consumer.count%reportBatchSize == 0 { 83 | duration := time.Now().Sub(consumer.before) 84 | consumer.before = time.Now() 85 | perSecond := time.Second / (duration / reportBatchSize) 86 | log.Printf("%s consumed %d %s %d", consumer.name, consumer.count, payload, perSecond) 87 | } 88 | 89 | if consumer.count%reportBatchSize > 0 { 90 | if err := delivery.Ack(); err != nil { 91 | debugf("failed to ack %s: %s", payload, err) 92 | } else { 93 | debugf("acked %s", payload) 94 | } 95 | } else { // reject one per batch 96 | if err := delivery.Reject(); err != nil { 97 | debugf("failed to reject %s: %s", payload, err) 98 | } else { 99 | debugf("rejected %s", payload) 100 | } 101 | } 102 | } 103 | 104 | func logErrors(errChan <-chan error) { 105 | for err := range errChan { 106 | switch err := 
err.(type) { 107 | case *rmq.HeartbeatError: 108 | if err.Count == rmq.HeartbeatErrorLimit { 109 | log.Print("heartbeat error (limit): ", err) 110 | } else { 111 | log.Print("heartbeat error: ", err) 112 | } 113 | case *rmq.ConsumeError: 114 | log.Print("consume error: ", err) 115 | case *rmq.DeliveryError: 116 | log.Print("delivery error: ", err.Delivery, err) 117 | default: 118 | log.Print("other error: ", err) 119 | } 120 | } 121 | } 122 | 123 | func debugf(format string, args ...interface{}) { 124 | if shouldLog { 125 | log.Printf(format, args...) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /example/handler/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/adjust/rmq/v5" 9 | ) 10 | 11 | func main() { 12 | connection, err := rmq.OpenConnection("handler", "tcp", "localhost:6379", 2, nil) 13 | if err != nil { 14 | panic(err) 15 | } 16 | 17 | http.Handle("/overview", NewHandler(connection)) 18 | fmt.Printf("Handler listening on http://localhost:3333/overview\n") 19 | if err := http.ListenAndServe(":3333", nil); err != nil { 20 | panic(err) 21 | } 22 | } 23 | 24 | type Handler struct { 25 | connection rmq.Connection 26 | } 27 | 28 | func NewHandler(connection rmq.Connection) *Handler { 29 | return &Handler{connection: connection} 30 | } 31 | 32 | func (handler *Handler) ServeHTTP(writer http.ResponseWriter, request *http.Request) { 33 | layout := request.FormValue("layout") 34 | refresh := request.FormValue("refresh") 35 | 36 | queues, err := handler.connection.GetOpenQueues() 37 | if err != nil { 38 | panic(err) 39 | } 40 | 41 | stats, err := handler.connection.CollectStats(queues) 42 | if err != nil { 43 | panic(err) 44 | } 45 | 46 | log.Printf("queue stats\n%s", stats) 47 | fmt.Fprint(writer, stats.GetHtml(layout, refresh)) 48 | } 49 | -------------------------------------------------------------------------------- /example/producer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "time" 7 | 8 | "github.com/adjust/rmq/v5" 9 | ) 10 | 11 | const ( 12 | numDeliveries = 100000000 13 | batchSize = 10000 14 | ) 15 | 16 | func main() { 17 | connection, err := rmq.OpenConnection("producer", "tcp", "localhost:6379", 2, nil) 18 | if err != nil { 19 | panic(err) 20 | } 21 | 22 | things, err := connection.OpenQueue("things") 23 | if err != nil { 24 | panic(err) 25 | } 26 | foobars, err := connection.OpenQueue("foobars") 27 | if err != nil { 28 | panic(err) 29 | } 30 | 31 | var before time.Time 32 | for i := 0; i < numDeliveries; i++ { 33 | delivery := fmt.Sprintf("delivery %d", i) 34 | if err := things.Publish(delivery); err != nil { 35 | log.Printf("failed to publish: %s", err) 36 | } 37 | 38 | if i%batchSize == 0 { 39 | duration := time.Now().Sub(before) 40 | before = time.Now() 41 | perSecond := time.Second / (duration / batchSize) 42 | log.Printf("produced %d %s %d", i, delivery, perSecond) 43 | if err := foobars.Publish("foo"); err != nil { 44 | log.Printf("failed to publish: %s", err) 45 | } 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /example/purger/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/adjust/rmq/v5" 7 | ) 8 | 9 | func main() { 10 | 
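// open a connection, open the "things" queue and purge all of its ready deliveries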
connection, err := rmq.OpenConnection("cleaner", "tcp", "localhost:6379", 2, nil) 11 | if err != nil { 12 | panic(err) 13 | } 14 | 15 | queue, err := connection.OpenQueue("things") 16 | if err != nil { 17 | panic(err) 18 | } 19 | count, err := queue.PurgeReady() 20 | if err != nil { 21 | log.Printf("failed to purge: %s", err) 22 | return 23 | } 24 | 25 | log.Printf("purged %d", count) 26 | } 27 | -------------------------------------------------------------------------------- /example/returner/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "math" 6 | 7 | "github.com/adjust/rmq/v5" 8 | ) 9 | 10 | func main() { 11 | connection, err := rmq.OpenConnection("returner", "tcp", "localhost:6379", 2, nil) 12 | if err != nil { 13 | panic(err) 14 | } 15 | 16 | queue, err := connection.OpenQueue("things") 17 | if err != nil { 18 | panic(err) 19 | } 20 | returned, err := queue.ReturnRejected(math.MaxInt64) 21 | if err != nil { 22 | panic(err) 23 | } 24 | 25 | log.Printf("queue returner returned %d rejected deliveries", returned) 26 | } 27 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/adjust/rmq/v5 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/alicebob/miniredis/v2 v2.30.4 7 | github.com/redis/go-redis/v9 v9.0.3 8 | github.com/stretchr/testify v1.7.0 9 | ) 10 | 11 | require ( 12 | github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect 13 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 14 | github.com/davecgh/go-spew v1.1.0 // indirect 15 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 16 | github.com/kr/pretty v0.1.0 // indirect 17 | github.com/pmezard/go-difflib v1.0.0 // indirect 18 | github.com/yuin/gopher-lua v1.1.0 // indirect 19 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 20 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect 21 | ) 22 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= 2 | github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= 3 | github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= 4 | github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= 5 | github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= 6 | github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= 7 | github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= 8 | github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= 9 | github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 10 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 11 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 12 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 13 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 14 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 15 | github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= 16 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 17 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 18 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 19 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 20 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 21 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 22 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 23 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 24 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 25 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 26 | github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k= 27 | github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= 28 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 29 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 30 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 31 | github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= 32 | github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= 33 | golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 34 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 35 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 36 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 37 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 38 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 39 | -------------------------------------------------------------------------------- /header.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "net/http" 8 | "strings" 9 | ) 10 | 11 | // Redis protocol does not define a specific way to pass additional data like header. 12 | // However, there is often need to pass them (for example for traces propagation). 13 | // 14 | // This implementation injects optional header values marked with a signature into payload body 15 | // during publishing. When message is consumed, if signature is present, header and original payload 16 | // are extracted from augmented payload. 17 | // 18 | // Header is defined as http.Header for better interoperability with existing libraries, 19 | // for example with go.opentelemetry.io/otel/propagation.HeaderCarrier. 20 | 21 | // PayloadWithHeader creates a payload string with header. 
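// When the header is non-empty the result is the JSON encoded header prefixed by a binary
// signature and followed by a newline and the original payload, for example (signature not shown):
//
//	{"X-Foo":["Bar"]}
//	{"foo":"bar"}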
22 | func PayloadWithHeader(payload string, header http.Header) string { 23 | if len(header) == 0 { 24 | return payload 25 | } 26 | 27 | hd, _ := json.Marshal(header) // String map never fails marshaling. 28 | 29 | return jsonHeaderSignature + string(hd) + "\n" + payload 30 | } 31 | 32 | // PayloadBytesWithHeader creates payload bytes slice with header. 33 | func PayloadBytesWithHeader(payload []byte, header http.Header) []byte { 34 | if len(header) == 0 { 35 | return payload 36 | } 37 | 38 | hd, _ := json.Marshal(header) // String map never fails marshaling. 39 | 40 | res := make([]byte, 0, len(jsonHeaderSignature)+len(hd)+1+len(payload)) 41 | res = append(res, []byte(jsonHeaderSignature)...) 42 | res = append(res, hd...) 43 | res = append(res, '\n') 44 | res = append(res, payload...) 45 | 46 | return res 47 | } 48 | 49 | // ExtractHeaderAndPayload splits augmented payload into header and original payload if specific signature is present. 50 | func ExtractHeaderAndPayload(payload string) (http.Header, string, error) { 51 | if !strings.HasPrefix(payload, jsonHeaderSignature) { 52 | return nil, payload, nil 53 | } 54 | 55 | lineEnd := strings.Index(payload, "\n") 56 | if lineEnd == -1 { 57 | return nil, "", errors.New("missing line separator") 58 | } 59 | 60 | first := payload[len(jsonHeaderSignature):lineEnd] 61 | rest := payload[lineEnd+1:] 62 | 63 | header := make(http.Header) 64 | 65 | if err := json.Unmarshal([]byte(first), &header); err != nil { 66 | return nil, "", fmt.Errorf("parsing header: %w", err) 67 | } 68 | 69 | return header, rest, nil 70 | } 71 | 72 | // WithHeader is a Delivery with Header. 73 | type WithHeader interface { 74 | Header() http.Header 75 | } 76 | 77 | // jsonHeaderSignature is a signature marker to indicate JSON header presence. 78 | // Do not change the value. 79 | const jsonHeaderSignature = "\xFF\x00\xBE\xBEJ" 80 | -------------------------------------------------------------------------------- /header_test.go: -------------------------------------------------------------------------------- 1 | package rmq_test 2 | 3 | import ( 4 | "net/http" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/adjust/rmq/v5" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestPayloadWithHeader(t *testing.T) { 14 | p := `{"foo":"bar"}` 15 | 16 | h := make(http.Header) 17 | ph := rmq.PayloadWithHeader(p, h) 18 | assert.Equal(t, p, ph) // No change for empty header. 19 | h2, p2, err := rmq.ExtractHeaderAndPayload(ph) 20 | require.NoError(t, err) 21 | assert.Nil(t, h2) 22 | assert.Equal(t, p, p2) 23 | 24 | h.Set("X-Foo", "Bar") 25 | ph = rmq.PayloadWithHeader(p, h) 26 | assert.NotEqual(t, p, ph) 27 | 28 | h2, p2, err = rmq.ExtractHeaderAndPayload(ph) 29 | require.NoError(t, err) 30 | assert.Equal(t, h, h2) 31 | assert.Equal(t, p, p2) 32 | } 33 | 34 | func TestPayloadBytesWithHeader(t *testing.T) { 35 | p := `{"foo":"bar"}` 36 | 37 | h := make(http.Header) 38 | ph := rmq.PayloadBytesWithHeader([]byte(p), h) 39 | assert.Equal(t, p, string(ph)) // No change for empty header. 
40 | h2, p2, err := rmq.ExtractHeaderAndPayload(string(ph)) 41 | require.NoError(t, err) 42 | assert.Nil(t, h2) 43 | assert.Equal(t, p, p2) 44 | 45 | h.Set("X-Foo", "Bar") 46 | ph = rmq.PayloadBytesWithHeader([]byte(p), h) 47 | assert.NotEqual(t, p, ph) 48 | 49 | h2, p2, err = rmq.ExtractHeaderAndPayload(string(ph)) 50 | require.NoError(t, err) 51 | assert.Equal(t, h, h2) 52 | assert.Equal(t, p, string(p2)) 53 | } 54 | 55 | func TestExtractHeaderAndPayload(t *testing.T) { 56 | t.Run("missing_line_separator", func(t *testing.T) { 57 | ph := rmq.PayloadWithHeader("foo", http.Header{"foo": []string{"bar"}}) 58 | ph = ph[0:7] // Truncating payload. 59 | h, p, err := rmq.ExtractHeaderAndPayload(ph) 60 | require.Error(t, err) 61 | assert.Nil(t, h) 62 | assert.Empty(t, p) 63 | }) 64 | 65 | t.Run("invalid_json", func(t *testing.T) { 66 | ph := rmq.PayloadWithHeader("foo", http.Header{"foo": []string{"bar"}}) 67 | ph = strings.Replace(ph, `"`, `'`, 1) // Corrupting JSON. 68 | h, p, err := rmq.ExtractHeaderAndPayload(ph) 69 | require.Error(t, err) 70 | assert.Nil(t, h) 71 | assert.Empty(t, p) 72 | }) 73 | 74 | t.Run("ok", func(t *testing.T) { 75 | ph := rmq.PayloadWithHeader("foo", http.Header{"foo": []string{"bar"}}) 76 | h, p, err := rmq.ExtractHeaderAndPayload(ph) 77 | require.NoError(t, err) 78 | assert.Equal(t, http.Header{"foo": []string{"bar"}}, h) 79 | assert.Equal(t, "foo", p) 80 | }) 81 | 82 | t.Run("ok_line_breaks", func(t *testing.T) { 83 | ph := rmq.PayloadWithHeader("foo", http.Header{"foo": []string{"bar1\nbar2\nbar3"}}) 84 | h, p, err := rmq.ExtractHeaderAndPayload(ph) 85 | require.NoError(t, err) 86 | assert.Equal(t, http.Header{"foo": []string{"bar1\nbar2\nbar3"}}, h) 87 | assert.Equal(t, "foo", p) 88 | }) 89 | } 90 | 91 | func ExamplePayloadWithHeader() { 92 | var ( 93 | pub, con rmq.Queue 94 | ) 95 | 96 | // .... 97 | 98 | h := make(http.Header) 99 | h.Set("X-Baz", "quux") 100 | 101 | // You can add header to your payload during publish. 102 | _ = pub.Publish(rmq.PayloadWithHeader(`{"foo":"bar"}`, h)) 103 | 104 | // .... 105 | 106 | _, _ = con.AddConsumerFunc("tag", func(delivery rmq.Delivery) { 107 | // And receive header back in consumer. 108 | delivery.(rmq.WithHeader).Header().Get("X-Baz") // "quux" 109 | 110 | // .... 
111 | }) 112 | } 113 | -------------------------------------------------------------------------------- /queue.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand" 7 | "strings" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | const ( 13 | defaultBatchTimeout = time.Second 14 | purgeBatchSize = int64(100) 15 | ) 16 | 17 | type Queue interface { 18 | Publish(payload ...string) error 19 | PublishBytes(payload ...[]byte) error 20 | SetPushQueue(pushQueue Queue) 21 | Remove(payload string, count int64, removeFromRejected bool) error 22 | RemoveBytes(payload []byte, count int64, removeFromRejected bool) error 23 | StartConsuming(prefetchLimit int64, pollDuration time.Duration) error 24 | StopConsuming() <-chan struct{} 25 | AddConsumer(tag string, consumer Consumer) (string, error) 26 | AddConsumerFunc(tag string, consumerFunc ConsumerFunc) (string, error) 27 | AddBatchConsumer(tag string, batchSize int64, timeout time.Duration, consumer BatchConsumer) (string, error) 28 | AddBatchConsumerFunc(tag string, batchSize int64, timeout time.Duration, batchConsumerFunc BatchConsumerFunc) (string, error) 29 | PurgeReady() (int64, error) 30 | PurgeRejected() (int64, error) 31 | ReturnUnacked(max int64) (int64, error) 32 | ReturnRejected(max int64) (int64, error) 33 | Destroy() (readyCount, rejectedCount int64, err error) 34 | Drain(count int64) ([]string, error) 35 | 36 | // internals 37 | // used in cleaner 38 | closeInStaleConnection() error 39 | // used for stats 40 | readyCount() (int64, error) 41 | unackedCount() (int64, error) 42 | rejectedCount() (int64, error) 43 | getConsumers() ([]string, error) 44 | } 45 | 46 | type redisQueue struct { 47 | name string 48 | connectionName string 49 | queuesKey string // key to list of queues consumed by this connection 50 | consumersKey string // key to set of consumers using this connection 51 | unackedKey string // key to list of currently consuming deliveries 52 | readyKey string // key to list of ready deliveries 53 | rejectedKey string // key to list of rejected deliveries 54 | pushKey string // key to list of pushed deliveries 55 | redisClient RedisClient 56 | errChan chan<- error 57 | prefetchLimit int64 // max number of prefetched deliveries number of unacked can go up to prefetchLimit + numConsumers 58 | pollDuration time.Duration 59 | 60 | lock sync.Mutex // protects the fields below related to starting and stopping this queue 61 | consumingStopped chan struct{} // this chan gets closed when consuming on this queue has stopped 62 | stopWg sync.WaitGroup 63 | ackCtx context.Context 64 | ackCancel context.CancelFunc 65 | deliveryChan chan Delivery // nil for publish channels, not nil for consuming channels 66 | } 67 | 68 | func newQueue( 69 | name, connectionName, queuesKey string, 70 | consumersTemplate, unackedTemplate, readyTemplate, rejectedTemplate string, 71 | redisClient RedisClient, 72 | errChan chan<- error, 73 | ) *redisQueue { 74 | 75 | consumersKey := strings.Replace(consumersTemplate, phConnection, connectionName, 1) 76 | consumersKey = strings.Replace(consumersKey, phQueue, name, 1) 77 | 78 | unackedKey := strings.Replace(unackedTemplate, phConnection, connectionName, 1) 79 | unackedKey = strings.Replace(unackedKey, phQueue, name, 1) 80 | 81 | readyKey := strings.Replace(readyTemplate, phQueue, name, 1) 82 | rejectedKey := strings.Replace(rejectedTemplate, phQueue, name, 1) 83 | 84 | consumingStopped := make(chan struct{}) 85 | ackCtx, 
ackCancel := context.WithCancel(context.Background()) 86 | 87 | queue := &redisQueue{ 88 | name: name, 89 | connectionName: connectionName, 90 | queuesKey: queuesKey, 91 | consumersKey: consumersKey, 92 | unackedKey: unackedKey, 93 | readyKey: readyKey, 94 | rejectedKey: rejectedKey, 95 | redisClient: redisClient, 96 | errChan: errChan, 97 | consumingStopped: consumingStopped, 98 | ackCtx: ackCtx, 99 | ackCancel: ackCancel, 100 | } 101 | return queue 102 | } 103 | 104 | func (queue *redisQueue) String() string { 105 | return fmt.Sprintf("[%s conn:%s]", queue.name, queue.connectionName) 106 | } 107 | 108 | // Publish adds a delivery with the given payload to the queue 109 | // returns how many deliveries are in the queue afterwards 110 | func (queue *redisQueue) Publish(payload ...string) error { 111 | _, err := queue.redisClient.LPush(queue.readyKey, payload...) 112 | return err 113 | } 114 | 115 | // PublishBytes just casts the bytes and calls Publish 116 | func (queue *redisQueue) PublishBytes(payload ...[]byte) error { 117 | stringifiedBytes := make([]string, len(payload)) 118 | for i, b := range payload { 119 | stringifiedBytes[i] = string(b) 120 | } 121 | return queue.Publish(stringifiedBytes...) 122 | } 123 | 124 | // Remove elements with specific value from the queue (WARN: this operation is pretty slow with O(N+M) complexity where N is length of the queue and M is number of removed elements) 125 | func (queue *redisQueue) Remove(payload string, count int64, removeFromRejected bool) error { 126 | _, err := queue.redisClient.LRem(queue.readyKey, count, payload) 127 | if removeFromRejected { 128 | queue.redisClient.LRem(queue.rejectedKey, count, payload) 129 | } 130 | return err 131 | } 132 | 133 | // RemoveBytes casts bytes to string and calls Remove (WARN: this operation is pretty slow with O(N+M) complexity where N is length of the queue and M is number of removed elements) 134 | func (queue *redisQueue) RemoveBytes(payload []byte, count int64, removeFromRejected bool) error { 135 | return queue.Remove(string(payload), count, removeFromRejected) 136 | } 137 | 138 | // SetPushQueue sets a push queue. In the consumer function you can call 139 | // delivery.Push(). If a push queue is set the delivery then gets moved from 140 | // the original queue to the push queue. If no push queue is set it's 141 | // equivalent to calling delivery.Reject(). 142 | // NOTE: panics if pushQueue is not a *redisQueue 143 | func (queue *redisQueue) SetPushQueue(pushQueue Queue) { 144 | queue.pushKey = pushQueue.(*redisQueue).readyKey 145 | } 146 | 147 | // StartConsuming starts consuming into a channel of size prefetchLimit 148 | // must be called before consumers can be added! 
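// prefetchLimit is the maximum number of deliveries fetched ahead of the consumers, so the
// number of unacked deliveries can go up to prefetchLimit plus the number of consumers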
149 | // pollDuration is the duration the queue sleeps before checking for new deliveries 150 | func (queue *redisQueue) StartConsuming(prefetchLimit int64, pollDuration time.Duration) error { 151 | queue.lock.Lock() 152 | defer queue.lock.Unlock() 153 | 154 | // If deliveryChan is set, then we are already consuming 155 | if queue.deliveryChan != nil { 156 | return ErrorAlreadyConsuming 157 | } 158 | select { 159 | case <-queue.consumingStopped: 160 | // If consuming is stopped then we must not try to 161 | return ErrorConsumingStopped 162 | default: 163 | } 164 | 165 | // add queue to list of queues consumed on this connection 166 | if _, err := queue.redisClient.SAdd(queue.queuesKey, queue.name); err != nil { 167 | return err 168 | } 169 | 170 | queue.prefetchLimit = prefetchLimit 171 | queue.pollDuration = pollDuration 172 | queue.deliveryChan = make(chan Delivery, prefetchLimit) 173 | // log.Printf("rmq queue started consuming %s %d %s", queue, prefetchLimit, pollDuration) 174 | go queue.consume() 175 | return nil 176 | } 177 | 178 | func (queue *redisQueue) consume() { 179 | errorCount := 0 // number of consecutive batch errors 180 | 181 | for { 182 | switch err := queue.consumeBatch(); err { 183 | case nil: // success 184 | errorCount = 0 185 | 186 | case ErrorConsumingStopped: 187 | close(queue.deliveryChan) 188 | return 189 | 190 | default: // redis error 191 | errorCount++ 192 | select { // try to add error to channel, but don't block 193 | case queue.errChan <- &ConsumeError{RedisErr: err, Count: errorCount}: 194 | default: 195 | } 196 | } 197 | time.Sleep(jitteredDuration(queue.pollDuration)) 198 | } 199 | } 200 | 201 | func (queue *redisQueue) consumeBatch() error { 202 | select { 203 | case <-queue.consumingStopped: 204 | return ErrorConsumingStopped 205 | default: 206 | } 207 | 208 | // unackedCount == + 209 | unackedCount, err := queue.unackedCount() 210 | if err != nil { 211 | return err 212 | } 213 | 214 | batchSize := queue.prefetchLimit - unackedCount 215 | if batchSize <= 0 { 216 | return nil 217 | } 218 | 219 | for i := int64(0); i < batchSize; i++ { 220 | select { 221 | case <-queue.consumingStopped: 222 | return ErrorConsumingStopped 223 | default: 224 | } 225 | 226 | payload, err := queue.redisClient.RPopLPush(queue.readyKey, queue.unackedKey) 227 | if err == ErrorNotFound { 228 | return nil 229 | } 230 | if err != nil { 231 | return err 232 | } 233 | 234 | d, err := queue.newDelivery(payload) 235 | if err != nil { 236 | return fmt.Errorf("create new delivery: %w", err) 237 | } 238 | 239 | queue.deliveryChan <- d 240 | } 241 | 242 | return nil 243 | } 244 | 245 | func (queue *redisQueue) newDelivery(payload string) (Delivery, error) { 246 | rd := &redisDelivery{ 247 | ctx: queue.ackCtx, 248 | payload: payload, 249 | unackedKey: queue.unackedKey, 250 | rejectedKey: queue.rejectedKey, 251 | pushKey: queue.pushKey, 252 | redisClient: queue.redisClient, 253 | errChan: queue.errChan, 254 | } 255 | 256 | var err error 257 | rd.header, rd.clearPayload, err = ExtractHeaderAndPayload(payload) 258 | if err == nil { 259 | return rd, nil 260 | } 261 | 262 | // we need to reject a delivery here to move the delivery from the unacked to the rejected list. 263 | rejectErr := rd.Reject() 264 | if rejectErr != nil { 265 | return nil, fmt.Errorf("%s, reject faulty delivery: %w", err, rejectErr) 266 | } 267 | 268 | return nil, err 269 | } 270 | 271 | // StopConsuming can be used to stop all consumers on this queue. 
It returns a 272 | // channel which can be used to wait for all active consumers to finish their 273 | // current Consume() call. This is useful to implement graceful shutdown. 274 | func (queue *redisQueue) StopConsuming() <-chan struct{} { 275 | finishedChan := make(chan struct{}) 276 | // We only stop consuming once 277 | // This function returns immediately, while the work of actually stopping runs in a separate goroutine 278 | go func() { 279 | queue.lock.Lock() 280 | defer queue.lock.Unlock() 281 | 282 | select { 283 | case <-queue.consumingStopped: 284 | // already stopped, nothing to do 285 | close(finishedChan) 286 | return 287 | default: 288 | close(queue.consumingStopped) 289 | queue.ackCancel() 290 | queue.stopWg.Wait() 291 | close(finishedChan) 292 | } 293 | }() 294 | 295 | return finishedChan 296 | } 297 | 298 | // AddConsumer adds a consumer to the queue and returns its internal name 299 | func (queue *redisQueue) AddConsumer(tag string, consumer Consumer) (name string, err error) { 300 | name, err = queue.addConsumer(tag) 301 | if err != nil { 302 | return "", err 303 | } 304 | go queue.consumerConsume(consumer) 305 | return name, nil 306 | } 307 | 308 | func (queue *redisQueue) consumerConsume(consumer Consumer) { 309 | defer func() { 310 | queue.stopWg.Done() 311 | }() 312 | for { 313 | select { 314 | case <-queue.consumingStopped: // prefer this case 315 | return 316 | default: 317 | } 318 | 319 | select { 320 | case <-queue.consumingStopped: 321 | return 322 | 323 | case delivery, ok := <-queue.deliveryChan: 324 | if !ok { // deliveryChan closed 325 | return 326 | } 327 | 328 | consumer.Consume(delivery) 329 | } 330 | } 331 | } 332 | 333 | // AddConsumerFunc adds a consumer which is defined only by a function. This is 334 | // similar to http.HandlerFunc and useful if your consumers don't need any 335 | // state. 
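// A short sketch of how it can be used (assuming a started queue and deliveries that only need
// to be logged):
//
//	_, err := queue.AddConsumerFunc("printer", func(delivery Delivery) {
//		log.Println(delivery.Payload())
//		if err := delivery.Ack(); err != nil {
//			log.Printf("failed to ack: %v", err)
//		}
//	})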
336 | func (queue *redisQueue) AddConsumerFunc(tag string, consumerFunc ConsumerFunc) (string, error) { 337 | return queue.AddConsumer(tag, consumerFunc) 338 | } 339 | 340 | // AddBatchConsumer is similar to AddConsumer, but for batches of deliveries 341 | // timeout limits the amount of time waiting to fill an entire batch 342 | // The timer is only started when the first message in a batch is received 343 | func (queue *redisQueue) AddBatchConsumer(tag string, batchSize int64, timeout time.Duration, consumer BatchConsumer) (string, error) { 344 | name, err := queue.addConsumer(tag) 345 | if err != nil { 346 | return "", err 347 | } 348 | go queue.consumerBatchConsume(batchSize, timeout, consumer) 349 | return name, nil 350 | } 351 | 352 | // AddBatchConsumerFunc is similar to AddConsumerFunc, but for batches of deliveries 353 | // timeout limits the amount of time waiting to fill an entire batch 354 | // The timer is only started when the first message in a batch is received 355 | func (queue *redisQueue) AddBatchConsumerFunc(tag string, batchSize int64, timeout time.Duration, batchConsumerFunc BatchConsumerFunc) (string, error) { 356 | name, err := queue.addConsumer(tag) 357 | if err != nil { 358 | return "", err 359 | } 360 | go queue.consumerBatchConsume(batchSize, timeout, batchConsumerFunc) 361 | return name, nil 362 | } 363 | 364 | func (queue *redisQueue) consumerBatchConsume(batchSize int64, timeout time.Duration, consumer BatchConsumer) { 365 | defer func() { 366 | queue.stopWg.Done() 367 | }() 368 | batch := []Delivery{} 369 | for { 370 | select { 371 | case <-queue.consumingStopped: // prefer this case 372 | return 373 | default: 374 | } 375 | 376 | select { 377 | case <-queue.consumingStopped: 378 | return 379 | 380 | case delivery, ok := <-queue.deliveryChan: // Wait for first delivery 381 | if !ok { // deliveryChan closed 382 | return 383 | } 384 | 385 | batch = append(batch, delivery) 386 | batch, ok = queue.batchTimeout(batchSize, batch, timeout) 387 | if !ok { 388 | return 389 | } 390 | 391 | consumer.Consume(batch) 392 | batch = batch[:0] // reset batch 393 | } 394 | } 395 | } 396 | 397 | func (queue *redisQueue) batchTimeout(batchSize int64, batch []Delivery, timeout time.Duration) (fullBatch []Delivery, ok bool) { 398 | timer := time.NewTimer(timeout) 399 | defer timer.Stop() 400 | for { 401 | select { 402 | case <-queue.consumingStopped: // prefer this case 403 | return nil, false 404 | default: 405 | } 406 | 407 | select { 408 | case <-queue.consumingStopped: // consuming stopped: abort batch 409 | return nil, false 410 | 411 | case <-timer.C: // timeout: submit batch 412 | return batch, true 413 | 414 | case delivery, ok := <-queue.deliveryChan: 415 | if !ok { // deliveryChan closed: abort batch 416 | return nil, false 417 | } 418 | 419 | batch = append(batch, delivery) 420 | if int64(len(batch)) >= batchSize { 421 | return batch, true // once big enough: submit batch 422 | } 423 | } 424 | } 425 | } 426 | 427 | func (queue *redisQueue) addConsumer(tag string) (name string, err error) { 428 | queue.lock.Lock() 429 | defer queue.lock.Unlock() 430 | 431 | if err := queue.ensureConsuming(); err != nil { 432 | return "", err 433 | } 434 | 435 | name = fmt.Sprintf("%s-%s", tag, RandomString(6)) 436 | 437 | // add consumer to list of consumers of this queue 438 | if _, err := queue.redisClient.SAdd(queue.consumersKey, name); err != nil { 439 | return "", err 440 | } 441 | 442 | queue.stopWg.Add(1) 443 | // log.Printf("rmq queue added consumer %s %s", queue, name) 444 | 
return name, nil 445 | } 446 | 447 | // PurgeReady removes all ready deliveries from the queue and returns the number of purged deliveries 448 | func (queue *redisQueue) PurgeReady() (int64, error) { 449 | return queue.deleteRedisList(queue.readyKey) 450 | } 451 | 452 | // PurgeRejected removes all rejected deliveries from the queue and returns the number of purged deliveries 453 | func (queue *redisQueue) PurgeRejected() (int64, error) { 454 | return queue.deleteRedisList(queue.rejectedKey) 455 | } 456 | 457 | // return number of deleted list items 458 | // https://www.redisgreen.net/blog/deleting-large-lists 459 | func (queue *redisQueue) deleteRedisList(key string) (int64, error) { 460 | total, err := queue.redisClient.LLen(key) 461 | if total == 0 { 462 | return 0, err // nothing to do 463 | } 464 | 465 | // delete elements without blocking 466 | for todo := total; todo > 0; todo -= purgeBatchSize { 467 | // minimum of purgeBatchSize and todo 468 | batchSize := purgeBatchSize 469 | if batchSize > todo { 470 | batchSize = todo 471 | } 472 | 473 | // remove one batch 474 | err := queue.redisClient.LTrim(key, 0, -1-batchSize) 475 | if err != nil { 476 | return 0, err 477 | } 478 | } 479 | 480 | return total, nil 481 | } 482 | 483 | // ReturnUnacked tries to return max unacked deliveries back to 484 | // the ready queue and returns the number of returned deliveries 485 | func (queue *redisQueue) ReturnUnacked(max int64) (count int64, error error) { 486 | return queue.move(queue.unackedKey, queue.readyKey, max) 487 | } 488 | 489 | // ReturnRejected tries to return max rejected deliveries back to 490 | // the ready queue and returns the number of returned deliveries 491 | func (queue *redisQueue) ReturnRejected(max int64) (count int64, err error) { 492 | return queue.move(queue.rejectedKey, queue.readyKey, max) 493 | } 494 | 495 | func (queue *redisQueue) move(from, to string, max int64) (n int64, error error) { 496 | for n = 0; n < max; n++ { 497 | switch _, err := queue.redisClient.RPopLPush(from, to); err { 498 | case nil: // moved one 499 | continue 500 | case ErrorNotFound: // nothing left 501 | return n, nil 502 | default: // error 503 | return 0, err 504 | } 505 | } 506 | return n, nil 507 | } 508 | 509 | // Drain removes and returns 'count' elements from the queue. In case of an error, 510 | // Drain return all elements removed until the error occurred and the error itself. 
511 | func (queue *redisQueue) Drain(count int64) ([]string, error) { 512 | var ( 513 | n int64 514 | err error 515 | ) 516 | out := make([]string, 0, count) 517 | 518 | for n = 0; n < count; n++ { 519 | val, err := queue.redisClient.RPop(queue.readyKey) 520 | if err != nil { 521 | return out, err 522 | } 523 | out = append(out, val) 524 | } 525 | 526 | return out, err 527 | } 528 | 529 | // Destroy purges and removes the queue from the list of queues 530 | func (queue *redisQueue) Destroy() (readyCount, rejectedCount int64, err error) { 531 | readyCount, err = queue.PurgeReady() 532 | if err != nil { 533 | return 0, 0, err 534 | } 535 | rejectedCount, err = queue.PurgeRejected() 536 | if err != nil { 537 | return 0, 0, err 538 | } 539 | 540 | count, err := queue.redisClient.SRem(queuesKey, queue.name) 541 | if err != nil { 542 | return 0, 0, err 543 | } 544 | if count == 0 { 545 | return 0, 0, ErrorNotFound 546 | } 547 | 548 | return readyCount, rejectedCount, nil 549 | } 550 | 551 | // closeInStaleConnection closes the queue in the associated connection by removing all related keys 552 | // not supposed to be called on queues in active sessions 553 | func (queue *redisQueue) closeInStaleConnection() error { 554 | if _, err := queue.redisClient.Del(queue.unackedKey); err != nil { 555 | return err 556 | } 557 | if _, err := queue.redisClient.Del(queue.consumersKey); err != nil { 558 | return err 559 | } 560 | 561 | count, err := queue.redisClient.SRem(queue.queuesKey, queue.name) 562 | if err != nil { 563 | return err 564 | } 565 | if count == 0 { 566 | return ErrorNotFound 567 | } 568 | 569 | return nil 570 | } 571 | 572 | func (queue *redisQueue) readyCount() (int64, error) { 573 | return queue.redisClient.LLen(queue.readyKey) 574 | } 575 | 576 | func (queue *redisQueue) unackedCount() (int64, error) { 577 | return queue.redisClient.LLen(queue.unackedKey) 578 | } 579 | 580 | func (queue *redisQueue) rejectedCount() (int64, error) { 581 | return queue.redisClient.LLen(queue.rejectedKey) 582 | } 583 | 584 | func (queue *redisQueue) getConsumers() ([]string, error) { 585 | return queue.redisClient.SMembers(queue.consumersKey) 586 | } 587 | 588 | // The caller of this method should be holding the queue.lock mutex 589 | func (queue *redisQueue) ensureConsuming() error { 590 | if queue.deliveryChan == nil { 591 | return ErrorNotConsuming 592 | } 593 | select { 594 | case <-queue.consumingStopped: 595 | return ErrorConsumingStopped 596 | default: 597 | return nil 598 | } 599 | } 600 | 601 | // jitteredDuration calculates and returns a value that is +/-10% the input duration 602 | func jitteredDuration(duration time.Duration) time.Duration { 603 | factor := 0.9 + rand.Float64()*0.2 // a jitter factor between 0.9 and 1.1 (+-10%) 604 | return time.Duration(float64(duration) * factor) 605 | } 606 | -------------------------------------------------------------------------------- /queue_test.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "net/http" 7 | "strconv" 8 | "sync/atomic" 9 | "testing" 10 | "time" 11 | 12 | "github.com/redis/go-redis/v9" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | func TestConnections(t *testing.T) { 18 | redisOptions, closer := testRedis(t) 19 | defer closer() 20 | 21 | flushConn, err := OpenConnectionWithRedisOptions("conns-flush", redisOptions, nil) 22 | assert.NoError(t, err) 23 | assert.NoError(t, 
flushConn.stopHeartbeat()) 24 | assert.Equal(t, ErrorNotFound, flushConn.stopHeartbeat()) 25 | assert.NoError(t, flushConn.flushDb()) 26 | 27 | connection, err := OpenConnectionWithRedisOptions("conns-conn", redisOptions, nil) 28 | assert.NoError(t, err) 29 | require.NotNil(t, connection) 30 | _, err = NewCleaner(connection).Clean() 31 | require.NoError(t, err) 32 | 33 | connections, err := connection.getConnections() 34 | assert.NoError(t, err) 35 | assert.Len(t, connections, 1) // cleaner connection remains 36 | 37 | conn1, err := OpenConnectionWithRedisOptions("conns-conn1", redisOptions, nil) 38 | assert.NoError(t, err) 39 | connections, err = connection.getConnections() 40 | assert.NoError(t, err) 41 | assert.Len(t, connections, 2) 42 | assert.Equal(t, ErrorNotFound, connection.hijackConnection("nope").checkHeartbeat()) 43 | assert.NoError(t, conn1.checkHeartbeat()) 44 | conn2, err := OpenConnectionWithRedisOptions("conns-conn2", redisOptions, nil) 45 | assert.NoError(t, err) 46 | connections, err = connection.getConnections() 47 | assert.NoError(t, err) 48 | assert.Len(t, connections, 3) 49 | assert.NoError(t, conn1.checkHeartbeat()) 50 | assert.NoError(t, conn2.checkHeartbeat()) 51 | 52 | assert.Equal(t, ErrorNotFound, connection.hijackConnection("nope").stopHeartbeat()) 53 | assert.NoError(t, conn1.stopHeartbeat()) 54 | assert.Equal(t, ErrorNotFound, conn1.checkHeartbeat()) 55 | assert.NoError(t, conn2.checkHeartbeat()) 56 | connections, err = connection.getConnections() 57 | assert.NoError(t, err) 58 | assert.Len(t, connections, 3) 59 | 60 | assert.NoError(t, conn2.stopHeartbeat()) 61 | assert.Equal(t, ErrorNotFound, conn1.checkHeartbeat()) 62 | assert.Equal(t, ErrorNotFound, conn2.checkHeartbeat()) 63 | connections, err = connection.getConnections() 64 | assert.NoError(t, err) 65 | assert.Len(t, connections, 3) 66 | 67 | assert.NoError(t, connection.stopHeartbeat()) 68 | } 69 | 70 | func TestConnectionQueues(t *testing.T) { 71 | redisOptions, closer := testRedis(t) 72 | defer closer() 73 | 74 | connection, err := OpenConnectionWithRedisOptions("conn-q-conn", redisOptions, nil) 75 | assert.NoError(t, err) 76 | require.NotNil(t, connection) 77 | 78 | assert.NoError(t, connection.unlistAllQueues()) 79 | queues, err := connection.GetOpenQueues() 80 | assert.NoError(t, err) 81 | assert.Len(t, queues, 0) 82 | 83 | queue1, err := connection.OpenQueue("conn-q-q1") 84 | assert.NoError(t, err) 85 | require.NotNil(t, queue1) 86 | queues, err = connection.GetOpenQueues() 87 | assert.NoError(t, err) 88 | assert.Equal(t, []string{"conn-q-q1"}, queues) 89 | queues, err = connection.getConsumingQueues() 90 | assert.NoError(t, err) 91 | assert.Len(t, queues, 0) 92 | assert.NoError(t, queue1.StartConsuming(1, time.Millisecond)) 93 | queues, err = connection.getConsumingQueues() 94 | assert.NoError(t, err) 95 | assert.Equal(t, []string{"conn-q-q1"}, queues) 96 | 97 | queue2, err := connection.OpenQueue("conn-q-q2") 98 | assert.NoError(t, err) 99 | require.NotNil(t, queue2) 100 | queues, err = connection.GetOpenQueues() 101 | assert.NoError(t, err) 102 | assert.Len(t, queues, 2) 103 | queues, err = connection.getConsumingQueues() 104 | assert.NoError(t, err) 105 | assert.Len(t, queues, 1) 106 | assert.NoError(t, queue2.StartConsuming(1, time.Millisecond)) 107 | queues, err = connection.getConsumingQueues() 108 | assert.NoError(t, err) 109 | assert.Len(t, queues, 2) 110 | 111 | <-queue2.StopConsuming() 112 | assert.NoError(t, queue2.closeInStaleConnection()) 113 | queues, err = 
connection.GetOpenQueues() 114 | assert.NoError(t, err) 115 | assert.Len(t, queues, 2) 116 | queues, err = connection.getConsumingQueues() 117 | assert.NoError(t, err) 118 | assert.Equal(t, []string{"conn-q-q1"}, queues) 119 | 120 | <-queue1.StopConsuming() 121 | assert.NoError(t, queue1.closeInStaleConnection()) 122 | queues, err = connection.GetOpenQueues() 123 | assert.NoError(t, err) 124 | assert.Len(t, queues, 2) 125 | queues, err = connection.getConsumingQueues() 126 | assert.NoError(t, err) 127 | assert.Len(t, queues, 0) 128 | 129 | readyCount, rejectedCount, err := queue1.Destroy() 130 | assert.NoError(t, err) 131 | assert.Equal(t, int64(0), readyCount) 132 | assert.Equal(t, int64(0), rejectedCount) 133 | queues, err = connection.GetOpenQueues() 134 | assert.NoError(t, err) 135 | assert.Equal(t, []string{"conn-q-q2"}, queues) 136 | queues, err = connection.getConsumingQueues() 137 | assert.NoError(t, err) 138 | assert.Len(t, queues, 0) 139 | 140 | assert.NoError(t, connection.stopHeartbeat()) 141 | } 142 | 143 | func TestQueueCommon(t *testing.T) { 144 | redisOptions, closer := testRedis(t) 145 | defer closer() 146 | 147 | connection, err := OpenConnectionWithRedisOptions("queue-conn", redisOptions, nil) 148 | assert.NoError(t, err) 149 | require.NotNil(t, connection) 150 | 151 | queue, err := connection.OpenQueue("queue-q") 152 | assert.NoError(t, err) 153 | require.NotNil(t, queue) 154 | _, err = queue.PurgeReady() 155 | assert.NoError(t, err) 156 | eventuallyReady(t, queue, 0) 157 | assert.NoError(t, queue.Publish("queue-d1")) 158 | eventuallyReady(t, queue, 1) 159 | assert.NoError(t, queue.Publish("queue-d2")) 160 | eventuallyReady(t, queue, 2) 161 | count, err := queue.PurgeReady() 162 | assert.Equal(t, int64(2), count) 163 | eventuallyReady(t, queue, 0) 164 | count, err = queue.PurgeReady() 165 | assert.Equal(t, int64(0), count) 166 | 167 | queues, err := connection.getConsumingQueues() 168 | assert.NoError(t, err) 169 | assert.Len(t, queues, 0) 170 | assert.NoError(t, queue.StartConsuming(10, time.Millisecond)) 171 | assert.Equal(t, ErrorAlreadyConsuming, queue.StartConsuming(10, time.Millisecond)) 172 | cons1name, err := queue.AddConsumer("queue-cons1", NewTestConsumer("queue-A")) 173 | assert.NoError(t, err) 174 | time.Sleep(time.Millisecond) 175 | queues, err = connection.getConsumingQueues() 176 | assert.NoError(t, err) 177 | assert.Len(t, queues, 1) 178 | consumers, err := queue.getConsumers() 179 | assert.NoError(t, err) 180 | assert.Equal(t, []string{cons1name}, consumers) 181 | _, err = queue.AddConsumer("queue-cons2", NewTestConsumer("queue-B")) 182 | assert.NoError(t, err) 183 | consumers, err = queue.getConsumers() 184 | assert.NoError(t, err) 185 | assert.Len(t, consumers, 2) 186 | 187 | <-queue.StopConsuming() 188 | assert.NoError(t, connection.stopHeartbeat()) 189 | } 190 | 191 | func TestConsumerCommon(t *testing.T) { 192 | redisOptions, closer := testRedis(t) 193 | defer closer() 194 | 195 | // Note that we're using OpenClusterConnection with redis.NewClient (not redis.NewClusterClient). 196 | // This is just like using OpenConnection, but just using the Redis hash tags {} instead of []. 197 | // This is possible, but not really an expected use case. 
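// (With hash tags the queue name is wrapped in curly braces, so for example the unacked key
// becomes rmq::connection::cons-conn-XXXXXX::queue::{cons-q}::unacked, as the regexp below shows.
// Redis Cluster hashes only the {...} part, which keeps all keys of one queue in the same slot;
// see getTemplate in redis_keys.go.)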
198 | connection, err := OpenClusterConnection("cons-conn", redis.NewClient(redisOptions), nil) 199 | assert.NoError(t, err) 200 | require.NotNil(t, connection) 201 | 202 | queue1, err := connection.OpenQueue("cons-q") 203 | assert.NoError(t, err) 204 | require.NotNil(t, queue1) 205 | _, err = queue1.PurgeReady() 206 | assert.NoError(t, err) 207 | 208 | consumer := NewTestConsumer("cons-A") 209 | consumer.AutoAck = false 210 | assert.NoError(t, queue1.StartConsuming(10, time.Millisecond)) 211 | _, err = queue1.AddConsumer("cons-cons", consumer) 212 | assert.NoError(t, err) 213 | assert.Nil(t, consumer.Last()) 214 | 215 | assert.NoError(t, queue1.Publish(PayloadWithHeader("cons-d1", http.Header{"foo": []string{"bar1"}}))) 216 | eventuallyReady(t, queue1, 0) 217 | eventuallyUnacked(t, queue1, 1) 218 | require.NotNil(t, consumer.Last()) 219 | assert.Equal(t, "cons-d1", consumer.Last().Payload()) 220 | assert.Equal(t, http.Header{"foo": []string{"bar1"}}, consumer.Last().(WithHeader).Header()) 221 | 222 | assert.NoError(t, queue1.Publish(PayloadWithHeader("cons-d2", http.Header{"foo": []string{"bar2"}}))) 223 | eventuallyReady(t, queue1, 0) 224 | eventuallyUnacked(t, queue1, 2) 225 | assert.Equal(t, "cons-d2", consumer.Last().Payload()) 226 | assert.Equal(t, http.Header{"foo": []string{"bar2"}}, consumer.Last().(WithHeader).Header()) 227 | 228 | assert.Regexp(t, // using {queue} 229 | `\[cons-d2 rmq::connection::cons-conn-\w{6}::queue::\{cons-q\}::unacked\]`, 230 | fmt.Sprintf("%s", consumer.Last()), 231 | ) 232 | 233 | assert.NoError(t, consumer.Deliveries()[0].Ack()) 234 | eventuallyReady(t, queue1, 0) 235 | eventuallyUnacked(t, queue1, 1) 236 | 237 | assert.NoError(t, consumer.Deliveries()[1].Ack()) 238 | eventuallyReady(t, queue1, 0) 239 | eventuallyUnacked(t, queue1, 0) 240 | 241 | assert.Equal(t, ErrorNotFound, consumer.Deliveries()[0].Ack()) 242 | 243 | assert.NoError(t, queue1.Publish(PayloadWithHeader("cons-d3", http.Header{"foo": []string{"bar3"}}))) 244 | eventuallyReady(t, queue1, 0) 245 | eventuallyUnacked(t, queue1, 1) 246 | eventuallyRejected(t, queue1, 0) 247 | assert.Equal(t, "cons-d3", consumer.Last().Payload()) 248 | assert.Equal(t, http.Header{"foo": []string{"bar3"}}, consumer.Last().(WithHeader).Header()) 249 | assert.NoError(t, consumer.Last().Reject()) 250 | eventuallyReady(t, queue1, 0) 251 | eventuallyUnacked(t, queue1, 0) 252 | eventuallyRejected(t, queue1, 1) 253 | 254 | assert.NoError(t, queue1.Publish("cons-d4")) 255 | eventuallyReady(t, queue1, 0) 256 | eventuallyUnacked(t, queue1, 1) 257 | eventuallyRejected(t, queue1, 1) 258 | assert.Equal(t, "cons-d4", consumer.Last().Payload()) 259 | assert.NoError(t, consumer.Last().Reject()) 260 | eventuallyReady(t, queue1, 0) 261 | eventuallyUnacked(t, queue1, 0) 262 | eventuallyRejected(t, queue1, 2) 263 | count, err := queue1.PurgeRejected() 264 | assert.NoError(t, err) 265 | assert.Equal(t, int64(2), count) 266 | eventuallyRejected(t, queue1, 0) 267 | count, err = queue1.PurgeRejected() 268 | assert.NoError(t, err) 269 | assert.Equal(t, int64(0), count) 270 | 271 | queue2, err := connection.OpenQueue("cons-func-q") 272 | assert.NoError(t, err) 273 | assert.NoError(t, queue2.StartConsuming(10, time.Millisecond)) 274 | 275 | payloadChan := make(chan string, 1) 276 | payload := "cons-func-payload" 277 | 278 | _, err = queue2.AddConsumerFunc("cons-func", func(delivery Delivery) { 279 | err = delivery.Ack() 280 | assert.NoError(t, err) 281 | payloadChan <- delivery.Payload() 282 | }) 283 | assert.NoError(t, err) 284 | 285 | 
assert.NoError(t, queue2.Publish(payload)) 286 | eventuallyReady(t, queue2, 0) 287 | eventuallyUnacked(t, queue2, 0) 288 | assert.Equal(t, payload, <-payloadChan) 289 | 290 | <-queue1.StopConsuming() 291 | <-queue2.StopConsuming() 292 | assert.NoError(t, connection.stopHeartbeat()) 293 | } 294 | 295 | func TestMulti(t *testing.T) { 296 | redisOptions, closer := testRedis(t) 297 | defer closer() 298 | 299 | connection, err := OpenConnectionWithRedisOptions("multi-conn", redisOptions, nil) 300 | assert.NoError(t, err) 301 | queue, err := connection.OpenQueue("multi-q") 302 | assert.NoError(t, err) 303 | _, err = queue.PurgeReady() 304 | assert.NoError(t, err) 305 | 306 | for i := 0; i < 20; i++ { 307 | err := queue.Publish(fmt.Sprintf("multi-d%d", i)) 308 | assert.NoError(t, err) 309 | } 310 | eventuallyReady(t, queue, 20) 311 | eventuallyUnacked(t, queue, 0) 312 | 313 | assert.NoError(t, queue.StartConsuming(10, time.Millisecond)) 314 | 315 | // Assert that eventually the ready count drops to 10 and unacked rises to 10 316 | // TODO use the util funcs instead 317 | assert.Eventually(t, func() bool { 318 | readyCount, err := queue.readyCount() 319 | if err != nil { 320 | return false 321 | } 322 | unackedCount, err := queue.unackedCount() 323 | if err != nil { 324 | return false 325 | } 326 | return readyCount == 10 && unackedCount == 10 327 | }, 10*time.Second, 2*time.Millisecond) 328 | 329 | consumer := NewTestConsumer("multi-cons") 330 | consumer.AutoAck = false 331 | consumer.AutoFinish = false 332 | 333 | _, err = queue.AddConsumer("multi-cons", consumer) 334 | assert.NoError(t, err) 335 | 336 | // After we add the consumer - ready and unacked do not change 337 | eventuallyReady(t, queue, 10) 338 | eventuallyUnacked(t, queue, 10) 339 | 340 | require.NotNil(t, consumer.Last()) 341 | assert.NoError(t, consumer.Last().Ack()) 342 | // Assert that after the consumer acks a message the ready count drops to 9 and unacked remains at 10 343 | // TODO use util funcs instead 344 | assert.Eventually(t, func() bool { 345 | readyCount, err := queue.readyCount() 346 | if err != nil { 347 | return false 348 | } 349 | unackedCount, err := queue.unackedCount() 350 | if err != nil { 351 | return false 352 | } 353 | return readyCount == 9 && unackedCount == 10 354 | }, 10*time.Second, 2*time.Millisecond) 355 | 356 | consumer.Finish() 357 | // Assert that after the consumer finishes processing the first message ready and unacked do not change 358 | eventuallyReady(t, queue, 9) 359 | eventuallyUnacked(t, queue, 10) 360 | 361 | assert.NoError(t, consumer.Last().Ack()) 362 | // Assert that after the consumer acks a message the ready count drops to 8 and unacked remains at 10 363 | // TODO use the util funcs instead 364 | assert.Eventually(t, func() bool { 365 | readyCount, err := queue.readyCount() 366 | if err != nil { 367 | return false 368 | } 369 | unackedCount, err := queue.unackedCount() 370 | if err != nil { 371 | return false 372 | } 373 | return readyCount == 8 && unackedCount == 10 374 | }, 10*time.Second, 2*time.Millisecond) 375 | 376 | consumer.Finish() 377 | // Assert that after the consumer finishes processing the second message ready and unacked do not change 378 | eventuallyReady(t, queue, 8) 379 | eventuallyUnacked(t, queue, 10) 380 | 381 | // This prevents the consumer from blocking internally inside a call to Consume, which allows the queue to complete 382 | // the call to StopConsuming 383 | consumer.FinishAll() 384 | 385 | <-queue.StopConsuming() 386 | assert.NoError(t, 
connection.stopHeartbeat()) 387 | } 388 | 389 | func TestBatch(t *testing.T) { 390 | redisOptions, closer := testRedis(t) 391 | defer closer() 392 | 393 | connection, err := OpenConnectionWithRedisOptions("batch-conn", redisOptions, nil) 394 | assert.NoError(t, err) 395 | queue, err := connection.OpenQueue("batch-q") 396 | assert.NoError(t, err) 397 | _, err = queue.PurgeRejected() 398 | assert.NoError(t, err) 399 | _, err = queue.PurgeReady() 400 | assert.NoError(t, err) 401 | 402 | for i := 0; i < 5; i++ { 403 | err := queue.Publish(fmt.Sprintf("batch-d%d", i)) 404 | assert.NoError(t, err) 405 | } 406 | 407 | assert.NoError(t, queue.StartConsuming(10, time.Millisecond)) 408 | eventuallyUnacked(t, queue, 5) 409 | 410 | consumer := NewTestBatchConsumer() 411 | _, err = queue.AddBatchConsumer("batch-cons", 2, 50*time.Millisecond, consumer) 412 | assert.NoError(t, err) 413 | assert.Eventually(t, func() bool { 414 | return len(consumer.Last()) == 2 415 | }, 10*time.Second, 2*time.Millisecond) 416 | assert.Equal(t, "batch-d0", consumer.Last()[0].Payload()) 417 | assert.Equal(t, "batch-d1", consumer.Last()[1].Payload()) 418 | assert.NoError(t, consumer.Last()[0].Reject()) 419 | assert.NoError(t, consumer.Last()[1].Ack()) 420 | eventuallyUnacked(t, queue, 3) 421 | eventuallyRejected(t, queue, 1) 422 | 423 | consumer.Finish() 424 | assert.Eventually(t, func() bool { 425 | return len(consumer.Last()) == 2 426 | }, 10*time.Second, 2*time.Millisecond) 427 | assert.Equal(t, "batch-d2", consumer.Last()[0].Payload()) 428 | assert.Equal(t, "batch-d3", consumer.Last()[1].Payload()) 429 | assert.NoError(t, consumer.Last()[0].Reject()) 430 | assert.NoError(t, consumer.Last()[1].Ack()) 431 | eventuallyUnacked(t, queue, 1) 432 | eventuallyRejected(t, queue, 2) 433 | 434 | consumer.Finish() 435 | // Last Batch is cleared out 436 | assert.Len(t, consumer.Last(), 0) 437 | eventuallyUnacked(t, queue, 1) 438 | eventuallyRejected(t, queue, 2) 439 | 440 | // After a pause the batch consumer will pull down another batch 441 | assert.Eventually(t, func() bool { 442 | return len(consumer.Last()) == 1 443 | }, 10*time.Second, 2*time.Millisecond) 444 | assert.Equal(t, "batch-d4", consumer.Last()[0].Payload()) 445 | assert.NoError(t, consumer.Last()[0].Reject()) 446 | eventuallyUnacked(t, queue, 0) 447 | eventuallyRejected(t, queue, 3) 448 | 449 | for i := 0; i < 5; i++ { 450 | err := queue.Publish(fmt.Sprintf("batch-d%d", i)) 451 | assert.NoError(t, err) 452 | } 453 | _, err = queue.AddBatchConsumerFunc("batch-cons-func", 2, 50*time.Millisecond, func(batch Deliveries) { 454 | errMap := batch.Ack() 455 | assert.Empty(t, errMap) 456 | }) 457 | assert.NoError(t, err) 458 | } 459 | 460 | func TestReturnRejected(t *testing.T) { 461 | redisOptions, closer := testRedis(t) 462 | defer closer() 463 | 464 | connection, err := OpenConnectionWithRedisOptions("return-conn", redisOptions, nil) 465 | assert.NoError(t, err) 466 | queue, err := connection.OpenQueue("return-q") 467 | assert.NoError(t, err) 468 | _, err = queue.PurgeReady() 469 | assert.NoError(t, err) 470 | 471 | for i := 0; i < 6; i++ { 472 | err := queue.Publish(fmt.Sprintf("return-d%d", i)) 473 | assert.NoError(t, err) 474 | } 475 | 476 | eventuallyReady(t, queue, 6) 477 | eventuallyUnacked(t, queue, 0) 478 | eventuallyRejected(t, queue, 0) 479 | 480 | assert.NoError(t, queue.StartConsuming(10, time.Millisecond)) 481 | eventuallyReady(t, queue, 0) 482 | eventuallyUnacked(t, queue, 6) 483 | eventuallyRejected(t, queue, 0) 484 | 485 | consumer := 
NewTestConsumer("return-cons") 486 | consumer.AutoAck = false 487 | _, err = queue.AddConsumer("cons", consumer) 488 | assert.NoError(t, err) 489 | eventuallyReady(t, queue, 0) 490 | eventuallyUnacked(t, queue, 6) 491 | eventuallyRejected(t, queue, 0) 492 | 493 | assert.Len(t, consumer.Deliveries(), 6) 494 | assert.NoError(t, consumer.Deliveries()[0].Reject()) 495 | assert.NoError(t, consumer.Deliveries()[1].Ack()) 496 | assert.NoError(t, consumer.Deliveries()[2].Reject()) 497 | assert.NoError(t, consumer.Deliveries()[3].Reject()) 498 | // delivery 4 still open 499 | assert.NoError(t, consumer.Deliveries()[5].Reject()) 500 | 501 | eventuallyReady(t, queue, 0) 502 | eventuallyUnacked(t, queue, 1) // delivery 4 503 | eventuallyRejected(t, queue, 4) // delivery 0, 2, 3, 5 504 | 505 | <-queue.StopConsuming() 506 | 507 | n, err := queue.ReturnRejected(2) 508 | assert.NoError(t, err) 509 | assert.Equal(t, int64(2), n) 510 | eventuallyReady(t, queue, 2) 511 | eventuallyUnacked(t, queue, 1) // delivery 4 512 | eventuallyRejected(t, queue, 2) // delivery 3, 5 513 | 514 | n, err = queue.ReturnRejected(math.MaxInt64) 515 | assert.NoError(t, err) 516 | assert.Equal(t, int64(2), n) 517 | eventuallyReady(t, queue, 4) 518 | eventuallyUnacked(t, queue, 1) // delivery 4 519 | eventuallyRejected(t, queue, 0) 520 | } 521 | 522 | func TestRejectFaultyMessages(t *testing.T) { 523 | redisOptions, closer := testRedis(t) 524 | defer closer() 525 | 526 | connection, err := OpenConnectionWithRedisOptions("faulty-conn", redisOptions, nil) 527 | require.NoError(t, err) 528 | queue, err := connection.OpenQueue("faulty-q") 529 | require.NoError(t, err) 530 | _, err = queue.PurgeReady() 531 | require.NoError(t, err) 532 | 533 | for i := 0; i < 6; i++ { 534 | // if there is no line separator after the header in the message, 535 | // it will lead to an error and the message will be rejected 536 | err := queue.Publish(fmt.Sprintf("%sreturn-d%d", jsonHeaderSignature, i)) 537 | require.NoError(t, err) 538 | } 539 | 540 | eventuallyReady(t, queue, 6) 541 | eventuallyUnacked(t, queue, 0) 542 | eventuallyRejected(t, queue, 0) 543 | 544 | require.NoError(t, queue.StartConsuming(10, time.Millisecond)) 545 | eventuallyReady(t, queue, 0) 546 | eventuallyUnacked(t, queue, 0) 547 | eventuallyRejected(t, queue, 6) 548 | 549 | consumer := NewTestConsumer("faulty-cons") 550 | consumer.AutoAck = false 551 | _, err = queue.AddConsumer("cons", consumer) 552 | require.NoError(t, err) 553 | eventuallyReady(t, queue, 0) 554 | eventuallyUnacked(t, queue, 0) 555 | eventuallyRejected(t, queue, 6) 556 | 557 | require.Len(t, consumer.Deliveries(), 0) 558 | 559 | <-queue.StopConsuming() 560 | } 561 | 562 | func TestPushQueue(t *testing.T) { 563 | redisOptions, closer := testRedis(t) 564 | defer closer() 565 | 566 | connection, err := OpenConnectionWithRedisOptions("push", redisOptions, nil) 567 | assert.NoError(t, err) 568 | queue1, err := connection.OpenQueue("queue1") 569 | assert.NoError(t, err) 570 | queue2, err := connection.OpenQueue("queue2") 571 | assert.NoError(t, err) 572 | queue1.SetPushQueue(queue2) 573 | assert.Equal(t, queue2.(*redisQueue).readyKey, queue1.(*redisQueue).pushKey) 574 | 575 | consumer1 := NewTestConsumer("push-cons") 576 | consumer1.AutoAck = false 577 | consumer1.AutoFinish = false 578 | assert.NoError(t, queue1.StartConsuming(10, time.Millisecond)) 579 | _, err = queue1.AddConsumer("push-cons", consumer1) 580 | assert.NoError(t, err) 581 | 582 | consumer2 := NewTestConsumer("push-cons") 583 | consumer2.AutoAck = false 
584 | consumer2.AutoFinish = false 585 | assert.NoError(t, queue2.StartConsuming(10, time.Millisecond)) 586 | _, err = queue2.AddConsumer("push-cons", consumer2) 587 | assert.NoError(t, err) 588 | 589 | assert.NoError(t, queue1.Publish("d1")) 590 | eventuallyUnacked(t, queue1, 1) 591 | require.Len(t, consumer1.Deliveries(), 1) 592 | 593 | assert.NoError(t, consumer1.Last().Push()) 594 | eventuallyUnacked(t, queue1, 0) 595 | eventuallyUnacked(t, queue2, 1) 596 | require.Len(t, consumer2.Deliveries(), 1) 597 | 598 | assert.NoError(t, consumer2.Last().Push()) 599 | eventuallyRejected(t, queue2, 1) 600 | } 601 | 602 | func TestStopConsuming_Consumer(t *testing.T) { 603 | redisOptions, closer := testRedis(t) 604 | defer closer() 605 | 606 | connection, err := OpenConnectionWithRedisOptions("consume", redisOptions, nil) 607 | assert.NoError(t, err) 608 | queue, err := connection.OpenQueue("consume-q") 609 | assert.NoError(t, err) 610 | _, err = queue.PurgeReady() 611 | assert.NoError(t, err) 612 | 613 | deliveryCount := int64(30) 614 | 615 | for i := int64(0); i < deliveryCount; i++ { 616 | err := queue.Publish("d" + strconv.FormatInt(i, 10)) 617 | assert.NoError(t, err) 618 | } 619 | 620 | assert.NoError(t, queue.StartConsuming(20, time.Millisecond)) 621 | 622 | var consumers []*TestConsumer 623 | for i := 0; i < 10; i++ { 624 | consumer := NewTestConsumer("c" + strconv.Itoa(i)) 625 | consumers = append(consumers, consumer) 626 | _, err = queue.AddConsumer("consume", consumer) 627 | assert.NoError(t, err) 628 | } 629 | 630 | finishedChan := queue.StopConsuming() 631 | require.NotNil(t, finishedChan) 632 | <-finishedChan // wait for stopping to finish 633 | 634 | var consumedCount int64 635 | for i := 0; i < 10; i++ { 636 | consumedCount += int64(len(consumers[i].Deliveries())) 637 | } 638 | 639 | // make sure all deliveries are either ready, unacked or consumed (acked) 640 | assert.Eventually(t, func() bool { 641 | readyCount, err := queue.readyCount() 642 | if err != nil { 643 | return false 644 | } 645 | unackedCount, err := queue.unackedCount() 646 | if err != nil { 647 | return false 648 | } 649 | return readyCount+unackedCount+consumedCount == deliveryCount 650 | }, 10*time.Second, 2*time.Millisecond) 651 | 652 | assert.NoError(t, connection.stopHeartbeat()) 653 | } 654 | 655 | func TestStopConsuming_BatchConsumer(t *testing.T) { 656 | redisOptions, closer := testRedis(t) 657 | defer closer() 658 | 659 | connection, err := OpenConnectionWithRedisOptions("batchConsume", redisOptions, nil) 660 | assert.NoError(t, err) 661 | queue, err := connection.OpenQueue("batchConsume-q") 662 | assert.NoError(t, err) 663 | _, err = queue.PurgeReady() 664 | assert.NoError(t, err) 665 | 666 | deliveryCount := int64(50) 667 | 668 | for i := int64(0); i < deliveryCount; i++ { 669 | err := queue.Publish("d" + strconv.FormatInt(i, 10)) 670 | assert.NoError(t, err) 671 | } 672 | 673 | assert.NoError(t, queue.StartConsuming(20, time.Millisecond)) 674 | 675 | var consumers []*TestBatchConsumer 676 | for i := 0; i < 10; i++ { 677 | consumer := NewTestBatchConsumer() 678 | consumer.AutoFinish = true 679 | consumers = append(consumers, consumer) 680 | _, err = queue.AddBatchConsumer("consume", 5, time.Second, consumer) 681 | assert.NoError(t, err) 682 | } 683 | 684 | finishedChan := queue.StopConsuming() 685 | require.NotNil(t, finishedChan) 686 | <-finishedChan // wait for stopping to finish 687 | 688 | var consumedCount int64 689 | for i := 0; i < 10; i++ { 690 | consumedCount += consumers[i].Consumed() 691 | } 
692 | 693 | // make sure all deliveries are either ready, unacked or consumed (acked) 694 | assert.Eventually(t, func() bool { 695 | readyCount, err := queue.readyCount() 696 | if err != nil { 697 | return false 698 | } 699 | unackedCount, err := queue.unackedCount() 700 | if err != nil { 701 | return false 702 | } 703 | return readyCount+unackedCount+consumedCount == deliveryCount 704 | }, 10*time.Second, 2*time.Millisecond) 705 | 706 | assert.NoError(t, connection.stopHeartbeat()) 707 | } 708 | 709 | func TestConnection_StopAllConsuming_CantOpenQueue(t *testing.T) { 710 | redisOptions, closer := testRedis(t) 711 | defer closer() 712 | 713 | connection, err := OpenConnectionWithRedisOptions("consume", redisOptions, nil) 714 | assert.NoError(t, err) 715 | 716 | finishedChan := connection.StopAllConsuming() 717 | require.NotNil(t, finishedChan) 718 | <-finishedChan // wait for stopping to finish 719 | 720 | queue, err := connection.OpenQueue("consume-q") 721 | require.Nil(t, queue) 722 | require.Equal(t, ErrorConsumingStopped, err) 723 | } 724 | 725 | func TestConnection_StopAllConsuming_CantStartConsuming(t *testing.T) { 726 | redisOptions, closer := testRedis(t) 727 | defer closer() 728 | 729 | connection, err := OpenConnectionWithRedisOptions("consume", redisOptions, nil) 730 | assert.NoError(t, err) 731 | queue, err := connection.OpenQueue("consume-q") 732 | assert.NoError(t, err) 733 | _, err = queue.PurgeReady() 734 | assert.NoError(t, err) 735 | 736 | finishedChan := connection.StopAllConsuming() 737 | require.NotNil(t, finishedChan) 738 | <-finishedChan // wait for stopping to finish 739 | 740 | err = queue.StartConsuming(20, time.Millisecond) 741 | require.Equal(t, ErrorConsumingStopped, err) 742 | } 743 | 744 | func TestQueue_StopConsuming_CantStartConsuming(t *testing.T) { 745 | redisOptions, closer := testRedis(t) 746 | defer closer() 747 | 748 | connection, err := OpenConnectionWithRedisOptions("consume", redisOptions, nil) 749 | assert.NoError(t, err) 750 | queue, err := connection.OpenQueue("consume-q") 751 | assert.NoError(t, err) 752 | _, err = queue.PurgeReady() 753 | assert.NoError(t, err) 754 | 755 | finishedChan := queue.StopConsuming() 756 | require.NotNil(t, finishedChan) 757 | <-finishedChan // wait for stopping to finish 758 | 759 | err = queue.StartConsuming(20, time.Millisecond) 760 | require.Equal(t, ErrorConsumingStopped, err) 761 | } 762 | 763 | func TestConnection_StopAllConsuming_CantAddConsumer(t *testing.T) { 764 | redisOptions, closer := testRedis(t) 765 | defer closer() 766 | 767 | connection, err := OpenConnectionWithRedisOptions("consume", redisOptions, nil) 768 | assert.NoError(t, err) 769 | queue, err := connection.OpenQueue("consume-q") 770 | assert.NoError(t, err) 771 | _, err = queue.PurgeReady() 772 | assert.NoError(t, err) 773 | 774 | assert.NoError(t, queue.StartConsuming(20, time.Millisecond)) 775 | 776 | finishedChan := connection.StopAllConsuming() 777 | require.NotNil(t, finishedChan) 778 | <-finishedChan // wait for stopping to finish 779 | 780 | _, err = queue.AddConsumer("late-consume", NewTestConsumer("late-consumer")) 781 | require.Equal(t, ErrorConsumingStopped, err) 782 | } 783 | 784 | func TestQueue_StopConsuming_CantAddConsumer(t *testing.T) { 785 | redisOptions, closer := testRedis(t) 786 | defer closer() 787 | 788 | connection, err := OpenConnectionWithRedisOptions("consume", redisOptions, nil) 789 | assert.NoError(t, err) 790 | queue, err := connection.OpenQueue("consume-q") 791 | assert.NoError(t, err) 792 | _, err = 
queue.PurgeReady() 793 | assert.NoError(t, err) 794 | 795 | assert.NoError(t, queue.StartConsuming(20, time.Millisecond)) 796 | 797 | finishedChan := queue.StopConsuming() 798 | require.NotNil(t, finishedChan) 799 | <-finishedChan // wait for stopping to finish 800 | 801 | _, err = queue.AddConsumer("late-consume", NewTestConsumer("late-consumer")) 802 | require.Equal(t, ErrorConsumingStopped, err) 803 | } 804 | 805 | func BenchmarkQueue(b *testing.B) { 806 | redisOptions, closer := testRedis(b) 807 | defer closer() 808 | 809 | // open queue 810 | connection, err := OpenConnectionWithRedisOptions("bench-conn", redisOptions, nil) 811 | assert.NoError(b, err) 812 | queueName := fmt.Sprintf("bench-q%d", b.N) 813 | queue, err := connection.OpenQueue(queueName) 814 | assert.NoError(b, err) 815 | assert.NoError(b, queue.StartConsuming(10, time.Millisecond)) 816 | 817 | // add some consumers 818 | numConsumers := 10 819 | var consumers []*TestConsumer 820 | for i := 0; i < numConsumers; i++ { 821 | consumer := NewTestConsumer("bench-A") 822 | // consumer.SleepDuration = time.Microsecond 823 | consumers = append(consumers, consumer) 824 | _, err = queue.AddConsumer("bench-cons", consumer) 825 | assert.NoError(b, err) 826 | } 827 | 828 | b.ResetTimer() 829 | 830 | // publish deliveries 831 | for i := 0; i < b.N; i++ { 832 | err := queue.Publish("bench-d") 833 | assert.NoError(b, err) 834 | } 835 | 836 | maxWaits := 10000 837 | // wait until all are consumed 838 | for { 839 | ready, err := queue.readyCount() 840 | assert.NoError(b, err) 841 | unacked, err := queue.unackedCount() 842 | assert.NoError(b, err) 843 | if ready == 0 && unacked == 0 { 844 | break 845 | } 846 | maxWaits-- 847 | 848 | if maxWaits == 0 { 849 | b.Fatalf("timeout waiting for all messages to be consumed: %d messages %d ready %d unacked\n", 850 | b.N, ready, unacked) 851 | } 852 | 853 | time.Sleep(time.Millisecond) 854 | } 855 | 856 | time.Sleep(time.Millisecond) 857 | 858 | sum := 0 859 | for _, consumer := range consumers { 860 | sum += len(consumer.Deliveries()) 861 | } 862 | 863 | assert.Equal(b, b.N, sum) 864 | assert.NoError(b, connection.stopHeartbeat()) 865 | } 866 | 867 | func Test_jitteredDuration(t *testing.T) { 868 | dur := 100 * time.Millisecond 869 | for i := 0; i < 5000; i++ { 870 | d := jitteredDuration(dur) 871 | assert.LessOrEqual(t, int64(90*time.Millisecond), int64(d)) 872 | assert.GreaterOrEqual(t, int64(110*time.Millisecond), int64(d)) 873 | } 874 | } 875 | 876 | func TestQueueDrain(t *testing.T) { 877 | redisOptions, closer := testRedis(t) 878 | defer closer() 879 | 880 | connection, err := OpenConnectionWithRedisOptions("drain-connection", redisOptions, nil) 881 | assert.NoError(t, err) 882 | require.NotNil(t, connection) 883 | 884 | queue, err := connection.OpenQueue("drain-queue") 885 | assert.NoError(t, err) 886 | 887 | for x := 0; x < 100; x++ { 888 | queue.Publish(fmt.Sprintf("%d", x)) 889 | } 890 | 891 | eventuallyReady(t, queue, 100) 892 | 893 | for x := 1; x <= 10; x++ { 894 | values, err := queue.Drain(10) 895 | assert.NoError(t, err) 896 | assert.Equal(t, 10, len(values)) 897 | eventuallyReady(t, queue, int64(100-x*10)) 898 | } 899 | } 900 | 901 | func TestQueueHeader(t *testing.T) { 902 | redisOptions, closer := testRedis(t) 903 | defer closer() 904 | 905 | connection, err := OpenConnectionWithRedisOptions("queue-h-conn", redisOptions, nil) 906 | assert.NoError(t, err) 907 | require.NotNil(t, connection) 908 | 909 | queue, err := connection.OpenQueue("queue-h") 910 | assert.NoError(t, err) 911 | 
require.NotNil(t, queue) 912 | _, err = queue.PurgeReady() 913 | assert.NoError(t, err) 914 | 915 | assert.NoError(t, queue.StartConsuming(2, time.Millisecond)) 916 | time.Sleep(time.Millisecond) 917 | assert.NoError(t, err) 918 | 919 | consumed := int64(0) 920 | cons := ConsumerFunc(func(delivery Delivery) { 921 | atomic.AddInt64(&consumed, 1) 922 | 923 | h, ok := delivery.(WithHeader) 924 | assert.True(t, ok) 925 | 926 | switch delivery.Payload() { 927 | case "queue-d1": 928 | assert.Empty(t, h.Header()) 929 | case "queue-d2": 930 | require.NotNil(t, h.Header()) 931 | assert.Equal(t, "d2", h.Header().Get("X-Foo")) 932 | default: 933 | assert.Failf(t, "unexpected payload: %q", delivery.Payload()) 934 | } 935 | 936 | }) 937 | 938 | _, err = queue.AddConsumer("queue-cons1", cons) 939 | assert.NoError(t, err) 940 | 941 | assert.NoError(t, queue.Publish("queue-d1")) 942 | assert.NoError(t, queue.Publish(PayloadWithHeader("queue-d2", http.Header{"X-Foo": []string{"d2"}}))) 943 | 944 | assert.Eventually(t, func() bool { 945 | return atomic.LoadInt64(&consumed) == 2 946 | }, 10*time.Second, time.Millisecond) 947 | 948 | <-queue.StopConsuming() 949 | assert.NoError(t, connection.stopHeartbeat()) 950 | } 951 | -------------------------------------------------------------------------------- /rand.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "math/rand" 5 | "time" 6 | ) 7 | 8 | var ( 9 | source = rand.NewSource(time.Now().UnixNano()) 10 | random = rand.New(source) 11 | ) 12 | 13 | // standard characters used by uniuri 14 | const letterBytes = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" 15 | 16 | // from https://stackoverflow.com/a/31832326 17 | func RandomString(n int) string { 18 | b := make([]byte, n) 19 | for i := range b { 20 | b[i] = letterBytes[random.Intn(len(letterBytes))] 21 | } 22 | return string(b) 23 | } 24 | -------------------------------------------------------------------------------- /redis_client.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import "time" 4 | 5 | type RedisClient interface { 6 | // simple keys 7 | Set(key string, value string, expiration time.Duration) error 8 | Del(key string) (affected int64, err error) 9 | TTL(key string) (ttl time.Duration, err error) 10 | 11 | // lists 12 | LPush(key string, value ...string) (total int64, err error) 13 | LLen(key string) (affected int64, err error) 14 | LRem(key string, count int64, value string) (affected int64, err error) 15 | LTrim(key string, start, stop int64) error 16 | RPopLPush(source, destination string) (value string, err error) 17 | RPop(key string) (value string, err error) 18 | 19 | // sets 20 | SAdd(key, value string) (total int64, err error) 21 | SMembers(key string) (members []string, err error) 22 | SRem(key, value string) (affected int64, err error) 23 | 24 | // special 25 | FlushDb() error 26 | } 27 | -------------------------------------------------------------------------------- /redis_keys.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import "strings" 4 | 5 | const ( 6 | connectionsKey = "rmq::connections" // Set of connection names 7 | connectionHeartbeatTemplate = "rmq::connection::{connection}::heartbeat" // expires after {connection} died 8 | connectionQueuesTemplate = "rmq::connection::{connection}::queues" // Set of queues consumers of {connection} are consuming 9 | 
connectionQueueConsumersBaseTemplate = "rmq::connection::{connection}::queue::[{queue}]::consumers" // Set of all consumers from {connection} consuming from {queue} 10 | connectionQueueUnackedBaseTemplate = "rmq::connection::{connection}::queue::[{queue}]::unacked" // List of deliveries consumers of {connection} are currently consuming 11 | 12 | queuesKey = "rmq::queues" // Set of all open queues 13 | queueReadyBaseTemplate = "rmq::queue::[{queue}]::ready" // List of deliveries in that {queue} (right is first and oldest, left is last and youngest) 14 | queueRejectedBaseTemplate = "rmq::queue::[{queue}]::rejected" // List of rejected deliveries from that {queue} 15 | 16 | phConnection = "{connection}" // connection name 17 | phQueue = "{queue}" // queue name 18 | phConsumer = "{consumer}" // consumer name (consisting of tag and token) 19 | ) 20 | 21 | func getTemplate(baseTemplate string, useRedisHashTags bool) string { 22 | if !useRedisHashTags { 23 | return baseTemplate 24 | } 25 | 26 | return strings.Replace(baseTemplate, "[{queue}]", "{{queue}}", 1) 27 | } 28 | -------------------------------------------------------------------------------- /redis_wrapper.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/redis/go-redis/v9" 8 | ) 9 | 10 | type RedisWrapper struct { 11 | rawClient redis.Cmdable 12 | } 13 | 14 | func (wrapper RedisWrapper) Set(key string, value string, expiration time.Duration) error { 15 | // NOTE: using Err() here because Result() string is always "OK" 16 | return wrapper.rawClient.Set(context.TODO(), key, value, expiration).Err() 17 | } 18 | 19 | func (wrapper RedisWrapper) Del(key string) (affected int64, err error) { 20 | return wrapper.rawClient.Del(context.TODO(), key).Result() 21 | } 22 | 23 | func (wrapper RedisWrapper) TTL(key string) (ttl time.Duration, err error) { 24 | return wrapper.rawClient.TTL(context.TODO(), key).Result() 25 | } 26 | 27 | func (wrapper RedisWrapper) LPush(key string, value ...string) (total int64, err error) { 28 | return wrapper.rawClient.LPush(context.TODO(), key, value).Result() 29 | } 30 | 31 | func (wrapper RedisWrapper) LLen(key string) (affected int64, err error) { 32 | return wrapper.rawClient.LLen(context.TODO(), key).Result() 33 | } 34 | 35 | func (wrapper RedisWrapper) LRem(key string, count int64, value string) (affected int64, err error) { 36 | return wrapper.rawClient.LRem(context.TODO(), key, int64(count), value).Result() 37 | } 38 | 39 | func (wrapper RedisWrapper) LTrim(key string, start, stop int64) error { 40 | // NOTE: using Err() here because Result() string is always "OK" 41 | return wrapper.rawClient.LTrim(context.TODO(), key, int64(start), int64(stop)).Err() 42 | } 43 | 44 | func (wrapper RedisWrapper) RPop(key string) (value string, err error) { 45 | return wrapper.rawClient.RPop(context.TODO(), key).Result() 46 | } 47 | 48 | func (wrapper RedisWrapper) RPopLPush(source, destination string) (value string, err error) { 49 | value, err = wrapper.rawClient.RPopLPush(context.TODO(), source, destination).Result() 50 | // println("RPopLPush", source, destination, value, err) 51 | switch err { 52 | case nil: 53 | return value, nil 54 | case redis.Nil: 55 | return value, ErrorNotFound 56 | default: 57 | return value, err 58 | } 59 | } 60 | 61 | func (wrapper RedisWrapper) SAdd(key, value string) (total int64, err error) { 62 | return wrapper.rawClient.SAdd(context.TODO(), key, value).Result() 63 | } 64 | 65 | 
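// Construction sketch (valid only inside package rmq, since rawClient is unexported;
// redisOptions is assumed to be a *redis.Options as used in the tests):
//
//	var client RedisClient = RedisWrapper{rawClient: redis.NewClient(redisOptions)}
//	total, err := client.SAdd(queuesKey, "example-queue")
//
// A redis.ClusterClient works the same way, since both client types satisfy redis.Cmdable.
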
func (wrapper RedisWrapper) SMembers(key string) (members []string, err error) { 66 | return wrapper.rawClient.SMembers(context.TODO(), key).Result() 67 | } 68 | 69 | func (wrapper RedisWrapper) SRem(key, value string) (affected int64, err error) { 70 | return wrapper.rawClient.SRem(context.TODO(), key, value).Result() 71 | } 72 | 73 | func (wrapper RedisWrapper) FlushDb() error { 74 | // NOTE: using Err() here because Result() string is always "OK" 75 | return wrapper.rawClient.FlushDB(context.TODO()).Err() 76 | } 77 | -------------------------------------------------------------------------------- /state.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | //go:generate stringer -type=State 4 | 5 | type State int 6 | 7 | const ( 8 | Unacked State = iota 9 | Acked 10 | Rejected 11 | Pushed 12 | ) 13 | -------------------------------------------------------------------------------- /state_string.go: -------------------------------------------------------------------------------- 1 | // generated by stringer -type=State; DO NOT EDIT 2 | 3 | package rmq 4 | 5 | import "fmt" 6 | 7 | const _State_name = "UnackedAckedRejectedPushed" 8 | 9 | var _State_index = [...]uint8{0, 7, 12, 20, 26} 10 | 11 | func (i State) String() string { 12 | if i < 0 || i >= State(len(_State_index)-1) { 13 | return fmt.Sprintf("State(%d)", i) 14 | } 15 | return _State_name[_State_index[i]:_State_index[i+1]] 16 | } 17 | -------------------------------------------------------------------------------- /stats.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "sort" 7 | ) 8 | 9 | type ConnectionStat struct { 10 | active bool 11 | unackedCount int64 12 | consumers []string 13 | } 14 | 15 | func (stat ConnectionStat) String() string { 16 | return fmt.Sprintf("[unacked:%d consumers:%d]", 17 | stat.unackedCount, 18 | len(stat.consumers), 19 | ) 20 | } 21 | 22 | type ConnectionStats map[string]ConnectionStat 23 | 24 | type QueueStat struct { 25 | ReadyCount int64 `json:"ready"` 26 | RejectedCount int64 `json:"rejected"` 27 | connectionStats ConnectionStats 28 | } 29 | 30 | func NewQueueStat(readyCount, rejectedCount int64) QueueStat { 31 | return QueueStat{ 32 | ReadyCount: readyCount, 33 | RejectedCount: rejectedCount, 34 | connectionStats: ConnectionStats{}, 35 | } 36 | } 37 | 38 | func (stat QueueStat) String() string { 39 | return fmt.Sprintf("[ready:%d rejected:%d conn:%s", 40 | stat.ReadyCount, 41 | stat.RejectedCount, 42 | stat.connectionStats, 43 | ) 44 | } 45 | 46 | func (stat QueueStat) UnackedCount() int64 { 47 | unacked := int64(0) 48 | for _, connectionStat := range stat.connectionStats { 49 | unacked += connectionStat.unackedCount 50 | } 51 | return unacked 52 | } 53 | 54 | func (stat QueueStat) ConsumerCount() int64 { 55 | consumer := int64(0) 56 | for _, connectionStat := range stat.connectionStats { 57 | consumer += int64(len(connectionStat.consumers)) 58 | } 59 | return consumer 60 | } 61 | 62 | func (stat QueueStat) ConnectionCount() int64 { 63 | return int64(len(stat.connectionStats)) 64 | } 65 | 66 | type QueueStats map[string]QueueStat 67 | 68 | type Stats struct { 69 | QueueStats QueueStats `json:"queues"` 70 | otherConnections map[string]bool // non consuming connections, active or not 71 | } 72 | 73 | func NewStats() Stats { 74 | return Stats{ 75 | QueueStats: QueueStats{}, 76 | otherConnections: map[string]bool{}, 77 | } 78 | } 79 | 80 | func 
CollectStats(queueList []string, mainConnection Connection) (Stats, error) { 81 | stats := NewStats() 82 | for _, queueName := range queueList { 83 | queue := mainConnection.openQueue(queueName) 84 | readyCount, err := queue.readyCount() 85 | if err != nil { 86 | return stats, err 87 | } 88 | rejectedCount, err := queue.rejectedCount() 89 | if err != nil { 90 | return stats, err 91 | } 92 | stats.QueueStats[queueName] = NewQueueStat(readyCount, rejectedCount) 93 | } 94 | 95 | connectionNames, err := mainConnection.getConnections() 96 | if err != nil { 97 | return stats, err 98 | } 99 | 100 | for _, connectionName := range connectionNames { 101 | hijackedConnection := mainConnection.hijackConnection(connectionName) 102 | 103 | var connectionActive bool 104 | switch err := hijackedConnection.checkHeartbeat(); err { 105 | case nil: 106 | connectionActive = true 107 | case ErrorNotFound: 108 | connectionActive = false 109 | default: 110 | return stats, err 111 | } 112 | 113 | queueNames, err := hijackedConnection.getConsumingQueues() 114 | if err != nil { 115 | return stats, err 116 | } 117 | if len(queueNames) == 0 { 118 | stats.otherConnections[connectionName] = connectionActive 119 | continue 120 | } 121 | 122 | for _, queueName := range queueNames { 123 | queue := hijackedConnection.openQueue(queueName) 124 | consumers, err := queue.getConsumers() 125 | if err != nil { 126 | return stats, err 127 | } 128 | openQueueStat, ok := stats.QueueStats[queueName] 129 | if !ok { 130 | continue 131 | } 132 | unackedCount, err := queue.unackedCount() 133 | if err != nil { 134 | return stats, err 135 | } 136 | openQueueStat.connectionStats[connectionName] = ConnectionStat{ 137 | active: connectionActive, 138 | unackedCount: unackedCount, 139 | consumers: consumers, 140 | } 141 | } 142 | } 143 | 144 | return stats, nil 145 | } 146 | 147 | func (stats Stats) String() string { 148 | var buffer bytes.Buffer 149 | 150 | for queueName, queueStat := range stats.QueueStats { 151 | buffer.WriteString(fmt.Sprintf(" queue:%s ready:%d rejected:%d unacked:%d consumers:%d\n", 152 | queueName, queueStat.ReadyCount, queueStat.RejectedCount, queueStat.UnackedCount(), queueStat.ConsumerCount(), 153 | )) 154 | 155 | for connectionName, connectionStat := range queueStat.connectionStats { 156 | buffer.WriteString(fmt.Sprintf(" connection:%s unacked:%d consumers:%d active:%t\n", 157 | connectionName, connectionStat.unackedCount, len(connectionStat.consumers), connectionStat.active, 158 | )) 159 | } 160 | } 161 | 162 | for connectionName, active := range stats.otherConnections { 163 | buffer.WriteString(fmt.Sprintf(" connection:%s active:%t\n", 164 | connectionName, active, 165 | )) 166 | } 167 | 168 | return buffer.String() 169 | } 170 | 171 | func (stats Stats) GetHtml(layout, refresh string) string { 172 | buffer := bytes.NewBufferString("") 173 | 174 | if refresh != "" { 175 | buffer.WriteString(fmt.Sprintf(``, refresh)) 176 | } 177 | 178 | buffer.WriteString(``) 179 | buffer.WriteString(``, 187 | ) 188 | 189 | for _, queueName := range stats.sortedQueueNames() { 190 | queueStat := stats.QueueStats[queueName] 191 | connectionNames := queueStat.connectionStats.sortedNames() 192 | buffer.WriteString(fmt.Sprintf(``, 200 | queueName, queueStat.ReadyCount, queueStat.RejectedCount, "", len(connectionNames), queueStat.UnackedCount(), queueStat.ConsumerCount(), 201 | )) 202 | 203 | if layout != "condensed" { 204 | for _, connectionName := range connectionNames { 205 | connectionStat := queueStat.connectionStats[connectionName] 
206 | buffer.WriteString(fmt.Sprintf(``, 214 | "", "", "", ActiveSign(connectionStat.active), connectionName, connectionStat.unackedCount, len(connectionStat.consumers), 215 | )) 216 | } 217 | } 218 | } 219 | 220 | if layout != "condensed" { 221 | buffer.WriteString(``) 222 | for _, connectionName := range stats.sortedConnectionNames() { 223 | active := stats.otherConnections[connectionName] 224 | buffer.WriteString(fmt.Sprintf(``, 232 | "", "", "", ActiveSign(active), connectionName, "", "", 233 | )) 234 | } 235 | } 236 | 237 | buffer.WriteString(`
// cell text recovered from the stripped HTML row strings of GetHtml (markup itself not preserved):
//   header row (lines 180-186):             queue, ready, rejected, (blank), connections, unacked, consumers
//   per-queue row (lines 193-199):          %s, %d, %d, %s, %d, %d, %d
//   per-connection row (lines 207-213):     %s, %s, %s, %s, %s, %d, %d
//   separator row:                          -----
//   other-connections row (lines 225-231):  %s, %s, %s, %s, %s, %s, %s
`) 238 | return buffer.String() 239 | } 240 | 241 | func (stats ConnectionStats) sortedNames() []string { 242 | var keys []string 243 | for key := range stats { 244 | keys = append(keys, key) 245 | } 246 | sort.Strings(keys) 247 | return keys 248 | } 249 | 250 | func (stats Stats) sortedQueueNames() []string { 251 | var keys []string 252 | for key := range stats.QueueStats { 253 | keys = append(keys, key) 254 | } 255 | sort.Strings(keys) 256 | return keys 257 | } 258 | 259 | func (stats Stats) sortedConnectionNames() []string { 260 | var keys []string 261 | for key := range stats.otherConnections { 262 | keys = append(keys, key) 263 | } 264 | sort.Strings(keys) 265 | return keys 266 | } 267 | 268 | func ActiveSign(active bool) string { 269 | if active { 270 | return "✓" 271 | } 272 | return "✗" 273 | } 274 | -------------------------------------------------------------------------------- /stats_test.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestStats(t *testing.T) { 12 | redisOptions, closer := testRedis(t) 13 | defer closer() 14 | 15 | connection, err := OpenConnectionWithRedisOptions("stats-conn", redisOptions, nil) 16 | assert.NoError(t, err) 17 | _, err = NewCleaner(connection).Clean() 18 | require.NoError(t, err) 19 | 20 | conn1, err := OpenConnectionWithRedisOptions("stats-conn1", redisOptions, nil) 21 | assert.NoError(t, err) 22 | conn2, err := OpenConnectionWithRedisOptions("stats-conn2", redisOptions, nil) 23 | assert.NoError(t, err) 24 | q1, err := conn2.OpenQueue("stats-q1") 25 | assert.NoError(t, err) 26 | _, err = q1.PurgeReady() 27 | assert.NoError(t, err) 28 | assert.NoError(t, q1.Publish("stats-d1")) 29 | q2, err := conn2.OpenQueue("stats-q2") 30 | assert.NoError(t, err) 31 | _, err = q2.PurgeReady() 32 | assert.NoError(t, err) 33 | consumer := NewTestConsumer("hand-A") 34 | consumer.AutoAck = false 35 | assert.NoError(t, q2.StartConsuming(10, time.Millisecond)) 36 | _, err = q2.AddConsumer("stats-cons1", consumer) 37 | assert.NoError(t, err) 38 | assert.NoError(t, q2.Publish("stats-d2")) 39 | assert.NoError(t, q2.Publish("stats-d3")) 40 | assert.NoError(t, q2.Publish("stats-d4")) 41 | time.Sleep(2 * time.Millisecond) 42 | assert.NoError(t, consumer.Deliveries()[0].Ack()) 43 | assert.NoError(t, consumer.Deliveries()[1].Reject()) 44 | _, err = q2.AddConsumer("stats-cons2", NewTestConsumer("hand-B")) 45 | assert.NoError(t, err) 46 | 47 | queues, err := connection.GetOpenQueues() 48 | assert.NoError(t, err) 49 | stats, err := CollectStats(queues, connection) 50 | assert.NoError(t, err) 51 | // log.Printf("stats\n%s", stats) 52 | html := stats.GetHtml("", "") 53 | assert.Regexp(t, ".*queue.*ready.*connection.*unacked.*consumers.*q1.*1.*0.*0.*", html) 54 | assert.Regexp(t, ".*queue.*ready.*connection.*unacked.*consumers.*q2.*0.*1.*1.*2.*conn2.*1.*2.*", html) 55 | 56 | stats, err = CollectStats([]string{"stats-q1", "stats-q2"}, connection) 57 | assert.NoError(t, err) 58 | 59 | for key, _ := range stats.QueueStats { 60 | assert.Regexp(t, "stats.*", key) 61 | } 62 | /* 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 |
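Example stats output (the GetHtml table rendered as plain text):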
	queue       ready   rejected   connection            unacked   consumers
	stats-q2        0          1                                1           2
	                                 stats-conn2-vY5ZPz          1           2
	stats-q1        1          0                                0           0
	q2              0          0                                0           0
	q1              0          0                                0           0
71 | */ 72 | 73 | q2.StopConsuming() 74 | assert.NoError(t, connection.stopHeartbeat()) 75 | assert.NoError(t, conn1.stopHeartbeat()) 76 | assert.NoError(t, conn2.stopHeartbeat()) 77 | } 78 | -------------------------------------------------------------------------------- /test_batch_consumer.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import "sync" 4 | 5 | type TestBatchConsumer struct { 6 | mu sync.Mutex 7 | // Deprecated: use Last() to avoid data races. 8 | LastBatch Deliveries 9 | // Deprecated use Consumed() to avoid data races. 10 | ConsumedCount int64 11 | AutoFinish bool 12 | 13 | finish chan int 14 | } 15 | 16 | func NewTestBatchConsumer() *TestBatchConsumer { 17 | return &TestBatchConsumer{ 18 | finish: make(chan int), 19 | } 20 | } 21 | 22 | func (consumer *TestBatchConsumer) Last() Deliveries { 23 | consumer.mu.Lock() 24 | defer consumer.mu.Unlock() 25 | 26 | return consumer.LastBatch 27 | } 28 | 29 | func (consumer *TestBatchConsumer) Consumed() int64 { 30 | consumer.mu.Lock() 31 | defer consumer.mu.Unlock() 32 | 33 | return consumer.ConsumedCount 34 | } 35 | 36 | func (consumer *TestBatchConsumer) Consume(batch Deliveries) { 37 | consumer.mu.Lock() 38 | consumer.LastBatch = batch 39 | consumer.ConsumedCount += int64(len(batch)) 40 | consumer.mu.Unlock() 41 | 42 | if consumer.AutoFinish { 43 | batch.Ack() 44 | } else { 45 | <-consumer.finish 46 | // log.Printf("TestBatchConsumer.Consume() finished") 47 | } 48 | } 49 | 50 | func (consumer *TestBatchConsumer) Finish() { 51 | // log.Printf("TestBatchConsumer.Finish()") 52 | consumer.mu.Lock() 53 | consumer.LastBatch = nil 54 | consumer.mu.Unlock() 55 | 56 | consumer.finish <- 1 57 | } 58 | -------------------------------------------------------------------------------- /test_connection.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "sync" 7 | ) 8 | 9 | var errorNotSupported = errors.New("not supported") 10 | 11 | type TestConnection struct { 12 | queues *sync.Map 13 | } 14 | 15 | func NewTestConnection() TestConnection { 16 | return TestConnection{ 17 | queues: &sync.Map{}, 18 | } 19 | } 20 | 21 | func (connection TestConnection) OpenQueue(name string) (Queue, error) { 22 | queue, _ := connection.queues.LoadOrStore(name, NewTestQueue(name)) 23 | return queue.(*TestQueue), nil 24 | } 25 | 26 | func (TestConnection) CollectStats([]string) (Stats, error) { panic(errorNotSupported) } 27 | func (TestConnection) GetOpenQueues() ([]string, error) { panic(errorNotSupported) } 28 | func (TestConnection) StopAllConsuming() <-chan struct{} { panic(errorNotSupported) } 29 | func (TestConnection) checkHeartbeat() error { panic(errorNotSupported) } 30 | func (TestConnection) getConnections() ([]string, error) { panic(errorNotSupported) } 31 | func (TestConnection) hijackConnection(string) Connection { panic(errorNotSupported) } 32 | func (TestConnection) closeStaleConnection() error { panic(errorNotSupported) } 33 | func (TestConnection) getConsumingQueues() ([]string, error) { panic(errorNotSupported) } 34 | func (TestConnection) unlistAllQueues() error { panic(errorNotSupported) } 35 | func (TestConnection) openQueue(string) Queue { panic(errorNotSupported) } 36 | func (TestConnection) stopHeartbeat() error { panic(errorNotSupported) } 37 | func (TestConnection) flushDb() error { panic(errorNotSupported) } 38 | 39 | // test helpers for test inspection and similar 40 | 41 | func 
(connection TestConnection) GetDeliveries(queueName string) []string { 42 | queue, ok := connection.queues.Load(queueName) 43 | if !ok { 44 | return []string{} 45 | } 46 | 47 | return queue.(*TestQueue).LastDeliveries 48 | } 49 | 50 | func (connection TestConnection) GetDelivery(queueName string, index int) string { 51 | queue, ok := connection.queues.Load(queueName) 52 | if !ok || index < 0 || index >= len(queue.(*TestQueue).LastDeliveries) { 53 | return fmt.Sprintf("rmq.TestConnection: delivery not found: %s[%d]", queueName, index) 54 | } 55 | 56 | return queue.(*TestQueue).LastDeliveries[index] 57 | } 58 | 59 | func (connection TestConnection) Reset() { 60 | connection.queues.Range(func(_, v interface{}) bool { 61 | v.(*TestQueue).Reset() 62 | return true 63 | }) 64 | } 65 | -------------------------------------------------------------------------------- /test_connection_test.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestTestConnection(t *testing.T) { 10 | connection := NewTestConnection() 11 | var _ Connection = connection // check that it implements the interface 12 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[0]", connection.GetDelivery("things", 0)) 13 | 14 | queue, err := connection.OpenQueue("things") 15 | assert.NoError(t, err) 16 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[-1]", connection.GetDelivery("things", -1)) 17 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[0]", connection.GetDelivery("things", 0)) 18 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[1]", connection.GetDelivery("things", 1)) 19 | 20 | assert.NoError(t, queue.Publish("bar")) 21 | assert.Equal(t, "bar", connection.GetDelivery("things", 0)) 22 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[1]", connection.GetDelivery("things", 1)) 23 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[2]", connection.GetDelivery("things", 2)) 24 | 25 | assert.NoError(t, queue.Publish("foo")) 26 | assert.Equal(t, "bar", connection.GetDelivery("things", 0)) 27 | assert.Equal(t, "foo", connection.GetDelivery("things", 1)) 28 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[2]", connection.GetDelivery("things", 2)) 29 | 30 | connection.Reset() 31 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[0]", connection.GetDelivery("things", 0)) 32 | 33 | assert.NoError(t, queue.Publish("blab")) 34 | assert.Equal(t, "blab", connection.GetDelivery("things", 0)) 35 | assert.Equal(t, "rmq.TestConnection: delivery not found: things[1]", connection.GetDelivery("things", 1)) 36 | } 37 | -------------------------------------------------------------------------------- /test_consumer.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | type TestConsumer struct { 9 | name string 10 | AutoAck bool 11 | AutoFinish bool 12 | SleepDuration time.Duration 13 | 14 | mu sync.Mutex 15 | // Deprecated: use Last() to avoid data races. 16 | LastDelivery Delivery 17 | // Deprecated: use Deliveries() to avoid data races. 
18 | LastDeliveries []Delivery 19 | 20 | finish chan int 21 | } 22 | 23 | func NewTestConsumer(name string) *TestConsumer { 24 | return &TestConsumer{ 25 | name: name, 26 | AutoAck: true, 27 | AutoFinish: true, 28 | finish: make(chan int), 29 | } 30 | } 31 | 32 | func (consumer *TestConsumer) String() string { 33 | return consumer.name 34 | } 35 | 36 | func (consumer *TestConsumer) Last() Delivery { 37 | consumer.mu.Lock() 38 | defer consumer.mu.Unlock() 39 | 40 | return consumer.LastDelivery 41 | } 42 | 43 | func (consumer *TestConsumer) Deliveries() []Delivery { 44 | consumer.mu.Lock() 45 | defer consumer.mu.Unlock() 46 | 47 | return consumer.LastDeliveries 48 | } 49 | 50 | func (consumer *TestConsumer) Consume(delivery Delivery) { 51 | consumer.mu.Lock() 52 | consumer.LastDelivery = delivery 53 | consumer.LastDeliveries = append(consumer.LastDeliveries, delivery) 54 | consumer.mu.Unlock() 55 | 56 | if consumer.SleepDuration > 0 { 57 | time.Sleep(consumer.SleepDuration) 58 | } 59 | if consumer.AutoAck { 60 | if err := delivery.Ack(); err != nil { 61 | panic(err) 62 | } 63 | } 64 | if !consumer.AutoFinish { 65 | <-consumer.finish 66 | } 67 | } 68 | 69 | func (consumer *TestConsumer) Finish() { 70 | consumer.finish <- 1 71 | } 72 | 73 | func (consumer *TestConsumer) FinishAll() { 74 | close(consumer.finish) 75 | } 76 | -------------------------------------------------------------------------------- /test_delivery.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | ) 7 | 8 | type TestDelivery struct { 9 | State State 10 | payload string 11 | } 12 | 13 | func NewTestDelivery(content interface{}) *TestDelivery { 14 | if payload, ok := content.(string); ok { 15 | return NewTestDeliveryString(payload) 16 | } 17 | 18 | bytes, err := json.Marshal(content) 19 | if err != nil { 20 | log.Panic("rmq.NewTestDelivery failed to marshal") 21 | } 22 | 23 | return NewTestDeliveryString(string(bytes)) 24 | } 25 | 26 | func NewTestDeliveryString(payload string) *TestDelivery { 27 | return &TestDelivery{ 28 | payload: payload, 29 | } 30 | } 31 | 32 | func (delivery *TestDelivery) Payload() string { 33 | return delivery.payload 34 | } 35 | 36 | func (delivery *TestDelivery) Ack() error { 37 | if delivery.State != Unacked { 38 | return ErrorNotFound 39 | } 40 | delivery.State = Acked 41 | return nil 42 | } 43 | 44 | func (delivery *TestDelivery) Reject() error { 45 | if delivery.State != Unacked { 46 | return ErrorNotFound 47 | } 48 | delivery.State = Rejected 49 | return nil 50 | } 51 | 52 | func (delivery *TestDelivery) Push() error { 53 | if delivery.State != Unacked { 54 | return ErrorNotFound 55 | } 56 | delivery.State = Pushed 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /test_delivery_test.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestDeliveryPayload(t *testing.T) { 10 | var delivery Delivery 11 | delivery = NewTestDelivery("p23") 12 | assert.NoError(t, delivery.Ack()) 13 | assert.Equal(t, "p23", delivery.Payload()) 14 | } 15 | 16 | func TestDeliveryAck(t *testing.T) { 17 | delivery := NewTestDelivery("p") 18 | assert.Equal(t, Unacked, delivery.State) 19 | assert.NoError(t, delivery.Ack()) 20 | assert.Equal(t, Acked, delivery.State) 21 | 22 | assert.Equal(t, ErrorNotFound, delivery.Ack()) 23 
| assert.Equal(t, ErrorNotFound, delivery.Reject()) 24 | assert.Equal(t, Acked, delivery.State) 25 | } 26 | 27 | func TestDeliveryReject(t *testing.T) { 28 | delivery := NewTestDelivery("p") 29 | assert.Equal(t, Unacked, delivery.State) 30 | assert.NoError(t, delivery.Reject()) 31 | assert.Equal(t, Rejected, delivery.State) 32 | 33 | assert.Equal(t, ErrorNotFound, delivery.Reject()) 34 | assert.Equal(t, ErrorNotFound, delivery.Ack()) 35 | assert.Equal(t, Rejected, delivery.State) 36 | } 37 | 38 | func TestDeliveryPush(t *testing.T) { 39 | delivery := NewTestDelivery("p") 40 | assert.Equal(t, Unacked, delivery.State) 41 | assert.NoError(t, delivery.Push()) 42 | assert.Equal(t, Pushed, delivery.State) 43 | 44 | assert.Equal(t, ErrorNotFound, delivery.Push()) 45 | assert.Equal(t, ErrorNotFound, delivery.Reject()) 46 | assert.Equal(t, ErrorNotFound, delivery.Ack()) 47 | assert.Equal(t, Pushed, delivery.State) 48 | } 49 | -------------------------------------------------------------------------------- /test_queue.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import "time" 4 | 5 | type TestQueue struct { 6 | name string 7 | LastDeliveries []string 8 | } 9 | 10 | func NewTestQueue(name string) *TestQueue { 11 | queue := &TestQueue{name: name} 12 | queue.Reset() 13 | return queue 14 | } 15 | 16 | func (queue *TestQueue) String() string { 17 | return queue.name 18 | } 19 | 20 | func (queue *TestQueue) Publish(payload ...string) error { 21 | queue.LastDeliveries = append(queue.LastDeliveries, payload...) 22 | return nil 23 | } 24 | 25 | func (queue *TestQueue) PublishBytes(payload ...[]byte) error { 26 | stringifiedBytes := make([]string, len(payload)) 27 | for i, b := range payload { 28 | stringifiedBytes[i] = string(b) 29 | } 30 | return queue.Publish(stringifiedBytes...) 
31 | } 32 | 33 | func (queue *TestQueue) Remove(payload string, count int64, removeFromRejected bool) error { 34 | panic(errorNotSupported) 35 | } 36 | 37 | func (queue *TestQueue) RemoveBytes(payload []byte, count int64, removeFromRejected bool) error { 38 | return queue.Remove(string(payload), count, removeFromRejected) 39 | } 40 | 41 | func (*TestQueue) SetPushQueue(Queue) { panic(errorNotSupported) } 42 | func (*TestQueue) StartConsuming(int64, time.Duration) error { panic(errorNotSupported) } 43 | func (*TestQueue) StopConsuming() <-chan struct{} { panic(errorNotSupported) } 44 | func (*TestQueue) AddConsumer(string, Consumer) (string, error) { panic(errorNotSupported) } 45 | func (*TestQueue) AddConsumerFunc(string, ConsumerFunc) (string, error) { panic(errorNotSupported) } 46 | func (*TestQueue) AddBatchConsumer(string, int64, time.Duration, BatchConsumer) (string, error) { 47 | panic(errorNotSupported) 48 | } 49 | func (*TestQueue) AddBatchConsumerFunc(string, int64, time.Duration, BatchConsumerFunc) (string, error) { 50 | panic(errorNotSupported) 51 | } 52 | func (*TestQueue) ReturnUnacked(int64) (int64, error) { panic(errorNotSupported) } 53 | func (*TestQueue) ReturnRejected(int64) (int64, error) { panic(errorNotSupported) } 54 | func (*TestQueue) PurgeReady() (int64, error) { panic(errorNotSupported) } 55 | func (*TestQueue) PurgeRejected() (int64, error) { panic(errorNotSupported) } 56 | func (*TestQueue) Destroy() (int64, int64, error) { panic(errorNotSupported) } 57 | func (*TestQueue) Drain(count int64) ([]string, error) { panic(errorNotSupported) } 58 | func (*TestQueue) closeInStaleConnection() error { panic(errorNotSupported) } 59 | func (*TestQueue) readyCount() (int64, error) { panic(errorNotSupported) } 60 | func (*TestQueue) unackedCount() (int64, error) { panic(errorNotSupported) } 61 | func (*TestQueue) rejectedCount() (int64, error) { panic(errorNotSupported) } 62 | func (*TestQueue) getConsumers() ([]string, error) { panic(errorNotSupported) } 63 | 64 | // test helper 65 | 66 | func (queue *TestQueue) Reset() { 67 | queue.LastDeliveries = []string{} 68 | } 69 | -------------------------------------------------------------------------------- /test_redis_client.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "errors" 5 | "strings" 6 | "sync" 7 | "time" 8 | 9 | "github.com/redis/go-redis/v9" 10 | ) 11 | 12 | // TestRedisClient is a mock for redis 13 | type TestRedisClient struct { 14 | store sync.Map 15 | ttl sync.Map 16 | mx sync.Mutex 17 | } 18 | 19 | // NewTestRedisClient returns a new TestRedisClient. 20 | 21 | func NewTestRedisClient() *TestRedisClient { 22 | return &TestRedisClient{} 23 | } 24 | 25 | // Set sets key to hold the string value. 26 | // If key already holds a value, it is overwritten, regardless of its type. 27 | // Any previous time to live associated with the key is discarded on successful SET operation. 28 | func (client *TestRedisClient) Set(key string, value string, expiration time.Duration) error { 29 | 30 | client.mx.Lock() 31 | defer client.mx.Unlock() 32 | 33 | client.store.Store(key, value) 34 | // Delete any previous time to live associated with the key 35 | client.ttl.Delete(key) 36 | 37 | // 0.0 expiration means that the value won't expire 38 | if expiration.Seconds() != 0.0 { 39 | // Store the unix time at which we should delete this 40 | client.ttl.Store(key, time.Now().Add(expiration).Unix()) 41 | } 42 | 43 | return nil 44 | } 45 | 46 | // Get the value of key.
47 | // If the key does not exist or isn't a string 48 | // the special value nil is returned. 49 | func (client *TestRedisClient) Get(key string) (string, error) { 50 | 51 | value, found := client.store.Load(key) 52 | 53 | if found { 54 | if stringValue, casted := value.(string); casted { 55 | return stringValue, nil 56 | } 57 | } 58 | 59 | return "nil", nil 60 | } 61 | 62 | // Del removes the specified key. A key is ignored if it does not exist. 63 | func (client *TestRedisClient) Del(key string) (affected int64, err error) { 64 | 65 | _, found := client.store.Load(key) 66 | client.store.Delete(key) 67 | client.ttl.Delete(key) 68 | 69 | if found { 70 | return 1, nil 71 | } 72 | return 0, nil 73 | 74 | } 75 | 76 | // TTL returns the remaining time to live of a key that has a timeout. 77 | // This introspection capability allows a Redis client to check how many seconds a given key will continue to be part of the dataset. 78 | // In Redis 2.6 or older the command returns -1 if the key does not exist or if the key exist but has no associated expire. 79 | // Starting with Redis 2.8 the return value in case of error changed: 80 | // The command returns -2 if the key does not exist. 81 | // The command returns -1 if the key exists but has no associated expire. 82 | func (client *TestRedisClient) TTL(key string) (ttl time.Duration, err error) { 83 | 84 | // Lookup the expiration map 85 | expiration, found := client.ttl.Load(key) 86 | 87 | // Found an expiration time 88 | if found { 89 | 90 | // It was there, but it expired; removing it now 91 | if expiration.(int64) < time.Now().Unix() { 92 | client.ttl.Delete(key) 93 | return -2, nil 94 | } 95 | 96 | ttl = time.Duration(expiration.(int64) - time.Now().Unix()) 97 | return ttl, nil 98 | } 99 | 100 | // Lookup the store in case this key exists but don't have an expiration 101 | // date 102 | _, found = client.store.Load(key) 103 | 104 | // The key was in store but didn't have an expiration associated 105 | // to it. 106 | if found { 107 | return -1, nil 108 | } 109 | 110 | return -2, nil 111 | } 112 | 113 | // LPush inserts the specified value at the head of the list stored at key. 114 | // If key does not exist, it is created as empty list before performing the push operations. 115 | // When key holds a value that is not a list, an error is returned. 116 | // It is possible to push multiple elements using a single command call just specifying multiple arguments 117 | // at the end of the command. Elements are inserted one after the other to the head of the list, 118 | // from the leftmost element to the rightmost element. 119 | func (client *TestRedisClient) LPush(key string, values ...string) (total int64, err error) { 120 | 121 | client.mx.Lock() 122 | defer client.mx.Unlock() 123 | 124 | list, err := client.findList(key) 125 | 126 | if err != nil { 127 | return 0, nil 128 | } 129 | 130 | newList := append(values, list...) 131 | client.storeList(key, newList) 132 | return int64(len(newList)), nil 133 | } 134 | 135 | // RPop removes and returns one value from the tail of the list stored at key. 136 | // When key holds a value that is not a list, an error is returned. 
137 | func (client *TestRedisClient) RPop(key string) (value string, err error) { 138 | 139 | client.mx.Lock() 140 | defer client.mx.Unlock() 141 | 142 | list, err := client.findList(key) 143 | // not a list 144 | if err != nil { 145 | return "", err 146 | } 147 | // list is empty 148 | if len(list) == 0 { 149 | return "", redis.Nil 150 | } 151 | 152 | // Remove the last element of source (tail) 153 | client.storeList(key, list[0:len(list)-1]) 154 | 155 | return list[len(list)-1], nil 156 | } 157 | 158 | // LLen returns the length of the list stored at key. 159 | // If key does not exist, it is interpreted as an empty list and 0 is returned. 160 | // An error is returned when the value stored at key is not a list. 161 | func (client *TestRedisClient) LLen(key string) (affected int64, err error) { 162 | list, err := client.findList(key) 163 | 164 | if err != nil { 165 | return 0, nil 166 | } 167 | return int64(len(list)), nil 168 | } 169 | 170 | // LRem removes the first count occurrences of elements equal to 171 | // value from the list stored at key. The count argument influences 172 | // the operation in the following ways: 173 | // count > 0: Remove elements equal to value moving from head to tail. 174 | // count < 0: Remove elements equal to value moving from tail to head. 175 | // count = 0: Remove all elements equal to value. For example, 176 | // LREM list -2 "hello" will remove the last two occurrences of "hello" in 177 | // the list stored at list. Note that non-existing keys are treated like empty 178 | // lists, so when key does not exist, the command will always return 0. 179 | func (client *TestRedisClient) LRem(key string, count int64, value string) (affected int64, err error) { 180 | 181 | client.mx.Lock() 182 | defer client.mx.Unlock() 183 | 184 | list, err := client.findList(key) 185 | 186 | // Wasn't a list, or is empty 187 | if err != nil || len(list) == 0 { 188 | return 0, nil 189 | } 190 | 191 | // Create a list that have the capacity to store 192 | // the old one 193 | // This will be much more performant in case of 194 | // very long list 195 | newList := make([]string, 0, len(list)) 196 | 197 | if err != nil { 198 | return 0, nil 199 | } 200 | 201 | // left to right removal of count elements 202 | if count >= 0 { 203 | 204 | // All the elements are to be removed. 205 | // Set count to max possible elements 206 | if count == 0 { 207 | count = int64(len(list)) 208 | } 209 | // left to right traversal 210 | for index := 0; index < len(list); index++ { 211 | 212 | // isn't what we look for or we found enough element already 213 | if strings.Compare(list[index], value) != 0 || affected > count { 214 | newList = append(newList, list[index]) 215 | } else { 216 | affected++ 217 | } 218 | } 219 | // right to left removal of count elements 220 | } else if count < 0 { 221 | 222 | // right to left traversal 223 | for index := len(list) - 1; index >= 0; index-- { 224 | 225 | // isn't what we look for or we found enough element already 226 | if strings.Compare(list[index], value) != 0 || affected > count { 227 | // prepend instead of append to keep the order 228 | newList = append([]string{list[index]}, newList...) 229 | } else { 230 | affected++ 231 | } 232 | } 233 | } 234 | 235 | // store the updated list 236 | client.storeList(key, newList) 237 | 238 | return affected, nil 239 | } 240 | 241 | // LTrim trims an existing list so that it will contain only the specified range of elements specified. 
242 | // Both start and stop are zero-based indexes, where 0 is the first element of the list (the head), 243 | // 1 the next element and so on. For example: LTRIM foobar 0 2 will modify the list stored 244 | // at foobar so that only the first three elements of the list will remain. 245 | // start and end can also be negative numbers indicating offsets from the end of the list, 246 | // where -1 is the last element of the list, -2 the penultimate element and so on. 247 | // Out of range indexes will not produce an error: if start is larger than the end of the list, 248 | // or start > end, the result will be an empty list (which causes key to be removed). 249 | // If end is larger than the end of the list, Redis will treat it like the last element of the list 250 | func (client *TestRedisClient) LTrim(key string, start, stop int64) error { 251 | 252 | client.mx.Lock() 253 | defer client.mx.Unlock() 254 | 255 | list, err := client.findList(key) 256 | 257 | // Wasn't a list, or is empty 258 | if err != nil || len(list) == 0 { 259 | return nil 260 | } 261 | 262 | if start < 0 { 263 | start += int64(len(list)) 264 | } 265 | if stop < 0 { 266 | stop += int64(len(list)) 267 | } 268 | 269 | // invalid values cause the remove of the key 270 | if start > stop { 271 | client.store.Delete(key) 272 | return nil 273 | } 274 | 275 | client.storeList(key, list[start:stop]) 276 | return nil 277 | } 278 | 279 | // RPopLPush atomically returns and removes the last element (tail) of the list stored at source, 280 | // and pushes the element at the first element (head) of the list stored at destination. 281 | // For example: consider source holding the list a,b,c, and destination holding the list x,y,z. 282 | // Executing RPOPLPUSH results in source holding a,b and destination holding c,x,y,z. 283 | // If source does not exist, the value nil is returned and no operation is performed. 284 | // If source and destination are the same, the operation is equivalent to removing the 285 | // last element from the list and pushing it as first element of the list, 286 | // so it can be considered as a list rotation command. 287 | func (client *TestRedisClient) RPopLPush(source, destination string) (value string, err error) { 288 | 289 | client.mx.Lock() 290 | defer client.mx.Unlock() 291 | 292 | sourceList, sourceErr := client.findList(source) 293 | destList, destErr := client.findList(destination) 294 | 295 | // One of the two isn't a list 296 | if sourceErr != nil || destErr != nil { 297 | return "", ErrorNotFound 298 | } 299 | // we have nothing to move 300 | if len(sourceList) == 0 { 301 | return "", ErrorNotFound 302 | } 303 | 304 | // Remove the last element of source (tail) 305 | client.storeList(source, sourceList[0:len(sourceList)-1]) 306 | // Put the last element of source (tail) and prepend it to dest 307 | client.storeList(destination, append([]string{sourceList[len(sourceList)-1]}, destList...)) 308 | 309 | return sourceList[len(sourceList)-1], nil 310 | } 311 | 312 | // SAdd adds the specified members to the set stored at key. 313 | // Specified members that are already a member of this set are ignored. 314 | // If key does not exist, a new set is created before adding the specified members. 315 | // An error is returned when the value stored at key is not a set. 
316 | func (client *TestRedisClient) SAdd(key, value string) (total int64, err error) { 317 | 318 | client.mx.Lock() 319 | defer client.mx.Unlock() 320 | 321 | set, err := client.findSet(key) 322 | if err != nil { 323 | return 0, err 324 | } 325 | 326 | set[value] = struct{}{} 327 | client.storeSet(key, set) 328 | return int64(len(set)), nil 329 | } 330 | 331 | // SMembers returns all the members of the set value stored at key. 332 | // This has the same effect as running SINTER with one argument key. 333 | func (client *TestRedisClient) SMembers(key string) (members []string, err error) { 334 | set, err := client.findSet(key) 335 | if err != nil { 336 | return members, nil 337 | } 338 | 339 | members = make([]string, 0, len(set)) 340 | for k := range set { 341 | members = append(members, k) 342 | } 343 | 344 | return members, nil 345 | } 346 | 347 | // SRem removes the specified members from the set stored at key. 348 | // Specified members that are not a member of this set are ignored. 349 | // If key does not exist, it is treated as an empty set and this command returns 0. 350 | // An error is returned when the value stored at key is not a set. 351 | func (client *TestRedisClient) SRem(key, value string) (affected int64, err error) { 352 | 353 | client.mx.Lock() 354 | defer client.mx.Unlock() 355 | 356 | set, err := client.findSet(key) 357 | if err != nil || len(set) == 0 { 358 | return 0, nil 359 | } 360 | 361 | if _, found := set[value]; found { 362 | delete(set, value) 363 | return 1, nil 364 | } 365 | 366 | return 0, nil 367 | } 368 | 369 | // FlushDb deletes all the keys of the currently selected DB. This command never fails. 370 | func (client *TestRedisClient) FlushDb() error { 371 | client.store = *new(sync.Map) 372 | client.ttl = *new(sync.Map) 373 | return nil 374 | } 375 | 376 | // storeSet stores a set 377 | func (client *TestRedisClient) storeSet(key string, set map[string]struct{}) { 378 | client.store.Store(key, set) 379 | } 380 | 381 | // findSet finds a set 382 | func (client *TestRedisClient) findSet(key string) (map[string]struct{}, error) { 383 | // Lookup the store for the set 384 | storedValue, found := client.store.Load(key) 385 | if found { 386 | // sets are stored as map[string]struct{} 387 | set, casted := storedValue.(map[string]struct{}) 388 | 389 | if casted { 390 | return set, nil 391 | } 392 | 393 | return nil, errors.New("Stored value wasn't a set") 394 | } 395 | 396 | // return an empty set if not found 397 | return make(map[string]struct{}), nil 398 | } 399 | 400 | // storeList is a helper function so others don't have to deal with pointers 401 | func (client *TestRedisClient) storeList(key string, list []string) { 402 | client.store.Store(key, &list) 403 | } 404 | 405 | // findList returns the list stored at key. 406 | // If key doesn't exist, an empty list is returned. 407 | // An error is returned when the value at key isn't a list. 408 | func (client *TestRedisClient) findList(key string) ([]string, error) { 409 | // Lookup the store for the list 410 | storedValue, found := client.store.Load(key) 411 | if found { 412 | 413 | // lists are stored as pointers to []string 414 | list, casted := storedValue.(*[]string) 415 | 416 | // Successful cast from interface{} to *[]string 417 | // Return the dereferenced list 418 | if casted { 419 | 420 | // This mock uses sync.Map to be thread safe. 421 | // sync.Map only accepts interface{} as values and 422 | // in order to store an array as interface{}, you need 423 | // to use a pointer to it.
424 | // We could return the pointer instead of the value 425 | // and gain some performances here. Returning the pointer, 426 | // however, will open us up to race conditions. 427 | return *list, nil 428 | } 429 | 430 | return nil, errors.New("Stored value wasn't a list") 431 | } 432 | 433 | // return an empty list if not found 434 | return []string{}, nil 435 | } 436 | -------------------------------------------------------------------------------- /test_redis_client_test.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestTestRedisClient_Set(t *testing.T) { 11 | type args struct { 12 | key string 13 | value string 14 | expiration time.Duration 15 | } 16 | tests := []struct { 17 | name string 18 | client *TestRedisClient 19 | args args 20 | }{ 21 | { 22 | "successful add", 23 | NewTestRedisClient(), 24 | args{ 25 | "somekey", 26 | "somevalue", 27 | time.Duration(0), 28 | }, 29 | }, 30 | } 31 | for _, tt := range tests { 32 | t.Run(tt.name, func(t *testing.T) { 33 | 34 | // add 35 | err := tt.client.Set(tt.args.key, tt.args.value, tt.args.expiration) 36 | assert.NoError(t, err) 37 | 38 | // get 39 | v, err := tt.client.Get(tt.args.key) 40 | assert.Equal(t, tt.args.value, v) 41 | assert.NoError(t, err) 42 | 43 | // delete 44 | affected, err := tt.client.Del(tt.args.key) 45 | assert.Equal(t, int64(1), affected) 46 | assert.NoError(t, err) 47 | 48 | // delete it again 49 | affected, err = tt.client.Del(tt.args.key) 50 | assert.Equal(t, int64(0), affected) 51 | assert.NoError(t, err) 52 | }) 53 | } 54 | } 55 | 56 | func TestTestRedisClient_SAdd(t *testing.T) { 57 | type args struct { 58 | key string 59 | value string 60 | } 61 | tests := []struct { 62 | name string 63 | client *TestRedisClient 64 | args args 65 | }{ 66 | { 67 | "adding member", 68 | NewTestRedisClient(), 69 | args{ 70 | "somekey", 71 | "somevalue", 72 | }, 73 | }, 74 | } 75 | for _, tt := range tests { 76 | t.Run(tt.name, func(t *testing.T) { 77 | total, err := tt.client.SAdd(tt.args.key, tt.args.value) 78 | assert.NoError(t, err) 79 | assert.Equal(t, int64(1), total) 80 | 81 | total, err = tt.client.SAdd(tt.args.key, tt.args.value) 82 | assert.NoError(t, err) 83 | assert.Equal(t, int64(1), total) 84 | 85 | members, err := tt.client.SMembers(tt.args.key) 86 | assert.Equal(t, []string{tt.args.value}, members) 87 | assert.NoError(t, err) 88 | 89 | count, err := tt.client.SRem(tt.args.key, tt.args.value) 90 | assert.Equal(t, int64(1), count) 91 | assert.NoError(t, err) 92 | }) 93 | } 94 | } 95 | 96 | func TestTestRedisClient_LPush(t *testing.T) { 97 | type args struct { 98 | key string 99 | value string 100 | } 101 | tests := []struct { 102 | name string 103 | client *TestRedisClient 104 | args args 105 | total int64 106 | }{ 107 | { 108 | "adding to list", 109 | NewTestRedisClient(), 110 | args{ 111 | "somekey", 112 | "somevalue", 113 | }, 114 | 1, 115 | }, 116 | } 117 | for _, tt := range tests { 118 | t.Run(tt.name, func(t *testing.T) { 119 | 120 | // Push 121 | total, err := tt.client.LPush(tt.args.key, tt.args.value) 122 | assert.NoError(t, err) 123 | assert.Equal(t, tt.total, total) 124 | 125 | // Len 126 | count, err := tt.client.LLen(tt.args.key) 127 | assert.Equal(t, int64(1), count) 128 | assert.NoError(t, err) 129 | 130 | // Len of non-existing 131 | count, err = tt.client.LLen(tt.args.key + "nonsense") 132 | assert.Equal(t, int64(0), count) 133 | 
assert.NoError(t, err) 134 | 135 | // Lrem 136 | count, err = tt.client.LRem(tt.args.key, 100, tt.args.value) 137 | assert.Equal(t, int64(1), count) 138 | assert.NoError(t, err) 139 | 140 | // Len again 141 | count, err = tt.client.LLen(tt.args.key) 142 | assert.Equal(t, int64(0), count) 143 | assert.NoError(t, err) 144 | }) 145 | } 146 | } 147 | 148 | func TestTestRedisClient_LPush_Len(t *testing.T) { 149 | client := NewTestRedisClient() 150 | key := "list-key" 151 | 152 | total, err := client.LPush(key, "1", "2", "3") 153 | assert.NoError(t, err) 154 | assert.Equal(t, int64(3), total) 155 | 156 | total, err = client.LPush(key, "4", "5", "6") 157 | assert.NoError(t, err) 158 | assert.Equal(t, int64(6), total) 159 | } 160 | 161 | func TestTestRedisClient_RPop(t *testing.T) { 162 | client := NewTestRedisClient() 163 | key := "list-key" 164 | 165 | total, err := client.LPush(key, "1", "2", "3") 166 | assert.NoError(t, err) 167 | assert.Equal(t, int64(3), total) 168 | 169 | value, err := client.RPop(key) 170 | assert.NoError(t, err) 171 | assert.Equal(t, "3", value) 172 | 173 | total, err = client.LLen(key) 174 | assert.NoError(t, err) 175 | assert.Equal(t, int64(2), total) 176 | } 177 | -------------------------------------------------------------------------------- /test_util.go: -------------------------------------------------------------------------------- 1 | package rmq 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | "testing" 7 | "time" 8 | 9 | "github.com/alicebob/miniredis/v2" 10 | "github.com/redis/go-redis/v9" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func testRedis(t testing.TB) (options *redis.Options, close func()) { 15 | t.Helper() 16 | 17 | if redisAddr, ok := os.LookupEnv("REDIS_ADDR"); ok { 18 | return &redis.Options{Addr: redisAddr}, func() {} 19 | } 20 | 21 | mr := miniredis.RunT(t) 22 | return &redis.Options{Addr: mr.Addr()}, mr.Close 23 | } 24 | 25 | func testClusterRedis(t testing.TB) (options *redis.ClusterOptions, close func()) { 26 | t.Helper() 27 | 28 | // Follow these steps to set up a local redis cluster: 29 | // https://github.com/redis/redis/tree/unstable/utils/create-cluster 30 | // Then run the tests like this: 31 | // REDIS_CLUSTER_ADDR="localhost:30001,localhost:30002,localhost:30003,localhost:30004,localhost:30005,localhost:30006" go test 32 | if redisAddrs, ok := os.LookupEnv("REDIS_CLUSTER_ADDR"); ok { 33 | addrs := strings.Split(redisAddrs, ",") 34 | return &redis.ClusterOptions{Addrs: addrs}, func() {} 35 | } 36 | 37 | mr1 := miniredis.RunT(t) 38 | mr2 := miniredis.RunT(t) 39 | mr3 := miniredis.RunT(t) 40 | 41 | options = &redis.ClusterOptions{ 42 | Addrs: []string{ 43 | mr1.Addr(), 44 | mr2.Addr(), 45 | mr3.Addr(), 46 | }, 47 | } 48 | 49 | closeFunc := func() { 50 | mr1.Close() 51 | mr2.Close() 52 | mr3.Close() 53 | } 54 | 55 | return options, closeFunc 56 | } 57 | 58 | func eventuallyReady(t *testing.T, queue Queue, expectedReady int64) { 59 | t.Helper() 60 | assert.Eventually(t, func() bool { 61 | count, err := queue.readyCount() 62 | if err != nil { 63 | return false 64 | } 65 | return count == expectedReady 66 | }, 1*time.Second, 2*time.Millisecond) 67 | } 68 | 69 | func eventuallyUnacked(t *testing.T, queue Queue, expectedUnacked int64) { 70 | t.Helper() 71 | assert.Eventually(t, func() bool { 72 | count, err := queue.unackedCount() 73 | if err != nil { 74 | return false 75 | } 76 | return count == expectedUnacked 77 | }, 1*time.Second, 2*time.Millisecond) 78 | } 79 | 80 | func eventuallyRejected(t *testing.T, queue Queue, 
expectedRejected int64) { 81 | t.Helper() 82 | assert.Eventually(t, func() bool { 83 | count, err := queue.rejectedCount() 84 | if err != nil { 85 | return false 86 | } 87 | return count == expectedRejected 88 | }, 1*time.Second, 2*time.Millisecond) 89 | } 90 | -------------------------------------------------------------------------------- /testdata/create-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | 5 | # Settings 6 | BIN_PATH="/usr/bin/" 7 | CLUSTER_HOST=127.0.0.1 8 | PORT=30000 9 | TIMEOUT=2000 10 | NODES=6 11 | REPLICAS=1 12 | PROTECTED_MODE=yes 13 | ADDITIONAL_OPTIONS="" 14 | 15 | # You may want to put the above config parameters into config.sh in order to 16 | # override the defaults without modifying this script. 17 | 18 | if [ -a config.sh ] 19 | then 20 | source "config.sh" 21 | fi 22 | 23 | # Computed vars 24 | ENDPORT=$((PORT+NODES)) 25 | 26 | if [ "$1" == "start" ] 27 | then 28 | while [ $((PORT < ENDPORT)) != "0" ]; do 29 | PORT=$((PORT+1)) 30 | echo "Starting $PORT" 31 | $BIN_PATH/redis-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes ${ADDITIONAL_OPTIONS} 32 | done 33 | exit 0 34 | fi 35 | 36 | if [ "$1" == "create" ] 37 | then 38 | HOSTS="" 39 | while [ $((PORT < ENDPORT)) != "0" ]; do 40 | PORT=$((PORT+1)) 41 | HOSTS="$HOSTS $CLUSTER_HOST:$PORT" 42 | done 43 | OPT_ARG="" 44 | if [ "$2" == "-f" ]; then 45 | OPT_ARG="--cluster-yes" 46 | fi 47 | $BIN_PATH/redis-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG 48 | exit 0 49 | fi 50 | 51 | if [ "$1" == "stop" ] 52 | then 53 | while [ $((PORT < ENDPORT)) != "0" ]; do 54 | PORT=$((PORT+1)) 55 | echo "Stopping $PORT" 56 | $BIN_PATH/redis-cli -p $PORT shutdown nosave 57 | done 58 | exit 0 59 | fi 60 | 61 | if [ "$1" == "watch" ] 62 | then 63 | PORT=$((PORT+1)) 64 | while [ 1 ]; do 65 | clear 66 | date 67 | $BIN_PATH/redis-cli -p $PORT cluster nodes | head -30 68 | sleep 1 69 | done 70 | exit 0 71 | fi 72 | 73 | if [ "$1" == "tail" ] 74 | then 75 | INSTANCE=$2 76 | PORT=$((PORT+INSTANCE)) 77 | tail -f ${PORT}.log 78 | exit 0 79 | fi 80 | 81 | if [ "$1" == "tailall" ] 82 | then 83 | tail -f *.log 84 | exit 0 85 | fi 86 | 87 | if [ "$1" == "call" ] 88 | then 89 | while [ $((PORT < ENDPORT)) != "0" ]; do 90 | PORT=$((PORT+1)) 91 | $BIN_PATH/redis-cli -p $PORT $2 $3 $4 $5 $6 $7 $8 $9 92 | done 93 | exit 0 94 | fi 95 | 96 | if [ "$1" == "clean" ] 97 | then 98 | echo "Cleaning *.log" 99 | rm -rf *.log 100 | echo "Cleaning appendonlydir-*" 101 | rm -rf appendonlydir-* 102 | echo "Cleaning dump-*.rdb" 103 | rm -rf dump-*.rdb 104 | echo "Cleaning nodes-*.conf" 105 | rm -rf nodes-*.conf 106 | exit 0 107 | fi 108 | 109 | if [ "$1" == "clean-logs" ] 110 | then 111 | echo "Cleaning *.log" 112 | rm -rf *.log 113 | exit 0 114 | fi 115 | 116 | echo "Usage: $0 [start|create|stop|watch|tail|tailall|clean|clean-logs|call]" 117 | echo "start -- Launch Redis Cluster instances." 118 | echo "create [-f] -- Create a cluster using redis-cli --cluster create." 119 | echo "stop -- Stop Redis Cluster instances." 120 | echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node." 
121 | echo "tail -- Run tail -f of instance at base port + ID." 122 | echo "tailall -- Run tail -f for all the log files at once." 123 | echo "clean -- Remove all instances data, logs, configs." 124 | echo "clean-logs -- Remove just instances logs." 125 | echo "call -- Call a command (up to 7 arguments) on all nodes." 126 | --------------------------------------------------------------------------------