├── .github ├── pull_request_template.md └── workflows │ ├── main.yaml │ └── pr.yaml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── docker-compose.yaml ├── go.mod ├── go.sum └── pkg └── redisstream ├── marshaller.go ├── marshaller_test.go ├── publisher.go ├── pubsub_bench_test.go ├── pubsub_stress_test.go ├── pubsub_test.go └── subscriber.go /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 10 | 11 | ### Motivation / Background 12 | 13 | 22 | 23 | ### Details 24 | 25 | 26 | 27 | ### Alternative approaches considered (if applicable) 28 | 29 | 30 | 31 | ### Checklist 32 | 33 | The resources of our team are limited. **There are a couple of things that you can do to help us merge your PR faster**: 34 | 35 | - [ ] I wrote tests for the changes. 36 | - [ ] All tests are passing. 37 | - If you are testing a Pub/Sub, you can start Docker with `make up`. 38 | - You can start with `make test_short` for a quick check. 39 | - If you want to run all tests, use `make test`. 40 | - [ ] Code has no breaking changes. 41 | - [ ] _(If applicable)_ documentation on [watermill.io](https://watermill.io/) is updated. 42 | - Documentation is built in the [github.com/ThreeDotsLabs/watermill/docs](https://github.com/ThreeDotsLabs/watermill/tree/master/docs). 43 | - You can find development instructions in the [DEVELOP.md](https://github.com/ThreeDotsLabs/watermill/tree/master/docs/DEVELOP.md). 44 | -------------------------------------------------------------------------------- /.github/workflows/main.yaml: -------------------------------------------------------------------------------- 1 | name: main 2 | on: 3 | push: 4 | branches: 5 | - main 6 | jobs: 7 | ci: 8 | uses: ThreeDotsLabs/watermill/.github/workflows/tests.yml@master 9 | with: 10 | stress-tests: true -------------------------------------------------------------------------------- /.github/workflows/pr.yaml: -------------------------------------------------------------------------------- 1 | name: pr 2 | on: 3 | pull_request: 4 | jobs: 5 | ci: 6 | uses: ThreeDotsLabs/watermill/.github/workflows/tests.yml@master -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | vendor 3 | *.out 4 | *.log 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Three Dots Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | up: 2 | docker compose up -d 3 | 4 | down: 5 | docker compose down 6 | 7 | test: 8 | go test ./... 9 | 10 | test_v: 11 | go test -v ./... 12 | 13 | test_short: 14 | go test ./... -short 15 | 16 | test_race: 17 | go test ./... -short -race 18 | 19 | test_stress: 20 | go test -v -tags=stress -timeout=45m ./... 21 | 22 | fmt: 23 | go fmt ./... 24 | goimports -l -w . 25 | 26 | build: 27 | go build ./... 28 | 29 | test_codecov: up wait 30 | go test -coverprofile=coverage.out -covermode=atomic ./... 31 | 32 | wait: 33 | go run github.com/ThreeDotsLabs/wait-for@latest localhost:6379 34 | 35 | update_watermill: 36 | go get -u github.com/ThreeDotsLabs/watermill 37 | go mod tidy 38 | 39 | sed -i '\|go 1\.|d' go.mod 40 | go mod edit -fmt 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Watermill Redis Pub/Sub 2 | 3 | 4 | [![CI Status](https://github.com/ThreeDotsLabs/watermill-redisstream/actions/workflows/main.yaml/badge.svg)](https://github.com/ThreeDotsLabs/watermill-redisstream/actions/workflows/main.yaml) 5 | [![Go Report Card](https://goreportcard.com/badge/github.com/ThreeDotsLabs/watermill-redisstream)](https://goreportcard.com/report/github.com/ThreeDotsLabs/watermill-redisstream) 6 | 7 | This is a Redis Stream Pub/Sub implementation for the [Watermill](https://watermill.io/) project. 8 | 9 | 10 | See [DEVELOPMENT.md](./DEVELOPMENT.md) for more information about running and testing. 11 | 12 | Watermill is a Go library for working efficiently with message streams. It is intended 13 | for building event-driven applications, enabling event sourcing, RPC over messages, 14 | sagas, and basically whatever else comes to mind. You can use conventional pub/sub 15 | implementations like Kafka or RabbitMQ, but also HTTP or MySQL binlog if that fits your use case. 16 | 17 | All Pub/Sub implementations can be found at [https://watermill.io/pubsubs/](https://watermill.io/pubsubs/). 18 | 19 | Documentation: https://watermill.io/ 20 | 21 | Getting started guide: https://watermill.io/docs/getting-started/ 22 | 23 | Issues: https://github.com/ThreeDotsLabs/watermill/issues 24 | 25 | ## Contributing 26 | 27 | All contributions are very much welcome. If you'd like to help with Watermill development, 28 | please see [open issues](https://github.com/ThreeDotsLabs/watermill/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+) 29 | and submit your pull request via GitHub. 30 | 31 | ## Support 32 | 33 | If you didn't find the answer to your question in [the documentation](https://watermill.io/), feel free to ask us directly! 34 | 35 | Please join us on the `#watermill` channel on the [Three Dots Labs Discord](https://discord.gg/QV6VFg4YQE). 36 | 37 | ## License 38 | 39 | [MIT License](./LICENSE) 40 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # for Watermill development purposes.
2 | # For Watermill based application docker please check https://watermill.io/docs/getting-started/ 3 | 4 | version: '3' 5 | services: 6 | redis: 7 | image: redis:7 8 | ports: 9 | - 6379:6379 10 | restart: unless-stopped -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ThreeDotsLabs/watermill-redisstream 2 | 3 | go 1.21 4 | 5 | toolchain go1.23.0 6 | 7 | require ( 8 | github.com/Rican7/retry v0.3.1 9 | github.com/ThreeDotsLabs/watermill v1.3.7 10 | github.com/pkg/errors v0.9.1 11 | github.com/redis/go-redis/v9 v9.6.1 12 | github.com/stretchr/testify v1.9.0 13 | github.com/vmihailenco/msgpack v4.0.4+incompatible 14 | ) 15 | 16 | require ( 17 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 18 | github.com/davecgh/go-spew v1.1.1 // indirect 19 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 20 | github.com/golang/protobuf v1.5.4 // indirect 21 | github.com/google/uuid v1.6.0 // indirect 22 | github.com/hashicorp/errwrap v1.1.0 // indirect 23 | github.com/hashicorp/go-multierror v1.1.1 // indirect 24 | github.com/lithammer/shortuuid/v3 v3.0.7 // indirect 25 | github.com/oklog/ulid v1.3.1 // indirect 26 | github.com/pmezard/go-difflib v1.0.0 // indirect 27 | google.golang.org/appengine v1.6.8 // indirect 28 | google.golang.org/protobuf v1.34.2 // indirect 29 | gopkg.in/yaml.v3 v3.0.1 // indirect 30 | ) 31 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Rican7/retry v0.3.1 h1:scY4IbO8swckzoA/11HgBwaZRJEyY9vaNJshcdhp1Mc= 2 | github.com/Rican7/retry v0.3.1/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0= 3 | github.com/ThreeDotsLabs/watermill v1.3.7 h1:NV0PSTmuACVEOV4dMxRnmGXrmbz8U83LENOvpHekN7o= 4 | github.com/ThreeDotsLabs/watermill v1.3.7/go.mod h1:lBnrLbxOjeMRgcJbv+UiZr8Ylz8RkJ4m6i/VN/Nk+to= 5 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 6 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 7 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 8 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 9 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 10 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 11 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 12 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 13 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 14 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 15 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 16 | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 17 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 18 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 19 | github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= 20 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
21 | github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 22 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 23 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 24 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 25 | github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 26 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 27 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 28 | github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 29 | github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8= 30 | github.com/lithammer/shortuuid/v3 v3.0.7/go.mod h1:vMk8ke37EmiewwolSO1NLW8vP4ZaKlRuDIi8tWWmAts= 31 | github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= 32 | github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 33 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 34 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 35 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 36 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 37 | github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= 38 | github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= 39 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 40 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 41 | github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= 42 | github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= 43 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 44 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 45 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 46 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 47 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 48 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 49 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 50 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 51 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 52 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 53 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 54 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 55 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 56 | golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 57 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 58 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 59 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 60 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 61 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 62 | golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= 63 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 64 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 65 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 66 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 67 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 68 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 69 | google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= 70 | google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= 71 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 72 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 73 | google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= 74 | google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= 75 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 76 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 77 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 78 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 79 | -------------------------------------------------------------------------------- /pkg/redisstream/marshaller.go: -------------------------------------------------------------------------------- 1 | package redisstream 2 | 3 | import ( 4 | "github.com/ThreeDotsLabs/watermill/message" 5 | "github.com/pkg/errors" 6 | "github.com/vmihailenco/msgpack" 7 | ) 8 | 9 | const UUIDHeaderKey = "_watermill_message_uuid" 10 | 11 | type Marshaller interface { 12 | Marshal(topic string, msg *message.Message) (map[string]interface{}, error) 13 | } 14 | 15 | type Unmarshaller interface { 16 | Unmarshal(values map[string]interface{}) (msg *message.Message, err error) 17 | } 18 | 19 | type MarshallerUnmarshaller interface { 20 | Marshaller 21 | Unmarshaller 22 | } 23 | 24 | type DefaultMarshallerUnmarshaller struct{} 25 | 26 | func (DefaultMarshallerUnmarshaller) Marshal(_ string, msg *message.Message) (map[string]interface{}, error) { 27 | if value := msg.Metadata.Get(UUIDHeaderKey); value != "" { 28 | return nil, errors.Errorf("metadata %s is reserved by watermill for message UUID", UUIDHeaderKey) 29 | } 30 | 31 | var ( 32 | md []byte 33 | err error 34 | ) 35 | if len(msg.Metadata) > 0 { 36 | if md, err = msgpack.Marshal(msg.Metadata); err != nil { 37 | return nil, errors.Wrapf(err, "marshal 
metadata fail") 38 | } 39 | } 40 | 41 | return map[string]interface{}{ 42 | UUIDHeaderKey: msg.UUID, 43 | "metadata": md, 44 | "payload": []byte(msg.Payload), 45 | }, nil 46 | } 47 | 48 | func (DefaultMarshallerUnmarshaller) Unmarshal(values map[string]interface{}) (msg *message.Message, err error) { 49 | var uuid, payload any 50 | uuid, payload = values[UUIDHeaderKey], values["payload"] 51 | if uuid == nil { 52 | return nil, errors.Errorf("%s key is missing as part of the message", UUIDHeaderKey) 53 | } 54 | 55 | if payload == nil { 56 | payload = "" 57 | } 58 | 59 | msg = message.NewMessage(uuid.(string), []byte(payload.(string))) 60 | 61 | md := values["metadata"] 62 | if md != nil { 63 | s := md.(string) 64 | if s != "" { 65 | metadata := make(message.Metadata) 66 | if err := msgpack.Unmarshal([]byte(s), &metadata); err != nil { 67 | return nil, errors.Wrapf(err, "unmarshal metadata fail") 68 | } 69 | msg.Metadata = metadata 70 | } 71 | 72 | } 73 | 74 | return msg, nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/redisstream/marshaller_test.go: -------------------------------------------------------------------------------- 1 | package redisstream 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/ThreeDotsLabs/watermill" 7 | "github.com/ThreeDotsLabs/watermill/message" 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestDefaultMarshallerUnmarshaller_MarshalUnmarshal(t *testing.T) { 13 | m := DefaultMarshallerUnmarshaller{} 14 | 15 | msg := message.NewMessage(watermill.NewUUID(), []byte("payload")) 16 | msg.Metadata.Set("foo", "bar") 17 | 18 | marshaled, err := m.Marshal("topic", msg) 19 | require.NoError(t, err) 20 | 21 | consumerMessage, err := producerToConsumerMessage(marshaled) 22 | require.NoError(t, err) 23 | unmarshaledMsg, err := m.Unmarshal(consumerMessage) 24 | require.NoError(t, err) 25 | 26 | assert.True(t, msg.Equals(unmarshaledMsg)) 27 | } 28 | 29 | func BenchmarkDefaultMarshallerUnmarshaller_Marshal(b *testing.B) { 30 | m := DefaultMarshallerUnmarshaller{} 31 | 32 | msg := message.NewMessage(watermill.NewUUID(), []byte("payload")) 33 | msg.Metadata.Set("foo", "bar") 34 | 35 | var err error 36 | for i := 0; i < b.N; i++ { 37 | _, err = m.Marshal("foo", msg) 38 | if err != nil { 39 | b.Fatal(err) 40 | } 41 | } 42 | } 43 | 44 | func BenchmarkDefaultMarshallerUnmarshaller_Unmarshal(b *testing.B) { 45 | m := DefaultMarshallerUnmarshaller{} 46 | 47 | msg := message.NewMessage(watermill.NewUUID(), []byte("payload")) 48 | msg.Metadata.Set("foo", "bar") 49 | 50 | marshaled, err := m.Marshal("foo", msg) 51 | if err != nil { 52 | b.Fatal(err) 53 | } 54 | 55 | consumedMsg, err := producerToConsumerMessage(marshaled) 56 | if err != nil { 57 | b.Fatal(err) 58 | } 59 | 60 | for i := 0; i < b.N; i++ { 61 | _, err = m.Unmarshal(consumedMsg) 62 | if err != nil { 63 | b.Fatal(err) 64 | } 65 | } 66 | } 67 | 68 | func producerToConsumerMessage(producerMessage map[string]interface{}) (map[string]interface{}, error) { 69 | res := make(map[string]interface{}) 70 | for k, v := range producerMessage { 71 | if b, ok := v.([]byte); ok { 72 | res[k] = string(b) 73 | } else { 74 | res[k] = v 75 | } 76 | } 77 | return res, nil 78 | } 79 | -------------------------------------------------------------------------------- /pkg/redisstream/publisher.go: -------------------------------------------------------------------------------- 1 | package redisstream 2 | 3 | import ( 4 | "context" 5 | "sync" 
6 | 7 | "github.com/ThreeDotsLabs/watermill" 8 | "github.com/ThreeDotsLabs/watermill/message" 9 | "github.com/pkg/errors" 10 | "github.com/redis/go-redis/v9" 11 | ) 12 | 13 | type Publisher struct { 14 | config PublisherConfig 15 | client redis.UniversalClient 16 | logger watermill.LoggerAdapter 17 | 18 | closed bool 19 | closeMutex sync.Mutex 20 | } 21 | 22 | // NewPublisher creates a new redis stream Publisher. 23 | func NewPublisher(config PublisherConfig, logger watermill.LoggerAdapter) (*Publisher, error) { 24 | config.setDefaults() 25 | 26 | if err := config.Validate(); err != nil { 27 | return nil, err 28 | } 29 | 30 | if logger == nil { 31 | logger = &watermill.NopLogger{} 32 | } 33 | 34 | return &Publisher{ 35 | config: config, 36 | client: config.Client, 37 | logger: logger, 38 | closed: false, 39 | }, nil 40 | } 41 | 42 | type PublisherConfig struct { 43 | Client redis.UniversalClient 44 | Marshaller Marshaller 45 | Maxlens map[string]int64 46 | DefaultMaxlen int64 47 | } 48 | 49 | func (c *PublisherConfig) setDefaults() { 50 | if c.Marshaller == nil { 51 | c.Marshaller = DefaultMarshallerUnmarshaller{} 52 | } 53 | } 54 | 55 | func (c *PublisherConfig) Validate() error { 56 | if c.Client == nil { 57 | return errors.New("redis client is empty") 58 | } 59 | for topic, maxlen := range c.Maxlens { 60 | if maxlen < 0 { 61 | // zero maxlen stream indicates unlimited stream length 62 | c.Maxlens[topic] = c.DefaultMaxlen 63 | } 64 | } 65 | return nil 66 | } 67 | 68 | // Publish publishes message to redis stream 69 | // 70 | // Publish is blocking and waits for redis response. 71 | // When any of messages delivery fails - function is interrupted. 72 | func (p *Publisher) Publish(topic string, msgs ...*message.Message) error { 73 | if p.closed { 74 | return errors.New("publisher closed") 75 | } 76 | 77 | logFields := make(watermill.LogFields, 3) 78 | logFields["topic"] = topic 79 | 80 | for _, msg := range msgs { 81 | logFields["message_uuid"] = msg.UUID 82 | p.logger.Trace("Sending message to redis stream", logFields) 83 | 84 | values, err := p.config.Marshaller.Marshal(topic, msg) 85 | if err != nil { 86 | return errors.Wrapf(err, "cannot marshal message %s", msg.UUID) 87 | } 88 | 89 | maxlen, ok := p.config.Maxlens[topic] 90 | if !ok { 91 | maxlen = p.config.DefaultMaxlen 92 | } 93 | 94 | id, err := p.client.XAdd(context.Background(), &redis.XAddArgs{ 95 | Stream: topic, 96 | Values: values, 97 | MaxLen: maxlen, 98 | Approx: true, 99 | }).Result() 100 | if err != nil { 101 | return errors.Wrapf(err, "cannot xadd message %s", msg.UUID) 102 | } 103 | 104 | logFields["xadd_id"] = id 105 | p.logger.Trace("Message sent to redis stream", logFields) 106 | } 107 | 108 | return nil 109 | } 110 | 111 | func (p *Publisher) Close() error { 112 | p.closeMutex.Lock() 113 | defer p.closeMutex.Unlock() 114 | 115 | if p.closed { 116 | return nil 117 | } 118 | p.closed = true 119 | 120 | if err := p.client.Close(); err != nil { 121 | return err 122 | } 123 | 124 | return nil 125 | } 126 | -------------------------------------------------------------------------------- /pkg/redisstream/pubsub_bench_test.go: -------------------------------------------------------------------------------- 1 | package redisstream 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/ThreeDotsLabs/watermill" 7 | "github.com/ThreeDotsLabs/watermill/message" 8 | "github.com/ThreeDotsLabs/watermill/pubsub/tests" 9 | ) 10 | 11 | func BenchmarkSubscriber(b *testing.B) { 12 | pubClient, err := redisClient() 13 | if err != nil { 14 | 
b.Fatal(err) 15 | } 16 | subClient, err := redisClient() 17 | if err != nil { 18 | b.Fatal(err) 19 | } 20 | 21 | tests.BenchSubscriber(b, func(n int) (message.Publisher, message.Subscriber) { 22 | logger := watermill.NopLogger{} 23 | 24 | publisher, err := NewPublisher(PublisherConfig{Client: pubClient}, logger) 25 | if err != nil { 26 | panic(err) 27 | } 28 | 29 | subscriber, err := NewSubscriber( 30 | SubscriberConfig{ 31 | Client: subClient, 32 | Unmarshaller: &DefaultMarshallerUnmarshaller{}, 33 | Consumer: watermill.NewShortUUID(), 34 | ConsumerGroup: watermill.NewShortUUID(), 35 | }, 36 | logger, 37 | ) 38 | if err != nil { 39 | panic(err) 40 | } 41 | 42 | return publisher, subscriber 43 | }) 44 | } 45 | -------------------------------------------------------------------------------- /pkg/redisstream/pubsub_stress_test.go: -------------------------------------------------------------------------------- 1 | //go:build stress 2 | 3 | package redisstream 4 | 5 | import ( 6 | "testing" 7 | 8 | "github.com/ThreeDotsLabs/watermill/pubsub/tests" 9 | ) 10 | 11 | func TestPublishSubscribe_stress(t *testing.T) { 12 | tests.TestPubSubStressTest( 13 | t, 14 | tests.Features{ 15 | ConsumerGroups: true, 16 | ExactlyOnceDelivery: false, 17 | GuaranteedOrder: false, 18 | GuaranteedOrderWithSingleSubscriber: true, 19 | Persistent: true, 20 | RequireSingleInstance: false, 21 | NewSubscriberReceivesOldMessages: true, 22 | }, 23 | createPubSub, 24 | createPubSubWithConsumerGroup, 25 | ) 26 | } 27 | -------------------------------------------------------------------------------- /pkg/redisstream/pubsub_test.go: -------------------------------------------------------------------------------- 1 | package redisstream 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "math/rand" 7 | "sort" 8 | "strconv" 9 | "sync" 10 | "testing" 11 | "time" 12 | 13 | "github.com/ThreeDotsLabs/watermill" 14 | "github.com/ThreeDotsLabs/watermill/message" 15 | "github.com/ThreeDotsLabs/watermill/pubsub/tests" 16 | 17 | "github.com/pkg/errors" 18 | "github.com/redis/go-redis/v9" 19 | "github.com/stretchr/testify/assert" 20 | "github.com/stretchr/testify/require" 21 | ) 22 | 23 | func redisClient() (redis.UniversalClient, error) { 24 | client := redis.NewClient(&redis.Options{ 25 | Addr: "127.0.0.1:6379", 26 | DB: 0, 27 | ReadTimeout: -1, 28 | PoolTimeout: 10 * time.Minute, 29 | }) 30 | err := client.Ping(context.Background()).Err() 31 | if err != nil { 32 | return nil, errors.Wrap(err, "redis simple connect fail") 33 | } 34 | return client, nil 35 | } 36 | 37 | func redisClientOrFail(t *testing.T) redis.UniversalClient { 38 | client, err := redisClient() 39 | require.NoError(t, err) 40 | return client 41 | } 42 | 43 | func newPubSub(t *testing.T, subConfig *SubscriberConfig) (message.Publisher, message.Subscriber) { 44 | logger := watermill.NewStdLogger(true, false) 45 | 46 | publisher, err := NewPublisher( 47 | PublisherConfig{ 48 | Client: redisClientOrFail(t), 49 | }, 50 | watermill.NewStdLogger(false, false), 51 | ) 52 | require.NoError(t, err) 53 | 54 | subscriber, err := NewSubscriber(*subConfig, logger) 55 | require.NoError(t, err) 56 | 57 | return publisher, subscriber 58 | } 59 | 60 | func createPubSub(t *testing.T) (message.Publisher, message.Subscriber) { 61 | return createPubSubWithConsumerGroup(t, watermill.NewShortUUID()) 62 | } 63 | 64 | func createPubSubWithConsumerGroup(t *testing.T, consumerGroup string) (message.Publisher, message.Subscriber) { 65 | return newPubSub(t, &SubscriberConfig{ 66 | Client: 
redisClientOrFail(t), 67 | Consumer: watermill.NewShortUUID(), 68 | ConsumerGroup: consumerGroup, 69 | BlockTime: 10 * time.Millisecond, 70 | ClaimInterval: 3 * time.Second, 71 | MaxIdleTime: 5 * time.Second, 72 | }) 73 | } 74 | 75 | func TestPublishSubscribe(t *testing.T) { 76 | features := tests.Features{ 77 | ConsumerGroups: true, 78 | ExactlyOnceDelivery: false, 79 | GuaranteedOrder: false, 80 | GuaranteedOrderWithSingleSubscriber: true, 81 | Persistent: true, 82 | RequireSingleInstance: false, 83 | NewSubscriberReceivesOldMessages: true, 84 | } 85 | 86 | tests.TestPubSub(t, features, createPubSub, createPubSubWithConsumerGroup) 87 | } 88 | 89 | func TestSubscriber(t *testing.T) { 90 | topic := watermill.NewShortUUID() 91 | 92 | subscriber, err := NewSubscriber( 93 | SubscriberConfig{ 94 | Client: redisClientOrFail(t), 95 | Consumer: watermill.NewShortUUID(), 96 | ConsumerGroup: watermill.NewShortUUID(), 97 | }, 98 | watermill.NewStdLogger(true, false), 99 | ) 100 | require.NoError(t, err) 101 | messages, err := subscriber.Subscribe(context.Background(), topic) 102 | require.NoError(t, err) 103 | 104 | publisher, err := NewPublisher( 105 | PublisherConfig{ 106 | Client: redisClientOrFail(t), 107 | }, 108 | watermill.NewStdLogger(false, false), 109 | ) 110 | require.NoError(t, err) 111 | 112 | var sentMsgs message.Messages 113 | for i := 0; i < 50; i++ { 114 | msg := message.NewMessage(watermill.NewShortUUID(), nil) 115 | require.NoError(t, publisher.Publish(topic, msg)) 116 | sentMsgs = append(sentMsgs, msg) 117 | } 118 | 119 | var receivedMsgs message.Messages 120 | for i := 0; i < 50; i++ { 121 | msg := <-messages 122 | if msg == nil { 123 | t.Fatal("msg nil") 124 | } 125 | receivedMsgs = append(receivedMsgs, msg) 126 | msg.Ack() 127 | } 128 | tests.AssertAllMessagesReceived(t, sentMsgs, receivedMsgs) 129 | 130 | require.NoError(t, publisher.Close()) 131 | require.NoError(t, subscriber.Close()) 132 | } 133 | 134 | func TestFanOut(t *testing.T) { 135 | topic := watermill.NewShortUUID() 136 | 137 | subscriber1, err := NewSubscriber( 138 | SubscriberConfig{ 139 | Client: redisClientOrFail(t), 140 | Consumer: watermill.NewShortUUID(), 141 | ConsumerGroup: "", 142 | }, 143 | watermill.NewStdLogger(true, false), 144 | ) 145 | require.NoError(t, err) 146 | 147 | subscriber2, err := NewSubscriber( 148 | SubscriberConfig{ 149 | Client: redisClientOrFail(t), 150 | Consumer: watermill.NewShortUUID(), 151 | ConsumerGroup: "", 152 | }, 153 | watermill.NewStdLogger(true, false), 154 | ) 155 | require.NoError(t, err) 156 | 157 | publisher, err := NewPublisher( 158 | PublisherConfig{ 159 | Client: redisClientOrFail(t), 160 | }, 161 | watermill.NewStdLogger(false, false), 162 | ) 163 | require.NoError(t, err) 164 | for i := 0; i < 10; i++ { 165 | require.NoError(t, publisher.Publish(topic, message.NewMessage(watermill.NewShortUUID(), []byte("test"+strconv.Itoa(i))))) 166 | } 167 | 168 | messages1, err := subscriber1.Subscribe(context.Background(), topic) 169 | require.NoError(t, err) 170 | messages2, err := subscriber2.Subscribe(context.Background(), topic) 171 | require.NoError(t, err) 172 | 173 | // wait for initial XREAD before publishing messages to avoid message loss 174 | time.Sleep(2 * DefaultBlockTime) 175 | for i := 10; i < 50; i++ { 176 | require.NoError(t, publisher.Publish(topic, message.NewMessage(watermill.NewShortUUID(), []byte("test"+strconv.Itoa(i))))) 177 | } 178 | 179 | for i := 10; i < 50; i++ { 180 | msg := <-messages1 181 | if msg == nil { 182 | t.Fatal("msg nil") 183 | } 184 | 
t.Logf("subscriber 1: %v %v %v", msg.UUID, msg.Metadata, string(msg.Payload)) 185 | require.Equal(t, "test"+strconv.Itoa(i), string(msg.Payload)) 186 | msg.Ack() 187 | } 188 | for i := 10; i < 50; i++ { 189 | msg := <-messages2 190 | if msg == nil { 191 | t.Fatal("msg nil") 192 | } 193 | t.Logf("subscriber 2: %v %v %v", msg.UUID, msg.Metadata, string(msg.Payload)) 194 | require.Equal(t, "test"+strconv.Itoa(i), string(msg.Payload)) 195 | msg.Ack() 196 | } 197 | 198 | require.NoError(t, publisher.Close()) 199 | require.NoError(t, subscriber1.Close()) 200 | require.NoError(t, subscriber2.Close()) 201 | } 202 | 203 | func TestFanOutWithFullMessageReplay(t *testing.T) { 204 | topic := watermill.NewShortUUID() 205 | 206 | subscriber1, err := NewSubscriber( 207 | SubscriberConfig{ 208 | Client: redisClientOrFail(t), 209 | Consumer: watermill.NewShortUUID(), 210 | ConsumerGroup: "", 211 | FanOutOldestId: "0", 212 | }, 213 | watermill.NewStdLogger(true, false), 214 | ) 215 | require.NoError(t, err) 216 | 217 | subscriber2, err := NewSubscriber( 218 | SubscriberConfig{ 219 | Client: redisClientOrFail(t), 220 | Consumer: watermill.NewShortUUID(), 221 | ConsumerGroup: "", 222 | FanOutOldestId: "0", 223 | }, 224 | watermill.NewStdLogger(true, false), 225 | ) 226 | require.NoError(t, err) 227 | 228 | publisher, err := NewPublisher( 229 | PublisherConfig{ 230 | Client: redisClientOrFail(t), 231 | }, 232 | watermill.NewStdLogger(false, false), 233 | ) 234 | require.NoError(t, err) 235 | for i := 0; i < 10; i++ { 236 | require.NoError(t, publisher.Publish(topic, message.NewMessage(watermill.NewShortUUID(), []byte("test"+strconv.Itoa(i))))) 237 | } 238 | 239 | messages1, err := subscriber1.Subscribe(context.Background(), topic) 240 | require.NoError(t, err) 241 | messages2, err := subscriber2.Subscribe(context.Background(), topic) 242 | require.NoError(t, err) 243 | 244 | // wait for initial XREAD before publishing messages to avoid message loss 245 | time.Sleep(2 * DefaultBlockTime) 246 | for i := 10; i < 50; i++ { 247 | require.NoError(t, publisher.Publish(topic, message.NewMessage(watermill.NewShortUUID(), []byte("test"+strconv.Itoa(i))))) 248 | } 249 | 250 | for i := 0; i < 50; i++ { 251 | msg := <-messages1 252 | require.NotNil(t, msg) 253 | 254 | t.Logf("subscriber 1: %v %v %v", msg.UUID, msg.Metadata, string(msg.Payload)) 255 | require.Equal(t, "test"+strconv.Itoa(i), string(msg.Payload)) 256 | msg.Ack() 257 | } 258 | for i := 0; i < 50; i++ { 259 | msg := <-messages2 260 | require.NotNil(t, msg) 261 | 262 | t.Logf("subscriber 2: %v %v %v", msg.UUID, msg.Metadata, string(msg.Payload)) 263 | require.Equal(t, "test"+strconv.Itoa(i), string(msg.Payload)) 264 | msg.Ack() 265 | } 266 | 267 | require.NoError(t, publisher.Close()) 268 | require.NoError(t, subscriber1.Close()) 269 | require.NoError(t, subscriber2.Close()) 270 | } 271 | 272 | func TestClaimIdle(t *testing.T) { 273 | // should be long enough to be robust even for CI boxes 274 | testInterval := 250 * time.Millisecond 275 | 276 | topic := watermill.NewShortUUID() 277 | consumerGroup := watermill.NewShortUUID() 278 | testLogger := watermill.NewStdLogger(true, false) 279 | 280 | router, err := message.NewRouter(message.RouterConfig{ 281 | CloseTimeout: testInterval, 282 | }, testLogger) 283 | require.NoError(t, err) 284 | 285 | type messageWithMeta struct { 286 | msgID int 287 | subscriberID int 288 | } 289 | 290 | receivedCh := make(chan *messageWithMeta) 291 | 292 | // let's start a few subscribers; each will wait between 3 and 5 intervals every 
time 293 | // it receives a message 294 | nSubscribers := 20 295 | seen := make(map[string]map[string]bool) 296 | var seenLock sync.Mutex 297 | for subscriberID := 0; subscriberID < nSubscribers; subscriberID++ { 298 | // need to assign to a variable local to the loop because of how golang 299 | // handles loop variables in function literals 300 | subID := subscriberID 301 | 302 | suscriber, err := NewSubscriber( 303 | SubscriberConfig{ 304 | Client: redisClientOrFail(t), 305 | Consumer: strconv.Itoa(subID), 306 | ConsumerGroup: consumerGroup, 307 | ClaimInterval: testInterval, 308 | MaxIdleTime: 2 * testInterval, 309 | // we're only going to claim messages for consumers with odd IDs 310 | ShouldClaimPendingMessage: func(ext redis.XPendingExt) bool { 311 | idleConsumerID, err := strconv.Atoi(ext.Consumer) 312 | require.NoError(t, err) 313 | 314 | if idleConsumerID%2 == 0 { 315 | return false 316 | } 317 | 318 | seenLock.Lock() 319 | defer seenLock.Unlock() 320 | 321 | if seen[ext.ID] == nil { 322 | seen[ext.ID] = make(map[string]bool) 323 | } 324 | if seen[ext.ID][ext.Consumer] { 325 | return false 326 | } 327 | seen[ext.ID][ext.Consumer] = true 328 | return true 329 | }, 330 | }, 331 | testLogger, 332 | ) 333 | require.NoError(t, err) 334 | 335 | router.AddNoPublisherHandler( 336 | strconv.Itoa(subID), 337 | topic, 338 | suscriber, 339 | func(msg *message.Message) error { 340 | msgID, err := strconv.Atoi(string(msg.Payload)) 341 | require.NoError(t, err) 342 | 343 | receivedCh <- &messageWithMeta{ 344 | msgID: msgID, 345 | subscriberID: subID, 346 | } 347 | sleepInterval := (3 + 2*rand.Float64()) * float64(testInterval) 348 | time.Sleep(time.Duration(sleepInterval)) 349 | 350 | return nil 351 | }, 352 | ) 353 | } 354 | 355 | runCtx, cancel := context.WithCancel(context.Background()) 356 | var wg sync.WaitGroup 357 | wg.Add(1) 358 | go func() { 359 | defer wg.Done() 360 | require.NoError(t, router.Run(runCtx)) 361 | }() 362 | 363 | // now let's push a few messages 364 | publisher, err := NewPublisher( 365 | PublisherConfig{ 366 | Client: redisClientOrFail(t), 367 | }, 368 | testLogger, 369 | ) 370 | require.NoError(t, err) 371 | 372 | nMessages := 100 373 | for msgID := 0; msgID < nMessages; msgID++ { 374 | msg := message.NewMessage(watermill.NewShortUUID(), []byte(strconv.Itoa(msgID))) 375 | require.NoError(t, publisher.Publish(topic, msg)) 376 | } 377 | 378 | // now let's wait to receive them 379 | receivedByID := make(map[int][]*messageWithMeta) 380 | for len(receivedByID) != nMessages { 381 | select { 382 | case msg := <-receivedCh: 383 | receivedByID[msg.msgID] = append(receivedByID[msg.msgID], msg) 384 | case <-time.After(8 * testInterval): 385 | t.Fatalf("timed out waiting for new messages, only received %d unique messages", len(receivedByID)) 386 | } 387 | } 388 | 389 | // shut down the router and the subscribers 390 | cancel() 391 | wg.Wait() 392 | 393 | // now let's look at what we've received: 394 | // * at least some messages should have been retried 395 | // * for retried messages, there should be at most one consumer with an even ID 396 | nMsgsWithRetries := 0 397 | for _, withSameID := range receivedByID { 398 | require.Greater(t, len(withSameID), 0) 399 | if len(withSameID) == 1 { 400 | // this message was not retried at all 401 | continue 402 | } 403 | 404 | nMsgsWithRetries++ 405 | 406 | nEvenConsumers := 0 407 | for _, msg := range withSameID { 408 | if msg.subscriberID%2 == 0 { 409 | nEvenConsumers++ 410 | } 411 | } 412 | assert.LessOrEqual(t, nEvenConsumers, 1) 413 | } 
414 | 415 | assert.GreaterOrEqual(t, nMsgsWithRetries, 3) 416 | } 417 | 418 | func TestSubscriber_ClaimAllMessages(t *testing.T) { 419 | rdb := redisClientOrFail(t) 420 | 421 | logger := watermill.NewStdLogger(true, true) 422 | 423 | topic := watermill.NewShortUUID() 424 | consumerGroup := watermill.NewShortUUID() 425 | 426 | // This one should claim all messages 427 | subGood, err := NewSubscriber(SubscriberConfig{ 428 | Client: rdb, 429 | ConsumerGroup: consumerGroup, 430 | Consumer: "good", 431 | MaxIdleTime: 500 * time.Millisecond, 432 | ClaimInterval: 500 * time.Millisecond, 433 | CheckConsumersInterval: 1 * time.Second, 434 | ConsumerTimeout: 2 * time.Second, 435 | }, logger) 436 | require.NoError(t, err) 437 | 438 | // This one never acks 439 | subBad, err := NewSubscriber(SubscriberConfig{ 440 | Client: rdb, 441 | ConsumerGroup: consumerGroup, 442 | Consumer: "bad", 443 | }, logger) 444 | require.NoError(t, err) 445 | 446 | pub, err := NewPublisher(PublisherConfig{ 447 | Client: rdb, 448 | }, logger) 449 | require.NoError(t, err) 450 | 451 | for i := 0; i < 10; i++ { 452 | err = pub.Publish(topic, message.NewMessage(watermill.NewUUID(), []byte(strconv.Itoa(i)))) 453 | assert.NoError(t, err) 454 | } 455 | 456 | badCtx, badCancel := context.WithCancel(context.Background()) 457 | defer badCancel() 458 | 459 | msgs, err := subBad.Subscribe(badCtx, topic) 460 | require.NoError(t, err) 461 | 462 | // Pull a message, don't ack it! 463 | <-msgs 464 | 465 | // Cancel the bad subscriber 466 | badCancel() 467 | 468 | goodCtx, goodCancel := context.WithCancel(context.Background()) 469 | defer goodCancel() 470 | 471 | msgs, err = subGood.Subscribe(goodCtx, topic) 472 | require.NoError(t, err) 473 | 474 | var processedMessages []string 475 | 476 | // Try to receive all messages 477 | for i := 0; i < 10; i++ { 478 | select { 479 | case msg, ok := <-msgs: 480 | assert.True(t, ok) 481 | processedMessages = append(processedMessages, string(msg.Payload)) 482 | msg.Ack() 483 | case <-time.After(5 * time.Second): 484 | t.Fatal("Timeout waiting to receive all messages") 485 | } 486 | } 487 | 488 | sort.Strings(processedMessages) 489 | var expected []string 490 | for i := 0; i < 10; i++ { 491 | expected = append(expected, strconv.Itoa(i)) 492 | } 493 | assert.Equal(t, expected, processedMessages) 494 | 495 | assert.Eventually(t, func() bool { 496 | xic, _ := rdb.XInfoConsumers(context.Background(), topic, consumerGroup).Result() 497 | return len(xic) == 1 && xic[0].Name == "good" 498 | }, 5*time.Second, 100*time.Millisecond, "Idle consumer should be deleted") 499 | } 500 | 501 | type threadSafeBuffer struct { 502 | b bytes.Buffer 503 | m sync.Mutex 504 | } 505 | 506 | func (b *threadSafeBuffer) Write(p []byte) (n int, err error) { 507 | b.m.Lock() 508 | defer b.m.Unlock() 509 | return b.b.Write(p) 510 | } 511 | func (b *threadSafeBuffer) String() string { 512 | b.m.Lock() 513 | defer b.m.Unlock() 514 | return b.b.String() 515 | } 516 | 517 | func TestSubscriber_Read(t *testing.T) { 518 | 519 | t.Run("Without ShouldStopOnReadErrors", func(t *testing.T) { 520 | t.Parallel() 521 | rdb := redisClientOrFail(t) 522 | var buf threadSafeBuffer 523 | logger := watermill.NewStdLoggerWithOut(&buf, false, false) 524 | topic := watermill.NewShortUUID() 525 | consumerGroup := watermill.NewShortUUID() 526 | consumer := watermill.NewShortUUID() 527 | 528 | // This one should claim all messages 529 | sub, err := NewSubscriber(SubscriberConfig{ 530 | Client: rdb, 531 | ConsumerGroup: consumerGroup, 532 | Consumer: 
consumer, 533 | MaxIdleTime: 500 * time.Millisecond, 534 | ClaimInterval: 500 * time.Millisecond, 535 | CheckConsumersInterval: 1 * time.Second, 536 | ConsumerTimeout: 2 * time.Second, 537 | }, logger) 538 | require.NoError(t, err) 539 | 540 | ctx, cancel := context.WithCancel(context.Background()) 541 | defer cancel() 542 | 543 | msg, err := sub.Subscribe(ctx, topic) 544 | require.NoError(t, err) 545 | go func() { 546 | for { 547 | select { 548 | case <-msg: 549 | case <-ctx.Done(): 550 | return 551 | } 552 | } 553 | }() 554 | require.NotContains(t, buf.String(), "read fail") 555 | require.NotContains(t, buf.String(), "NOGROUP No such key '"+topic+"'") 556 | time.Sleep(30 * time.Millisecond) 557 | rdb.Del(ctx, topic) 558 | err = rdb.XAdd(context.Background(), &redis.XAddArgs{ 559 | Stream: topic, 560 | Values: map[string]any{ 561 | "test": "test", 562 | }, 563 | Approx: true, 564 | }).Err() 565 | require.NoError(t, err) 566 | time.Sleep(600 * time.Millisecond) 567 | require.Contains(t, buf.String(), "read fail") 568 | require.Contains(t, buf.String(), "NOGROUP No such key '"+topic+"'") 569 | require.NotContains(t, buf.String(), "stop reading after error") 570 | require.NoError(t, sub.Close()) 571 | }) 572 | 573 | t.Run("With ShouldStopOnReadErrors", func(t *testing.T) { 574 | t.Parallel() 575 | rdb := redisClientOrFail(t) 576 | var buf threadSafeBuffer 577 | logger := watermill.NewStdLoggerWithOut(&buf, false, false) 578 | topic := watermill.NewShortUUID() 579 | consumerGroup := watermill.NewShortUUID() 580 | consumer := watermill.NewShortUUID() 581 | 582 | sub, err := NewSubscriber(SubscriberConfig{ 583 | Client: rdb, 584 | ConsumerGroup: consumerGroup, 585 | Consumer: consumer, 586 | MaxIdleTime: 500 * time.Millisecond, 587 | ClaimInterval: 500 * time.Millisecond, 588 | CheckConsumersInterval: 1 * time.Second, 589 | ConsumerTimeout: 2 * time.Second, 590 | ShouldStopOnReadErrors: func(err error) bool { 591 | return err != nil && redis.HasErrorPrefix(err, "NOGROUP") 592 | }, 593 | }, logger) 594 | require.NoError(t, err) 595 | 596 | ctx, cancel := context.WithCancel(context.Background()) 597 | defer cancel() 598 | 599 | msg, err := sub.Subscribe(ctx, topic) 600 | require.NoError(t, err) 601 | done := make(chan bool) 602 | go func() { 603 | for { 604 | select { 605 | case _, ok := <-msg: 606 | if !ok { 607 | done <- true 608 | } 609 | case <-ctx.Done(): 610 | return 611 | } 612 | } 613 | }() 614 | require.NotContains(t, buf.String(), "read fail") 615 | require.NotContains(t, buf.String(), "NOGROUP No such key '"+topic+"'") 616 | time.Sleep(30 * time.Millisecond) 617 | rdb.Del(ctx, topic) 618 | err = rdb.XAdd(context.Background(), &redis.XAddArgs{ 619 | Stream: topic, 620 | Values: map[string]any{ 621 | "test": "test", 622 | }, 623 | Approx: true, 624 | }).Err() 625 | require.NoError(t, err) 626 | time.Sleep(600 * time.Millisecond) 627 | require.True(t, <-done, true) 628 | require.Contains(t, buf.String(), "NOGROUP No such key '"+topic+"'") 629 | require.Contains(t, buf.String(), "stop reading after error") 630 | require.NoError(t, sub.Close()) 631 | }) 632 | 633 | } 634 | -------------------------------------------------------------------------------- /pkg/redisstream/subscriber.go: -------------------------------------------------------------------------------- 1 | package redisstream 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | "github.com/Rican7/retry" 9 | "github.com/ThreeDotsLabs/watermill" 10 | "github.com/ThreeDotsLabs/watermill/message" 11 | "github.com/pkg/errors" 
12 | "github.com/redis/go-redis/v9" 13 | ) 14 | 15 | const ( 16 | groupStartid = ">" 17 | redisBusyGroup = "BUSYGROUP Consumer Group name already exists" 18 | ) 19 | 20 | const ( 21 | // NoSleep can be set to SubscriberConfig.NackResendSleep 22 | NoSleep time.Duration = -1 23 | 24 | DefaultBlockTime = time.Millisecond * 100 25 | 26 | DefaultClaimInterval = time.Second * 5 27 | 28 | DefaultClaimBatchSize = int64(100) 29 | 30 | DefaultMaxIdleTime = time.Second * 60 31 | 32 | DefaultCheckConsumersInterval = time.Second * 300 33 | DefaultConsumerTimeout = time.Second * 600 34 | ) 35 | 36 | type Subscriber struct { 37 | config SubscriberConfig 38 | client redis.UniversalClient 39 | logger watermill.LoggerAdapter 40 | closing chan struct{} 41 | subscribersWg sync.WaitGroup 42 | 43 | closed bool 44 | closeMutex sync.Mutex 45 | } 46 | 47 | // NewSubscriber creates a new redis stream Subscriber. 48 | func NewSubscriber(config SubscriberConfig, logger watermill.LoggerAdapter) (*Subscriber, error) { 49 | config.setDefaults() 50 | 51 | if err := config.Validate(); err != nil { 52 | return nil, err 53 | } 54 | 55 | if logger == nil { 56 | logger = &watermill.NopLogger{} 57 | } 58 | 59 | return &Subscriber{ 60 | config: config, 61 | client: config.Client, 62 | logger: logger, 63 | closing: make(chan struct{}), 64 | }, nil 65 | } 66 | 67 | type SubscriberConfig struct { 68 | Client redis.UniversalClient 69 | 70 | Unmarshaller Unmarshaller 71 | 72 | // Redis stream consumer id, paired with ConsumerGroup. 73 | Consumer string 74 | // When empty, fan-out mode will be used. 75 | ConsumerGroup string 76 | 77 | // How long after Nack message should be redelivered. 78 | NackResendSleep time.Duration 79 | 80 | // Block to wait next redis stream message. 81 | BlockTime time.Duration 82 | 83 | // Claim idle pending message interval. 84 | ClaimInterval time.Duration 85 | 86 | // How many pending messages are claimed at most each claim interval. 87 | ClaimBatchSize int64 88 | 89 | // How long should we treat a pending message as claimable. 90 | MaxIdleTime time.Duration 91 | 92 | // Check consumer status interval. 93 | CheckConsumersInterval time.Duration 94 | 95 | // After this timeout an idle consumer with no pending messages will be removed from the consumer group. 96 | ConsumerTimeout time.Duration 97 | 98 | // Start consumption from the specified message ID. 99 | // When using "0", the consumer group will consume from the very first message. 100 | // When using "$", the consumer group will consume from the latest message. 101 | OldestId string 102 | 103 | // If consumer group in not set, for fanout start consumption from the specified message ID. 104 | // When using "0", the consumer will consume from the very first message. 105 | // When using "$", the consumer will consume from the latest message. 106 | FanOutOldestId string 107 | 108 | // If this is set, it will be called to decide whether a pending message that 109 | // has been idle for more than MaxIdleTime should actually be claimed. 110 | // If this is not set, then all pending messages that have been idle for more than MaxIdleTime will be claimed. 111 | // This can be useful e.g. for tasks where the processing time can be very variable - 112 | // so we can't just use a short MaxIdleTime; but at the same time dead 113 | // consumers should be spotted quickly - so we can't just use a long MaxIdleTime either. 114 | // In such cases, if we have another way for checking consumers' health, then we can 115 | // leverage that in this callback. 
116 | ShouldClaimPendingMessage func(redis.XPendingExt) bool 117 | 118 | // If this is set, it will be called to decide whether a reading error 119 | // should return the read method and close the subscriber or just log the error 120 | // and continue. 121 | ShouldStopOnReadErrors func(error) bool 122 | } 123 | 124 | func (sc *SubscriberConfig) setDefaults() { 125 | if sc.Unmarshaller == nil { 126 | sc.Unmarshaller = DefaultMarshallerUnmarshaller{} 127 | } 128 | if sc.Consumer == "" { 129 | sc.Consumer = watermill.NewShortUUID() 130 | } 131 | if sc.NackResendSleep == 0 { 132 | sc.NackResendSleep = NoSleep 133 | } 134 | if sc.BlockTime == 0 { 135 | sc.BlockTime = DefaultBlockTime 136 | } 137 | if sc.ClaimInterval == 0 { 138 | sc.ClaimInterval = DefaultClaimInterval 139 | } 140 | if sc.ClaimBatchSize == 0 { 141 | sc.ClaimBatchSize = DefaultClaimBatchSize 142 | } 143 | if sc.MaxIdleTime == 0 { 144 | sc.MaxIdleTime = DefaultMaxIdleTime 145 | } 146 | if sc.CheckConsumersInterval == 0 { 147 | sc.CheckConsumersInterval = DefaultCheckConsumersInterval 148 | } 149 | if sc.ConsumerTimeout == 0 { 150 | sc.ConsumerTimeout = DefaultConsumerTimeout 151 | } 152 | // Consume from scratch by default 153 | if sc.OldestId == "" { 154 | sc.OldestId = "0" 155 | } 156 | 157 | if sc.FanOutOldestId == "" { 158 | sc.FanOutOldestId = "$" 159 | } 160 | } 161 | 162 | func (sc *SubscriberConfig) Validate() error { 163 | if sc.Client == nil { 164 | return errors.New("redis client is empty") 165 | } 166 | return nil 167 | } 168 | 169 | func (s *Subscriber) Subscribe(ctx context.Context, topic string) (<-chan *message.Message, error) { 170 | if s.closed { 171 | return nil, errors.New("subscriber closed") 172 | } 173 | 174 | s.subscribersWg.Add(1) 175 | 176 | logFields := watermill.LogFields{ 177 | "provider": "redis", 178 | "topic": topic, 179 | "consumer_group": s.config.ConsumerGroup, 180 | "consumer_uuid": s.config.Consumer, 181 | } 182 | s.logger.Info("Subscribing to redis stream topic", logFields) 183 | 184 | // we don't want to have buffered channel to not consume messsage from redis stream when consumer is not consuming 185 | output := make(chan *message.Message) 186 | 187 | consumeClosed, err := s.consumeMessages(ctx, topic, output, logFields) 188 | if err != nil { 189 | s.subscribersWg.Done() 190 | return nil, err 191 | } 192 | 193 | go func() { 194 | <-consumeClosed 195 | close(output) 196 | s.subscribersWg.Done() 197 | }() 198 | 199 | return output, nil 200 | } 201 | 202 | func (s *Subscriber) consumeMessages(ctx context.Context, topic string, output chan *message.Message, logFields watermill.LogFields) (consumeMessageClosed chan struct{}, err error) { 203 | s.logger.Info("Starting consuming", logFields) 204 | 205 | ctx, cancel := context.WithCancel(ctx) 206 | go func() { 207 | select { 208 | case <-s.closing: 209 | s.logger.Debug("Closing subscriber, cancelling consumeMessages", logFields) 210 | cancel() 211 | case <-ctx.Done(): 212 | // avoid goroutine leak 213 | } 214 | }() 215 | if s.config.ConsumerGroup != "" { 216 | // create consumer group 217 | if _, err := s.client.XGroupCreateMkStream(ctx, topic, s.config.ConsumerGroup, s.config.OldestId).Result(); err != nil && err.Error() != redisBusyGroup { 218 | return nil, err 219 | } 220 | } 221 | 222 | consumeMessageClosed, err = s.consumeStreams(ctx, topic, output, logFields) 223 | if err != nil { 224 | s.logger.Debug( 225 | "Starting consume failed, cancelling context", 226 | logFields.Add(watermill.LogFields{"err": err}), 227 | ) 228 | cancel() 229 | return 
nil, err 230 | } 231 | 232 | return consumeMessageClosed, nil 233 | } 234 | 235 | func (s *Subscriber) consumeStreams(ctx context.Context, stream string, output chan *message.Message, logFields watermill.LogFields) (chan struct{}, error) { 236 | messageHandler := s.createMessageHandler(output) 237 | consumeMessageClosed := make(chan struct{}) 238 | 239 | go func() { 240 | defer close(consumeMessageClosed) 241 | 242 | readChannel := make(chan *redis.XStream, 1) 243 | go s.read(ctx, stream, readChannel, logFields) 244 | 245 | for { 246 | select { 247 | case xs := <-readChannel: 248 | if xs == nil { 249 | s.logger.Debug("readStreamChannel is closed, stopping readStream", logFields) 250 | return 251 | } 252 | if err := messageHandler.processMessage(ctx, xs.Stream, &xs.Messages[0], logFields); err != nil { 253 | s.logger.Error("processMessage fail", err, logFields) 254 | return 255 | } 256 | case <-s.closing: 257 | s.logger.Debug("Subscriber is closing, stopping readStream", logFields) 258 | return 259 | case <-ctx.Done(): 260 | s.logger.Debug("Ctx was cancelled, stopping readStream", logFields) 261 | return 262 | } 263 | } 264 | }() 265 | 266 | return consumeMessageClosed, nil 267 | } 268 | 269 | func (s *Subscriber) read(ctx context.Context, stream string, readChannel chan<- *redis.XStream, logFields watermill.LogFields) { 270 | wg := &sync.WaitGroup{} 271 | subCtx, subCancel := context.WithCancel(ctx) 272 | defer func() { 273 | subCancel() 274 | wg.Wait() 275 | close(readChannel) 276 | }() 277 | var ( 278 | streamsGroup = []string{stream, groupStartid} 279 | 280 | fanOutStartid = s.config.FanOutOldestId 281 | countFanOut int64 = 0 282 | blockTime time.Duration = 0 283 | 284 | xss []redis.XStream 285 | xs *redis.XStream 286 | err error 287 | ) 288 | 289 | if s.config.ConsumerGroup != "" { 290 | // 1. get pending message from idle consumer 291 | wg.Add(1) 292 | s.claim(subCtx, stream, readChannel, false, wg, logFields) 293 | 294 | // 2. 
background 295 | wg.Add(1) 296 | go s.claim(subCtx, stream, readChannel, true, wg, logFields) 297 | 298 | // check consumer status and remove idling consumers if possible 299 | wg.Add(1) 300 | go s.checkConsumers(subCtx, stream, wg, logFields) 301 | } 302 | 303 | for { 304 | select { 305 | case <-s.closing: 306 | return 307 | case <-ctx.Done(): 308 | return 309 | default: 310 | if s.config.ConsumerGroup != "" { 311 | xss, err = s.client.XReadGroup( 312 | ctx, 313 | &redis.XReadGroupArgs{ 314 | Group: s.config.ConsumerGroup, 315 | Consumer: s.config.Consumer, 316 | Streams: streamsGroup, 317 | Count: 1, 318 | Block: blockTime, 319 | }).Result() 320 | } else { 321 | xss, err = s.client.XRead( 322 | ctx, 323 | &redis.XReadArgs{ 324 | Streams: []string{stream, fanOutStartid}, 325 | Count: countFanOut, 326 | Block: blockTime, 327 | }).Result() 328 | } 329 | if err == redis.Nil { 330 | continue 331 | } else if err != nil { 332 | if s.config.ShouldStopOnReadErrors != nil { 333 | if s.config.ShouldStopOnReadErrors(err) { 334 | s.logger.Error("stop reading after error", err, logFields) 335 | return 336 | } 337 | } 338 | // prevent excessive output from abnormal connections 339 | time.Sleep(500 * time.Millisecond) 340 | s.logger.Error("read fail", err, logFields) 341 | } 342 | if len(xss) < 1 || len(xss[0].Messages) < 1 { 343 | continue 344 | } 345 | // update last delivered message 346 | xs = &xss[0] 347 | if s.config.ConsumerGroup == "" { 348 | fanOutStartid = xs.Messages[0].ID 349 | countFanOut = 1 350 | } 351 | 352 | blockTime = s.config.BlockTime 353 | 354 | select { 355 | case <-s.closing: 356 | return 357 | case <-ctx.Done(): 358 | return 359 | case readChannel <- xs: 360 | } 361 | } 362 | } 363 | } 364 | 365 | func (s *Subscriber) claim(ctx context.Context, stream string, readChannel chan<- *redis.XStream, keep bool, wg *sync.WaitGroup, logFields watermill.LogFields) { 366 | var ( 367 | xps []redis.XPendingExt 368 | err error 369 | xp redis.XPendingExt 370 | xm []redis.XMessage 371 | tick = time.NewTicker(s.config.ClaimInterval) 372 | initCh = make(chan byte, 1) 373 | ) 374 | defer func() { 375 | tick.Stop() 376 | close(initCh) 377 | wg.Done() 378 | }() 379 | if !keep { // if not keep, run immediately 380 | initCh <- 1 381 | } 382 | 383 | OUTER_LOOP: 384 | for { 385 | select { 386 | case <-s.closing: 387 | return 388 | case <-ctx.Done(): 389 | return 390 | case <-tick.C: 391 | case <-initCh: 392 | } 393 | 394 | xps, err = s.client.XPendingExt(ctx, &redis.XPendingExtArgs{ 395 | Stream: stream, 396 | Group: s.config.ConsumerGroup, 397 | Idle: s.config.MaxIdleTime, 398 | Start: "0", 399 | End: "+", 400 | Count: s.config.ClaimBatchSize, 401 | }).Result() 402 | if err != nil { 403 | s.logger.Error( 404 | "xpendingext fail", 405 | err, 406 | logFields, 407 | ) 408 | continue 409 | } 410 | for _, xp = range xps { 411 | shouldClaim := xp.Idle >= s.config.MaxIdleTime 412 | if shouldClaim && s.config.ShouldClaimPendingMessage != nil { 413 | shouldClaim = s.config.ShouldClaimPendingMessage(xp) 414 | } 415 | 416 | if shouldClaim { 417 | // assign the ownership of a pending message to the current consumer 418 | xm, err = s.client.XClaim(ctx, &redis.XClaimArgs{ 419 | Stream: stream, 420 | Group: s.config.ConsumerGroup, 421 | Consumer: s.config.Consumer, 422 | // this is important: it ensures that 2 concurrent subscribers 423 | // won't claim the same pending message at the same time 424 | MinIdle: s.config.MaxIdleTime, 425 | Messages: []string{xp.ID}, 426 | }).Result() 427 | if err == redis.Nil { 428 | // 
Any messages that are nil, should be xacked and skipped 429 | s.client.XAck(ctx, stream, s.config.ConsumerGroup, xp.ID) 430 | continue 431 | } else if err != nil { 432 | s.logger.Error( 433 | "xclaim fail", 434 | err, 435 | logFields.Add(watermill.LogFields{"xp": xp}), 436 | ) 437 | continue OUTER_LOOP 438 | } 439 | if len(xm) > 0 { 440 | select { 441 | case <-s.closing: 442 | return 443 | case <-ctx.Done(): 444 | return 445 | case readChannel <- &redis.XStream{Stream: stream, Messages: xm}: 446 | } 447 | } 448 | } 449 | } 450 | if len(xps) == 0 || int64(len(xps)) < s.config.ClaimBatchSize { // done 451 | if !keep { 452 | return 453 | } 454 | continue 455 | } 456 | } 457 | } 458 | 459 | func (s *Subscriber) checkConsumers(ctx context.Context, stream string, wg *sync.WaitGroup, logFields watermill.LogFields) { 460 | tick := time.NewTicker(s.config.CheckConsumersInterval) 461 | defer func() { 462 | tick.Stop() 463 | wg.Done() 464 | }() 465 | 466 | for { 467 | select { 468 | case <-s.closing: 469 | return 470 | case <-ctx.Done(): 471 | return 472 | case <-tick.C: 473 | } 474 | xics, err := s.client.XInfoConsumers(ctx, stream, s.config.ConsumerGroup).Result() 475 | if err != nil { 476 | s.logger.Error( 477 | "xinfoconsumers failed", 478 | err, 479 | logFields, 480 | ) 481 | } 482 | for _, xic := range xics { 483 | if xic.Idle < s.config.ConsumerTimeout { 484 | continue 485 | } 486 | if xic.Pending == 0 { 487 | if err = s.client.XGroupDelConsumer(ctx, stream, s.config.ConsumerGroup, xic.Name).Err(); err != nil { 488 | s.logger.Error( 489 | "xgroupdelconsumer failed", 490 | err, 491 | logFields, 492 | ) 493 | } 494 | } 495 | } 496 | } 497 | } 498 | 499 | func (s *Subscriber) createMessageHandler(output chan *message.Message) messageHandler { 500 | return messageHandler{ 501 | outputChannel: output, 502 | rc: s.client, 503 | consumerGroup: s.config.ConsumerGroup, 504 | unmarshaller: s.config.Unmarshaller, 505 | nackResendSleep: s.config.NackResendSleep, 506 | logger: s.logger, 507 | closing: s.closing, 508 | } 509 | } 510 | 511 | func (s *Subscriber) Close() error { 512 | s.closeMutex.Lock() 513 | defer s.closeMutex.Unlock() 514 | 515 | if s.closed { 516 | return nil 517 | } 518 | 519 | s.closed = true 520 | close(s.closing) 521 | s.subscribersWg.Wait() 522 | 523 | if err := s.client.Close(); err != nil { 524 | return err 525 | } 526 | 527 | s.logger.Debug("Redis stream subscriber closed", nil) 528 | 529 | return nil 530 | } 531 | 532 | type messageHandler struct { 533 | outputChannel chan<- *message.Message 534 | rc redis.UniversalClient 535 | consumerGroup string 536 | unmarshaller Unmarshaller 537 | 538 | nackResendSleep time.Duration 539 | 540 | logger watermill.LoggerAdapter 541 | closing chan struct{} 542 | } 543 | 544 | func (h *messageHandler) processMessage(ctx context.Context, stream string, xm *redis.XMessage, messageLogFields watermill.LogFields) error { 545 | receivedMsgLogFields := messageLogFields.Add(watermill.LogFields{ 546 | "xid": xm.ID, 547 | }) 548 | 549 | h.logger.Trace("Received message from redis stream", receivedMsgLogFields) 550 | 551 | msg, err := h.unmarshaller.Unmarshal(xm.Values) 552 | if err != nil { 553 | return errors.Wrapf(err, "message unmarshal failed") 554 | } 555 | 556 | ctx, cancelCtx := context.WithCancel(ctx) 557 | msg.SetContext(ctx) 558 | defer cancelCtx() 559 | 560 | receivedMsgLogFields = receivedMsgLogFields.Add(watermill.LogFields{ 561 | "message_uuid": msg.UUID, 562 | "stream": stream, 563 | "xid": xm.ID, 564 | }) 565 | 566 | ResendLoop: 567 | for { 
568 | select { 569 | case h.outputChannel <- msg: 570 | h.logger.Trace("Message sent to consumer", receivedMsgLogFields) 571 | case <-h.closing: 572 | h.logger.Trace("Closing, message discarded", receivedMsgLogFields) 573 | return nil 574 | case <-ctx.Done(): 575 | h.logger.Trace("Closing, ctx cancelled before sent to consumer", receivedMsgLogFields) 576 | return nil 577 | } 578 | 579 | select { 580 | case <-msg.Acked(): 581 | if h.consumerGroup != "" { 582 | // deadly retry ack 583 | err := retry.Retry(func(attempt uint) error { 584 | err := h.rc.XAck(ctx, stream, h.consumerGroup, xm.ID).Err() 585 | return err 586 | }, func(attempt uint) bool { 587 | if attempt != 0 { 588 | time.Sleep(time.Millisecond * 100) 589 | } 590 | return true 591 | }, func(attempt uint) bool { 592 | select { 593 | case <-h.closing: 594 | case <-ctx.Done(): 595 | default: 596 | return true 597 | } 598 | return false 599 | }) 600 | if err != nil { 601 | h.logger.Error("Message Acked fail", err, receivedMsgLogFields) 602 | } 603 | } 604 | h.logger.Trace("Message Acked", receivedMsgLogFields) 605 | break ResendLoop 606 | case <-msg.Nacked(): 607 | h.logger.Trace("Message Nacked", receivedMsgLogFields) 608 | 609 | // reset acks, etc. 610 | msg = msg.Copy() 611 | if h.nackResendSleep != NoSleep { 612 | time.Sleep(h.nackResendSleep) 613 | } 614 | 615 | continue ResendLoop 616 | case <-h.closing: 617 | h.logger.Trace("Closing, message discarded before ack", receivedMsgLogFields) 618 | return nil 619 | case <-ctx.Done(): 620 | h.logger.Trace("Closing, ctx cancelled before ack", receivedMsgLogFields) 621 | return nil 622 | } 623 | } 624 | 625 | return nil 626 | } 627 | --------------------------------------------------------------------------------
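
For reference, a minimal usage sketch of how the Publisher and Subscriber from pkg/redisstream can be wired together. This file is not part of the repository: the topic name "example_topic", the group name "example_group", and the main-package layout are illustrative assumptions; the Redis address 127.0.0.1:6379 matches docker-compose.yaml and the test helper redisClient(), and error handling is collapsed to log.Fatal for brevity.

package main

import (
	"context"
	"log"

	"github.com/ThreeDotsLabs/watermill"
	"github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream"
	"github.com/ThreeDotsLabs/watermill/message"
	"github.com/redis/go-redis/v9"
)

func main() {
	logger := watermill.NewStdLogger(false, false)

	// Publisher.Close and Subscriber.Close both close the client they were given,
	// so two separate clients are used here, as in the tests above.
	// ReadTimeout -1 mirrors redisClient() in pubsub_test.go, since XREAD/XREADGROUP block.
	pubClient := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379", ReadTimeout: -1})
	subClient := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379", ReadTimeout: -1})

	subscriber, err := redisstream.NewSubscriber(redisstream.SubscriberConfig{
		Client:        subClient,
		ConsumerGroup: "example_group", // illustrative; an empty string switches to fan-out mode
	}, logger)
	if err != nil {
		log.Fatal(err)
	}

	messages, err := subscriber.Subscribe(context.Background(), "example_topic")
	if err != nil {
		log.Fatal(err)
	}

	publisher, err := redisstream.NewPublisher(redisstream.PublisherConfig{
		Client: pubClient,
	}, logger)
	if err != nil {
		log.Fatal(err)
	}

	// Publish one message; the publisher calls XADD synchronously and returns any error.
	msg := message.NewMessage(watermill.NewUUID(), []byte("hello"))
	if err := publisher.Publish("example_topic", msg); err != nil {
		log.Fatal(err)
	}

	// Receive the message and ack it, so it is XACK'ed in the consumer group.
	received := <-messages
	log.Printf("received %s: %s", received.UUID, string(received.Payload))
	received.Ack()

	if err := publisher.Close(); err != nil {
		log.Fatal(err)
	}
	if err := subscriber.Close(); err != nil {
		log.Fatal(err)
	}
}

With an empty ConsumerGroup the subscriber reads with XREAD instead of XREADGROUP, so every subscriber sees every message (the fan-out mode exercised by TestFanOut); with a consumer group, unacked messages left idle longer than MaxIdleTime can be claimed by other consumers, as exercised by TestClaimIdle and TestSubscriber_ClaimAllMessages.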