├── .gitconfig ├── .github └── workflows │ └── main.yml ├── .gitignore ├── .goreleaser.yml ├── Dockerfile ├── Dockerfile-alpine ├── LICENSE.txt ├── Makefile ├── Readme.md ├── acceptance-tests ├── acceptance.bats ├── entrypoint.sh ├── gen-test-certs.sh ├── redis-confs │ ├── users.acl │ ├── with_password.conf │ └── with_username_and_password.conf └── tests │ ├── 1mkeys.sh │ ├── large-zset.sh │ ├── multiple-dbs.sh │ ├── select-db-with-password.sh │ ├── select-db-with-username-password.sh │ └── select-db.sh ├── bin └── .gitignore ├── docker-compose.yml ├── go.mod ├── go.sum ├── main.go ├── pkg ├── config │ ├── config.go │ └── config_test.go └── redisdump │ ├── redisdump.go │ ├── redisdump_test.go │ └── tlsutils.go ├── utils └── generator │ └── main.go └── vendor ├── github.com └── mediocregopher │ └── radix │ └── v3 │ ├── .gitignore │ ├── CHANGELOG.md │ ├── CONTRIBUTING.md │ ├── LICENSE.txt │ ├── README.md │ ├── action.go │ ├── cluster.go │ ├── cluster_crc16.go │ ├── cluster_scanner.go │ ├── cluster_topo.go │ ├── conn.go │ ├── internal │ └── bytesutil │ │ └── bytesutil.go │ ├── pipeliner.go │ ├── pool.go │ ├── pubsub.go │ ├── pubsub_persistent.go │ ├── pubsub_stub.go │ ├── radix.go │ ├── resp │ ├── resp.go │ ├── resp2 │ │ └── resp.go │ └── util.go │ ├── scanner.go │ ├── sentinel.go │ ├── stream.go │ ├── stub.go │ ├── timer.go │ └── trace │ ├── cluster.go │ └── pool.go ├── golang.org └── x │ └── xerrors │ ├── LICENSE │ ├── PATENTS │ ├── README │ ├── adaptor.go │ ├── codereview.cfg │ ├── doc.go │ ├── errors.go │ ├── fmt.go │ ├── format.go │ ├── frame.go │ ├── internal │ └── internal.go │ └── wrap.go └── modules.txt /.gitconfig: -------------------------------------------------------------------------------- 1 | [safe] 2 | directory = /go/src/github.com/yannh/redis-dump-go 3 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | 
name: ci 2 | on: push 3 | 4 | jobs: 5 | test: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: checkout 9 | uses: actions/checkout@v2 10 | 11 | - name: test 12 | run: make docker-test 13 | 14 | - name: build 15 | run: make goreleaser-build-static 16 | 17 | - name: build-generator 18 | run: make docker-build-generator-static 19 | 20 | - name: acceptance-tests 21 | run: make acceptance-tests 22 | 23 | goreleaser: 24 | runs-on: ubuntu-latest 25 | needs: 26 | - test 27 | if: startsWith(github.ref, 'refs/tags/v') 28 | steps: 29 | - name: checkout 30 | uses: actions/checkout@v2 31 | with: 32 | fetch-depth: 0 # https://github.com/goreleaser/goreleaser-action/issues/56 33 | 34 | - name: goreleaser 35 | run: | 36 | echo "${{ github.token }}" | docker login https://ghcr.io -u ${GITHUB_ACTOR} --password-stdin 37 | make release 38 | env: 39 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dist 2 | bin 3 | acceptance-tests/tests/tls 4 | .idea 5 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | project_name: redis-dump-go 2 | builds: 3 | - main: ./ 4 | env: 5 | - CGO_ENABLED=0 6 | - GOFLAGS = -mod=vendor 7 | - GO111MODULE = on 8 | goos: 9 | - windows 10 | - linux 11 | - darwin 12 | goarch: 13 | - 386 14 | - amd64 15 | - arm 16 | - arm64 17 | flags: 18 | - -trimpath 19 | - -tags=netgo 20 | - -a 21 | ldflags: 22 | - -extldflags "-static" 23 | - -X main.version={{.Tag}} 24 | 25 | archives: 26 | - format: tar.gz 27 | format_overrides: 28 | - goos: windows 29 | format: zip 30 | name_template: "{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" 31 | 32 | dockers: 33 | - image_templates: 34 | - 'ghcr.io/yannh/redis-dump-go:latest' 35 | - 
'ghcr.io/yannh/redis-dump-go:{{ .Tag }}' 36 | - 'ghcr.io/yannh/redis-dump-go:{{ .Tag }}-amd64' 37 | dockerfile: Dockerfile 38 | build_flag_templates: 39 | - "--platform=linux/amd64" 40 | goos: linux 41 | goarch: amd64 42 | - image_templates: 43 | - 'ghcr.io/yannh/redis-dump-go:latest-alpine' 44 | - 'ghcr.io/yannh/redis-dump-go:{{ .Tag }}-alpine' 45 | - 'ghcr.io/yannh/redis-dump-go:{{ .Tag }}-amd64-alpine' 46 | dockerfile: Dockerfile-alpine 47 | build_flag_templates: 48 | - "--platform=linux/amd64" 49 | goos: linux 50 | goarch: amd64 51 | 52 | checksum: 53 | name_template: 'CHECKSUMS' 54 | 55 | snapshot: 56 | name_template: "{{ .Tag }}-next" 57 | 58 | changelog: 59 | sort: asc 60 | filters: 61 | exclude: 62 | - '^test:' -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:latest as certs 2 | RUN apk add ca-certificates 3 | 4 | FROM scratch AS redis-dump-go 5 | MAINTAINER Yann HAMON 6 | COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt 7 | COPY redis-dump-go / 8 | ENTRYPOINT ["/redis-dump-go"] -------------------------------------------------------------------------------- /Dockerfile-alpine: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20.2 2 | MAINTAINER Yann HAMON 3 | RUN apk add ca-certificates 4 | COPY redis-dump-go / 5 | ENTRYPOINT ["/redis-dump-go"] 6 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 17 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 19 | IN THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make 2 | 3 | .PHONY: test build build-static build-generator-static docker-image save-image push-image docker-test docker-build-static docker-build-generator-static release acceptance-tests 4 | 5 | RELEASE_VERSION ?= latest 6 | 7 | export GOFLAGS=-mod=vendor 8 | 9 | all: test build 10 | 11 | test: 12 | go test -race ./... 13 | go vet ./... 14 | 15 | build: 16 | go build -o bin/redis-dump-go 17 | 18 | build-static: 19 | git config --global --add safe.directory $$PWD 20 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o bin/redis-dump-go 21 | 22 | build-generator-static: 23 | git config --global --add safe.directory $$PWD 24 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o bin/generator ./utils/generator/main.go 25 | 26 | docker-image: 27 | docker build -t redis-dump-go:${RELEASE_VERSION} . 
28 | 29 | save-image: 30 | docker save --output redis-dump-go-image.tar redis-dump-go:${RELEASE_VERSION} 31 | 32 | push-image: 33 | docker tag redis-dump-go:latest ghcr.io/yannh/redis-dump-go:${RELEASE_VERSION} 34 | docker push ghcr.io/yannh/redis-dump-go:${RELEASE_VERSION} 35 | 36 | docker-test: 37 | docker run -t -v $$PWD:/go/src/github.com/yannh/redis-dump-go -w /go/src/github.com/yannh/redis-dump-go golang:1.22.5 make test 38 | 39 | docker-build-static: 40 | docker run -t -v $$PWD:/go/src/github.com/yannh/redis-dump-go -w /go/src/github.com/yannh/redis-dump-go golang:1.22.5 make build-static 41 | 42 | docker-build-generator-static: 43 | docker run -t -v $$PWD:/go/src/github.com/yannh/redis-dump-go -w /go/src/github.com/yannh/redis-dump-go golang:1.22.5 make build-generator-static 44 | 45 | goreleaser-build-static: 46 | docker run -t -e GOOS=linux -e GOARCH=amd64 -v $$PWD:/go/src/github.com/yannh/redis-dump-go -w /go/src/github.com/yannh/redis-dump-go goreleaser/goreleaser:v2.1.0 build --single-target --clean --snapshot 47 | cp dist/redis-dump-go_linux_amd64_v1/redis-dump-go bin/ 48 | 49 | release: 50 | docker run -e GITHUB_TOKEN -e GIT_OWNER -t -v /var/run/docker.sock:/var/run/docker.sock -v $$PWD:/go/src/github.com/yannh/redis-dump-go -w /go/src/github.com/yannh/redis-dump-go goreleaser/goreleaser:v1.22.1 release --clean 51 | 52 | acceptance-tests: docker-build-static docker-build-generator-static 53 | docker-compose run tests 54 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | [![Build status](https://github.com/yannh/redis-dump-go/workflows/build/badge.svg?branch=master)](https://github.com/yannh/redis-dump-go/actions?query=branch%3Amaster) [![go report card](https://goreportcard.com/badge/github.com/yannh/redis-dump-go)](https://goreportcard.com/report/github.com/yannh/redis-dump-go) 2 | 3 | # Redis-dump-go 4 | 5 | Dump Redis 
keys to a file. Similar in spirit to https://www.npmjs.com/package/redis-dump and https://github.com/delano/redis-dump but: 6 | 7 | * Will dump keys across **several processes & connections** 8 | * Uses SCAN rather than KEYS * for much **reduced memory footprint** with large databases 9 | * Easy to deploy & containerize - **single binary**. 10 | * Generates a [RESP](https://redis.io/topics/protocol) file rather than a JSON or a list of commands. This is **faster to ingest**, and [recommended by Redis](https://redis.io/topics/mass-insert) for mass-inserts. 11 | 12 | Warning: like similar tools, Redis-dump-go does NOT provide Point-in-Time backups. Please use [Redis backups methods](https://redis.io/topics/persistence) when possible. 13 | 14 | ## Features 15 | 16 | * Dumps all databases present on the Redis server 17 | * Keys TTL are preserved by default 18 | * Configurable Output (Redis commands, RESP) 19 | * Redis password-authentication 20 | 21 | ## Installation 22 | 23 | Download the appropriate version for your operating system on [the release page](https://github.com/yannh/redis-dump-go/releases), 24 | or use the [Docker image](https://github.com/users/yannh/packages/container/package/redis-dump-go): 25 | 26 | ```bash 27 | $ docker run ghcr.io/yannh/redis-dump-go:latest -h 28 | Usage of /redis-dump-go: 29 | [...] 30 | ``` 31 | _Bandwidth costs_: Redis-dump-go is hosted on Github Container Registry which is currently in Beta. During that period, 32 | [bandwidth is free](https://docs.github.com/en/packages/guides/about-github-container-registry). After that period, 33 | a Github Account might be required / bandwidth costs might be applicable. 
34 | 35 | ## Run 36 | 37 | ``` 38 | $ ./bin/redis-dump-go -h 39 | Usage of ./bin/redis-dump-go: 40 | -batchSize int 41 | HSET/RPUSH/SADD/ZADD only add 'batchSize' items at a time (default 1000) 42 | -db uint 43 | only dump this database (default: all databases) 44 | -filter string 45 | Key filter to use (default "*") 46 | -host string 47 | Server host (default "127.0.0.1") 48 | -n int 49 | Parallel workers (default 10) 50 | -noscan 51 | Use KEYS * instead of SCAN - for Redis <=2.8 52 | -output string 53 | Output type - can be resp or commands (default "resp") 54 | -port int 55 | Server port (default 6379) 56 | -s Silent mode (disable logging of progress / stats) 57 | -ttl 58 | Preserve Keys TTL (default true) 59 | 60 | $ ./bin/redis-dump-go > dump.resp 61 | Database 0: 9 element dumped 62 | Database 1: 1 element dumped 63 | ``` 64 | 65 | For password-protected Redis servers, set the shell variable REDISDUMPGO\_AUTH: 66 | 67 | ``` 68 | $ export REDISDUMPGO_AUTH=myRedisPassword 69 | $ redis-dump-go 70 | ``` 71 | 72 | ## Build 73 | 74 | Given a correctly configured Go environment: 75 | 76 | ``` 77 | $ go get github.com/yannh/redis-dump-go 78 | $ cd ${GOPATH}/src/github.com/yannh/redis-dump-go 79 | $ go test ./... 80 | $ go install 81 | ``` 82 | 83 | ## Importing the data 84 | 85 | ``` 86 | redis-cli --pipe < redis-backup.txt 87 | ``` 88 | 89 | ## Release Notes & Gotchas 90 | 91 | * By default, no cleanup is performed before inserting data. When importing the resulting file, hashes, sets and queues will be merged with data already present in the Redis. 92 | -------------------------------------------------------------------------------- /acceptance-tests/acceptance.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | @test "Prints help on -h" { 4 | run /redis-dump-go -h 5 | [ "${lines[0]}" == 'Usage: /redis-dump-go [OPTION]...' 
] 6 | [ "$status" -eq 0 ] 7 | } 8 | 9 | @test "fail on incorrect parameters" { 10 | run /redis-dump-go -fail 11 | [ "$status" -eq 1 ] 12 | } 13 | 14 | @test "Pass when using a non-default db" { 15 | run tests/select-db.sh 16 | [ "$status" -eq 0 ] 17 | } 18 | 19 | @test "Pass when using a non-default db, and a password" { 20 | run tests/select-db-with-password.sh 21 | [ "$status" -eq 0 ] 22 | } 23 | 24 | @test "Pass when using a non-default db, and a password with username" { 25 | run tests/select-db-with-username-password.sh 26 | [ "$status" -eq 0 ] 27 | } 28 | 29 | @test "Dumping / restoring all databases" { 30 | run tests/multiple-dbs.sh 31 | [ "$status" -eq 0 ] 32 | } 33 | 34 | # https://github.com/yannh/redis-dump-go/issues/11 35 | # https://github.com/yannh/redis-dump-go/issues/18 36 | @test "Pass when importing a ZSET with 1M entries" { 37 | run tests/large-zset.sh 38 | [ "$status" -eq 0 ] 39 | } 40 | 41 | @test "Pass when importing 1M key/values" { 42 | run tests/1mkeys.sh 43 | [ "$status" -eq 0 ] 44 | } 45 | -------------------------------------------------------------------------------- /acceptance-tests/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | echo "-> Installing Redis-cli and Bats" 4 | apk add redis bats ncurses 5 | 6 | echo "-> Waiting for Redis to start..." 7 | timeout 30 sh -c 'until redis-cli -h redis -p 6379 PING >/dev/null; do sleep 1; done' 8 | 9 | echo "-> Running acceptance tests..." 
10 | bats --tap acceptance.bats --verbose-run -------------------------------------------------------------------------------- /acceptance-tests/gen-test-certs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copied from https://github.com/redis/redis/blob/unstable/utils/gen-test-certs.sh 4 | # BSD License https://github.com/redis/redis/blob/unstable/COPYING 5 | 6 | # Generate some test certificates which are used by the regression test suite: 7 | # 8 | # tests/tls/ca.{crt,key} Self signed CA certificate. 9 | # tests/tls/redis.{crt,key} A certificate with no key usage/policy restrictions. 10 | # tests/tls/client.{crt,key} A certificate restricted for SSL client usage. 11 | # tests/tls/server.{crt,key} A certificate restricted for SSL server usage. 12 | # tests/tls/redis.dh DH Params file. 13 | 14 | generate_cert() { 15 | local name=$1 16 | local cn="$2" 17 | local opts="$3" 18 | 19 | local keyfile=tests/tls/${name}.key 20 | local certfile=tests/tls/${name}.crt 21 | 22 | [ -f $keyfile ] || openssl genrsa -out $keyfile 2048 23 | openssl req \ 24 | -new -sha256 \ 25 | -subj "/O=Redis Test/CN=$cn" \ 26 | -key $keyfile | \ 27 | openssl x509 \ 28 | -req -sha256 \ 29 | -CA tests/tls/ca.crt \ 30 | -CAkey tests/tls/ca.key \ 31 | -CAserial tests/tls/ca.txt \ 32 | -CAcreateserial \ 33 | -days 365 \ 34 | $opts \ 35 | -out $certfile 36 | } 37 | 38 | mkdir -p tests/tls 39 | [ -f tests/tls/ca.key ] || openssl genrsa -out tests/tls/ca.key 4096 40 | openssl req \ 41 | -x509 -new -nodes -sha256 \ 42 | -key tests/tls/ca.key \ 43 | -days 3650 \ 44 | -subj '/O=Redis Test/CN=Certificate Authority' \ 45 | -out tests/tls/ca.crt 46 | 47 | cat > tests/tls/openssl.cnf <<_END_ 48 | [ server_cert ] 49 | keyUsage = digitalSignature, keyEncipherment 50 | nsCertType = server 51 | [ client_cert ] 52 | keyUsage = digitalSignature, keyEncipherment 53 | nsCertType = client 54 | _END_ 55 | 56 | generate_cert server "Server-only" "-extfile 
tests/tls/openssl.cnf -extensions server_cert" 57 | generate_cert client "Client-only" "-extfile tests/tls/openssl.cnf -extensions client_cert" 58 | generate_cert redis "Generic-cert" 59 | 60 | [ -f tests/tls/redis.dh ] || openssl dhparam -out tests/tls/redis.dh 2048 -------------------------------------------------------------------------------- /acceptance-tests/redis-confs/users.acl: -------------------------------------------------------------------------------- 1 | user default on >somepassword allkeys allchannels +@all 2 | user test on >testpassword allkeys allchannels +@all 3 | -------------------------------------------------------------------------------- /acceptance-tests/redis-confs/with_password.conf: -------------------------------------------------------------------------------- 1 | port 6380 2 | requirepass somepassword 3 | -------------------------------------------------------------------------------- /acceptance-tests/redis-confs/with_username_and_password.conf: -------------------------------------------------------------------------------- 1 | port 6380 2 | aclfile /usr/local/etc/redis/users.acl -------------------------------------------------------------------------------- /acceptance-tests/tests/1mkeys.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | echo "-> Filling Redis with Mock Data..." 4 | redis-cli -h redis FLUSHDB 5 | /generator -output resp -type strings -n 1000000 | redis-cli -h redis --pipe 6 | DBSIZE=`redis-cli -h redis dbsize` 7 | 8 | echo "-> Dumping DB..." 9 | time /redis-dump-go -host redis -n 250 -output resp >backup 10 | 11 | echo "-> Flushing DB and restoring dump..." 12 | redis-cli -h redis FLUSHDB 13 | redis-cli -h redis --pipe Comparing DB sizes..." 
18 | if [ $DBSIZE -ne $NEWDBSIZE ]; then 19 | echo "ERROR - restored DB has $NEWDBSIZE elements, expected $DBSIZE" 20 | exit 1 21 | else 22 | echo "OK - $NEWDBSIZE elements" 23 | exit 0 24 | fi -------------------------------------------------------------------------------- /acceptance-tests/tests/large-zset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | echo "-> Filling Redis with Mock Data..." 4 | redis-cli -h redis FLUSHDB 5 | /generator -output resp -type zset -n 1000000 | redis-cli -h redis --pipe 6 | KEYNAME=`redis-cli -h redis KEYS '*'` 7 | COUNT=`redis-cli -h redis ZCOUNT $KEYNAME -inf +inf` 8 | 9 | echo "-> Dumping DB..." 10 | /redis-dump-go -host redis -output resp >backup 11 | 12 | echo "-> Flushing DB and restoring dump..." 13 | redis-cli -h redis FLUSHDB 14 | redis-cli -h redis --pipe Comparing ZSET sizes..." 19 | if [ $COUNT -ne $NEWCOUNT ]; then 20 | echo "ERROR - restored ZSET $KEYNAME has $NEWCOUNT elements, expected $COUNT" 21 | exit 1 22 | else 23 | echo "OK - ZSET $KEYNAME has $NEWCOUNT elements" 24 | exit 0 25 | fi -------------------------------------------------------------------------------- /acceptance-tests/tests/multiple-dbs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | NDBS=3 4 | 5 | echo "-> Filling Redis with Mock Data..." 6 | redis-cli -h redis FLUSHALL 7 | for DB in `seq 1 $NDBS`; do 8 | redis-cli -h redis -n $DB SET thisdb $DB 9 | done 10 | 11 | echo "-> Dumping DB..." 12 | time /redis-dump-go -host redis -n 250 -output commands >backup 13 | 14 | echo "-> Flushing DB and restoring dump..." 15 | redis-cli -h redis FLUSHALL 16 | redis-cli -h redis -n $DB --pipe Expecting right amount of DBS..." 
21 | if [ $NDBS -ne $NEWNDBS ]; then 22 | echo "ERROR - only $NEWNDBS found, expected $NDBS" 23 | exit 1 24 | else 25 | echo "OK - $NEWNDBS dbs" 26 | exit 0 27 | fi 28 | -------------------------------------------------------------------------------- /acceptance-tests/tests/select-db-with-password.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | export DB=2 4 | export REDIS_PORT=6380 5 | export REDISDUMPGO_AUTH=somepassword 6 | export REDISCMD="redis-cli -h redis_with_password -p $REDIS_PORT --pass $REDISDUMPGO_AUTH -n 2" 7 | 8 | echo "-> Filling Redis with Mock Data..." 9 | $REDISCMD FLUSHDB 10 | /generator -output resp -type strings -n 100 | $REDISCMD --pipe 11 | DBSIZE=`$REDISCMD dbsize` 12 | 13 | echo "-> Dumping DB..." 14 | time /redis-dump-go -host redis_with_password -n 250 -port $REDIS_PORT -db $DB -output resp >backup 15 | 16 | echo "-> Flushing DB and restoring dump..." 17 | $REDISCMD FLUSHDB 18 | $REDISCMD --pipe Comparing DB sizes..." 22 | if [ $DBSIZE -ne $NEWDBSIZE ]; then 23 | echo "ERROR - restored DB has $NEWDBSIZE elements, expected $DBSIZE" 24 | exit 1 25 | else 26 | echo "OK - $NEWDBSIZE elements" 27 | exit 0 28 | fi 29 | -------------------------------------------------------------------------------- /acceptance-tests/tests/select-db-with-username-password.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | export DB=2 4 | export REDIS_PORT=6380 5 | export REDIS_USER=test 6 | export REDISDUMPGO_AUTH=testpassword 7 | export REDISCMD="redis-cli -h redis_with_username_and_password -p $REDIS_PORT --user $REDIS_USER --pass $REDISDUMPGO_AUTH -n 2" 8 | echo $REDISCMD 9 | echo "-> Filling Redis with Mock Data..." 10 | $REDISCMD FLUSHDB 11 | /generator -output resp -type strings -n 100 | $REDISCMD --pipe 12 | DBSIZE=`$REDISCMD dbsize` 13 | 14 | echo "-> Dumping DB..." 
15 | time /redis-dump-go -host redis_with_username_and_password -n 250 -port $REDIS_PORT -db $DB -user $REDIS_USER -output resp >backup 16 | 17 | echo "-> Flushing DB and restoring dump..." 18 | $REDISCMD FLUSHDB 19 | $REDISCMD --pipe Comparing DB sizes..." 23 | if [ $DBSIZE -ne $NEWDBSIZE ]; then 24 | echo "ERROR - restored DB has $NEWDBSIZE elements, expected $DBSIZE" 25 | exit 1 26 | else 27 | echo "OK - $NEWDBSIZE elements" 28 | exit 0 29 | fi 30 | -------------------------------------------------------------------------------- /acceptance-tests/tests/select-db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | export DB=2 4 | 5 | echo "-> Filling Redis with Mock Data..." 6 | redis-cli -h redis -n $DB FLUSHDB 7 | /generator -output resp -type strings -n 100 | redis-cli -h redis -n $DB --pipe 8 | DBSIZE=`redis-cli -h redis -n $DB dbsize` 9 | 10 | echo "-> Dumping DB..." 11 | time /redis-dump-go -host redis -n 250 -db $DB -output resp >backup 12 | 13 | echo "-> Flushing DB and restoring dump..." 14 | redis-cli -h redis -n $DB FLUSHDB 15 | redis-cli -h redis -n $DB --pipe Comparing DB sizes..." 
20 | if [ $DBSIZE -ne $NEWDBSIZE ]; then 21 | echo "ERROR - restored DB has $NEWDBSIZE elements, expected $DBSIZE" 22 | exit 1 23 | else 24 | echo "OK - $NEWDBSIZE elements" 25 | exit 0 26 | fi 27 | -------------------------------------------------------------------------------- /bin/.gitignore: -------------------------------------------------------------------------------- 1 | redis-dump-go -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.3" 2 | 3 | services: 4 | redis: 5 | image: "redis:alpine" 6 | ports: 7 | - "6379:6379" 8 | redis_with_password: 9 | image: "redis:alpine" 10 | volumes: 11 | - ./acceptance-tests/redis-confs:/usr/local/etc/redis 12 | ports: 13 | - target: 6380 14 | published: 6380 15 | protocol: tcp 16 | mode: host 17 | entrypoint: ["/usr/local/bin/redis-server", "/usr/local/etc/redis/with_password.conf"] 18 | redis_with_username_and_password: 19 | image: "redis:alpine" 20 | volumes: 21 | - ./acceptance-tests/redis-confs:/usr/local/etc/redis 22 | ports: 23 | - target: 6381 24 | published: 6381 25 | protocol: tcp 26 | mode: host 27 | entrypoint: ["/usr/local/bin/redis-server", "/usr/local/etc/redis/with_username_and_password.conf"] 28 | tests: 29 | image: "alpine:latest" 30 | volumes: 31 | - ./acceptance-tests:/acceptance-tests 32 | - ./bin/generator:/generator 33 | - ./bin/redis-dump-go:/redis-dump-go 34 | depends_on: 35 | - "redis" 36 | - "redis_with_password" 37 | - "redis_with_username_and_password" 38 | working_dir: /acceptance-tests 39 | entrypoint: ["/acceptance-tests/entrypoint.sh"] 40 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/yannh/redis-dump-go 2 | 3 | go 1.22 4 | 5 | require github.com/mediocregopher/radix/v3 v3.8.1 6 | 7 | require 
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect 8 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= 4 | github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= 5 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 6 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 7 | github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 8 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 9 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 10 | golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= 11 | golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= 12 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "log" 7 | "os" 8 | "sync" 9 | 10 | "github.com/yannh/redis-dump-go/pkg/config" 11 | "github.com/yannh/redis-dump-go/pkg/redisdump" 12 | ) 13 | 14 | type progressLogger struct { 15 | stats map[uint8]int 16 | } 17 | 18 | func newProgressLogger() *progressLogger { 19 | return &progressLogger{ 20 | stats: map[uint8]int{}, 21 | } 22 | } 23 | 24 | func (p *progressLogger) drawProgress(to io.Writer, db uint8, nDumped int) { 25 | if _, ok := 
p.stats[db]; !ok && len(p.stats) > 0 { 26 | // We switched database, write to a new line 27 | fmt.Fprintf(to, "\n") 28 | } 29 | 30 | p.stats[db] = nDumped 31 | if nDumped == 0 { 32 | return 33 | } 34 | 35 | fmt.Fprintf(to, "\rDatabase %d: %d element dumped", db, nDumped) 36 | } 37 | 38 | func realMain() int { 39 | var err error 40 | 41 | c, outBuf, err := config.FromFlags(os.Args[0], os.Args[1:]) 42 | if outBuf != "" { 43 | out := os.Stderr 44 | errCode := 1 45 | if c.Help { 46 | out = os.Stdout 47 | errCode = 0 48 | } 49 | fmt.Fprintln(out, outBuf) 50 | return errCode 51 | } 52 | if err != nil { 53 | fmt.Fprintf(os.Stderr, "failed parsing command line: %s\n", err.Error()) 54 | return 1 55 | } 56 | 57 | var tlshandler *redisdump.TlsHandler = nil 58 | if c.Tls == true { 59 | tlshandler, err = redisdump.NewTlsHandler(c.CaCert, c.Cert, c.Key, c.Insecure) 60 | if err != nil { 61 | fmt.Fprintln(os.Stderr, err.Error()) 62 | return 1 63 | } 64 | } 65 | 66 | var serializer func([]string) string 67 | switch c.Output { 68 | case "resp": 69 | serializer = redisdump.RESPSerializer 70 | 71 | case "commands": 72 | serializer = redisdump.RedisCmdSerializer 73 | 74 | default: 75 | log.Fatalf("Failed parsing parameter flag: can only be resp or commands") 76 | } 77 | 78 | redisPassword := os.Getenv("REDISDUMPGO_AUTH") 79 | 80 | progressNotifs := make(chan redisdump.ProgressNotification) 81 | var wg sync.WaitGroup 82 | wg.Add(1) 83 | 84 | defer func() { 85 | close(progressNotifs) 86 | wg.Wait() 87 | if !(c.Silent) { 88 | fmt.Fprint(os.Stderr, "\n") 89 | } 90 | }() 91 | 92 | pl := newProgressLogger() 93 | go func() { 94 | for n := range progressNotifs { 95 | if !(c.Silent) { 96 | pl.drawProgress(os.Stderr, n.Db, n.Done) 97 | } 98 | } 99 | wg.Done() 100 | }() 101 | 102 | logger := log.New(os.Stdout, "", 0) 103 | 104 | var db = new(uint8) 105 | // If the user passed a db as parameter, we only dump that db 106 | if c.Db >= 0 { 107 | *db = uint8(c.Db) 108 | } else { 109 | db = 
redisdump.AllDBs 110 | } 111 | 112 | s := redisdump.Host{ 113 | Host: c.Host, 114 | Port: c.Port, 115 | Username: c.Username, 116 | Password: redisPassword, 117 | TlsHandler: tlshandler, 118 | } 119 | 120 | if err = redisdump.DumpServer(s, db, c.Filter, c.NWorkers, c.WithTTL, c.BatchSize, c.Noscan, logger, serializer, progressNotifs); err != nil { 121 | fmt.Fprintf(os.Stderr, "%s", err) 122 | return 1 123 | } 124 | 125 | return 0 126 | } 127 | 128 | func main() { 129 | os.Exit(realMain()) 130 | } 131 | -------------------------------------------------------------------------------- /pkg/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | "flag" 6 | "fmt" 7 | ) 8 | 9 | type Config struct { 10 | Host string 11 | Port int 12 | Db int 13 | Username string 14 | Filter string 15 | Noscan bool 16 | BatchSize int 17 | NWorkers int 18 | WithTTL bool 19 | Output string 20 | Silent bool 21 | Tls bool 22 | Insecure bool 23 | CaCert string 24 | Cert string 25 | Key string 26 | Help bool 27 | } 28 | 29 | func isFlagPassed(flags *flag.FlagSet, name string) bool { 30 | found := false 31 | flags.Visit(func(f *flag.Flag) { 32 | if f.Name == name { 33 | found = true 34 | } 35 | }) 36 | return found 37 | } 38 | 39 | func FromFlags(progName string, args []string) (Config, string, error) { 40 | c := Config{} 41 | 42 | flags := flag.NewFlagSet(progName, flag.ContinueOnError) 43 | var outBuf bytes.Buffer 44 | flags.SetOutput(&outBuf) 45 | 46 | flags.StringVar(&c.Host, "host", "127.0.0.1", "Server host") 47 | flags.IntVar(&c.Port, "port", 6379, "Server port") 48 | flags.IntVar(&c.Db, "db", -1, "only dump this database (default: all databases)") 49 | flags.StringVar(&c.Username, "user", "", "Username") 50 | flags.StringVar(&c.Filter, "filter", "*", "Key filter to use") 51 | flags.BoolVar(&c.Noscan, "noscan", false, "Use KEYS * instead of SCAN - for Redis <=2.8") 52 | flags.IntVar(&c.BatchSize, 
"batchSize", 1000, "HSET/RPUSH/SADD/ZADD only add 'batchSize' items at a time") 53 | flags.IntVar(&c.NWorkers, "n", 10, "Parallel workers") 54 | flags.BoolVar(&c.WithTTL, "ttl", true, "Preserve Keys TTL") 55 | flags.StringVar(&c.Output, "output", "resp", "Output type - can be resp or commands") 56 | flags.BoolVar(&c.Silent, "s", false, "Silent mode (disable logging of progress / stats)") 57 | flags.BoolVar(&c.Tls, "tls", false, "Establish a secure TLS connection") 58 | flags.BoolVar(&c.Insecure, "insecure", false, "Allow insecure TLS connection by skipping cert validation") 59 | flags.StringVar(&c.CaCert, "cacert", "", "CA Certificate file to verify with") 60 | flags.StringVar(&c.Cert, "cert", "", "Private key file to authenticate with") 61 | flags.StringVar(&c.Key, "key", "", "SSL private key file path") 62 | flags.BoolVar(&c.Help, "h", false, "show help information") 63 | flags.Usage = func() { 64 | fmt.Fprintf(&outBuf, "Usage: %s [OPTION]...\n", progName) 65 | flags.PrintDefaults() 66 | } 67 | 68 | err := flags.Parse(args) 69 | 70 | if c.Help { 71 | flags.Usage() 72 | } 73 | 74 | return c, outBuf.String(), err 75 | } 76 | -------------------------------------------------------------------------------- /pkg/config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestFromFlags(t *testing.T) { 9 | testCases := []struct { 10 | args []string 11 | conf Config 12 | }{ 13 | { 14 | []string{}, 15 | Config{ 16 | Db: -1, 17 | Host: "127.0.0.1", 18 | Port: 6379, 19 | Filter: "*", 20 | BatchSize: 1000, 21 | NWorkers: 10, 22 | WithTTL: true, 23 | Output: "resp", 24 | Insecure: false, 25 | }, 26 | }, 27 | { 28 | []string{"-db", "2"}, 29 | Config{ 30 | Db: 2, 31 | Host: "127.0.0.1", 32 | Port: 6379, 33 | Filter: "*", 34 | BatchSize: 1000, 35 | NWorkers: 10, 36 | WithTTL: true, 37 | Output: "resp", 38 | Insecure: false, 39 | }, 40 | }, 41 | { 42 | 
[]string{"-ttl=false"}, 43 | Config{ 44 | Db: -1, 45 | Host: "127.0.0.1", 46 | Port: 6379, 47 | Filter: "*", 48 | BatchSize: 1000, 49 | NWorkers: 10, 50 | WithTTL: false, 51 | Output: "resp", 52 | Insecure: false, 53 | }, 54 | }, 55 | { 56 | []string{"-host", "redis", "-port", "1234", "-batchSize", "10", "-n", "5", "-output", "commands"}, 57 | Config{ 58 | Db: -1, 59 | Host: "redis", 60 | Port: 1234, 61 | Filter: "*", 62 | BatchSize: 10, 63 | NWorkers: 5, 64 | WithTTL: true, 65 | Output: "commands", 66 | Insecure: false, 67 | }, 68 | }, 69 | { 70 | []string{"-host", "redis", "-port", "1234", "-batchSize", "10", "-user", "test", "-insecure"}, 71 | Config{ 72 | Db: -1, 73 | Host: "redis", 74 | Port: 1234, 75 | Filter: "*", 76 | BatchSize: 10, 77 | NWorkers: 10, 78 | WithTTL: true, 79 | Output: "resp", 80 | Username: "test", 81 | Insecure: true, 82 | }, 83 | }, 84 | { 85 | []string{"-host", "redis", "-port", "1234", "-batchSize", "10", "-user", "test"}, 86 | Config{ 87 | Db: -1, 88 | Host: "redis", 89 | Port: 1234, 90 | Filter: "*", 91 | BatchSize: 10, 92 | NWorkers: 10, 93 | WithTTL: true, 94 | Output: "resp", 95 | Username: "test", 96 | }, 97 | }, 98 | { 99 | []string{"-db", "1"}, 100 | Config{ 101 | Db: 1, 102 | Host: "127.0.0.1", 103 | Port: 6379, 104 | Filter: "*", 105 | BatchSize: 1000, 106 | NWorkers: 10, 107 | WithTTL: true, 108 | Output: "resp", 109 | Insecure: false, 110 | }, 111 | }, 112 | { 113 | []string{"-h"}, 114 | Config{ 115 | Db: -1, 116 | Host: "127.0.0.1", 117 | Port: 6379, 118 | Filter: "*", 119 | BatchSize: 1000, 120 | NWorkers: 10, 121 | WithTTL: true, 122 | Output: "resp", 123 | Help: true, 124 | Insecure: false, 125 | }, 126 | }, 127 | } 128 | 129 | for i, testCase := range testCases { 130 | cfg, _, _ := FromFlags("redis-dump-go", testCase.args) 131 | if reflect.DeepEqual(cfg, testCase.conf) != true { 132 | t.Errorf("test %d: failed parsing config - expected , got: \n%+v\n%+v", i, testCase.conf, cfg) 133 | } 134 | } 135 | } 136 | 
// ttlToRedisCmd returns an EXPIREAT command restoring a relative TTL
// (val, in seconds) as an absolute unix timestamp.
func ttlToRedisCmd(k string, val int64) []string {
	return []string{"EXPIREAT", k, fmt.Sprint(time.Now().Unix() + val)}
}

// stringToRedisCmd returns the SET command recreating a string key.
func stringToRedisCmd(k, val string) []string {
	return []string{"SET", k, val}
}

// appendBatched groups elems into commands [cmdName, key, elems...] with at
// most batchSize elements per command, where one "element" is stride
// consecutive entries of elems (1 for SADD/RPUSH members, 2 for HSET
// field/value pairs). Empty input yields an empty (non-nil) result.
func appendBatched(cmdName, key string, elems []string, stride, batchSize int) [][]string {
	cmds := [][]string{}
	cmd := []string{cmdName, key}
	n := 0
	for i := 0; i+stride <= len(elems); i += stride {
		if n >= batchSize {
			n = 0
			cmds = append(cmds, cmd)
			cmd = []string{cmdName, key}
		}
		cmd = append(cmd, elems[i:i+stride]...)
		n++
	}
	if n > 0 {
		cmds = append(cmds, cmd)
	}
	return cmds
}

// hashToRedisCmds returns the HSET commands recreating a hash, batchSize
// field/value pairs per command. Map iteration order is random, so the
// distribution of pairs across commands is unspecified.
func hashToRedisCmds(hashKey string, val map[string]string, batchSize int) [][]string {
	pairs := make([]string, 0, 2*len(val))
	for k, v := range val {
		pairs = append(pairs, k, v)
	}
	return appendBatched("HSET", hashKey, pairs, 2, batchSize)
}

// setToRedisCmds returns the SADD commands recreating a set, batchSize
// members per command.
func setToRedisCmds(setKey string, val []string, batchSize int) [][]string {
	return appendBatched("SADD", setKey, val, 1, batchSize)
}

// listToRedisCmds returns the RPUSH commands recreating a list, batchSize
// elements per command; RPUSH preserves the original element order.
func listToRedisCmds(listKey string, val []string, batchSize int) [][]string {
	return appendBatched("RPUSH", listKey, val, 1, batchSize)
}
// zsetToRedisCmds returns the ZADD commands recreating a sorted set.
// val alternates member, score (the layout of ZRANGEBYSCORE ... WITHSCORES);
// ZADD expects score then member, so each pair is swapped. Large sets are
// split into commands of at most batchSize member/score pairs.
func zsetToRedisCmds(zsetKey string, val []string, batchSize int) [][]string {
	cmds := [][]string{}
	cmd := []string{"ZADD", zsetKey}
	n := 0
	for i := 0; i+1 < len(val); i += 2 {
		if n >= batchSize {
			n = 0
			cmds = append(cmds, cmd)
			cmd = []string{"ZADD", zsetKey}
		}
		member, score := val[i], val[i+1]
		cmd = append(cmd, score, member)
		n++
	}
	if n > 0 {
		cmds = append(cmds, cmd)
	}
	return cmds
}

// Serializer converts a Redis command (verb plus arguments) to its textual
// or wire representation.
type Serializer func([]string) string

// RedisCmdSerializer serializes cmd as a human-readable redis-cli style
// command. Arguments containing spaces, and empty arguments, are wrapped in
// double quotes so the output remains replayable.
func RedisCmdSerializer(cmd []string) string {
	if len(cmd) == 0 {
		return ""
	}

	// Write pieces directly into the Builder; the original routed every
	// argument through fmt.Sprintf, which allocates needlessly.
	buf := strings.Builder{}
	buf.WriteString(cmd[0])
	for i := 1; i < len(cmd); i++ {
		buf.WriteByte(' ')
		if strings.Contains(cmd[i], " ") || len(cmd[i]) == 0 {
			buf.WriteByte('"')
			buf.WriteString(cmd[i])
			buf.WriteByte('"')
		} else {
			buf.WriteString(cmd[i])
		}
	}

	return buf.String()
}

// RESPSerializer serializes cmd to the RESP wire protocol: an array header
// followed by one bulk string per argument. Lengths are byte counts, which
// keeps multi-byte UTF-8 arguments correct.
func RESPSerializer(cmd []string) string {
	buf := strings.Builder{}
	buf.WriteString("*" + strconv.Itoa(len(cmd)) + "\r\n")
	for _, arg := range cmd {
		buf.WriteString("$" + strconv.Itoa(len(arg)) + "\r\n" + arg + "\r\n")
	}
	return buf.String()
}
164 | case "string": 165 | var val string 166 | if err = client.Do(cmd(&val, "GET", key)); err != nil { 167 | return err 168 | } 169 | redisCmds = [][]string{stringToRedisCmd(key, val)} 170 | 171 | case "list": 172 | var val []string 173 | if err = client.Do(cmd(&val, "LRANGE", key, "0", "-1")); err != nil { 174 | return err 175 | } 176 | redisCmds = listToRedisCmds(key, val, batchSize) 177 | 178 | case "set": 179 | var val []string 180 | if err = client.Do(cmd(&val, "SMEMBERS", key)); err != nil { 181 | return err 182 | } 183 | redisCmds = setToRedisCmds(key, val, batchSize) 184 | 185 | case "hash": 186 | var val map[string]string 187 | if err = client.Do(cmd(&val, "HGETALL", key)); err != nil { 188 | return err 189 | } 190 | redisCmds = hashToRedisCmds(key, val, batchSize) 191 | 192 | case "zset": 193 | var val []string 194 | if err = client.Do(cmd(&val, "ZRANGEBYSCORE", key, "-inf", "+inf", "WITHSCORES")); err != nil { 195 | return err 196 | } 197 | redisCmds = zsetToRedisCmds(key, val, batchSize) 198 | 199 | case "none": 200 | 201 | default: 202 | return fmt.Errorf("Key %s is of unreconized type %s", key, keyType) 203 | } 204 | 205 | for _, redisCmd := range redisCmds { 206 | logger.Print(serializer(redisCmd)) 207 | } 208 | 209 | if withTTL { 210 | var ttl int64 211 | if err = client.Do(cmd(&ttl, "TTL", key)); err != nil { 212 | return err 213 | } 214 | if ttl > 0 { 215 | cmd := ttlToRedisCmd(key, ttl) 216 | logger.Print(serializer(cmd)) 217 | } 218 | } 219 | } 220 | 221 | return nil 222 | } 223 | 224 | func dumpKeysWorker(client radix.Client, keyBatches <-chan []string, withTTL bool, batchSize int, logger *log.Logger, serializer Serializer, errors chan<- error, done chan<- bool) { 225 | for keyBatch := range keyBatches { 226 | if err := dumpKeys(client, radix.Cmd, keyBatch, withTTL, batchSize, logger, serializer); err != nil { 227 | errors <- err 228 | } 229 | } 230 | done <- true 231 | } 232 | 233 | // ProgressNotification message indicates the progress in 
// ProgressNotification indicates the progress in dumping the Redis server,
// and can be used to provide a progress visualisation such as a progress
// bar. Db is the database being dumped; Done is the number of keys dumped
// from it so far.
type ProgressNotification struct {
	Db   uint8
	Done int
}

// parseKeyspaceInfo extracts the database indexes present in the output of
// the "INFO keyspace" command (lines such as "db0:keys=2,expires=1,...").
// Lines that do not describe a database are ignored.
func parseKeyspaceInfo(keyspaceInfo string) ([]uint8, error) {
	var dbs []uint8

	scanner := bufio.NewScanner(strings.NewReader(keyspaceInfo))

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		if !strings.HasPrefix(line, "db") {
			continue
		}

		// "dbN:keys=...": the index sits between "db" and the first colon.
		colon := strings.Index(line, ":")
		if colon < 0 {
			// Malformed "db" line without a colon: skip it instead of
			// panicking on an out-of-range slice (the original would).
			continue
		}

		dbIndex, err := strconv.ParseUint(line[2:colon], 10, 8)
		if err != nil {
			return nil, err
		}

		dbs = append(dbs, uint8(dbIndex))
	}

	return dbs, scanner.Err()
}
func min(a, b int) int { 298 | if a <= b { 299 | return a 300 | } 301 | return b 302 | } 303 | 304 | func scanKeysLegacy(client radix.Client, cmd radixCmder, db uint8, keyBatchSize int, filter string, keyBatches chan<- []string, progressNotifications chan<- ProgressNotification) error { 305 | var err error 306 | var keys []string 307 | if err = client.Do(cmd(&keys, "KEYS", filter)); err != nil { 308 | return err 309 | } 310 | 311 | for i := 0; i < len(keys); i += keyBatchSize { 312 | batchEnd := min(i+keyBatchSize, len(keys)) 313 | keyBatches <- keys[i:batchEnd] 314 | if progressNotifications != nil { 315 | progressNotifications <- ProgressNotification{db, i} 316 | } 317 | } 318 | 319 | return nil 320 | } 321 | 322 | // RedisURL builds a connect URL given a Host, port, db & password 323 | func RedisURL(redisHost string, redisPort string) string { 324 | return fmt.Sprintf("redis://%s:%s", redisHost, redisPort) 325 | } 326 | 327 | func redisDialOpts(redisUsername string, redisPassword string, tlsHandler *TlsHandler, db *uint8) ([]radix.DialOpt, error) { 328 | dialOpts := []radix.DialOpt{ 329 | radix.DialTimeout(5 * time.Minute), 330 | } 331 | if redisPassword != "" { 332 | if redisUsername != "" { 333 | dialOpts = append(dialOpts, radix.DialAuthUser(redisUsername, redisPassword)) 334 | } else { 335 | dialOpts = append(dialOpts, radix.DialAuthPass(redisPassword)) 336 | } 337 | } 338 | if tlsHandler != nil { 339 | tlsCfg, err := tlsConfig(tlsHandler) 340 | if err != nil { 341 | return nil, err 342 | } 343 | dialOpts = append(dialOpts, radix.DialUseTLS(tlsCfg)) 344 | } 345 | 346 | if db != nil { 347 | dialOpts = append(dialOpts, radix.DialSelectDB(int(*db))) 348 | } 349 | 350 | return dialOpts, nil 351 | } 352 | 353 | func dumpDB(client radix.Client, db *uint8, filter string, nWorkers int, withTTL bool, batchSize int, noscan bool, logger *log.Logger, serializer Serializer, progress chan<- ProgressNotification) error { 354 | keyGenerator := scanKeys 355 | if noscan { 356 
| keyGenerator = scanKeysLegacy 357 | } 358 | 359 | logger.Print(serializer([]string{"SELECT", fmt.Sprint(*db)})) 360 | 361 | errors := make(chan error) 362 | nErrors := 0 363 | go func() { 364 | for err := range errors { 365 | fmt.Fprintln(os.Stderr, "Error: "+err.Error()) 366 | nErrors++ 367 | } 368 | }() 369 | 370 | done := make(chan bool) 371 | keyBatches := make(chan []string) 372 | for i := 0; i < nWorkers; i++ { 373 | go dumpKeysWorker(client, keyBatches, withTTL, batchSize, logger, serializer, errors, done) 374 | } 375 | 376 | keyGenerator(client, radix.Cmd, *db, 100, filter, keyBatches, progress) 377 | close(keyBatches) 378 | 379 | for i := 0; i < nWorkers; i++ { 380 | <-done 381 | } 382 | 383 | return nil 384 | } 385 | 386 | type Host struct { 387 | Host string 388 | Port int 389 | Username string 390 | Password string 391 | TlsHandler *TlsHandler 392 | } 393 | 394 | // DumpServer dumps all Keys from the redis server given by redisURL, 395 | // to the Logger logger. Progress notification informations 396 | // are regularly sent to the channel progressNotifications 397 | func DumpServer(s Host, db *uint8, filter string, nWorkers int, withTTL bool, batchSize int, noscan bool, logger *log.Logger, serializer func([]string) string, progress chan<- ProgressNotification) error { 398 | redisURL := RedisURL(s.Host, fmt.Sprint(s.Port)) 399 | getConnFunc := func(db *uint8) func(network, addr string) (radix.Conn, error) { 400 | return func(network, addr string) (radix.Conn, error) { 401 | dialOpts, err := redisDialOpts(s.Username, s.Password, s.TlsHandler, db) 402 | if err != nil { 403 | return nil, err 404 | } 405 | 406 | return radix.Dial(network, addr, dialOpts...) 
407 | } 408 | } 409 | 410 | dbs := []uint8{} 411 | if db != AllDBs { 412 | dbs = []uint8{*db} 413 | } else { 414 | client, err := radix.NewPool("tcp", redisURL, nWorkers, radix.PoolConnFunc(getConnFunc(nil))) 415 | if err != nil { 416 | return err 417 | } 418 | 419 | dbs, err = getDBIndexes(client) 420 | if err != nil { 421 | return err 422 | } 423 | client.Close() 424 | } 425 | 426 | for _, db := range dbs { 427 | client, err := radix.NewPool("tcp", redisURL, nWorkers, radix.PoolConnFunc(getConnFunc(&db))) 428 | if err != nil { 429 | return err 430 | } 431 | defer client.Close() 432 | 433 | if err = dumpDB(client, &db, filter, nWorkers, withTTL, batchSize, noscan, logger, serializer, progress); err != nil { 434 | return err 435 | } 436 | } 437 | 438 | return nil 439 | } 440 | -------------------------------------------------------------------------------- /pkg/redisdump/redisdump_test.go: -------------------------------------------------------------------------------- 1 | package redisdump 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "fmt" 7 | "io" 8 | "log" 9 | "regexp" 10 | "strings" 11 | "testing" 12 | 13 | "github.com/mediocregopher/radix/v3" 14 | ) 15 | 16 | func testEqString(a, b []string) bool { 17 | 18 | if a == nil && b == nil { 19 | return true 20 | } 21 | 22 | if a == nil || b == nil { 23 | return false 24 | } 25 | 26 | if len(a) != len(b) { 27 | return false 28 | } 29 | 30 | for i := range a { 31 | if a[i] != b[i] { 32 | return false 33 | } 34 | } 35 | 36 | return true 37 | } 38 | 39 | func testEqUint8(a, b []uint8) bool { 40 | // If one is nil, the other must also be nil. 
41 | if (a == nil) != (b == nil) { 42 | return false 43 | } 44 | if len(a) != len(b) { 45 | return false 46 | } 47 | for i := range a { 48 | if a[i] != b[i] { 49 | return false 50 | } 51 | } 52 | 53 | return true 54 | } 55 | 56 | func TestStringToRedisCmd(t *testing.T) { 57 | type testCase struct { 58 | key, value string 59 | expected []string 60 | } 61 | 62 | testCases := []testCase{ 63 | {key: "city", value: "Paris", expected: []string{"SET", "city", "Paris"}}, 64 | {key: "fullname", value: "Jean-Paul Sartre", expected: []string{"SET", "fullname", "Jean-Paul Sartre"}}, 65 | {key: "unicode", value: "😈", expected: []string{"SET", "unicode", "😈"}}, 66 | } 67 | 68 | for _, test := range testCases { 69 | res := stringToRedisCmd(test.key, test.value) 70 | if !testEqString(res, test.expected) { 71 | t.Errorf("Failed generating redis command from string for: %s %s", test.key, test.value) 72 | } 73 | } 74 | } 75 | 76 | func TestHashToRedisCmds(t *testing.T) { 77 | type testCase struct { 78 | key string 79 | value map[string]string 80 | cmdMaxLen int 81 | expected [][]string 82 | } 83 | 84 | testCases := []testCase{ 85 | {key: "Paris", value: map[string]string{"country": "France", "weather": "sunny", "poi": "Tour Eiffel"}, cmdMaxLen: 1, expected: [][]string{{"HSET", "Paris", "country", "France"}, {"HSET", "Paris", "weather", "sunny"}, {"HSET", "Paris", "poi", "Tour Eiffel"}}}, 86 | {key: "Paris", value: map[string]string{"country": "France", "weather": "sunny", "poi": "Tour Eiffel"}, cmdMaxLen: 2, expected: [][]string{{"HSET", "Paris", "country", "France", "weather", "sunny"}, {"HSET", "Paris", "poi", "Tour Eiffel"}}}, 87 | {key: "Paris", value: map[string]string{"country": "France", "weather": "sunny", "poi": "Tour Eiffel"}, cmdMaxLen: 3, expected: [][]string{{"HSET", "Paris", "country", "France", "weather", "sunny", "poi", "Tour Eiffel"}}}, 88 | {key: "Paris", value: map[string]string{"country": "France", "weather": "sunny", "poi": "Tour Eiffel"}, cmdMaxLen: 4, expected: 
[][]string{{"HSET", "Paris", "country", "France", "weather", "sunny", "poi", "Tour Eiffel"}}}, 89 | } 90 | 91 | for _, test := range testCases { 92 | res := hashToRedisCmds(test.key, test.value, test.cmdMaxLen) 93 | for i := 0; i < len(res); i++ { 94 | for j := 2; j < len(res[i]); j += 2 { 95 | found := false 96 | for k := 0; k < len(test.expected); k++ { 97 | for l := 2; l < len(test.expected[k]); l += 2 { 98 | if res[i][j] == test.expected[k][l] && res[i][j+1] == test.expected[k][l+1] { 99 | found = true 100 | } 101 | } 102 | } 103 | 104 | if found == false { 105 | t.Errorf("Failed generating redis command from Hash for: %s %s, got %s", test.key, test.value, res) 106 | } 107 | } 108 | } 109 | } 110 | } 111 | 112 | func TestSetToRedisCmds(t *testing.T) { 113 | type testCase struct { 114 | key string 115 | value []string 116 | cmdMaxLen int 117 | expected [][]string 118 | } 119 | 120 | testCases := []testCase{ 121 | {key: "myset", value: []string{"1", "2", "3"}, cmdMaxLen: 1, expected: [][]string{{"SADD", "myset", "1"}, {"SADD", "myset", "2"}, {"SADD", "myset", "3"}}}, 122 | {key: "myset", value: []string{"1", "2", "3"}, cmdMaxLen: 2, expected: [][]string{{"SADD", "myset", "1", "2"}, {"SADD", "myset", "3"}}}, 123 | {key: "myset", value: []string{"1", "2", "3"}, cmdMaxLen: 3, expected: [][]string{{"SADD", "myset", "1", "2", "3"}}}, 124 | {key: "myset", value: []string{"1", "2", "3"}, cmdMaxLen: 4, expected: [][]string{{"SADD", "myset", "1", "2", "3"}}}, 125 | } 126 | 127 | for _, testCase := range testCases { 128 | res := setToRedisCmds(testCase.key, testCase.value, testCase.cmdMaxLen) 129 | if len(testCase.expected) != len(res) { 130 | t.Errorf("Failed generating redis command from SET for %s %s %d: got %s", testCase.key, testCase.value, testCase.cmdMaxLen, res) 131 | continue 132 | } 133 | 134 | for i := 0; i < len(testCase.expected); i++ { 135 | if len(testCase.expected[i]) != len(res[i]) { 136 | t.Errorf("Failed generating redis command from SET for %s %s %d: 
got %s", testCase.key, testCase.value, testCase.cmdMaxLen, res) 137 | continue 138 | } 139 | for j := 0; j < len(testCase.expected[i]); j++ { 140 | if res[i][j] != testCase.expected[i][j] { 141 | t.Errorf("Failed generating redis command from SET for %s %s %d: got %s", testCase.key, testCase.value, testCase.cmdMaxLen, res) 142 | } 143 | } 144 | } 145 | } 146 | } 147 | 148 | func TestZsetToRedisCmds(t *testing.T) { 149 | type testCase struct { 150 | key string 151 | value []string 152 | cmdMaxLen int 153 | expected [][]string 154 | } 155 | 156 | testCases := []testCase{ 157 | {key: "todo", value: []string{"task1", "1", "task2", "2", "task3", "3"}, cmdMaxLen: 1, expected: [][]string{{"ZADD", "todo", "1", "task1"}, {"ZADD", "todo", "2", "task2"}, {"ZADD", "todo", "3", "task3"}}}, 158 | {key: "todo", value: []string{"task1", "1", "task2", "2", "task3", "3"}, cmdMaxLen: 2, expected: [][]string{{"ZADD", "todo", "1", "task1", "2", "task2"}, {"ZADD", "todo", "3", "task3"}}}, 159 | {key: "todo", value: []string{"task1", "1", "task2", "2", "task3", "3"}, cmdMaxLen: 3, expected: [][]string{{"ZADD", "todo", "1", "task1", "2", "task2", "3", "task3"}}}, 160 | {key: "todo", value: []string{"task1", "1", "task2", "2", "task3", "3"}, cmdMaxLen: 4, expected: [][]string{{"ZADD", "todo", "1", "task1", "2", "task2", "3", "task3"}}}, 161 | } 162 | 163 | for _, testCase := range testCases { 164 | res := zsetToRedisCmds(testCase.key, testCase.value, testCase.cmdMaxLen) 165 | if len(testCase.expected) != len(res) { 166 | t.Errorf("Failed generating redis command from ZSET for %s %s %d: got %s", testCase.key, testCase.value, testCase.cmdMaxLen, res) 167 | continue 168 | } 169 | for i := 0; i < len(res); i++ { 170 | if len(testCase.expected[i]) != len(res[i]) { 171 | t.Errorf("Failed generating redis command from ZSET for %s %s %d: got %s", testCase.key, testCase.value, testCase.cmdMaxLen, res) 172 | continue 173 | } 174 | for j := 2; j < len(res[i]); j += 2 { 175 | found := false 176 | if 
res[i][j] == testCase.expected[i][j] && res[i][j+1] == testCase.expected[i][j+1] { 177 | found = true 178 | } 179 | 180 | if found == false { 181 | t.Errorf("Failed generating redis command from ZSet for: %s %s %d, got %s", testCase.key, testCase.value, testCase.cmdMaxLen, res) 182 | } 183 | } 184 | } 185 | } 186 | } 187 | 188 | func TestRESPSerializer(t *testing.T) { 189 | type testCase struct { 190 | command []string 191 | expected string 192 | } 193 | 194 | testCases := []testCase{ 195 | {command: []string{"SET", "key", "value"}, expected: "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"}, 196 | {command: []string{"SET", "key1", "😈"}, expected: "*3\r\n$3\r\nSET\r\n$4\r\nkey1\r\n$4\r\n😈\r\n"}, 197 | } 198 | 199 | for _, test := range testCases { 200 | s := RESPSerializer(test.command) 201 | if s != test.expected { 202 | t.Errorf("Failed serializing command to redis protocol: expected %s, got %s", test.expected, s) 203 | } 204 | } 205 | } 206 | 207 | func TestRedisCmdSerializer(t *testing.T) { 208 | type testCase struct { 209 | command []string 210 | expected string 211 | } 212 | 213 | testCases := []testCase{ 214 | {command: []string{"HELLO"}, expected: "HELLO"}, 215 | {command: []string{"HGETALL", "key"}, expected: "HGETALL key"}, 216 | {command: []string{"SET", "key name 1", "key value 1"}, expected: "SET \"key name 1\" \"key value 1\""}, 217 | {command: []string{"SET", "key", ""}, expected: "SET key \"\""}, 218 | {command: []string{"HSET", "key1", "key value 1"}, expected: "HSET key1 \"key value 1\""}, 219 | } 220 | 221 | for _, test := range testCases { 222 | s := RedisCmdSerializer(test.command) 223 | if s != test.expected { 224 | t.Errorf("Failed serializing command to redis protocol: expected %s, got %s", test.expected, s) 225 | } 226 | } 227 | } 228 | 229 | func TestParseKeyspaceInfo(t *testing.T) { 230 | keyspaceInfo := `# Keyspace 231 | db0:keys=2,expires=1,avg_ttl=1009946407050 232 | db2:keys=1,expires=0,avg_ttl=0` 233 | 234 | dbIds, err := 
parseKeyspaceInfo(keyspaceInfo) 235 | if err != nil { 236 | t.Errorf("Failed parsing keyspaceInfo" + err.Error()) 237 | } 238 | if !testEqUint8(dbIds, []uint8{0, 2}) { 239 | t.Errorf("Failed parsing keyspaceInfo: got %v", dbIds) 240 | } 241 | } 242 | 243 | func TestRedisDialOpts(t *testing.T) { 244 | for i, testCase := range []struct { 245 | redisUsername string 246 | redisPassword string 247 | tlsHandler *TlsHandler 248 | db uint8 249 | nDialOpts int 250 | err error 251 | }{ 252 | { 253 | "", 254 | "", 255 | nil, 256 | 1, 257 | 2, 258 | nil, 259 | }, { 260 | "", 261 | "test", 262 | &TlsHandler{}, 263 | 1, 264 | 4, 265 | nil, 266 | }, { 267 | "test", 268 | "test", 269 | &TlsHandler{}, 270 | 1, 271 | 4, 272 | nil, 273 | }, 274 | } { 275 | dOpts, err := redisDialOpts(testCase.redisUsername, testCase.redisPassword, testCase.tlsHandler, &testCase.db) 276 | if err != testCase.err { 277 | t.Errorf("expected error to be %+v, got %+v", testCase.err, err) 278 | } 279 | 280 | // DialOpts are functions and are pretty difficult to compare :( 281 | // "Functions are equal only if they are both nil" 282 | // Therefore we only compare that we are getting the right amount 283 | if len(dOpts) != testCase.nDialOpts { 284 | t.Errorf("test %d, expected %d dialOpts, got %d", i, testCase.nDialOpts, len(dOpts)) 285 | } 286 | 287 | } 288 | } 289 | 290 | type mockRadixClient struct{} 291 | 292 | func (m *mockRadixClient) Do(action radix.Action) error { 293 | return action.Run(nil) 294 | } 295 | func (m *mockRadixClient) Close() error { 296 | return nil 297 | } 298 | 299 | type mockRadixAction struct { 300 | rcv interface{} 301 | cmd string 302 | args []string 303 | } 304 | 305 | func (m *mockRadixAction) Keys() []string { 306 | return nil 307 | } 308 | 309 | func (m *mockRadixAction) Run(conn radix.Conn) error { 310 | if m.cmd == "TYPE" { 311 | key := m.args[0] 312 | // if the key name contains string, the object is of type string 313 | if strings.Contains(key, "string") { 314 | switch v 
:= m.rcv.(type) { 315 | case *string: 316 | *v = "string" 317 | } 318 | } 319 | if strings.Contains(key, "list") { 320 | switch v := m.rcv.(type) { 321 | case *string: 322 | *v = "list" 323 | } 324 | } 325 | if strings.Contains(key, "zset") { 326 | switch v := m.rcv.(type) { 327 | case *string: 328 | *v = "zset" 329 | } 330 | } 331 | 332 | return nil 333 | } 334 | 335 | if m.cmd == "GET" { 336 | switch v := m.rcv.(type) { 337 | case *string: 338 | *v = "stringvalue" 339 | default: 340 | fmt.Printf("DEFAULT") 341 | } 342 | 343 | return nil 344 | } 345 | 346 | if m.cmd == "TTL" { 347 | switch v := m.rcv.(type) { 348 | case *int64: 349 | *v = 5 350 | } 351 | 352 | return nil 353 | } 354 | 355 | if m.cmd == "KEYS" { 356 | switch v := m.rcv.(type) { 357 | case *[]string: 358 | a := []string{"key1", "key2", "key3", "key4", "key5"} 359 | *v = a 360 | } 361 | 362 | return nil 363 | } 364 | 365 | if m.cmd == "LRANGE" { 366 | switch v := m.rcv.(type) { 367 | case *[]string: 368 | a := []string{"listkey1", "listval1", "listkey2", "listval2"} 369 | *v = a 370 | 371 | default: 372 | fmt.Printf("ERROR") 373 | } 374 | return nil 375 | } 376 | 377 | if m.cmd == "ZRANGEBYSCORE" { 378 | switch v := m.rcv.(type) { 379 | case *[]string: 380 | a := []string{"listkey1", "1", "listkey2", "2"} 381 | *v = a 382 | 383 | default: 384 | fmt.Printf("ERROR") 385 | } 386 | return nil 387 | } 388 | 389 | return nil 390 | } 391 | 392 | func (m *mockRadixAction) MarshalRESP(io.Writer) error { 393 | return nil 394 | } 395 | 396 | func (m *mockRadixAction) UnmarshalRESP(reader *bufio.Reader) error { 397 | return nil 398 | } 399 | 400 | func getMockRadixAction(rcv interface{}, cmd string, args ...string) radix.CmdAction { 401 | return &mockRadixAction{ 402 | rcv: rcv, 403 | cmd: cmd, 404 | args: args, 405 | } 406 | } 407 | 408 | func TestDumpKeys(t *testing.T) { 409 | for i, testCase := range []struct { 410 | keys []string 411 | withTTL bool 412 | expectMatch string 413 | }{ 414 | { 415 | 
[]string{"somestring"}, 416 | false, 417 | "^SET somestring stringvalue\n$", 418 | }, 419 | { 420 | []string{"somestring", "somelist"}, 421 | false, 422 | "^SET somestring stringvalue\nRPUSH somelist listkey1 listval1 listkey2 listval2\n$", 423 | }, 424 | { 425 | []string{"somestring"}, 426 | true, 427 | "^SET somestring stringvalue\nEXPIREAT somestring [0-9]+\n$", 428 | }, 429 | { 430 | []string{"somezset"}, 431 | false, 432 | "^ZADD somezset 1 listkey1 2 listkey2\n$", 433 | }, 434 | } { 435 | var m mockRadixClient 436 | var b bytes.Buffer 437 | l := log.New(&b, "", 0) 438 | err := dumpKeys(&m, getMockRadixAction, testCase.keys, testCase.withTTL, 5, l, RedisCmdSerializer) 439 | if err != nil { 440 | t.Errorf("received error %+v", err) 441 | } 442 | match, _ := regexp.MatchString(testCase.expectMatch, b.String()) 443 | if !match { 444 | t.Errorf("test %d: expected to match %s, got %s", i, testCase.expectMatch, b.String()) 445 | } 446 | } 447 | } 448 | 449 | func TestScanKeysLegacy(t *testing.T) { 450 | for i, testCase := range []struct { 451 | n int 452 | bSize int 453 | err error 454 | }{ 455 | { 456 | 5, 457 | 100, 458 | nil, 459 | }, 460 | { 461 | 5, 462 | 4, 463 | nil, 464 | }, 465 | { 466 | 5, 467 | 5, 468 | nil, 469 | }, 470 | } { 471 | var m mockRadixClient 472 | keyBatches := make(chan []string) 473 | 474 | n := 0 475 | done := make(chan bool) 476 | go func() { 477 | for b := range keyBatches { 478 | n += len(b) 479 | } 480 | done <- true 481 | }() 482 | 483 | err := scanKeysLegacy(&m, getMockRadixAction, 0, 100, "*", keyBatches, nil) 484 | close(keyBatches) 485 | <-done 486 | if err != testCase.err { 487 | t.Errorf("test %d, expected err to be %s, got %s", i, testCase.err, err) 488 | } 489 | if n != testCase.n { 490 | t.Errorf("test %d, expected %d keys, got %d", i, testCase.n, n) 491 | } 492 | } 493 | } 494 | -------------------------------------------------------------------------------- /pkg/redisdump/tlsutils.go: 
// TlsHandler holds the TLS settings used when connecting to redis.
type TlsHandler struct {
	skipVerify bool // skip certificate validation entirely (-insecure)
	caCertPath string
	certPath   string
	keyPath    string
}

// NewTlsHandler builds a TlsHandler from certificate file paths. When no
// certificate at all is configured, insecure must be set (certificate
// validation is then skipped); otherwise an error is returned.
func NewTlsHandler(caCertPath, certPath, keyPath string, insecure bool) (*TlsHandler, error) {
	if caCertPath == "" && certPath == "" && keyPath == "" {
		if insecure {
			return &TlsHandler{
				skipVerify: true,
			}, nil
		}
		// Fixed the original, garbled error message.
		return nil, errors.New("no certificate is set: to skip certificate validation, use the -insecure option")
	}

	return &TlsHandler{
		skipVerify: false,
		caCertPath: caCertPath,
		certPath:   certPath,
		keyPath:    keyPath,
	}, nil
}

// tlsConfig translates a TlsHandler into a *tls.Config. A nil handler
// yields a nil config (plain-text connection). The CA certificate is
// optional; a client certificate is loaded only when both the certificate
// and key paths are set.
func tlsConfig(tlsHandler *TlsHandler) (*tls.Config, error) {
	if tlsHandler == nil {
		return nil, nil
	}

	if tlsHandler.skipVerify {
		return &tls.Config{
			InsecureSkipVerify: true,
		}, nil
	}

	certPool := x509.NewCertPool()
	// CA cert is optional.
	if tlsHandler.caCertPath != "" {
		pem, err := ioutil.ReadFile(tlsHandler.caCertPath)
		if err != nil {
			return nil, fmt.Errorf("connectionpool: unable to open CA certs: %v", err)
		}

		if !certPool.AppendCertsFromPEM(pem) {
			return nil, fmt.Errorf("connectionpool: failed parsing CA certs")
		}
	}

	tlsCfg := &tls.Config{
		Certificates: []tls.Certificate{},
		RootCAs:      certPool,
	}

	if tlsHandler.certPath != "" && tlsHandler.keyPath != "" {
		cert, err := tls.LoadX509KeyPair(tlsHandler.certPath, tlsHandler.keyPath)
		if err != nil {
			return nil, err
		}
		tlsCfg.Certificates = append(tlsCfg.Certificates, cert)
	}

	return tlsCfg, nil
}
/utils/generator/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io" 7 | "math/rand" 8 | "os" 9 | "strings" 10 | 11 | "github.com/yannh/redis-dump-go/pkg/redisdump" 12 | ) 13 | 14 | var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") 15 | 16 | func randSeq(n int) string { 17 | b := make([]rune, n) 18 | for i := range b { 19 | b[i] = letters[rand.Intn(len(letters))] 20 | } 21 | return string(b) 22 | } 23 | 24 | func GenerateStrings(w io.Writer, nKeys int, serializer redisdump.Serializer) { 25 | for i := 0; i < nKeys; i++ { 26 | io.WriteString(w, serializer([]string{"SET", randSeq(8), randSeq(16)})+"\n") 27 | } 28 | } 29 | 30 | func GenerateZSET(w io.Writer, nKeys int, serializer redisdump.Serializer) { 31 | zsetKey := randSeq(16) 32 | for i := 0; i < nKeys; i++ { 33 | io.WriteString(w, serializer([]string{"ZADD", zsetKey, "1", randSeq(16)})+"\n") 34 | } 35 | } 36 | 37 | func main() { 38 | nKeys := flag.Int("n", 100, "Number of keys to generate") 39 | sType := flag.String("type", "strings", "zset or strings") 40 | oType := flag.String("output", "resp", "resp or commands") 41 | flag.Parse() 42 | 43 | var s redisdump.Serializer 44 | switch strings.ToLower(*oType) { 45 | case "resp": 46 | s = redisdump.RESPSerializer 47 | 48 | case "commands": 49 | s = redisdump.RedisCmdSerializer 50 | 51 | default: 52 | fmt.Fprintf(os.Stderr, "Unrecognised type %s, should be strings or zset", *sType) 53 | os.Exit(1) 54 | } 55 | 56 | switch strings.ToLower(*sType) { 57 | case "zset": 58 | GenerateZSET(os.Stdout, *nKeys, s) 59 | 60 | case "strings": 61 | GenerateStrings(os.Stdout, *nKeys, s) 62 | 63 | default: 64 | fmt.Fprintf(os.Stderr, "Unrecognised type %s, should be strings or zset", *sType) 65 | os.Exit(1) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/.gitignore: 
-------------------------------------------------------------------------------- 1 | test-tmp 2 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Changelog from v3.0.1 and up. Prior changes don't have a changelog. 2 | 3 | # v3.8.1 4 | 5 | * Fixed `NewCluster` not returning an error if it can't connect to any of the 6 | redis instances given. (#319) 7 | 8 | * Fix deadlock in `Cluster` when using `DoSecondary`. (#317) 9 | 10 | * Fix parsing for `CLUSTER SLOTS` command, which changed slightly with redis 11 | 7.0. (#322) 12 | 13 | # v3.8.0 14 | 15 | **New** 16 | 17 | * Add `PoolMaxLifetime` option for `Pool`. (PR #294) 18 | 19 | **Fixes And Improvements** 20 | 21 | * Switched to using `errors` package, rather than `golang.org/x/xerrors`. (PR 22 | #300) 23 | 24 | * Switch to using Github Actions from travis. (PR #300) 25 | 26 | * Fixed IPv6 addresses breaking `Cluster`. (Issue #288) 27 | 28 | # v3.7.1 29 | 30 | * Release the RLock in `Sentinel`'s `Do`. (PR #272) 31 | 32 | # v3.7.0 33 | 34 | **New** 35 | 36 | * Add `FallbackToUndelivered` option to `StreamReaderOpts`. (PR #244) 37 | 38 | * Add `ClusterOnInitAllowUnavailable`. (PR #247) 39 | 40 | **Fixes and Improvements** 41 | 42 | * Fix reading a RESP error into a `*interface{}` panicking. (PR #240) 43 | 44 | # v3.6.0 45 | 46 | **New** 47 | 48 | * Add `Tuple` type, which makes unmarshaling `EXEC` and `EVAL` results easier. 49 | 50 | * Add `PersistentPubSubErrCh`, so that asynchronous errors within 51 | `PersistentPubSub` can be exposed to the user. 52 | 53 | * Add `FlatCmd` method to `EvalScript`. 54 | 55 | * Add `StreamEntries` unmarshaler to make unmarshaling `XREAD` and `XREADGROUP` 56 | results easier. 57 | 58 | **Fixes and Improvements** 59 | 60 | * Fix wrapped errors not being handled correctly by `Cluster`. 
(PR #229) 61 | 62 | * Fix `PersistentPubSub` deadlocking when a method was called after `Close`. 63 | (PR #230) 64 | 65 | * Fix `StreamReader` not correctly handling the case of reading from multiple 66 | streams when one is empty. (PR #224) 67 | 68 | # v3.5.2 69 | 70 | * Improve docs for `WithConn` and `PubSubConn`. 71 | 72 | * Fix `PubSubConn`'s `Subscribe` and `PSubscribe` methods potentially mutating 73 | the passed in array of strings. (Issue #217) 74 | 75 | * Fix `StreamEntry` not properly handling unmarshaling an entry with a nil 76 | fields array. (PR #218) 77 | 78 | # v3.5.1 79 | 80 | * Add `EmptyArray` field to `MaybeNil`. (PR #211) 81 | 82 | * Fix `Cluster` not properly re-initializing itself when the cluster goes 83 | completely down. (PR #209) 84 | 85 | # v3.5.0 86 | 87 | Huge thank you to @nussjustin for all the work he's been doing on this project, 88 | this release is almost entirely his doing. 89 | 90 | **New** 91 | 92 | * Add support for `TYPE` option to `Scanner`. (PR #187) 93 | 94 | * Add `Sentinel.DoSecondary` method. (PR #197) 95 | 96 | * Add `DialAuthUser`, to support username+password authentication. (PR #195) 97 | 98 | * Add `Cluster.DoSecondary` method. (PR #198) 99 | 100 | **Fixes and Improvements** 101 | 102 | * Fix pipeline behavior when a decode error is encountered. (PR #180) 103 | 104 | * Fix `Reason` in `PoolConnClosed` in the case of the Pool being full. (PR #186) 105 | 106 | * Refactor `PersistentPubSub` to be cleaner, fixing a panic in the process. 107 | (PR #185, Issue #184) 108 | 109 | * Fix marshaling of nil pointers in structs. (PR #192) 110 | 111 | * Wrap errors which get returned from pipeline decoding. (PR #191) 112 | 113 | * Simplify and improve pipeline error handling. (PR #190) 114 | 115 | * Dodge a `[]byte` allocation when in `StreamReader.Next`. (PR #196) 116 | 117 | * Remove excess lock in Pool. 
(PR #202) 118 | 119 | 120 | # v3.4.2 121 | 122 | * Fix alignment for atomic values in structs (PR #171) 123 | 124 | * Fix closing of sentinel instances while updating state (PR #173) 125 | 126 | # v3.4.1 127 | 128 | * Update xerrors package (PR #165) 129 | 130 | * Have cluster Pools be closed outside of lock, to reduce contention during 131 | failover events (PR #168) 132 | 133 | # v3.4.0 134 | 135 | * Add `PersistentPubSubWithOpts` function, deprecating the old 136 | `PersistentPubSub` function. (PR #156) 137 | 138 | * Make decode errors a bit more helpful. (PR #157) 139 | 140 | * Refactor Pool to rely on its inner lock less, simplifying the code quite a bit 141 | and hopefully speeding up certain actions. (PR #160) 142 | 143 | * Various documentation updates. (PR #138, Issue #162) 144 | 145 | # v3.3.2 146 | 147 | * Have `resp2.Error` match with a `resp.ErrDiscarded` when using `errors.As`. 148 | Fixes EVAL, among probably other problems. (PR #152) 149 | 150 | # v3.3.1 151 | 152 | * Use `xerrors` internally. (PR #113) 153 | 154 | * Handle unmarshal errors better. Previously an unmarshaling error could leave 155 | the connection in an inconsistent state, because the full message wouldn't get 156 | completely read off the wire. After a lot of work, this has been fixed. (PR 157 | #127, #139, #145) 158 | 159 | * Handle CLUSTERDOWN errors better. Upon seeing a CLUSTERDOWN, all commands will 160 | be delayed by a small amount of time. The delay will be stopped as soon as the 161 | first non-CLUSTERDOWN result is seen from the Cluster. The idea is that, if a 162 | failover happens, commands which are incoming will be paused long enough for 163 | the cluster to regain it sanity, thus minimizing the number of failed commands 164 | during the failover. (PR #137) 165 | 166 | * Fix cluster redirect tracing. (PR #142) 167 | 168 | # v3.3.0 169 | 170 | **New** 171 | 172 | * Add `trace` package with tracing callbacks for `Pool` and `Cluster`. 173 | (`Sentinel` coming soon!) 
(PR #100, PR #108, PR #111) 174 | 175 | * Add `SentinelAddrs` method to `Sentinel` (PR #118) 176 | 177 | * Add `DialUseTLS` option. (PR #104) 178 | 179 | **Fixes and Improvements** 180 | 181 | * Fix `NewSentinel` not handling URL AUTH parameters correctly (PR #120) 182 | 183 | * Change `DefaultClientFunc`'s pool size from 20 to 4, on account of pipelining 184 | being enabled by default. (Issue #107) 185 | 186 | * Reuse `reflect.Value` instances when unmarshaling into certain map types. (PR 187 | #96). 188 | 189 | * Fix a panic in `FlatCmd`. (PR #97) 190 | 191 | * Reuse field name `string` when unmarshaling into a struct. (PR #95) 192 | 193 | * Reduce PubSub allocations significantly. (PR #92 + Issue #91) 194 | 195 | * Reduce allocations in `Conn`. (PR #84) 196 | 197 | # v3.2.3 198 | 199 | * Optimize Scanner implementation. 200 | 201 | * Fix bug with using types which implement resp.LenReader, encoding.TextMarshaler, and encoding.BinaryMarshaler. The encoder wasn't properly taking into account the interfaces when counting the number of elements in the message. 202 | 203 | # v3.2.2 204 | 205 | * Give Pool an ErrCh so that errors which happen internally may be reported to 206 | the user, if they care. 207 | 208 | * Fix `PubSubConn`'s deadlock problems during Unsubscribe commands. 209 | 210 | * Small speed optimizations in network protocol code. 211 | 212 | # v3.2.1 213 | 214 | * Move benchmarks to a submodule in order to clean up `go.mod` a bit. 215 | 216 | # v3.2.0 217 | 218 | * Add `StreamReader` type to make working with redis' new [Stream][stream] 219 | functionality easier. 220 | 221 | * Make `Sentinel` properly respond to `Client` method calls. Previously it 222 | always created a new `Client` instance when a secondary was requested, now it 223 | keeps track of instances internally. 224 | 225 | * Make default `Dial` call have a timeout for connect/read/write. At the same 226 | time, normalize default timeout values across the project. 
227 | 228 | * Implicitly pipeline commands in the default Pool implementation whenever 229 | possible. This gives a throughput increase of nearly 5x for a normal parallel 230 | workload. 231 | 232 | [stream]: https://redis.io/topics/streams-intro 233 | 234 | # v3.1.0 235 | 236 | * Add support for marshaling/unmarshaling structs. 237 | 238 | # v3.0.1 239 | 240 | * Make `Stub` support `Pipeline` properly. 241 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # The rulez 2 | 3 | There's a couple. They're not even really rules, more just telling you what you 4 | can expect. 5 | 6 | * Issues are ALWAYS welcome, whether or not you think it's a dumb question or 7 | it's been asked before. I make a very real attempt to respond to all issues in 8 | 24 hours. You can email me directly if I don't make this deadline. 9 | 10 | * Please always preface a pull request by making an issue. It can save you some 11 | time if it turns out that something you consider an issue is actually intended 12 | behavior, and saves me the difficult task of telling you that I'm going to let 13 | the work you put in go to waste. 14 | 15 | * The API never breaks. All PRs which aren't backwards compatible will not be 16 | accepted. Similarly, if I do commit something which isn't backwards compatible 17 | with an older behavior please submit an issue ASAP so I can fix it. 
18 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 17 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 19 | IN THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/README.md: -------------------------------------------------------------------------------- 1 | # Radix 2 | 3 | Radix is a full-featured [Redis][redis] client for Go. See the reference links 4 | below for documentation and general usage examples. 
5 | 6 | **[v3 Documentation](https://pkg.go.dev/github.com/mediocregopher/radix/v3#section-documentation)** 7 | 8 | **[v4 Documentation](https://pkg.go.dev/github.com/mediocregopher/radix/v4#section-documentation)** 9 | 10 | **[Discussion/Support Chat](https://matrix.to/#/#radix:waffle.farm)** 11 | 12 | Please open an issue, or start a discussion in the chat, before opening a pull request! 13 | 14 | ## Features 15 | 16 | * Standard print-like API which supports **all current and future redis commands**. 17 | 18 | * Connection pool which uses **connection sharing** to minimize system calls. 19 | 20 | * Full support for [Sentinel][sentinel] and [Cluster][cluster]. 21 | 22 | * Helpers for [EVAL][eval], [SCAN][scan], [Streams][stream], and [Pipelining][pipelining]. 23 | 24 | * Support for [pubsub][pubsub], as well as persistent pubsub wherein if a 25 | connection is lost a new one transparently replaces it. 26 | 27 | * API design allows for custom implementations of nearly anything. 28 | 29 | ## Versions 30 | 31 | There are two major versions of radix being supported: 32 | 33 | * v3 is the more mature version, but lacks the polished API of v4. v3 is only accepting bug fixes at this point. 34 | 35 | * v4 has feature parity with v3 and more! The biggest selling points are: 36 | 37 | * More polished API. 38 | * Full [RESP3][resp3] support. 39 | * Support for [context.Context][context] on all blocking operations. 40 | * Connection sharing (called "implicit pipelining" in v3) now works with Pipeline and EvalScript. 41 | 42 | View the [CHANGELOG][v4changelog] for more details. 43 | 44 | [v4changelog]: https://github.com/mediocregopher/radix/blob/v4/CHANGELOG.md 45 | 46 | ## Installation and Usage 47 | 48 | Radix always aims to support the most recent two versions of go, and is likely 49 | to support others prior to those two. 
50 | 51 | [Module][module]-aware mode: 52 | 53 | go get github.com/mediocregopher/radix/v3 54 | // import github.com/mediocregopher/radix/v3 55 | 56 | go get github.com/mediocregopher/radix/v4 57 | // import github.com/mediocregopher/radix/v4 58 | 59 | ## Testing 60 | 61 | # requires a redis server running on 127.0.0.1:6379 62 | go test github.com/mediocregopher/radix/v3 63 | go test github.com/mediocregopher/radix/v4 64 | 65 | ## Benchmarks 66 | 67 | Benchmarks were run in as close to a "real" environment as possible. Two GCE 68 | instances were booted up, one hosting the redis server with 2vCPUs, the other 69 | running the benchmarks (found in the `bench` directory) with 16vCPUs. 70 | 71 | The benchmarks test a variety of situations against many different redis 72 | drivers, and the results are very large. You can view them [here][bench 73 | results]. Below are some highlights (I've tried to be fair here): 74 | 75 | For a typical workload, which is lots of concurrent commands with relatively 76 | small amounts of data, radix outperforms all tested drivers except 77 | [redispipe][redispipe]: 78 | 79 | ``` 80 | BenchmarkDrivers/parallel/no_pipeline/small_kv/radixv4-64 17815254 2917 ns/op 199 B/op 6 allocs/op 81 | BenchmarkDrivers/parallel/no_pipeline/small_kv/radixv3-64 16688293 3120 ns/op 109 B/op 4 allocs/op 82 | BenchmarkDrivers/parallel/no_pipeline/small_kv/redigo-64 3504063 15092 ns/op 168 B/op 9 allocs/op 83 | BenchmarkDrivers/parallel/no_pipeline/small_kv/redispipe_pause150us-64 31668576 1680 ns/op 217 B/op 11 allocs/op 84 | BenchmarkDrivers/parallel/no_pipeline/small_kv/redispipe_pause0-64 31149280 1685 ns/op 218 B/op 11 allocs/op 85 | BenchmarkDrivers/parallel/no_pipeline/small_kv/go-redis-64 3768988 14409 ns/op 411 B/op 13 allocs/op 86 | ``` 87 | 88 | The story is similar for pipelining commands concurrently (radixv3 doesn't do as 89 | well here, because it doesn't support connection sharing for pipeline commands): 90 | 91 | ``` 92 | 
BenchmarkDrivers/parallel/pipeline/small_kv/radixv4-64 24720337 2245 ns/op 508 B/op 13 allocs/op 93 | BenchmarkDrivers/parallel/pipeline/small_kv/radixv3-64 6921868 7757 ns/op 165 B/op 7 allocs/op 94 | BenchmarkDrivers/parallel/pipeline/small_kv/redigo-64 6738849 8080 ns/op 170 B/op 9 allocs/op 95 | BenchmarkDrivers/parallel/pipeline/small_kv/redispipe_pause150us-64 44479539 1148 ns/op 316 B/op 12 allocs/op 96 | BenchmarkDrivers/parallel/pipeline/small_kv/redispipe_pause0-64 45290868 1126 ns/op 315 B/op 12 allocs/op 97 | BenchmarkDrivers/parallel/pipeline/small_kv/go-redis-64 6740984 7903 ns/op 475 B/op 15 allocs/op 98 | ``` 99 | 100 | For larger amounts of data being transferred the differences become less 101 | noticeable, but both radix versions come out on top: 102 | 103 | ``` 104 | BenchmarkDrivers/parallel/no_pipeline/large_kv/radixv4-64 2395707 22766 ns/op 12553 B/op 4 allocs/op 105 | BenchmarkDrivers/parallel/no_pipeline/large_kv/radixv3-64 3150398 17087 ns/op 12745 B/op 4 allocs/op 106 | BenchmarkDrivers/parallel/no_pipeline/large_kv/redigo-64 1593054 34038 ns/op 24742 B/op 9 allocs/op 107 | BenchmarkDrivers/parallel/no_pipeline/large_kv/redispipe_pause150us-64 2105118 25085 ns/op 16962 B/op 11 allocs/op 108 | BenchmarkDrivers/parallel/no_pipeline/large_kv/redispipe_pause0-64 2354427 24280 ns/op 17295 B/op 11 allocs/op 109 | BenchmarkDrivers/parallel/no_pipeline/large_kv/go-redis-64 1519354 35745 ns/op 14033 B/op 14 allocs/op 110 | ``` 111 | 112 | All results above show the high-concurrency results (`-cpu 64`). Concurrencies 113 | of 16 and 32 are also included in the results, but didn't show anything 114 | different. 115 | 116 | For serial workloads, which involve a single connection performing commands 117 | one after the other, radix is either as fast or within a couple % of the other 118 | drivers tested. 
This use-case is much less common, and so when tradeoffs have 119 | been made between parallel and serial performance radix has general leaned 120 | towards parallel. 121 | 122 | Serial non-pipelined: 123 | 124 | ``` 125 | BenchmarkDrivers/serial/no_pipeline/small_kv/radixv4-16 346915 161493 ns/op 67 B/op 4 allocs/op 126 | BenchmarkDrivers/serial/no_pipeline/small_kv/radixv3-16 428313 138011 ns/op 67 B/op 4 allocs/op 127 | BenchmarkDrivers/serial/no_pipeline/small_kv/redigo-16 416103 134438 ns/op 134 B/op 8 allocs/op 128 | BenchmarkDrivers/serial/no_pipeline/small_kv/redispipe_pause150us-16 86734 635637 ns/op 217 B/op 11 allocs/op 129 | BenchmarkDrivers/serial/no_pipeline/small_kv/redispipe_pause0-16 340320 158732 ns/op 216 B/op 11 allocs/op 130 | BenchmarkDrivers/serial/no_pipeline/small_kv/go-redis-16 429703 138854 ns/op 408 B/op 13 allocs/op 131 | ``` 132 | 133 | Serial pipelined: 134 | 135 | ``` 136 | BenchmarkDrivers/serial/pipeline/small_kv/radixv4-16 624417 82336 ns/op 83 B/op 5 allocs/op 137 | BenchmarkDrivers/serial/pipeline/small_kv/radixv3-16 784947 68540 ns/op 163 B/op 7 allocs/op 138 | BenchmarkDrivers/serial/pipeline/small_kv/redigo-16 770983 69976 ns/op 134 B/op 8 allocs/op 139 | BenchmarkDrivers/serial/pipeline/small_kv/redispipe_pause150us-16 175623 320512 ns/op 312 B/op 12 allocs/op 140 | BenchmarkDrivers/serial/pipeline/small_kv/redispipe_pause0-16 642673 82225 ns/op 312 B/op 12 allocs/op 141 | BenchmarkDrivers/serial/pipeline/small_kv/go-redis-16 787364 72240 ns/op 472 B/op 15 allocs/op 142 | ``` 143 | 144 | Serial large values: 145 | 146 | ``` 147 | BenchmarkDrivers/serial/no_pipeline/large_kv/radixv4-16 253586 217600 ns/op 12521 B/op 4 allocs/op 148 | BenchmarkDrivers/serial/no_pipeline/large_kv/radixv3-16 317356 179608 ns/op 12717 B/op 4 allocs/op 149 | BenchmarkDrivers/serial/no_pipeline/large_kv/redigo-16 244226 231179 ns/op 24704 B/op 8 allocs/op 150 | BenchmarkDrivers/serial/no_pipeline/large_kv/redispipe_pause150us-16 80174 674066 ns/op 
13780 B/op 11 allocs/op 151 | BenchmarkDrivers/serial/no_pipeline/large_kv/redispipe_pause0-16 251810 209890 ns/op 13778 B/op 11 allocs/op 152 | BenchmarkDrivers/serial/no_pipeline/large_kv/go-redis-16 236379 225677 ns/op 13976 B/op 14 allocs/op 153 | ``` 154 | 155 | [bench results]: https://github.com/mediocregopher/radix/blob/v4/bench/bench_results.txt 156 | 157 | ## Copyright and licensing 158 | 159 | Unless otherwise noted, the source files are distributed under the *MIT License* 160 | found in the LICENSE.txt file. 161 | 162 | [redis]: http://redis.io 163 | [eval]: https://redis.io/commands/eval 164 | [scan]: https://redis.io/commands/scan 165 | [stream]: https://redis.io/topics/streams-intro 166 | [pipelining]: https://redis.io/topics/pipelining 167 | [pubsub]: https://redis.io/topics/pubsub 168 | [sentinel]: http://redis.io/topics/sentinel 169 | [cluster]: http://redis.io/topics/cluster-spec 170 | [module]: https://github.com/golang/go/wiki/Modules 171 | [redispipe]: https://github.com/joomcode/redispipe 172 | [context]: https://pkg.go.dev/context 173 | [resp3]: https://github.com/antirez/RESP3/blob/master/spec.md 174 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/cluster_crc16.go: -------------------------------------------------------------------------------- 1 | package radix 2 | 3 | import ( 4 | "bytes" 5 | ) 6 | 7 | var tab = [256]uint16{ 8 | 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 9 | 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, 10 | 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, 11 | 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, 12 | 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, 13 | 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, 14 | 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, 15 | 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 
0xc7bc, 16 | 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, 17 | 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, 18 | 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, 19 | 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, 20 | 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, 21 | 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, 22 | 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, 23 | 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, 24 | 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, 25 | 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, 26 | 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, 27 | 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, 28 | 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, 29 | 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 30 | 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, 31 | 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, 32 | 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, 33 | 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, 34 | 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, 35 | 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, 36 | 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, 37 | 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, 38 | 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, 39 | 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, 40 | } 41 | 42 | const numSlots = 16384 43 | 44 | // CRC16 returns checksum for a given set of bytes based on the crc algorithm 45 | // defined for hashing redis keys in a cluster setup. 
46 | func CRC16(buf []byte) uint16 { 47 | crc := uint16(0) 48 | for _, b := range buf { 49 | index := byte(crc>>8) ^ b 50 | crc = (crc << 8) ^ tab[index] 51 | } 52 | return crc 53 | } 54 | 55 | // ClusterSlot returns the slot number the key belongs to in any redis cluster, 56 | // taking into account key hash tags. 57 | func ClusterSlot(key []byte) uint16 { 58 | if start := bytes.Index(key, []byte("{")); start >= 0 { 59 | if end := bytes.Index(key[start+1:], []byte("}")); end > 0 { 60 | key = key[start+1 : start+1+end] 61 | } 62 | } 63 | return CRC16(key) % numSlots 64 | } 65 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/cluster_scanner.go: -------------------------------------------------------------------------------- 1 | package radix 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | type clusterScanner struct { 8 | cluster *Cluster 9 | opts ScanOpts 10 | 11 | addrs []string 12 | currScanner Scanner 13 | lastErr error 14 | } 15 | 16 | // NewScanner will return a Scanner which will scan over every node in the 17 | // cluster. This will panic if the ScanOpt's Command isn't "SCAN". For scanning 18 | // operations other than "SCAN" (e.g. "HSCAN", "ZSCAN") use the normal 19 | // NewScanner function. 20 | // 21 | // If the cluster topology changes during a scan the Scanner may or may not 22 | // error out due to it, depending on the nature of the change. 
23 | func (c *Cluster) NewScanner(o ScanOpts) Scanner { 24 | if strings.ToUpper(o.Command) != "SCAN" { 25 | panic("Cluster.NewScanner can only perform SCAN operations") 26 | } 27 | 28 | var addrs []string 29 | for _, node := range c.Topo().Primaries() { 30 | addrs = append(addrs, node.Addr) 31 | } 32 | 33 | cs := &clusterScanner{ 34 | cluster: c, 35 | opts: o, 36 | addrs: addrs, 37 | } 38 | cs.nextScanner() 39 | 40 | return cs 41 | } 42 | 43 | func (cs *clusterScanner) closeCurr() { 44 | if cs.currScanner != nil { 45 | if err := cs.currScanner.Close(); err != nil && cs.lastErr == nil { 46 | cs.lastErr = err 47 | } 48 | cs.currScanner = nil 49 | } 50 | } 51 | 52 | func (cs *clusterScanner) scannerForAddr(addr string) bool { 53 | client, _ := cs.cluster.rpool(addr) 54 | if client != nil { 55 | cs.closeCurr() 56 | cs.currScanner = NewScanner(client, cs.opts) 57 | return true 58 | } 59 | return false 60 | } 61 | 62 | func (cs *clusterScanner) nextScanner() { 63 | for { 64 | if len(cs.addrs) == 0 { 65 | cs.closeCurr() 66 | return 67 | } 68 | addr := cs.addrs[0] 69 | cs.addrs = cs.addrs[1:] 70 | if cs.scannerForAddr(addr) { 71 | return 72 | } 73 | } 74 | } 75 | 76 | func (cs *clusterScanner) Next(res *string) bool { 77 | for { 78 | if cs.currScanner == nil { 79 | return false 80 | } else if out := cs.currScanner.Next(res); out { 81 | return true 82 | } 83 | cs.nextScanner() 84 | } 85 | } 86 | 87 | func (cs *clusterScanner) Close() error { 88 | cs.closeCurr() 89 | return cs.lastErr 90 | } 91 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/cluster_topo.go: -------------------------------------------------------------------------------- 1 | package radix 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "net" 8 | "sort" 9 | "strconv" 10 | 11 | "github.com/mediocregopher/radix/v3/resp" 12 | "github.com/mediocregopher/radix/v3/resp/resp2" 13 | ) 14 | 15 | // ClusterNode describes a single node in 
the cluster at a moment in time. 16 | type ClusterNode struct { 17 | // older versions of redis might not actually send back the id, so it may be 18 | // blank 19 | Addr, ID string 20 | // start is inclusive, end is exclusive 21 | Slots [][2]uint16 22 | // address and id this node is the secondary of, if it's a secondary 23 | SecondaryOfAddr, SecondaryOfID string 24 | } 25 | 26 | // ClusterTopo describes the cluster topology at a given moment. It will be 27 | // sorted first by slot number of each node and then by secondary status, so 28 | // primaries will come before secondaries. 29 | type ClusterTopo []ClusterNode 30 | 31 | // MarshalRESP implements the resp.Marshaler interface, and will marshal the 32 | // ClusterTopo in the same format as the return from CLUSTER SLOTS. 33 | func (tt ClusterTopo) MarshalRESP(w io.Writer) error { 34 | m := map[[2]uint16]topoSlotSet{} 35 | for _, t := range tt { 36 | for _, slots := range t.Slots { 37 | tss := m[slots] 38 | tss.slots = slots 39 | tss.nodes = append(tss.nodes, t) 40 | m[slots] = tss 41 | } 42 | } 43 | 44 | // we sort the topoSlotSets by their slot number so that the order is 45 | // deterministic, mostly so tests pass consistently, I'm not sure if actual 46 | // redis has any contract on the order 47 | allTSS := make([]topoSlotSet, 0, len(m)) 48 | for _, tss := range m { 49 | allTSS = append(allTSS, tss) 50 | } 51 | sort.Slice(allTSS, func(i, j int) bool { 52 | return allTSS[i].slots[0] < allTSS[j].slots[0] 53 | }) 54 | 55 | if err := (resp2.ArrayHeader{N: len(allTSS)}).MarshalRESP(w); err != nil { 56 | return err 57 | } 58 | for _, tss := range allTSS { 59 | if err := tss.MarshalRESP(w); err != nil { 60 | return err 61 | } 62 | } 63 | return nil 64 | } 65 | 66 | // UnmarshalRESP implements the resp.Unmarshaler interface, but only supports 67 | // unmarshaling the return from CLUSTER SLOTS. The unmarshaled nodes will be 68 | // sorted before they are returned. 
69 | func (tt *ClusterTopo) UnmarshalRESP(br *bufio.Reader) error { 70 | var arrHead resp2.ArrayHeader 71 | if err := arrHead.UnmarshalRESP(br); err != nil { 72 | return err 73 | } 74 | slotSets := make([]topoSlotSet, arrHead.N) 75 | for i := range slotSets { 76 | if err := (&(slotSets[i])).UnmarshalRESP(br); err != nil { 77 | return err 78 | } 79 | } 80 | 81 | nodeAddrM := map[string]ClusterNode{} 82 | for _, tss := range slotSets { 83 | for _, n := range tss.nodes { 84 | if existingN, ok := nodeAddrM[n.Addr]; ok { 85 | existingN.Slots = append(existingN.Slots, n.Slots...) 86 | nodeAddrM[n.Addr] = existingN 87 | } else { 88 | nodeAddrM[n.Addr] = n 89 | } 90 | } 91 | } 92 | 93 | for _, n := range nodeAddrM { 94 | *tt = append(*tt, n) 95 | } 96 | tt.sort() 97 | return nil 98 | } 99 | 100 | func (tt ClusterTopo) sort() { 101 | // first go through each node and make sure the individual slot sets are 102 | // sorted 103 | for _, node := range tt { 104 | sort.Slice(node.Slots, func(i, j int) bool { 105 | return node.Slots[i][0] < node.Slots[j][0] 106 | }) 107 | } 108 | 109 | sort.Slice(tt, func(i, j int) bool { 110 | if tt[i].Slots[0] != tt[j].Slots[0] { 111 | return tt[i].Slots[0][0] < tt[j].Slots[0][0] 112 | } 113 | // we want secondaries to come after, which actually means they should 114 | // be sorted as greater 115 | return tt[i].SecondaryOfAddr == "" 116 | }) 117 | 118 | } 119 | 120 | // Map returns the topology as a mapping of node address to its ClusterNode. 121 | func (tt ClusterTopo) Map() map[string]ClusterNode { 122 | m := make(map[string]ClusterNode, len(tt)) 123 | for _, t := range tt { 124 | m[t.Addr] = t 125 | } 126 | return m 127 | } 128 | 129 | // Primaries returns a ClusterTopo instance containing only the primary nodes 130 | // from the ClusterTopo being called on. 
131 | func (tt ClusterTopo) Primaries() ClusterTopo { 132 | mtt := make(ClusterTopo, 0, len(tt)) 133 | for _, node := range tt { 134 | if node.SecondaryOfAddr == "" { 135 | mtt = append(mtt, node) 136 | } 137 | } 138 | return mtt 139 | } 140 | 141 | // we only use this type during unmarshalling, the topo Unmarshal method will 142 | // convert these into ClusterNodes. 143 | type topoSlotSet struct { 144 | slots [2]uint16 145 | nodes []ClusterNode 146 | } 147 | 148 | func (tss topoSlotSet) MarshalRESP(w io.Writer) error { 149 | var err error 150 | marshal := func(m resp.Marshaler) { 151 | if err == nil { 152 | err = m.MarshalRESP(w) 153 | } 154 | } 155 | 156 | marshal(resp2.ArrayHeader{N: 2 + len(tss.nodes)}) 157 | marshal(resp2.Any{I: tss.slots[0]}) 158 | marshal(resp2.Any{I: tss.slots[1] - 1}) 159 | 160 | for _, n := range tss.nodes { 161 | 162 | host, portStr, _ := net.SplitHostPort(n.Addr) 163 | 164 | port, err := strconv.Atoi(portStr) 165 | if err != nil { 166 | return err 167 | } 168 | 169 | node := []interface{}{host, port} 170 | if n.ID != "" { 171 | node = append(node, n.ID) 172 | } 173 | marshal(resp2.Any{I: node}) 174 | } 175 | 176 | return err 177 | } 178 | 179 | func (tss *topoSlotSet) UnmarshalRESP(br *bufio.Reader) error { 180 | var arrHead resp2.ArrayHeader 181 | if err := arrHead.UnmarshalRESP(br); err != nil { 182 | return err 183 | } 184 | 185 | // first two array elements are the slot numbers. 
We increment the second to 186 | // preserve inclusive start/exclusive end, which redis doesn't 187 | for i := range tss.slots { 188 | if err := (resp2.Any{I: &tss.slots[i]}).UnmarshalRESP(br); err != nil { 189 | return err 190 | } 191 | } 192 | tss.slots[1]++ 193 | arrHead.N -= len(tss.slots) 194 | 195 | var primaryNode ClusterNode 196 | for i := 0; i < arrHead.N; i++ { 197 | 198 | var nodeArrHead resp2.ArrayHeader 199 | if err := nodeArrHead.UnmarshalRESP(br); err != nil { 200 | return err 201 | } else if nodeArrHead.N < 2 { 202 | return fmt.Errorf("expected at least 2 array elements, got %d", nodeArrHead.N) 203 | } 204 | 205 | var ip resp2.BulkString 206 | if err := ip.UnmarshalRESP(br); err != nil { 207 | return err 208 | } 209 | 210 | var port resp2.Int 211 | if err := port.UnmarshalRESP(br); err != nil { 212 | return err 213 | } 214 | 215 | nodeArrHead.N -= 2 216 | 217 | var id resp2.BulkString 218 | if nodeArrHead.N > 0 { 219 | if err := id.UnmarshalRESP(br); err != nil { 220 | return err 221 | } 222 | nodeArrHead.N-- 223 | } 224 | 225 | // discard anything after 226 | for i := 0; i < nodeArrHead.N; i++ { 227 | if err := (resp2.Any{}).UnmarshalRESP(br); err != nil { 228 | return err 229 | } 230 | } 231 | 232 | node := ClusterNode{ 233 | Addr: net.JoinHostPort(ip.S, strconv.FormatInt(port.I, 10)), 234 | ID: id.S, 235 | Slots: [][2]uint16{tss.slots}, 236 | } 237 | 238 | if i == 0 { 239 | primaryNode = node 240 | } else { 241 | node.SecondaryOfAddr = primaryNode.Addr 242 | node.SecondaryOfID = primaryNode.ID 243 | } 244 | 245 | tss.nodes = append(tss.nodes, node) 246 | } 247 | 248 | return nil 249 | } 250 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/conn.go: -------------------------------------------------------------------------------- 1 | package radix 2 | 3 | import ( 4 | "bufio" 5 | "crypto/tls" 6 | "net" 7 | "net/url" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | 
"github.com/mediocregopher/radix/v3/resp" 13 | ) 14 | 15 | // Conn is a Client wrapping a single network connection which synchronously 16 | // reads/writes data using the redis resp protocol. 17 | // 18 | // A Conn can be used directly as a Client, but in general you probably want to 19 | // use a *Pool instead. 20 | type Conn interface { 21 | // The Do method of a Conn is _not_ expected to be thread-safe with the 22 | // other methods of Conn, and merely calls the Action's Run method with 23 | // itself as the argument. 24 | Client 25 | 26 | // Encode and Decode may be called at the same time by two different 27 | // go-routines, but each should only be called once at a time (i.e. two 28 | // routines shouldn't call Encode at the same time, same with Decode). 29 | // 30 | // Encode and Decode should _not_ be called at the same time as Do. 31 | // 32 | // If either Encode or Decode encounter a net.Error the Conn will be 33 | // automatically closed. 34 | // 35 | // Encode is expected to encode an entire resp message, not a partial one. 36 | // In other words, when sending commands to redis, Encode should only be 37 | // called once per command. Similarly, Decode is expected to decode an 38 | // entire resp response. 39 | Encode(resp.Marshaler) error 40 | Decode(resp.Unmarshaler) error 41 | 42 | // Returns the underlying network connection, as-is. Read, Write, and Close 43 | // should not be called on the returned Conn. 44 | NetConn() net.Conn 45 | } 46 | 47 | // ConnFunc is a function which returns an initialized, ready-to-be-used Conn. 48 | // Functions like NewPool or NewCluster take in a ConnFunc in order to allow for 49 | // things like calls to AUTH on each new connection, setting timeouts, custom 50 | // Conn implementations, etc... See the package docs for more details. 51 | type ConnFunc func(network, addr string) (Conn, error) 52 | 53 | // DefaultConnFunc is a ConnFunc which will return a Conn for a redis instance 54 | // using sane defaults. 
55 | var DefaultConnFunc = func(network, addr string) (Conn, error) { 56 | return Dial(network, addr) 57 | } 58 | 59 | func wrapDefaultConnFunc(addr string) ConnFunc { 60 | _, opts := parseRedisURL(addr) 61 | return func(network, addr string) (Conn, error) { 62 | return Dial(network, addr, opts...) 63 | } 64 | } 65 | 66 | type connWrap struct { 67 | net.Conn 68 | brw *bufio.ReadWriter 69 | } 70 | 71 | // NewConn takes an existing net.Conn and wraps it to support the Conn interface 72 | // of this package. The Read and Write methods on the original net.Conn should 73 | // not be used after calling this method. 74 | func NewConn(conn net.Conn) Conn { 75 | return &connWrap{ 76 | Conn: conn, 77 | brw: bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), 78 | } 79 | } 80 | 81 | func (cw *connWrap) Do(a Action) error { 82 | return a.Run(cw) 83 | } 84 | 85 | func (cw *connWrap) Encode(m resp.Marshaler) error { 86 | if err := m.MarshalRESP(cw.brw); err != nil { 87 | return err 88 | } 89 | return cw.brw.Flush() 90 | } 91 | 92 | func (cw *connWrap) Decode(u resp.Unmarshaler) error { 93 | return u.UnmarshalRESP(cw.brw.Reader) 94 | } 95 | 96 | func (cw *connWrap) NetConn() net.Conn { 97 | return cw.Conn 98 | } 99 | 100 | type dialOpts struct { 101 | connectTimeout, readTimeout, writeTimeout time.Duration 102 | authUser, authPass string 103 | selectDB string 104 | useTLSConfig bool 105 | tlsConfig *tls.Config 106 | } 107 | 108 | // DialOpt is an optional behavior which can be applied to the Dial function to 109 | // effect its behavior, or the behavior of the Conn it creates. 110 | type DialOpt func(*dialOpts) 111 | 112 | // DialConnectTimeout determines the timeout value to pass into net.DialTimeout 113 | // when creating the connection. If not set then net.Dial is called instead. 
114 | func DialConnectTimeout(d time.Duration) DialOpt { 115 | return func(do *dialOpts) { 116 | do.connectTimeout = d 117 | } 118 | } 119 | 120 | // DialReadTimeout determines the deadline to set when reading from a dialed 121 | // connection. If not set then SetReadDeadline is never called. 122 | func DialReadTimeout(d time.Duration) DialOpt { 123 | return func(do *dialOpts) { 124 | do.readTimeout = d 125 | } 126 | } 127 | 128 | // DialWriteTimeout determines the deadline to set when writing to a dialed 129 | // connection. If not set then SetWriteDeadline is never called. 130 | func DialWriteTimeout(d time.Duration) DialOpt { 131 | return func(do *dialOpts) { 132 | do.writeTimeout = d 133 | } 134 | } 135 | 136 | // DialTimeout is the equivalent to using DialConnectTimeout, DialReadTimeout, 137 | // and DialWriteTimeout all with the same value. 138 | func DialTimeout(d time.Duration) DialOpt { 139 | return func(do *dialOpts) { 140 | DialConnectTimeout(d)(do) 141 | DialReadTimeout(d)(do) 142 | DialWriteTimeout(d)(do) 143 | } 144 | } 145 | 146 | const defaultAuthUser = "default" 147 | 148 | // DialAuthPass will cause Dial to perform an AUTH command once the connection 149 | // is created, using the given pass. 150 | // 151 | // If this is set and a redis URI is passed to Dial which also has a password 152 | // set, this takes precedence. 153 | // 154 | // Using DialAuthPass is equivalent to calling DialAuthUser with user "default" 155 | // and is kept for compatibility with older package versions. 156 | func DialAuthPass(pass string) DialOpt { 157 | return DialAuthUser(defaultAuthUser, pass) 158 | } 159 | 160 | // DialAuthUser will cause Dial to perform an AUTH command once the connection 161 | // is created, using the given user and pass. 162 | // 163 | // If this is set and a redis URI is passed to Dial which also has a username 164 | // and password set, this takes precedence. 
165 | func DialAuthUser(user, pass string) DialOpt { 166 | return func(do *dialOpts) { 167 | do.authUser = user 168 | do.authPass = pass 169 | } 170 | } 171 | 172 | // DialSelectDB will cause Dial to perform a SELECT command once the connection 173 | // is created, using the given database index. 174 | // 175 | // If this is set and a redis URI is passed to Dial which also has a database 176 | // index set, this takes precedence. 177 | func DialSelectDB(db int) DialOpt { 178 | return func(do *dialOpts) { 179 | do.selectDB = strconv.Itoa(db) 180 | } 181 | } 182 | 183 | // DialUseTLS will cause Dial to perform a TLS handshake using the provided 184 | // config. If config is nil the config is interpreted as equivalent to the zero 185 | // configuration. See https://golang.org/pkg/crypto/tls/#Config 186 | func DialUseTLS(config *tls.Config) DialOpt { 187 | return func(do *dialOpts) { 188 | do.tlsConfig = config 189 | do.useTLSConfig = true 190 | } 191 | } 192 | 193 | type timeoutConn struct { 194 | net.Conn 195 | readTimeout, writeTimeout time.Duration 196 | } 197 | 198 | func (tc *timeoutConn) Read(b []byte) (int, error) { 199 | if tc.readTimeout > 0 { 200 | err := tc.Conn.SetReadDeadline(time.Now().Add(tc.readTimeout)) 201 | if err != nil { 202 | return 0, err 203 | } 204 | } 205 | return tc.Conn.Read(b) 206 | } 207 | 208 | func (tc *timeoutConn) Write(b []byte) (int, error) { 209 | if tc.writeTimeout > 0 { 210 | err := tc.Conn.SetWriteDeadline(time.Now().Add(tc.writeTimeout)) 211 | if err != nil { 212 | return 0, err 213 | } 214 | } 215 | return tc.Conn.Write(b) 216 | } 217 | 218 | var defaultDialOpts = []DialOpt{ 219 | DialTimeout(10 * time.Second), 220 | } 221 | 222 | func parseRedisURL(urlStr string) (string, []DialOpt) { 223 | // do a quick check before we bust out url.Parse, in case that is very 224 | // unperformant 225 | if !strings.HasPrefix(urlStr, "redis://") { 226 | return urlStr, nil 227 | } 228 | 229 | u, err := url.Parse(urlStr) 230 | if err != nil { 
231 | return urlStr, nil 232 | } 233 | 234 | q := u.Query() 235 | 236 | username := defaultAuthUser 237 | if n := u.User.Username(); n != "" { 238 | username = n 239 | } else if n := q.Get("username"); n != "" { 240 | username = n 241 | } 242 | 243 | password := q.Get("password") 244 | if p, ok := u.User.Password(); ok { 245 | password = p 246 | } 247 | 248 | opts := []DialOpt{ 249 | DialAuthUser(username, password), 250 | } 251 | 252 | dbStr := q.Get("db") 253 | if u.Path != "" && u.Path != "/" { 254 | dbStr = u.Path[1:] 255 | } 256 | 257 | if dbStr, err := strconv.Atoi(dbStr); err == nil { 258 | opts = append(opts, DialSelectDB(dbStr)) 259 | } 260 | 261 | return u.Host, opts 262 | } 263 | 264 | // Dial is a ConnFunc which creates a Conn using net.Dial and NewConn. It takes 265 | // in a number of options which can overwrite its default behavior as well. 266 | // 267 | // In place of a host:port address, Dial also accepts a URI, as per: 268 | // https://www.iana.org/assignments/uri-schemes/prov/redis 269 | // If the URI has an AUTH password or db specified Dial will attempt to perform 270 | // the AUTH and/or SELECT as well. 271 | // 272 | // If either DialAuthPass or DialSelectDB is used it overwrites the associated 273 | // value passed in by the URI. 
//
// The default options Dial uses are:
//
//	DialTimeout(10 * time.Second)
//
func Dial(network, addr string, opts ...DialOpt) (Conn, error) {
	// options apply in increasing precedence: package defaults, then options
	// parsed from a redis:// URI, then explicitly passed DialOpts.
	var do dialOpts
	for _, opt := range defaultDialOpts {
		opt(&do)
	}
	addr, addrOpts := parseRedisURL(addr)
	for _, opt := range addrOpts {
		opt(&do)
	}
	for _, opt := range opts {
		opt(&do)
	}

	var netConn net.Conn
	var err error
	dialer := net.Dialer{}
	if do.connectTimeout > 0 {
		dialer.Timeout = do.connectTimeout
	}
	if do.useTLSConfig {
		netConn, err = tls.DialWithDialer(&dialer, network, addr, do.tlsConfig)
	} else {
		netConn, err = dialer.Dial(network, addr)
	}

	if err != nil {
		return nil, err
	}

	// If the netConn is a net.TCPConn (or some wrapper for it) and so can have
	// keepalive enabled, do so with a sane (though slightly aggressive)
	// default.
	{
		type keepaliveConn interface {
			SetKeepAlive(bool) error
			SetKeepAlivePeriod(time.Duration) error
		}

		if kaConn, ok := netConn.(keepaliveConn); ok {
			if err = kaConn.SetKeepAlive(true); err != nil {
				netConn.Close()
				return nil, err
			} else if err = kaConn.SetKeepAlivePeriod(10 * time.Second); err != nil {
				netConn.Close()
				return nil, err
			}
		}
	}

	conn := NewConn(&timeoutConn{
		readTimeout:  do.readTimeout,
		writeTimeout: do.writeTimeout,
		Conn:         netConn,
	})

	// a non-default user requires the two-argument (ACL-style) AUTH; otherwise
	// fall back to the legacy single-argument AUTH.
	if do.authUser != "" && do.authUser != defaultAuthUser {
		if err := conn.Do(Cmd(nil, "AUTH", do.authUser, do.authPass)); err != nil {
			conn.Close()
			return nil, err
		}
	} else if do.authPass != "" {
		if err := conn.Do(Cmd(nil, "AUTH", do.authPass)); err != nil {
			conn.Close()
			return nil, err
		}
	}

	if do.selectDB != "" {
		if err := conn.Do(Cmd(nil, "SELECT", do.selectDB)); err != nil {
			conn.Close()
			return nil, err
		}
	}

	return conn, nil
}
--------------------------------------------------------------------------------
/vendor/github.com/mediocregopher/radix/v3/internal/bytesutil/bytesutil.go:
--------------------------------------------------------------------------------
// Package bytesutil provides utility functions for working with bytes and byte streams that are useful when
// working with the RESP protocol.
package bytesutil

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"sync"

	"errors"

	"github.com/mediocregopher/radix/v3/resp"
)

// AnyIntToInt64 converts a value of any of Go's integer types (signed and unsigned) into a signed int64.
//
// If m is not of one of Go's built in integer types the call will panic.
20 | func AnyIntToInt64(m interface{}) int64 { 21 | switch mt := m.(type) { 22 | case int: 23 | return int64(mt) 24 | case int8: 25 | return int64(mt) 26 | case int16: 27 | return int64(mt) 28 | case int32: 29 | return int64(mt) 30 | case int64: 31 | return mt 32 | case uint: 33 | return int64(mt) 34 | case uint8: 35 | return int64(mt) 36 | case uint16: 37 | return int64(mt) 38 | case uint32: 39 | return int64(mt) 40 | case uint64: 41 | return int64(mt) 42 | } 43 | panic(fmt.Sprintf("anyIntToInt64 got bad arg: %#v", m)) 44 | } 45 | 46 | var bytePool = sync.Pool{ 47 | New: func() interface{} { 48 | b := make([]byte, 0, 64) 49 | return &b 50 | }, 51 | } 52 | 53 | // GetBytes returns a non-nil pointer to a byte slice from a pool of byte slices. 54 | // 55 | // The returned byte slice should be put back into the pool using PutBytes after usage. 56 | func GetBytes() *[]byte { 57 | return bytePool.Get().(*[]byte) 58 | } 59 | 60 | // PutBytes puts the given byte slice pointer into a pool that can be accessed via GetBytes. 61 | // 62 | // After calling PutBytes the given pointer and byte slice must not be accessed anymore. 63 | func PutBytes(b *[]byte) { 64 | *b = (*b)[:0] 65 | bytePool.Put(b) 66 | } 67 | 68 | // ParseInt is a specialized version of strconv.ParseInt that parses a base-10 encoded signed integer from a []byte. 69 | // 70 | // This can be used to avoid allocating a string, since strconv.ParseInt only takes a string. 71 | func ParseInt(b []byte) (int64, error) { 72 | if len(b) == 0 { 73 | return 0, errors.New("empty slice given to parseInt") 74 | } 75 | 76 | var neg bool 77 | if b[0] == '-' || b[0] == '+' { 78 | neg = b[0] == '-' 79 | b = b[1:] 80 | } 81 | 82 | n, err := ParseUint(b) 83 | if err != nil { 84 | return 0, err 85 | } 86 | 87 | if neg { 88 | return -int64(n), nil 89 | } 90 | 91 | return int64(n), nil 92 | } 93 | 94 | // ParseUint is a specialized version of strconv.ParseUint that parses a base-10 encoded integer from a []byte. 
95 | // 96 | // This can be used to avoid allocating a string, since strconv.ParseUint only takes a string. 97 | func ParseUint(b []byte) (uint64, error) { 98 | if len(b) == 0 { 99 | return 0, errors.New("empty slice given to parseUint") 100 | } 101 | 102 | var n uint64 103 | 104 | for i, c := range b { 105 | if c < '0' || c > '9' { 106 | return 0, fmt.Errorf("invalid character %c at position %d in parseUint", c, i) 107 | } 108 | 109 | n *= 10 110 | n += uint64(c - '0') 111 | } 112 | 113 | return n, nil 114 | } 115 | 116 | // Expand expands the given byte slice to exactly n bytes. 117 | // 118 | // If cap(b) < n, a new slice will be allocated and filled with the bytes from b. 119 | func Expand(b []byte, n int) []byte { 120 | if cap(b) < n { 121 | nb := make([]byte, n) 122 | copy(nb, b) 123 | return nb 124 | } 125 | return b[:n] 126 | } 127 | 128 | // BufferedBytesDelim reads a line from br and checks that the line ends with \r\n, returning the line without \r\n. 129 | func BufferedBytesDelim(br *bufio.Reader) ([]byte, error) { 130 | b, err := br.ReadSlice('\n') 131 | if err != nil { 132 | return nil, err 133 | } else if len(b) < 2 || b[len(b)-2] != '\r' { 134 | return nil, fmt.Errorf("malformed resp %q", b) 135 | } 136 | return b[:len(b)-2], err 137 | } 138 | 139 | // BufferedIntDelim reads the current line from br as an integer. 140 | func BufferedIntDelim(br *bufio.Reader) (int64, error) { 141 | b, err := BufferedBytesDelim(br) 142 | if err != nil { 143 | return 0, err 144 | } 145 | return ParseInt(b) 146 | } 147 | 148 | // ReadNAppend appends exactly n bytes from r into b. 149 | func ReadNAppend(r io.Reader, b []byte, n int) ([]byte, error) { 150 | if n == 0 { 151 | return b, nil 152 | } 153 | m := len(b) 154 | b = Expand(b, len(b)+n) 155 | _, err := io.ReadFull(r, b[m:]) 156 | return b, err 157 | } 158 | 159 | // ReadNDiscard discards exactly n bytes from r. 
160 | func ReadNDiscard(r io.Reader, n int) error { 161 | type discarder interface { 162 | Discard(int) (int, error) 163 | } 164 | 165 | if n == 0 { 166 | return nil 167 | } 168 | 169 | switch v := r.(type) { 170 | case discarder: 171 | _, err := v.Discard(n) 172 | return err 173 | case io.Seeker: 174 | _, err := v.Seek(int64(n), io.SeekCurrent) 175 | return err 176 | } 177 | 178 | scratch := GetBytes() 179 | defer PutBytes(scratch) 180 | *scratch = (*scratch)[:cap(*scratch)] 181 | if len(*scratch) < n { 182 | *scratch = make([]byte, 8192) 183 | } 184 | 185 | for { 186 | buf := *scratch 187 | if len(buf) > n { 188 | buf = buf[:n] 189 | } 190 | nr, err := r.Read(buf) 191 | n -= nr 192 | if n == 0 || err != nil { 193 | return err 194 | } 195 | } 196 | } 197 | 198 | // ReadInt reads the next n bytes from r as a signed 64 bit integer. 199 | func ReadInt(r io.Reader, n int) (int64, error) { 200 | scratch := GetBytes() 201 | defer PutBytes(scratch) 202 | 203 | var err error 204 | if *scratch, err = ReadNAppend(r, *scratch, n); err != nil { 205 | return 0, err 206 | } 207 | i, err := ParseInt(*scratch) 208 | if err != nil { 209 | return 0, resp.ErrDiscarded{Err: err} 210 | } 211 | return i, nil 212 | } 213 | 214 | // ReadUint reads the next n bytes from r as an unsigned 64 bit integer. 215 | func ReadUint(r io.Reader, n int) (uint64, error) { 216 | scratch := GetBytes() 217 | defer PutBytes(scratch) 218 | 219 | var err error 220 | if *scratch, err = ReadNAppend(r, *scratch, n); err != nil { 221 | return 0, err 222 | } 223 | ui, err := ParseUint(*scratch) 224 | if err != nil { 225 | return 0, resp.ErrDiscarded{Err: err} 226 | } 227 | return ui, nil 228 | } 229 | 230 | // ReadFloat reads the next n bytes from r as a 64 bit floating point number with the given precision. 
func ReadFloat(r io.Reader, precision, n int) (float64, error) {
	scratch := GetBytes()
	defer PutBytes(scratch)

	var err error
	if *scratch, err = ReadNAppend(r, *scratch, n); err != nil {
		return 0, err
	}
	f, err := strconv.ParseFloat(string(*scratch), precision)
	if err != nil {
		// the bytes were fully consumed, so the error is "discarded"
		return 0, resp.ErrDiscarded{Err: err}
	}
	return f, nil
}
--------------------------------------------------------------------------------
/vendor/github.com/mediocregopher/radix/v3/pipeliner.go:
--------------------------------------------------------------------------------
package radix

import (
	"bufio"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/mediocregopher/radix/v3/resp"
)

// blockingCmds lists commands which may block server-side and therefore must
// never be implicitly pipelined with other commands.
var blockingCmds = map[string]bool{
	"WAIT": true,

	// taken from https://github.com/joomcode/redispipe#limitations
	"BLPOP":      true,
	"BRPOP":      true,
	"BRPOPLPUSH": true,

	"BZPOPMIN": true,
	"BZPOPMAX": true,

	"XREAD":      true,
	"XREADGROUP": true,

	"SAVE": true,
}

// pipeliner batches individual CmdActions from many goroutines into pipelines
// which are flushed either when `limit` commands have accumulated or when the
// `window` timer fires, whichever comes first.
type pipeliner struct {
	c Client

	limit  int
	window time.Duration

	// reqsBufCh contains buffers for collecting commands and acts as a semaphore
	// to limit the number of concurrent flushes.
	reqsBufCh chan []CmdAction

	reqCh chan *pipelinerCmd
	reqWG sync.WaitGroup

	// l guards closed; Do takes the read side so many callers can submit
	// concurrently, Close takes the write side.
	l      sync.RWMutex
	closed bool
}

var _ Client = (*pipeliner)(nil)

// newPipeliner starts the background request loop and pre-fills the flush
// semaphore with `concurrency` buffers.
func newPipeliner(c Client, concurrency, limit int, window time.Duration) *pipeliner {
	if concurrency < 1 {
		concurrency = 1
	}

	p := &pipeliner{
		c: c,

		limit:  limit,
		window: window,

		reqsBufCh: make(chan []CmdAction, concurrency),

		reqCh: make(chan *pipelinerCmd, 32), // https://xkcd.com/221/
	}

	p.reqWG.Add(1)
	go func() {
		defer p.reqWG.Done()
		p.reqLoop()
	}()

	// seed one buffer per allowed concurrent flush; with no limit the buffers
	// start nil and grow as needed
	for i := 0; i < cap(p.reqsBufCh); i++ {
		if p.limit > 0 {
			p.reqsBufCh <- make([]CmdAction, 0, limit)
		} else {
			p.reqsBufCh <- nil
		}
	}

	return p
}

// CanDo checks if the given Action can be executed / passed to p.Do.
//
// If CanDo returns false, the Action must not be given to Do.
func (p *pipeliner) CanDo(a Action) bool {
	// there is currently no way to get the command for CmdAction implementations
	// from outside the radix package so we can not multiplex those commands. User
	// defined pipelines are not pipelined to let the user better control them.
	if cmdA, ok := a.(*cmdAction); ok {
		return !blockingCmds[strings.ToUpper(cmdA.cmd)]
	}
	return false
}

// Do executes the given Action as part of the pipeline.
//
// If a is not a CmdAction, Do panics.
func (p *pipeliner) Do(a Action) error {
	req := getPipelinerCmd(a.(CmdAction)) // get this outside the lock to keep the critical section small

	p.l.RLock()
	if p.closed {
		p.l.RUnlock()
		return errClientClosed
	}
	// send under the read lock so Close can't close reqCh concurrently
	p.reqCh <- req
	p.l.RUnlock()

	err := <-req.resCh
	poolPipelinerCmd(req)
	return err
}

// Close closes the pipeliner and makes sure that all background goroutines
// are stopped before returning.
//
// Close does *not* close the underlying Client.
func (p *pipeliner) Close() error {
	p.l.Lock()
	defer p.l.Unlock()

	if p.closed {
		return nil
	}

	close(p.reqCh)
	p.reqWG.Wait()

	// drain every buffer from the semaphore, which also waits for any
	// in-flight flush goroutines to finish (they return their buffer when done)
	for i := 0; i < cap(p.reqsBufCh); i++ {
		<-p.reqsBufCh
	}

	p.c, p.closed = nil, true
	return nil
}

// reqLoop is the single goroutine which batches incoming commands and decides
// when to flush, either on hitting the size limit or on the window timer.
func (p *pipeliner) reqLoop() {
	t := getTimer(time.Hour)
	defer putTimer(t)

	t.Stop()

	reqs := <-p.reqsBufCh
	defer func() {
		p.reqsBufCh <- reqs
	}()

	for {
		select {
		case req, ok := <-p.reqCh:
			if !ok {
				// reqCh was closed by Close; flush whatever is pending and exit
				reqs = p.flush(reqs)
				return
			}

			reqs = append(reqs, req)

			if p.limit > 0 && len(reqs) == p.limit {
				// if we reached the pipeline limit, execute now to avoid unnecessary waiting
				t.Stop()

				reqs = p.flush(reqs)
			} else if len(reqs) == 1 {
				// first command of a new batch starts the window timer
				t.Reset(p.window)
			}
		case <-t.C:
			reqs = p.flush(reqs)
		}
	}
}

// flush hands the current batch to a goroutine for execution and returns a
// fresh buffer taken from the semaphore, blocking if all flush slots are busy.
func (p *pipeliner) flush(reqs []CmdAction) []CmdAction {
	if len(reqs) == 0 {
		return reqs
	}

	go func() {
		defer func() {
			// return the (emptied) buffer, releasing the flush slot
			p.reqsBufCh <- reqs[:0]
		}()

		pp := &pipelinerPipeline{pipeline: pipeline(reqs)}
		// flush delivers a result to every command even if Do failed
		defer pp.flush()

		if err := p.c.Do(pp); err != nil {
			pp.doErr = err
		}
	}()

	return <-p.reqsBufCh
}

// pipelinerCmd wraps a CmdAction so its decode result can be delivered back to
// the goroutine blocked in pipeliner.Do.
type pipelinerCmd struct {
	CmdAction

	resCh chan error

	// unmarshalCalled distinguishes "the command's own decode failed" from
	// "the pipeline as a whole failed before reaching this command".
	unmarshalCalled bool
	unmarshalErr    error
}

var (
	_ resp.Unmarshaler = (*pipelinerCmd)(nil)
)

func (p *pipelinerCmd) sendRes(err error) {
	p.resCh <- err
}

// UnmarshalRESP records the result of decoding this command's response.
func (p *pipelinerCmd) UnmarshalRESP(br *bufio.Reader) error {
	p.unmarshalErr = p.CmdAction.UnmarshalRESP(br)
	p.unmarshalCalled = true // important: we set this after unmarshalErr in case the call to UnmarshalRESP panics
	return p.unmarshalErr
}

var pipelinerCmdPool sync.Pool

// getPipelinerCmd returns a pooled pipelinerCmd wrapping cmd, reusing the
// previous resCh when available.
func getPipelinerCmd(cmd CmdAction) *pipelinerCmd {
	req, _ := pipelinerCmdPool.Get().(*pipelinerCmd)
	if req != nil {
		// reset all fields, keeping only the already-allocated channel
		*req = pipelinerCmd{
			CmdAction: cmd,
			resCh:     req.resCh,
		}
		return req
	}
	return &pipelinerCmd{
		CmdAction: cmd,
		// using a buffer of 1 is faster than no buffer in most cases
		resCh: make(chan error, 1),
	}
}

// poolPipelinerCmd returns req to the pool; the wrapped CmdAction is dropped
// so it can be collected.
func poolPipelinerCmd(req *pipelinerCmd) {
	req.CmdAction = nil
	pipelinerCmdPool.Put(req)
}

// pipelinerPipeline is the pipeline Action actually run against the Client,
// carrying any whole-pipeline error in doErr.
type pipelinerPipeline struct {
	pipeline
	doErr error
}

// flush delivers a result to every command in the pipeline: its own decode
// error when decoding was reached, otherwise the pipeline-level error.
func (p *pipelinerPipeline) flush() {
	for _, req := range p.pipeline {
		var err error

		cmd := req.(*pipelinerCmd)
		if cmd.unmarshalCalled {
			err = cmd.unmarshalErr
		} else {
			err = p.doErr
		}
		cmd.sendRes(err)
	}
}

// Run encodes all commands in one write, then decodes one response per
// command. A panic anywhere is converted into a returned error.
func (p *pipelinerPipeline) Run(c Conn) (err error) {
	defer func() {
		if v := recover(); v != nil {
			err = fmt.Errorf("%s", v)
		}
	}()
	if err := c.Encode(p); err != nil {
		return err
	}
	errConn := ioErrConn{Conn: c}
	for _, req := range p.pipeline {
		// keep decoding past per-command errors, but stop on an IO error since
		// the connection state is then unknown
		if _ = errConn.Decode(req); errConn.lastIOErr != nil {
			return errConn.lastIOErr
		}
	}
	return nil
}

--------------------------------------------------------------------------------
/vendor/github.com/mediocregopher/radix/v3/pubsub.go:
--------------------------------------------------------------------------------
package radix

import (
	"bufio"
	"bytes"
	"io"
	"net"
	"sync"
	"time"

	"errors"

	"github.com/mediocregopher/radix/v3/resp"
	"github.com/mediocregopher/radix/v3/resp/resp2"
)

// PubSubMessage describes a message being published to a subscribed channel.
type PubSubMessage struct {
	Type    string // "message" or "pmessage"
	Pattern string // will be set if Type is "pmessage"
	Channel string
	Message []byte
}

// MarshalRESP implements the Marshaler interface.
func (m PubSubMessage) MarshalRESP(w io.Writer) error {
	var err error
	// marshal latches the first error and makes subsequent calls no-ops
	marshal := func(m resp.Marshaler) {
		if err == nil {
			err = m.MarshalRESP(w)
		}
	}

	if m.Type == "message" {
		marshal(resp2.ArrayHeader{N: 3})
		marshal(resp2.BulkString{S: m.Type})
	} else if m.Type == "pmessage" {
		// pmessage carries an extra element: the pattern that matched
		marshal(resp2.ArrayHeader{N: 4})
		marshal(resp2.BulkString{S: m.Type})
		marshal(resp2.BulkString{S: m.Pattern})
	} else {
		return errors.New("unknown message Type")
	}
	marshal(resp2.BulkString{S: m.Channel})
	marshal(resp2.BulkStringBytes{B: m.Message})
	return err
}

var errNotPubSubMessage = errors.New("message is not a PubSubMessage")

// UnmarshalRESP implements the Unmarshaler interface.
func (m *PubSubMessage) UnmarshalRESP(br *bufio.Reader) error {
	// This method will fully consume the message on the wire, regardless of if
	// it is a PubSubMessage or not. If it is not then errNotPubSubMessage is
	// returned.

	// When in subscribe mode redis only allows (P)(UN)SUBSCRIBE commands, which
	// all return arrays, and PING, which returns an array when in subscribe
	// mode. HOWEVER, when all channels have been unsubscribed from then the
	// connection will be taken _out_ of subscribe mode. This is theoretically
	// fine, since the driver will still only allow the 5 commands, except PING
	// will return a simple string when in the non-subscribed state. So this
	// needs to check for that.
	if prefix, err := br.Peek(1); err != nil {
		return err
	} else if bytes.Equal(prefix, resp2.SimpleStringPrefix) {
		// if it's a simple string, discard it (it's probably PONG) and error
		if err := (resp2.Any{}).UnmarshalRESP(br); err != nil {
			return err
		}
		return resp.ErrDiscarded{Err: errNotPubSubMessage}
	}

	var ah resp2.ArrayHeader
	if err := ah.UnmarshalRESP(br); err != nil {
		return err
	} else if ah.N < 2 {
		return errors.New("message has too few elements")
	}

	var msgType resp2.BulkStringBytes
	if err := msgType.UnmarshalRESP(br); err != nil {
		return err
	}

	switch string(msgType.B) {
	case "message":
		m.Type = "message"
		if ah.N != 3 {
			return errors.New("message has wrong number of elements")
		}
	case "pmessage":
		m.Type = "pmessage"
		if ah.N != 4 {
			return errors.New("message has wrong number of elements")
		}

		var pattern resp2.BulkString
		if err := pattern.UnmarshalRESP(br); err != nil {
			return err
		}
		m.Pattern = pattern.S
	default:
		// if it's not a PubSubMessage then discard the rest of the array
		for i := 1; i < ah.N; i++ {
			if err := (resp2.Any{}).UnmarshalRESP(br); err != nil {
				return err
			}
		}
		return errNotPubSubMessage
	}

	var channel resp2.BulkString
	if err := channel.UnmarshalRESP(br); err != nil {
		return err
	}
	m.Channel = channel.S

	var msg resp2.BulkStringBytes
	if err := msg.UnmarshalRESP(br); err != nil {
		return err
	}
	m.Message = msg.B

	return nil
}

////////////////////////////////////////////////////////////////////////////////

// chanSet maps a subscription name (channel or pattern) to the set of
// user-provided message channels subscribed to it.
type chanSet map[string]map[chan<- PubSubMessage]bool

// add registers ch under s, creating the inner set if needed.
func (cs chanSet) add(s string, ch chan<- PubSubMessage) {
	m, ok := cs[s]
	if !ok {
		m = map[chan<- PubSubMessage]bool{}
		cs[s] = m
	}
	m[ch] = true
}

// del removes ch from s, returning true when s no longer has any subscribers
// (i.e. the redis-side subscription can be dropped).
func (cs chanSet) del(s string, ch chan<- PubSubMessage) bool {
	m, ok := cs[s]
	if !ok {
		return true
	}
	delete(m, ch)
	if len(m) == 0 {
		delete(cs, s)
		return true
	}
	return false
}

// missing returns the subset of ss which have no subscribers yet.
func (cs chanSet) missing(ss []string) []string {
	out := make([]string, 0, len(ss))
	for _, s := range ss {
		if _, ok := cs[s]; !ok {
			out = append(out, s)
		}
	}
	return out
}

// inverse flips the mapping: for each message channel, the list of
// subscription names it is registered under.
func (cs chanSet) inverse() map[chan<- PubSubMessage][]string {
	inv := map[chan<- PubSubMessage][]string{}
	for s, m := range cs {
		for ch := range m {
			inv[ch] = append(inv[ch], s)
		}
	}
	return inv
}

////////////////////////////////////////////////////////////////////////////////

// PubSubConn wraps an existing Conn to support redis' pubsub system.
// User-created channels can be subscribed to redis channels to receive
// PubSubMessages which have been published.
//
// If any methods return an error it means the PubSubConn has been Close'd and
// subscribed msgCh's will no longer receive PubSubMessages from it. All methods
// are threadsafe, but should be called in a different go-routine than that
// which is reading from the PubSubMessage channels.
//
// NOTE the PubSubMessage channels should never block. If any channels block
// when being written to they will block all other channels from receiving a
// publish and block methods from returning.
type PubSubConn interface {
	// Subscribe subscribes the PubSubConn to the given set of channels. msgCh
	// will receive a PubSubMessage for every publish written to any of the
	// channels. This may be called multiple times for the same channels and
	// different msgCh's, each msgCh will receive a copy of the PubSubMessage
	// for each publish.
	Subscribe(msgCh chan<- PubSubMessage, channels ...string) error

	// Unsubscribe unsubscribes the msgCh from the given set of channels, if it
	// was subscribed at all.
	//
	// NOTE even if msgCh is not subscribed to any other redis channels, it
	// should still be considered "active", and therefore still be having
	// messages read from it, until Unsubscribe has returned
	Unsubscribe(msgCh chan<- PubSubMessage, channels ...string) error

	// PSubscribe is like Subscribe, but it subscribes msgCh to a set of
	// patterns and not individual channels.
	PSubscribe(msgCh chan<- PubSubMessage, patterns ...string) error

	// PUnsubscribe is like Unsubscribe, but it unsubscribes msgCh from a set of
	// patterns and not individual channels.
	//
	// NOTE even if msgCh is not subscribed to any other redis channels, it
	// should still be considered "active", and therefore still be having
	// messages read from it, until PUnsubscribe has returned
	PUnsubscribe(msgCh chan<- PubSubMessage, patterns ...string) error

	// Ping performs a simple Ping command on the PubSubConn, returning an error
	// if it failed for some reason
	Ping() error

	// Close closes the PubSubConn so it can't be used anymore. All subscribed
	// channels will stop receiving PubSubMessages from this Conn (but will not
	// themselves be closed).
	//
	// NOTE all msgChs should be considered "active", and therefore still be
	// having messages read from them, until Close has returned.
	Close() error
}

// pubSubConn is the default PubSubConn implementation, wrapping a single Conn.
type pubSubConn struct {
	conn Conn

	// csL guards subs/psubs; held as RLock by publish (reader side) and as
	// Lock when (un)subscribing or closing.
	csL   sync.RWMutex
	subs  chanSet
	psubs chanSet

	// These are used for writing commands and waiting for their response (e.g.
	// SUBSCRIBE, PING). See the do method for how that works.
	cmdL     sync.Mutex
	cmdResCh chan error

	close    sync.Once
	closeErr error

	// This one is optional, and kind of cheating. We use it in persistent to
	// get on-the-fly updates of when the connection fails. Maybe one day this
	// could be exposed if there's a clean way of doing so, or another way
	// accomplishing the same thing could be done instead.
	closeErrCh chan error

	// only used during testing
	testEventCh chan string
}

// PubSub wraps the given Conn so that it becomes a PubSubConn. The passed in
// Conn should not be used after this call.
func PubSub(rc Conn) PubSubConn {
	return newPubSub(rc, nil)
}

// newPubSub constructs a pubSubConn and starts its reader and keepalive
// goroutines. closeErrCh may be nil; if set it receives the terminal error
// (see closeErrCh field comment).
func newPubSub(rc Conn, closeErrCh chan error) PubSubConn {
	c := &pubSubConn{
		conn:       rc,
		subs:       chanSet{},
		psubs:      chanSet{},
		cmdResCh:   make(chan error, 1),
		closeErrCh: closeErrCh,
	}
	go c.spin()

	// Periodically call Ping so the connection has a keepalive on the
	// application level. If the Conn is closed Ping will return an error and
	// this will clean itself up.
	go func() {
		t := time.NewTicker(5 * time.Second)
		defer t.Stop()
		for range t.C {
			if err := c.Ping(); err != nil {
				return
			}
		}
	}()

	return c
}

// testEvent emits str on testEventCh, if set. No-op outside tests.
func (c *pubSubConn) testEvent(str string) {
	if c.testEventCh != nil {
		c.testEventCh <- str
	}
}

// publish fans m out to every channel subscribed to m's channel/pattern.
// NOTE a blocking subscriber channel blocks all other subscribers too (see
// the PubSubConn doc).
func (c *pubSubConn) publish(m PubSubMessage) {
	c.csL.RLock()
	defer c.csL.RUnlock()

	var subs map[chan<- PubSubMessage]bool
	if m.Type == "pmessage" {
		subs = c.psubs[m.Pattern]
	} else {
		subs = c.subs[m.Channel]
	}

	for ch := range subs {
		ch <- m
	}
}

// spin is the single reader goroutine: it decodes messages off the Conn,
// routes pubsub messages to publish, routes command responses to cmdResCh,
// and tears the conn down on any other error.
func (c *pubSubConn) spin() {
	for {
		var m PubSubMessage
		err := c.conn.Decode(&m)
		if nerr := net.Error(nil); errors.As(err, &nerr) && nerr.Timeout() {
			c.testEvent("timeout")
			continue
		} else if errors.Is(err, errNotPubSubMessage) {
			c.cmdResCh <- nil
			continue
		} else if err != nil {
			// closeInner returns the error from closing the Conn, which doesn't
			// really matter here.
			_ = c.closeInner(err)
			return
		}
		c.publish(m)
	}
}

// NOTE cmdL _must_ be held to use do.
// do encodes cmd and waits for exp responses to be relayed from spin via
// cmdResCh (e.g. SUBSCRIBE returns one response per channel).
func (c *pubSubConn) do(exp int, cmd string, args ...string) error {
	rcmd := Cmd(nil, cmd, args...)
	if err := c.conn.Encode(rcmd); err != nil {
		return err
	}

	for i := 0; i < exp; i++ {
		err, ok := <-c.cmdResCh
		if err != nil {
			return err
		} else if !ok {
			// cmdResCh was closed by closeInner.
			return errors.New("connection closed")
		}
	}
	return nil
}

// closeInner performs the one-time teardown: closes the Conn, drops all
// subscriptions, and unblocks any do call waiting on cmdResCh. cmdResErr is
// the error that triggered the close (nil for a user-initiated Close).
func (c *pubSubConn) closeInner(cmdResErr error) error {
	c.close.Do(func() {
		c.csL.Lock()
		defer c.csL.Unlock()
		c.closeErr = c.conn.Close()
		c.subs = nil
		c.psubs = nil

		if cmdResErr != nil {
			// Non-blocking: a waiting do call gets the error; otherwise drop it.
			select {
			case c.cmdResCh <- cmdResErr:
			default:
			}
		}
		if c.closeErrCh != nil {
			c.closeErrCh <- cmdResErr
			close(c.closeErrCh)
		}
		close(c.cmdResCh)
	})
	return c.closeErr
}

func (c *pubSubConn) Close() error {
	return c.closeInner(nil)
}

func (c *pubSubConn) Subscribe(msgCh chan<- PubSubMessage, channels ...string) error {
	c.cmdL.Lock()
	defer c.cmdL.Unlock()

	// Only send SUBSCRIBE for channels not already subscribed on the wire.
	c.csL.RLock()
	missing := c.subs.missing(channels)
	c.csL.RUnlock()

	if len(missing) > 0 {
		if err := c.do(len(missing), "SUBSCRIBE", missing...); err != nil {
			return err
		}
	}

	c.csL.Lock()
	for _, channel := range channels {
		c.subs.add(channel, msgCh)
	}
	c.csL.Unlock()

	return nil
}

func (c *pubSubConn) Unsubscribe(msgCh chan<- PubSubMessage, channels ...string) error {
	c.cmdL.Lock()
	defer c.cmdL.Unlock()

	// Only send UNSUBSCRIBE for channels which now have zero subscribers.
	c.csL.Lock()
	emptyChannels := make([]string, 0, len(channels))
	for _, channel := range channels {
		if empty := c.subs.del(channel, msgCh); empty {
			emptyChannels = append(emptyChannels, channel)
		}
	}
	c.csL.Unlock()

	if len(emptyChannels) == 0 {
		return nil
	}

	return c.do(len(emptyChannels), "UNSUBSCRIBE", emptyChannels...)
}

func (c *pubSubConn) PSubscribe(msgCh chan<- PubSubMessage, patterns ...string) error {
	c.cmdL.Lock()
	defer c.cmdL.Unlock()

	c.csL.RLock()
	missing := c.psubs.missing(patterns)
	c.csL.RUnlock()

	if len(missing) > 0 {
		if err := c.do(len(missing), "PSUBSCRIBE", missing...); err != nil {
			return err
		}
	}

	c.csL.Lock()
	for _, pattern := range patterns {
		c.psubs.add(pattern, msgCh)
	}
	c.csL.Unlock()

	return nil
}

func (c *pubSubConn) PUnsubscribe(msgCh chan<- PubSubMessage, patterns ...string) error {
	c.cmdL.Lock()
	defer c.cmdL.Unlock()

	c.csL.Lock()
	emptyPatterns := make([]string, 0, len(patterns))
	for _, pattern := range patterns {
		if empty := c.psubs.del(pattern, msgCh); empty {
			emptyPatterns = append(emptyPatterns, pattern)
		}
	}
	c.csL.Unlock()

	if len(emptyPatterns) == 0 {
		return nil
	}

	return c.do(len(emptyPatterns), "PUNSUBSCRIBE", emptyPatterns...)
}

func (c *pubSubConn) Ping() error {
	c.cmdL.Lock()
	defer c.cmdL.Unlock()

	return c.do(1, "PING")
}
--------------------------------------------------------------------------------
/vendor/github.com/mediocregopher/radix/v3/pubsub_persistent.go:
--------------------------------------------------------------------------------
package radix

import (
	"fmt"
	"sync"
	"time"
)

// persistentPubSubOpts holds the configuration assembled from
// PersistentPubSubOpt functional options.
type persistentPubSubOpts struct {
	connFn     ConnFunc
	abortAfter int
	errCh      chan<- error
}

// PersistentPubSubOpt is an optional parameter which can be passed into
// PersistentPubSub in order to affect its behavior.
type PersistentPubSubOpt func(*persistentPubSubOpts)

// PersistentPubSubConnFunc causes PersistentPubSub to use the given ConnFunc
// when connecting to its destination.
func PersistentPubSubConnFunc(connFn ConnFunc) PersistentPubSubOpt {
	return func(opts *persistentPubSubOpts) {
		opts.connFn = connFn
	}
}

// PersistentPubSubAbortAfter changes PersistentPubSub's reconnect behavior.
// Usually PersistentPubSub will try to reconnect forever upon a disconnect,
// blocking any methods which have been called until reconnect is successful.
//
// When PersistentPubSubAbortAfter is used, it will give up after that many
// attempts and return the error to the method which has been blocked the
// longest. Another method will need to be called in order for PersistentPubSub
// to resume trying to reconnect.
func PersistentPubSubAbortAfter(attempts int) PersistentPubSubOpt {
	return func(opts *persistentPubSubOpts) {
		opts.abortAfter = attempts
	}
}

// PersistentPubSubErrCh takes a channel which asynchronous errors
// encountered by the PersistentPubSub can be read off of. If the channel blocks
// the error will be dropped. The channel will be closed when PersistentPubSub
// is closed.
func PersistentPubSubErrCh(errCh chan<- error) PersistentPubSubOpt {
	return func(opts *persistentPubSubOpts) {
		opts.errCh = errCh
	}
}

// pubSubCmd is a single request sent to the persistentPubSub event loop
// (see persistentPubSub.spin). Exactly one of the action fields is set.
type pubSubCmd struct {
	// msgCh can be set along with one of subscribe/unsubscribe/etc...
	msgCh                                            chan<- PubSubMessage
	subscribe, unsubscribe, psubscribe, punsubscribe []string

	// ... or one of ping or close can be set
	ping, close bool

	// resCh is always set
	resCh chan error
}

// persistentPubSub is a PubSubConn which transparently redials and
// re-subscribes when its underlying connection dies. All state is owned by
// the spin goroutine; public methods communicate with it via cmdCh.
type persistentPubSub struct {
	dial func() (Conn, error)
	opts persistentPubSubOpts

	// subs/psubs record the desired subscription state, replayed onto every
	// fresh connection by refresh.
	subs, psubs chanSet

	curr      PubSubConn
	currErrCh chan error

	cmdCh chan pubSubCmd

	closeErr  error
	closeCh   chan struct{}
	closeOnce sync.Once
}

// PersistentPubSubWithOpts is like PubSub, but instead of taking in an existing
// Conn to wrap it will create one on the fly. If the connection is ever
// terminated then a new one will be created and will be reset to the previous
// connection's state.
//
// This is effectively a way to have a permanent PubSubConn established which
// supports subscribing/unsubscribing but without the hassle of implementing
// reconnect/re-subscribe logic.
//
// With default options, neither this function nor any of the methods on the
// returned PubSubConn will ever return an error, they will instead block until
// a connection can be successfully reinstated.
//
// PersistentPubSubWithOpts takes in a number of options which can overwrite its
// default behavior. The default options PersistentPubSubWithOpts uses are:
//
//	PersistentPubSubConnFunc(DefaultConnFunc)
//
func PersistentPubSubWithOpts(
	network, addr string, options ...PersistentPubSubOpt,
) (
	PubSubConn, error,
) {
	opts := persistentPubSubOpts{
		connFn: DefaultConnFunc,
	}
	for _, opt := range options {
		opt(&opts)
	}

	p := &persistentPubSub{
		dial:    func() (Conn, error) { return opts.connFn(network, addr) },
		opts:    opts,
		subs:    chanSet{},
		psubs:   chanSet{},
		cmdCh:   make(chan pubSubCmd),
		closeCh: make(chan struct{}),
	}
	// Establish the initial connection synchronously; only fails if
	// PersistentPubSubAbortAfter was given and the attempts were exhausted.
	if err := p.refresh(); err != nil {
		return nil, err
	}
	go p.spin()
	return p, nil
}

// PersistentPubSub is like PersistentPubSubWithOpts with only the ConnFunc
// option (which may be nil for the default).
//
// Deprecated: use PersistentPubSubWithOpts instead.
func PersistentPubSub(network, addr string, connFn ConnFunc) PubSubConn {
	var opts []PersistentPubSubOpt
	if connFn != nil {
		opts = append(opts, PersistentPubSubConnFunc(connFn))
	}
	// since PersistentPubSubAbortAfter isn't used, this will never return an
	// error, panic if it does
	p, err := PersistentPubSubWithOpts(network, addr, opts...)
	if err != nil {
		panic(fmt.Sprintf("PersistentPubSubWithOpts impossibly returned an error: %v", err))
	}
	return p
}

// refresh only returns an error if the connection could not be made.
func (p *persistentPubSub) refresh() error {
	// Discard the previous connection (if any) and wait for its error channel
	// to confirm it has fully shut down.
	if p.curr != nil {
		p.curr.Close()
		<-p.currErrCh
		p.curr = nil
		p.currErrCh = nil
	}

	// attempt dials once and replays the recorded subscription state onto the
	// fresh connection.
	attempt := func() (PubSubConn, chan error, error) {
		c, err := p.dial()
		if err != nil {
			return nil, nil, err
		}
		errCh := make(chan error, 1)
		pc := newPubSub(c, errCh)

		for msgCh, channels := range p.subs.inverse() {
			if err := pc.Subscribe(msgCh, channels...); err != nil {
				pc.Close()
				return nil, nil, err
			}
		}

		for msgCh, patterns := range p.psubs.inverse() {
			if err := pc.PSubscribe(msgCh, patterns...); err != nil {
				pc.Close()
				return nil, nil, err
			}
		}
		return pc, errCh, nil
	}

	// Retry forever, unless abortAfter caps the number of attempts.
	var attempts int
	for {
		var err error
		if p.curr, p.currErrCh, err = attempt(); err == nil {
			return nil
		}
		attempts++
		if p.opts.abortAfter > 0 && attempts >= p.opts.abortAfter {
			return err
		}
		time.Sleep(200 * time.Millisecond)
	}
}

// execCmd performs a single pubSubCmd against the current connection,
// re-establishing it first if needed. Only ever called from the spin
// goroutine, so no locking is required.
func (p *persistentPubSub) execCmd(cmd pubSubCmd) error {
	if p.curr == nil {
		if err := p.refresh(); err != nil {
			return err
		}
	}

	// For all subscribe/unsubscribe/etc... commands the modifications to
	// p.subs/p.psubs are made first, so that if the actual call to curr fails
	// then refresh will still instate the new desired subscription.
	var err error
	switch {
	case len(cmd.subscribe) > 0:
		for _, channel := range cmd.subscribe {
			p.subs.add(channel, cmd.msgCh)
		}
		err = p.curr.Subscribe(cmd.msgCh, cmd.subscribe...)

	case len(cmd.unsubscribe) > 0:
		for _, channel := range cmd.unsubscribe {
			p.subs.del(channel, cmd.msgCh)
		}
		err = p.curr.Unsubscribe(cmd.msgCh, cmd.unsubscribe...)

	case len(cmd.psubscribe) > 0:
		for _, channel := range cmd.psubscribe {
			p.psubs.add(channel, cmd.msgCh)
		}
		err = p.curr.PSubscribe(cmd.msgCh, cmd.psubscribe...)

	case len(cmd.punsubscribe) > 0:
		for _, channel := range cmd.punsubscribe {
			p.psubs.del(channel, cmd.msgCh)
		}
		err = p.curr.PUnsubscribe(cmd.msgCh, cmd.punsubscribe...)

	case cmd.ping:
		err = p.curr.Ping()

	case cmd.close:
		if p.curr != nil {
			err = p.curr.Close()
			<-p.currErrCh
		}

	default:
		// don't do anything I guess
	}

	if err != nil {
		// The command may or may not have taken effect; reconnect so the
		// recorded desired state is re-instated either way.
		return p.refresh()
	}
	return nil
}

// err forwards an asynchronous error to the user-supplied error channel
// (PersistentPubSubErrCh), dropping it if the channel would block.
func (p *persistentPubSub) err(err error) {
	select {
	case p.opts.errCh <- err:
	default:
	}
}

// spin is the event loop: it reacts to connection failures by refreshing,
// and serially executes commands sent via cmdCh until a close command.
func (p *persistentPubSub) spin() {
	for {
		select {
		case err := <-p.currErrCh:
			p.err(err)
			if err := p.refresh(); err != nil {
				p.err(err)
			}
		case cmd := <-p.cmdCh:
			cmd.resCh <- p.execCmd(cmd)
			if cmd.close {
				return
			}
		}
	}
}

// cmd submits a pubSubCmd to the spin goroutine and waits for its result.
func (p *persistentPubSub) cmd(cmd pubSubCmd) error {
	cmd.resCh = make(chan error, 1)
	select {
	case p.cmdCh <- cmd:
		return <-cmd.resCh
	case <-p.closeCh:
		return fmt.Errorf("closed")
	}
}

func (p *persistentPubSub) Subscribe(msgCh chan<- PubSubMessage, channels ...string) error {
	return p.cmd(pubSubCmd{
		msgCh:     msgCh,
		subscribe: channels,
	})
}

func (p *persistentPubSub) Unsubscribe(msgCh chan<- PubSubMessage, channels ...string) error {
	return p.cmd(pubSubCmd{
		msgCh:       msgCh,
		unsubscribe: channels,
	})
}

func (p *persistentPubSub) PSubscribe(msgCh chan<- PubSubMessage, channels ...string) error {
	return p.cmd(pubSubCmd{
		msgCh:      msgCh,
		psubscribe: channels,
	})
}

func (p *persistentPubSub) PUnsubscribe(msgCh chan<- PubSubMessage, channels ...string) error {
	return p.cmd(pubSubCmd{
		msgCh:        msgCh,
		punsubscribe: channels,
	})
}

func (p *persistentPubSub) Ping() error {
	return p.cmd(pubSubCmd{ping: true})
}

func (p *persistentPubSub) Close() error {
	p.closeOnce.Do(func() {
		p.closeErr = p.cmd(pubSubCmd{close: true})
		close(p.closeCh)
		if p.opts.errCh != nil {
			close(p.opts.errCh)
		}
	})
	return p.closeErr
}
--------------------------------------------------------------------------------
/vendor/github.com/mediocregopher/radix/v3/pubsub_stub.go:
--------------------------------------------------------------------------------
package radix

import (
	"fmt"
	"io"
	"strings"
	"sync"

	"errors"

	"github.com/mediocregopher/radix/v3/resp"
	"github.com/mediocregopher/radix/v3/resp/resp2"
)

// errPubSubMode mirrors the error a real redis server returns for non-pubsub
// commands while in subscribed mode.
var errPubSubMode = resp2.Error{
	E: errors.New("ERR only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT allowed in this context"),
}

// multiMarshal marshals each contained Marshaler in sequence, allowing a
// single command to produce multiple RESP replies (as (P)SUBSCRIBE does).
type multiMarshal []resp.Marshaler

func (mm multiMarshal) MarshalRESP(w io.Writer) error {
	for _, m := range mm {
		if err := m.MarshalRESP(w); err != nil {
			return err
		}
	}
	return nil
}

type pubSubStub struct {
	Conn
	fn   func([]string) interface{}
	inCh <-chan PubSubMessage

	closeOnce sync.Once
	closeCh   chan struct{}
	closeErr  error

	// l guards pubsubMode and the subscription sets below.
	l               sync.Mutex
	pubsubMode      bool
	subbed, psubbed map[string]bool

	// this is only used for tests
	mDoneCh chan struct{}
}

// PubSubStub returns a (fake) Conn, much like Stub does, which pretends it is a
// Conn to a real redis instance, but is instead using the given callback to
// service requests. It is primarily useful for writing tests.
//
// PubSubStub differs from Stub in that Encode calls for (P)SUBSCRIBE,
// (P)UNSUBSCRIBE, MESSAGE, and PING will be intercepted and handled as per
// redis' expected pubsub functionality. A PubSubMessage may be written to the
// returned channel at any time, and if the PubSubStub has had (P)SUBSCRIBE
// called matching that PubSubMessage it will be written to the PubSubStub's
// internal buffer as expected.
//
// This is intended to be used so that it can mock services which can perform
// both normal redis commands and pubsub (e.g. a real redis instance, redis
// sentinel). Once created this stub can be passed into PubSub and treated like
// a real connection.
func PubSubStub(remoteNetwork, remoteAddr string, fn func([]string) interface{}) (Conn, chan<- PubSubMessage) {
	ch := make(chan PubSubMessage)
	s := &pubSubStub{
		fn:      fn,
		inCh:    ch,
		closeCh: make(chan struct{}),
		subbed:  map[string]bool{},
		psubbed: map[string]bool{},
		mDoneCh: make(chan struct{}, 1),
	}
	s.Conn = Stub(remoteNetwork, remoteAddr, s.innerFn)
	go s.spin()
	return s, ch
}

// innerFn is the command handler passed to Stub; it implements the pubsub
// command subset itself and delegates everything else to s.fn.
func (s *pubSubStub) innerFn(ss []string) interface{} {
	s.l.Lock()
	defer s.l.Unlock()

	// writeRes appends the standard [cmd, subject, count] reply redis sends
	// for each (un)subscribed channel/pattern, updating pubsubMode as it goes.
	writeRes := func(mm multiMarshal, cmd, subj string) multiMarshal {
		c := len(s.subbed) + len(s.psubbed)
		s.pubsubMode = c > 0
		return append(mm, resp2.Any{I: []interface{}{cmd, subj, c}})
	}

	switch strings.ToUpper(ss[0]) {
	case "PING":
		if !s.pubsubMode {
			return s.fn(ss)
		}
		return []string{"pong", ""}
	case "SUBSCRIBE":
		var mm multiMarshal
		for _, channel := range ss[1:] {
			s.subbed[channel] = true
			mm = writeRes(mm, "subscribe", channel)
		}
		return mm
	case "UNSUBSCRIBE":
		var mm multiMarshal
		for _, channel := range ss[1:] {
			delete(s.subbed, channel)
			mm = writeRes(mm, "unsubscribe", channel)
		}
		return mm
	case "PSUBSCRIBE":
		var mm multiMarshal
		for _, pattern := range ss[1:] {
			s.psubbed[pattern] = true
			mm = writeRes(mm, "psubscribe", pattern)
		}
		return mm
	case "PUNSUBSCRIBE":
		var mm multiMarshal
		for _, pattern := range ss[1:] {
			delete(s.psubbed, pattern)
			mm = writeRes(mm, "punsubscribe", pattern)
		}
		return mm
	case "MESSAGE":
		m := PubSubMessage{
			Type:    "message",
			Channel: ss[1],
			Message: []byte(ss[2]),
		}

		var mm multiMarshal
		if s.subbed[m.Channel] {
			mm = append(mm, m)
		}
		return mm
	case "PMESSAGE":
		m := PubSubMessage{
			Type:    "pmessage",
			Pattern: ss[1],
			Channel: ss[2],
			Message: []byte(ss[3]),
		}

		var mm multiMarshal
		if s.psubbed[m.Pattern] {
			mm = append(mm, m)
		}
		return mm
	default:
		if s.pubsubMode {
			return errPubSubMode
		}
		return s.fn(ss)
	}
}

func (s *pubSubStub) Close() error {
	s.closeOnce.Do(func() {
		close(s.closeCh)
		s.closeErr = s.Conn.Close()
	})
	return s.closeErr
}

// spin forwards messages written to the user-facing channel into the stub's
// internal buffer, defaulting Type from whether a Pattern is set.
func (s *pubSubStub) spin() {
	for {
		select {
		case m, ok := <-s.inCh:
			if !ok {
				panic("PubSubStub message channel was closed")
			}
			if m.Type == "" {
				if m.Pattern == "" {
					m.Type = "message"
				} else {
					m.Type = "pmessage"
				}
			}
			if err := s.Conn.Encode(m); err != nil {
				panic(fmt.Sprintf("error encoding message in PubSubStub: %s", err))
			}
			// Non-blocking notify for tests.
			select {
			case s.mDoneCh <- struct{}{}:
			default:
			}
		case <-s.closeCh:
			return
		}
	}
}
--------------------------------------------------------------------------------
/vendor/github.com/mediocregopher/radix/v3/radix.go:
--------------------------------------------------------------------------------
// Package radix implements all functionality needed to work with redis and all
// things related to it, including redis cluster, pubsub, sentinel, scanning,
// lua scripting, and more.
//
// Creating a client
//
// For a single node redis instance use NewPool to create a connection pool. The
// connection pool is thread-safe and will automatically create, reuse, and
// recreate connections as needed:
//
//	pool, err := radix.NewPool("tcp", "127.0.0.1:6379", 10)
//	if err != nil {
//		// handle error
//	}
//
// If you're using sentinel or cluster you should use NewSentinel or NewCluster
// (respectively) to create your client instead.
//
// Commands
//
// Any redis command can be performed by passing a Cmd into a Client's Do
// method. Each Cmd should only be used once. The return from the Cmd can be
// captured into any appropriate go primitive type, or a slice, map, or struct,
// if the command returns an array.
//
//	err := client.Do(radix.Cmd(nil, "SET", "foo", "someval"))
//
//	var fooVal string
//	err := client.Do(radix.Cmd(&fooVal, "GET", "foo"))
//
//	var fooValB []byte
//	err := client.Do(radix.Cmd(&fooValB, "GET", "foo"))
//
//	var barI int
//	err := client.Do(radix.Cmd(&barI, "INCR", "bar"))
//
//	var bazEls []string
//	err := client.Do(radix.Cmd(&bazEls, "LRANGE", "baz", "0", "-1"))
//
//	var buzMap map[string]string
//	err := client.Do(radix.Cmd(&buzMap, "HGETALL", "buz"))
//
// FlatCmd can also be used if you wish to use non-string arguments like
// integers, slices, maps, or structs, and have them automatically be flattened
// into a single string slice.
//
// Struct Scanning
//
// Cmd and FlatCmd can unmarshal results into a struct. The results must be a
// key/value array, such as that returned by HGETALL. Exported field names will
// be used as keys, unless the fields have the "redis" tag:
//
//	type MyType struct {
//		Foo string // Will be populated with the value for key "Foo"
//		Bar string `redis:"BAR"` // Will be populated with the value for key "BAR"
//		Baz string `redis:"-"` // Will not be populated
//	}
//
// Embedded structs will inline that struct's fields into the parent's:
//
//	type MyOtherType struct {
//		// adds fields "Foo" and "BAR" (from above example) to MyOtherType
//		MyType
//		Biz int
//	}
//
// The same rules for field naming apply when a struct is passed into FlatCmd as
// an argument.
//
// Actions
//
// Cmd and FlatCmd both implement the Action interface. Other Actions include
// Pipeline, WithConn, and EvalScript.Cmd. Any of these may be passed into any
// Client's Do method.
//
//	var fooVal string
//	p := radix.Pipeline(
//		radix.FlatCmd(nil, "SET", "foo", 1),
//		radix.Cmd(&fooVal, "GET", "foo"),
//	)
//	if err := client.Do(p); err != nil {
//		panic(err)
//	}
//	fmt.Printf("fooVal: %q\n", fooVal)
//
// Transactions
//
// There are two ways to perform transactions in redis. The first is with the
// MULTI/EXEC commands, which can be done using the WithConn Action (see its
// example). The second is using EVAL with lua scripting, which can be done
// using the EvalScript Action (again, see its example).
//
// EVAL with lua scripting is recommended in almost all cases. It only requires
// a single round-trip, it's infinitely more flexible than MULTI/EXEC, it's
// simpler to code, and for complex transactions, which would otherwise need a
// WATCH statement with MULTI/EXEC, it's significantly faster.
//
// AUTH and other settings via ConnFunc and ClientFunc
//
// All the client creation functions (e.g. NewPool) take in either a ConnFunc or
// a ClientFunc via their options. These can be used in order to set up timeouts
// on connections, perform authentication commands, or even implement custom
// pools.
//
//	// this is a ConnFunc which will set up a connection which is authenticated
//	// and has a 1 minute timeout on all operations
//	customConnFunc := func(network, addr string) (radix.Conn, error) {
//		return radix.Dial(network, addr,
//			radix.DialTimeout(1 * time.Minute),
//			radix.DialAuthPass("mySuperSecretPassword"),
//		)
//	}
//
//	// this pool will use our ConnFunc for all connections it creates
//	pool, err := radix.NewPool("tcp", redisAddr, 10, PoolConnFunc(customConnFunc))
//
//	// this cluster will use the ClientFunc to create a pool to each node in the
//	// cluster. The pools also use our customConnFunc, but have more connections
//	poolFunc := func(network, addr string) (radix.Client, error) {
//		return radix.NewPool(network, addr, 100, PoolConnFunc(customConnFunc))
//	}
//	cluster, err := radix.NewCluster([]string{redisAddr1, redisAddr2}, ClusterPoolFunc(poolFunc))
//
// Custom implementations
//
// All interfaces in this package were designed such that they could have custom
// implementations. There is no dependency within radix that demands any
// interface be implemented by a particular underlying type, so feel free to
// create your own Pools or Conns or Actions or whatever makes your life easier.
//
// Errors
//
// Errors returned from redis can be explicitly checked for using the
// resp2.Error type. Note that the errors.As function, introduced in go 1.13,
// should be used.
//
//	var redisErr resp2.Error
//	err := client.Do(radix.Cmd(nil, "AUTH", "wrong password"))
//	if errors.As(err, &redisErr) {
//		log.Printf("redis error returned: %s", redisErr.E)
//	}
//
// Use the golang.org/x/xerrors package if you're using an older version of go.
//
// Implicit pipelining
//
// Implicit pipelining is an optimization implemented and enabled in the default
// Pool implementation (and therefore also used by Cluster and Sentinel) which
// involves delaying concurrent Cmds and FlatCmds a small amount of time and
// sending them to redis in a single batch, similar to manually using a Pipeline.
// By doing this radix significantly reduces the I/O and CPU overhead for
// concurrent requests.
//
// Note that only commands which do not block are eligible for implicit pipelining.
//
// See the documentation on Pool for more information about the current
// implementation of implicit pipelining and for how to configure or disable
// the feature.
//
// For a performance comparisons between Clients with and without implicit
// pipelining see the benchmark results in the README.md.
//
package radix

import (
	"errors"
)

// errClientClosed is returned by Client methods after Close has been called.
var errClientClosed = errors.New("client is closed")

// Client describes an entity which can carry out Actions, e.g. a connection
// pool for a single redis instance or the cluster client.
//
// Implementations of Client are expected to be thread-safe, except in cases
// like Conn where they specify otherwise.
type Client interface {
	// Do performs an Action, returning any error.
	Do(Action) error

	// Once Close() is called all future method calls on the Client will return
	// an error
	Close() error
}

// ClientFunc is a function which can be used to create a Client for a single
// redis instance on the given network/address.
type ClientFunc func(network, addr string) (Client, error)

// DefaultClientFunc is a ClientFunc which will return a Client for a redis
// instance using sane defaults.
var DefaultClientFunc = func(network, addr string) (Client, error) {
	return NewPool(network, addr, 4)
}
--------------------------------------------------------------------------------
/vendor/github.com/mediocregopher/radix/v3/resp/resp.go:
--------------------------------------------------------------------------------
// Package resp is an umbrella package which covers both the old RESP protocol
// (resp2) and the new one (resp3), allowing clients to choose which one they
// care to use
package resp

import (
	"bufio"
	"io"
)

// Marshaler is the interface implemented by types that can marshal themselves
// into valid RESP.
type Marshaler interface {
	MarshalRESP(io.Writer) error
}

// Unmarshaler is the interface implemented by types that can unmarshal a RESP
// description of themselves. UnmarshalRESP should _always_ fully consume a RESP
// message off the reader, unless there is an error returned from the reader
// itself.
//
// Note that, unlike Marshaler, Unmarshaler _must_ take in a *bufio.Reader.
type Unmarshaler interface {
	UnmarshalRESP(*bufio.Reader) error
}

// ErrDiscarded is used to wrap an error encountered while unmarshaling a
// message. If an error was encountered during unmarshaling but the rest of the
// message was successfully discarded off of the wire, then the error can be
// wrapped in this type.
31 | type ErrDiscarded struct { 32 | Err error 33 | } 34 | 35 | func (ed ErrDiscarded) Error() string { 36 | return ed.Err.Error() 37 | } 38 | 39 | // Unwrap implements the errors.Wrapper interface. 40 | func (ed ErrDiscarded) Unwrap() error { 41 | return ed.Err 42 | } 43 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/resp/util.go: -------------------------------------------------------------------------------- 1 | package resp 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // LenReader adds an additional method to io.Reader, returning how many bytes 8 | // are left till be read until an io.EOF is reached. 9 | type LenReader interface { 10 | io.Reader 11 | Len() int64 12 | } 13 | 14 | type lenReader struct { 15 | r io.Reader 16 | l int64 17 | } 18 | 19 | // NewLenReader wraps an existing io.Reader whose length is known so that it 20 | // implements LenReader. 21 | func NewLenReader(r io.Reader, l int64) LenReader { 22 | return &lenReader{r: r, l: l} 23 | } 24 | 25 | func (lr *lenReader) Read(b []byte) (int, error) { 26 | n, err := lr.r.Read(b) 27 | lr.l -= int64(n) 28 | return n, err 29 | } 30 | 31 | func (lr *lenReader) Len() int64 { 32 | return lr.l 33 | } 34 | -------------------------------------------------------------------------------- /vendor/github.com/mediocregopher/radix/v3/scanner.go: -------------------------------------------------------------------------------- 1 | package radix 2 | 3 | import ( 4 | "bufio" 5 | "strconv" 6 | "strings" 7 | 8 | "errors" 9 | 10 | "github.com/mediocregopher/radix/v3/resp/resp2" 11 | ) 12 | 13 | // Scanner is used to iterate through the results of a SCAN call (or HSCAN, 14 | // SSCAN, etc...) 15 | // 16 | // Once created, repeatedly call Next() on it to fill the passed in string 17 | // pointer with the next result. 
Next will return false if there's no more 18 | // results to retrieve or if an error occurred, at which point Close should be 19 | // called to retrieve any error. 20 | type Scanner interface { 21 | Next(*string) bool 22 | Close() error 23 | } 24 | 25 | // ScanOpts are various parameters which can be passed into ScanWithOpts. Some 26 | // fields are required depending on which type of scan is being done. 27 | type ScanOpts struct { 28 | // The scan command to do, e.g. "SCAN", "HSCAN", etc... 29 | Command string 30 | 31 | // The key to perform the scan on. Only necessary when Command isn't "SCAN" 32 | Key string 33 | 34 | // An optional pattern to filter returned keys by 35 | Pattern string 36 | 37 | // An optional count hint to send to redis to indicate number of keys to 38 | // return per call. This does not affect the actual results of the scan 39 | // command, but it may be useful for optimizing certain datasets 40 | Count int 41 | 42 | // An optional type name to filter for values of the given type. 43 | // The type names are the same as returned by the "TYPE" command. 44 | // This is only available in Redis 6 or newer and only works with "SCAN". 45 | // If used with an older version of Redis or with a Command other than 46 | // "SCAN", scanning will fail. 47 | Type string 48 | } 49 | 50 | func (o ScanOpts) cmd(rcv interface{}, cursor string) CmdAction { // cmd builds the CmdAction for the next scan iteration, resuming at cursor 51 | cmdStr := strings.ToUpper(o.Command) 52 | args := make([]string, 0, 8) // capacity 8 covers the worst case: key, cursor, MATCH+pattern, COUNT+n, TYPE+t 53 | if cmdStr != "SCAN" { 54 | args = append(args, o.Key) 55 | } 56 | 57 | args = append(args, cursor) 58 | if o.Pattern != "" { 59 | args = append(args, "MATCH", o.Pattern) 60 | } 61 | if o.Count > 0 { 62 | args = append(args, "COUNT", strconv.Itoa(o.Count)) 63 | } 64 | if o.Type != "" { 65 | args = append(args, "TYPE", o.Type) 66 | } 67 | 68 | return Cmd(rcv, cmdStr, args...) 69 | } 70 | 71 | // ScanAllKeys is a shortcut ScanOpts which can be used to scan all keys.
72 | var ScanAllKeys = ScanOpts{ 73 | Command: "SCAN", 74 | } 75 | 76 | type scanner struct { 77 | Client 78 | ScanOpts 79 | res scanResult 80 | resIdx int 81 | err error 82 | } 83 | 84 | // NewScanner creates a new Scanner instance which will iterate over the redis 85 | // instance's Client using the ScanOpts. 86 | // 87 | // NOTE if Client is a *Cluster this will not work correctly, use the NewScanner 88 | // method on Cluster instead. 89 | func NewScanner(c Client, o ScanOpts) Scanner { 90 | return &scanner{ 91 | Client: c, 92 | ScanOpts: o, 93 | res: scanResult{ 94 | cur: "0", 95 | }, 96 | } 97 | } 98 | 99 | func (s *scanner) Next(res *string) bool { // Next yields the next key, fetching a new batch from redis once the current one is exhausted 100 | for { 101 | if s.err != nil { 102 | return false 103 | } 104 | 105 | for s.resIdx < len(s.res.keys) { 106 | *res = s.res.keys[s.resIdx] 107 | s.resIdx++ 108 | if *res != "" { // skip empty strings which may appear in a scan reply 109 | return true 110 | } 111 | } 112 | 113 | if s.res.cur == "0" && s.res.keys != nil { // cursor "0" after at least one reply means the scan is complete 114 | return false 115 | } 116 | 117 | s.err = s.Client.Do(s.cmd(&s.res, s.res.cur)) 118 | s.resIdx = 0 119 | } 120 | } 121 | 122 | func (s *scanner) Close() error { 123 | return s.err 124 | } 125 | 126 | type scanResult struct { 127 | cur string 128 | keys []string 129 | } 130 | 131 | func (s *scanResult) UnmarshalRESP(br *bufio.Reader) error { // expects a 2-element array: the next cursor followed by the key list 132 | var ah resp2.ArrayHeader 133 | if err := ah.UnmarshalRESP(br); err != nil { 134 | return err 135 | } else if ah.N != 2 { 136 | return errors.New("not enough parts returned") 137 | } 138 | 139 | var c resp2.BulkString 140 | if err := c.UnmarshalRESP(br); err != nil { 141 | return err 142 | } 143 | 144 | s.cur = c.S 145 | s.keys = s.keys[:0] // truncate in place to reuse the backing array across batches 146 | 147 | return (resp2.Any{I: &s.keys}).UnmarshalRESP(br) 148 | }
| "math" 9 | "strconv" 10 | "time" 11 | 12 | "errors" 13 | 14 | "github.com/mediocregopher/radix/v3/internal/bytesutil" 15 | "github.com/mediocregopher/radix/v3/resp" 16 | "github.com/mediocregopher/radix/v3/resp/resp2" 17 | ) 18 | 19 | // StreamEntryID represents an ID used in a Redis stream with the format