├── redisconn ├── bench │ ├── stub.go │ ├── go.mod │ ├── go.sum │ └── bench_test.go ├── eachshard.go ├── doc.go ├── scan.go ├── deadline_io.go ├── request.go ├── error.go ├── logger.go └── conn_test.go ├── rediscluster ├── bench │ ├── stub.go │ ├── go.mod │ ├── go.sum │ └── bench_test.go ├── event_release.go ├── redisclusterutil │ ├── doc.go │ ├── crc16_test.go │ ├── resolve.go │ ├── slots.go │ ├── master_only.go │ ├── crc16.go │ ├── cluster_test.go │ └── cluster.go ├── alias.go ├── event_debug.go ├── doc.go ├── eachshard.go ├── rw_policy.go ├── error.go ├── scan.go ├── roundrobin.go ├── logger.go ├── slotrange.go └── mapping.go ├── go.mod ├── .travis.yml ├── .gitignore ├── testbed ├── testbed.go ├── test_certs │ ├── server.rsa.crt │ └── server.rsa.key ├── server.go └── cluster.go ├── .github └── workflows │ └── ci.yml ├── LICENSE ├── redis ├── doc.go ├── command_type_test.go ├── response.go ├── request.go ├── chan_future.go ├── command_type.go ├── sync.go ├── example_test.go ├── reader.go ├── sender.go ├── error.go ├── reader_test.go ├── sync_context.go ├── request_writer.go └── request_test.go ├── go.sum ├── Makefile ├── bin └── clean-cluster │ └── main.go ├── doc.go ├── example_test.go ├── redisdumb └── conn.go └── README.md /redisconn/bench/stub.go: -------------------------------------------------------------------------------- 1 | package bench 2 | -------------------------------------------------------------------------------- /rediscluster/bench/stub.go: -------------------------------------------------------------------------------- 1 | package bench 2 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/joomcode/redispipe 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/joomcode/errorx v1.0.3 7 | github.com/stretchr/testify v1.7.0 8 | ) 9 | -------------------------------------------------------------------------------- /rediscluster/event_release.go: -------------------------------------------------------------------------------- 1 | // +build !debugredis 2 | 3 | package rediscluster 4 | 5 | // DebugEvent is stub implementation of test-related method. 6 | func DebugEvent(ev string) {} 7 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/doc.go: -------------------------------------------------------------------------------- 1 | // Package redisclusterutil implements some protocol level details of cluster specification. 2 | // 3 | // It could be used independently from rediscluster package. 4 | package redisclusterutil 5 | -------------------------------------------------------------------------------- /rediscluster/alias.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import ( 4 | "github.com/joomcode/redispipe/redis" 5 | ) 6 | 7 | // Request is an alias for redis.Request 8 | type Request = redis.Request 9 | 10 | // Future is an alias for redis.Future 11 | type Future = redis.Future 12 | -------------------------------------------------------------------------------- /redisconn/eachshard.go: -------------------------------------------------------------------------------- 1 | package redisconn 2 | 3 | import "github.com/joomcode/redispipe/redis" 4 | 5 | // EachShard implements redis.Sender.EachShard. 6 | // It just calls callback once with Connection itself. 
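// An illustrative sketch: because a single Connection is its own only "shard", the callback
// receives the connection itself, so code written against redis.Sender works unchanged:
//
//	conn.EachShard(func(s redis.Sender, err error) bool {
//		if err != nil {
//			return false // stop iteration on error
//		}
//		res := redis.Sync{s}.Do("PING")
//		return redis.AsError(res) == nil
//	})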
7 | func (c *Connection) EachShard(cb func(redis.Sender, error) bool) { 8 | cb(c, nil) 9 | } 10 | -------------------------------------------------------------------------------- /redisconn/bench/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/joomcode/redispipe/redisconn/bench 2 | 3 | go 1.16 4 | 5 | replace github.com/joomcode/redispipe => ../.. 6 | 7 | require ( 8 | github.com/gomodule/redigo v1.8.4 9 | github.com/joomcode/redispipe v0.0.0-00010101000000-000000000000 10 | github.com/mediocregopher/radix/v3 v3.7.0 11 | ) 12 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/crc16_test.go: -------------------------------------------------------------------------------- 1 | package redisclusterutil 2 | 3 | // copied from github.com/mediocregopher/radix.v2/cluster/crc16.go 4 | 5 | import ( 6 | "testing" 7 | ) 8 | 9 | func TestCRC16(t *testing.T) { 10 | if c := CRC16([]byte("123456789")); c != 0x31c3 { 11 | t.Fatalf("checksum came out to %x not %x", c, 0x31c3) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /rediscluster/bench/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/joomcode/redispipe/rediscluster/bench 2 | 3 | go 1.16 4 | 5 | replace github.com/joomcode/redispipe => ../.. 6 | 7 | require ( 8 | github.com/joomcode/redispipe v0.0.0-00010101000000-000000000000 9 | github.com/mediocregopher/radix/v3 v3.7.0 10 | github.com/wuxibin89/redis-go-cluster v1.0.1-0.20161207023922-222d81891f1d 11 | ) 12 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - "1.9.x" 5 | - "1.13.x" 6 | 7 | env: 8 | matrix: 9 | - STAGE=testredis 10 | - STAGE=testconn 11 | - STAGE=testcluster 12 | 13 | addons: 14 | apt: 15 | packages: 16 | - realpath 17 | 18 | before_install: 19 | - make /tmp/redis-server/redis-server 20 | 21 | cache: 22 | directories: 23 | - /tmp/redis-server 24 | 25 | install: 26 | - go get -t -v ./... 27 | 28 | script: make $STAGE 29 | -------------------------------------------------------------------------------- /redisconn/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package redisconn implements connection to single redis server. 3 | 4 | Connection is "wrapper" around a single tcp (unix-socket) connection. All requests are fed into a 5 | single connection, and responses are asynchronously read from it. 6 | Connection is thread-safe, meaning it doesn't need external synchronization. 7 | Connect is responsible for reconnection, but it does not retry requests in the case of networking problems. 
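A minimal usage sketch (options left at defaults; error handling shortened for brevity):

	conn, err := redisconn.Connect(ctx, "127.0.0.1:6379", redisconn.Opts{})
	if err != nil {
		log.Fatal(err)
	}
	sync := redis.Sync{conn}
	res := sync.Do("SET", "key", "value")
	if err := redis.AsError(res); err != nil {
		log.Println("request failed:", err)
	}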
8 | */ 9 | package redisconn 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | 7 | # Test binary, build with `go test -c` 8 | *.test 9 | 10 | # Output of the go coverage tool, specifically when used with LiteIDE 11 | *.out 12 | 13 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 14 | .glide/ 15 | 16 | # goland 17 | .idea/ 18 | 19 | #our version with redis with patched cluster 20 | redis-server 21 | 22 | # temporary test files 23 | rediscluster/redis_test_* 24 | -------------------------------------------------------------------------------- /rediscluster/event_debug.go: -------------------------------------------------------------------------------- 1 | // +build debugredis 2 | 3 | package rediscluster 4 | 5 | import "sync" 6 | 7 | var DebugMtx sync.Mutex 8 | var debugEvents = []string{} 9 | var DebugDisable = false 10 | 11 | func DebugEvent(ev string) { 12 | if !DebugDisable { 13 | DebugMtx.Lock() 14 | debugEvents = append(debugEvents, ev) 15 | DebugMtx.Unlock() 16 | } 17 | } 18 | 19 | func DebugEvents() []string { 20 | DebugMtx.Lock() 21 | defer DebugMtx.Unlock() 22 | return debugEvents 23 | } 24 | 25 | func DebugEventsReset() { 26 | DebugMtx.Lock() 27 | defer DebugMtx.Unlock() 28 | debugEvents = nil 29 | } 30 | -------------------------------------------------------------------------------- /rediscluster/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package rediscluster implements a connector for redis cluster. 3 | 4 | Cluster automatically learns and periodically refreshes cluster configuration. 5 | It could send requests to slaves (if a corresponding policy is used), and could retry 6 | read requests within replicaset and write requests with connections to the same master host 7 | (if it is known that requests were not sent). 8 | 9 | It reacts on set CLUSTER_SELF:MASTER_ONLY stored in the cluster itself to force master-only 10 | policy on some slots. It is used by proprietary tool for correct and fast cluster 11 | rebalancing. 
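A minimal usage sketch (addresses and options are illustrative; Cluster itself is used here
as a redis.Sender):

	cluster, err := rediscluster.NewCluster(ctx, []string{"127.0.0.1:7000"}, rediscluster.Opts{})
	if err != nil {
		log.Fatal(err)
	}
	sync := redis.Sync{cluster}
	res := sync.Do("GET", "key")
	if err := redis.AsError(res); err != nil {
		log.Println("request failed:", err)
	}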
12 | */ 13 | package rediscluster 14 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/resolve.go: -------------------------------------------------------------------------------- 1 | package redisclusterutil 2 | 3 | import "net" 4 | 5 | // Resolve just resolves hostname:port to ipaddr:port 6 | func Resolve(addr string) (string, error) { 7 | ip, port, err := net.SplitHostPort(addr) 8 | if err != nil { 9 | return "", err 10 | } 11 | ips, err := net.LookupHost(ip) 12 | if err != nil { 13 | return "", err 14 | } 15 | return net.JoinHostPort(ips[0], port), nil 16 | } 17 | 18 | func GetHost(addr string) (string, error) { 19 | host, _, err := net.SplitHostPort(addr) 20 | if err != nil { 21 | return "", err 22 | } 23 | 24 | return host, nil 25 | } 26 | 27 | func IsIPAddress(addr string) bool { 28 | return net.ParseIP(addr) != nil 29 | } 30 | -------------------------------------------------------------------------------- /rediscluster/eachshard.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import ( 4 | "github.com/joomcode/redispipe/redis" 5 | ) 6 | 7 | // EachShard implements redis.Sender.EachShard 8 | func (c *Cluster) EachShard(cb func(redis.Sender, error) bool) { 9 | cfg := c.getConfig() 10 | for _, shard := range cfg.shards { 11 | node := cfg.nodes[shard.addr[0]] 12 | if node == nil { 13 | cb(nil, c.err(ErrNoAliveConnection).WithProperty(redis.EKAddress, shard.addr[0])) 14 | return 15 | } 16 | conn := node.getConn(c.opts.ConnHostPolicy, preferConnected, nil) 17 | if conn == nil { 18 | cb(nil, c.err(ErrNoAliveConnection).WithProperty(redis.EKAddress, shard.addr[0])) 19 | return 20 | } 21 | if !cb(conn, nil) { 22 | return 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /redisconn/scan.go: -------------------------------------------------------------------------------- 1 | package redisconn 2 | 3 | import ( 4 | "github.com/joomcode/redispipe/redis" 5 | ) 6 | 7 | // Scanner is an implementation of redis.Scanner 8 | type Scanner struct { 9 | redis.ScannerBase 10 | c *Connection 11 | } 12 | 13 | // Next is an implementation of redis.Scanner.Next 14 | func (s *Scanner) Next(cb redis.Future) { 15 | if s.Err != nil { 16 | cb.Resolve(s.Err, 0) 17 | return 18 | } 19 | if s.IterLast() { 20 | cb.Resolve(nil, 0) 21 | return 22 | } 23 | s.DoNext(cb, s.c) 24 | } 25 | 26 | // Scanner implements redis.Sender.Scanner 27 | func (c *Connection) Scanner(opts redis.ScanOpts) redis.Scanner { 28 | return &Scanner{ 29 | ScannerBase: redis.ScannerBase{ScanOpts: opts}, 30 | c: c, 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /redisconn/deadline_io.go: -------------------------------------------------------------------------------- 1 | package redisconn 2 | 3 | import ( 4 | "io" 5 | "net" 6 | "time" 7 | ) 8 | 9 | // deadlineIO is a wrapper that sets read deadline before each Read. 10 | type deadlineIO struct { 11 | to time.Duration 12 | c net.Conn 13 | } 14 | 15 | func newDeadlineIO(c net.Conn, to time.Duration) io.ReadWriter { 16 | if to > 0 { 17 | return &deadlineIO{c: c, to: to} 18 | } 19 | return c 20 | } 21 | 22 | // Write implements io.Writer. 23 | // It doesn't set write deadline. 24 | func (d *deadlineIO) Write(b []byte) (int, error) { 25 | return d.c.Write(b) 26 | } 27 | 28 | // Read implements io.Reader 29 | // It sets read deadline before each call to Read. 
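// Note: if the peer stays silent for longer than `to`, Read returns a timeout error;
// presumably the connection's reading loop then treats the link as broken and relies on
// the reconnection behaviour described in the package documentation.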
30 | func (d *deadlineIO) Read(b []byte) (int, error) { 31 | d.c.SetReadDeadline(time.Now().Add(d.to)) 32 | return d.c.Read(b) 33 | } 34 | -------------------------------------------------------------------------------- /testbed/testbed.go: -------------------------------------------------------------------------------- 1 | // Package testbed is a tool for running redis-server for tests. 2 | package testbed 3 | 4 | import ( 5 | "io/ioutil" 6 | "os" 7 | "os/exec" 8 | ) 9 | 10 | // Binary is a path to redis-server 11 | var Binary = func() string { p, _ := exec.LookPath("redis-server"); return p }() 12 | 13 | // Dir is temporary directory where redis will run. 14 | var Dir = "" 15 | var tlsCluster = os.Getenv("TLS_ENABLED") == "ENABLED" 16 | 17 | // InitDir initiates Dir with temporary directory in base. 18 | func InitDir(base string) { 19 | if Dir == "" { 20 | var err error 21 | Dir, err = ioutil.TempDir(base, "redis_test_") 22 | if err != nil { 23 | panic(err) 24 | } 25 | } 26 | } 27 | 28 | // RmDir removes temporary directory. 29 | func RmDir() { 30 | if Dir == "" { 31 | return 32 | } 33 | if err := os.RemoveAll(Dir); err != nil { 34 | panic(err) 35 | } 36 | Dir = "" 37 | } 38 | -------------------------------------------------------------------------------- /redisconn/request.go: -------------------------------------------------------------------------------- 1 | package redisconn 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/joomcode/redispipe/redis" 7 | ) 8 | 9 | // Request is an alias for redis.Request 10 | type Request = redis.Request 11 | 12 | // Future is an alias for redis.Future 13 | type Future = redis.Future 14 | 15 | type future struct { 16 | Future 17 | N uint64 18 | 19 | start int64 20 | bytesIn int64 21 | bytesOut int64 22 | req Request 23 | } 24 | 25 | var epoch = time.Now() 26 | 27 | func nownano() int64 { 28 | return int64(time.Now().Sub(epoch)) 29 | } 30 | 31 | func (c *Connection) resolve(f future, res interface{}) { 32 | if f.start != 0 && f.req.Cmd != "" { 33 | delta := nownano() - f.start 34 | c.opts.Logger.ReqStat(c, f.req, res, delta, f.bytesIn, f.bytesOut) 35 | if f.req.Cmd == "PING" { 36 | c.storePingLatency(time.Duration(delta)) 37 | } 38 | } 39 | f.Future.Resolve(res, f.N) 40 | } 41 | -------------------------------------------------------------------------------- /rediscluster/rw_policy.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | // PolicyMan wraps Cluster and change default policy for Send and SendMany methods. 4 | // PolicyMan implements redis.Sender. 5 | type PolicyMan struct { 6 | *Cluster 7 | // Policy is default policy for Send and SendMany 8 | Policy ReplicaPolicyEnum 9 | } 10 | 11 | // Send implements redis.Sender.Send 12 | // It calls Cluster.SendWithPolicy with specified default policy. 13 | func (p PolicyMan) Send(req Request, cb Future, off uint64) { 14 | p.Cluster.SendWithPolicy(p.Policy, req, cb, off) 15 | } 16 | 17 | // SendMany implements redis.Sender.SendMany 18 | // It sends requests with specified default policy. 19 | func (p PolicyMan) SendMany(reqs []Request, cb Future, off uint64) { 20 | for i, req := range reqs { 21 | p.Cluster.SendWithPolicy(p.Policy, req, cb, off+uint64(i)) 22 | } 23 | } 24 | 25 | // WithPolicy returns PolicyMan with specified policy. 
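// A hedged usage sketch (the exact policy constant name is an assumption based on the policy
// names mentioned in this package's documentation):
//
//	reader := redis.Sync{cluster.WithPolicy(rediscluster.PreferSlaves)}
//	res := reader.Do("GET", "key")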
26 | func (c *Cluster) WithPolicy(policy ReplicaPolicyEnum) PolicyMan { 27 | return PolicyMan{c, policy} 28 | } 29 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | strategy: 11 | matrix: 12 | go: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24"] 13 | stage: [testredis, testconn, testcluster] 14 | 15 | steps: 16 | 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | 20 | - name: Set up Go ${{ matrix.go }} 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version: ${{ matrix.go }} 24 | 25 | - name: Cache go modules 26 | uses: actions/cache@v4 27 | with: 28 | path: ~/go/pkg/mod 29 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 30 | restore-keys: | 31 | ${{ runner.os }}-go- 32 | 33 | - name: Cache redis build 34 | uses: actions/cache@v4 35 | with: 36 | path: | 37 | /tmp/redis-server 38 | key: ${{ runner.os }}-redis-${{ hashFiles('Makefile') }} 39 | 40 | - name: Install redis 41 | run: make /tmp/redis-server/redis-server 42 | 43 | 44 | - name: Build 45 | run: make ${{ matrix.stage }} 46 | 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Joom 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/slots.go: -------------------------------------------------------------------------------- 1 | package redisclusterutil 2 | 3 | import ( 4 | "math/rand" 5 | 6 | "github.com/joomcode/redispipe/redis" 7 | ) 8 | 9 | // ReqSlot returns slot number targeted by this command. 10 | func ReqSlot(req redis.Request) (uint16, bool) { 11 | key, ok := req.Key() 12 | if key == "RANDOMKEY" && !ok { 13 | return uint16(rand.Intn(NumSlots)), true 14 | } 15 | return Slot(key), ok 16 | } 17 | 18 | // BatchSlot returns slot common for all requests in batch (if there is such common slot). 
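// For illustration: keys sharing a hash tag map to the same slot, so such a batch has a
// common slot (see the Slot function in crc16.go for the brace-extraction rule):
//
//	slot, ok := BatchSlot([]redis.Request{
//		redis.Req("GET", "{user:1}:name"),
//		redis.Req("GET", "{user:1}:age"),
//	})
//	// ok == true, and slot equals Slot("user:1")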
19 | func BatchSlot(reqs []redis.Request) (uint16, bool) { 20 | var slot uint16 21 | var set bool 22 | for _, req := range reqs { 23 | s, ok := ReqSlot(req) 24 | if !ok { 25 | continue 26 | } 27 | if !set { 28 | slot = s 29 | set = true 30 | } else if slot != s { 31 | return 0, false 32 | } 33 | } 34 | return slot, set 35 | } 36 | 37 | // BatchKey returns first key from a batch that is targeted to common slot. 38 | func BatchKey(reqs []redis.Request) (string, bool) { 39 | var key string 40 | var slot uint16 41 | var set bool 42 | for _, req := range reqs { 43 | k, ok := req.Key() 44 | if !ok { 45 | continue 46 | } 47 | s := Slot(k) 48 | if !set { 49 | key, slot = k, s 50 | set = true 51 | } else if slot != s { 52 | return "", false 53 | } 54 | } 55 | return key, set 56 | } 57 | -------------------------------------------------------------------------------- /testbed/test_certs/server.rsa.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDazCCAlOgAwIBAgIUe1Q+jj/7OtcK2uN+cWClbXY3sbowDQYJKoZIhvcNAQEL 3 | BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM 4 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzAyMjQxNjA4MzVaFw0zMzAy 5 | MjExNjA4MzVaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw 6 | HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB 7 | AQUAA4IBDwAwggEKAoIBAQC37hjVbrblSu+dnOkGCjRG1Bc1+UyH7V7/4U82C8hE 8 | ZKPby+Ex0cz0rEyLo2we0XszsErGfvgwq4UE9Evx7gcHkBMV6qx8ak4jrf1hwQT1 9 | IA9XRKDCSDY+4n6y/zc4tMZ2JOl163NeoAlfWrzTYKqY/nGTI04pGfvAB2ay6Jr2 10 | JByqw+Mh6c3MA4hUjoWjI9goB9qq1DKkme17GSRUgOiF9mGjDTVpo8Fi0l9F6ihL 11 | uPZErIIa9VAt7UHX/rc+lZijzwHs5QNNYrS0IUIS3UCH4dvCOaEkcmOyXQza/8UE 12 | Hx7aq6WSTNr+QhOvJc1SDhk8HgnjPrsP3Br5uKr9cfg7AgMBAAGjUzBRMB0GA1Ud 13 | DgQWBBShDBMvAhe1uf43tuSfAfXJnLRb/jAfBgNVHSMEGDAWgBShDBMvAhe1uf43 14 | tuSfAfXJnLRb/jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAk 15 | Y07/NP/rGl98sxTHVVpxcJNg7csrYZ2YLd92yB/3gWL5WLroTiMAp1lwYaF9h4wM 16 | zjSobr69BQJ+ATHhJEtMzwe3s85bxSvF2qMZv/qsqLENe8yrg7fHIdp4zbroGqkp 17 | goljh7nomP9UENH1WAoXVwPLFRvigVIFBAMGIsmR/rAqPArsCOwvD1vjMCzrAqUd 18 | Y7JHxs3hcyM3FRuFkVdMS0e/eAXlhEsIS5IHeWsOyj3hFEz3jWrQjiuvYN73Eq29 19 | s6QR7R6f2ilFjOKk5ef/1Mimhjc4KhM/MO9Sj4HYLLKCClgLDIurg72sGvU2vqmC 20 | dExl3GybH3zCGUD67CH8 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /redis/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package redis contains common parts for other packages. 3 | 4 | - main interfaces visible to user (Sender, Scanner, ScanOpts) 5 | 6 | - wrappers for synchronous interface over Sender (Sync, SyncCtx) 7 | and chan-based-future interface (ChanFutured) 8 | 9 | - request writing, 10 | 11 | - response parsing, 12 | 13 | - root errorx namespace and common error types. 
14 | 15 | Usually you get Sender from redisconn.Connect or rediscluster.NewCluster, then wrap with Sync or SyncCtx, and use their 16 | sync methods without any locking: 17 | 18 | sender, err := redisconn.Connect(ctx, "127.0.0.1:6379", redisconn.Opts{}) 19 | sync := redis.Sync{sender} 20 | go func() { 21 | res := sync.Do("GET", "x") 22 | if err := redis.AsError(res); err != nil { 23 | log.Println("failed", err) 24 | } 25 | log.Println("found x", res) 26 | }() 27 | go func() { 28 | results := sync.SendMany([]redis.Request{ 29 | redis.Req("GET", "k1"), 30 | redis.Req("Incr", "k2"), 31 | redis.Req("HMGET, "h1", "hk1", "hk2"), 32 | }) 33 | if err := redis.AsError(results[0]); err != nil { 34 | log.Println("failed", err) 35 | } 36 | if results[0] == nil { 37 | log.Println("not found") 38 | } else { 39 | log.Println("k1: ", results[0]) 40 | } 41 | }() 42 | 43 | See more documentation in root redispipe package. 44 | */ 45 | package redis 46 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/joomcode/errorx v1.0.3 h1:3e1mi0u7/HTPNdg6d6DYyKGBhA5l9XpsfuVE29NxnWw= 5 | github.com/joomcode/errorx v1.0.3/go.mod h1:eQzdtdlNyN7etw6YCS4W4+lu442waxZYw5yvz0ULrRo= 6 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 7 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 8 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 9 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 10 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 11 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 12 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 13 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 14 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 15 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 16 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | REDIS_ARCHIVE ?= https://github.com/redis/redis/archive 2 | REDIS_VERSION ?= 6.2.10 3 | 4 | test: testcluster testconn testredis 5 | 6 | /tmp/redis-server/redis-server: 7 | @echo "Building redis-$(REDIS_VERSION)..." 8 | sudo apt-get install -y libssl-dev 9 | wget -nv -c $(REDIS_ARCHIVE)/$(REDIS_VERSION).tar.gz -O - | tar -xzC . 10 | cd redis-$(REDIS_VERSION) && make -j 4 USE_JEMALLOC=no BUILD_TLS=yes 11 | if [ ! 
-e /tmp/redis-server ] ; then mkdir /tmp/redis-server ; fi 12 | mv redis-$(REDIS_VERSION)/src/redis-server /tmp/redis-server 13 | rm redis-$(REDIS_VERSION) -rf 14 | 15 | testredis: /tmp/redis-server/redis-server 16 | PATH=/tmp/redis-server/:${PATH} go test ./redis 17 | 18 | testconn: /tmp/redis-server/redis-server 19 | killall redis-server || true 20 | rm ./redisconn/redis_test_* -r || true 21 | PATH=/tmp/redis-server/:${PATH} go test -count 1 ./redisconn 22 | 23 | testcluster: /tmp/redis-server/redis-server 24 | killall redis-server || true 25 | rm ./rediscluster/redis_test_* -r || true 26 | PATH=/tmp/redis-server/:${PATH} go test -count 1 -tags debugredis ./rediscluster 27 | rm ./rediscluster/redis_test_* -r || true 28 | PATH=/tmp/redis-server/:${PATH} TLS_ENABLED=ENABLED go test -count 1 -tags debugredis ./rediscluster 29 | 30 | bench: benchconn benchcluster 31 | 32 | benchconn: /tmp/redis-server/redis-server 33 | PATH=/tmp/redis-server/:${PATH} ; cd ./redisconn/bench ; go test -count 1 -run FooBar -bench . -benchmem . 34 | 35 | benchcluster: /tmp/redis-server/redis-server 36 | PATH=/tmp/redis-server/:${PATH} ; cd ./rediscluster/bench ; go test -count 1 -tags debugredis -run FooBar -bench . -benchmem . 37 | 38 | clean: 39 | rm -r */redis_test_* 40 | -------------------------------------------------------------------------------- /rediscluster/error.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import ( 4 | "github.com/joomcode/errorx" 5 | "github.com/joomcode/redispipe/redis" 6 | ) 7 | 8 | var ( 9 | // ErrCluster - some cluster related errors. 10 | ErrCluster = redis.Errors.NewSubNamespace("cluster") 11 | // ErrClusterSlots - fetching slots configuration failed 12 | ErrClusterSlots = ErrCluster.NewType("retrieve_slots") 13 | // ErrAddressNotResolved - address could not be resolved 14 | // Cluster resolves named hosts specified as start points. If this resolution fails, this error returned. 15 | ErrAddressNotResolved = ErrCluster.NewType("resolve_address") 16 | // ErrAddressHostname - hostname could not be extracted from address 17 | ErrAddressHostname = ErrCluster.NewType("address_hostname") 18 | // ErrClusterConfigEmpty - no addresses found in config. 19 | ErrClusterConfigEmpty = ErrCluster.NewType("config_empty") 20 | // ErrNoAliveConnection - no alive connection to shard 21 | ErrNoAliveConnection = ErrCluster.NewType("no_alive_connection", redis.ErrTraitConnectivity) 22 | ) 23 | 24 | var ( 25 | // EKCluster - cluster for error 26 | EKCluster = errorx.RegisterProperty("cluster") 27 | // EKClusterName - cluster name 28 | EKClusterName = errorx.RegisterPrintableProperty("clusterName") 29 | // EKPolicy - policy used to choose between master and replicas. 
30 | EKPolicy = errorx.RegisterPrintableProperty("policy") 31 | ) 32 | 33 | func withNewProperty(err *errorx.Error, p errorx.Property, v interface{}) *errorx.Error { 34 | _, ok := err.Property(p) 35 | if ok { 36 | return err 37 | } 38 | return err.WithProperty(p, v) 39 | } 40 | 41 | func movedTo(err *errorx.Error) string { 42 | a, ok := err.Property(redis.EKMovedTo) 43 | if !ok { 44 | return "" 45 | } 46 | return a.(string) 47 | } 48 | -------------------------------------------------------------------------------- /bin/clean-cluster/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "sync" 9 | "time" 10 | 11 | "github.com/joomcode/redispipe/redis" 12 | "github.com/joomcode/redispipe/rediscluster" 13 | ) 14 | 15 | var sleep = flag.Duration("sleep", 50*time.Millisecond, "sleep between batches") 16 | var addr = flag.String("addr", "", "address of one of cluster instances (required)") 17 | var match = flag.String("match", "", "match expression to delete (required)") 18 | 19 | func main() { 20 | flag.Parse() 21 | if *match == "" { 22 | log.Fatal("Match argument should be specified and not empty") 23 | } 24 | if *addr == "" { 25 | log.Fatal("Redis address should be specified and not empty") 26 | } 27 | con, err := rediscluster.NewCluster( 28 | context.Background(), 29 | []string{*addr}, 30 | rediscluster.Opts{}, 31 | ) 32 | if err != nil { 33 | log.Fatal(err) 34 | } 35 | var wg sync.WaitGroup 36 | con.EachShard(func(sh redis.Sender, err error) bool { 37 | if err != nil { 38 | log.Fatal(err) 39 | } 40 | wg.Add(1) 41 | go func() { 42 | defer wg.Done() 43 | sync := redis.Sync{sh} 44 | iter := sync.Scanner(redis.ScanOpts{ 45 | Match: *match, 46 | Count: 1000, 47 | }) 48 | for { 49 | keys, err := iter.Next() 50 | if err != nil { 51 | if err == redis.ScanEOF { 52 | break 53 | } 54 | log.Fatal(err) 55 | } 56 | if len(keys) != 0 { 57 | reqs := make([]redis.Request, len(keys)) 58 | for i, key := range keys { 59 | reqs[i] = redis.Req("DEL", key) 60 | } 61 | sync.SendMany(reqs) 62 | fmt.Printf("%q\n", keys[0]) 63 | } 64 | if *sleep > 0 { 65 | time.Sleep(*sleep) 66 | } 67 | } 68 | }() 69 | return true 70 | }) 71 | wg.Wait() 72 | } 73 | -------------------------------------------------------------------------------- /redisconn/error.go: -------------------------------------------------------------------------------- 1 | package redisconn 2 | 3 | import ( 4 | "github.com/joomcode/errorx" 5 | "github.com/joomcode/redispipe/redis" 6 | ) 7 | 8 | var ( 9 | // ErrConnection - connection was not established at the moment request were done, 10 | // request is definitely not sent anywhere. 11 | ErrConnection = redis.Errors.NewSubNamespace("connection", redis.ErrTraitNotSent, redis.ErrTraitConnectivity) 12 | // ErrNotConnected - connection were not established at the moment 13 | ErrNotConnected = ErrConnection.NewType("not_connected") 14 | // ErrDial - could not connect. 
15 | ErrDial = ErrConnection.NewType("could_not_connect") 16 | // ErrAuth - password didn't match 17 | ErrAuth = ErrConnection.NewType("count_not_auth", ErrTraitInitPermanent) 18 | // ErrInit - other error during initial conversation with redis 19 | ErrInit = ErrConnection.NewType("initialization_error", ErrTraitInitPermanent) 20 | // ErrConnSetup - other connection initialization error (including io errors) 21 | ErrConnSetup = ErrConnection.NewType("initialization_temp_error") 22 | 23 | // ErrTraitInitPermanent signals about non-transient error in initial communication with redis. 24 | // It means that either authentication fails or selected database doesn't exists or redis 25 | // behaves in unexpected way. 26 | ErrTraitInitPermanent = errorx.RegisterTrait("init_permanent") 27 | ) 28 | 29 | var ( 30 | // EKConnection - key for connection that handled request. 31 | EKConnection = errorx.RegisterProperty("connection") 32 | // EKDb - db number to select. 33 | EKDb = errorx.RegisterPrintableProperty("db") 34 | ) 35 | 36 | func withNewProperty(err *errorx.Error, p errorx.Property, v interface{}) *errorx.Error { 37 | _, ok := err.Property(p) 38 | if ok { 39 | return err 40 | } 41 | return err.WithProperty(p, v) 42 | } 43 | -------------------------------------------------------------------------------- /testbed/test_certs/server.rsa.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC37hjVbrblSu+d 3 | nOkGCjRG1Bc1+UyH7V7/4U82C8hEZKPby+Ex0cz0rEyLo2we0XszsErGfvgwq4UE 4 | 9Evx7gcHkBMV6qx8ak4jrf1hwQT1IA9XRKDCSDY+4n6y/zc4tMZ2JOl163NeoAlf 5 | WrzTYKqY/nGTI04pGfvAB2ay6Jr2JByqw+Mh6c3MA4hUjoWjI9goB9qq1DKkme17 6 | GSRUgOiF9mGjDTVpo8Fi0l9F6ihLuPZErIIa9VAt7UHX/rc+lZijzwHs5QNNYrS0 7 | IUIS3UCH4dvCOaEkcmOyXQza/8UEHx7aq6WSTNr+QhOvJc1SDhk8HgnjPrsP3Br5 8 | uKr9cfg7AgMBAAECggEAE1DXXc63OWRBvsEkPaSjsc6DM4FfVqUIhYHBYlEhcoFJ 9 | LgN+vk8koYtYrI94gtIICLkaWTcrVF0m5orLUPho15P7VSFkhNpbI2cZxYLSRPXd 10 | dpI3+b9ApyD9IkooH/XoI68jr2UPJCBVa6SpUN+FevS5s7SQ+EMIy7VbRS2lXHTn 11 | tg/O0YluaA0+2plKk01m3hz7l1Uuiomr0MGtvCh6ZD5nCvUYuT6xYVnTmKPQXw/t 12 | aIcLELDeMs921uSzvVrfGJrEtffYGsQ8GUDtzQic606X+6u21ILg8MUHvuc+atVJ 13 | CCIjkH1RI5EAvyYu6BJfKV/qYF/lxcwdjVZANwmoXQKBgQDJ9CDmtogw7/5BEAby 14 | NJNSrUX8R+MKTJy5ngeIwfk6bt1/8AccI8x3b3TVLTicuU64E8585iYF693zyW/5 15 | 9TYnfHOMnp/MbQbO6jApnPMOChLf1ezbjeIOw6VwzPWoubzvSu6wLwhQNu85mZ/L 16 | NZ2aQ5kKwMIrBsBOA/ch7e8XNQKBgQDpJyv7iFQ0rLh0OX7iW503qlYWGBNKFzkq 17 | 66/bhJxgrXNBubbA3USjy6Hxx+0dJGp8XYXJ2gkwfkrUyQl/yAByHyke3158dzGh 18 | Ln/jXXF2IoxWR8aNAUVPRfFDMlMKHhgr04opDEVuFh4dfuubQViNQ23QdjkCdg8o 19 | 318YRbEPrwKBgB+jUkAymlVoU6GnvB7Xo7jREmS6clQbunXNzwpasQu5cJPpa4O6 20 | C/8uA9Sdt/+9fBqUZ8XjXFOTJvtZNGSpSKmY8pU2CHzDG8zwnUj3oJAzfc95ORkQ 21 | Ojjr55ArW9Tp/DJUv930EE5YvNN+QK1aIe2X53LqzcBl11yhfGbhWpJ9AoGAGFSN 22 | I2+DkgegLAi/8/Epg+CToTSb9rRbs93qeRbqlrAjAe3WY2PzwMAEQ03gsZTWQ+oi 23 | hwDoypWzS2c7RYlieoZD7UPUAVsS48YNdHWQ3IoYaPyOfLJwQCiFV8TNo5WmYhRu 24 | K4BrUmSeeed/wLDeA8fQkttzcY1OYa6FpuvG4WMCgYBDEG5ANGnk28CO9qOKlUtq 25 | g7kD/idRnF3J8Qgq3n8EG+OyWmV8GQcLUCj97VCKS08gkyN1J88twjx/7EvYqFEF 26 | l/Zq0dqjiMX4z/tCcO32nTyx2EIpSnv0QmjxZxT40gtOp2ZrZqZ0TZDAcf38cSBF 27 | a6HpXrBzsvQp+hJOm6eOLQ== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /rediscluster/scan.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import ( 4 | "github.com/joomcode/redispipe/redis" 5 | 
"github.com/joomcode/redispipe/rediscluster/redisclusterutil" 6 | ) 7 | 8 | // Scanner is an implementation of redis.Scanner. 9 | // 10 | // If it were called for SCAN command, it will iterate through all shards. 11 | type Scanner struct { 12 | redis.ScannerBase 13 | 14 | c *Cluster 15 | addrs []string 16 | } 17 | 18 | // Scanner implements redis.Sender.Scanner. 19 | func (c *Cluster) Scanner(opts redis.ScanOpts) redis.Scanner { 20 | var addrs []string 21 | 22 | if opts.Cmd == "" || opts.Cmd == "SCAN" { 23 | cfg := c.getConfig() 24 | addrs = make([]string, 0, len(cfg.masters)) 25 | for addr := range cfg.masters { 26 | addrs = append(addrs, addr) 27 | } 28 | if len(addrs) == 0 { 29 | s := &Scanner{} 30 | s.Err = c.err(ErrClusterConfigEmpty) 31 | return s 32 | } 33 | } else { 34 | // other commands operates on single key 35 | key := opts.Key 36 | slot := redisclusterutil.Slot(key) 37 | shard := c.getConfig().slot2shard(slot) 38 | addrs = shard.addr[:1] 39 | } 40 | 41 | return &Scanner{ 42 | ScannerBase: redis.ScannerBase{ScanOpts: opts}, 43 | 44 | c: c, 45 | addrs: addrs, 46 | } 47 | } 48 | 49 | // Next implements redis.Scanner.Next 50 | // Under the hood, it will scan each shard one after another. 51 | func (s *Scanner) Next(cb redis.Future) { 52 | if s.Err != nil { 53 | cb.Resolve(s.Err, 0) 54 | return 55 | } 56 | if s.IterLast() { 57 | s.addrs = s.addrs[1:] 58 | s.Iter = nil 59 | } 60 | if len(s.addrs) == 0 && s.Iter == nil { 61 | cb.Resolve(nil, 0) 62 | return 63 | } 64 | conn := s.c.connForAddress(s.addrs[0]) 65 | if conn == nil { 66 | s.Err = s.c.err(ErrNoAliveConnection). 67 | WithProperty(redis.EKAddress, s.addrs[0]) 68 | cb.Resolve(s.Err, 0) 69 | return 70 | } 71 | s.DoNext(cb, conn) 72 | } 73 | -------------------------------------------------------------------------------- /redis/command_type_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "github.com/joomcode/redispipe/redis" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestCommandType(t *testing.T) { 13 | assert.True(t, redis.ReplicaSafe("GET")) 14 | assert.True(t, redis.ReplicaSafe("Get")) 15 | assert.True(t, redis.ReplicaSafe("get")) 16 | assert.False(t, redis.ReplicaSafe("SET")) 17 | assert.False(t, redis.ReplicaSafe("Set")) 18 | assert.False(t, redis.ReplicaSafe("set")) 19 | 20 | assert.True(t, redis.Blocking("BLPOP")) 21 | assert.True(t, redis.Blocking("Blpop")) 22 | assert.True(t, redis.Blocking("blpop")) 23 | assert.False(t, redis.Blocking("LPOP")) 24 | assert.False(t, redis.Blocking("Lpop")) 25 | assert.False(t, redis.Blocking("lpop")) 26 | 27 | assert.True(t, redis.Dangerous("SUBSCRIBE")) 28 | assert.True(t, redis.Dangerous("Subscribe")) 29 | assert.True(t, redis.Dangerous("subscribe")) 30 | assert.False(t, redis.Dangerous("PUBLISH")) 31 | assert.False(t, redis.Dangerous("Publish")) 32 | assert.False(t, redis.Dangerous("publish")) 33 | } 34 | 35 | var sum int 36 | 37 | func BenchmarkCommandType(b *testing.B) { 38 | var cmds = strings.Split("PING ECHO DUMP MEMORY EXISTS GET GETRANGE RANDOMKEY KEYS TYPE TTL PTTL "+ 39 | "BITCOUNT BITPOS GETBIT "+ 40 | "GEOHASH GEOPOS GEODIST GEORADIUS_RO GEORADIUSBYMEMBER_RO "+ 41 | "HEXISTS HGET HGETALL HKEYS HLEN HMGET HSTRLEN HVALS "+ 42 | "LINDEX LLEN LRANGE "+ 43 | "PFCOUNT "+ 44 | "SCARD SDIFF SINTER SISMEMBER SMEMBERS SRANDMEMBER STRLEN SUNION "+ 45 | "ZCARD ZCOUNT ZLEXCOUNT ZRANGE ZRANGEBYLEX ZREVRANGEBYLEX "+ 46 | "ZRANGEBYSCORE ZRANK 
ZREVRANGE ZREVRANGEBYSCORE ZREVRANK ZSCORE "+ 47 | "XPENDING XREVRANGE XREAD XLEN ", " ")[:3] 48 | 49 | for i := 0; i < b.N; i++ { 50 | for _, cmd := range cmds { 51 | if redis.ReplicaSafe(cmd) { 52 | sum++ 53 | } 54 | if redis.Blocking(cmd) { 55 | sum++ 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /redis/response.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/joomcode/errorx" 7 | ) 8 | 9 | // AsError casts interface to error (if it is error) 10 | func AsError(v interface{}) error { 11 | e, _ := v.(error) 12 | return e 13 | } 14 | 15 | // AsErrorx casts interface to *errorx.Error. 16 | // It panics if value is error but not *redis.Error. 17 | func AsErrorx(v interface{}) *errorx.Error { 18 | e, _ := v.(*errorx.Error) 19 | if e == nil { 20 | if _, ok := v.(error); ok { 21 | panic(fmt.Errorf("result should be either *rediserror.Error, or not error at all, but got %#v", v)) 22 | } 23 | } 24 | return e 25 | } 26 | 27 | // ScanResponse parses response of Scan command, returns iterator and array of keys. 28 | func ScanResponse(res interface{}) ([]byte, []string, error) { 29 | if err := AsError(res); err != nil { 30 | return nil, nil, err 31 | } 32 | var ok bool 33 | var arr []interface{} 34 | var it []byte 35 | var keys []interface{} 36 | var strs []string 37 | if arr, ok = res.([]interface{}); !ok { 38 | goto wrong 39 | } 40 | if it, ok = arr[0].([]byte); !ok { 41 | goto wrong 42 | } 43 | if keys, ok = arr[1].([]interface{}); !ok { 44 | goto wrong 45 | } 46 | strs = make([]string, len(keys)) 47 | for i, k := range keys { 48 | var b []byte 49 | if b, ok = k.([]byte); !ok { 50 | goto wrong 51 | } 52 | strs[i] = string(b) 53 | } 54 | return it, strs, nil 55 | 56 | wrong: 57 | return nil, nil, ErrResponseUnexpected.NewWithNoMessage().WithProperty(EKResponse, res) 58 | } 59 | 60 | // TransactionResponse parses response of EXEC command, returns array of answers. 
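// An illustrative sketch (ChanTransaction.Results in this package wraps exactly this call):
//
//	tx := ChanFutured{S: sender}.SendTransaction([]Request{
//		Req("INCR", "x"),
//		Req("GET", "x"),
//	})
//	answers, err := tx.Results() // equivalent to TransactionResponse(tx.Value())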
61 | func TransactionResponse(res interface{}) ([]interface{}, error) { 62 | if arr, ok := res.([]interface{}); ok { 63 | return arr, nil 64 | } 65 | if res == nil { 66 | res = ErrExecEmpty.NewWithNoMessage() 67 | } 68 | if _, ok := res.(error); !ok { 69 | res = ErrResponseUnexpected.NewWithNoMessage().WithProperty(EKResponse, res) 70 | } 71 | return nil, res.(error) 72 | } 73 | -------------------------------------------------------------------------------- /rediscluster/bench/go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= 5 | github.com/joomcode/errorx v1.0.3 h1:3e1mi0u7/HTPNdg6d6DYyKGBhA5l9XpsfuVE29NxnWw= 6 | github.com/joomcode/errorx v1.0.3/go.mod h1:eQzdtdlNyN7etw6YCS4W4+lu442waxZYw5yvz0ULrRo= 7 | github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9/go.mod h1:fLRUbhbSd5Px2yKUaGYYPltlyxi1guJz1vCmo1RQL50= 8 | github.com/mediocregopher/radix/v3 v3.7.0 h1:SM9zJdme5pYGEVvh1HttjBjDmIaNBDKy+oDCv5w81Wo= 9 | github.com/mediocregopher/radix/v3 v3.7.0/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= 10 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 11 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 12 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 13 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 14 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 15 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 16 | github.com/wuxibin89/redis-go-cluster v1.0.1-0.20161207023922-222d81891f1d h1:tXP1B4pzBLfNwopt8tc7EIno9dxoLE9mrW7cddo3AE8= 17 | github.com/wuxibin89/redis-go-cluster v1.0.1-0.20161207023922-222d81891f1d/go.mod h1:tqX224sOzDC3Z3yeJj3Ti3NJH0gxqr1B+/TzpIWfHiQ= 18 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= 19 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 20 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 21 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 22 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 23 | -------------------------------------------------------------------------------- /redisconn/bench/go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= 5 | github.com/gomodule/redigo v1.8.4 h1:Z5JUg94HMTR1XpwBaSH4vq3+PNSIykBLxMdglbw10gg= 6 | github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= 7 | 
github.com/joomcode/errorx v1.0.3 h1:3e1mi0u7/HTPNdg6d6DYyKGBhA5l9XpsfuVE29NxnWw= 8 | github.com/joomcode/errorx v1.0.3/go.mod h1:eQzdtdlNyN7etw6YCS4W4+lu442waxZYw5yvz0ULrRo= 9 | github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9/go.mod h1:fLRUbhbSd5Px2yKUaGYYPltlyxi1guJz1vCmo1RQL50= 10 | github.com/mediocregopher/radix/v3 v3.7.0 h1:SM9zJdme5pYGEVvh1HttjBjDmIaNBDKy+oDCv5w81Wo= 11 | github.com/mediocregopher/radix/v3 v3.7.0/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= 12 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 13 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 14 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 15 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 16 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 17 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 18 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 19 | github.com/wuxibin89/redis-go-cluster v1.0.1-0.20161207023922-222d81891f1d/go.mod h1:tqX224sOzDC3Z3yeJj3Ti3NJH0gxqr1B+/TzpIWfHiQ= 20 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= 21 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 22 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 23 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 24 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 25 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 26 | -------------------------------------------------------------------------------- /redis/request.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import "fmt" 4 | 5 | // Req - convenient wrapper to create Request. 6 | func Req(cmd string, args ...interface{}) Request { 7 | return Request{cmd, args} 8 | } 9 | 10 | // Request represents request to be passed to redis. 11 | type Request struct { 12 | // Cmd is a redis command to be sent. 13 | // It could contain single space, then it will be split, and last part will be serialized as an argument. 14 | Cmd string 15 | Args []interface{} 16 | } 17 | 18 | func (r Request) String() string { 19 | args := r.Args 20 | if len(args) > 5 { 21 | args = args[:5] 22 | } 23 | argss := make([]string, 0, 1+len(args)) 24 | for _, arg := range args { 25 | argStr := fmt.Sprintf("%v", arg) 26 | if len(argStr) > 32 { 27 | argStr = argStr[:32] + "..." 28 | } 29 | argss = append(argss, argStr) 30 | } 31 | if len(r.Args) > 5 { 32 | argss = append(argss, "...") 33 | } 34 | return fmt.Sprintf("Req(%q, %q)", r.Cmd, argss) 35 | } 36 | 37 | // Key returns first field of request that should be used as a key for redis cluster. 
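// Illustrative results of the rule implemented below (assuming ArgToString passes plain
// string arguments through unchanged):
//
//	Req("GET", "k").Key()                  // -> "k", true
//	Req("EVAL", "return 1", 1, "k").Key()  // -> "k", true (key follows script and numkeys)
//	Req("RANDOMKEY").Key()                 // -> "RANDOMKEY", false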
38 | func (r Request) Key() (string, bool) { 39 | if r.Cmd == "RANDOMKEY" { 40 | return "RANDOMKEY", false 41 | } 42 | var n int 43 | switch r.Cmd { 44 | case "EVAL", "EVALSHA": 45 | n = 2 46 | case "BITOP": 47 | n = 1 48 | default: 49 | n = 0 50 | } 51 | if len(r.Args) <= n { 52 | return "", false 53 | } 54 | return ArgToString(r.Args[n]) 55 | } 56 | 57 | // Future is interface accepted by Sender to signal request completion. 58 | type Future interface { 59 | // Resolve is called by sender to pass result (or error) for particular request. 60 | // Single future could be used for accepting multiple results. 61 | // n argument is used then to distinguish request this result is for. 62 | Resolve(res interface{}, n uint64) 63 | // Cancelled method could inform sender that request is abandoned. 64 | // It is called usually before sending request, and if Cancelled returns non-nil error, 65 | // then Sender calls Resolve with ErrRequestCancelled error wrapped around returned error. 66 | Cancelled() error 67 | } 68 | 69 | // FuncFuture simple wrapper that makes Future from function. 70 | type FuncFuture func(res interface{}, n uint64) 71 | 72 | // Cancelled implements Future.Cancelled (always false) 73 | func (f FuncFuture) Cancelled() error { return nil } 74 | 75 | // Resolve implements Future.Resolve (by calling wrapped function). 76 | func (f FuncFuture) Resolve(res interface{}, n uint64) { f(res, n) } 77 | -------------------------------------------------------------------------------- /rediscluster/roundrobin.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "sync/atomic" 7 | "time" 8 | ) 9 | 10 | // RoundRobinSeed is the source of decision which replica to use for each particular request when 11 | // replica-policy is MasterAndSlaves or PreferSlaves. 12 | type RoundRobinSeed interface { 13 | // Current returns "deterministic random" value used for choosing replica. 14 | Current() uint32 15 | } 16 | 17 | // FairRoundRobinSeed implements RoundRobinSeed by returning new value every time using atomic increment. 18 | // It doesn't works well in practice because it reduces pipeline efficiency. 19 | // It is presented only as example. 20 | type FairRoundRobinSeed struct{ v uint32 } 21 | 22 | // Current implements RoundRobinSeed.Current method. 23 | func (d *FairRoundRobinSeed) Current() uint32 { 24 | return atomic.AddUint32(&d.v, 1) 25 | } 26 | 27 | // TimedRoundRobinSeed is implementation of RoundRobinSeed. 28 | // It runs goroutine which periodically stores new random value, 29 | // and returns this value between this updates. 30 | // It improves pipeline efficiency, and it is used as default implementation. 31 | type TimedRoundRobinSeed struct { 32 | v uint32 33 | stop uint32 34 | } 35 | 36 | // NewTimedRoundRobinSeed returns TimedRoundRobinSeed which updates its value every `interval`. 37 | func NewTimedRoundRobinSeed(interval time.Duration) *TimedRoundRobinSeed { 38 | rr := &TimedRoundRobinSeed{} 39 | go func() { 40 | rnd := rand.New(rand.NewSource(time.Now().UnixNano())) 41 | t := time.NewTicker(interval) 42 | defer t.Stop() 43 | for atomic.LoadUint32(&rr.stop) == 0 { 44 | <-t.C 45 | atomic.StoreUint32(&rr.v, rnd.Uint32()) 46 | } 47 | }() 48 | return rr 49 | } 50 | 51 | // Current is implementation of RoundRobinSeed.Current. 52 | // It returns same value during `interval` period. 
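// A minimal usage sketch (how a custom seed is wired into cluster options is not shown in
// this file):
//
//	seed := NewTimedRoundRobinSeed(50 * time.Millisecond)
//	_ = seed.Current() // stays the same for roughly one interval
//	seed.Stop()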
53 | func (rr *TimedRoundRobinSeed) Current() uint32 { 54 | return atomic.LoadUint32(&rr.v) 55 | } 56 | 57 | // Stop signals value changing goroutine to quit. 58 | func (rr *TimedRoundRobinSeed) Stop() { 59 | atomic.StoreUint32(&rr.stop, 1) 60 | } 61 | 62 | var defaultSeed *TimedRoundRobinSeed 63 | var defaultSeedOnce sync.Once 64 | 65 | // DefaultRoundRobinSeed returns singleton of TimedRoundRobinSeed with random interval between 45ms and 100ms. 66 | func DefaultRoundRobinSeed() *TimedRoundRobinSeed { 67 | defaultSeedOnce.Do(func() { 68 | v := uint64(time.Now().UnixNano()) 69 | v ^= (v<<40 | v>>24) ^ (v<<15 | v>>49) 70 | v ^= v >> 1 71 | defaultSeed = NewTimedRoundRobinSeed(time.Duration(45000+v%55000) * time.Microsecond) 72 | }) 73 | return defaultSeed 74 | } 75 | -------------------------------------------------------------------------------- /redis/chan_future.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | // ChanFutured wraps Sender and provides asynchronous interface through future implemented 4 | // with channel. 5 | type ChanFutured struct { 6 | S Sender 7 | } 8 | 9 | // Send sends requests and returns ChanFuture for result. 10 | func (s ChanFutured) Send(r Request) *ChanFuture { 11 | f := &ChanFuture{wait: make(chan struct{})} 12 | s.S.Send(r, f, 0) 13 | return f 14 | } 15 | 16 | // SendMany sends several requests and returns slice of ChanFuture for results. 17 | func (s ChanFutured) SendMany(reqs []Request) ChanFutures { 18 | futures := make(ChanFutures, len(reqs)) 19 | for i := range futures { 20 | futures[i] = &ChanFuture{wait: make(chan struct{})} 21 | } 22 | s.S.SendMany(reqs, futures, 0) 23 | return futures 24 | } 25 | 26 | // SendTransaction sends several requests as MULTI+EXEC transaction, 27 | // returns ChanTransaction - wrapper around ChanFuture with additional method. 28 | func (s ChanFutured) SendTransaction(r []Request) *ChanTransaction { 29 | future := &ChanTransaction{ 30 | ChanFuture: ChanFuture{wait: make(chan struct{})}, 31 | } 32 | s.S.SendTransaction(r, future, 0) 33 | return future 34 | } 35 | 36 | // ChanFuture - future implemented with channel as signal of fulfillment. 37 | type ChanFuture struct { 38 | r interface{} 39 | wait chan struct{} 40 | } 41 | 42 | // Value waits for result to be fulfilled and returns result. 43 | func (f *ChanFuture) Value() interface{} { 44 | <-f.wait 45 | return f.r 46 | } 47 | 48 | // Done returns channel that will be closed on fulfillment. 49 | func (f *ChanFuture) Done() <-chan struct{} { 50 | return f.wait 51 | } 52 | 53 | // Resolve - implementation of Future.Resolve 54 | func (f *ChanFuture) Resolve(res interface{}, _ uint64) { 55 | f.r = res 56 | close(f.wait) 57 | } 58 | 59 | // Cancelled - implementation of Future.Cancelled (always false). 60 | func (f *ChanFuture) Cancelled() error { 61 | return nil 62 | } 63 | 64 | // ChanFutures - implementation of Future over slice of *ChanFuture 65 | type ChanFutures []*ChanFuture 66 | 67 | // Cancelled - implementation of Future.Cancelled (always false). 68 | func (f ChanFutures) Cancelled() error { 69 | return nil 70 | } 71 | 72 | // Resolve - implementation of Future.Resolve. 73 | // It resolves ChanFuture corresponding to index. 74 | func (f ChanFutures) Resolve(res interface{}, i uint64) { 75 | f[i].Resolve(res, i) 76 | } 77 | 78 | // ChanTransaction - wrapper over ChanFuture with additional convenient method. 
79 | type ChanTransaction struct { 80 | ChanFuture 81 | } 82 | 83 | // Results - parses result of transaction and returns it as an array of results. 84 | func (f *ChanTransaction) Results() ([]interface{}, error) { 85 | <-f.wait 86 | return TransactionResponse(f.r) 87 | } 88 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/master_only.go: -------------------------------------------------------------------------------- 1 | package redisclusterutil 2 | 3 | import ( 4 | "strconv" 5 | 6 | "github.com/joomcode/redispipe/redis" 7 | ) 8 | 9 | // MasterOnlyKey is a key of redis's SET which contains slots number. 10 | // Cluster connector's main loop will switch these slots to MasterOnly mode on configuration refreshing. 11 | // When slot migrates, slave redis instance behaves stupidly: they do not know about migration, and therefore 12 | // they doesn't response with "ASKING" error for migrated key, and doesn't response to "ASKING" command on new shard. 13 | // MasterOnlyKey is used within custom cluster migration utility to correctly migrate slot. 14 | const MasterOnlyKey = "CLUSTER_SELF:MASTER_ONLY" 15 | 16 | // RequestMasterOnly fetches content of key as a int set, and returns it as a map. 17 | // If key is empty (""), then MasterOnlyKey is used. 18 | func RequestMasterOnly(c redis.Sender, key string) (set map[uint16]struct{}, valid bool, err error) { 19 | if key == "" { 20 | key = MasterOnlyKey 21 | } 22 | resp := redis.Sync{c}.Do("SMEMBERS", key) 23 | return ParseMasterOnly(resp) 24 | } 25 | 26 | // ParseMasterOnly parses content of MasterOnlyKey. 27 | func ParseMasterOnly(resp interface{}) (set map[uint16]struct{}, valid bool, err error) { 28 | if err := redis.AsError(resp); err != nil { 29 | return nil, false, err 30 | } 31 | if slots, ok := resp.([]interface{}); ok { 32 | valid = true 33 | if len(slots) > 0 { 34 | set = make(map[uint16]struct{}) 35 | } 36 | for _, sl := range slots { 37 | if b, ok := sl.([]byte); ok { 38 | var slot int 39 | slot, err = strconv.Atoi(string(b)) 40 | if err != nil { 41 | return nil, false, err 42 | } 43 | set[uint16(slot)] = struct{}{} 44 | } 45 | } 46 | } else if resp == nil { 47 | valid = true 48 | } 49 | return set, valid, nil 50 | } 51 | 52 | // SetMasterOnly sets MasterOnlyKey to contain specified slots. 53 | // It is used before slot migration. 54 | func SetMasterOnly(c redis.Sender, key string, slots []uint16) error { 55 | if key == "" { 56 | key = MasterOnlyKey 57 | } 58 | args := append(make([]interface{}, 0, len(slots)+1), key) 59 | for _, slot := range slots { 60 | args = append(args, slot) 61 | } 62 | resp := redis.Sync{c}.Do("SADD", args...) 63 | return redis.AsError(resp) 64 | } 65 | 66 | // UnsetMasterOnly unsets slots from MasterOnlyKey. 67 | // It is used after slot migration. 68 | func UnsetMasterOnly(c redis.Sender, key string, slots []uint16) error { 69 | if key == "" { 70 | key = MasterOnlyKey 71 | } 72 | args := append(make([]interface{}, 0, len(slots)+1), key) 73 | for _, slot := range slots { 74 | args = append(args, slot) 75 | } 76 | resp := redis.Sync{c}.Do("SREM", args...) 
77 | return redis.AsError(resp) 78 | } 79 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/crc16.go: -------------------------------------------------------------------------------- 1 | package redisclusterutil 2 | 3 | // copied from github.com/mediocregopher/radix.v2/cluster/crc16.go 4 | 5 | import ( 6 | "strings" 7 | ) 8 | 9 | var tab = [256]uint16{ 10 | 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 11 | 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, 12 | 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, 13 | 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, 14 | 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, 15 | 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, 16 | 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, 17 | 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, 18 | 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, 19 | 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, 20 | 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, 21 | 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, 22 | 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, 23 | 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, 24 | 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, 25 | 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, 26 | 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, 27 | 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, 28 | 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, 29 | 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, 30 | 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, 31 | 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 32 | 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, 33 | 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, 34 | 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, 35 | 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, 36 | 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, 37 | 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, 38 | 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, 39 | 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, 40 | 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, 41 | 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, 42 | } 43 | 44 | // NumSlots is the number of slots keys are sharded into in a redis cluster 45 | const NumSlots = 16384 46 | 47 | // CRC16 returns checksum for a given set of bytes based on the crc algorithm 48 | // defined for hashing redis keys in a cluster setup 49 | func CRC16(buf []byte) uint16 { 50 | crc := uint16(0) 51 | for _, b := range buf { 52 | index := byte(crc>>8) ^ b 53 | crc = (crc << 8) ^ tab[index] 54 | } 55 | return crc 56 | } 57 | 58 | // Slot returns the cluster slot the given key will fall into, taking into 59 | // account curly braces within the key as per the spec. 
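// For example (per the hash-tag rule implemented below):
//
//	Slot("{user1000}.following") == Slot("{user1000}.followers") // only "user1000" is hashed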
60 | func Slot(key string) uint16 { 61 | if start := strings.Index(key, "{"); start >= 0 { 62 | if end := strings.Index(key[start+1:], "}"); end > 0 { 63 | key = key[start+1 : start+1+end] 64 | } 65 | } 66 | return CRC16([]byte(key)) % NumSlots 67 | } 68 | -------------------------------------------------------------------------------- /redisconn/logger.go: -------------------------------------------------------------------------------- 1 | package redisconn 2 | 3 | import "log" 4 | 5 | // Logger is a type for custom event and stat reporter. 6 | type Logger interface { 7 | // Report will be called when some events happens during connection's lifetime. 8 | // Default implementation just prints this information using standard log package. 9 | Report(conn *Connection, event LogEvent) 10 | // ReqStat is called after request receives it's answer with request/result information 11 | // and time spend to fulfill request. 12 | // Default implementation is no-op. 13 | ReqStat(conn *Connection, req Request, res interface{}, nanos, bytesIn, bytesOut int64) 14 | } 15 | 16 | // LogEvent is a sum-type for events to be logged. 17 | type LogEvent interface { 18 | logEvent() // tagging method 19 | } 20 | 21 | // LogConnecting is an event logged when Connection starts dialing to redis. 22 | type LogConnecting struct{} 23 | 24 | // LogConnected is logged when Connection established connection to redis. 25 | type LogConnected struct { 26 | LocalAddr string // - local ip:port 27 | RemoteAddr string // - remote ip:port 28 | } 29 | 30 | // LogConnectFailed is logged when connection establishing were unsuccessful. 31 | type LogConnectFailed struct { 32 | Error error // - failure reason 33 | } 34 | 35 | // LogDisconnected is logged when connection were broken. 36 | type LogDisconnected struct { 37 | Error error // - disconnection reason 38 | LocalAddr string // - local ip:port 39 | RemoteAddr string // - remote ip:port 40 | } 41 | 42 | // LogContextClosed is logged when Connection's context were closed, or Connection.Close() called. 43 | // Ie when connection is explicitly closed by user. 
44 | type LogContextClosed struct { 45 | Error error // - ctx.Err() 46 | } 47 | 48 | func (LogConnecting) logEvent() {} 49 | func (LogConnected) logEvent() {} 50 | func (LogConnectFailed) logEvent() {} 51 | func (LogDisconnected) logEvent() {} 52 | func (LogContextClosed) logEvent() {} 53 | 54 | func (conn *Connection) report(event LogEvent) { 55 | conn.opts.Logger.Report(conn, event) 56 | } 57 | 58 | // DefaultLogger is default implementation of Logger 59 | type DefaultLogger struct{} 60 | 61 | // Report implements Logger.Report 62 | func (d DefaultLogger) Report(conn *Connection, event LogEvent) { 63 | switch ev := event.(type) { 64 | case LogConnecting: 65 | log.Printf("redis: connecting to %s", conn.Addr()) 66 | case LogConnected: 67 | log.Printf("redis: connected to %s (localAddr: %s, remAddr: %s)", 68 | conn.Addr(), ev.LocalAddr, ev.RemoteAddr) 69 | case LogConnectFailed: 70 | log.Printf("redis: connection to %s failed: %s", conn.Addr(), ev.Error.Error()) 71 | case LogDisconnected: 72 | log.Printf("redis: connection to %s broken (localAddr: %s, remAddr: %s): %s", conn.Addr(), 73 | ev.LocalAddr, ev.RemoteAddr, ev.Error.Error()) 74 | case LogContextClosed: 75 | log.Printf("redis: connect to %s explicitly closed: %s", conn.Addr(), ev.Error.Error()) 76 | default: 77 | log.Printf("redis: unexpected event: %#v", event) 78 | } 79 | } 80 | 81 | // ReqStat implements Logger.ReqStat 82 | func (d DefaultLogger) ReqStat(_ *Connection, _ Request, _ interface{}, _, _, _ int64) { 83 | // noop 84 | } 85 | 86 | // NoopLogger is noop implementation of Logger 87 | // Useful in tests 88 | type NoopLogger struct{} 89 | 90 | // Report implements Logger.Report 91 | func (d NoopLogger) Report(*Connection, LogEvent) {} 92 | 93 | // ReqStat implements Logger.ReqStat 94 | func (d NoopLogger) ReqStat(_ *Connection, _ Request, _ interface{}, _, _, _ int64) {} 95 | -------------------------------------------------------------------------------- /redis/command_type.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import "strings" 4 | 5 | // hackish case insensitive hash function 6 | func fnv1a64NoCase(s string) uint64 { 7 | h := uint64(14695981039346656037) 8 | for _, c := range []byte(s) { 9 | h ^= uint64(c) &^ 0x20 10 | h *= 1099511628211 11 | } 12 | return h 13 | } 14 | 15 | func makeSet(names []string) []uint64 { 16 | l := 1 17 | for ; l < len(names)*2; l *= 2 { 18 | } 19 | hsh := make([]uint64, l) 20 | for _, name := range names { 21 | h := fnv1a64NoCase(name) 22 | pos := int(h) & (l - 1) 23 | for hsh[pos] != 0 { 24 | pos = (pos + 1) & (l - 1) 25 | } 26 | hsh[pos] = h 27 | } 28 | return hsh 29 | } 30 | 31 | func checkSet(name string, hsh []uint64) bool { 32 | h := fnv1a64NoCase(name) 33 | msk := len(hsh) - 1 34 | pos := int(h) 35 | for { 36 | switch hsh[pos&msk] { 37 | case h: 38 | return true 39 | case 0: 40 | return false 41 | } 42 | pos++ 43 | } 44 | } 45 | 46 | var replicaSafe = makeSet(strings.Split( 47 | "PING ECHO DUMP MEMORY EXISTS GET GETRANGE RANDOMKEY KEYS TYPE TTL PTTL "+ 48 | "BITCOUNT BITPOS GETBIT "+ 49 | "GEOHASH GEOPOS GEODIST GEORADIUS_RO GEORADIUSBYMEMBER_RO "+ 50 | "HEXISTS HGET HGETALL HKEYS HLEN HMGET HSTRLEN HVALS "+ 51 | "LINDEX LLEN LRANGE "+ 52 | "PFCOUNT "+ 53 | "SCARD SDIFF SINTER SISMEMBER SMEMBERS SRANDMEMBER STRLEN SUNION "+ 54 | "ZCARD ZCOUNT ZLEXCOUNT ZRANGE ZRANGEBYLEX ZREVRANGEBYLEX "+ 55 | "ZRANGEBYSCORE ZRANK ZREVRANGE ZREVRANGEBYSCORE ZREVRANK ZSCORE "+ 56 | "XPENDING XREVRANGE XREAD XLEN ", " ")) 57 | 58 | // 
ReplicaSafe returns true if command is readonly and "safe to run on replica". 59 | // Some commands like "scan" are not included, because their result could differ between 60 | // master and replica. 61 | func ReplicaSafe(name string) bool { 62 | return checkSet(name, replicaSafe) 63 | } 64 | 65 | var blocking = makeSet(strings.Split("BLPOP BRPOP BLPOPPUSH BZPOPMIN BZPOPMAX XREAD XREADGROUP SAVE WATCH", " ")) 66 | 67 | // Blocking returns true if command is known to be blocking. 68 | // Blocking commands could stall whole pipeline and therefore affect other commands sent 69 | // through this connection. It is undesirable and prevented by default. 70 | // 71 | // This commands are forbidden in default configuration, but could be enabled with `SingleThreaded` 72 | // connection option. 73 | // 74 | // `WATCH` command is also included here because while it is dangerous in concurrent environment, 75 | // it is safe to be used in single threaded case. 76 | func Blocking(name string) bool { 77 | return checkSet(name, blocking) 78 | } 79 | 80 | var subscribeHash = fnv1a64NoCase("SUBSCRIBE") 81 | var psubscribeHash = fnv1a64NoCase("PSUBSCRIBE") 82 | 83 | // Dangerous returns true if command is not safe to use with the connector. 84 | // Currently it includes `SUBSCRIBE`, `PSUBSCRIBE` commands, because they changes connection protocol mode. 85 | func Dangerous(name string) bool { 86 | h := fnv1a64NoCase(name) 87 | return h == subscribeHash || h == psubscribeHash 88 | } 89 | 90 | // ForbiddenCommand returns true if command is not allowed to run. 91 | func ForbiddenCommand(name string, singleThreaded bool) error { 92 | h := fnv1a64NoCase(name) 93 | if h == subscribeHash || h == psubscribeHash { 94 | return ErrCommandForbidden.New("command %s could not be used with this connector", name) 95 | } 96 | if !singleThreaded && checkSet(name, blocking) { 97 | return ErrCommandForbidden.New("blocking command %s could be used only in 'scripting mode'", name) 98 | } 99 | return nil 100 | } 101 | -------------------------------------------------------------------------------- /redis/sync.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/joomcode/errorx" 7 | ) 8 | 9 | // Sync provides convenient synchronouse interface over asynchronouse Sender. 10 | type Sync struct { 11 | S Sender 12 | } 13 | 14 | // Do is convenient method to construct and send request. 15 | // Returns value that could be either result or error. 16 | func (s Sync) Do(cmd string, args ...interface{}) interface{} { 17 | return s.Send(Request{cmd, args}) 18 | } 19 | 20 | // Send sends request to redis. 21 | // Returns value that could be either result or error. 22 | func (s Sync) Send(r Request) interface{} { 23 | var res syncRes 24 | res.Add(1) 25 | s.S.Send(r, &res, 0) 26 | res.Wait() 27 | if CollectTrace { 28 | if err := AsErrorx(res.r); err != nil { 29 | res.r = errorx.EnsureStackTrace(err) 30 | } 31 | } 32 | return res.r 33 | } 34 | 35 | // SendMany sends several requests in "parallel" and returns slice or results in a same order. 36 | // Each result could be value or error. 
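//
// A minimal sketch (sender is any Sender; keys are illustrative):
//
//	ress := Sync{sender}.SendMany([]Request{
//		Req("GET", "key1"),
//		Req("GET", "key2"),
//	})
//	// Each element of ress is either a value or an error; check it with AsError.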
37 | func (s Sync) SendMany(reqs []Request) []interface{} { 38 | if len(reqs) == 0 { 39 | return nil 40 | } 41 | 42 | res := syncBatch{ 43 | r: make([]interface{}, len(reqs)), 44 | } 45 | res.Add(len(reqs)) 46 | s.S.SendMany(reqs, &res, 0) 47 | res.Wait() 48 | if CollectTrace { 49 | for i, v := range res.r { 50 | if err := AsErrorx(v); err != nil { 51 | res.r[i] = errorx.EnsureStackTrace(err) 52 | } 53 | } 54 | } 55 | return res.r 56 | } 57 | 58 | // SendTransaction sends several requests as a single MULTI+EXEC transaction. 59 | // It returns array of responses and an error, if transaction fails. 60 | // Since Redis transaction either fully executed or fully failed, 61 | // all values are valid if err == nil. 62 | func (s Sync) SendTransaction(reqs []Request) ([]interface{}, error) { 63 | var res syncRes 64 | res.Add(1) 65 | s.S.SendTransaction(reqs, &res, 0) 66 | res.Wait() 67 | ress, err := TransactionResponse(res.r) 68 | if CollectTrace && err != nil { 69 | err = errorx.EnsureStackTrace(err) 70 | } 71 | return ress, err 72 | } 73 | 74 | // Scanner returns synchronous iterator over redis keyspace/key. 75 | func (s Sync) Scanner(opts ScanOpts) SyncIterator { 76 | return SyncIterator{s.S.Scanner(opts)} 77 | } 78 | 79 | type syncRes struct { 80 | r interface{} 81 | sync.WaitGroup 82 | } 83 | 84 | // Cancelled implements Future.Cancelled 85 | func (s *syncRes) Cancelled() error { 86 | return nil 87 | } 88 | 89 | // Resolve implements Future.Resolve 90 | func (s *syncRes) Resolve(res interface{}, _ uint64) { 91 | s.r = res 92 | s.Done() 93 | } 94 | 95 | type syncBatch struct { 96 | r []interface{} 97 | sync.WaitGroup 98 | } 99 | 100 | // Cancelled implements Future.Cancelled 101 | func (s *syncBatch) Cancelled() error { 102 | return nil 103 | } 104 | 105 | // Resolve implements Future.Resolve 106 | func (s *syncBatch) Resolve(res interface{}, i uint64) { 107 | s.r[i] = res 108 | s.Done() 109 | } 110 | 111 | // SyncIterator is synchronous iterator over repeating *SCAN command. 112 | type SyncIterator struct { 113 | s Scanner 114 | } 115 | 116 | // Next returns next bunch of keys, or error. 117 | // ScanEOF error signals for regular iteration completion. 
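//
// Typical usage (sketch):
//
//	iter := Sync{sender}.Scanner(ScanOpts{Match: "key*"})
//	for {
//		keys, err := iter.Next()
//		if err != nil {
//			if err != ScanEOF {
//				// handle the real error
//			}
//			break
//		}
//		// use keys
//	}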
118 | func (s SyncIterator) Next() ([]string, error) { 119 | var res syncRes 120 | res.Add(1) 121 | s.s.Next(&res) 122 | res.Wait() 123 | if err := AsError(res.r); err != nil { 124 | if CollectTrace { 125 | err = errorx.EnsureStackTrace(err.(*errorx.Error)) 126 | } 127 | return nil, err 128 | } else if res.r == nil { 129 | return nil, ScanEOF 130 | } else { 131 | return res.r.([]string), nil 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /redis/example_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "time" 9 | 10 | "github.com/joomcode/redispipe/redis" 11 | "github.com/joomcode/redispipe/redisconn" 12 | "github.com/joomcode/redispipe/testbed" 13 | ) 14 | 15 | func ExampleAppendRequest() { 16 | req, err := redis.AppendRequest(nil, redis.Req("GET", "one")) 17 | fmt.Printf("%q\n%v\n", req, err) 18 | req, err = redis.AppendRequest(req, redis.Req("INCRBY", "cnt", 5)) 19 | fmt.Printf("%q\n%v\n", req, err) 20 | req, err = redis.AppendRequest(req, redis.Req("SENDFOO", time.Second)) 21 | fmt.Printf("%q\n%v\n", req, err) 22 | 23 | // Output: 24 | // "*2\r\n$3\r\nGET\r\n$3\r\none\r\n" 25 | // 26 | // "*2\r\n$3\r\nGET\r\n$3\r\none\r\n*3\r\n$6\r\nINCRBY\r\n$3\r\ncnt\r\n$1\r\n5\r\n" 27 | // 28 | // "*2\r\n$3\r\nGET\r\n$3\r\none\r\n*3\r\n$6\r\nINCRBY\r\n$3\r\ncnt\r\n$1\r\n5\r\n" 29 | // redispipe.request.argument_type: {request: Req("SENDFOO", ["1s"]), argpos: 0, val: 1s} 30 | } 31 | 32 | func ExampleAsError() { 33 | vals := []interface{}{ 34 | nil, 35 | 1, 36 | "hello", 37 | errors.New("high"), 38 | redis.ErrResult.New("goodbye"), 39 | } 40 | 41 | for _, v := range vals { 42 | fmt.Printf("%T %v => %T %v\n", v, v, redis.AsError(v), redis.AsError(v)) 43 | } 44 | 45 | // Output: 46 | // => 47 | // int 1 => 48 | // string hello => 49 | // *errors.errorString high => *errors.errorString high 50 | // *errorx.Error redispipe.result: goodbye => *errorx.Error redispipe.result: goodbye 51 | } 52 | 53 | func ExampleScanner() { 54 | defer runServer(46231)() 55 | ctx := context.Background() 56 | conn, _ := redisconn.Connect(ctx, "127.0.0.1:46231", redisconn.Opts{ 57 | Logger: redisconn.NoopLogger{}, 58 | }) 59 | sync := redis.Sync{conn} 60 | sync.Do("SET", "key1", "val1") 61 | sync.Do("SET", "key2", "val2") 62 | scan := sync.Scanner(redis.ScanOpts{Match: "key*"}) 63 | for { 64 | keys, err := scan.Next() 65 | if err != nil { 66 | if err != redis.ScanEOF { 67 | log.Fatal(err) 68 | } 69 | break 70 | } 71 | for _, key := range keys { 72 | fmt.Println(key) 73 | } 74 | } 75 | 76 | // Unordered output: 77 | // key1 78 | // key2 79 | } 80 | 81 | func ExampleSync() { 82 | defer runServer(46231)() 83 | ctx := context.Background() 84 | conn, _ := redisconn.Connect(ctx, "127.0.0.1:46231", redisconn.Opts{ 85 | Logger: redisconn.NoopLogger{}, 86 | }) 87 | sync := redis.Sync{conn} 88 | 89 | res := sync.Do("SET", "key1", "1") 90 | fmt.Println(res) 91 | 92 | res = sync.Send(redis.Req("SET", "key2", "2")) 93 | fmt.Println(res) 94 | 95 | ress := sync.SendMany([]redis.Request{ 96 | redis.Req("GET", "key1"), 97 | redis.Req("GET", "key2"), 98 | }) 99 | fmt.Printf("%q\n", ress) 100 | 101 | res = sync.Do("HSET", "key1", "field1", "val1") 102 | fmt.Println(redis.AsError(res)) 103 | 104 | ress, err := sync.SendTransaction([]redis.Request{ 105 | redis.Req("INCR", "key1"), 106 | redis.Req("INCRBY", "key2", -1), 107 | redis.Req("GET", "key1"), 108 | 
redis.Req("GET", "key2"), 109 | }) 110 | fmt.Println(err) 111 | fmt.Printf("%q\n", ress) 112 | 113 | // Output: 114 | // OK 115 | // OK 116 | // ["1" "2"] 117 | // redispipe.result: WRONGTYPE Operation against a key holding the wrong kind of value {request: Req("HSET", ["key1" "field1" "val1"]), address: 127.0.0.1:46231} 118 | // 119 | // ['\x02' '\x01' "2" "1"] 120 | } 121 | 122 | func runServer(port int) func() { 123 | testbed.InitDir(".") 124 | s := testbed.Server{Port: uint16(port)} 125 | s.Start() 126 | return func() { 127 | s.Stop() 128 | testbed.RmDir() 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /redis/reader.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "io" 7 | "strings" 8 | 9 | "github.com/joomcode/errorx" 10 | ) 11 | 12 | // ReadResponse reads single RESP answer from bufio.Reader 13 | func ReadResponse(b *bufio.Reader) (interface{}, int) { 14 | line, isPrefix, err := b.ReadLine() 15 | if err != nil { 16 | return ErrIO.WrapWithNoMessage(err), 0 17 | } 18 | 19 | if isPrefix { 20 | return ErrHeaderlineTooLarge.NewWithNoMessage().WithProperty(EKLine, line), len(line) 21 | } 22 | 23 | if len(line) == 0 { 24 | return ErrHeaderlineEmpty.NewWithNoMessage(), 0 25 | } 26 | 27 | var v int64 28 | switch line[0] { 29 | case '+': 30 | return string(line[1:]), len(line) 31 | case '-': 32 | // detect MOVED and ASK 33 | txt := string(line[1:]) 34 | moved := strings.HasPrefix(txt, "MOVED ") 35 | ask := strings.HasPrefix(txt, "ASK ") 36 | if moved || ask { 37 | parts := bytes.Split(line, []byte(" ")) 38 | if len(parts) < 3 { 39 | return ErrResponseFormat.NewWithNoMessage().WithProperty(EKLine, line), len(line) 40 | } 41 | slot, err := parseInt(parts[1]) 42 | if err != nil { 43 | return err.WithProperty(EKLine, line), len(line) 44 | } 45 | kind := ErrAsk 46 | if moved { 47 | kind = ErrMoved 48 | } 49 | return kind.New(txt).WithProperty(EKMovedTo, string(parts[2])).WithProperty(EKSlot, slot), len(line) 50 | } 51 | if strings.HasPrefix(txt, "CLUSTERDOWN") { 52 | return ErrClusterDown.New(txt), len(line) 53 | } 54 | if strings.HasPrefix(txt, "LOADING") { 55 | return ErrLoading.New(txt), len(line) 56 | } 57 | if strings.HasPrefix(txt, "EXECABORT") { 58 | return ErrExecAbort.New(txt), len(line) 59 | } 60 | if strings.HasPrefix(txt, "TRYAGAIN") { 61 | return ErrTryAgain.New(txt), len(line) 62 | } 63 | return ErrResult.New(txt), len(line) 64 | case ':': 65 | v, err := parseInt(line[1:]) 66 | if err != nil { 67 | return err.WithProperty(EKLine, line), len(line) 68 | } 69 | return v, len(line) 70 | case '$': 71 | var rerr *errorx.Error 72 | if v, rerr = parseInt(line[1:]); rerr != nil { 73 | return rerr.WithProperty(EKLine, line), len(line) 74 | } 75 | if v < 0 { 76 | return nil, len(line) 77 | } 78 | nBytes := 0 79 | buf := make([]byte, v+2, v+2) 80 | if nBytes, err = io.ReadFull(b, buf); err != nil { 81 | return ErrIO.WrapWithNoMessage(err), nBytes + len(line) 82 | } 83 | if buf[v] != '\r' || buf[v+1] != '\n' { 84 | return ErrNoFinalRN.NewWithNoMessage(), nBytes + len(line) 85 | } 86 | return buf[:v:v], nBytes + len(line) 87 | case '*': 88 | var rerr *errorx.Error 89 | if v, rerr = parseInt(line[1:]); rerr != nil { 90 | return rerr.WithProperty(EKLine, line), len(line) 91 | } 92 | if v < 0 { 93 | return nil, len(line) 94 | } 95 | totalResponseBytes := len(line) 96 | result := make([]interface{}, v) 97 | for i := int64(0); i < v; i++ { 98 | 
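// Recursively read each nested element, accumulating the number of bytes consumed.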
currentResponseBytes := 0 99 | result[i], currentResponseBytes = ReadResponse(b) 100 | totalResponseBytes += currentResponseBytes 101 | if e, ok := result[i].(*errorx.Error); ok && !e.IsOfType(ErrResult) { 102 | return e, totalResponseBytes 103 | } 104 | } 105 | return result, totalResponseBytes 106 | default: 107 | return ErrUnknownHeaderType.NewWithNoMessage(), len(line) 108 | } 109 | } 110 | 111 | func parseInt(buf []byte) (int64, *errorx.Error) { 112 | if len(buf) == 0 { 113 | return 0, ErrIntegerParsing.New("empty buffer") 114 | } 115 | 116 | neg := buf[0] == '-' 117 | if neg { 118 | buf = buf[1:] 119 | } 120 | v := int64(0) 121 | for _, b := range buf { 122 | if b < '0' || b > '9' { 123 | return 0, ErrIntegerParsing.New("contains non-digit") 124 | } 125 | v *= 10 126 | v += int64(b - '0') 127 | } 128 | if neg { 129 | v = -v 130 | } 131 | return v, nil 132 | } 133 | -------------------------------------------------------------------------------- /testbed/server.go: -------------------------------------------------------------------------------- 1 | package testbed 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "github.com/joomcode/redispipe/redisdumb" 7 | "io" 8 | "os" 9 | "os/exec" 10 | "path/filepath" 11 | "strconv" 12 | "syscall" 13 | ) 14 | 15 | // Server is a handle for running redis-server. 16 | type Server struct { 17 | Port uint16 18 | TlsPort uint16 19 | Args []string 20 | Cmd *exec.Cmd 21 | Paused bool 22 | Conn redisdumb.Conn 23 | } 24 | 25 | // PortStr returns server's port as a string 26 | func (s *Server) PortStr(port uint16) string { 27 | return strconv.Itoa(int(port)) 28 | } 29 | 30 | // Addr - address + port 31 | func (s *Server) Addr() string { 32 | return "127.0.0.1:" + s.PortStr(s.Port) 33 | } 34 | 35 | func (s *Server) TlsAddr() string { 36 | return "127.0.0.1:" + s.PortStr(s.TlsPort) 37 | } 38 | 39 | // Start starts redis and waits for its initialization. 40 | func (s *Server) Start() { 41 | if s.Cmd != nil { 42 | return 43 | } 44 | s.Paused = false 45 | port := s.PortStr(s.Port) 46 | tlsPort := s.PortStr(s.TlsPort) 47 | var effectivePort string 48 | if tlsCluster { 49 | effectivePort = tlsPort 50 | } else { 51 | effectivePort = port 52 | } 53 | args := append([]string{ 54 | "--bind", "127.0.0.1", 55 | "--port", port, 56 | "--dbfilename", "dump-" + effectivePort + ".rdb", 57 | "--tls-port", tlsPort, 58 | "--cluster-allow-replica-migration", "no", 59 | "--tls-cert-file", "../../testbed/test_certs/server.rsa.crt", 60 | "--tls-key-file", "../../testbed/test_certs/server.rsa.key", 61 | "--tls-ca-cert-file", "../../testbed/test_certs/server.rsa.crt", 62 | "--tls-auth-clients", "no", 63 | }, s.Args...) 64 | var err error 65 | s.Cmd = exec.Command(Binary, args...) 
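// Run redis-server from the testbed working directory so its dump and log files are created there.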
66 | s.Cmd.Dir = Dir 67 | 68 | _stdout, _ := s.Cmd.StdoutPipe() 69 | logfile, err := os.Create(filepath.Join(s.Cmd.Dir, "log-"+effectivePort+".log")) 70 | if err != nil { 71 | panic(err) 72 | } 73 | _tee := io.TeeReader(_stdout, logfile) 74 | stdout := bufio.NewReader(_tee) 75 | 76 | err = s.Cmd.Start() 77 | if err != nil { 78 | panic(err) 79 | } 80 | for { 81 | l, isPrefix, err := stdout.ReadLine() 82 | if err != nil { 83 | panic(err) 84 | } 85 | if isPrefix { 86 | panic("logline too long") 87 | } 88 | if bytes.Contains(l, []byte("eady to accept connections")) { 89 | break 90 | } 91 | } 92 | go func() { 93 | defer logfile.Close() 94 | for { 95 | _, _, err := stdout.ReadLine() 96 | if err != nil { 97 | break 98 | } 99 | } 100 | }() 101 | s.Conn.Addr = s.Addr() 102 | s.Conn.TlsAddr = s.TlsAddr() 103 | } 104 | 105 | // Running returns true if server should be running at the moment. 106 | func (s *Server) Running() bool { 107 | return s.Cmd != nil 108 | } 109 | 110 | // RunningNow returns true if server should be running and it is not paused (with SIGSTOP). 111 | func (s *Server) RunningNow() bool { 112 | return s.Cmd != nil && !s.Paused 113 | } 114 | 115 | // Pause pauses server with SIGSTOP. 116 | func (s *Server) Pause() { 117 | if s.Paused { 118 | return 119 | } 120 | if err := s.Cmd.Process.Signal(syscall.SIGSTOP); err != nil { 121 | panic(err) 122 | } 123 | s.Paused = true 124 | } 125 | 126 | // Resume resumes server with SIGCONT. 127 | func (s *Server) Resume() { 128 | if !s.Paused { 129 | return 130 | } 131 | if err := s.Cmd.Process.Signal(syscall.SIGCONT); err != nil { 132 | panic(err) 133 | } 134 | s.Paused = false 135 | } 136 | 137 | // Stop kills server. 138 | func (s *Server) Stop() { 139 | if s.Paused { 140 | s.Resume() 141 | } 142 | if s.Cmd == nil { 143 | return 144 | } 145 | p := s.Cmd 146 | s.Cmd = nil 147 | if err := p.Process.Kill(); err != nil { 148 | panic(err) 149 | } 150 | p.Wait() 151 | } 152 | 153 | // Do executes command on server. 154 | func (s *Server) Do(cmd string, args ...interface{}) interface{} { 155 | return s.Conn.Do(cmd, args...) 156 | } 157 | 158 | // DoSure executes command and panics if it returns error. 159 | func (s *Server) DoSure(cmd string, args ...interface{}) interface{} { 160 | r := s.Do(cmd, args...) 161 | if err, ok := r.(error); ok { 162 | panic(err) 163 | } 164 | return r 165 | } 166 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package redispipe - high throughput Redis connector with implicit pipelining. 3 | 4 | https://redis.io/topics/pipelining 5 | 6 | Pipelining improves maximum throughput that redis can serve, and reduces CPU usage both on 7 | redis server and on client. Mostly it comes from saving system CPU consumption. 8 | 9 | But it is not always possible to use pipelining explicitly: usually there are dozens of 10 | concurrent goroutines, each sends just one request at a time. To handle usual workload, 11 | pipelining has to be implicit. 12 | 13 | All known Golang redis connectors use connection-per-request working model with a connection pool, 14 | and provide only explicit pipelining. This showed far from optimal performance under highly concurrent load. 15 | 16 | This connector was created as implicitly pipelined from the ground up to achieve maximum performance 17 | in a highly concurrent environment. 
It writes all requests to a single connection to redis, and 18 | continuously reads answers from another goroutine. 19 | 20 | Note that it trades a bit of latency for throughput, and therefore may be not optimal for 21 | non-concurrent usage. 22 | 23 | Capabilities 24 | 25 | - fast, 26 | 27 | - thread-safe: no need to lock around connection, no need to "return to pool", etc, 28 | 29 | - pipelining is implicit, 30 | 31 | - transactions supported (but without WATCH), 32 | 33 | - hook for custom logging, 34 | 35 | - hook for request timing reporting. 36 | 37 | Limitations 38 | 39 | - by default, it is not allowed to send blocking calls, because it will block the whole pipeline: 40 | `BLPOP`, `BRPOP`, `BRPOPLPUSH`, `BZPOPMIN`, `BZPOPMAX`, `XREAD`, `XREADGROUP`, `SAVE`. 41 | However, you could set `ScriptMode: true` option to enable these commands. 42 | `ScriptMode: true` also turns default `WritePause` to -1 (meaning it almost disables forced batching). 43 | 44 | - `WATCH` is also forbidden by default: it is useless and even harmful when concurrent goroutines 45 | use the same connection. 46 | It is also allowed with `ScriptMode: true`, but you should be sure you use connection only 47 | from single goroutine. 48 | 49 | - `SUBSCRIBE` and `PSUBSCRIBE` commands are forbidden. They switch the connection work mode to a 50 | completely different mode of communication, therefore it could not be combined with regular 51 | commands. This connector doesn't implement subscribing mode. 52 | 53 | Structure 54 | 55 | - root package is empty 56 | 57 | - common functionality is in redis subpackage 58 | 59 | - singe connection is in redisconn subpackage 60 | 61 | - cluster support is in rediscluster subpackage 62 | 63 | Usage 64 | 65 | Both redisconn.Connect and rediscluster.NewCluster creates implementations of redis.Sender. 66 | redis.Sender provides asynchronous api for sending request/requests/transactions. That api 67 | accepts redis.Future interface implementations as an argument and fulfills it asynchronously. 68 | Usually you don't need to provide your own redis.Future implementation, but rather use 69 | synchronous wrappers. 70 | 71 | To use convenient synchronous api, one should wrap "sender" with one of wrappers: 72 | 73 | - redis.Sync{sender} - provides simple synchronouse api, 74 | 75 | - redis.SyncCtx{sender} - provides same api, but all methods accept context.Context, and 76 | methods return immediately if that context is closed, 77 | 78 | - redis.ChanFutured{sender} - provides api with future through channel closing. 79 | 80 | Types accepted as command arguments: nil, []byte, string, int (and all other integer types), 81 | float64, float32, bool. All arguments are converted to redis bulk strings as usual (ie 82 | string and bytes - as is; numbers - in decimal notation). bool converted as "0/1", 83 | nil converted to empty string. 84 | 85 | In difference to other redis packages, no custom types are used for request results. Results 86 | are de-serialized into plain go types and are returned as interface{}: 87 | 88 | redis | go 89 | -------------|------- 90 | plain string | string 91 | bulk string | []byte 92 | integer | int64 93 | array | []interface{} 94 | error | error (*errorx.Error) 95 | 96 | IO, connection, and other errors are not returned separately but as result (and has same 97 | *errorx.Error underlying type). 
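
A minimal sketch of typical usage (ctx, address and key are illustrative):

	sender, err := redisconn.Connect(ctx, "127.0.0.1:6379", redisconn.Opts{})
	if err != nil {
		// handle connection error
	}
	defer sender.Close()

	res := redis.Sync{sender}.Do("GET", "key")
	if err := redis.AsError(res); err != nil {
		// handle request error
	}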
98 | */ 99 | package redispipe 100 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/cluster_test.go: -------------------------------------------------------------------------------- 1 | package redisclusterutil 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestParseSlotsInfo(t *testing.T) { 11 | clusterSlotsResponse := []interface{}{ 12 | []interface{}{ 13 | int64(0), 14 | int64(5460), 15 | []interface{}{ 16 | []byte("127.0.0.1"), 17 | int64(30001), 18 | []byte("09dbe9720cda62f7865eabc5fd8857c5d2678366"), 19 | []interface{}{ 20 | "hostname", 21 | "host-1.redis.example.com", 22 | }, 23 | }, 24 | []interface{}{ 25 | []byte("127.0.0.1"), 26 | int64(30004), 27 | []byte("821d8ca00d7ccf931ed3ffc7e3db0599d2271abf"), 28 | []interface{}{ 29 | "hostname", 30 | "host-2.redis.example.com", 31 | }, 32 | }, 33 | }, 34 | []interface{}{ 35 | int64(5461), 36 | int64(10922), 37 | []interface{}{ 38 | []byte("127.0.0.1"), 39 | int64(30002), 40 | []byte("c9d93d9f2c0c524ff34cc11838c2003d8c29e013"), 41 | []interface{}{ 42 | "hostname", 43 | "host-3.redis.example.com", 44 | }, 45 | }, 46 | []interface{}{ 47 | []byte("127.0.0.1"), 48 | int64(30005), 49 | []byte("faadb3eb99009de4ab72ad6b6ed87634c7ee410f"), 50 | []interface{}{ 51 | "hostname", 52 | "host-4.redis.example.com", 53 | }, 54 | }, 55 | }, 56 | []interface{}{ 57 | int64(10923), 58 | int64(16383), 59 | []interface{}{ 60 | []byte("192.168.11.131"), 61 | int64(30003), 62 | []byte("044ec91f325b7595e76dbcb18cc688b6a5b434a1"), 63 | []interface{}{ 64 | "hostname", 65 | "host-5.redis.example.com", 66 | }, 67 | }, 68 | []interface{}{ 69 | []byte("127.0.0.1"), 70 | int64(30006), 71 | []byte("58e6e48d41228013e5d9c1c37c5060693925e97e"), 72 | []interface{}{ 73 | "hostname", 74 | "host-6.redis.example.com", 75 | }, 76 | }, 77 | }, 78 | } 79 | 80 | expectedSlots := []SlotsRange{ 81 | { 82 | From: 0, 83 | To: 5460, 84 | Addrs: []string{ 85 | "127.0.0.1:30001", 86 | "127.0.0.1:30004", 87 | }, 88 | }, 89 | { 90 | From: 5461, 91 | To: 10922, 92 | Addrs: []string{ 93 | "127.0.0.1:30002", 94 | "127.0.0.1:30005", 95 | }, 96 | }, 97 | { 98 | From: 10923, 99 | To: 16383, 100 | Addrs: []string{ 101 | "192.168.11.131:30003", 102 | "127.0.0.1:30006", 103 | }, 104 | }, 105 | } 106 | 107 | slots, err := ParseSlotsInfo(clusterSlotsResponse) 108 | require.NoError(t, err) 109 | 110 | assert.Equal(t, expectedSlots, slots) 111 | } 112 | 113 | func TestParseSlotsInfo_EmptyAddress(t *testing.T) { 114 | clusterSlotsResponse := []interface{}{ 115 | []interface{}{ 116 | int64(0), 117 | int64(5460), 118 | []interface{}{ 119 | []byte("127.0.0.1"), 120 | int64(30001), 121 | []byte("09dbe9720cda62f7865eabc5fd8857c5d2678366"), 122 | []interface{}{ 123 | "hostname", 124 | "host-1.redis.example.com", 125 | }, 126 | }, 127 | []interface{}{ 128 | []interface{}{}, 129 | int64(0), 130 | []byte("821d8ca00d7ccf931ed3ffc7e3db0599d2271abf"), 131 | []interface{}{ 132 | "hostname", 133 | "host-2.redis.example.com", 134 | }, 135 | }, 136 | }, 137 | []interface{}{ 138 | int64(5461), 139 | int64(10922), 140 | []interface{}{ 141 | []interface{}{}, 142 | int64(0), 143 | []byte("c9d93d9f2c0c524ff34cc11838c2003d8c29e013"), 144 | []interface{}{ 145 | "hostname", 146 | "host-3.redis.example.com", 147 | }, 148 | }, 149 | []interface{}{ 150 | []byte("127.0.0.1"), 151 | int64(30005), 152 | []byte("faadb3eb99009de4ab72ad6b6ed87634c7ee410f"), 153 | []interface{}{ 154 | 
"hostname", 155 | "host-4.redis.example.com", 156 | }, 157 | }, 158 | }, 159 | } 160 | 161 | expectedSlots := []SlotsRange{ 162 | { 163 | From: 0, 164 | To: 5460, 165 | Addrs: []string{ 166 | "127.0.0.1:30001", 167 | }, 168 | }, 169 | { 170 | From: 5461, 171 | To: 10922, 172 | Addrs: []string{ 173 | "127.0.0.1:30005", 174 | }, 175 | }, 176 | } 177 | 178 | slots, err := ParseSlotsInfo(clusterSlotsResponse) 179 | require.NoError(t, err) 180 | 181 | assert.Equal(t, expectedSlots, slots) 182 | } 183 | -------------------------------------------------------------------------------- /redis/sender.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | // Sender is interface of client implementation. 8 | // It provides interface in term of Future, and could be either single connection, 9 | // connection to cluster, or whatever. 10 | type Sender interface { 11 | // Send sends request to redis. When response will arrive, cb.Resolve(result, n) will be called. 12 | // Note: cb.Resolve could be called before Send returns. 13 | Send(r Request, cb Future, n uint64) 14 | // SendMany sends many requests at once. 15 | // When responses will arrive, cb.Resolve will be called with distinct n values: 16 | // - first request's response will be passed as cb.Resolve(response, n) 17 | // - second request's response will be passed as cb.Resolve(response, n+1) 18 | // - third ... cb.Resolve(response, n+2) 19 | // Note: responses could arrive in arbitrary order. 20 | SendMany(r []Request, cb Future, n uint64) 21 | // SendTransaction sends several requests as MULTI+EXEC redis transaction. 22 | // Response will be passed only once as an array of responses to commands (as EXEC does) 23 | // cb.Resolve([]interface{res1, res2, res3, ...}, n) 24 | SendTransaction(r []Request, cb Future, n uint64) 25 | // Scanner returns scanner object that scans keyspace sequentially. 26 | Scanner(opts ScanOpts) Scanner 27 | // EachShard synchronously calls callback for each shard. 28 | // Single-connection client will call it only once, but clustered will call for every master. 29 | // If callback is called with error, it will not be called again. 30 | // If callback returns false, iteration stops. 31 | EachShard(func(Sender, error) bool) 32 | // Close closes client. All following requests will be immediately resolved with error. 33 | Close() 34 | } 35 | 36 | // Scanner is an object used for scanning redis key space. It is returned by Sender.Scanner(). 37 | type Scanner interface { 38 | // Next will call cb.Resolve(result, 0) where `results` is keys part of result of SCAN/HSCAN/SSCAN/ZSCAN 39 | // (ie iterator part is handled internally). 40 | // When iteration completes, cb.Resolve(nil, 0) will be called. 41 | Next(cb Future) 42 | } 43 | 44 | // ScanEOF is error returned by Sync wrappers when iteration exhausted. 45 | var ScanEOF = errors.New("Iteration finished") 46 | 47 | // tools for scanning 48 | 49 | // ScanOpts is options for scanning 50 | type ScanOpts struct { 51 | // Cmd - command to be sent. Could be 'SCAN', 'SSCAN', 'HSCAN', 'ZSCAN' 52 | // default is 'SCAN' 53 | Cmd string 54 | // Key - key for SSCAN, HSCAN and ZSCAN command 55 | Key string 56 | // Match - pattern for filtering keys 57 | Match string 58 | // Count - soft-limit of single *SCAN answer 59 | Count int 60 | } 61 | 62 | // Request returns corresponding request to be send. 
63 | // Used mostly internally 64 | func (s ScanOpts) Request(it []byte) Request { 65 | if len(it) == 0 { 66 | it = []byte("0") 67 | } 68 | args := make([]interface{}, 0, 6) 69 | if s.Cmd == "" { 70 | s.Cmd = "SCAN" 71 | } 72 | if s.Cmd != "SCAN" { 73 | args = append(args, s.Key) 74 | } 75 | args = append(args, it) 76 | if s.Match != "" { 77 | args = append(args, "MATCH", s.Match) 78 | } 79 | if s.Count > 0 { 80 | args = append(args, "COUNT", s.Count) 81 | } 82 | return Request{s.Cmd, args} 83 | } 84 | 85 | // ScannerBase is internal "parent" object for scanner implementations 86 | type ScannerBase struct { 87 | // ScanOpts - options for this scanning 88 | ScanOpts 89 | // Iter - current iterator state 90 | Iter []byte 91 | // Err - error occurred. Implementation should stop iteration if Err is nil. 92 | Err error 93 | cb Future 94 | } 95 | 96 | // DoNext - perform next step of iteration - send corresponding *SCAN command 97 | func (s *ScannerBase) DoNext(cb Future, snd Sender) { 98 | s.cb = cb 99 | snd.Send(s.ScanOpts.Request(s.Iter), s, 0) 100 | } 101 | 102 | // IterLast - return true if iterator is at the end of this server/key keyspace. 103 | func (s *ScannerBase) IterLast() bool { 104 | return len(s.Iter) == 1 && s.Iter[0] == '0' 105 | } 106 | 107 | // Cancelled - implements Future.Cancelled method 108 | func (s *ScannerBase) Cancelled() error { 109 | return s.cb.Cancelled() 110 | } 111 | 112 | // Resolve - implements Future.Resolve. 113 | // Accepts result of *SCAN command, remembers error and iterator 114 | // and calls Resolve on underlying future. 115 | func (s *ScannerBase) Resolve(res interface{}, _ uint64) { 116 | var keys []string 117 | s.Iter, keys, s.Err = ScanResponse(res) 118 | cb := s.cb 119 | s.cb = nil 120 | if s.Err != nil { 121 | cb.Resolve(s.Err, 0) 122 | } else { 123 | cb.Resolve(keys, 0) 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /redisconn/bench/bench_test.go: -------------------------------------------------------------------------------- 1 | package bench 2 | 3 | import ( 4 | "context" 5 | "runtime" 6 | . 
"testing" 7 | 8 | "github.com/joomcode/redispipe/redis" 9 | "github.com/joomcode/redispipe/testbed" 10 | 11 | redigo "github.com/gomodule/redigo/redis" 12 | "github.com/joomcode/redispipe/redisconn" 13 | 14 | radix "github.com/mediocregopher/radix/v3" 15 | ) 16 | 17 | func benchServer(port int) func() { 18 | testbed.InitDir(".") 19 | s := testbed.Server{Port: uint16(port)} 20 | s.Start() 21 | return func() { 22 | s.Stop() 23 | testbed.RmDir() 24 | } 25 | } 26 | 27 | func BenchmarkSerialGetSet(b *B) { 28 | defer benchServer(45678)() 29 | b.Run("radix", func(b *B) { 30 | rdxv2, err := radix.Dial("tcp", "127.0.0.1:45678") 31 | if err != nil { 32 | b.Fatal(err) 33 | return 34 | } 35 | defer rdxv2.Close() 36 | b.ResetTimer() 37 | for i := 0; i < b.N; i++ { 38 | if err := rdxv2.Do(radix.Cmd(nil, "SET", "foo", "bar")); err != nil { 39 | b.Fatal(err) 40 | } 41 | if err := rdxv2.Do(radix.Cmd(nil, "GET", "foo")); err != nil { 42 | b.Fatal(err) 43 | } 44 | } 45 | }) 46 | 47 | b.Run("redigo", func(b *B) { 48 | red := newRedigo() 49 | defer red.Close() 50 | b.ResetTimer() 51 | for i := 0; i < b.N; i++ { 52 | if _, err := red.Do("SET", "foo", "bar"); err != nil { 53 | b.Fatal(err) 54 | } 55 | if _, err := redigo.String(red.Do("GET", "foo")); err != nil { 56 | b.Fatal(err) 57 | } 58 | } 59 | }) 60 | 61 | b.Run("redispipe", func(b *B) { 62 | pipe, err := redisconn.Connect(context.Background(), "127.0.0.1:45678", redisconn.Opts{ 63 | Logger: redisconn.NoopLogger{}, 64 | }) 65 | defer pipe.Close() 66 | if err != nil { 67 | b.Fatal(err) 68 | } 69 | sync := redis.Sync{pipe} 70 | b.ResetTimer() 71 | for i := 0; i < b.N; i++ { 72 | if res := sync.Do("SET", "foo", "bar"); redis.AsError(res) != nil { 73 | b.Fatal(res) 74 | } 75 | if res := sync.Do("GET", "foo"); redis.AsError(res) != nil { 76 | b.Fatal(res) 77 | } 78 | } 79 | }) 80 | 81 | b.Run("redispipe_pause0", func(b *B) { 82 | pipe, err := redisconn.Connect(context.Background(), "127.0.0.1:45678", redisconn.Opts{ 83 | Logger: redisconn.NoopLogger{}, 84 | WritePause: -1, 85 | }) 86 | defer pipe.Close() 87 | if err != nil { 88 | b.Fatal(err) 89 | } 90 | sync := redis.Sync{pipe} 91 | b.ResetTimer() 92 | for i := 0; i < b.N; i++ { 93 | if res := sync.Do("SET", "foo", "bar"); redis.AsError(res) != nil { 94 | b.Fatal(res) 95 | } 96 | if res := sync.Do("GET", "foo"); redis.AsError(res) != nil { 97 | b.Fatal(res) 98 | } 99 | } 100 | }) 101 | } 102 | 103 | func BenchmarkParallelGetSet(b *B) { 104 | defer benchServer(45678)() 105 | parallel := runtime.GOMAXPROCS(0) * 2 106 | 107 | do := func(b *B, fn func()) { 108 | b.SetParallelism(parallel) 109 | b.RunParallel(func(pb *PB) { 110 | for pb.Next() { 111 | fn() 112 | } 113 | }) 114 | } 115 | 116 | b.Run("radix", func(b *B) { 117 | rdx2, err := radix.NewPool("tcp", "127.0.0.1:45678", parallel) 118 | if err != nil { 119 | b.Fatal(err) 120 | } 121 | defer rdx2.Close() 122 | b.ResetTimer() 123 | do(b, func() { 124 | if err := rdx2.Do(radix.Cmd(nil, "SET", "foo", "bar")); err != nil { 125 | b.Fatal(err) 126 | } 127 | if err := rdx2.Do(radix.Cmd(nil, "GET", "foo")); err != nil { 128 | b.Fatal(err) 129 | } 130 | }) 131 | }) 132 | 133 | b.Run("redigo", func(b *B) { 134 | red := &redigo.Pool{ 135 | MaxIdle: parallel, 136 | Dial: func() (redigo.Conn, error) { 137 | return newRedigo(), nil 138 | }, 139 | } 140 | defer red.Close() 141 | b.ResetTimer() 142 | do(b, func() { 143 | conn := red.Get() 144 | defer conn.Close() 145 | if _, err := conn.Do("SET", "foo", "bar"); err != nil { 146 | b.Fatal(err) 147 | } 148 | if _, err := 
redigo.String(conn.Do("GET", "foo")); err != nil { 149 | b.Fatal(err) 150 | } 151 | }) 152 | }) 153 | 154 | b.Run("redispipe", func(b *B) { 155 | pipe, err := redisconn.Connect(context.Background(), "127.0.0.1:45678", redisconn.Opts{ 156 | Logger: redisconn.NoopLogger{}, 157 | }) 158 | if err != nil { 159 | b.Fatal(err) 160 | } 161 | defer pipe.Close() 162 | sync := redis.Sync{pipe} 163 | b.ResetTimer() 164 | do(b, func() { 165 | if res := sync.Do("SET", "foo", "bar"); redis.AsError(res) != nil { 166 | b.Fatal(res) 167 | } 168 | if res := sync.Do("GET", "foo"); redis.AsError(res) != nil { 169 | b.Fatal(err) 170 | } 171 | }) 172 | }) 173 | } 174 | 175 | func newRedigo() redigo.Conn { 176 | c, err := redigo.Dial("tcp", "127.0.0.1:45678") 177 | if err != nil { 178 | panic(err) 179 | } 180 | return c 181 | } 182 | -------------------------------------------------------------------------------- /example_test.go: -------------------------------------------------------------------------------- 1 | package redispipe_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | 8 | "github.com/joomcode/redispipe/redis" 9 | "github.com/joomcode/redispipe/rediscluster" 10 | "github.com/joomcode/redispipe/redisconn" 11 | ) 12 | 13 | const databaseno = 0 14 | const password = "" 15 | 16 | var myhandle interface{} = nil 17 | 18 | func Example_usage() { 19 | ctx := context.Background() 20 | cluster := false 21 | 22 | SingleRedis := func(ctx context.Context) (redis.Sender, error) { 23 | opts := redisconn.Opts{ 24 | DB: databaseno, 25 | Password: password, 26 | Logger: redisconn.NoopLogger{}, // shut up logging. Could be your custom implementation. 27 | Handle: myhandle, // custom data, useful for custom logging 28 | // Other parameters (usually, no need to change) 29 | // IOTimeout, DialTimeout, ReconnectTimeout, TCPKeepAlive, Concurrency, WritePause, AsyncDial 30 | } 31 | conn, err := redisconn.Connect(ctx, "127.0.0.1:6379", opts) 32 | return conn, err 33 | } 34 | 35 | ClusterRedis := func(ctx context.Context) (redis.Sender, error) { 36 | opts := rediscluster.Opts{ 37 | HostOpts: redisconn.Opts{ 38 | // No DB 39 | Password: password, 40 | // Usually, no need for special logger 41 | }, 42 | Name: "mycluster", // name of a cluster 43 | Logger: rediscluster.NoopLogger{}, // shut up logging. Could be your custom implementation. 
44 | Handle: myhandle, // custom data, useful for custom logging 45 | // Other parameters (usually, no need to change): 46 | // ConnsPerHost, ConnHostPolicy, CheckInterval, MovedRetries, WaitToMigrate, RoundRobinSeed, 47 | } 48 | addresses := []string{"127.0.0.1:20001"} // one or more of cluster addresses 49 | cluster, err := rediscluster.NewCluster(ctx, addresses, opts) 50 | return cluster, err 51 | } 52 | 53 | var sender redis.Sender 54 | var err error 55 | if cluster { 56 | sender, err = ClusterRedis(ctx) 57 | } else { 58 | sender, err = SingleRedis(ctx) 59 | } 60 | if err != nil { 61 | log.Fatal(err) 62 | } 63 | defer sender.Close() 64 | 65 | sync := redis.SyncCtx{sender} // wrapper for synchronous api 66 | 67 | res := sync.Do(ctx, "SET", "key", "ho") 68 | if err := redis.AsError(res); err != nil { 69 | log.Fatal(err) 70 | } 71 | fmt.Printf("result: %q\n", res) 72 | 73 | res = sync.Do(ctx, "GET", "key") 74 | if err := redis.AsError(res); err != nil { 75 | log.Fatal(err) 76 | } 77 | fmt.Printf("result: %q\n", res) 78 | 79 | res = sync.Send(ctx, redis.Req("HMSET", "hashkey", "field1", "val1", "field2", "val2")) 80 | if err := redis.AsError(res); err != nil { 81 | log.Fatal(err) 82 | } 83 | 84 | res = sync.Send(ctx, redis.Req("HMGET", "hashkey", "field1", "field2", "field3")) 85 | if err := redis.AsError(res); err != nil { 86 | log.Fatal(err) 87 | } 88 | for i, v := range res.([]interface{}) { 89 | fmt.Printf("%d: %T %q\n", i, v, v) 90 | } 91 | 92 | res = sync.Send(ctx, redis.Req("HMGET", "key", "field1")) 93 | if err := redis.AsError(res); err != nil { 94 | if rerr := redis.AsErrorx(res); rerr != nil && rerr.IsOfType(redis.ErrResult) { 95 | fmt.Printf("expected error: %v\n", rerr) 96 | } else { 97 | fmt.Printf("unexpected error: %v\n", err) 98 | } 99 | } else { 100 | fmt.Printf("unexpected missed error\n") 101 | } 102 | 103 | results := sync.SendMany(ctx, []redis.Request{ 104 | redis.Req("GET", "key"), 105 | redis.Req("HMGET", "hashkey", "field1", "field3"), 106 | }) 107 | // results is []interface{}, each element is result for corresponding request 108 | for i, res := range results { 109 | fmt.Printf("result[%d]: %T %q\n", i, res, res) 110 | } 111 | 112 | results, err = sync.SendTransaction(ctx, []redis.Request{ 113 | redis.Req("SET", "a{x}", "b"), 114 | redis.Req("SET", "b{x}", 0), 115 | redis.Req("INCRBY", "b{x}", 3), 116 | }) 117 | if err != nil { 118 | log.Fatal(err) 119 | } 120 | for i, res := range results { 121 | fmt.Printf("tresult[%d]: %T %q\n", i, res, res) 122 | } 123 | 124 | scanner := sync.Scanner(ctx, redis.ScanOpts{Match: "*key*"}) 125 | for { 126 | keys, err := scanner.Next() 127 | if err != nil { 128 | if err != redis.ScanEOF { 129 | log.Fatal(err) 130 | } 131 | break 132 | } 133 | fmt.Printf("keys: %q", keys) 134 | } 135 | 136 | // Output: 137 | // result: "OK" 138 | // result: "ho" 139 | // 0: []uint8 "val1" 140 | // 1: []uint8 "val2" 141 | // 2: %!q() 142 | // expected error: redispipe.result: WRONGTYPE Operation against a key holding the wrong kind of value {request: Req("HMGET", ["key" "field1"]), address: 127.0.0.1:6379} 143 | // result[0]: []uint8 "ho" 144 | // result[1]: []interface {} ["val1" ] 145 | // tresult[0]: string "OK" 146 | // tresult[1]: string "OK" 147 | // tresult[2]: int64 '\x03' 148 | // keys: ["key" "hashkey"] 149 | } 150 | -------------------------------------------------------------------------------- /rediscluster/logger.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import 
( 4 | "log" 5 | 6 | "github.com/joomcode/redispipe/redisconn" 7 | ) 8 | 9 | // Logger is used for loggin cluster-related events and requests statistic. 10 | type Logger interface { 11 | // Report will be called when some events happens during cluster's lifetime. 12 | // Default implementation just prints this information using standard log package. 13 | Report(c *Cluster, event LogEvent) 14 | // ReqStat is called after request receives it's answer with request/result information 15 | // and time spend to fulfill request. 16 | // Default implementation is no-op. 17 | ReqStat(c *Cluster, conn *redisconn.Connection, req Request, res interface{}, nanos, bytesIn, bytesOut int64) 18 | } 19 | 20 | func (c *Cluster) report(event LogEvent) { 21 | c.opts.Logger.Report(c, event) 22 | } 23 | 24 | // LogEvent is a sumtype for events to be logged. 25 | type LogEvent interface { 26 | logEvent() 27 | } 28 | 29 | // LogHostEvent is a wrapper for per-connection event 30 | type LogHostEvent struct { 31 | Conn *redisconn.Connection // Connection which triggers event. 32 | Event redisconn.LogEvent 33 | } 34 | 35 | // LogClusterSlotsError is logged when CLUSTER SLOTS failed. 36 | type LogClusterSlotsError struct { 37 | Conn *redisconn.Connection // Connection which were used for CLUSTER SLOTS 38 | Error error // observed error 39 | } 40 | 41 | // LogSlotRangeError is logged when no host were able to respond to CLUSTER SLOTS. 42 | type LogSlotRangeError struct{} 43 | 44 | // LogContextClosed is logged when cluster's context is closed. 45 | type LogContextClosed struct{ Error error } 46 | 47 | func (LogHostEvent) logEvent() {} 48 | func (LogClusterSlotsError) logEvent() {} 49 | func (LogSlotRangeError) logEvent() {} 50 | func (LogContextClosed) logEvent() {} 51 | 52 | // DefaultLogger is a default Logger implementation 53 | type DefaultLogger struct{} 54 | 55 | // Report implements Logger.Report. 56 | func (d DefaultLogger) Report(cluster *Cluster, event LogEvent) { 57 | switch ev := event.(type) { 58 | case LogHostEvent: 59 | switch cev := ev.Event.(type) { 60 | case redisconn.LogConnecting: 61 | log.Printf("rediscluster %s: connecting to %s", cluster.Name(), ev.Conn.Addr()) 62 | case redisconn.LogConnected: 63 | log.Printf("rediscluster %s: connected to %s (localAddr: %s, remAddr: %s)", 64 | cluster.Name(), ev.Conn.Addr(), cev.LocalAddr, cev.RemoteAddr) 65 | case redisconn.LogConnectFailed: 66 | log.Printf("rediscluster %s: connection to %s failed: %s", 67 | cluster.Name(), ev.Conn.Addr(), cev.Error.Error()) 68 | case redisconn.LogDisconnected: 69 | log.Printf("rediscluster %s: connection to %s broken (localAddr: %s, remAddr: %s): %s", 70 | cluster.Name(), ev.Conn.Addr(), cev.LocalAddr, cev.RemoteAddr, cev.Error.Error()) 71 | case redisconn.LogContextClosed: 72 | log.Printf("rediscluster %s: connect to %s explicitly closed: %s", 73 | cluster.Name(), ev.Conn.Addr(), cev.Error.Error()) 74 | default: 75 | log.Printf("rediscluster %s: unexpected connection event for %s: %s", 76 | cluster.Name(), ev.Conn.Addr(), event) 77 | } 78 | case LogClusterSlotsError: 79 | log.Printf("rediscluster %s: 'CLUSTER SLOTS' request to %s failed: %s", 80 | cluster.Name(), ev.Conn.Addr(), ev.Error.Error()) 81 | case LogSlotRangeError: 82 | log.Printf("rediscluster %s: no alive nodes to request 'CLUSTER SLOTS'", 83 | cluster.Name()) 84 | case LogContextClosed: 85 | log.Printf("rediscluster %s: shutting down (%s)", cluster.Name(), ev.Error) 86 | } 87 | } 88 | 89 | // ReqStat implements Logger.ReqStat as no-op. 
90 | func (d DefaultLogger) ReqStat(_ *Cluster, _ *redisconn.Connection, _ Request, _ interface{}, _, _, _ int64) { 91 | // noop 92 | } 93 | 94 | // defaultConnLogger implements redisconn.Logger to log individual connection events in context of cluster. 95 | type defaultConnLogger struct { 96 | *Cluster 97 | } 98 | 99 | // Report implements redisconn.Logger.Report 100 | func (d defaultConnLogger) Report(conn *redisconn.Connection, event redisconn.LogEvent) { 101 | d.Cluster.opts.Logger.Report(d.Cluster, LogHostEvent{Conn: conn, Event: event}) 102 | } 103 | 104 | // Report implements redisconn.Logger.ReqStat 105 | func (d defaultConnLogger) ReqStat(conn *redisconn.Connection, req Request, res interface{}, nanos, bytesIn, bytesOut int64) { 106 | d.Cluster.opts.Logger.ReqStat(d.Cluster, conn, req, res, nanos, bytesIn, bytesOut) 107 | } 108 | 109 | // NoopLogger implements Logger with no logging at all. 110 | type NoopLogger struct{} 111 | 112 | // Report implements Logger.Report 113 | func (d NoopLogger) Report(conn *Cluster, event LogEvent) {} 114 | 115 | // ReqStat implements Logger.ReqStat 116 | func (d NoopLogger) ReqStat(c *Cluster, conn *redisconn.Connection, req Request, res interface{}, nanos, bytesIn, bytesOut int64) { 117 | } 118 | -------------------------------------------------------------------------------- /redis/error.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "github.com/joomcode/errorx" 5 | ) 6 | 7 | var ( 8 | // Errors is a root namespaces of all redispipe errors. 9 | Errors = errorx.NewNamespace("redispipe").ApplyModifiers(errorx.TypeModifierOmitStackTrace) 10 | 11 | // ErrOpts - options are wrong 12 | ErrOpts = Errors.NewSubNamespace("opts") 13 | // ErrContextIsNil - context is not passed to constructor 14 | ErrContextIsNil = ErrOpts.NewType("context_is_nil") 15 | // ErrNoAddressProvided - no address is given to constructor 16 | ErrNoAddressProvided = ErrOpts.NewType("no_address") 17 | 18 | // ErrTraitNotSent signals request were not written to wire 19 | ErrTraitNotSent = errorx.RegisterTrait("request_not_sent") 20 | 21 | // ErrContextClosed - context were explicitly closed (or connection / cluster were shut down) 22 | ErrContextClosed = Errors.NewType("connection_context_closed", ErrTraitNotSent) 23 | 24 | // ErrTraitConnectivity marks all networking and io errors 25 | ErrTraitConnectivity = errorx.RegisterTrait("network") 26 | 27 | // ErrIO - io error: read/write error, or timeout, or connection closed while reading/writing 28 | // It is not known if request were processed or not 29 | ErrIO = Errors.NewType("io error", ErrTraitConnectivity) 30 | 31 | // ErrRequest - request malformed. Can not serialize request, no reason to retry. 32 | ErrRequest = Errors.NewSubNamespace("request") 33 | // ErrArgumentType - argument is not serializable 34 | ErrArgumentType = ErrRequest.NewType("argument_type") 35 | // ErrBatchFormat - some other command in batch is malformed 36 | ErrBatchFormat = ErrRequest.NewType("batch_format") 37 | // ErrNoSlotKey - no key to determine cluster slot 38 | ErrNoSlotKey = ErrRequest.NewType("no_slot_key") 39 | // ErrRequestCancelled - request already cancelled 40 | ErrRequestCancelled = ErrRequest.NewType("request_cancelled") 41 | // ErrCommandForbidden - command is blocking or dangerous 42 | ErrCommandForbidden = ErrRequest.NewType("command_forbidden") 43 | 44 | // ErrResponse - response malformed. Redis returns unexpected response. 
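// When true, error results are passed through errorx.EnsureStackTrace,
// so every failed call also pays the cost of capturing a stack trace.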
45 | ErrResponse = Errors.NewSubNamespace("response") 46 | // ErrResponseFormat - response is not valid Redis response 47 | ErrResponseFormat = ErrResponse.NewType("format") 48 | // ErrResponseUnexpected - response is valid redis response, but its structure/type unexpected 49 | ErrResponseUnexpected = ErrResponse.NewType("unexpected") 50 | // ErrHeaderlineTooLarge - header line too large 51 | ErrHeaderlineTooLarge = ErrResponse.NewType("headerline_too_large") 52 | // ErrHeaderlineEmpty - header line is empty 53 | ErrHeaderlineEmpty = ErrResponse.NewType("headerline_empty") 54 | // ErrIntegerParsing - integer malformed 55 | ErrIntegerParsing = ErrResponse.NewType("integer_parsiing") 56 | // ErrNoFinalRN - no final "\r\n" 57 | ErrNoFinalRN = ErrResponse.NewType("no_final_rn") 58 | // ErrUnknownHeaderType - unknown header type 59 | ErrUnknownHeaderType = ErrResponse.NewType("unknown_headerline_type") 60 | // ErrPing - ping receives wrong response 61 | ErrPing = ErrResponse.NewType("ping") 62 | 63 | // ErrTraitClusterMove signals that error happens due to cluster rebalancing. 64 | ErrTraitClusterMove = errorx.RegisterTrait("cluster_move") 65 | 66 | // ErrResult - just regular redis response. 67 | ErrResult = Errors.NewType("result") 68 | // ErrMoved - MOVED response 69 | ErrMoved = ErrResult.NewSubtype("moved", ErrTraitClusterMove) 70 | // ErrAsk - ASK response 71 | ErrAsk = ErrResult.NewSubtype("ask", ErrTraitClusterMove) 72 | // ErrClusterDown - CLUSTERDOWN response 73 | ErrClusterDown = ErrResult.NewSubtype("clusterdown", ErrTraitNotSent) 74 | // ErrLoading - redis didn't finish start 75 | ErrLoading = ErrResult.NewSubtype("loading", ErrTraitNotSent) 76 | // ErrExecEmpty - EXEC returns nil (WATCH failed) (it is strange, cause we don't support WATCH) 77 | ErrExecEmpty = ErrResult.NewSubtype("exec_empty") 78 | // ErrExecAbort - EXEC returns EXECABORT 79 | ErrExecAbort = ErrResult.NewSubtype("exec_abort") 80 | // ErrTryAgain - EXEC returns TryAgain 81 | ErrTryAgain = ErrResult.NewSubtype("exec_try_again") 82 | ) 83 | 84 | var ( 85 | // EKLine - set by response parser for unrecognized header lines. 86 | EKLine = errorx.RegisterProperty("line") 87 | // EKMovedTo - set by response parser for MOVED and ASK responses. 88 | EKMovedTo = errorx.RegisterProperty("movedto") 89 | // EKSlot - set by response parser for MOVED and ASK responses. 90 | EKSlot = errorx.RegisterPrintableProperty("slot") 91 | // EKVal - set by request writer and checker to argument value which could not be serialized. 92 | EKVal = errorx.RegisterPrintableProperty("val") 93 | // EKArgPos - set by request writer and checker to argument position which could not be serialized. 94 | EKArgPos = errorx.RegisterPrintableProperty("argpos") 95 | // EKRequest - request that triggered error. 96 | EKRequest = errorx.RegisterPrintableProperty("request") 97 | // EKRequests - batch requests that triggered error. 98 | EKRequests = errorx.RegisterPrintableProperty("requests") 99 | // EKResponse - unexpected response 100 | EKResponse = errorx.RegisterProperty("response") 101 | // EKAddress - address of redis that has a problems 102 | EKAddress = errorx.RegisterPrintableProperty("address") 103 | ) 104 | 105 | var ( 106 | // CollectTrace - should Sync and SyncCtx wrappers collect stack traces on a call side. 
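// A MOVED error should carry the redirect target and slot as EKMovedTo/EKSlot
// properties; the assertions below verify both.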
107 | CollectTrace = false 108 | ) 109 | -------------------------------------------------------------------------------- /redis/reader_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/joomcode/errorx" 10 | 11 | . "github.com/joomcode/redispipe/redis" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func lines2bufio(lines ...string) *bufio.Reader { 16 | buf := strings.Join(lines, "") 17 | return bufio.NewReader(strings.NewReader(buf)) 18 | } 19 | 20 | func readLines(lines ...string) interface{} { 21 | r, _ := ReadResponse(lines2bufio(lines...)) 22 | return r 23 | } 24 | 25 | func checkErrType(t *testing.T, res interface{}, kind *errorx.Type) bool { 26 | if assert.IsType(t, (*errorx.Error)(nil), res) { 27 | err := res.(*errorx.Error) 28 | return assert.True(t, err.IsOfType(kind)) 29 | } 30 | return false 31 | } 32 | 33 | func TestReadResponse_IOAndFormatErrors(t *testing.T) { 34 | var res interface{} 35 | 36 | res = readLines("") 37 | checkErrType(t, res, ErrIO) 38 | 39 | res = readLines("\n") 40 | checkErrType(t, res, ErrHeaderlineEmpty) 41 | 42 | res = readLines("\r\n") 43 | checkErrType(t, res, ErrHeaderlineEmpty) 44 | 45 | res = readLines("$\r\n") 46 | checkErrType(t, res, ErrIntegerParsing) 47 | 48 | res = readLines("/\r\n") 49 | checkErrType(t, res, ErrUnknownHeaderType) 50 | 51 | res = readLines("+" + strings.Repeat("A", 1024*1024) + "\r\n") 52 | checkErrType(t, res, ErrHeaderlineTooLarge) 53 | 54 | res = readLines(":\r\n") 55 | checkErrType(t, res, ErrIntegerParsing) 56 | 57 | res = readLines(":1.1\r\n") 58 | checkErrType(t, res, ErrIntegerParsing) 59 | 60 | res = readLines(":a\r\n") 61 | checkErrType(t, res, ErrIntegerParsing) 62 | 63 | res = readLines("$a\r\n") 64 | checkErrType(t, res, ErrIntegerParsing) 65 | 66 | res = readLines("*a\r\n") 67 | checkErrType(t, res, ErrIntegerParsing) 68 | 69 | res = readLines("$0\r\n") 70 | checkErrType(t, res, ErrIO) 71 | 72 | res = readLines("$1\r\n") 73 | checkErrType(t, res, ErrIO) 74 | 75 | res = readLines("$1\r\na") 76 | checkErrType(t, res, ErrIO) 77 | 78 | res = readLines("$1\r\nabc") 79 | checkErrType(t, res, ErrNoFinalRN) 80 | 81 | res = readLines("*1\r\n") 82 | checkErrType(t, res, ErrIO) 83 | 84 | res = readLines("*1\r\n$1\r\n") 85 | checkErrType(t, res, ErrIO) 86 | 87 | res = readLines("*1\r\n$1\r\nabc") 88 | checkErrType(t, res, ErrNoFinalRN) 89 | 90 | res = readLines("-MOVED 1234\r\n") 91 | checkErrType(t, res, ErrResponseFormat) 92 | 93 | res = readLines("-MOVED asdf 1.1.1.1:3456\r\n") 94 | checkErrType(t, res, ErrIntegerParsing) 95 | 96 | res = readLines("-ASK 1234\r\n") 97 | checkErrType(t, res, ErrResponseFormat) 98 | 99 | res = readLines("-ASK asdf 1.1.1.1:3456\r\n") 100 | checkErrType(t, res, ErrIntegerParsing) 101 | } 102 | 103 | func TestReadResponse_Correct(t *testing.T) { 104 | var res interface{} 105 | 106 | res = readLines("+\r\n") 107 | assert.Equal(t, "", res) 108 | 109 | res = readLines("+asdf\r\n") 110 | assert.Equal(t, "asdf", res) 111 | 112 | res = readLines("-\r\n") 113 | if checkErrType(t, res, ErrResult) { 114 | assert.Equal(t, "", res.(*errorx.Error).Message()) 115 | } 116 | 117 | res = readLines("-asdf\r\n") 118 | if checkErrType(t, res, ErrResult) { 119 | assert.Equal(t, "asdf", res.(*errorx.Error).Message()) 120 | } 121 | 122 | res = readLines("-MOVED 1234 1.1.1.1:3456\r\n") 123 | if checkErrType(t, res, ErrMoved) { 124 | err := res.(*errorx.Error) 
125 | assert.Equal(t, "MOVED 1234 1.1.1.1:3456", err.Message()) 126 | v, _ := err.Property(EKMovedTo) 127 | assert.Equal(t, "1.1.1.1:3456", v) 128 | v, _ = err.Property(EKSlot) 129 | assert.Equal(t, int64(1234), v) 130 | } 131 | 132 | res = readLines("-ASK 1234 1.1.1.1:3456\r\n") 133 | if checkErrType(t, res, ErrAsk) { 134 | err := res.(*errorx.Error) 135 | assert.Equal(t, "ASK 1234 1.1.1.1:3456", err.Message()) 136 | v, _ := err.Property(EKMovedTo) 137 | assert.Equal(t, "1.1.1.1:3456", v) 138 | v, _ = err.Property(EKSlot) 139 | assert.Equal(t, int64(1234), v) 140 | } 141 | 142 | res = readLines("-LOADING\r\n") 143 | if checkErrType(t, res, ErrLoading) { 144 | err := res.(*errorx.Error) 145 | assert.Equal(t, "LOADING", err.Message()) 146 | } 147 | 148 | for i := -1000; i <= 1000; i++ { 149 | res = readLines(fmt.Sprintf(":%d\r\n", i)) 150 | assert.Equal(t, int64(i), res) 151 | } 152 | 153 | res = readLines(":9223372036854775807\r\n") 154 | assert.Equal(t, int64(9223372036854775807), res) 155 | 156 | res = readLines(":-9223372036854775808\r\n") 157 | assert.Equal(t, int64(-9223372036854775808), res) 158 | 159 | res = readLines("$0\r\n", "\r\n") 160 | assert.Equal(t, []byte(""), res) 161 | assert.Equal(t, len(res.([]byte)), cap(res.([]byte))) 162 | 163 | res = readLines("$1\r\n", "a\r\n") 164 | assert.Equal(t, []byte("a"), res) 165 | assert.Equal(t, len(res.([]byte)), cap(res.([]byte))) 166 | 167 | res = readLines("$4\r\n", "asdf\r\n") 168 | assert.Equal(t, []byte("asdf"), res) 169 | assert.Equal(t, len(res.([]byte)), cap(res.([]byte))) 170 | 171 | big := strings.Repeat("a", 1024*1024) 172 | res = readLines(fmt.Sprintf("$%d\r\n", len(big)), big, "\r\n") 173 | assert.Equal(t, []byte(big), res) 174 | assert.Equal(t, len(res.([]byte)), cap(res.([]byte))) 175 | 176 | res = readLines("*0\r\n") 177 | assert.Equal(t, []interface{}{}, res) 178 | 179 | res = readLines("*1\r\n", "+OK\r\n") 180 | assert.Equal(t, []interface{}{"OK"}, res) 181 | 182 | res = readLines("*2\r\n", "+OK\r\n", "*2\r\n", ":1\r\n", "+OK\r\n") 183 | assert.Equal(t, []interface{}{"OK", []interface{}{int64(1), "OK"}}, res) 184 | 185 | res = readLines("$-1\r\n") 186 | assert.Nil(t, res) 187 | 188 | res = readLines("*-1\r\n") 189 | assert.Nil(t, res) 190 | } 191 | -------------------------------------------------------------------------------- /rediscluster/bench/bench_test.go: -------------------------------------------------------------------------------- 1 | package bench 2 | 3 | import ( 4 | "context" 5 | "math/rand" 6 | "runtime" 7 | "strconv" 8 | "sync/atomic" 9 | . 
"testing" 10 | "time" 11 | 12 | "github.com/joomcode/redispipe/rediscluster" 13 | "github.com/joomcode/redispipe/testbed" 14 | "github.com/joomcode/redispipe/redis" 15 | "github.com/joomcode/redispipe/redisconn" 16 | 17 | redigo "github.com/wuxibin89/redis-go-cluster" 18 | radix "github.com/mediocregopher/radix/v3" 19 | ) 20 | 21 | func benchCluster(port int) func() { 22 | testbed.InitDir(".") 23 | cl := testbed.NewCluster(uint16(port)) 24 | cl.Start() 25 | return func() { 26 | cl.Stop() 27 | testbed.RmDir() 28 | } 29 | } 30 | 31 | func BenchmarkSerialGetSet(b *B) { 32 | defer benchCluster(45000)() 33 | rng := rand.New(rand.NewSource(1)) 34 | b.Run("radix_pause0", func(b *B) { 35 | rdxv2, err := radix.NewCluster( 36 | []string{"127.0.0.1:45000"}, 37 | radix.ClusterPoolFunc(func(network, addr string) (radix.Client, error) { 38 | return radix.NewPool(network, addr, 4, 39 | radix.PoolPipelineWindow(0, 0)) 40 | }), 41 | ) 42 | if err != nil { 43 | b.Fatal(err) 44 | return 45 | } 46 | defer rdxv2.Close() 47 | b.ResetTimer() 48 | for i := 0; i < b.N; i++ { 49 | key := "foo" + strconv.Itoa(rng.Intn(65536)) 50 | if err := rdxv2.Do(radix.Cmd(nil, "SET", key, "bar")); err != nil { 51 | b.Fatal(err) 52 | } 53 | if err := rdxv2.Do(radix.Cmd(nil, "GET", key)); err != nil { 54 | b.Fatal(err) 55 | } 56 | } 57 | }) 58 | 59 | b.Run("redigo", func(b *B) { 60 | red := newRedigo() 61 | defer red.Close() 62 | b.ResetTimer() 63 | for i := 0; i < b.N; i++ { 64 | key := "foo" + strconv.Itoa(rng.Intn(65536)) 65 | if _, err := red.Do("SET", key, "bar"); err != nil { 66 | b.Fatal(err) 67 | } 68 | if _, err := redigo.String(red.Do("GET", key)); err != nil { 69 | b.Fatal(err) 70 | } 71 | } 72 | }) 73 | 74 | b.Run("redispipe", func(b *B) { 75 | pipe, err := rediscluster.NewCluster(context.Background(), []string{"127.0.0.1:45000"}, rediscluster.Opts{ 76 | Logger: rediscluster.NoopLogger{}, 77 | HostOpts: redisconn.Opts{ 78 | Logger: redisconn.NoopLogger{}, 79 | }, 80 | }) 81 | defer pipe.Close() 82 | if err != nil { 83 | b.Fatal(err) 84 | } 85 | sync := redis.Sync{pipe} 86 | b.ResetTimer() 87 | for i := 0; i < b.N; i++ { 88 | key := "foo" + strconv.Itoa(rng.Intn(65536)) 89 | if res := sync.Do("SET", key, "bar"); redis.AsError(res) != nil { 90 | b.Fatal(res) 91 | } 92 | if res := sync.Do("GET", key); redis.AsError(res) != nil { 93 | b.Fatal(res) 94 | } 95 | } 96 | }) 97 | 98 | b.Run("redispipe_pause0", func(b *B) { 99 | pipe, err := rediscluster.NewCluster(context.Background(), []string{"127.0.0.1:45000"}, rediscluster.Opts{ 100 | Logger: rediscluster.NoopLogger{}, 101 | HostOpts: redisconn.Opts{ 102 | Logger: redisconn.NoopLogger{}, 103 | WritePause: -1, 104 | }, 105 | }) 106 | defer pipe.Close() 107 | if err != nil { 108 | b.Fatal(err) 109 | } 110 | sync := redis.Sync{pipe} 111 | b.ResetTimer() 112 | for i := 0; i < b.N; i++ { 113 | key := "foo" + strconv.Itoa(rng.Intn(65536)) 114 | if res := sync.Do("SET", key, "bar"); redis.AsError(res) != nil { 115 | b.Fatal(res) 116 | } 117 | if res := sync.Do("GET", key); redis.AsError(res) != nil { 118 | b.Fatal(res) 119 | } 120 | } 121 | }) 122 | } 123 | 124 | func BenchmarkParallelGetSet(b *B) { 125 | defer benchCluster(45000)() 126 | parallel := runtime.GOMAXPROCS(0) * 8 127 | i := uint32(1) 128 | 129 | do := func(b *B, fn func(*rand.Rand)) { 130 | b.SetParallelism(parallel) 131 | b.RunParallel(func(pb *PB) { 132 | rng := rand.New(rand.NewSource(int64(atomic.AddUint32(&i, 1)))) 133 | for pb.Next() { 134 | fn(rng) 135 | } 136 | }) 137 | } 138 | 139 | b.Run("radix", func(b *B) { 
140 | rdx2, err := radix.NewCluster([]string{"127.0.0.1:45000"}) 141 | defer rdx2.Close() 142 | if err != nil { 143 | b.Fatal(err) 144 | } 145 | b.ResetTimer() 146 | do(b, func(rng *rand.Rand) { 147 | key := "foo" + strconv.Itoa(rng.Intn(65536)) 148 | if err := rdx2.Do(radix.Cmd(nil, "SET", key, "bar")); err != nil { 149 | b.Fatal(err) 150 | } 151 | if err := rdx2.Do(radix.Cmd(nil, "GET", key)); err != nil { 152 | b.Fatal(err) 153 | } 154 | }) 155 | }) 156 | 157 | b.Run("redigo", func(b *B) { 158 | red := newRedigo() 159 | defer red.Close() 160 | b.ResetTimer() 161 | do(b, func(rng *rand.Rand) { 162 | key := "foo" + strconv.Itoa(rng.Intn(65536)) 163 | if _, err := red.Do("SET", key, "bar"); err != nil { 164 | b.Fatal(err) 165 | } 166 | if _, err := redigo.String(red.Do("GET", key)); err != nil { 167 | b.Fatal(err) 168 | } 169 | }) 170 | }) 171 | 172 | b.Run("redispipe", func(b *B) { 173 | pipe, err := rediscluster.NewCluster(context.Background(), []string{"127.0.0.1:45000"}, rediscluster.Opts{ 174 | Logger: rediscluster.NoopLogger{}, 175 | HostOpts: redisconn.Opts{ 176 | Logger: redisconn.NoopLogger{}, 177 | }, 178 | }) 179 | defer pipe.Close() 180 | if err != nil { 181 | b.Fatal(err) 182 | } 183 | sync := redis.Sync{pipe} 184 | b.ResetTimer() 185 | do(b, func(rng *rand.Rand) { 186 | key := "foo" + strconv.Itoa(rng.Intn(65536)) 187 | if res := sync.Do("SET", key, "bar"); redis.AsError(res) != nil { 188 | b.Fatal(res) 189 | } 190 | if res := sync.Do("GET", key); redis.AsError(res) != nil { 191 | b.Fatal(err) 192 | } 193 | }) 194 | }) 195 | } 196 | 197 | func newRedigo() *redigo.Cluster { 198 | c, err := redigo.NewCluster(&redigo.Options{ 199 | StartNodes: []string{"127.0.0.1:45000"}, 200 | ConnTimeout: time.Minute, 201 | KeepAlive: 128, 202 | AliveTime: time.Minute, 203 | }) 204 | if err != nil { 205 | panic(err) 206 | } 207 | return c 208 | } 209 | -------------------------------------------------------------------------------- /redisdumb/conn.go: -------------------------------------------------------------------------------- 1 | // Package redisdumb contains dumbest implementation of redis.Sender 2 | package redisdumb 3 | 4 | import ( 5 | "bufio" 6 | "crypto/tls" 7 | "net" 8 | "time" 9 | 10 | "github.com/joomcode/redispipe/redis" 11 | "github.com/joomcode/redispipe/rediscluster/redisclusterutil" 12 | "github.com/joomcode/redispipe/redisconn" 13 | ) 14 | 15 | // ConnType - type of connection (simple server, or cluster aware). 16 | type ConnType int 17 | 18 | const ( 19 | // TypeSimple (default) is for connection to single server. 20 | TypeSimple ConnType = 0 21 | // TypeCluster is for connection which awares for cluster redirects. 22 | TypeCluster ConnType = 1 23 | ) 24 | 25 | // DefaultTimeout is default timeout. 26 | var DefaultTimeout time.Duration = 5 * time.Second 27 | 28 | // Conn is a simplest blocking implementation of redis.Sender. 29 | type Conn struct { 30 | Addr string 31 | TlsAddr string 32 | C net.Conn 33 | R *bufio.Reader 34 | Timeout time.Duration 35 | Type ConnType 36 | TLSEnabled bool 37 | TLSConfig *tls.Config 38 | } 39 | 40 | // Do issues command to servers. 41 | // It handles reconnection and redirection (if Conn.Type==TypeCluster). 
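// A minimal usage sketch (hypothetical address, written from a client's point of view;
// real code should also act on the returned error value):
//
//	c := &redisdumb.Conn{Addr: "127.0.0.1:6379"}
//	res := c.Do("SET", "answer", 42)
//	if err := redis.AsError(res); err != nil {
//	    // res is an error: dial failure, IO error, or a redis -ERR reply
//	}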
42 | func (c *Conn) Do(cmd string, args ...interface{}) interface{} { 43 | timeout := c.Timeout 44 | if timeout == 0 { 45 | timeout = DefaultTimeout 46 | } 47 | try := 1 48 | if c.C != nil { 49 | try = 2 50 | } 51 | var req []byte 52 | var err error 53 | var asking bool 54 | for i := 0; i < try; i++ { 55 | if c.C == nil { 56 | if c.TLSEnabled { 57 | dialer := net.Dialer{ 58 | Timeout: timeout, 59 | } 60 | tlsDialer := tls.Dialer{NetDialer: &dialer, Config: c.TLSConfig} 61 | c.C, err = tlsDialer.Dial("tcp", c.TlsAddr) 62 | } else { 63 | c.C, err = net.DialTimeout("tcp", c.Addr, timeout) 64 | } 65 | if err != nil { 66 | return redisconn.ErrDial.WrapWithNoMessage(err) 67 | } 68 | c.R = bufio.NewReader(c.C) 69 | } 70 | if asking { 71 | c.Do("ASKING") 72 | } 73 | c.C.SetDeadline(time.Now().Add(timeout)) 74 | req, err = redis.AppendRequest(nil, redis.Request{cmd, args}) 75 | if err == nil { 76 | if _, err = c.C.Write(req); err == nil { 77 | res, _ := redis.ReadResponse(c.R) 78 | rerr := redis.AsErrorx(res) 79 | if rerr == nil { 80 | return res 81 | } 82 | err = rerr 83 | if c.Type == TypeCluster && rerr.HasTrait(redis.ErrTraitClusterMove) { 84 | asking = rerr.IsOfType(redis.ErrAsk) 85 | v, _ := rerr.Property(redis.EKMovedTo) 86 | c.Addr = v.(string) 87 | if try < 5 { 88 | try++ 89 | } 90 | } 91 | } else { 92 | err = redis.ErrIO.WrapWithNoMessage(err) 93 | } 94 | } 95 | c.C.Close() 96 | c.C = nil 97 | } 98 | return err 99 | } 100 | 101 | // Send implements redis.Sender.Send 102 | func (c *Conn) Send(r redis.Request, cb redis.Future, n uint64) { 103 | res := c.Do(r.Cmd, r.Args...) 104 | cb.Resolve(res, n) 105 | } 106 | 107 | // SendMany implements redis.Sender.SendMany. 108 | // Note, it does it in a dumb way: commands are executed sequentially. 109 | func (c *Conn) SendMany(reqs []redis.Request, cb redis.Future, n uint64) { 110 | for i, r := range reqs { 111 | res := c.Do(r.Cmd, r.Args...) 112 | cb.Resolve(res, n+uint64(i)) 113 | } 114 | } 115 | 116 | // SendTransaction implements redis.Sender.SendTransaction. 117 | func (c *Conn) SendTransaction(reqs []redis.Request, cb redis.Future, n uint64) { 118 | if c.Type == TypeCluster { 119 | // first, redirect ourself to right master 120 | key, ok := redisclusterutil.BatchKey(reqs) 121 | if !ok { 122 | cb.Resolve(redis.ErrNoSlotKey.New("no key to determine slot"), n) 123 | return 124 | } 125 | c.Do("TYPE", key) 126 | // ok, now we are at right shard, I hope. 127 | } 128 | res := c.Do("MULTI") 129 | if err := redis.AsError(res); err != nil { 130 | cb.Resolve(res, n) 131 | return 132 | } 133 | for _, r := range reqs { 134 | res = c.Do(r.Cmd, r.Args...) 135 | if err := redis.AsErrorx(res); !err.IsOfType(redis.ErrResult) { 136 | cb.Resolve(res, n) 137 | return 138 | } 139 | } 140 | res = c.Do("EXEC") 141 | cb.Resolve(res, n) 142 | } 143 | 144 | // EachShard implements redis.Sender.EachShard. 
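// A hedged sketch of visiting every master of a cluster-aware Conn
// (callback semantics only; the command shown is purely illustrative):
//
//	c.EachShard(func(s redis.Sender, err error) bool {
//	    if err != nil {
//	        return false // stop iteration on discovery error
//	    }
//	    redis.Sync{s}.Do("PING")
//	    return true // continue with the next shard
//	})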
145 | func (c *Conn) EachShard(f func(redis.Sender, error) bool) { 146 | // TODO: correctly iterate through cluster 147 | switch c.Type { 148 | case TypeSimple: 149 | f(c, nil) 150 | case TypeCluster: 151 | nodeInfos, err := redisclusterutil.ParseClusterNodes(c.Do("CLUSTER NODES")) 152 | if err != nil { 153 | f(c, err) 154 | } 155 | for _, nodeInfo := range nodeInfos { 156 | if nodeInfo.AddrValid() && nodeInfo.IsMaster() { 157 | con := &Conn{Addr: nodeInfo.Addr} 158 | if !f(con, nil) { 159 | return 160 | } 161 | } 162 | } 163 | } 164 | } 165 | 166 | // Scanner implements redis.Scanner 167 | type Scanner struct { 168 | redis.ScannerBase 169 | c *Conn 170 | } 171 | 172 | // Next implements redis.Scanner.Next 173 | func (s *Scanner) Next(cb redis.Future) { 174 | if s.Err != nil { 175 | cb.Resolve(s.Err, 0) 176 | return 177 | } 178 | if s.IterLast() { 179 | cb.Resolve(nil, 0) 180 | return 181 | } 182 | s.DoNext(cb, s.c) 183 | } 184 | 185 | // Scanner implements redis.Sender.Scanner 186 | func (c *Conn) Scanner(opts redis.ScanOpts) redis.Scanner { 187 | return &Scanner{ 188 | ScannerBase: redis.ScannerBase{ScanOpts: opts}, 189 | c: c, 190 | } 191 | } 192 | 193 | // Close closes connection (implements redis.Sender.Close) 194 | func (c *Conn) Close() { 195 | if c.C != nil { 196 | c.C.Close() 197 | c.C = nil 198 | } 199 | } 200 | 201 | // Do is shortcut for issuing single command to redis by address. 202 | func Do(addr string, cmd string, args ...interface{}) interface{} { 203 | conn, err := net.DialTimeout("tcp", addr, DefaultTimeout) 204 | if err != nil { 205 | return redisconn.ErrDial.WrapWithNoMessage(err) 206 | } 207 | defer conn.Close() 208 | conn.SetDeadline(time.Now().Add(DefaultTimeout)) 209 | req, rerr := redis.AppendRequest(nil, redis.Request{cmd, args}) 210 | if rerr != nil { 211 | return rerr 212 | } 213 | if _, err = conn.Write(req); err != nil { 214 | return redis.ErrIO.WrapWithNoMessage(err) 215 | } 216 | res, _ := redis.ReadResponse(bufio.NewReader(conn)) 217 | return res 218 | } 219 | -------------------------------------------------------------------------------- /redis/sync_context.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "sync/atomic" 6 | 7 | "github.com/joomcode/errorx" 8 | ) 9 | 10 | // SyncCtx (like Sync) provides convenient synchronous interface over asynchronous Sender. 11 | // Its methods accept context.Context to allow early request cancelling. 12 | // Note that if context were cancelled after request were send, redis still will execute it, 13 | // but you will have no way to know about that fact. 14 | type SyncCtx struct { 15 | S Sender 16 | } 17 | 18 | // Do is convenient method to construct and send request. 19 | // Returns value that could be either result or error. 20 | // When context is cancelled, Do returns ErrRequestCancelled error. 21 | func (s SyncCtx) Do(ctx context.Context, cmd string, args ...interface{}) interface{} { 22 | return s.Send(ctx, Request{cmd, args}) 23 | } 24 | 25 | // Send sends request to redis. 26 | // Returns value that could be either result or error. 27 | // When context is cancelled, Send returns ErrRequestCancelled error. 
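// A hypothetical usage sketch with a deadline (sender is any redis.Sender,
// e.g. *redisconn.Connection or *rediscluster.Cluster; its construction is not shown):
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//	defer cancel()
//	res := SyncCtx{sender}.Send(ctx, Req("GET", "key"))
//	if err := AsError(res); err != nil {
//	    // either a redis/IO error or ErrRequestCancelled
//	}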
28 | func (s SyncCtx) Send(ctx context.Context, r Request) interface{} { 29 | ctxch := ctx.Done() 30 | if ctxch == nil { 31 | return Sync{s.S}.Send(r) 32 | } 33 | 34 | res := ctxRes{active: newActive(ctx)} 35 | 36 | s.S.Send(r, &res, 0) 37 | 38 | select { 39 | case <-ctxch: 40 | err := ErrRequestCancelled.WrapWithNoMessage(ctx.Err()) 41 | if CollectTrace { 42 | err = errorx.EnsureStackTrace(err) 43 | } 44 | return err 45 | case <-res.ch: 46 | if CollectTrace { 47 | if err := AsError(res.r); err != nil { 48 | res.r = errorx.EnsureStackTrace(err) 49 | } 50 | } 51 | return res.r 52 | } 53 | } 54 | 55 | // SendMany sends several requests in "parallel" and returns slice or results in a same order. 56 | // Each result could be value or error. 57 | // When context is cancelled, SendMany returns slice of ErrRequestCancelled errors. 58 | func (s SyncCtx) SendMany(ctx context.Context, reqs []Request) []interface{} { 59 | ctxch := ctx.Done() 60 | if ctxch == nil { 61 | return Sync{s.S}.SendMany(reqs) 62 | } 63 | 64 | if len(reqs) == 0 { 65 | return nil 66 | } 67 | 68 | res := ctxBatch{ 69 | active: newActive(ctx), 70 | r: make([]interface{}, len(reqs)), 71 | o: make([]uint32, len(reqs)), 72 | cnt: 0, 73 | } 74 | 75 | s.S.SendMany(reqs, &res, 0) 76 | 77 | select { 78 | case <-ctxch: 79 | err := ErrRequestCancelled.WrapWithNoMessage(ctx.Err()) 80 | if CollectTrace { 81 | err = errorx.EnsureStackTrace(err) 82 | } 83 | for i := range res.r { 84 | res.Resolve(err, uint64(i)) 85 | } 86 | <-res.ch 87 | case <-res.ch: 88 | } 89 | if CollectTrace { 90 | for i, v := range res.r { 91 | if err := AsErrorx(v); err != nil { 92 | res.r[i] = errorx.EnsureStackTrace(err) 93 | } 94 | } 95 | } 96 | return res.r 97 | } 98 | 99 | // SendTransaction sends several requests as a single MULTI+EXEC transaction. 100 | // It returns array of responses and an error, if transaction fails. 101 | // Since Redis transaction either fully executed or fully failed, 102 | // all values are valid if err == nil. But some of them could be error on their own. 103 | // When context is cancelled, SendTransaction returns ErrRequestCancelled error. 104 | func (s SyncCtx) SendTransaction(ctx context.Context, reqs []Request) ([]interface{}, error) { 105 | ctxch := ctx.Done() 106 | if ctxch == nil { 107 | return Sync{s.S}.SendTransaction(reqs) 108 | } 109 | 110 | res := ctxRes{active: newActive(ctx)} 111 | 112 | s.S.SendTransaction(reqs, &res, 0) 113 | 114 | var r interface{} 115 | select { 116 | case <-ctxch: 117 | r = ErrRequestCancelled.WrapWithNoMessage(ctx.Err()) 118 | case <-res.ch: 119 | r = res.r 120 | } 121 | 122 | ress, err := TransactionResponse(r) 123 | if CollectTrace && err != nil { 124 | err = errorx.EnsureStackTrace(err) 125 | } 126 | return ress, err 127 | } 128 | 129 | // Scanner returns synchronous iterator over redis keyspace/key. 130 | // Scanner will stop iteration if context were cancelled. 
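// A hedged iteration sketch (ScanOpts should be filled with whatever options the
// caller needs; the zero value below is used purely as a placeholder):
//
//	it := SyncCtx{sender}.Scanner(ctx, ScanOpts{})
//	for {
//	    keys, err := it.Next()
//	    if err == ScanEOF {
//	        break // regular end of iteration
//	    }
//	    if err != nil {
//	        break // IO error or ErrRequestCancelled
//	    }
//	    _ = keys // process this batch of keys
//	}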
131 | func (s SyncCtx) Scanner(ctx context.Context, opts ScanOpts) SyncCtxIterator { 132 | return SyncCtxIterator{ctx, s.S.Scanner(opts)} 133 | } 134 | 135 | type active struct { 136 | ctx context.Context 137 | ch chan struct{} 138 | } 139 | 140 | func newActive(ctx context.Context) active { 141 | return active{ctx, make(chan struct{})} 142 | } 143 | 144 | // Cancelled implements Future.Cancelled 145 | func (c active) Cancelled() error { 146 | select { 147 | case <-c.ctx.Done(): 148 | return c.ctx.Err() 149 | default: 150 | return nil 151 | } 152 | } 153 | 154 | func (c active) done() { 155 | close(c.ch) 156 | } 157 | 158 | type ctxRes struct { 159 | active 160 | r interface{} 161 | } 162 | 163 | // Resolve implements Future.Resolve 164 | func (c *ctxRes) Resolve(r interface{}, _ uint64) { 165 | c.r = r 166 | c.done() 167 | } 168 | 169 | type ctxBatch struct { 170 | active 171 | r []interface{} 172 | o []uint32 173 | cnt uint32 174 | } 175 | 176 | // Resolve implements Future.Resolve 177 | func (s *ctxBatch) Resolve(res interface{}, i uint64) { 178 | if atomic.CompareAndSwapUint32(&s.o[i], 0, 1) { 179 | s.r[i] = res 180 | if int(atomic.AddUint32(&s.cnt, 1)) == len(s.r) { 181 | s.done() 182 | } 183 | } 184 | } 185 | 186 | // SyncCtxIterator is synchronous iterator over repeating *SCAN command. 187 | // It will stop iteration if context were cancelled. 188 | type SyncCtxIterator struct { 189 | ctx context.Context 190 | s Scanner 191 | } 192 | 193 | // Next returns next bunch of keys, or error. 194 | // ScanEOF error signals for regular iteration completion. 195 | // It will return ErrRequestCancelled error if context were cancelled. 196 | func (s SyncCtxIterator) Next() ([]string, error) { 197 | res := ctxRes{active: newActive(s.ctx)} 198 | s.s.Next(&res) 199 | select { 200 | case <-s.ctx.Done(): 201 | err := ErrRequestCancelled.WrapWithNoMessage(s.ctx.Err()) 202 | if CollectTrace { 203 | err = errorx.EnsureStackTrace(err) 204 | } 205 | return nil, err 206 | case <-res.ch: 207 | } 208 | if err := AsError(res.r); err != nil { 209 | if CollectTrace { 210 | err = errorx.EnsureStackTrace(err) 211 | } 212 | return nil, err 213 | } else if res.r == nil { 214 | return nil, ScanEOF 215 | } else { 216 | return res.r.([]string), nil 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /redis/request_writer.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "strconv" 5 | 6 | "github.com/joomcode/errorx" 7 | ) 8 | 9 | // AppendRequest appends request to byte slice as RESP request (ie as array of strings). 10 | // 11 | // It could fail if some request value is not nil, integer, float, string or byte slice. 12 | // In case of error it still returns modified buffer, but truncated to original size, it could be used save reallocation. 13 | // 14 | // Note: command could contain single space. In that case, it will be split and last part will be prepended to arguments. 15 | func AppendRequest(buf []byte, req Request) ([]byte, error) { 16 | oldSize := len(buf) 17 | space := -1 18 | for i, c := range []byte(req.Cmd) { 19 | if c == ' ' { 20 | space = i 21 | break 22 | } 23 | } 24 | if space == -1 { 25 | buf = appendHead(buf, '*', len(req.Args)+1) 26 | buf = appendHead(buf, '$', len(req.Cmd)) 27 | buf = append(buf, req.Cmd...) 
28 | buf = append(buf, '\r', '\n') 29 | } else { 30 | buf = appendHead(buf, '*', len(req.Args)+2) 31 | buf = appendHead(buf, '$', space) 32 | buf = append(buf, req.Cmd[:space]...) 33 | buf = append(buf, '\r', '\n') 34 | buf = appendHead(buf, '$', len(req.Cmd)-space-1) 35 | buf = append(buf, req.Cmd[space+1:]...) 36 | buf = append(buf, '\r', '\n') 37 | } 38 | for i, val := range req.Args { 39 | switch v := val.(type) { 40 | case string: 41 | buf = appendHead(buf, '$', len(v)) 42 | buf = append(buf, v...) 43 | case []byte: 44 | buf = appendHead(buf, '$', len(v)) 45 | buf = append(buf, v...) 46 | case int: 47 | buf = appendBulkInt(buf, int64(v)) 48 | case uint: 49 | buf = appendBulkUint(buf, uint64(v)) 50 | case int64: 51 | buf = appendBulkInt(buf, int64(v)) 52 | case uint64: 53 | buf = appendBulkUint(buf, uint64(v)) 54 | case int32: 55 | buf = appendBulkInt(buf, int64(v)) 56 | case uint32: 57 | buf = appendBulkUint(buf, uint64(v)) 58 | case int8: 59 | buf = appendBulkInt(buf, int64(v)) 60 | case uint8: 61 | buf = appendBulkUint(buf, uint64(v)) 62 | case int16: 63 | buf = appendBulkInt(buf, int64(v)) 64 | case uint16: 65 | buf = appendBulkUint(buf, uint64(v)) 66 | case bool: 67 | if v { 68 | buf = append(buf, "$1\r\n1"...) 69 | } else { 70 | buf = append(buf, "$1\r\n0"...) 71 | } 72 | case float32: 73 | str := strconv.FormatFloat(float64(v), 'f', -1, 32) 74 | buf = appendHead(buf, '$', len(str)) 75 | buf = append(buf, str...) 76 | case float64: 77 | str := strconv.FormatFloat(v, 'f', -1, 64) 78 | buf = appendHead(buf, '$', len(str)) 79 | buf = append(buf, str...) 80 | case nil: 81 | buf = append(buf, "$0\r\n"...) 82 | default: 83 | return buf[:oldSize], ErrArgumentType.NewWithNoMessage(). 84 | WithProperty(EKVal, val). 85 | WithProperty(EKArgPos, i). 86 | WithProperty(EKRequest, req) 87 | } 88 | buf = append(buf, '\r', '\n') 89 | } 90 | return buf, nil 91 | } 92 | 93 | func appendInt(b []byte, i int64) []byte { 94 | var u uint64 95 | if i >= 0 && i <= 9 { 96 | b = append(b, byte(i)+'0') 97 | return b 98 | } 99 | if i > 0 { 100 | u = uint64(i) 101 | } else { 102 | b = append(b, '-') 103 | u = uint64(-i) 104 | } 105 | return appendUint(b, u) 106 | } 107 | 108 | func appendUint(b []byte, u uint64) []byte { 109 | if u <= 9 { 110 | b = append(b, byte(u)+'0') 111 | return b 112 | } 113 | digits := [20]byte{} 114 | p := 20 115 | for u > 0 { 116 | n := u / 10 117 | p-- 118 | digits[p] = byte(u-n*10) + '0' 119 | u = n 120 | } 121 | return append(b, digits[p:]...) 
122 | } 123 | 124 | func appendHead(b []byte, t byte, i int) []byte { 125 | if i < 0 { 126 | panic("negative length header") 127 | } 128 | b = append(b, t) 129 | b = appendUint(b, uint64(i)) 130 | return append(b, '\r', '\n') 131 | } 132 | 133 | func appendBulkInt(b []byte, i int64) []byte { 134 | if i >= -99999999 && i <= 999999999 { 135 | b = append(b, '$', '0', '\r', '\n') 136 | } else { 137 | b = append(b, '$', '0', '0', '\r', '\n') 138 | } 139 | l := len(b) 140 | b = appendInt(b, i) 141 | li := byte(len(b) - l) 142 | if li < 10 { 143 | b[l-3] = li + '0' 144 | } else { 145 | d := li / 10 146 | b[l-4] = d + '0' 147 | b[l-3] = li - (d * 10) + '0' 148 | } 149 | return b 150 | } 151 | 152 | func appendBulkUint(b []byte, i uint64) []byte { 153 | if i <= 999999999 { 154 | b = append(b, '$', '0', '\r', '\n') 155 | } else { 156 | b = append(b, '$', '0', '0', '\r', '\n') 157 | } 158 | l := len(b) 159 | b = appendUint(b, i) 160 | li := byte(len(b) - l) 161 | if li < 10 { 162 | b[l-3] = li + '0' 163 | } else { 164 | d := li / 10 165 | b[l-4] = d + '0' 166 | b[l-3] = li - (d * 10) + '0' 167 | } 168 | return b 169 | } 170 | 171 | // ArgToString returns string representataion of an argument. 172 | // Used in cluster to determine cluster slot. 173 | // Have to be in sync with AppendRequest 174 | func ArgToString(arg interface{}) (string, bool) { 175 | var bufarr [20]byte 176 | var buf []byte 177 | switch v := arg.(type) { 178 | case string: 179 | return v, true 180 | case []byte: 181 | return string(v), true 182 | case int: 183 | buf = appendInt(bufarr[:0], int64(v)) 184 | case uint: 185 | buf = appendUint(bufarr[:0], uint64(v)) 186 | case int64: 187 | buf = appendInt(bufarr[:0], int64(v)) 188 | case uint64: 189 | buf = appendUint(bufarr[:0], uint64(v)) 190 | case int32: 191 | buf = appendInt(bufarr[:0], int64(v)) 192 | case uint32: 193 | buf = appendUint(bufarr[:0], uint64(v)) 194 | case int8: 195 | buf = appendInt(bufarr[:0], int64(v)) 196 | case uint8: 197 | buf = appendUint(bufarr[:0], uint64(v)) 198 | case int16: 199 | buf = appendInt(bufarr[:0], int64(v)) 200 | case uint16: 201 | buf = appendUint(bufarr[:0], uint64(v)) 202 | case bool: 203 | if v { 204 | return "1", true 205 | } 206 | return "0", true 207 | case float32: 208 | return strconv.FormatFloat(float64(v), 'f', -1, 32), true 209 | case float64: 210 | return strconv.FormatFloat(v, 'f', -1, 64), true 211 | case nil: 212 | return "", true 213 | default: 214 | return "", false 215 | } 216 | return string(buf), true 217 | } 218 | 219 | // CheckRequest checks requests command and arguments to be compatible with connector. 220 | func CheckRequest(req Request, singleThreaded bool) error { 221 | if err := ForbiddenCommand(req.Cmd, singleThreaded); err != nil { 222 | return err.(*errorx.Error).WithProperty(EKRequest, req) 223 | } 224 | for i, arg := range req.Args { 225 | switch val := arg.(type) { 226 | case string, []byte, int, uint, int64, uint64, int32, uint32, int8, uint8, int16, uint16, bool, float32, float64, nil: 227 | // ok 228 | default: 229 | return ErrArgumentType.NewWithNoMessage(). 230 | WithProperty(EKVal, val). 231 | WithProperty(EKArgPos, i). 
232 | WithProperty(EKRequest, req) 233 | } 234 | } 235 | return nil 236 | } 237 | -------------------------------------------------------------------------------- /rediscluster/slotrange.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import ( 4 | "bytes" 5 | "math" 6 | "sync/atomic" 7 | "time" 8 | 9 | "github.com/joomcode/redispipe/redis" 10 | "github.com/joomcode/redispipe/rediscluster/redisclusterutil" 11 | ) 12 | 13 | const masterOnlyFlag = 0x4000 14 | 15 | func (c *Cluster) slotRangesAndInternalMasterOnly() ([]redisclusterutil.SlotsRange, error) { 16 | nodes := c.getConfig().nodes 17 | 18 | var ranges []redisclusterutil.SlotsRange 19 | var err error 20 | Outter: 21 | for _, node := range nodes { 22 | for _, conn := range node.conns { 23 | resp := redis.Sync{conn}.Do("CLUSTER SLOTS") 24 | ranges, err = redisclusterutil.ParseSlotsInfo(resp) 25 | if err == nil { 26 | break Outter 27 | } 28 | c.report(LogClusterSlotsError{Conn: conn, Error: err}) 29 | continue 30 | } 31 | } 32 | if err != nil { 33 | c.report(LogSlotRangeError{}) 34 | return nil, c.err(ErrClusterSlots) 35 | } 36 | 37 | // look for reminder about future migrations 38 | internalForce, internalForceSet, _ := redisclusterutil.RequestMasterOnly(c, "") 39 | c.m.Lock() 40 | if internalForceSet { 41 | c.internallyForceMasterOnly = internalForce 42 | } 43 | c.m.Unlock() 44 | 45 | return ranges, nil 46 | } 47 | 48 | func (c *Cluster) updateMappings(slotRanges []redisclusterutil.SlotsRange) { 49 | shards := make(map[string][]string) 50 | for _, r := range slotRanges { 51 | shards[r.Addrs[0]] = r.Addrs 52 | } 53 | 54 | addrs := make(map[string]struct{}) 55 | for _, rng := range slotRanges { 56 | for _, addr := range rng.Addrs { 57 | addrs[addr] = struct{}{} 58 | } 59 | } 60 | 61 | c.m.Lock() 62 | defer c.m.Unlock() 63 | 64 | oldConfig := c.getConfig() 65 | oldNodes := c.prevNodes 66 | c.prevNodes = oldConfig.nodes 67 | 68 | newConfig := *oldConfig 69 | newConfig.nodes = make(nodeMap, len(c.prevNodes)) 70 | 71 | for addr := range addrs { 72 | if node, ok := c.prevNodes[addr]; ok { 73 | atomic.AddUint32(&node.refcnt, 1) 74 | newConfig.nodes[addr] = node 75 | } else if node, ok := oldNodes[addr]; ok { 76 | atomic.AddUint32(&node.refcnt, 1) 77 | newConfig.nodes[addr] = node 78 | } else { 79 | node, _ = c.newNode(addr, false) 80 | newConfig.nodes[addr] = node 81 | } 82 | } 83 | 84 | newConfig.shards = make(shardMap, len(oldConfig.shards)) 85 | newConfig.masters = make(masterMap, len(oldConfig.masters)) 86 | 87 | var random uint16 88 | for master, addrs := range shards { 89 | shardno := uint16(len(newConfig.shards)) 90 | 91 | oldshard := func() *shard { 92 | var ok bool 93 | var oldnum uint16 94 | oldnum, ok = oldConfig.masters[master] 95 | if !ok { 96 | return nil 97 | } 98 | sh, ok := oldConfig.shards[oldnum] 99 | if !ok { 100 | return nil 101 | } 102 | if len(addrs) != len(sh.addr) { 103 | return nil 104 | } 105 | for i, addr := range addrs { 106 | if sh.addr[i] != addr { 107 | return nil 108 | } 109 | } 110 | return sh 111 | }() 112 | 113 | if oldshard != nil { 114 | newConfig.shards[shardno] = oldshard 115 | } else { 116 | shard := &shard{ 117 | addr: addrs, 118 | good: (uint32(1) << uint(len(addrs))) - 1, 119 | pingWeights: make([]uint32, len(addrs)), 120 | } 121 | newConfig.shards[shardno] = shard 122 | for i := range shard.pingWeights { 123 | shard.pingWeights[i] = 1 124 | } 125 | } 126 | newConfig.masters[addrs[0]] = shardno 127 | random = shardno 128 | } 129 | 
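// Reset the per-address connection promises for the new configuration and, in the
// background, reassign connection roles: READWRITE for masters, READONLY (plus an
// INFO health probe) for replicas.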
130 | c.nodeWait.Lock() 131 | c.nodeWait.promises = make(map[string]*[]connThen, 1) 132 | c.nodeWait.Unlock() 133 | 134 | go newConfig.setConnRoles() 135 | 136 | var sh uint32 137 | for i := 0; i < redisclusterutil.NumSlots; i++ { 138 | var cur uint32 139 | if len(slotRanges) != 0 && i > slotRanges[0].To { 140 | slotRanges = slotRanges[1:] 141 | } 142 | if len(slotRanges) == 0 || i < slotRanges[0].From { 143 | cur = uint32(random) 144 | } else { 145 | cur = uint32(newConfig.masters[slotRanges[0].Addrs[0]]) 146 | } 147 | if _, ok := c.internallyForceMasterOnly[uint16(i)]; ok { 148 | cur |= masterOnlyFlag 149 | DebugEvent("automatic masteronly") 150 | } 151 | if i&1 == 0 { 152 | sh = cur 153 | } else { 154 | sh |= cur << 16 155 | newConfig.slots[i/2] = sh 156 | } 157 | } 158 | 159 | c.storeConfig(&newConfig) 160 | 161 | time.AfterFunc(3*time.Millisecond, func() { 162 | for _, node := range oldNodes { 163 | if atomic.AddUint32(&node.refcnt, ^uint32(0)) != 0 { 164 | continue 165 | } 166 | for _, conn := range node.conns { 167 | conn.Close() 168 | } 169 | } 170 | }) 171 | time.AfterFunc(8*time.Millisecond, func() { 172 | for _, node := range newConfig.nodes { 173 | node.updatePingLatency() 174 | } 175 | for _, shard := range newConfig.shards { 176 | sumLatency := uint32(0) 177 | minLatencyID := 0 178 | minLatency := uint32(math.MaxUint32) 179 | 180 | for i, addr := range shard.addr { 181 | node := newConfig.nodes[addr] 182 | pingLatency := atomic.LoadUint32(&node.ping) 183 | if pingLatency < minLatency { 184 | minLatency = pingLatency 185 | minLatencyID = i 186 | } 187 | 188 | sumLatency += pingLatency 189 | } 190 | for i, addr := range shard.addr { 191 | node := newConfig.nodes[addr] 192 | 193 | weight := sumLatency / atomic.LoadUint32(&node.ping) 194 | if atomic.LoadUint32(&c.forceMinLatencyReplica) == enabled && i == minLatencyID { 195 | const alwaysPrefer = 1_000_000 196 | weight = alwaysPrefer 197 | } 198 | 199 | atomic.StoreUint32(&shard.pingWeights[i], weight) 200 | } 201 | } 202 | }) 203 | } 204 | 205 | func (s *shard) setReplicaInfo(res interface{}, n uint64) { 206 | haserr := false 207 | if err := redis.AsError(res); err != nil { 208 | haserr = true 209 | } else if n&1 == 0 { 210 | str, ok := res.(string) 211 | haserr = !(ok && str == "OK") 212 | } else if buf, ok := res.([]byte); !ok { 213 | haserr = true 214 | } else if bytes.Contains(buf, []byte("master_link_status:down")) || bytes.Contains(buf, []byte("loading:1")) { 215 | haserr = true 216 | } 217 | for { 218 | oldstate := atomic.LoadUint32(&s.good) 219 | newstate := oldstate 220 | if haserr { 221 | newstate &^= 1 << (n / 2) 222 | } else { 223 | newstate |= 1 << (n / 2) 224 | } 225 | if newstate == oldstate { 226 | break 227 | } 228 | if atomic.CompareAndSwapUint32(&s.good, oldstate, newstate) { 229 | break 230 | } 231 | } 232 | } 233 | 234 | func (cfg *clusterConfig) setConnRoles() { 235 | for _, sh := range cfg.shards { 236 | for i, addr := range sh.addr { 237 | node := cfg.nodes[addr] 238 | if node == nil { 239 | continue 240 | } 241 | for _, conn := range node.conns { 242 | if i == 0 { 243 | conn.Send(Request{"READWRITE", nil}, nil, 0) 244 | } else { 245 | conn.SendBatch([]Request{{"READONLY", nil}, {"INFO", nil}}, 246 | redis.FuncFuture(sh.setReplicaInfo), uint64(i*2)) 247 | } 248 | } 249 | } 250 | } 251 | } 252 | -------------------------------------------------------------------------------- /testbed/cluster.go: -------------------------------------------------------------------------------- 1 | package testbed 2 | 3 | 
import ( 4 | "bytes" 5 | "crypto/tls" 6 | "log" 7 | "time" 8 | 9 | "github.com/joomcode/redispipe/rediscluster/redisclusterutil" 10 | ) 11 | 12 | // Node is wrapper for Server with its NodeId 13 | type Node struct { 14 | Server 15 | NodeId []byte 16 | } 17 | 18 | // Cluster is a tool for starting/stopping redis cluster for tests. 19 | type Cluster struct { 20 | Node []Node 21 | } 22 | 23 | // NewCluster instantiate cluster of 6 nodes (3 masters and 3 slaves). 24 | // Master are on ports startport, startport+1, startport+2, 25 | // and slaves are on ports startport+3, startport+4, startport+5 26 | func NewCluster(startport uint16) *Cluster { 27 | cl := &Cluster{} 28 | cl.Node = make([]Node, 6) 29 | 30 | for i := range cl.Node { 31 | effectivePort := startport + uint16(i) 32 | if tlsCluster { 33 | cl.Node[i].Port = 0 34 | cl.Node[i].TlsPort = effectivePort 35 | cl.Node[i].Conn.TLSConfig = &tls.Config{InsecureSkipVerify: true} 36 | cl.Node[i].Conn.TLSEnabled = true 37 | } else { 38 | cl.Node[i].Port = effectivePort 39 | } 40 | 41 | cl.Node[i].Args = []string{ 42 | "--cluster-enabled", "yes", 43 | "--cluster-config-file", "node-" + cl.Node[i].PortStr(effectivePort) + ".conf", 44 | "--cluster-node-timeout", "200", 45 | "--cluster-slave-validity-factor", "1000", 46 | "--slave-serve-stale-data", "yes", 47 | "--cluster-require-full-coverage", "no", 48 | } 49 | if tlsCluster { 50 | cl.Node[i].Args = append([]string{ 51 | "--tls-cluster", "yes", 52 | "--tls-replication", "yes", 53 | }, cl.Node[i].Args...) 54 | } 55 | cl.Node[i].Start() 56 | cl.Node[i].SetupNodeId() 57 | cl.Node[i].DoSure("CLUSTER SET-CONFIG-EPOCH", i+1) 58 | } 59 | for i := 0; i < 5; i++ { 60 | for j := i + 1; j < 6; j++ { 61 | var effectivePort uint16 62 | if tlsCluster { 63 | effectivePort = cl.Node[j].TlsPort 64 | 65 | } else { 66 | effectivePort = cl.Node[j].Port 67 | } 68 | cl.Node[i].DoSure("CLUSTER MEET", "127.0.0.1", effectivePort) 69 | } 70 | } 71 | time.Sleep(1 * time.Second) 72 | cl.Node[0].AddSlots(0, 5499) 73 | cl.Node[1].AddSlots(5500, 10999) 74 | cl.Node[2].AddSlots(11000, redisclusterutil.NumSlots-1) 75 | cl.Node[3].DoSure("CLUSTER REPLICATE", cl.Node[0].NodeId) 76 | cl.Node[4].DoSure("CLUSTER REPLICATE", cl.Node[1].NodeId) 77 | cl.Node[5].DoSure("CLUSTER REPLICATE", cl.Node[2].NodeId) 78 | cl.WaitClusterOk() 79 | 80 | return cl 81 | } 82 | 83 | // Stop stops all cluster's servers 84 | func (cl *Cluster) Stop() { 85 | for i := range cl.Node { 86 | func() { 87 | defer recover() 88 | cl.Node[i].Stop() 89 | }() 90 | } 91 | } 92 | 93 | // Start starts all cluster's servers 94 | func (cl *Cluster) Start() { 95 | for i := range cl.Node { 96 | cl.Node[i].Start() 97 | } 98 | cl.WaitClusterOk() 99 | } 100 | 101 | func RaiseClusterPanic() { 102 | panic("cluster didn't stabilize") 103 | } 104 | 105 | // WaitClusterOk wait for cluster configuration to be stable. 106 | func (cl *Cluster) WaitClusterOk() { 107 | i := 0 108 | t := time.AfterFunc(30*time.Second, RaiseClusterPanic) 109 | defer t.Stop() 110 | for !cl.ClusterOk() { 111 | if i++; i == 10 { 112 | cl.AttemptFailover() 113 | } 114 | time.Sleep(100 * time.Millisecond) 115 | } 116 | } 117 | 118 | // ClusterOk checks cluster configuration. 
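// A typical polling sketch (essentially what WaitClusterOk does):
//
//	for !cl.ClusterOk() {
//	    time.Sleep(100 * time.Millisecond)
//	}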
119 | func (cl *Cluster) ClusterOk() bool { 120 | stopped := []int{} 121 | for i := range cl.Node { 122 | if !cl.Node[i].RunningNow() { 123 | stopped = append(stopped, i) 124 | } 125 | } 126 | var hashsum uint64 127 | for i := range cl.Node { 128 | if !cl.Node[i].RunningNow() { 129 | continue 130 | } 131 | res := cl.Node[i].Do("CLUSTER INFO") 132 | buf, ok := res.([]byte) 133 | if !ok { 134 | return false 135 | } 136 | if !bytes.Contains(buf, []byte("cluster_state:ok")) { 137 | return false 138 | } 139 | res = cl.Node[i].Do("INFO REPLICATION") 140 | buf, ok = res.([]byte) 141 | if !ok { 142 | return false 143 | } 144 | if !bytes.Contains(buf, []byte("role:master")) && 145 | !bytes.Contains(buf, []byte("master_link_status:up")) { 146 | return false 147 | } 148 | res = cl.Node[i].Do("CLUSTER NODES") 149 | buf, ok = res.([]byte) 150 | if !ok { 151 | return false 152 | } 153 | masters := 0 154 | for _, line := range bytes.Split(buf, []byte("\n")) { 155 | hasStopped := false 156 | for _, j := range stopped { 157 | if bytes.HasPrefix(line, cl.Node[j].NodeId) { 158 | if !bytes.Contains(line, []byte("fail ")) { 159 | return false 160 | } 161 | hasStopped = true 162 | } 163 | } 164 | if !hasStopped && bytes.Contains(line, []byte("master")) && !bytes.Contains(line, []byte("fail")) { 165 | masters++ 166 | } 167 | } 168 | if masters != 3+(len(cl.Node)-6) { 169 | return false 170 | 171 | } 172 | infos, _ := redisclusterutil.ParseClusterNodes(res) 173 | hash := infos.HashSum() 174 | if hash != hashsum && hashsum != 0 { 175 | return false 176 | } 177 | hashsum = hash 178 | } 179 | return true 180 | } 181 | 182 | // AttemptFailover tries to issue CLUSTER FAILOVER FORCE to slaves of falled masters. 183 | // This is work around replication bug present in Redis till 4.0.9 (including) 184 | func (cl *Cluster) AttemptFailover() { 185 | for i := range cl.Node[:6] { 186 | if !cl.Node[i].RunningNow() { 187 | var effectivePortMaster, effectivePortSlave uint16 188 | if tlsCluster { 189 | effectivePortMaster = cl.Node[i].TlsPort 190 | } else { 191 | effectivePortMaster = cl.Node[i].Port 192 | } 193 | slave := (i + 3) % 6 194 | if tlsCluster { 195 | effectivePortSlave = cl.Node[slave].TlsPort 196 | } else { 197 | effectivePortSlave = cl.Node[slave].Port 198 | } 199 | log.Printf("FORCE FAILOVER %d=>%d", effectivePortMaster, effectivePortSlave) 200 | cl.Node[slave].Do("CLUSTER FAILOVER", "FORCE") 201 | } 202 | } 203 | } 204 | 205 | // InitMoveSlot issues start for slot migration. 206 | func (cl *Cluster) InitMoveSlot(slot, from, to int) { 207 | cl.Node[to].DoSure("CLUSTER SETSLOT", slot, "IMPORTING", cl.Node[from].NodeId) 208 | cl.Node[from].DoSure("CLUSTER SETSLOT", slot, "MIGRATING", cl.Node[to].NodeId) 209 | } 210 | 211 | // CancelMoveSlot resets slot migration. 212 | func (cl *Cluster) CancelMoveSlot(slot int) { 213 | for i := 0; i < 3; i++ { 214 | cl.Node[i].DoSure("CLUSTER SETSLOT", slot, "STABLE") 215 | } 216 | } 217 | 218 | // FinishMoveSlot finalizes slot migration 219 | func (cl *Cluster) FinishMoveSlot(slot, from, to int) { 220 | cl.Node[to].Do("CLUSTER SETSLOT", slot, "NODE", cl.Node[to].NodeId) 221 | cl.Node[from].Do("CLUSTER SETSLOT", slot, "NODE", cl.Node[to].NodeId) 222 | cl.Node[to].Do("CLUSTER BUMPEPOCH", "BROADCAST") // proprietary extension 223 | cl.Node[to].Do("CLUSTER BUMPEPOCH") 224 | } 225 | 226 | // MoveSlot moves slot's keys from host to host. 
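// A hedged example (the slot number is arbitrary): migrate slot 1234 from the
// first master to the second one; MoveSlot then waits for the cluster to settle.
//
//	cl.MoveSlot(1234, 0, 1)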
227 | func (cl *Cluster) MoveSlot(slot, from, to int) { 228 | cl.InitMoveSlot(slot, from, to) 229 | for { 230 | keysi := cl.Node[from].DoSure("CLUSTER GETKEYSINSLOT", slot, 100) 231 | keys := keysi.([]interface{}) 232 | if len(keys) == 0 { 233 | break 234 | } 235 | var effectivePort uint16 236 | if tlsCluster { 237 | effectivePort = cl.Node[to].TlsPort 238 | } else { 239 | effectivePort = cl.Node[to].Port 240 | } 241 | args := []interface{}{"127.0.0.1", effectivePort, nil, 0, 5000, "REPLACE", "KEYS"} 242 | args = append(args, keys...) 243 | cl.Node[from].DoSure("MIGRATE", args...) 244 | } 245 | cl.FinishMoveSlot(slot, from, to) 246 | 247 | cl.WaitClusterOk() 248 | } 249 | 250 | // StartSeventhNode start additional node 251 | func (cl *Cluster) StartSeventhNode() { 252 | var effectivePort uint16 253 | cl.Node = append(cl.Node, Node{}) 254 | if tlsCluster { 255 | effectivePort = cl.Node[0].TlsPort + 6 256 | cl.Node[6].Port = 0 257 | cl.Node[6].TlsPort = effectivePort 258 | } else { 259 | effectivePort = cl.Node[0].Port + 6 260 | cl.Node[6].Port = effectivePort 261 | } 262 | cl.Node[6].Args = []string{ 263 | "--cluster-enabled", "yes", 264 | "--cluster-config-file", "node-" + cl.Node[6].PortStr(effectivePort) + ".conf", 265 | "--cluster-node-timeout", "200", 266 | "--cluster-slave-validity-factor", "1000", 267 | "--slave-serve-stale-data", "yes", 268 | "--cluster-require-full-coverage", "no", 269 | } 270 | if tlsCluster { 271 | cl.Node[6].Args = append([]string{ 272 | "--tls-cluster", "yes", 273 | "--tls-replication", "yes", 274 | }, cl.Node[6].Args...) 275 | cl.Node[6].Conn.TLSEnabled = true 276 | cl.Node[6].Conn.TLSConfig = &tls.Config{InsecureSkipVerify: true} 277 | } 278 | 279 | cl.Node[6].Start() 280 | cl.Node[6].SetupNodeId() 281 | cl.Node[6].DoSure("CLUSTER SET-CONFIG-EPOCH", 0) 282 | for i := 0; i < 6; i++ { 283 | cl.Node[i].DoSure("CLUSTER MEET", "127.0.0.1", effectivePort) 284 | } 285 | time.Sleep(3 * time.Second) 286 | cl.WaitClusterOk() 287 | } 288 | 289 | // StopSeventhNode stops additional node 290 | func (cl *Cluster) StopSeventhNode() { 291 | cl.Node[6].Stop() 292 | cl.Node = cl.Node[:6] 293 | } 294 | 295 | // SetupNodeId learns nodeid of this node 296 | func (n *Node) SetupNodeId() { 297 | res := n.Do("CLUSTER NODES") 298 | lines := bytes.Split(res.([]byte), []byte{'\n'}) 299 | for _, line := range lines { 300 | if bytes.Contains(line, []byte("myself")) { 301 | n.NodeId = bytes.Split(line, []byte(" "))[0] 302 | break 303 | } 304 | } 305 | } 306 | 307 | // AddSlots issues CLUSTER ADDSLOTS command. 308 | func (n *Node) AddSlots(from, to int) { 309 | var args = []interface{}{} 310 | for i := from; i <= to; i++ { 311 | args = append(args, i) 312 | } 313 | n.DoSure("CLUSTER ADDSLOTS", args...) 314 | } 315 | -------------------------------------------------------------------------------- /redis/request_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . 
"github.com/joomcode/redispipe/redis" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestRequestKey(t *testing.T) { 11 | var k string 12 | var ok bool 13 | 14 | k, ok = Req("GET", 1).Key() 15 | assert.Equal(t, "1", k) 16 | assert.True(t, ok) 17 | 18 | _, ok = Req("GET").Key() 19 | assert.False(t, ok) 20 | 21 | k, ok = Req("SET", 1, 2).Key() 22 | assert.Equal(t, "1", k) 23 | assert.True(t, ok) 24 | 25 | k, ok = Req("RANDOMKEY").Key() 26 | assert.Equal(t, "RANDOMKEY", k) 27 | assert.False(t, ok) 28 | 29 | k, ok = Req("EVAL", "return KEY[1]", 1, 2, 3).Key() 30 | assert.Equal(t, "2", k) 31 | assert.True(t, ok) 32 | 33 | k, ok = Req("EVALSHA", "1234abcdef", 1, 2, 3).Key() 34 | assert.Equal(t, "2", k) 35 | assert.True(t, ok) 36 | 37 | k, ok = Req("BITOP", "AND", 1, 2).Key() 38 | assert.Equal(t, "1", k) 39 | assert.True(t, ok) 40 | } 41 | 42 | func TestArgToString(t *testing.T) { 43 | var k string 44 | var ok bool 45 | 46 | k, ok = ArgToString(int(0)) 47 | assert.Equal(t, "0", k) 48 | assert.True(t, ok) 49 | 50 | k, ok = ArgToString(uint(0)) 51 | assert.Equal(t, "0", k) 52 | assert.True(t, ok) 53 | 54 | k, ok = ArgToString(uint(1)) 55 | assert.Equal(t, "1", k) 56 | assert.True(t, ok) 57 | 58 | k, ok = ArgToString(int8(6)) 59 | assert.Equal(t, "6", k) 60 | assert.True(t, ok) 61 | 62 | k, ok = ArgToString(int8(-31)) 63 | assert.Equal(t, "-31", k) 64 | assert.True(t, ok) 65 | 66 | k, ok = ArgToString(uint8(156)) 67 | assert.Equal(t, "156", k) 68 | assert.True(t, ok) 69 | 70 | k, ok = ArgToString(int16(781)) 71 | assert.Equal(t, "781", k) 72 | assert.True(t, ok) 73 | 74 | k, ok = ArgToString(int16(-3906)) 75 | assert.Equal(t, "-3906", k) 76 | assert.True(t, ok) 77 | 78 | k, ok = ArgToString(uint16(19351)) 79 | assert.Equal(t, "19351", k) 80 | assert.True(t, ok) 81 | 82 | k, ok = ArgToString(int32(97656)) 83 | assert.Equal(t, "97656", k) 84 | assert.True(t, ok) 85 | 86 | k, ok = ArgToString(int32(-488281)) 87 | assert.Equal(t, "-488281", k) 88 | assert.True(t, ok) 89 | 90 | k, ok = ArgToString(uint32(2441406)) 91 | assert.Equal(t, "2441406", k) 92 | assert.True(t, ok) 93 | 94 | k, ok = ArgToString(int64(12207031)) 95 | assert.Equal(t, "12207031", k) 96 | assert.True(t, ok) 97 | 98 | k, ok = ArgToString(int64(-61035156)) 99 | assert.Equal(t, "-61035156", k) 100 | assert.True(t, ok) 101 | 102 | k, ok = ArgToString(uint64(305175781)) 103 | assert.Equal(t, "305175781", k) 104 | assert.True(t, ok) 105 | 106 | k, ok = ArgToString(int64(9223372036854775807)) 107 | assert.Equal(t, "9223372036854775807", k) 108 | assert.True(t, ok) 109 | 110 | k, ok = ArgToString(int64(-9223372036854775808)) 111 | assert.Equal(t, "-9223372036854775808", k) 112 | assert.True(t, ok) 113 | 114 | k, ok = ArgToString(uint64(18446744073709551615)) 115 | assert.Equal(t, "18446744073709551615", k) 116 | assert.True(t, ok) 117 | 118 | k, ok = ArgToString(float32(0.0)) 119 | assert.Equal(t, "0", k) 120 | assert.True(t, ok) 121 | 122 | k, ok = ArgToString(float32(0.25)) 123 | assert.Equal(t, "0.25", k) 124 | assert.True(t, ok) 125 | 126 | k, ok = ArgToString(float32(-10000.25)) 127 | assert.Equal(t, "-10000.25", k) 128 | assert.True(t, ok) 129 | 130 | k, ok = ArgToString(float64(0.0)) 131 | assert.Equal(t, "0", k) 132 | assert.True(t, ok) 133 | 134 | k, ok = ArgToString(float64(0.25)) 135 | assert.Equal(t, "0.25", k) 136 | assert.True(t, ok) 137 | 138 | k, ok = ArgToString(float64(-10000.25)) 139 | assert.Equal(t, "-10000.25", k) 140 | assert.True(t, ok) 141 | 142 | k, ok = ArgToString(true) 143 | assert.Equal(t, "1", k) 
144 | assert.True(t, ok) 145 | 146 | k, ok = ArgToString(false) 147 | assert.Equal(t, "0", k) 148 | assert.True(t, ok) 149 | 150 | k, ok = ArgToString(nil) 151 | assert.Equal(t, "", k) 152 | assert.True(t, ok) 153 | 154 | k, ok = ArgToString("asdf") 155 | assert.Equal(t, "asdf", k) 156 | assert.True(t, ok) 157 | 158 | k, ok = ArgToString([]byte("asdf")) 159 | assert.Equal(t, "asdf", k) 160 | assert.True(t, ok) 161 | 162 | k, ok = ArgToString(make(chan int)) 163 | assert.Equal(t, "", k) 164 | assert.False(t, ok) 165 | } 166 | 167 | func TestAppendRequestArgument(t *testing.T) { 168 | var k []byte 169 | var err error 170 | 171 | k, err = AppendRequest(nil, Req("CMD", int(0))) 172 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$1\r\n0\r\n"), k) 173 | assert.Nil(t, err) 174 | 175 | k, err = AppendRequest(nil, Req("CMD", uint(1))) 176 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$1\r\n1\r\n"), k) 177 | assert.Nil(t, err) 178 | 179 | k, err = AppendRequest(nil, Req("CMD", int8(6))) 180 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$1\r\n6\r\n"), k) 181 | assert.Nil(t, err) 182 | 183 | k, err = AppendRequest(nil, Req("CMD", int8(-31))) 184 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$3\r\n-31\r\n"), k) 185 | assert.Nil(t, err) 186 | 187 | k, err = AppendRequest(nil, Req("CMD", uint8(156))) 188 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$3\r\n156\r\n"), k) 189 | assert.Nil(t, err) 190 | 191 | k, err = AppendRequest(nil, Req("CMD", int16(781))) 192 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$3\r\n781\r\n"), k) 193 | assert.Nil(t, err) 194 | 195 | k, err = AppendRequest(nil, Req("CMD", int16(-3906))) 196 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$5\r\n-3906\r\n"), k) 197 | assert.Nil(t, err) 198 | 199 | k, err = AppendRequest(nil, Req("CMD", uint16(19351))) 200 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$5\r\n19351\r\n"), k) 201 | assert.Nil(t, err) 202 | 203 | k, err = AppendRequest(nil, Req("CMD", int32(97656))) 204 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$5\r\n97656\r\n"), k) 205 | assert.Nil(t, err) 206 | 207 | k, err = AppendRequest(nil, Req("CMD", int32(-488281))) 208 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$7\r\n-488281\r\n"), k) 209 | assert.Nil(t, err) 210 | 211 | k, err = AppendRequest(nil, Req("CMD", uint32(2441406))) 212 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$7\r\n2441406\r\n"), k) 213 | assert.Nil(t, err) 214 | 215 | k, err = AppendRequest(nil, Req("CMD", int64(12207031))) 216 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$8\r\n12207031\r\n"), k) 217 | assert.Nil(t, err) 218 | 219 | k, err = AppendRequest(nil, Req("CMD", int64(-61035156))) 220 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$9\r\n-61035156\r\n"), k) 221 | assert.Nil(t, err) 222 | 223 | k, err = AppendRequest(nil, Req("CMD", uint64(305175781))) 224 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$9\r\n305175781\r\n"), k) 225 | assert.Nil(t, err) 226 | 227 | k, err = AppendRequest(nil, Req("CMD", int64(9223372036854775807))) 228 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$19\r\n9223372036854775807\r\n"), k) 229 | assert.Nil(t, err) 230 | 231 | k, err = AppendRequest(nil, Req("CMD", int64(-9223372036854775808))) 232 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$20\r\n-9223372036854775808\r\n"), k) 233 | assert.Nil(t, err) 234 | 235 | k, err = AppendRequest(nil, Req("CMD", uint64(18446744073709551615))) 236 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$20\r\n18446744073709551615\r\n"), k) 237 | assert.Nil(t, err) 238 | 239 | k, err = AppendRequest(nil, Req("CMD", float32(0.0))) 240 | assert.Equal(t, 
[]byte("*2\r\n$3\r\nCMD\r\n$1\r\n0\r\n"), k) 241 | assert.Nil(t, err) 242 | 243 | k, err = AppendRequest(nil, Req("CMD", float32(0.25))) 244 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$4\r\n0.25\r\n"), k) 245 | assert.Nil(t, err) 246 | 247 | k, err = AppendRequest(nil, Req("CMD", float32(-10000.25))) 248 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$9\r\n-10000.25\r\n"), k) 249 | assert.Nil(t, err) 250 | 251 | k, err = AppendRequest(nil, Req("CMD", float64(0.0))) 252 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$1\r\n0\r\n"), k) 253 | assert.Nil(t, err) 254 | 255 | k, err = AppendRequest(nil, Req("CMD", float64(0.25))) 256 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$4\r\n0.25\r\n"), k) 257 | assert.Nil(t, err) 258 | 259 | k, err = AppendRequest(nil, Req("CMD", float64(-10000.25))) 260 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$9\r\n-10000.25\r\n"), k) 261 | assert.Nil(t, err) 262 | 263 | k, err = AppendRequest(nil, Req("CMD", true)) 264 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$1\r\n1\r\n"), k) 265 | assert.Nil(t, err) 266 | 267 | k, err = AppendRequest(nil, Req("CMD", false)) 268 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$1\r\n0\r\n"), k) 269 | assert.Nil(t, err) 270 | 271 | k, err = AppendRequest(nil, Req("CMD", nil)) 272 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$0\r\n\r\n"), k) 273 | assert.Nil(t, err) 274 | 275 | k, err = AppendRequest(nil, Req("CMD", "")) 276 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$0\r\n\r\n"), k) 277 | assert.Nil(t, err) 278 | 279 | k, err = AppendRequest(nil, Req("CMD", "asdf")) 280 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$4\r\nasdf\r\n"), k) 281 | assert.Nil(t, err) 282 | 283 | k, err = AppendRequest(nil, Req("CMD", "abcdefghijklmnopqrstuvwxyz")) 284 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$26\r\nabcdefghijklmnopqrstuvwxyz\r\n"), k) 285 | assert.Nil(t, err) 286 | 287 | k, err = AppendRequest(nil, Req("CMD", []byte("asdf"))) 288 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$4\r\nasdf\r\n"), k) 289 | assert.Nil(t, err) 290 | 291 | big := make([]byte, 12345) 292 | k, err = AppendRequest(nil, Req("CMD", big)) 293 | res := []byte("*2\r\n$3\r\nCMD\r\n$12345\r\n") 294 | res = append(append(res, big...), "\r\n"...) 
295 | assert.Equal(t, res, k) 296 | assert.Nil(t, err) 297 | 298 | k, err = AppendRequest(nil, Req("CMD", make(chan int))) 299 | assert.Len(t, k, 0) 300 | assert.NotNil(t, err) 301 | rerr := AsErrorx(err) 302 | assert.True(t, rerr.IsOfType(ErrArgumentType)) 303 | } 304 | 305 | func TestAppendRequestCmdAndArgcount(t *testing.T) { 306 | var k []byte 307 | var err error 308 | 309 | k, err = AppendRequest(nil, Req("CMD", "hi")) 310 | assert.Equal(t, []byte("*2\r\n$3\r\nCMD\r\n$2\r\nhi\r\n"), k) 311 | assert.Nil(t, err) 312 | 313 | k, err = AppendRequest(nil, Req("CMD", "hi", "ho")) 314 | assert.Equal(t, []byte("*3\r\n$3\r\nCMD\r\n$2\r\nhi\r\n$2\r\nho\r\n"), k) 315 | assert.Nil(t, err) 316 | 317 | k, err = AppendRequest(nil, Req("CMD", "hi", "ho", "hu")) 318 | assert.Equal(t, []byte("*4\r\n$3\r\nCMD\r\n$2\r\nhi\r\n$2\r\nho\r\n$2\r\nhu\r\n"), k) 319 | assert.Nil(t, err) 320 | 321 | // split by first space 322 | k, err = AppendRequest(nil, Req("CMD ONE", "hi")) 323 | assert.Equal(t, []byte("*3\r\n$3\r\nCMD\r\n$3\r\nONE\r\n$2\r\nhi\r\n"), k) 324 | assert.Nil(t, err) 325 | 326 | k, err = AppendRequest(nil, Req("CMD ONE", "hi", "ho")) 327 | assert.Equal(t, []byte("*4\r\n$3\r\nCMD\r\n$3\r\nONE\r\n$2\r\nhi\r\n$2\r\nho\r\n"), k) 328 | assert.Nil(t, err) 329 | 330 | k, err = AppendRequest(nil, Req("CMD ONE", "hi", "ho", "hu")) 331 | assert.Equal(t, []byte("*5\r\n$3\r\nCMD\r\n$3\r\nONE\r\n$2\r\nhi\r\n$2\r\nho\r\n$2\r\nhu\r\n"), k) 332 | assert.Nil(t, err) 333 | 334 | // no split by second space 335 | k, err = AppendRequest(nil, Req("CMD ONE TWO", "hi")) 336 | assert.Equal(t, []byte("*3\r\n$3\r\nCMD\r\n$7\r\nONE TWO\r\n$2\r\nhi\r\n"), k) 337 | assert.Nil(t, err) 338 | 339 | k, err = AppendRequest(nil, Req("CMD ONE TWO", "hi", "ho")) 340 | assert.Equal(t, []byte("*4\r\n$3\r\nCMD\r\n$7\r\nONE TWO\r\n$2\r\nhi\r\n$2\r\nho\r\n"), k) 341 | assert.Nil(t, err) 342 | 343 | k, err = AppendRequest(nil, Req("CMD ONE TWO", "hi", "ho", "hu")) 344 | assert.Equal(t, []byte("*5\r\n$3\r\nCMD\r\n$7\r\nONE TWO\r\n$2\r\nhi\r\n$2\r\nho\r\n$2\r\nhu\r\n"), k) 345 | assert.Nil(t, err) 346 | } 347 | -------------------------------------------------------------------------------- /rediscluster/redisclusterutil/cluster.go: -------------------------------------------------------------------------------- 1 | package redisclusterutil 2 | 3 | import ( 4 | "fmt" 5 | "hash/fnv" 6 | "sort" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/joomcode/redispipe/redis" 11 | ) 12 | 13 | // SlotMoving is a flag about direction of slot migration. 14 | type SlotMoving byte 15 | 16 | const ( 17 | // SlotMigrating indicates slot is migrating from this instance. 18 | SlotMigrating SlotMoving = 1 19 | // SlotImporting indicates slot is importing into this instance. 20 | SlotImporting SlotMoving = 2 21 | ) 22 | 23 | // SlotsRange represents slice of slots 24 | type SlotsRange struct { 25 | From int 26 | To int 27 | Addrs []string // addresses of hosts hosting this range of slots. First address is a master, and other are slaves. 28 | } 29 | 30 | // ParseSlotsInfo parses result of CLUSTER SLOTS command 31 | func ParseSlotsInfo(res interface{}) ([]SlotsRange, error) { 32 | const NumSlots = 1 << 14 33 | if err := redis.AsError(res); err != nil { 34 | return nil, err 35 | } 36 | 37 | errf := func(f string, args ...interface{}) ([]SlotsRange, error) { 38 | msg := fmt.Sprintf(f, args...) 
39 | err := redis.ErrResponseUnexpected.New(msg) 40 | return nil, err 41 | } 42 | 43 | var rawranges []interface{} 44 | var ok bool 45 | if rawranges, ok = res.([]interface{}); !ok { 46 | return errf("type is not array: %+v", res) 47 | } 48 | if len(rawranges) == 0 { 49 | return errf("host doesn't know about slots (probably it is not in cluster)") 50 | } 51 | 52 | ranges := make([]SlotsRange, len(rawranges)) 53 | for i, rawelem := range rawranges { 54 | var rawrange []interface{} 55 | var ok bool 56 | var i64 int64 57 | r := SlotsRange{} 58 | if rawrange, ok = rawelem.([]interface{}); !ok || len(rawrange) < 3 { 59 | return errf("format mismatch: res[%d]=%+v", i, rawelem) 60 | } 61 | if i64, ok = rawrange[0].(int64); !ok || i64 < 0 || i64 >= NumSlots { 62 | return errf("format mismatch: res[%d][0]=%+v", i, rawrange[0]) 63 | } 64 | r.From = int(i64) 65 | if i64, ok = rawrange[1].(int64); !ok || i64 < 0 || i64 >= NumSlots { 66 | return errf("format mismatch: res[%d][1]=%+v", i, rawrange[1]) 67 | } 68 | r.To = int(i64) 69 | if r.From > r.To { 70 | return errf("range wrong: res[%d]=%+v (%+v)", i, rawrange) 71 | } 72 | for j := 2; j < len(rawrange); j++ { 73 | rawaddr, ok := rawrange[j].([]interface{}) 74 | if !ok || len(rawaddr) < 2 { 75 | return errf("address format mismatch: res[%d][%d] = %+v, missing lines", 76 | i, j, rawrange[j]) 77 | } 78 | host, hasHost := rawaddr[0].([]byte) 79 | port, hasPort := rawaddr[1].(int64) 80 | if !hasHost && hasPort && port == 0 { 81 | // Due to possible Redis cluster misconfiguration we can receive zero address 82 | // for one of the replicas. It is totally fine to skip the misconfigured replica 83 | // and go with the remaining ones without inducing denial of service. 84 | arr, isArray := rawaddr[0].([]interface{}) 85 | if isArray && len(arr) == 0 { 86 | continue 87 | } 88 | } 89 | if !hasHost || !hasPort || port <= 0 || port+10000 > 65535 { 90 | return errf("address format mismatch: res[%d][%d] = %+v", 91 | i, j, rawaddr) 92 | } 93 | r.Addrs = append(r.Addrs, string(host)+":"+strconv.Itoa(int(port))) 94 | } 95 | sort.Strings(r.Addrs[1:]) 96 | ranges[i] = r 97 | } 98 | sort.Slice(ranges, func(i, j int) bool { 99 | return ranges[i].From < ranges[j].From 100 | }) 101 | return ranges, nil 102 | } 103 | 104 | // InstanceInfo represents line of CLUSTER NODES result. 105 | type InstanceInfo struct { 106 | Uuid string 107 | Addr string 108 | IP string 109 | Port int 110 | Port2 int 111 | Fail bool 112 | MySelf bool 113 | // NoAddr means that node were missed due to misconfiguration. 114 | // More probably, redis instance with other UUID were started on the same port. 115 | NoAddr bool 116 | SlaveOf string 117 | Connected bool 118 | Slots [][2]uint16 119 | Migrating []SlotMigration 120 | } 121 | 122 | // InstanceInfos represents CLUSTER NODES result 123 | type InstanceInfos []InstanceInfo 124 | 125 | // SlotMigration represents one migrating slot. 126 | type SlotMigration struct { 127 | Number uint16 128 | Moving SlotMoving 129 | Peer string 130 | } 131 | 132 | // HasAddr returns true if it is addressless instance (replaced with instance with other UUID), 133 | // it will have no port 134 | func (ii *InstanceInfo) HasAddr() bool { 135 | return !ii.NoAddr && ii.Port != 0 136 | } 137 | 138 | // AddrValid returns true if instance is successfully configure. 139 | // Note that it could differ from HasAddr in some corner cases. 
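// A hypothetical filtering sketch over parsed CLUSTER NODES output:
//
//	for _, ii := range infos {
//	    if ii.AddrValid() && ii.IsMaster() {
//	        // ii.Addr is a usable "ip:port" of a master
//	    }
//	}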
140 | func (ii *InstanceInfo) AddrValid() bool { 141 | return ii.IP != "" && ii.Port != 0 142 | } 143 | 144 | // IsMaster returns if this instance is master 145 | func (ii *InstanceInfo) IsMaster() bool { 146 | return ii.SlaveOf == "" 147 | } 148 | 149 | // HashSum calculates signature of cluster configuration. 150 | // It assumes, configuration were sorted in some way. 151 | // If configuration fetched from all hosts has same signature, then cluster is in stable state. 152 | func (iis InstanceInfos) HashSum() uint64 { 153 | hsh := fnv.New64a() 154 | for _, ii := range iis { 155 | if !ii.AddrValid() && len(ii.Slots) == 0 { // looks like redis-cli also ignores hosts without slots 156 | continue 157 | } 158 | fmt.Fprintf(hsh, "%s\t%s\t%d\t%v\t%s", ii.Uuid, ii.Addr, ii.Port2, ii.Fail, ii.SlaveOf) 159 | for _, slots := range ii.Slots { 160 | fmt.Fprintf(hsh, "\t%d-%d", slots[0], slots[1]) 161 | } 162 | hsh.Write([]byte("\n")) 163 | } 164 | return hsh.Sum64() 165 | } 166 | 167 | // CollectAddressesAndMigrations collects all node's addresses and all slot migrations. 168 | func (iis InstanceInfos) CollectAddressesAndMigrations(addrs map[string]struct{}, migrating map[uint16]struct{}) { 169 | for _, ii := range iis { 170 | if ii.AddrValid() { 171 | addrs[ii.Addr] = struct{}{} 172 | } 173 | if migrating != nil { 174 | for _, m := range ii.Migrating { 175 | migrating[m.Number] = struct{}{} 176 | } 177 | } 178 | } 179 | } 180 | 181 | // SlotsRanges returns sorted SlotsRange-s made from slots information of cluster configuration. 182 | func (iis InstanceInfos) SlotsRanges() []SlotsRange { 183 | uuid2addrs := make(map[string][]string) 184 | for _, ii := range iis { 185 | if !ii.AddrValid() { 186 | continue 187 | } 188 | if ii.IsMaster() { 189 | uuid2addrs[ii.Uuid] = append([]string{ii.Addr}, uuid2addrs[ii.Uuid]...) 190 | } else { 191 | uuid2addrs[ii.SlaveOf] = append(uuid2addrs[ii.SlaveOf], ii.Addr) 192 | } 193 | } 194 | ranges := make([]SlotsRange, 0, 16) 195 | for _, ii := range iis { 196 | if !ii.AddrValid() || !ii.IsMaster() || len(ii.Slots) == 0 { 197 | continue 198 | } 199 | for _, slots := range ii.Slots { 200 | ranges = append(ranges, SlotsRange{ 201 | From: int(slots[0]), 202 | To: int(slots[1]), 203 | Addrs: uuid2addrs[ii.Uuid], 204 | }) 205 | } 206 | } 207 | sort.Slice(ranges, func(i, j int) bool { 208 | return ranges[i].From < ranges[j].From 209 | }) 210 | return ranges 211 | } 212 | 213 | // MySelf returns info line for the host information were collected from. 214 | func (iis InstanceInfos) MySelf() *InstanceInfo { 215 | for _, ii := range iis { 216 | if ii.MySelf { 217 | return &ii 218 | } 219 | } 220 | return nil 221 | } 222 | 223 | // MergeWith merges sorted cluster information, giving preference to myself lines. 224 | // It could be used to obtain "union of all cluster configuration visions" in custom tools managing cluster. 
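// Both the receiver and the argument are expected to be sorted by Uuid (ParseClusterNodes returns instances in that order).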
225 | func (iis InstanceInfos) MergeWith(other InstanceInfos) InstanceInfos {
226 | 	// assume both are sorted by uuid
227 | 	// common case: they are the same
228 | 	if len(iis) == len(other) {
229 | 		res := make(InstanceInfos, len(iis))
230 | 		copy(res, iis)
231 | 		for i := range res {
232 | 			if res[i].Uuid != other[i].Uuid {
233 | 				goto RealMerge
234 | 			}
235 | 			if !res[i].MySelf && other[i].MySelf {
236 | 				res[i] = other[i]
237 | 			}
238 | 		}
239 | 		return res
240 | 	}
241 | RealMerge:
242 | 	res := make(InstanceInfos, 0, len(iis))
243 | 	i, j := 0, 0
244 | 	for i < len(iis) && j < len(other) {
245 | 		if iis[i].Uuid == other[j].Uuid {
246 | 			if !other[j].MySelf {
247 | 				res = append(res, iis[i])
248 | 			} else {
249 | 				res = append(res, other[j])
250 | 			}
251 | 			i++
252 | 			j++
253 | 		} else if iis[i].Uuid < other[j].Uuid {
254 | 			res = append(res, iis[i])
255 | 			i++
256 | 		} else {
257 | 			res = append(res, other[j])
258 | 			j++
259 | 		}
260 | 	}
261 | 	if i < len(iis) {
262 | 		res = append(res, iis[i:]...)
263 | 	}
264 | 	if j < len(other) {
265 | 		res = append(res, other[j:]...)
266 | 	}
267 | 	return res
268 | }
269 | 
270 | // Hosts returns the set of instance addresses.
271 | func (iis InstanceInfos) Hosts() []string {
272 | 	res := make([]string, 0, len(iis))
273 | 	for i := range iis {
274 | 		if iis[i].AddrValid() {
275 | 			res = append(res, iis[i].Addr)
276 | 		}
277 | 	}
278 | 	return res
279 | }
280 | 
281 | // ParseClusterNodes parses the result of the CLUSTER NODES command.
282 | func ParseClusterNodes(res interface{}) (InstanceInfos, error) {
283 | 	var err error
284 | 	if err = redis.AsError(res); err != nil {
285 | 		return nil, err
286 | 	}
287 | 
288 | 	errf := func(f string, args ...interface{}) (InstanceInfos, error) {
289 | 		msg := fmt.Sprintf(f, args...)
290 | 		err := redis.ErrResponseUnexpected.New(msg)
291 | 		return nil, err
292 | 	}
293 | 
294 | 	infob, ok := res.([]byte)
295 | 	if !ok {
296 | 		return errf("type is not []byte, but %T", res)
297 | 	}
298 | 	info := string(infob)
299 | 	lines := strings.Split(info, "\n")
300 | 	infos := InstanceInfos{}
301 | 	for _, line := range lines {
302 | 		if len(line) < 16 {
303 | 			continue
304 | 		}
305 | 		parts := strings.Split(line, " ")
306 | 		ipp := strings.Split(parts[1], "@")
307 | 		addrparts := strings.Split(ipp[0], ":")
308 | 		if len(ipp) != 2 || len(addrparts) != 2 {
309 | 			return errf("ip-port is not in 'ip:port@port2' format, but %q", line)
310 | 		}
311 | 		node := InstanceInfo{
312 | 			Uuid: parts[0],
313 | 			Addr: ipp[0],
314 | 		}
315 | 		node.IP = addrparts[0]
316 | 		node.Port, _ = strconv.Atoi(addrparts[1])
317 | 		node.Port2, _ = strconv.Atoi(ipp[1])
318 | 
319 | 		node.Fail = strings.Contains(parts[2], "fail")
320 | 		if strings.Contains(parts[2], "slave") {
321 | 			node.SlaveOf = parts[3]
322 | 		}
323 | 		node.NoAddr = strings.Contains(parts[2], "noaddr")
324 | 		node.MySelf = strings.Contains(parts[2], "myself")
325 | 		node.Connected = parts[7] == "connected"
326 | 
327 | 		for _, slot := range parts[8:] {
328 | 			if slot[0] == '[' {
329 | 				var uuid string
330 | 				var slotn int
331 | 				dir := SlotImporting
332 | 
333 | 				if ix := strings.Index(slot, "-<-"); ix != -1 {
334 | 					slotn, err = strconv.Atoi(slot[1:ix])
335 | 					if err != nil {
336 | 						return errf("slot number is not an integer: %q", slot[1:ix])
337 | 					}
338 | 					uuid = slot[ix+3 : len(slot)-1]
339 | 				} else if ix = strings.Index(slot, "->-"); ix != -1 {
340 | 					slotn, err = strconv.Atoi(slot[1:ix])
341 | 					if err != nil {
342 | 						return errf("slot number is not an integer: %q", slot[1:ix])
343 | 					}
344 | 					uuid = slot[ix+3 : len(slot)-1]
345 | 					dir = SlotMigrating
346 | 				}
347 | 				migrating := 
SlotMigration{ 348 | Number: uint16(slotn), 349 | Moving: dir, 350 | Peer: uuid, 351 | } 352 | node.Migrating = append(node.Migrating, migrating) 353 | } else if ix := strings.IndexByte(slot, '-'); ix != -1 { 354 | from, err := strconv.Atoi(slot[:ix]) 355 | if err != nil { 356 | return errf("slot number is not an integer: %q", slot) 357 | } 358 | to, err := strconv.Atoi(slot[ix+1:]) 359 | if err != nil { 360 | return errf("slot number is not an integer: %q", slot) 361 | } 362 | node.Slots = append(node.Slots, [2]uint16{uint16(from), uint16(to)}) 363 | } else { 364 | slotn, err := strconv.Atoi(slot) 365 | if err != nil { 366 | return errf("slot number is not an integer: %q", slot) 367 | } 368 | node.Slots = append(node.Slots, [2]uint16{uint16(slotn), uint16(slotn)}) 369 | } 370 | } 371 | infos = append(infos, node) 372 | } 373 | sort.Slice(infos, func(i, j int) bool { 374 | return infos[i].Uuid < infos[j].Uuid 375 | }) 376 | return infos, nil 377 | } 378 | -------------------------------------------------------------------------------- /rediscluster/mapping.go: -------------------------------------------------------------------------------- 1 | package rediscluster 2 | 3 | import ( 4 | "crypto/tls" 5 | "fmt" 6 | "sync/atomic" 7 | "unsafe" 8 | 9 | "github.com/joomcode/errorx" 10 | 11 | "github.com/joomcode/redispipe/redis" 12 | "github.com/joomcode/redispipe/rediscluster/redisclusterutil" 13 | "github.com/joomcode/redispipe/redisconn" 14 | ) 15 | 16 | // storeConfig atomically stores config 17 | func (c *Cluster) storeConfig(cfg *clusterConfig) { 18 | p := (*unsafe.Pointer)(unsafe.Pointer(&c.config)) 19 | atomic.StorePointer(p, unsafe.Pointer(cfg)) 20 | } 21 | 22 | // getConfig loads config atomically 23 | func (c *Cluster) getConfig() *clusterConfig { 24 | p := (*unsafe.Pointer)(unsafe.Pointer(&c.config)) 25 | return (*clusterConfig)(atomic.LoadPointer(p)) 26 | } 27 | 28 | // ClusterHandle is used to wrap cluster's handle and set it as connection's handle. 29 | // You can use it in connection's logging. 30 | type ClusterHandle struct { 31 | Handle interface{} 32 | Address string 33 | N int 34 | } 35 | 36 | // newNode creates handle for a connection, that will be established in a future. 37 | func (c *Cluster) newNode(addr string, initial bool) (*node, error) { 38 | var err error 39 | connectionAddr := addr 40 | 41 | // If redis hosts are mentioned by names, a couple of connections will be established and closed shortly. 42 | // Let's resolve them to ip addresses. 43 | connectionAddr, err = redisclusterutil.Resolve(connectionAddr) 44 | if err != nil { 45 | return nil, ErrAddressNotResolved.WrapWithNoMessage(err) 46 | } 47 | 48 | nodeOpts, err := c.nodeOpts(addr) 49 | if err != nil { 50 | return nil, err 51 | } 52 | 53 | node := &node{ 54 | opts: *nodeOpts, 55 | addr: addr, 56 | refcnt: 1, 57 | } 58 | node.opts.AsyncDial = true 59 | node.conns = make([]*redisconn.Connection, c.opts.ConnsPerHost) 60 | for i := range node.conns { 61 | node.opts.Handle = ClusterHandle{c.opts.Handle, addr, i} 62 | node.conns[i], err = redisconn.Connect(c.ctx, connectionAddr, node.opts) 63 | if err != nil { 64 | if initial { 65 | return nil, err 66 | } 67 | // Since we are connected in async mode, there are should no be 68 | // errors. If there is error, it is configuration error. 69 | // There could no be configuration error after start. 
70 | panic(err) 71 | } 72 | } 73 | return node, nil 74 | } 75 | 76 | func (c *Cluster) nodeOpts(addr string) (*redisconn.Opts, error) { 77 | nodeOpts := c.opts.HostOpts 78 | 79 | if !nodeOpts.TLSEnabled { 80 | return &nodeOpts, nil 81 | } 82 | 83 | originalHost, err := redisclusterutil.GetHost(addr) 84 | if err != nil { 85 | return nil, ErrAddressHostname.WrapWithNoMessage(err) 86 | } 87 | 88 | if !redisclusterutil.IsIPAddress(originalHost) { 89 | // preserve original hostname for TLS verification 90 | if nodeOpts.TLSConfig != nil { 91 | nodeOpts.TLSConfig = nodeOpts.TLSConfig.Clone() 92 | } else { 93 | nodeOpts.TLSConfig = &tls.Config{} 94 | } 95 | nodeOpts.TLSConfig.ServerName = originalHost 96 | } 97 | 98 | return &nodeOpts, nil 99 | } 100 | 101 | type connThen func(conn *redisconn.Connection, err error) 102 | 103 | // Call callback with connection to specified address. 104 | // If connection is already established, callback will be called immediately. 105 | // Otherwise, callback will be called after connection established. 106 | func (c *Cluster) ensureConnForAddress(addr string, then connThen) { 107 | node := c.getConfig().nodes[addr] 108 | if node != nil { 109 | // there is node for address, so call callback now. 110 | conn := node.getConn(c.opts.ConnHostPolicy, preferConnected, nil) 111 | if conn != nil { 112 | then(conn, nil) 113 | } else { 114 | err := c.err(ErrNoAliveConnection).WithProperty(redis.EKAddress, addr) 115 | then(nil, err) 116 | } 117 | return 118 | } 119 | 120 | c.nodeWait.Lock() 121 | defer c.nodeWait.Unlock() 122 | 123 | if future, ok := c.nodeWait.promises[addr]; ok { 124 | // there are already queued callback. 125 | // It means, goroutine with connection establishing is already run. 126 | // Add our callback to queue, and exit. 127 | *future = append(*future, then) 128 | return 129 | } 130 | 131 | // initiate queue for this address 132 | future := &[]connThen{then} 133 | promises := c.nodeWait.promises 134 | promises[addr] = future 135 | 136 | go func() { 137 | node := c.addNode(addr) 138 | var err error 139 | conn := node.getConn(c.opts.ConnHostPolicy, mayBeConnected, nil) 140 | if conn == nil { 141 | err = c.err(ErrNoAliveConnection).WithProperty(redis.EKAddress, addr) 142 | } 143 | c.nodeWait.Lock() 144 | delete(promises, addr) 145 | c.nodeWait.Unlock() 146 | // since we deleted from promises under lock, no one could append to *future any more. 147 | // lets run callbacks. 148 | for _, cb := range *future { 149 | cb(conn, err) 150 | } 151 | }() 152 | } 153 | 154 | // addNode creates host handle and adds it to cluster configuration. 155 | func (c *Cluster) addNode(addr string) *node { 156 | DebugEvent("addNode") 157 | var node *node 158 | var ok bool 159 | if node, ok = c.getConfig().nodes[addr]; ok { 160 | return node 161 | } 162 | 163 | c.m.Lock() 164 | defer c.m.Unlock() 165 | 166 | oldConf := c.getConfig() 167 | if node, ok = oldConf.nodes[addr]; ok { 168 | // someone could already create same node 169 | return node 170 | } 171 | 172 | // we could not update configuration in-place (threadsafety, bla-bla-bla). 173 | // So we have to copy configuration and node map. 
174 | newConf := *oldConf 175 | newConf.nodes = make(nodeMap, len(oldConf.nodes)+1) 176 | for a, node := range oldConf.nodes { 177 | newConf.nodes[a] = node 178 | } 179 | 180 | if node, ok = c.prevNodes[addr]; ok { 181 | atomic.AddUint32(&node.refcnt, 1) 182 | } else { 183 | node, _ = c.newNode(addr, false) 184 | } 185 | newConf.nodes[addr] = node 186 | 187 | c.storeConfig(&newConf) 188 | 189 | return node 190 | } 191 | 192 | func (cfg *clusterConfig) slot2shardno(slot uint16) uint16 { 193 | pos, off := slot/2, 16*(slot&1) 194 | sh32 := atomic.LoadUint32(&cfg.slots[pos]) 195 | sh16 := uint16((sh32 >> off) & 0x3fff) 196 | return sh16 197 | } 198 | 199 | // slotSetShard sets slot2shard mapping 200 | func (cfg *clusterConfig) slotSetShard(slot, shard uint16) { 201 | pos, off := slot/2, 16*(slot&1) 202 | sh32 := atomic.LoadUint32(&cfg.slots[pos]) 203 | if uint16((sh32>>off)&0x3fff) == shard { 204 | return 205 | } 206 | sh32 &^= 0xffff << off 207 | sh32 |= uint32(shard) << off 208 | // yep, we doesn't do any synchronization here. 209 | // If we lost update now, it will be naturally retried with other MOVED redis response. 210 | atomic.StoreUint32(&cfg.slots[pos], sh32) 211 | } 212 | 213 | func (cfg *clusterConfig) slotMarkAsking(slot uint16) { 214 | pos, off := slot/2, 16*(slot&1) 215 | sh32 := atomic.LoadUint32(&cfg.slots[pos]) 216 | flag := uint32(masterOnlyFlag << off) 217 | if sh32&flag == 0 { 218 | sh32 |= flag 219 | // Again: no synchronization, because any updates will be retried with redis responses. 220 | atomic.StoreUint32(&cfg.slots[pos], sh32) 221 | } 222 | } 223 | 224 | func (cfg *clusterConfig) slotIsAsking(slot uint16) bool { 225 | pos, off := slot/2, 16*(slot&1) 226 | sh32 := atomic.LoadUint32(&cfg.slots[pos]) 227 | flag := uint32(masterOnlyFlag << off) 228 | return sh32&flag != 0 229 | } 230 | 231 | func (cfg *clusterConfig) slot2shard(slot uint16) *shard { 232 | sh16 := cfg.slot2shardno(slot) 233 | shard := cfg.shards[sh16] 234 | return shard 235 | } 236 | 237 | var rr, rs = func() ([32]uint32, [32]uint32) { 238 | var rr [32]uint32 // {1, 1, 1, ...} 239 | var rs [32]uint32 // {1, 3, 3, ...} 240 | for i := range rr[:] { 241 | rr[i] = 1 242 | rs[i] = 3 243 | } 244 | rs[0] = 1 245 | return rr, rs 246 | }() 247 | 248 | // connForSlot returns established connection for slot, if it exists. 
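// It resolves the slot to a shard through the packed slot table, picks a connection according to
// the requested replica policy, and forces a configuration reload when no alive connection is found.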
249 | func (c *Cluster) connForSlot(slot uint16, policy ReplicaPolicyEnum, seen []*redisconn.Connection) (*redisconn.Connection, *errorx.Error) { 250 | cfg := c.getConfig() 251 | shard := cfg.slot2shard(slot) 252 | 253 | if shard == nil { 254 | return nil, c.err(ErrClusterConfigEmpty).WithProperty(redis.EKSlot, slot) 255 | } 256 | 257 | conn := c.connForPolicy(policy, seen, shard, cfg) 258 | if conn == nil { 259 | c.ForceReloading() 260 | return nil, c.err(ErrNoAliveConnection).WithProperty(redis.EKSlot, slot).WithProperty(EKPolicy, policy) 261 | } 262 | return conn, nil 263 | } 264 | 265 | func (c *Cluster) connForPolicy(policy ReplicaPolicyEnum, seen []*redisconn.Connection, shard *shard, cfg *clusterConfig) *redisconn.Connection { 266 | switch policy { 267 | case MasterOnly: 268 | return c.connForPolicyMaster(seen, shard, cfg) 269 | case MasterAndSlaves, PreferSlaves: 270 | return c.connForPolicySlaves(policy, seen, shard, cfg) 271 | default: 272 | panic("unknown policy") 273 | } 274 | } 275 | 276 | func (c *Cluster) connForPolicyMaster(seen []*redisconn.Connection, shard *shard, cfg *clusterConfig) *redisconn.Connection { 277 | nodes := cfg.nodes 278 | 279 | addr := shard.addr[0] 280 | node := nodes[addr] 281 | if node == nil { 282 | return nil 283 | } 284 | return node.getConn(c.opts.ConnHostPolicy, preferConnected, seen) 285 | } 286 | 287 | func (c *Cluster) connForPolicySlaves(policy ReplicaPolicyEnum, seen []*redisconn.Connection, shard *shard, cfg *clusterConfig) *redisconn.Connection { 288 | weights := c.weightsForPolicySlaves(policy, shard) 289 | 290 | health := atomic.LoadUint32(&shard.good) // load health information 291 | healthWeight := c.getHealthWeight(weights, health) 292 | off := c.opts.RoundRobinSeed.Current() 293 | 294 | // First, we try already established connections. 295 | // If no one found, then connections thar are connecting at the moment are tried. 296 | for _, needState := range []int{needConnected, mayBeConnected} { 297 | mask, maskWeight := health, healthWeight 298 | 299 | for mask != 0 { 300 | r := nextRng(&off, maskWeight) 301 | k := uint(0) 302 | for i, w := range weights { 303 | if mask&(1<>16) % mod 469 | } 470 | -------------------------------------------------------------------------------- /redisconn/conn_test.go: -------------------------------------------------------------------------------- 1 | package redisconn_test 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "errors" 7 | "runtime" 8 | "strconv" 9 | "strings" 10 | "testing" 11 | "time" 12 | 13 | "github.com/joomcode/errorx" 14 | 15 | "github.com/joomcode/redispipe/redis" 16 | . 
"github.com/joomcode/redispipe/redisconn" 17 | "github.com/joomcode/redispipe/testbed" 18 | "github.com/stretchr/testify/require" 19 | "github.com/stretchr/testify/suite" 20 | ) 21 | 22 | type Suite struct { 23 | suite.Suite 24 | s testbed.Server 25 | 26 | ctx context.Context 27 | ctxcancel func() 28 | } 29 | 30 | func (s *Suite) SetupSuite() { 31 | testbed.InitDir(".") 32 | s.s.Port = 45678 33 | s.s.TlsPort = 55678 34 | s.s.Start() 35 | } 36 | 37 | func (s *Suite) SetupTest() { 38 | s.s.Start() 39 | s.ctx, s.ctxcancel = context.WithTimeout(context.Background(), 55*time.Second) 40 | } 41 | 42 | func (s *Suite) TearDownTest() { 43 | s.ctxcancel() 44 | s.ctx, s.ctxcancel = nil, nil 45 | } 46 | 47 | func (s *Suite) TearDownSuite() { 48 | s.s.Stop() 49 | testbed.RmDir() 50 | } 51 | 52 | func (s *Suite) r() *require.Assertions { 53 | return s.Require() 54 | } 55 | 56 | func (s *Suite) AsError(v interface{}) *errorx.Error { 57 | s.r().IsType((*errorx.Error)(nil), v) 58 | return v.(*errorx.Error) 59 | } 60 | 61 | var defopts = Opts{ 62 | IOTimeout: 200 * time.Millisecond, 63 | } 64 | 65 | func (s *Suite) ping(conn *Connection, timeout time.Duration) interface{} { 66 | start := time.Now() 67 | res := redis.Sync{conn}.Do("PING") 68 | done := time.Now() 69 | if timeout == 0 { 70 | timeout = defopts.IOTimeout 71 | } 72 | s.r().WithinDuration(start, done, timeout*5/4) 73 | return res 74 | } 75 | 76 | func (s *Suite) goodPing(conn *Connection, timeout time.Duration) { 77 | s.Equal("PONG", s.ping(conn, timeout)) 78 | } 79 | 80 | func (s *Suite) badPing(conn *Connection, timeout time.Duration) { 81 | res := s.ping(conn, timeout) 82 | rerr := s.AsError(res) 83 | s.T().Log("badPing", rerr) 84 | s.True(rerr.HasTrait(redis.ErrTraitConnectivity)) 85 | } 86 | 87 | func (s *Suite) waitReconnect(conn *Connection) { 88 | start := time.Now() 89 | for { 90 | at := time.Now() 91 | res := redis.Sync{conn}.Do("PING") 92 | done := time.Now() 93 | s.r().WithinDuration(at, done, defopts.IOTimeout*3/2) 94 | if rerr := redis.AsErrorx(res); rerr != nil { 95 | s.True(rerr.IsOfType(ErrNotConnected)) 96 | s.r().WithinDuration(start, at, defopts.IOTimeout*2) 97 | } else { 98 | s.Equal("PONG", res) 99 | s.r().WithinDuration(start, at, defopts.IOTimeout*3) 100 | break 101 | } 102 | runtime.Gosched() 103 | } 104 | } 105 | 106 | func TestConn(t *testing.T) { 107 | suite.Run(t, new(Suite)) 108 | } 109 | 110 | func (s *Suite) TestConnects() { 111 | conn, err := Connect(s.ctx, s.s.Addr(), defopts) 112 | s.r().Nil(err) 113 | defer conn.Close() 114 | s.goodPing(conn, 0) 115 | } 116 | 117 | func (s *Suite) TestConnectsTls() { 118 | tlsopts := Opts{ 119 | IOTimeout: defopts.IOTimeout, 120 | TLSEnabled: true, 121 | TLSConfig: &tls.Config{ 122 | InsecureSkipVerify: true, 123 | }, 124 | } 125 | conn, err := Connect(s.ctx, s.s.TlsAddr(), tlsopts) 126 | s.r().Nil(err) 127 | defer conn.Close() 128 | s.goodPing(conn, 0) 129 | } 130 | 131 | func (s *Suite) TestConnectsDb() { 132 | conn1, err := Connect(s.ctx, s.s.Addr(), defopts) 133 | s.r().Nil(err) 134 | defer conn1.Close() 135 | 136 | sync1 := redis.Sync{conn1} 137 | res := sync1.Do("SET", "db", 0) 138 | s.r().NoError(redis.AsError(res)) 139 | res = sync1.Do("GET", "db") 140 | s.r().Equal(res, []byte("0")) 141 | 142 | opts2 := defopts 143 | opts2.DB = 1 144 | conn2, err := Connect(s.ctx, s.s.Addr(), opts2) 145 | s.r().Nil(err) 146 | 147 | sync2 := redis.Sync{conn2} 148 | res = sync2.Do("GET", "db") 149 | s.r().Nil(res) 150 | res = sync2.Do("SET", "db", 1) 151 | 
s.r().NoError(redis.AsError(res)) 152 | res = sync2.Do("GET", "db") 153 | s.r().Equal(res, []byte("1")) 154 | 155 | res = sync1.Do("GET", "db") 156 | s.r().Equal(res, []byte("0")) 157 | } 158 | 159 | func (s *Suite) TestFailedWithWrongDB() { 160 | opts := defopts 161 | opts.DB = 1024 162 | conn, err := Connect(s.ctx, s.s.Addr(), opts) 163 | s.r().Nil(conn) 164 | s.r().Error(err) 165 | } 166 | 167 | func (s *Suite) TestFailedWithNonEmptyPassword() { 168 | opts := defopts 169 | opts.Password = "asdf" 170 | conn, err := Connect(s.ctx, s.s.Addr(), opts) 171 | s.r().Nil(conn) 172 | s.r().Error(err) 173 | s.r().True(redis.AsErrorx(err).IsOfType(ErrAuth)) 174 | } 175 | 176 | func (s *Suite) Test_justToCover() { 177 | // this test just to increase code coverage 178 | opts := defopts 179 | opts.Handle = &struct{}{} 180 | opts.IOTimeout = -1 181 | opts.TCPKeepAlive = -1 182 | 183 | conn, err := Connect(nil, s.s.Addr(), opts) 184 | s.r().Nil(conn) 185 | s.r().Error(err) 186 | conn, err = Connect(s.ctx, "", opts) 187 | s.r().Nil(conn) 188 | s.r().Error(err) 189 | 190 | conn, err = Connect(s.ctx, "tcp://"+s.s.Addr(), opts) 191 | s.r().Nil(err) 192 | defer conn.Close() 193 | s.r().Equal("tcp://"+s.s.Addr(), conn.Addr()) 194 | s.r().NotNil(conn.Ctx()) 195 | s.r().NotEqual(s.ctx, conn.Ctx()) // because it is derived from 196 | s.r().True(conn.MayBeConnected()) 197 | s.r().True(conn.ConnectedNow()) 198 | s.r().Equal(s.s.Addr(), conn.RemoteAddr()) 199 | s.r().True(strings.HasPrefix(conn.LocalAddr(), "127.0.0.1:")) 200 | s.r().Equal(opts.Handle, conn.Handle()) 201 | 202 | var c cancelledFuture 203 | conn.Send(redis.Req("GET", "a"), &c, 0) 204 | s.r().Equal(1, c.cnt) 205 | s.r().Error(redis.AsError(c.res)) 206 | conn.SendTransaction([]redis.Request{}, &c, 0) 207 | s.r().Equal(2, c.cnt) 208 | s.r().Error(redis.AsError(c.res)) 209 | 210 | conn.Send(redis.Req("GET", make(chan int)), nil, 0) 211 | conn.SendMany([]redis.Request{redis.Req("GET", 1)}, nil, 0) 212 | } 213 | 214 | type cancelledFuture struct { 215 | cnt int 216 | res interface{} 217 | } 218 | 219 | func (c *cancelledFuture) Cancelled() error { 220 | return errors.New("cancelled") 221 | } 222 | 223 | func (c *cancelledFuture) Resolve(res interface{}, n uint64) { 224 | c.res = res 225 | c.cnt++ 226 | } 227 | 228 | func (s *Suite) TestSendMany_FailedWholeBatchBecauseOfOne() { 229 | conn, err := Connect(s.ctx, s.s.Addr(), defopts) 230 | s.r().Nil(err) 231 | defer conn.Close() 232 | 233 | results := redis.Sync{conn}.SendMany([]redis.Request{ 234 | redis.Req("GET", "a"), 235 | redis.Req("GET", "b"), 236 | redis.Req("DO_BAD_THING", make(chan int)), 237 | redis.Req("SYNC"), 238 | }) 239 | s.r().Len(results, 4) 240 | for _, res := range results { 241 | s.r().Error(redis.AsError(res)) 242 | } 243 | } 244 | 245 | func (s *Suite) TestStopped_DoesntConnectWithNegativeReconnectPause() { 246 | s.s.Stop() 247 | opts := defopts 248 | opts.ReconnectPause = -1 249 | _, err := Connect(s.ctx, s.s.Addr(), opts) 250 | s.r().NotNil(err) 251 | rerr := s.AsError(err) 252 | s.True(rerr.IsOfType(ErrDial)) 253 | } 254 | 255 | func (s *Suite) TestStopped_Reconnects() { 256 | s.s.Stop() 257 | 258 | conn, err := Connect(s.ctx, s.s.Addr(), defopts) 259 | s.r().Nil(err) 260 | defer conn.Close() 261 | 262 | s.badPing(conn, 0) 263 | 264 | s.s.Start() 265 | s.waitReconnect(conn) 266 | 267 | s.s.Stop() 268 | time.Sleep(1 * time.Millisecond) 269 | s.badPing(conn, 0) 270 | 271 | s.s.Start() 272 | s.waitReconnect(conn) 273 | } 274 | 275 | func (s *Suite) TestStopped_Reconnects2() { 276 | 
conn, err := Connect(s.ctx, s.s.Addr(), defopts) 277 | s.r().Nil(err) 278 | defer conn.Close() 279 | 280 | s.goodPing(conn, 0) 281 | 282 | s.s.Stop() 283 | time.Sleep(1 * time.Millisecond) 284 | s.badPing(conn, 0) 285 | 286 | s.s.Start() 287 | s.waitReconnect(conn) 288 | 289 | s.s.Stop() 290 | time.Sleep(1 * time.Millisecond) 291 | s.badPing(conn, 0) 292 | 293 | s.s.Start() 294 | s.waitReconnect(conn) 295 | } 296 | 297 | func (s *Suite) TestTimeout() { 298 | conn, err := Connect(s.ctx, s.s.Addr(), defopts) 299 | s.r().Nil(err) 300 | defer conn.Close() 301 | 302 | s.goodPing(conn, 0) 303 | 304 | s.s.Pause() 305 | events := 0 306 | start := time.Now() 307 | for events != 7 { 308 | res := s.ping(conn, 0) 309 | rerr := s.AsError(res) 310 | switch { 311 | case rerr.IsOfType(redis.ErrIO): 312 | events |= 1 313 | case rerr.IsOfType(ErrConnSetup): 314 | events |= 2 315 | case rerr.IsOfType(ErrNotConnected): 316 | events |= 4 317 | } 318 | s.r().WithinDuration(start, time.Now(), defopts.IOTimeout*10) 319 | } 320 | 321 | s.s.Resume() 322 | s.waitReconnect(conn) 323 | } 324 | 325 | func (s *Suite) TestTransaction() { 326 | conn, err := Connect(s.ctx, s.s.Addr(), defopts) 327 | s.r().Nil(err) 328 | defer conn.Close() 329 | 330 | sconn := redis.SyncCtx{conn} 331 | 332 | // transaction just works 333 | res, err := sconn.SendTransaction(s.ctx, []redis.Request{ 334 | redis.Req("PING"), 335 | redis.Req("PING", "asdf"), 336 | }) 337 | s.Nil(err) 338 | if s.IsType([]interface{}{}, res) && s.Len(res, 2) { 339 | s.r().Equal("PONG", res[0]) 340 | s.r().Equal([]byte("asdf"), res[1]) 341 | } 342 | 343 | s.s.DoSure("SET", "tran:x", 1) 344 | 345 | // transaction daesn't execute in case of wrong command 346 | _, err = sconn.SendTransaction(s.ctx, []redis.Request{ 347 | redis.Req("INCR", "tran:x"), 348 | redis.Req("PANG"), 349 | }) 350 | s.NotNil(err) 351 | rerr := s.AsError(err) 352 | s.True(rerr.IsOfType(redis.ErrResult)) 353 | s.True(strings.HasPrefix(rerr.Message(), "EXECABORT")) 354 | 355 | s.Equal([]byte("1"), s.s.DoSure("GET", "tran:x")) 356 | 357 | // transaction is executed partially (that is redis's behavior): 358 | // - first command executed well 359 | // - second command returns with error. 360 | res, err = sconn.SendTransaction(s.ctx, []redis.Request{ 361 | redis.Req("INCR", "tran:x"), 362 | redis.Req("HSET", "tran:x", "y", "1"), 363 | }) 364 | s.Nil(err) 365 | if s.IsType([]interface{}{}, res) && s.Len(res, 2) { 366 | s.r().Equal(int64(2), res[0]) 367 | rerr := s.AsError(res[1]) 368 | s.True(rerr.IsOfType(redis.ErrResult)) 369 | s.True(strings.HasPrefix(rerr.Message(), "WRONGTYPE")) 370 | } 371 | 372 | s.Equal([]byte("2"), s.s.DoSure("GET", "tran:x")) 373 | } 374 | 375 | func (s *Suite) TestScan() { 376 | conn, err := Connect(s.ctx, s.s.Addr(), defopts) 377 | s.r().Nil(err) 378 | defer conn.Close() 379 | 380 | sconn := redis.SyncCtx{conn} 381 | for i := 0; i < 1000; i++ { 382 | sconn.Do(s.ctx, "SET", "scan:"+strconv.Itoa(i), i) 383 | } 384 | 385 | allkeys := make(map[string]struct{}, 1000) 386 | for scanner := sconn.Scanner(s.ctx, redis.ScanOpts{Match: "scan:*"}); ; { 387 | keys, err := scanner.Next() 388 | if err != nil { 389 | s.Equal(redis.ScanEOF, err) 390 | break 391 | } 392 | for _, key := range keys { 393 | _, ok := allkeys[key] 394 | s.False(ok) 395 | allkeys[key] = struct{}{} 396 | } 397 | } 398 | s.Len(allkeys, 1000) 399 | } 400 | 401 | // stress test for "good case" when redis works without issues. 
402 | func (s *Suite) TestAllReturns_Good() { 403 | conn, err := Connect(context.Background(), s.s.Addr(), defopts) 404 | s.r().Nil(err) 405 | defer conn.Close() 406 | 407 | const N = 200 408 | const K = 200 409 | ch := make(chan struct{}, N) 410 | 411 | sconn := redis.SyncCtx{conn} 412 | for i := 0; i < N; i++ { 413 | go func(i int) { 414 | for j := 0; j < K; j++ { 415 | sij := strconv.Itoa(i*N + j) 416 | res := sconn.Do(s.ctx, "PING", sij) 417 | if !s.IsType([]byte{}, res) || !s.Equal(sij, string(res.([]byte))) { 418 | return 419 | } 420 | ress := sconn.SendMany(s.ctx, []redis.Request{ 421 | redis.Req("PING", "a"+sij), 422 | redis.Req("PING", "b"+sij), 423 | }) 424 | if !s.IsType([]byte{}, ress[0]) || !s.Equal("a"+sij, string(ress[0].([]byte))) { 425 | return 426 | } 427 | if !s.IsType([]byte{}, ress[1]) || !s.Equal("b"+sij, string(ress[1].([]byte))) { 428 | return 429 | } 430 | } 431 | ch <- struct{}{} 432 | }(i) 433 | } 434 | 435 | cnt := 0 436 | Loop: 437 | for cnt < N { 438 | select { 439 | case <-s.ctx.Done(): 440 | break Loop 441 | case <-ch: 442 | cnt++ 443 | } 444 | } 445 | s.Equal(N, cnt, "Not all goroutines finished") 446 | } 447 | 448 | // stress test for "bad case" when redis occasionally stops and stalls. 449 | func (s *Suite) TestAllReturns_Bad() { 450 | conn, err := Connect(context.Background(), s.s.Addr(), defopts) 451 | s.r().Nil(err) 452 | defer conn.Close() 453 | 454 | const N = 200 455 | fin := make(chan struct{}) 456 | goods := make([]chan bool, N) 457 | checks := make(chan bool, N) 458 | finch := make(chan struct{}, N) 459 | 460 | sconn := redis.SyncCtx{conn} 461 | ctx := s.ctx 462 | for i := 0; i < N; i++ { 463 | goods[i] = make(chan bool, 1) 464 | go func(i int) { 465 | check, good := true, true 466 | Loop: 467 | for j := 0; ; j++ { 468 | select { 469 | case good = <-goods[i]: 470 | check = true 471 | case <-fin: 472 | break Loop 473 | case <-ctx.Done(): 474 | break Loop 475 | default: 476 | } 477 | sij := strconv.Itoa(i*N + j) 478 | res := sconn.Do(ctx, "PING", sij) 479 | ress := sconn.SendMany(ctx, []redis.Request{ 480 | redis.Req("PING", "a"+sij), 481 | redis.Req("PING", "b"+sij), 482 | }) 483 | if check && good { 484 | ok := s.IsType([]byte{}, res) && s.Equal(sij, string(res.([]byte))) 485 | ok = ok && s.IsType([]byte{}, ress[0]) && s.Equal("a"+sij, string(ress[0].([]byte))) 486 | ok = ok && s.IsType([]byte{}, ress[1]) && s.Equal("b"+sij, string(ress[1].([]byte))) 487 | checks <- ok 488 | } else if check && !good { 489 | ok := s.IsType((*errorx.Error)(nil), res) 490 | ok = ok && s.IsType((*errorx.Error)(nil), ress[0]) 491 | ok = ok && s.IsType((*errorx.Error)(nil), ress[1]) 492 | checks <- ok 493 | } 494 | check = false 495 | runtime.Gosched() 496 | } 497 | finch <- struct{}{} 498 | }(i) 499 | } 500 | 501 | isAllGood := true 502 | sendgoods := func(need bool) bool { 503 | for i := 0; i < N; i++ { 504 | select { 505 | case <-s.ctx.Done(): 506 | isAllGood = false 507 | return false 508 | case goods[i] <- need: 509 | } 510 | } 511 | return true 512 | } 513 | allgood := func() bool { 514 | ok := true 515 | for i := 0; i < N; i++ { 516 | select { 517 | case <-s.ctx.Done(): 518 | isAllGood = false 519 | return false 520 | case cur := <-checks: 521 | ok = ok && cur 522 | } 523 | } 524 | isAllGood = ok 525 | return ok 526 | } 527 | 528 | time.Sleep(defopts.IOTimeout * 2) 529 | for k := 0; k < 10; k++ { 530 | if !allgood() { 531 | break 532 | } 533 | 534 | // kill redis: OS will report about disconnect 535 | s.s.Stop() 536 | time.Sleep(defopts.IOTimeout * 3) 537 | if 
!sendgoods(false) || !allgood() {
538 | 			break
539 | 		}
540 | 
541 | 		s.s.Start()
542 | 		time.Sleep(defopts.IOTimeout * 2)
543 | 		if !sendgoods(true) || !allgood() {
544 | 			break
545 | 		}
546 | 
547 | 		// stop redis: the connection stalls as if the network were losing packets.
548 | 		s.s.Pause()
549 | 		time.Sleep(defopts.IOTimeout * 2)
550 | 		if !sendgoods(false) || !allgood() {
551 | 			break
552 | 		}
553 | 
554 | 		s.s.Resume()
555 | 		time.Sleep(defopts.IOTimeout * 2)
556 | 		if !sendgoods(true) {
557 | 			break
558 | 		}
559 | 	}
560 | 
561 | 	if isAllGood {
562 | 		s.True(allgood())
563 | 	}
564 | 
565 | 	close(fin)
566 | 
567 | 	cnt := 0
568 | Loop:
569 | 	for cnt < N {
570 | 		select {
571 | 		case <-s.ctx.Done():
572 | 			break Loop
573 | 		case <-finch:
574 | 			cnt++
575 | 		}
576 | 	}
577 | 	s.Equal(N, cnt, "Not all goroutines finished")
578 | }
579 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RedisPipe
2 | 
3 | RedisPipe is a client for Redis that uses "implicit pipelining" for maximum performance.
4 | 
5 | [![Github Actions Build Status](https://github.com/joomcode/redispipe/workflows/CI/badge.svg)](https://github.com/joomcode/redispipe/actions)
6 | [![GoDoc](https://godoc.org/github.com/joomcode/redispipe?status.svg)](https://godoc.org/github.com/joomcode/redispipe)
7 | [![Report Card](https://goreportcard.com/badge/github.com/joomcode/redispipe)](https://goreportcard.com/report/github.com/joomcode/redispipe)
8 | 
9 | - [Highlights](#highlights)
10 | - [Introduction](#introduction)
11 | - [Performance](#performance)
12 | - [Limitations](#limitations)
13 | - [Installation](#installation)
14 | - [Usage](#usage)
15 | - [Contributing](#contributing)
16 | - [License](#license)
17 | 
18 | ## Highlights
19 | - scalable: the more throughput you try to get, the more efficient it is.
20 | - cares about redis: redis needs less CPU to serve the same throughput.
21 | - thread-safe: no need to lock around a connection, no need to "return it to a pool", etc.
22 | - pipelining is implicit.
23 | - transactions are supported (but without `WATCH`).
24 | - hook for custom logging.
25 | - hook for request timing reporting.
26 | 
27 | ## Introduction
28 | 
29 | https://redis.io/topics/pipelining
30 | 
31 | Pipelining improves the maximum throughput that redis can serve and reduces CPU usage both on
32 | the redis server and on the client side. Most of the gain comes from saving system CPU consumption.
33 | 
34 | But it is not always possible to use pipelining explicitly: usually there are dozens of
35 | concurrent goroutines, each sending just one request at a time. To handle the usual workload,
36 | pipelining has to be implicit.
37 | 
38 | "Implicit pipelining" is used in many drivers for other languages:
39 | - https://github.com/NodeRedis/node_redis , https://github.com/h0x91b/redis-fast-driver ,
40 | and probably other nodejs clients,
41 | - https://github.com/andrew-bn/RedisBoost - C# connector,
42 | - some C/C++ clients,
43 | - all Dart clients,
44 | - some Erlang and Elixir clients,
45 | - https://github.com/informatikr/hedis - Haskell client,
46 | - http://aredis.sourceforge.net/ - Java client explicitly made for transparent pipelining,
47 | - https://github.com/lettuce-io/lettuce-core - Java client capable of transparent pipelining,
48 | - https://github.com/aio-libs/aioredis - Python's async connector, and some other async
49 | Python clients,
50 | - Ruby's EventMachine-related connectors,
51 | - etc.
52 | 
53 | At the time this connector was created, there was no such connector for Go.
54 | All known Go redis connectors use a connection-per-request model with a connection pool,
55 | and provide only explicit pipelining.
56 | 
57 | This connector was designed as implicitly pipelined from the ground up to achieve maximum performance
58 | in a highly concurrent environment. It writes all requests to a single connection to redis, and
59 | continuously reads answers in a separate goroutine.
60 | 
61 | Note that it trades a bit of latency for throughput, and therefore may not be optimal for
62 | low-concurrency, low-request-rate usage. The write-loop latency is configurable via the `WritePause`
63 | parameter in the connection options, and can be disabled entirely or increased to higher values
64 | (150µs is the value used in production, 50µs is the default, -1 disables the write pause). The implicit
65 | runtime latency of goroutine switching still remains, however, and cannot be removed.
66 | 
67 | ## Performance
68 | 
69 | ### Single redis
70 | 
71 | ```
72 | goos: linux
73 | goarch: amd64
74 | pkg: github.com/joomcode/redispipe/rediscluster
75 | cpu: Intel(R) Core(TM) i7-10750H CPU @ 2.60GHz
76 | BenchmarkSerialGetSet/radix_pause0-12        17691     63132 ns/op      68 B/op    4 allocs/op
77 | BenchmarkSerialGetSet/redigo-12              19519     60064 ns/op     239 B/op   13 allocs/op
78 | BenchmarkSerialGetSet/redispipe-12             504   2661790 ns/op     290 B/op   12 allocs/op
79 | BenchmarkSerialGetSet/redispipe_pause0-12    13669     84925 ns/op     208 B/op   12 allocs/op
80 | BenchmarkParallelGetSet/radix-12            621036      1817 ns/op      78 B/op    4 allocs/op
81 | BenchmarkParallelGetSet/redigo-12             7466    153584 ns/op    4008 B/op   20 allocs/op
82 | BenchmarkParallelGetSet/redispipe-12        665428      1599 ns/op     231 B/op   12 allocs/op
83 | ```
84 | 
85 | You can see a couple of things:
86 | - first, redispipe has the highest performance in the Parallel benchmarks,
87 | - second, redispipe has lower performance in the single-threaded cases.
88 | 
89 | That is true: redispipe trades latency for throughput. Every single request pays additional
90 | latency for the hidden batching in the connector. But thanks to batching, more requests can be
91 | sent to and answered by redis in the same interval of time.
92 | 
93 | `SerialGetSet/redispipe_pause0` shows single-threaded results with the additional "batching"
94 | latency disabled (`WritePause: -1`). This way redispipe comes quite close to the other connectors in
95 | performance, though a small overhead of the internal design still remains. I would not recommend
96 | disabling batching (unless your use case is single-threaded), because it increases CPU usage under
97 | highly concurrent load both on the client and on the redis server.
98 | 
99 | To be fair, github.com/mediocregopher/radix/v3 is also able to perform implicit pipelining
100 | and does it by default. Therefore it is almost as fast as redispipe in ParallelGetSet.
101 | SerialGetSet is tested with pipelining disabled, because otherwise it would be as slow as
102 | redispipe without pause0.
103 | 
104 | ### Cluster
105 | 
106 | ```
107 | go test -count 1 -tags=debugredis -run FooBar -bench . -benchmem -benchtime 5s ./rediscluster
108 | goos: linux
109 | goarch: amd64
110 | pkg: github.com/joomcode/redispipe/rediscluster
111 | BenchmarkSerialGetSet/radixv2-8              200000     53585 ns/op    1007 B/op   31 allocs/op
112 | BenchmarkSerialGetSet/redigo-8               200000     40705 ns/op     246 B/op   12 allocs/op
113 | BenchmarkSerialGetSet/redispipe-8             30000    279838 ns/op     220 B/op   12 allocs/op
114 | BenchmarkSerialGetSet/redispipe_pause0-8     200000     56356 ns/op     216 B/op   12 allocs/op
115 | BenchmarkParallelGetSet/radixv2-8           1000000      9245 ns/op    1268 B/op   32 allocs/op
116 | BenchmarkParallelGetSet/redigo-8            1000000      6886 ns/op     399 B/op   13 allocs/op
117 | BenchmarkParallelGetSet/redispipe-8         5000000      1636 ns/op     219 B/op   12 allocs/op
118 | ```
119 | 
120 | With a cluster configuration, internal cluster meta-info management adds extra overhead inside
121 | the Go process, so redispipe/rediscluster tries to keep cluster-info handling on the request
122 | path almost lock-free.
123 | 
124 | While `redigo` is almost as fast in the Parallel tests, it is also limited by Redis's CPU
125 | usage (three redis processes eat three whole CPU cores). It uses a huge number of connections,
126 | and it is not obvious which non-default settings have to be tuned to achieve this result
127 | (both KeepAlive and AliveTime should be set as high as 128).
128 | ([github.com/chasex/redis-go-cluster](https://github.com/chasex/redis-go-cluster) is used.)
129 | 
130 | Each Redis uses less than 60% of a CPU core when `redispipe` is used, despite serving more requests.
131 | 
132 | ### Practice
133 | 
134 | In practice, the performance gain is smaller, because your application does other useful work besides
135 | sending requests to Redis. But the gain is still noticeable. In our setup, we see around 10-15%
136 | less CPU usage on Redis (i.e. 50% CPU -> 35% CPU) and a 5-10% improvement on the client side.
137 | `WritePause` is usually set to a higher value (150µs) than the default.
138 | 
139 | ## Limitations
140 | 
141 | - by default, blocking calls are not allowed, because they would block the whole pipeline:
142 | `BLPOP`, `BRPOP`, `BRPOPLPUSH`, `BZPOPMIN`, `BZPOPMAX`, `XREAD`, `XREADGROUP`, `SAVE`.
143 | However, you can set the `ScriptMode: true` option to enable these commands.
144 | `ScriptMode: true` also turns the default `WritePause` to -1 (which practically disables forced
145 | batching).
146 | - `WATCH` is also forbidden by default: it is useless and even harmful when concurrent goroutines
147 | use the same connection.
148 | It is also allowed with `ScriptMode: true`, but then you should be sure the connection is used only
149 | from a single goroutine.
150 | - `SUBSCRIBE` and `PSUBSCRIBE` commands are forbidden. They switch the connection into a
151 | completely different mode of communication that cannot be combined with regular
152 | commands. This connector doesn't implement the subscription mode.
153 | 
154 | ## Installation
155 | 
156 | - Single connection: `go get github.com/joomcode/redispipe/redisconn`
157 | - Cluster connection: `go get github.com/joomcode/redispipe/rediscluster`
158 | 
159 | ## Usage
160 | 
161 | Both `redisconn.Connect` and `rediscluster.NewCluster` create implementations of `redis.Sender`.
162 | `redis.Sender` provides an asynchronous api for sending a single request, a batch of requests, or a
163 | transaction. That api accepts `redis.Future` interface implementations as an argument and fulfills them asynchronously.
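For illustration, a minimal future that simply records its result could look like the sketch below. It is based on the `Cancelled`/`Resolve` methods exercised in this repository's tests; the type name `waitFuture` is hypothetical and not part of the package:

```go
// waitFuture is a hypothetical redis.Future implementation: it remembers the
// result and signals completion by closing a channel.
type waitFuture struct {
	res  interface{}
	done chan struct{}
}

// Cancelled tells the sender whether the request was abandoned; this future never is.
func (f *waitFuture) Cancelled() error { return nil }

// Resolve is called by the sender with the request's result (a value or an *errorx.Error).
func (f *waitFuture) Resolve(res interface{}, _ uint64) {
	f.res = res
	close(f.done)
}
```

A caller would then do something like `f := &waitFuture{done: make(chan struct{})}`, pass it to `sender.Send(redis.Req("GET", "key"), f, 0)`, and wait on `<-f.done` before reading `f.res`.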
164 | Usually you don't need to provide your own `redis.Future` implementation, but rather use 165 | synchronous wrappers. 166 | 167 | To use convenient synchronous api, one should wrap "sender" with one of wrappers: 168 | - `redis.Sync{sender}` - provides simple synchronouse api 169 | - `redis.SyncCtx{sender}` - provides same api, but all methods accepts `context.Context`, and 170 | methods returns immediately if that context is closed. 171 | - `redis.ChanFutured{sender}` - provides api with future through channel closing. 172 | 173 | Types accepted as command arguments: `nil`, `[]byte`, `string`, `int` (and all other integer types), 174 | `float64`, `float32`, `bool`. All arguments are converted to redis bulk strings as usual (ie 175 | string and bytes - as is; numbers - in decimal notation). `bool` converted as "0/1", 176 | `nil` converted to empty string. 177 | 178 | In difference to other redis packages, no custom types are used for request results. Results 179 | are de-serialized into plain go types and are returned as `interface{}`: 180 | 181 | redis | go 182 | -------------|------- 183 | plain string | `string` 184 | bulk string | `[]byte` 185 | integer | `int64` 186 | array | `[]interface{}` 187 | error | `error` (`*errorx.Error`) 188 | 189 | IO, connection, and other errors are not returned separately, but as result (and has same 190 | `*errorx.Error` underlying type). 191 | 192 | ```go 193 | package redispipe_test 194 | 195 | import ( 196 | "context" 197 | "fmt" 198 | "log" 199 | 200 | "github.com/joomcode/redispipe/redis" 201 | "github.com/joomcode/redispipe/rediscluster" 202 | "github.com/joomcode/redispipe/redisconn" 203 | ) 204 | 205 | const databaseno = 0 206 | const password = "" 207 | 208 | var myhandle interface{} = nil 209 | 210 | func Example_usage() { 211 | ctx := context.Background() 212 | cluster := false 213 | 214 | SingleRedis := func(ctx context.Context) (redis.Sender, error) { 215 | opts := redisconn.Opts{ 216 | DB: databaseno, 217 | Password: password, 218 | Logger: redisconn.NoopLogger{}, // shut up logging. Could be your custom implementation. 219 | Handle: myhandle, // custom data, useful for custom logging 220 | // Other parameters (usually, no need to change) 221 | // IOTimeout, DialTimeout, ReconnectTimeout, TCPKeepAlive, Concurrency, WritePause, Async 222 | } 223 | conn, err := redisconn.Connect(ctx, "127.0.0.1:6379", opts) 224 | return conn, err 225 | } 226 | 227 | ClusterRedis := func(ctx context.Context) (redis.Sender, error) { 228 | opts := rediscluster.Opts{ 229 | HostOpts: redisconn.Opts{ 230 | // No DB 231 | Password: password, 232 | // Usually, no need for special logger 233 | }, 234 | Name: "mycluster", // name of a cluster 235 | Logger: rediscluster.NoopLogger{}, // shut up logging. Could be your custom implementation. 
236 | Handle: myhandle, // custom data, useful for custom logging 237 | // Other parameters (usually, no need to change): 238 | // ConnsPerHost, ConnHostPolicy, CheckInterval, MovedRetries, WaitToMigrate, RoundRobinSeed, 239 | } 240 | addresses := []string{"127.0.0.1:20001"} // one or more of cluster addresses 241 | cluster, err := rediscluster.NewCluster(ctx, addresses, opts) 242 | return cluster, err 243 | } 244 | 245 | var sender redis.Sender 246 | var err error 247 | if cluster { 248 | sender, err = ClusterRedis(ctx) 249 | } else { 250 | sender, err = SingleRedis(ctx) 251 | } 252 | if err != nil { 253 | log.Fatal(err) 254 | } 255 | defer sender.Close() 256 | 257 | sync := redis.SyncCtx{sender} // wrapper for synchronous api 258 | 259 | res := sync.Do(ctx, "SET", "key", "ho") 260 | if err := redis.AsError(res); err != nil { 261 | log.Fatal(err) 262 | } 263 | fmt.Printf("result: %q\n", res) 264 | 265 | res = sync.Do(ctx, "GET", "key") 266 | if err := redis.AsError(res); err != nil { 267 | log.Fatal(err) 268 | } 269 | fmt.Printf("result: %q\n", res) 270 | 271 | res = sync.Send(ctx, redis.Req("HMSET", "hashkey", "field1", "val1", "field2", "val2")) 272 | if err := redis.AsError(res); err != nil { 273 | log.Fatal(err) 274 | } 275 | 276 | res = sync.Send(ctx, redis.Req("HMGET", "hashkey", "field1", "field2", "field3")) 277 | if err := redis.AsError(res); err != nil { 278 | log.Fatal(err) 279 | } 280 | for i, v := range res.([]interface{}) { 281 | fmt.Printf("%d: %T %q\n", i, v, v) 282 | } 283 | 284 | res = sync.Send(ctx, redis.Req("HMGET", "key", "field1")) 285 | if err := redis.AsError(res); err != nil { 286 | if rerr := redis.AsErrorx(res); rerr != nil && rerr.IsOfType(redis.ErrResult) { 287 | fmt.Printf("expected error: %v\n", rerr) 288 | } else { 289 | fmt.Printf("unexpected error: %v\n", err) 290 | } 291 | } else { 292 | fmt.Printf("unexpected missed error\n") 293 | } 294 | 295 | results := sync.SendMany(ctx, []redis.Request{ 296 | redis.Req("GET", "key"), 297 | redis.Req("HMGET", "hashkey", "field1", "field3"), 298 | }) 299 | // results is []interface{}, each element is result for corresponding request 300 | for i, res := range results { 301 | fmt.Printf("result[%d]: %T %q\n", i, res, res) 302 | } 303 | 304 | results, err = sync.SendTransaction(ctx, []redis.Request{ 305 | redis.Req("SET", "a{x}", "b"), 306 | redis.Req("SET", "b{x}", 0), 307 | redis.Req("INCRBY", "b{x}", 3), 308 | }) 309 | if err != nil { 310 | log.Fatal(err) 311 | } 312 | for i, res := range results { 313 | fmt.Printf("tresult[%d]: %T %q\n", i, res, res) 314 | } 315 | 316 | // Output: 317 | // result: "OK" 318 | // result: "ho" 319 | // 0: []uint8 "val1" 320 | // 1: []uint8 "val2" 321 | // 2: %!q() 322 | // expected error: WRONGTYPE Operation against a key holding the wrong kind of value (ErrResult {connection: *redisconn.Connection{addr: 127.0.0.1:6379}}) 323 | // result[0]: []uint8 "ho" 324 | // result[1]: []interface {} ["val1" ] 325 | // tresult[0]: string "OK" 326 | // tresult[1]: string "OK" 327 | // tresult[2]: int64 '\x03' 328 | } 329 | ``` 330 | 331 | ## Contributing 332 | 333 | - Ask questions in [Issues](https://github.com/joomcode/redispipe/issues) 334 | - Ask questions on [StackOverflow](https://stackoverflow.com/questions/ask?tags=go+redis). 
335 | - Report bugs using github [Issues](https://github.com/joomcode/redispipe/issues),
336 | - Request new features, or announce your intention to implement one, using github
337 | [Issues](https://github.com/joomcode/redispipe/issues),
338 | - Send [pull requests](https://github.com/joomcode/redispipe/pulls) to fix reported bugs or
339 | to implement discussed features.
340 | - Be kind.
341 | - Be lenient about our misunderstanding of your problem and our unwillingness to bloat the library.
342 | 
343 | ## License
344 | 
345 | [MIT License](https://github.com/joomcode/redispipe/blob/master/LICENSE)
346 | 
--------------------------------------------------------------------------------