├── .gitignore ├── LICENSE ├── README.md ├── TODO.md ├── main.go ├── script └── build └── vendor ├── gopkg.in ├── bsm │ └── ratelimit.v1 │ │ ├── Makefile │ │ ├── README.md │ │ ├── ratelimit.go │ │ └── ratelimit_test.go └── redis.v3 │ ├── LICENSE │ ├── Makefile │ ├── README.md │ ├── bench_test.go │ ├── cluster.go │ ├── cluster_client_test.go │ ├── cluster_pipeline.go │ ├── cluster_test.go │ ├── command.go │ ├── command_test.go │ ├── commands.go │ ├── commands_test.go │ ├── doc.go │ ├── error.go │ ├── example_test.go │ ├── export_test.go │ ├── internal │ ├── consistenthash │ │ ├── consistenthash.go │ │ └── consistenthash_test.go │ ├── hashtag │ │ ├── hashtag.go │ │ └── hashtag_test.go │ └── pool │ │ ├── bench_test.go │ │ ├── conn.go │ │ ├── main_test.go │ │ ├── pool.go │ │ ├── pool_single.go │ │ ├── pool_sticky.go │ │ └── pool_test.go │ ├── main_test.go │ ├── multi.go │ ├── multi_test.go │ ├── options.go │ ├── parser.go │ ├── parser_test.go │ ├── pipeline.go │ ├── pipeline_test.go │ ├── pool_test.go │ ├── pubsub.go │ ├── pubsub_test.go │ ├── race_test.go │ ├── redis.go │ ├── redis_test.go │ ├── ring.go │ ├── ring_test.go │ ├── safe.go │ ├── script.go │ ├── sentinel.go │ ├── sentinel_test.go │ ├── testdata │ └── redis.conf │ └── unsafe.go └── manifest /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | bin/ 26 | .DS_Store -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Niels Stevens 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Redis Dumper 2 | 3 | This script dumps all the entries from one Redis DB into a file in the redis protocol 4 | format. See [here](http://redis.io/topics/protocol) and [here](http://redis.io/topics/mass-insert). 
5 | This allows us to pipe the resulting file directly into redis with the pipe command 6 | like this: 7 | 8 | ```bash 9 | # dump redis database 10 | $ redis-dumper -h 127.0.0.1 -p 6379 -n 0 11 | 12 | # restore redis database from file 13 | $ cat redis_db_0_dump.rdb | redis-cli --pipe 14 | ``` 15 | 16 | This script was created especially to get contents from AWS ElastiCache but works 17 | with all Redis instances. 18 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | 1. process multiple DBs at once 2 | 2. split files into chunks when they get too large 3 | 3. optionally get the TTL (now restore uses 0, which means no TTL) 4 | 4. split looping over keys and appending into a separate goroutine 5 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "strconv" 10 | "time" 11 | 12 | "gopkg.in/redis.v3" 13 | ) 14 | 15 | var ( 16 | version string 17 | redisDB int64 18 | redisHost, redisPort, redisPassword string 19 | documentation = `Redis Dumper 20 | 21 | This script dumps all the entries from one Redis DB into a file in the redis protocol format. 22 | See here (http://redis.io/topics/protocol) and here (http://redis.io/topics/mass-insert). 23 | This allows us to pipe the resulting file directly into redis with the pipe command like this 24 | 25 | > cat redis_db_0_dump.rdb | redis-cli --pipe 26 | 27 | This script was created especially to get contents from AWS ElastiCache but works with all Redis instances 28 | 29 | ` 30 | ) 31 | 32 | const restoreCommand = "*4\r\n$7\r\nRESTORE\r\n" 33 | 34 | func init() { 35 | flag.Int64Var(&redisDB, "n", 0, "Database number") 36 | flag.StringVar(&redisHost, "h", "127.0.0.1", "Server hostname") 37 | flag.StringVar(&redisPort, "p", "6379", "Server port") 38 | flag.StringVar(&redisPassword, "a", "", "Password to use when connecting to the server") 39 | flag.Usage = func() { 40 | fmt.Fprint(os.Stderr, documentation) 41 | fmt.Fprintf(os.Stderr, "Usage of Redis Dumper:\n") 42 | flag.PrintDefaults() 43 | fmt.Fprintf(os.Stderr, "\nCurrent Version: %s\n", version) 44 | } 45 | } 46 | 47 | func main() { 48 | flag.Parse() 49 | log.Println("Start processing") 50 | 51 | options := &redis.Options{ 52 | DB: redisDB, 53 | Addr: fmt.Sprintf("%v:%v", redisHost, redisPort), 54 | } 55 | 56 | if len(redisPassword) != 0 { 57 | options.Password = redisPassword 58 | } 59 | 60 | client := redis.NewClient(options) 61 | 62 | file, writer := createFile() 63 | defer file.Close() 64 | 65 | var cursor int64 66 | for { 67 | var keys []string 68 | var err error 69 | cursor, keys, err = client.Scan(cursor, "", 1000).Result() 70 | if err != nil { 71 | log.Fatalf("Couldn't iterate over keys: %v", err) 72 | } 73 | 74 | for _, key := range keys { 75 | processKey(client, writer, key) 76 | } 77 | writer.Flush() 78 | 79 | if cursor == 0 { 80 | break 81 | } 82 | } 83 | 84 | log.Println("End processing") 85 | } 86 | 87 | func processKey(client *redis.Client, writer *bufio.Writer, key string) { 88 | dump, err := client.Dump(key).Result() 89 | if err != nil { 90 | log.Printf("ERROR: couldn't dump key %s: %v", key, err) 91 | return 92 | } 93 | 94 | ttl, err := client.TTL(key).Result() 95 | if err != nil { 96 | log.Printf("ERROR: couldn't get TTL for key %s: %v", key, err) 97 | return 98 | } 99 | 100
| writer.WriteString(createRestoreCommand(key, dump, &ttl)) 101 | } 102 | 103 | func createRestoreCommand(key, dump string, ttl *time.Duration) string { 104 | millis := int(ttl.Seconds() * 1000) // RESTORE expects the TTL in milliseconds 105 | if millis < 0 { 106 | millis = 0 107 | } 108 | ttlString := strconv.Itoa(millis) 109 | 110 | result := restoreCommand 111 | 112 | for _, val := range [3]string{key, ttlString, dump} { 113 | result += "$" + strconv.Itoa(len(val)) + "\r\n" + val + "\r\n" 114 | } 115 | 116 | return result 117 | } 118 | 119 | func createFile() (*os.File, *bufio.Writer) { 120 | file, err := os.Create(fmt.Sprintf("redis_db_%d_dump.rdb", redisDB)) 121 | if err != nil { 122 | log.Fatalf("Couldn't create file: %v", err) 123 | } 124 | 125 | return file, bufio.NewWriter(file) 126 | } 127 | -------------------------------------------------------------------------------- /script/build: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | readonly gitTag=$(git tag --sort=refname | tail -n 1) 5 | 6 | name="redis-dumper" 7 | 8 | if [ "$1" = "linux" ]; then 9 | export GOOS=linux 10 | name="${name}-linux" 11 | fi 12 | 13 | CGO_ENABLED=0 go build -ldflags "-X main.version=$gitTag -s -extldflags -static" -a -o bin/${name} 14 | -------------------------------------------------------------------------------- /vendor/gopkg.in/bsm/ratelimit.v1/Makefile: -------------------------------------------------------------------------------- 1 | default: test 2 | 3 | testdeps: 4 | @go get github.com/onsi/ginkgo 5 | @go get github.com/onsi/gomega 6 | 7 | test: testdeps 8 | @go test ./... 9 | 10 | testrace: testdeps 11 | @go test ./... -race 12 | 13 | testall: test testrace 14 | 15 | bench: 16 | @go test ./... -run=NONE -bench=. 17 | -------------------------------------------------------------------------------- /vendor/gopkg.in/bsm/ratelimit.v1/README.md: -------------------------------------------------------------------------------- 1 | # RateLimit [![Build Status](https://travis-ci.org/bsm/ratelimit.png?branch=master)](https://travis-ci.org/bsm/ratelimit) 2 | 3 | Simple, thread-safe Go rate-limiter. 4 | Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327 5 | 6 | ### Example 7 | 8 | ```go 9 | package main 10 | 11 | import ( 12 | "github.com/bsm/ratelimit" 13 | "log" 14 | ) 15 | 16 | func main() { 17 | // Create a new rate-limiter, allowing up-to 10 calls 18 | // per second 19 | rl := ratelimit.New(10, time.Second) 20 | 21 | for i:=0; i<20; i++ { 22 | if rl.Limit() { 23 | fmt.Println("DOH! Over limit!") 24 | } else { 25 | fmt.Println("OK") 26 | } 27 | } 28 | } 29 | ``` 30 | 31 | ### Licence 32 | 33 | ``` 34 | Copyright (c) 2015 Black Square Media 35 | 36 | Permission is hereby granted, free of charge, to any person obtaining 37 | a copy of this software and associated documentation files (the 38 | "Software"), to deal in the Software without restriction, including 39 | without limitation the rights to use, copy, modify, merge, publish, 40 | distribute, sublicense, and/or sell copies of the Software, and to 41 | permit persons to whom the Software is furnished to do so, subject to 42 | the following conditions: 43 | 44 | The above copyright notice and this permission notice shall be 45 | included in all copies or substantial portions of the Software.
46 | 47 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 48 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 49 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 50 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 51 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 52 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 53 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 54 | ``` 55 | -------------------------------------------------------------------------------- /vendor/gopkg.in/bsm/ratelimit.v1/ratelimit.go: -------------------------------------------------------------------------------- 1 | /* 2 | Simple, thread-safe Go rate-limiter. 3 | Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327 4 | 5 | Example: 6 | 7 | // Create a new rate-limiter, allowing up-to 10 calls 8 | // per second 9 | rl := ratelimit.New(10, time.Second) 10 | 11 | for i:=0; i<20; i++ { 12 | if rl.Limit() { 13 | fmt.Println("DOH! Over limit!") 14 | } else { 15 | fmt.Println("OK") 16 | } 17 | } 18 | */ 19 | package ratelimit 20 | 21 | import ( 22 | "sync/atomic" 23 | "time" 24 | ) 25 | 26 | // RateLimiter instances are thread-safe. 27 | type RateLimiter struct { 28 | rate, allowance, max, unit, lastCheck uint64 29 | } 30 | 31 | // New creates a new rate limiter instance 32 | func New(rate int, per time.Duration) *RateLimiter { 33 | nano := uint64(per) 34 | if nano < 1 { 35 | nano = uint64(time.Second) 36 | } 37 | if rate < 1 { 38 | rate = 1 39 | } 40 | 41 | return &RateLimiter{ 42 | rate: uint64(rate), // store the rate 43 | allowance: uint64(rate) * nano, // set our allowance to max in the beginning 44 | max: uint64(rate) * nano, // remember our maximum allowance 45 | unit: nano, // remember our unit size 46 | 47 | lastCheck: unixNano(), 48 | } 49 | } 50 | 51 | // UpdateRate allows to update the allowed rate 52 | func (rl *RateLimiter) UpdateRate(rate int) { 53 | atomic.StoreUint64(&rl.rate, uint64(rate)) 54 | atomic.StoreUint64(&rl.max, uint64(rate)*rl.unit) 55 | } 56 | 57 | // Limit returns true if rate was exceeded 58 | func (rl *RateLimiter) Limit() bool { 59 | // Calculate the number of ns that have passed since our last call 60 | now := unixNano() 61 | passed := now - atomic.SwapUint64(&rl.lastCheck, now) 62 | 63 | // Add them to our allowance 64 | rate := atomic.LoadUint64(&rl.rate) 65 | current := atomic.AddUint64(&rl.allowance, passed*rate) 66 | 67 | // Ensure our allowance is not over maximum 68 | if max := atomic.LoadUint64(&rl.max); current > max { 69 | atomic.AddUint64(&rl.allowance, max-current) 70 | current = max 71 | } 72 | 73 | // If our allowance is less than one unit, rate-limit! 
74 | if current < rl.unit { 75 | return true 76 | } 77 | 78 | // Not limited, subtract a unit 79 | atomic.AddUint64(&rl.allowance, -rl.unit) 80 | return false 81 | } 82 | 83 | // Undo reverts the last Limit() call, returning consumed allowance 84 | func (rl *RateLimiter) Undo() { 85 | current := atomic.AddUint64(&rl.allowance, rl.unit) 86 | 87 | // Ensure our allowance is not over maximum 88 | if max := atomic.LoadUint64(&rl.max); current > max { 89 | atomic.AddUint64(&rl.allowance, max-current) 90 | } 91 | } 92 | 93 | // now as unix nanoseconds 94 | func unixNano() uint64 { 95 | return uint64(time.Now().UnixNano()) 96 | } 97 | -------------------------------------------------------------------------------- /vendor/gopkg.in/bsm/ratelimit.v1/ratelimit_test.go: -------------------------------------------------------------------------------- 1 | package ratelimit 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | "time" 7 | 8 | . "github.com/onsi/ginkgo" 9 | . "github.com/onsi/gomega" 10 | ) 11 | 12 | var _ = Describe("RateLimiter", func() { 13 | 14 | It("should accurately rate-limit at small rates", func() { 15 | var count int 16 | rl := New(10, time.Minute) 17 | for !rl.Limit() { 18 | count++ 19 | } 20 | Expect(count).To(Equal(10)) 21 | }) 22 | 23 | It("should accurately rate-limit at large rates", func() { 24 | var count int 25 | rl := New(100000, time.Hour) 26 | for !rl.Limit() { 27 | count++ 28 | } 29 | Expect(count).To(BeNumerically("~", 100000, 10)) 30 | }) 31 | 32 | It("should accurately rate-limit at large intervals", func() { 33 | var count int 34 | rl := New(100, 360*24*time.Hour) 35 | for !rl.Limit() { 36 | count++ 37 | } 38 | Expect(count).To(Equal(100)) 39 | }) 40 | 41 | It("should correctly increase allowance", func() { 42 | n := 25 43 | rl := New(n, 50*time.Millisecond) 44 | for i := 0; i < n; i++ { 45 | Expect(rl.Limit()).To(BeFalse(), "on cycle %d", i) 46 | } 47 | Expect(rl.Limit()).To(BeTrue()) 48 | Eventually(rl.Limit, "60ms", "10ms").Should(BeFalse()) 49 | }) 50 | 51 | It("should correctly spread allowance", func() { 52 | var count int 53 | rl := New(5, 10*time.Millisecond) 54 | start := time.Now() 55 | for time.Now().Sub(start) < 100*time.Millisecond { 56 | if !rl.Limit() { 57 | count++ 58 | } 59 | } 60 | Expect(count).To(BeNumerically("~", 54, 1)) 61 | }) 62 | 63 | It("should undo", func() { 64 | rl := New(5, time.Minute) 65 | 66 | Expect(rl.Limit()).To(BeFalse()) 67 | Expect(rl.Limit()).To(BeFalse()) 68 | Expect(rl.Limit()).To(BeFalse()) 69 | Expect(rl.Limit()).To(BeFalse()) 70 | Expect(rl.Limit()).To(BeFalse()) 71 | Expect(rl.Limit()).To(BeTrue()) 72 | 73 | rl.Undo() 74 | Expect(rl.Limit()).To(BeFalse()) 75 | Expect(rl.Limit()).To(BeTrue()) 76 | }) 77 | 78 | It("should be thread-safe", func() { 79 | c := 100 80 | n := 100 81 | wg := sync.WaitGroup{} 82 | rl := New(c*n, time.Hour) 83 | for i := 0; i < c; i++ { 84 | wg.Add(1) 85 | 86 | go func(thread int) { 87 | defer GinkgoRecover() 88 | defer wg.Done() 89 | 90 | for j := 0; j < n; j++ { 91 | Expect(rl.Limit()).To(BeFalse(), "thread %d, cycle %d", thread, j) 92 | } 93 | }(i) 94 | } 95 | wg.Wait() 96 | Expect(rl.Limit()).To(BeTrue()) 97 | }) 98 | 99 | It("should allow to upate rate", func() { 100 | var count int 101 | rl := New(5, 50*time.Millisecond) 102 | for !rl.Limit() { 103 | count++ 104 | } 105 | Expect(count).To(Equal(5)) 106 | 107 | rl.UpdateRate(10) 108 | time.Sleep(50 * time.Millisecond) 109 | 110 | for !rl.Limit() { 111 | count++ 112 | } 113 | Expect(count).To(Equal(15)) 114 | }) 115 | 116 | }) 117 | 118 | // 
-------------------------------------------------------------------- 119 | 120 | func BenchmarkLimit(b *testing.B) { 121 | rl := New(1000, time.Second) 122 | 123 | b.ResetTimer() 124 | for i := 0; i < b.N; i++ { 125 | rl.Limit() 126 | } 127 | } 128 | 129 | // -------------------------------------------------------------------- 130 | 131 | func TestGinkgoSuite(t *testing.T) { 132 | RegisterFailHandler(Fail) 133 | RunSpecs(t, "github.com/bsm/ratelimit") 134 | } 135 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 The github.com/go-redis/redis Contributors. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | * Redistributions in binary form must reproduce the above 11 | copyright notice, this list of conditions and the following disclaimer 12 | in the documentation and/or other materials provided with the 13 | distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/Makefile: -------------------------------------------------------------------------------- 1 | all: testdeps 2 | go test ./... 3 | go test ./... -short -race 4 | 5 | testdeps: testdata/redis/src/redis-server 6 | 7 | bench: testdeps 8 | go test ./... -test.run=NONE -test.bench=. -test.benchmem 9 | 10 | .PHONY: all test testdeps bench 11 | 12 | testdata/redis: 13 | mkdir -p $@ 14 | wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@ 15 | 16 | testdata/redis/src/redis-server: testdata/redis 17 | cd $< && make all 18 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/README.md: -------------------------------------------------------------------------------- 1 | # Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis) 2 | 3 | Supports: 4 | 5 | - Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC. 6 | - [Pub/Sub](http://godoc.org/gopkg.in/redis.v3#PubSub). 7 | - [Transactions](http://godoc.org/gopkg.in/redis.v3#Multi). 8 | - [Pipelining](http://godoc.org/gopkg.in/redis.v3#Client.Pipeline). 9 | - [Scripting](http://godoc.org/gopkg.in/redis.v3#Script). 10 | - [Timeouts](http://godoc.org/gopkg.in/redis.v3#Options). 
11 | - [Redis Sentinel](http://godoc.org/gopkg.in/redis.v3#NewFailoverClient). 12 | - [Redis Cluster](http://godoc.org/gopkg.in/redis.v3#NewClusterClient). 13 | - [Ring](http://godoc.org/gopkg.in/redis.v3#NewRing). 14 | - [Cache friendly](https://github.com/go-redis/cache). 15 | - [Rate limiting](https://github.com/go-redis/rate). 16 | - [Distributed Locks](https://github.com/bsm/redis-lock). 17 | 18 | API docs: http://godoc.org/gopkg.in/redis.v3. 19 | Examples: http://godoc.org/gopkg.in/redis.v3#pkg-examples. 20 | 21 | ## Installation 22 | 23 | Install: 24 | 25 | go get gopkg.in/redis.v3 26 | 27 | ## Quickstart 28 | 29 | ```go 30 | func ExampleNewClient() { 31 | client := redis.NewClient(&redis.Options{ 32 | Addr: "localhost:6379", 33 | Password: "", // no password set 34 | DB: 0, // use default DB 35 | }) 36 | 37 | pong, err := client.Ping().Result() 38 | fmt.Println(pong, err) 39 | // Output: PONG 40 | } 41 | 42 | func ExampleClient() { 43 | err := client.Set("key", "value", 0).Err() 44 | if err != nil { 45 | panic(err) 46 | } 47 | 48 | val, err := client.Get("key").Result() 49 | if err != nil { 50 | panic(err) 51 | } 52 | fmt.Println("key", val) 53 | 54 | val2, err := client.Get("key2").Result() 55 | if err == redis.Nil { 56 | fmt.Println("key2 does not exists") 57 | } else if err != nil { 58 | panic(err) 59 | } else { 60 | fmt.Println("key2", val2) 61 | } 62 | // Output: key value 63 | // key2 does not exists 64 | } 65 | ``` 66 | 67 | ## Howto 68 | 69 | Please go through [examples](http://godoc.org/gopkg.in/redis.v3#pkg-examples) to get an idea how to use this package. 70 | 71 | ## Look and feel 72 | 73 | Some corner cases: 74 | 75 | SET key value EX 10 NX 76 | set, err := client.SetNX("key", "value", 10*time.Second).Result() 77 | 78 | SORT list LIMIT 0 2 ASC 79 | vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() 80 | 81 | ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 82 | vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ 83 | Min: "-inf", 84 | Max: "+inf", 85 | Offset: 0, 86 | Count: 2, 87 | }).Result() 88 | 89 | ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM 90 | vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() 91 | 92 | EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" 93 | vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result() 94 | 95 | ## Benchmark 96 | 97 | ``` 98 | BenchmarkSetGoRedis10Conns64Bytes-4 200000 7184 ns/op 210 B/op 6 allocs/op 99 | BenchmarkSetGoRedis100Conns64Bytes-4 200000 7174 ns/op 210 B/op 6 allocs/op 100 | BenchmarkSetGoRedis10Conns1KB-4 200000 7341 ns/op 210 B/op 6 allocs/op 101 | BenchmarkSetGoRedis100Conns1KB-4 200000 7425 ns/op 210 B/op 6 allocs/op 102 | BenchmarkSetGoRedis10Conns10KB-4 200000 9480 ns/op 210 B/op 6 allocs/op 103 | BenchmarkSetGoRedis100Conns10KB-4 200000 9301 ns/op 210 B/op 6 allocs/op 104 | BenchmarkSetGoRedis10Conns1MB-4 2000 590321 ns/op 2337 B/op 6 allocs/op 105 | BenchmarkSetGoRedis100Conns1MB-4 2000 588935 ns/op 2337 B/op 6 allocs/op 106 | BenchmarkSetRedigo10Conns64Bytes-4 200000 7238 ns/op 208 B/op 7 allocs/op 107 | BenchmarkSetRedigo100Conns64Bytes-4 200000 7435 ns/op 208 B/op 7 allocs/op 108 | BenchmarkSetRedigo10Conns1KB-4 200000 7635 ns/op 208 B/op 7 allocs/op 109 | BenchmarkSetRedigo100Conns1KB-4 200000 7597 ns/op 208 B/op 7 allocs/op 110 | BenchmarkSetRedigo10Conns10KB-4 100000 17126 ns/op 208 B/op 7 allocs/op 111 | BenchmarkSetRedigo100Conns10KB-4 100000 
17030 ns/op 208 B/op 7 allocs/op 112 | BenchmarkSetRedigo10Conns1MB-4 2000 675397 ns/op 226 B/op 7 allocs/op 113 | BenchmarkSetRedigo100Conns1MB-4 2000 669053 ns/op 226 B/op 7 allocs/op 114 | ``` 115 | 116 | ## Shameless plug 117 | 118 | Check my [PostgreSQL client for Go](https://github.com/go-pg/pg). 119 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/bench_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | "time" 7 | 8 | redigo "github.com/garyburd/redigo/redis" 9 | 10 | "gopkg.in/redis.v3" 11 | ) 12 | 13 | func benchmarkRedisClient(poolSize int) *redis.Client { 14 | client := redis.NewClient(&redis.Options{ 15 | Addr: ":6379", 16 | DialTimeout: time.Second, 17 | ReadTimeout: time.Second, 18 | WriteTimeout: time.Second, 19 | PoolSize: poolSize, 20 | }) 21 | if err := client.FlushDb().Err(); err != nil { 22 | panic(err) 23 | } 24 | return client 25 | } 26 | 27 | func BenchmarkRedisPing(b *testing.B) { 28 | client := benchmarkRedisClient(10) 29 | defer client.Close() 30 | 31 | b.ResetTimer() 32 | 33 | b.RunParallel(func(pb *testing.PB) { 34 | for pb.Next() { 35 | if err := client.Ping().Err(); err != nil { 36 | b.Fatal(err) 37 | } 38 | } 39 | }) 40 | } 41 | 42 | func BenchmarkRedisSet(b *testing.B) { 43 | client := benchmarkRedisClient(10) 44 | defer client.Close() 45 | 46 | value := string(bytes.Repeat([]byte{'1'}, 10000)) 47 | 48 | b.ResetTimer() 49 | 50 | b.RunParallel(func(pb *testing.PB) { 51 | for pb.Next() { 52 | if err := client.Set("key", value, 0).Err(); err != nil { 53 | b.Fatal(err) 54 | } 55 | } 56 | }) 57 | } 58 | 59 | func BenchmarkRedisGetNil(b *testing.B) { 60 | client := benchmarkRedisClient(10) 61 | defer client.Close() 62 | 63 | b.ResetTimer() 64 | 65 | b.RunParallel(func(pb *testing.PB) { 66 | for pb.Next() { 67 | if err := client.Get("key").Err(); err != redis.Nil { 68 | b.Fatal(err) 69 | } 70 | } 71 | }) 72 | } 73 | 74 | func benchmarkSetGoRedis(b *testing.B, poolSize, payloadSize int) { 75 | client := benchmarkRedisClient(poolSize) 76 | defer client.Close() 77 | 78 | value := string(bytes.Repeat([]byte{'1'}, payloadSize)) 79 | 80 | b.ResetTimer() 81 | 82 | b.RunParallel(func(pb *testing.PB) { 83 | for pb.Next() { 84 | if err := client.Set("key", value, 0).Err(); err != nil { 85 | b.Fatal(err) 86 | } 87 | } 88 | }) 89 | } 90 | 91 | func BenchmarkSetGoRedis10Conns64Bytes(b *testing.B) { 92 | benchmarkSetGoRedis(b, 10, 64) 93 | } 94 | 95 | func BenchmarkSetGoRedis100Conns64Bytes(b *testing.B) { 96 | benchmarkSetGoRedis(b, 100, 64) 97 | } 98 | 99 | func BenchmarkSetGoRedis10Conns1KB(b *testing.B) { 100 | benchmarkSetGoRedis(b, 10, 1024) 101 | } 102 | 103 | func BenchmarkSetGoRedis100Conns1KB(b *testing.B) { 104 | benchmarkSetGoRedis(b, 100, 1024) 105 | } 106 | 107 | func BenchmarkSetGoRedis10Conns10KB(b *testing.B) { 108 | benchmarkSetGoRedis(b, 10, 10*1024) 109 | } 110 | 111 | func BenchmarkSetGoRedis100Conns10KB(b *testing.B) { 112 | benchmarkSetGoRedis(b, 100, 10*1024) 113 | } 114 | 115 | func BenchmarkSetGoRedis10Conns1MB(b *testing.B) { 116 | benchmarkSetGoRedis(b, 10, 1024*1024) 117 | } 118 | 119 | func BenchmarkSetGoRedis100Conns1MB(b *testing.B) { 120 | benchmarkSetGoRedis(b, 100, 1024*1024) 121 | } 122 | 123 | func benchmarkSetRedigo(b *testing.B, poolSize, payloadSize int) { 124 | pool := &redigo.Pool{ 125 | Dial: func() (redigo.Conn, error) { 126 | return redigo.DialTimeout("tcp", ":6379", 
time.Second, time.Second, time.Second) 127 | }, 128 | MaxActive: poolSize, 129 | MaxIdle: poolSize, 130 | } 131 | defer pool.Close() 132 | 133 | value := string(bytes.Repeat([]byte{'1'}, payloadSize)) 134 | 135 | b.ResetTimer() 136 | 137 | b.RunParallel(func(pb *testing.PB) { 138 | for pb.Next() { 139 | conn := pool.Get() 140 | if _, err := conn.Do("SET", "key", value); err != nil { 141 | b.Fatal(err) 142 | } 143 | conn.Close() 144 | } 145 | }) 146 | } 147 | 148 | func BenchmarkSetRedigo10Conns64Bytes(b *testing.B) { 149 | benchmarkSetRedigo(b, 10, 64) 150 | } 151 | 152 | func BenchmarkSetRedigo100Conns64Bytes(b *testing.B) { 153 | benchmarkSetRedigo(b, 100, 64) 154 | } 155 | 156 | func BenchmarkSetRedigo10Conns1KB(b *testing.B) { 157 | benchmarkSetRedigo(b, 10, 1024) 158 | } 159 | 160 | func BenchmarkSetRedigo100Conns1KB(b *testing.B) { 161 | benchmarkSetRedigo(b, 100, 1024) 162 | } 163 | 164 | func BenchmarkSetRedigo10Conns10KB(b *testing.B) { 165 | benchmarkSetRedigo(b, 10, 10*1024) 166 | } 167 | 168 | func BenchmarkSetRedigo100Conns10KB(b *testing.B) { 169 | benchmarkSetRedigo(b, 100, 10*1024) 170 | } 171 | 172 | func BenchmarkSetRedigo10Conns1MB(b *testing.B) { 173 | benchmarkSetRedigo(b, 10, 1024*1024) 174 | } 175 | 176 | func BenchmarkSetRedigo100Conns1MB(b *testing.B) { 177 | benchmarkSetRedigo(b, 100, 1024*1024) 178 | } 179 | 180 | func BenchmarkRedisSetGetBytes(b *testing.B) { 181 | client := benchmarkRedisClient(10) 182 | defer client.Close() 183 | 184 | value := bytes.Repeat([]byte{'1'}, 10000) 185 | 186 | b.ResetTimer() 187 | 188 | b.RunParallel(func(pb *testing.PB) { 189 | for pb.Next() { 190 | if err := client.Set("key", value, 0).Err(); err != nil { 191 | b.Fatal(err) 192 | } 193 | 194 | got, err := client.Get("key").Bytes() 195 | if err != nil { 196 | b.Fatal(err) 197 | } 198 | if !bytes.Equal(got, value) { 199 | b.Fatalf("got != value") 200 | } 201 | } 202 | }) 203 | } 204 | 205 | func BenchmarkRedisMGet(b *testing.B) { 206 | client := benchmarkRedisClient(10) 207 | defer client.Close() 208 | 209 | if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil { 210 | b.Fatal(err) 211 | } 212 | 213 | b.ResetTimer() 214 | 215 | b.RunParallel(func(pb *testing.PB) { 216 | for pb.Next() { 217 | if err := client.MGet("key1", "key2").Err(); err != nil { 218 | b.Fatal(err) 219 | } 220 | } 221 | }) 222 | } 223 | 224 | func BenchmarkSetExpire(b *testing.B) { 225 | client := benchmarkRedisClient(10) 226 | defer client.Close() 227 | 228 | b.ResetTimer() 229 | 230 | b.RunParallel(func(pb *testing.PB) { 231 | for pb.Next() { 232 | if err := client.Set("key", "hello", 0).Err(); err != nil { 233 | b.Fatal(err) 234 | } 235 | if err := client.Expire("key", time.Second).Err(); err != nil { 236 | b.Fatal(err) 237 | } 238 | } 239 | }) 240 | } 241 | 242 | func BenchmarkPipeline(b *testing.B) { 243 | client := benchmarkRedisClient(10) 244 | defer client.Close() 245 | 246 | b.ResetTimer() 247 | 248 | b.RunParallel(func(pb *testing.PB) { 249 | for pb.Next() { 250 | _, err := client.Pipelined(func(pipe *redis.Pipeline) error { 251 | pipe.Set("key", "hello", 0) 252 | pipe.Expire("key", time.Second) 253 | return nil 254 | }) 255 | if err != nil { 256 | b.Fatal(err) 257 | } 258 | } 259 | }) 260 | } 261 | 262 | func BenchmarkZAdd(b *testing.B) { 263 | client := benchmarkRedisClient(10) 264 | defer client.Close() 265 | 266 | b.ResetTimer() 267 | 268 | b.RunParallel(func(pb *testing.PB) { 269 | for pb.Next() { 270 | if err := client.ZAdd("key", redis.Z{float64(1), "hello"}).Err(); err != 
nil { 271 | b.Fatal(err) 272 | } 273 | } 274 | }) 275 | } 276 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/cluster.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "sync/atomic" 7 | "time" 8 | 9 | "gopkg.in/redis.v3/internal/hashtag" 10 | "gopkg.in/redis.v3/internal/pool" 11 | ) 12 | 13 | // ClusterClient is a Redis Cluster client representing a pool of zero 14 | // or more underlying connections. It's safe for concurrent use by 15 | // multiple goroutines. 16 | type ClusterClient struct { 17 | commandable 18 | 19 | opt *ClusterOptions 20 | 21 | slotsMx sync.RWMutex // protects slots and addrs 22 | addrs []string 23 | slots [][]string 24 | 25 | clientsMx sync.RWMutex // protects clients and closed 26 | clients map[string]*Client 27 | 28 | _closed int32 // atomic 29 | 30 | // Reports where slots reloading is in progress. 31 | reloading uint32 32 | } 33 | 34 | // NewClusterClient returns a Redis Cluster client as described in 35 | // http://redis.io/topics/cluster-spec. 36 | func NewClusterClient(opt *ClusterOptions) *ClusterClient { 37 | client := &ClusterClient{ 38 | opt: opt, 39 | addrs: opt.Addrs, 40 | slots: make([][]string, hashtag.SlotNumber), 41 | clients: make(map[string]*Client), 42 | } 43 | client.commandable.process = client.process 44 | client.reloadSlots() 45 | return client 46 | } 47 | 48 | // getClients returns a snapshot of clients for cluster nodes 49 | // this ClusterClient has been working with recently. 50 | // Note that snapshot can contain closed clients. 51 | func (c *ClusterClient) getClients() map[string]*Client { 52 | c.clientsMx.RLock() 53 | clients := make(map[string]*Client, len(c.clients)) 54 | for addr, client := range c.clients { 55 | clients[addr] = client 56 | } 57 | c.clientsMx.RUnlock() 58 | return clients 59 | } 60 | 61 | // Watch creates new transaction and marks the keys to be watched 62 | // for conditional execution of a transaction. 63 | func (c *ClusterClient) Watch(keys ...string) (*Multi, error) { 64 | addr := c.slotMasterAddr(hashtag.Slot(keys[0])) 65 | client, err := c.getClient(addr) 66 | if err != nil { 67 | return nil, err 68 | } 69 | return client.Watch(keys...) 70 | } 71 | 72 | // PoolStats returns accumulated connection pool stats. 73 | func (c *ClusterClient) PoolStats() *PoolStats { 74 | acc := PoolStats{} 75 | for _, client := range c.getClients() { 76 | s := client.connPool.Stats() 77 | acc.Requests += s.Requests 78 | acc.Hits += s.Hits 79 | acc.Waits += s.Waits 80 | acc.Timeouts += s.Timeouts 81 | acc.TotalConns += s.TotalConns 82 | acc.FreeConns += s.FreeConns 83 | } 84 | return &acc 85 | } 86 | 87 | func (c *ClusterClient) closed() bool { 88 | return atomic.LoadInt32(&c._closed) == 1 89 | } 90 | 91 | // Close closes the cluster client, releasing any open resources. 92 | // 93 | // It is rare to Close a ClusterClient, as the ClusterClient is meant 94 | // to be long-lived and shared between many goroutines. 95 | func (c *ClusterClient) Close() error { 96 | if !atomic.CompareAndSwapInt32(&c._closed, 0, 1) { 97 | return pool.ErrClosed 98 | } 99 | 100 | c.clientsMx.Lock() 101 | c.resetClients() 102 | c.clientsMx.Unlock() 103 | c.setSlots(nil) 104 | return nil 105 | } 106 | 107 | // getClient returns a Client for a given address. 
108 | func (c *ClusterClient) getClient(addr string) (*Client, error) { 109 | if c.closed() { 110 | return nil, pool.ErrClosed 111 | } 112 | 113 | if addr == "" { 114 | return c.randomClient() 115 | } 116 | 117 | c.clientsMx.RLock() 118 | client, ok := c.clients[addr] 119 | c.clientsMx.RUnlock() 120 | if ok { 121 | return client, nil 122 | } 123 | 124 | c.clientsMx.Lock() 125 | client, ok = c.clients[addr] 126 | if !ok { 127 | opt := c.opt.clientOptions() 128 | opt.Addr = addr 129 | client = NewClient(opt) 130 | c.clients[addr] = client 131 | } 132 | c.clientsMx.Unlock() 133 | 134 | return client, nil 135 | } 136 | 137 | func (c *ClusterClient) slotAddrs(slot int) []string { 138 | c.slotsMx.RLock() 139 | addrs := c.slots[slot] 140 | c.slotsMx.RUnlock() 141 | return addrs 142 | } 143 | 144 | func (c *ClusterClient) slotMasterAddr(slot int) string { 145 | addrs := c.slotAddrs(slot) 146 | if len(addrs) > 0 { 147 | return addrs[0] 148 | } 149 | return "" 150 | } 151 | 152 | // randomClient returns a Client for the first live node. 153 | func (c *ClusterClient) randomClient() (client *Client, err error) { 154 | for i := 0; i < 10; i++ { 155 | n := rand.Intn(len(c.addrs)) 156 | client, err = c.getClient(c.addrs[n]) 157 | if err != nil { 158 | continue 159 | } 160 | err = client.ClusterInfo().Err() 161 | if err == nil { 162 | return client, nil 163 | } 164 | } 165 | return nil, err 166 | } 167 | 168 | func (c *ClusterClient) process(cmd Cmder) { 169 | var ask bool 170 | 171 | slot := hashtag.Slot(cmd.clusterKey()) 172 | 173 | addr := c.slotMasterAddr(slot) 174 | client, err := c.getClient(addr) 175 | if err != nil { 176 | cmd.setErr(err) 177 | return 178 | } 179 | 180 | for attempt := 0; attempt <= c.opt.getMaxRedirects(); attempt++ { 181 | if attempt > 0 { 182 | cmd.reset() 183 | } 184 | 185 | if ask { 186 | pipe := client.Pipeline() 187 | pipe.Process(NewCmd("ASKING")) 188 | pipe.Process(cmd) 189 | _, _ = pipe.Exec() 190 | pipe.Close() 191 | ask = false 192 | } else { 193 | client.Process(cmd) 194 | } 195 | 196 | // If there is no (real) error, we are done! 197 | err := cmd.Err() 198 | if err == nil || err == Nil || err == TxFailedErr { 199 | return 200 | } 201 | 202 | // On network errors try random node. 203 | if isNetworkError(err) { 204 | client, err = c.randomClient() 205 | if err != nil { 206 | return 207 | } 208 | continue 209 | } 210 | 211 | var moved bool 212 | var addr string 213 | moved, ask, addr = isMovedError(err) 214 | if moved || ask { 215 | if moved && c.slotMasterAddr(slot) != addr { 216 | c.lazyReloadSlots() 217 | } 218 | client, err = c.getClient(addr) 219 | if err != nil { 220 | return 221 | } 222 | continue 223 | } 224 | 225 | break 226 | } 227 | } 228 | 229 | // Closes all clients and returns last error if there are any. 
230 | func (c *ClusterClient) resetClients() (retErr error) { 231 | for addr, client := range c.clients { 232 | if err := client.Close(); err != nil && retErr == nil { 233 | retErr = err 234 | } 235 | delete(c.clients, addr) 236 | } 237 | return retErr 238 | } 239 | 240 | func (c *ClusterClient) setSlots(slots []ClusterSlotInfo) { 241 | c.slotsMx.Lock() 242 | 243 | seen := make(map[string]struct{}) 244 | for _, addr := range c.addrs { 245 | seen[addr] = struct{}{} 246 | } 247 | 248 | for i := 0; i < hashtag.SlotNumber; i++ { 249 | c.slots[i] = c.slots[i][:0] 250 | } 251 | for _, info := range slots { 252 | for slot := info.Start; slot <= info.End; slot++ { 253 | c.slots[slot] = info.Addrs 254 | } 255 | 256 | for _, addr := range info.Addrs { 257 | if _, ok := seen[addr]; !ok { 258 | c.addrs = append(c.addrs, addr) 259 | seen[addr] = struct{}{} 260 | } 261 | } 262 | } 263 | 264 | c.slotsMx.Unlock() 265 | } 266 | 267 | func (c *ClusterClient) reloadSlots() { 268 | defer atomic.StoreUint32(&c.reloading, 0) 269 | 270 | client, err := c.randomClient() 271 | if err != nil { 272 | Logger.Printf("randomClient failed: %s", err) 273 | return 274 | } 275 | 276 | slots, err := client.ClusterSlots().Result() 277 | if err != nil { 278 | Logger.Printf("ClusterSlots failed: %s", err) 279 | return 280 | } 281 | c.setSlots(slots) 282 | } 283 | 284 | func (c *ClusterClient) lazyReloadSlots() { 285 | if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { 286 | return 287 | } 288 | go c.reloadSlots() 289 | } 290 | 291 | // reaper closes idle connections to the cluster. 292 | func (c *ClusterClient) reaper(frequency time.Duration) { 293 | ticker := time.NewTicker(frequency) 294 | defer ticker.Stop() 295 | 296 | for _ = range ticker.C { 297 | if c.closed() { 298 | break 299 | } 300 | 301 | var n int 302 | for _, client := range c.getClients() { 303 | nn, err := client.connPool.(*pool.ConnPool).ReapStaleConns() 304 | if err != nil { 305 | Logger.Printf("ReapStaleConns failed: %s", err) 306 | } else { 307 | n += nn 308 | } 309 | } 310 | 311 | s := c.PoolStats() 312 | Logger.Printf( 313 | "reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)", 314 | n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts, 315 | ) 316 | } 317 | } 318 | 319 | //------------------------------------------------------------------------------ 320 | 321 | // ClusterOptions are used to configure a cluster client and should be 322 | // passed to NewClusterClient. 323 | type ClusterOptions struct { 324 | // A seed list of host:port addresses of cluster nodes. 325 | Addrs []string 326 | 327 | // The maximum number of MOVED/ASK redirects to follow before giving up. 328 | // Default is 16 329 | MaxRedirects int 330 | 331 | // Following options are copied from Options struct. 332 | 333 | Password string 334 | 335 | DialTimeout time.Duration 336 | ReadTimeout time.Duration 337 | WriteTimeout time.Duration 338 | 339 | // PoolSize applies per cluster node and not for the whole cluster. 
340 | PoolSize int 341 | PoolTimeout time.Duration 342 | IdleTimeout time.Duration 343 | IdleCheckFrequency time.Duration 344 | } 345 | 346 | func (opt *ClusterOptions) getMaxRedirects() int { 347 | if opt.MaxRedirects == -1 { 348 | return 0 349 | } 350 | if opt.MaxRedirects == 0 { 351 | return 16 352 | } 353 | return opt.MaxRedirects 354 | } 355 | 356 | func (opt *ClusterOptions) clientOptions() *Options { 357 | return &Options{ 358 | Password: opt.Password, 359 | 360 | DialTimeout: opt.DialTimeout, 361 | ReadTimeout: opt.ReadTimeout, 362 | WriteTimeout: opt.WriteTimeout, 363 | 364 | PoolSize: opt.PoolSize, 365 | PoolTimeout: opt.PoolTimeout, 366 | IdleTimeout: opt.IdleTimeout, 367 | // IdleCheckFrequency is not copied to disable reaper 368 | } 369 | } 370 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/cluster_client_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo" 5 | . "github.com/onsi/gomega" 6 | ) 7 | 8 | func (c *ClusterClient) SlotAddrs(slot int) []string { 9 | return c.slotAddrs(slot) 10 | } 11 | 12 | // SwapSlot swaps a slot's master/slave address 13 | // for testing MOVED redirects 14 | func (c *ClusterClient) SwapSlot(pos int) []string { 15 | c.slotsMx.Lock() 16 | defer c.slotsMx.Unlock() 17 | c.slots[pos][0], c.slots[pos][1] = c.slots[pos][1], c.slots[pos][0] 18 | return c.slots[pos] 19 | } 20 | 21 | var _ = Describe("ClusterClient", func() { 22 | var subject *ClusterClient 23 | 24 | var populate = func() { 25 | subject.setSlots([]ClusterSlotInfo{ 26 | {0, 4095, []string{"127.0.0.1:7000", "127.0.0.1:7004"}}, 27 | {12288, 16383, []string{"127.0.0.1:7003", "127.0.0.1:7007"}}, 28 | {4096, 8191, []string{"127.0.0.1:7001", "127.0.0.1:7005"}}, 29 | {8192, 12287, []string{"127.0.0.1:7002", "127.0.0.1:7006"}}, 30 | }) 31 | } 32 | 33 | BeforeEach(func() { 34 | subject = NewClusterClient(&ClusterOptions{ 35 | Addrs: []string{"127.0.0.1:6379", "127.0.0.1:7003", "127.0.0.1:7006"}, 36 | }) 37 | }) 38 | 39 | AfterEach(func() { 40 | _ = subject.Close() 41 | }) 42 | 43 | It("should initialize", func() { 44 | Expect(subject.addrs).To(HaveLen(3)) 45 | Expect(subject.slots).To(HaveLen(16384)) 46 | }) 47 | 48 | It("should update slots cache", func() { 49 | populate() 50 | Expect(subject.slots[0]).To(Equal([]string{"127.0.0.1:7000", "127.0.0.1:7004"})) 51 | Expect(subject.slots[4095]).To(Equal([]string{"127.0.0.1:7000", "127.0.0.1:7004"})) 52 | Expect(subject.slots[4096]).To(Equal([]string{"127.0.0.1:7001", "127.0.0.1:7005"})) 53 | Expect(subject.slots[8191]).To(Equal([]string{"127.0.0.1:7001", "127.0.0.1:7005"})) 54 | Expect(subject.slots[8192]).To(Equal([]string{"127.0.0.1:7002", "127.0.0.1:7006"})) 55 | Expect(subject.slots[12287]).To(Equal([]string{"127.0.0.1:7002", "127.0.0.1:7006"})) 56 | Expect(subject.slots[12288]).To(Equal([]string{"127.0.0.1:7003", "127.0.0.1:7007"})) 57 | Expect(subject.slots[16383]).To(Equal([]string{"127.0.0.1:7003", "127.0.0.1:7007"})) 58 | Expect(subject.addrs).To(Equal([]string{ 59 | "127.0.0.1:6379", 60 | "127.0.0.1:7003", 61 | "127.0.0.1:7006", 62 | "127.0.0.1:7000", 63 | "127.0.0.1:7004", 64 | "127.0.0.1:7007", 65 | "127.0.0.1:7001", 66 | "127.0.0.1:7005", 67 | "127.0.0.1:7002", 68 | })) 69 | }) 70 | 71 | It("should close", func() { 72 | populate() 73 | Expect(subject.Close()).NotTo(HaveOccurred()) 74 | Expect(subject.clients).To(BeEmpty()) 75 | Expect(subject.slots[0]).To(BeEmpty()) 76 | 
Expect(subject.slots[8191]).To(BeEmpty()) 77 | Expect(subject.slots[8192]).To(BeEmpty()) 78 | Expect(subject.slots[16383]).To(BeEmpty()) 79 | Expect(subject.Ping().Err().Error()).To(Equal("redis: client is closed")) 80 | }) 81 | }) 82 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/cluster_pipeline.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "gopkg.in/redis.v3/internal/hashtag" 5 | "gopkg.in/redis.v3/internal/pool" 6 | ) 7 | 8 | // ClusterPipeline is not thread-safe. 9 | type ClusterPipeline struct { 10 | commandable 11 | 12 | cluster *ClusterClient 13 | 14 | cmds []Cmder 15 | closed bool 16 | } 17 | 18 | // Pipeline creates a new pipeline which is able to execute commands 19 | // against multiple shards. It's NOT safe for concurrent use by 20 | // multiple goroutines. 21 | func (c *ClusterClient) Pipeline() *ClusterPipeline { 22 | pipe := &ClusterPipeline{ 23 | cluster: c, 24 | cmds: make([]Cmder, 0, 10), 25 | } 26 | pipe.commandable.process = pipe.process 27 | return pipe 28 | } 29 | 30 | func (pipe *ClusterPipeline) process(cmd Cmder) { 31 | pipe.cmds = append(pipe.cmds, cmd) 32 | } 33 | 34 | // Discard resets the pipeline and discards queued commands. 35 | func (pipe *ClusterPipeline) Discard() error { 36 | if pipe.closed { 37 | return pool.ErrClosed 38 | } 39 | pipe.cmds = pipe.cmds[:0] 40 | return nil 41 | } 42 | 43 | func (pipe *ClusterPipeline) Exec() (cmds []Cmder, retErr error) { 44 | if pipe.closed { 45 | return nil, pool.ErrClosed 46 | } 47 | if len(pipe.cmds) == 0 { 48 | return []Cmder{}, nil 49 | } 50 | 51 | cmds = pipe.cmds 52 | pipe.cmds = make([]Cmder, 0, 10) 53 | 54 | cmdsMap := make(map[string][]Cmder) 55 | for _, cmd := range cmds { 56 | slot := hashtag.Slot(cmd.clusterKey()) 57 | addr := pipe.cluster.slotMasterAddr(slot) 58 | cmdsMap[addr] = append(cmdsMap[addr], cmd) 59 | } 60 | 61 | for attempt := 0; attempt <= pipe.cluster.opt.getMaxRedirects(); attempt++ { 62 | failedCmds := make(map[string][]Cmder) 63 | 64 | for addr, cmds := range cmdsMap { 65 | client, err := pipe.cluster.getClient(addr) 66 | if err != nil { 67 | setCmdsErr(cmds, err) 68 | retErr = err 69 | continue 70 | } 71 | 72 | cn, err := client.conn() 73 | if err != nil { 74 | setCmdsErr(cmds, err) 75 | retErr = err 76 | continue 77 | } 78 | 79 | failedCmds, err = pipe.execClusterCmds(cn, cmds, failedCmds) 80 | if err != nil { 81 | retErr = err 82 | } 83 | client.putConn(cn, err, false) 84 | } 85 | 86 | cmdsMap = failedCmds 87 | } 88 | 89 | return cmds, retErr 90 | } 91 | 92 | // Close closes the pipeline, releasing any open resources. 93 | func (pipe *ClusterPipeline) Close() error { 94 | pipe.Discard() 95 | pipe.closed = true 96 | return nil 97 | } 98 | 99 | func (pipe *ClusterPipeline) execClusterCmds( 100 | cn *pool.Conn, cmds []Cmder, failedCmds map[string][]Cmder, 101 | ) (map[string][]Cmder, error) { 102 | if err := writeCmd(cn, cmds...); err != nil { 103 | setCmdsErr(cmds, err) 104 | return failedCmds, err 105 | } 106 | 107 | var firstCmdErr error 108 | for i, cmd := range cmds { 109 | err := cmd.readReply(cn) 110 | if err == nil { 111 | continue 112 | } 113 | if isNetworkError(err) { 114 | cmd.reset() 115 | failedCmds[""] = append(failedCmds[""], cmds[i:]...) 
116 | break 117 | } else if moved, ask, addr := isMovedError(err); moved { 118 | pipe.cluster.lazyReloadSlots() 119 | cmd.reset() 120 | failedCmds[addr] = append(failedCmds[addr], cmd) 121 | } else if ask { 122 | cmd.reset() 123 | failedCmds[addr] = append(failedCmds[addr], NewCmd("ASKING"), cmd) 124 | } else if firstCmdErr == nil { 125 | firstCmdErr = err 126 | } 127 | } 128 | 129 | return failedCmds, firstCmdErr 130 | } 131 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/cluster_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "net" 7 | "reflect" 8 | "strconv" 9 | "strings" 10 | "sync" 11 | 12 | "testing" 13 | "time" 14 | 15 | . "github.com/onsi/ginkgo" 16 | . "github.com/onsi/gomega" 17 | 18 | "gopkg.in/redis.v3" 19 | "gopkg.in/redis.v3/internal/hashtag" 20 | ) 21 | 22 | type clusterScenario struct { 23 | ports []string 24 | nodeIds []string 25 | processes map[string]*redisProcess 26 | clients map[string]*redis.Client 27 | } 28 | 29 | func (s *clusterScenario) primary() *redis.Client { 30 | return s.clients[s.ports[0]] 31 | } 32 | 33 | func (s *clusterScenario) masters() []*redis.Client { 34 | result := make([]*redis.Client, 3) 35 | for pos, port := range s.ports[:3] { 36 | result[pos] = s.clients[port] 37 | } 38 | return result 39 | } 40 | 41 | func (s *clusterScenario) slaves() []*redis.Client { 42 | result := make([]*redis.Client, 3) 43 | for pos, port := range s.ports[3:] { 44 | result[pos] = s.clients[port] 45 | } 46 | return result 47 | } 48 | 49 | func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.ClusterClient { 50 | addrs := make([]string, len(s.ports)) 51 | for i, port := range s.ports { 52 | addrs[i] = net.JoinHostPort("127.0.0.1", port) 53 | } 54 | if opt == nil { 55 | opt = &redis.ClusterOptions{ 56 | DialTimeout: 10 * time.Second, 57 | ReadTimeout: 30 * time.Second, 58 | WriteTimeout: 30 * time.Second, 59 | PoolSize: 10, 60 | PoolTimeout: 30 * time.Second, 61 | IdleTimeout: time.Second, 62 | IdleCheckFrequency: time.Second, 63 | } 64 | } 65 | opt.Addrs = addrs 66 | return redis.NewClusterClient(opt) 67 | } 68 | 69 | func startCluster(scenario *clusterScenario) error { 70 | // Start processes and collect node ids 71 | for pos, port := range scenario.ports { 72 | process, err := startRedis(port, "--cluster-enabled", "yes") 73 | if err != nil { 74 | return err 75 | } 76 | 77 | client := redis.NewClient(&redis.Options{ 78 | Addr: ":" + port, 79 | }) 80 | 81 | info, err := client.ClusterNodes().Result() 82 | if err != nil { 83 | return err 84 | } 85 | 86 | scenario.processes[port] = process 87 | scenario.clients[port] = client 88 | scenario.nodeIds[pos] = info[:40] 89 | } 90 | 91 | // Meet cluster nodes 92 | for _, client := range scenario.clients { 93 | err := client.ClusterMeet("127.0.0.1", scenario.ports[0]).Err() 94 | if err != nil { 95 | return err 96 | } 97 | } 98 | 99 | // Bootstrap masters 100 | slots := []int{0, 5000, 10000, 16384} 101 | for pos, master := range scenario.masters() { 102 | err := master.ClusterAddSlotsRange(slots[pos], slots[pos+1]-1).Err() 103 | if err != nil { 104 | return err 105 | } 106 | } 107 | 108 | // Bootstrap slaves 109 | for idx, slave := range scenario.slaves() { 110 | masterId := scenario.nodeIds[idx] 111 | 112 | // Wait until master is available 113 | err := eventually(func() error { 114 | s := slave.ClusterNodes().Val() 115 | wanted := masterId 
116 | if !strings.Contains(s, wanted) { 117 | return fmt.Errorf("%q does not contain %q", s, wanted) 118 | } 119 | return nil 120 | }, 10*time.Second) 121 | if err != nil { 122 | return err 123 | } 124 | 125 | err = slave.ClusterReplicate(masterId).Err() 126 | if err != nil { 127 | return err 128 | } 129 | } 130 | 131 | // Wait until all nodes have consistent info 132 | for _, client := range scenario.clients { 133 | err := eventually(func() error { 134 | res, err := client.ClusterSlots().Result() 135 | if err != nil { 136 | return err 137 | } 138 | wanted := []redis.ClusterSlotInfo{ 139 | {0, 4999, []string{"127.0.0.1:8220", "127.0.0.1:8223"}}, 140 | {5000, 9999, []string{"127.0.0.1:8221", "127.0.0.1:8224"}}, 141 | {10000, 16383, []string{"127.0.0.1:8222", "127.0.0.1:8225"}}, 142 | } 143 | loop: 144 | for _, info := range res { 145 | for _, info2 := range wanted { 146 | if reflect.DeepEqual(info, info2) { 147 | continue loop 148 | } 149 | } 150 | return fmt.Errorf("cluster did not reach consistent state (%v)", res) 151 | } 152 | return nil 153 | }, 30*time.Second) 154 | if err != nil { 155 | return err 156 | } 157 | } 158 | 159 | return nil 160 | } 161 | 162 | func stopCluster(scenario *clusterScenario) error { 163 | for _, client := range scenario.clients { 164 | if err := client.Close(); err != nil { 165 | return err 166 | } 167 | } 168 | for _, process := range scenario.processes { 169 | if err := process.Close(); err != nil { 170 | return err 171 | } 172 | } 173 | return nil 174 | } 175 | 176 | //------------------------------------------------------------------------------ 177 | 178 | var _ = Describe("Cluster", func() { 179 | Describe("HashSlot", func() { 180 | 181 | It("should calculate hash slots", func() { 182 | tests := []struct { 183 | key string 184 | slot int 185 | }{ 186 | {"123456789", 12739}, 187 | {"{}foo", 9500}, 188 | {"foo{}", 5542}, 189 | {"foo{}{bar}", 8363}, 190 | {"", 10503}, 191 | {"", 5176}, 192 | {string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 5463}, 193 | } 194 | rand.Seed(100) 195 | 196 | for _, test := range tests { 197 | Expect(hashtag.Slot(test.key)).To(Equal(test.slot), "for %s", test.key) 198 | } 199 | }) 200 | 201 | It("should extract keys from tags", func() { 202 | tests := []struct { 203 | one, two string 204 | }{ 205 | {"foo{bar}", "bar"}, 206 | {"{foo}bar", "foo"}, 207 | {"{user1000}.following", "{user1000}.followers"}, 208 | {"foo{{bar}}zap", "{bar"}, 209 | {"foo{bar}{zap}", "bar"}, 210 | } 211 | 212 | for _, test := range tests { 213 | Expect(hashtag.Slot(test.one)).To(Equal(hashtag.Slot(test.two)), "for %s <-> %s", test.one, test.two) 214 | } 215 | }) 216 | 217 | }) 218 | 219 | Describe("Commands", func() { 220 | 221 | It("should CLUSTER SLOTS", func() { 222 | res, err := cluster.primary().ClusterSlots().Result() 223 | Expect(err).NotTo(HaveOccurred()) 224 | Expect(res).To(HaveLen(3)) 225 | Expect(res).To(ConsistOf([]redis.ClusterSlotInfo{ 226 | {0, 4999, []string{"127.0.0.1:8220", "127.0.0.1:8223"}}, 227 | {5000, 9999, []string{"127.0.0.1:8221", "127.0.0.1:8224"}}, 228 | {10000, 16383, []string{"127.0.0.1:8222", "127.0.0.1:8225"}}, 229 | })) 230 | }) 231 | 232 | It("should CLUSTER NODES", func() { 233 | res, err := cluster.primary().ClusterNodes().Result() 234 | Expect(err).NotTo(HaveOccurred()) 235 | Expect(len(res)).To(BeNumerically(">", 400)) 236 | }) 237 | 238 | It("should CLUSTER INFO", func() { 239 | res, err := cluster.primary().ClusterInfo().Result() 240 | Expect(err).NotTo(HaveOccurred()) 241 | 
Expect(res).To(ContainSubstring("cluster_known_nodes:6")) 242 | }) 243 | 244 | It("should CLUSTER KEYSLOT", func() { 245 | hashSlot, err := cluster.primary().ClusterKeySlot("somekey").Result() 246 | Expect(err).NotTo(HaveOccurred()) 247 | Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey")))) 248 | }) 249 | 250 | It("should CLUSTER COUNT-FAILURE-REPORTS", func() { 251 | n, err := cluster.primary().ClusterCountFailureReports(cluster.nodeIds[0]).Result() 252 | Expect(err).NotTo(HaveOccurred()) 253 | Expect(n).To(Equal(int64(0))) 254 | }) 255 | 256 | It("should CLUSTER COUNTKEYSINSLOT", func() { 257 | n, err := cluster.primary().ClusterCountKeysInSlot(10).Result() 258 | Expect(err).NotTo(HaveOccurred()) 259 | Expect(n).To(Equal(int64(0))) 260 | }) 261 | 262 | It("should CLUSTER DELSLOTS", func() { 263 | res, err := cluster.primary().ClusterDelSlotsRange(16000, 16384-1).Result() 264 | Expect(err).NotTo(HaveOccurred()) 265 | Expect(res).To(Equal("OK")) 266 | cluster.primary().ClusterAddSlotsRange(16000, 16384-1) 267 | }) 268 | 269 | It("should CLUSTER SAVECONFIG", func() { 270 | res, err := cluster.primary().ClusterSaveConfig().Result() 271 | Expect(err).NotTo(HaveOccurred()) 272 | Expect(res).To(Equal("OK")) 273 | }) 274 | 275 | It("should CLUSTER SLAVES", func() { 276 | nodesList, err := cluster.primary().ClusterSlaves(cluster.nodeIds[0]).Result() 277 | Expect(err).NotTo(HaveOccurred()) 278 | Expect(nodesList).Should(ContainElement(ContainSubstring("slave"))) 279 | Expect(nodesList).Should(HaveLen(1)) 280 | }) 281 | 282 | It("should CLUSTER READONLY", func() { 283 | res, err := cluster.primary().Readonly().Result() 284 | Expect(err).NotTo(HaveOccurred()) 285 | Expect(res).To(Equal("OK")) 286 | }) 287 | 288 | It("should CLUSTER READWRITE", func() { 289 | res, err := cluster.primary().ReadWrite().Result() 290 | Expect(err).NotTo(HaveOccurred()) 291 | Expect(res).To(Equal("OK")) 292 | }) 293 | }) 294 | 295 | Describe("Client", func() { 296 | var client *redis.ClusterClient 297 | 298 | BeforeEach(func() { 299 | client = cluster.clusterClient(nil) 300 | }) 301 | 302 | AfterEach(func() { 303 | for _, client := range cluster.masters() { 304 | Expect(client.FlushDb().Err()).NotTo(HaveOccurred()) 305 | } 306 | Expect(client.Close()).NotTo(HaveOccurred()) 307 | }) 308 | 309 | It("should GET/SET/DEL", func() { 310 | val, err := client.Get("A").Result() 311 | Expect(err).To(Equal(redis.Nil)) 312 | Expect(val).To(Equal("")) 313 | 314 | val, err = client.Set("A", "VALUE", 0).Result() 315 | Expect(err).NotTo(HaveOccurred()) 316 | Expect(val).To(Equal("OK")) 317 | 318 | val, err = client.Get("A").Result() 319 | Expect(err).NotTo(HaveOccurred()) 320 | Expect(val).To(Equal("VALUE")) 321 | 322 | cnt, err := client.Del("A").Result() 323 | Expect(err).NotTo(HaveOccurred()) 324 | Expect(cnt).To(Equal(int64(1))) 325 | }) 326 | 327 | It("should return pool stats", func() { 328 | Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{})) 329 | }) 330 | 331 | It("should follow redirects", func() { 332 | Expect(client.Set("A", "VALUE", 0).Err()).NotTo(HaveOccurred()) 333 | 334 | slot := hashtag.Slot("A") 335 | Expect(client.SwapSlot(slot)).To(Equal([]string{"127.0.0.1:8224", "127.0.0.1:8221"})) 336 | 337 | val, err := client.Get("A").Result() 338 | Expect(err).NotTo(HaveOccurred()) 339 | Expect(val).To(Equal("VALUE")) 340 | 341 | Eventually(func() []string { 342 | return client.SlotAddrs(slot) 343 | }, "5s").Should(Equal([]string{"127.0.0.1:8221", "127.0.0.1:8224"})) 344 | }) 345 | 346 | It("should 
perform multi-pipelines", func() { 347 | slot := hashtag.Slot("A") 348 | Expect(client.SwapSlot(slot)).To(Equal([]string{"127.0.0.1:8224", "127.0.0.1:8221"})) 349 | 350 | pipe := client.Pipeline() 351 | defer pipe.Close() 352 | 353 | keys := []string{"A", "B", "C", "D", "E", "F", "G"} 354 | for i, key := range keys { 355 | pipe.Set(key, key+"_value", 0) 356 | pipe.Expire(key, time.Duration(i+1)*time.Hour) 357 | } 358 | for _, key := range keys { 359 | pipe.Get(key) 360 | pipe.TTL(key) 361 | } 362 | 363 | cmds, err := pipe.Exec() 364 | Expect(err).NotTo(HaveOccurred()) 365 | Expect(cmds).To(HaveLen(28)) 366 | Expect(cmds[14].(*redis.StringCmd).Val()).To(Equal("A_value")) 367 | Expect(cmds[15].(*redis.DurationCmd).Val()).To(BeNumerically("~", 1*time.Hour, time.Second)) 368 | Expect(cmds[20].(*redis.StringCmd).Val()).To(Equal("D_value")) 369 | Expect(cmds[21].(*redis.DurationCmd).Val()).To(BeNumerically("~", 4*time.Hour, time.Second)) 370 | Expect(cmds[26].(*redis.StringCmd).Val()).To(Equal("G_value")) 371 | Expect(cmds[27].(*redis.DurationCmd).Val()).To(BeNumerically("~", 7*time.Hour, time.Second)) 372 | }) 373 | 374 | It("should return error when there are no attempts left", func() { 375 | Expect(client.Close()).NotTo(HaveOccurred()) 376 | client = cluster.clusterClient(&redis.ClusterOptions{ 377 | MaxRedirects: -1, 378 | }) 379 | 380 | slot := hashtag.Slot("A") 381 | Expect(client.SwapSlot(slot)).To(Equal([]string{"127.0.0.1:8224", "127.0.0.1:8221"})) 382 | 383 | err := client.Get("A").Err() 384 | Expect(err).To(HaveOccurred()) 385 | Expect(err.Error()).To(ContainSubstring("MOVED")) 386 | }) 387 | 388 | It("should Watch", func() { 389 | var incr func(string) error 390 | 391 | // Transactionally increments key using GET and SET commands. 392 | incr = func(key string) error { 393 | tx, err := client.Watch(key) 394 | if err != nil { 395 | return err 396 | } 397 | defer tx.Close() 398 | 399 | n, err := tx.Get(key).Int64() 400 | if err != nil && err != redis.Nil { 401 | return err 402 | } 403 | 404 | _, err = tx.Exec(func() error { 405 | tx.Set(key, strconv.FormatInt(n+1, 10), 0) 406 | return nil 407 | }) 408 | if err == redis.TxFailedErr { 409 | return incr(key) 410 | } 411 | return err 412 | } 413 | 414 | var wg sync.WaitGroup 415 | for i := 0; i < 100; i++ { 416 | wg.Add(1) 417 | go func() { 418 | defer wg.Done() 419 | 420 | err := incr("key") 421 | Expect(err).NotTo(HaveOccurred()) 422 | }() 423 | } 424 | wg.Wait() 425 | 426 | n, err := client.Get("key").Int64() 427 | Expect(err).NotTo(HaveOccurred()) 428 | Expect(n).To(Equal(int64(100))) 429 | }) 430 | }) 431 | }) 432 | 433 | //------------------------------------------------------------------------------ 434 | 435 | func BenchmarkRedisClusterPing(b *testing.B) { 436 | if testing.Short() { 437 | b.Skip("skipping in short mode") 438 | } 439 | 440 | cluster := &clusterScenario{ 441 | ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"}, 442 | nodeIds: make([]string, 6), 443 | processes: make(map[string]*redisProcess, 6), 444 | clients: make(map[string]*redis.Client, 6), 445 | } 446 | if err := startCluster(cluster); err != nil { 447 | b.Fatal(err) 448 | } 449 | defer stopCluster(cluster) 450 | client := cluster.clusterClient(nil) 451 | defer client.Close() 452 | 453 | b.ResetTimer() 454 | 455 | b.RunParallel(func(pb *testing.PB) { 456 | for pb.Next() { 457 | if err := client.Ping().Err(); err != nil { 458 | b.Fatal(err) 459 | } 460 | } 461 | }) 462 | } 463 | 
-------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/command_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo" 5 | . "github.com/onsi/gomega" 6 | 7 | "gopkg.in/redis.v3" 8 | ) 9 | 10 | var _ = Describe("Command", func() { 11 | var client *redis.Client 12 | 13 | BeforeEach(func() { 14 | client = redis.NewClient(redisOptions()) 15 | Expect(client.FlushDb().Err()).NotTo(HaveOccurred()) 16 | }) 17 | 18 | AfterEach(func() { 19 | Expect(client.Close()).NotTo(HaveOccurred()) 20 | }) 21 | 22 | It("should implement Stringer", func() { 23 | set := client.Set("foo", "bar", 0) 24 | Expect(set.String()).To(Equal("SET foo bar: OK")) 25 | 26 | get := client.Get("foo") 27 | Expect(get.String()).To(Equal("GET foo: bar")) 28 | }) 29 | 30 | It("should have correct val/err states", func() { 31 | set := client.Set("key", "hello", 0) 32 | Expect(set.Err()).NotTo(HaveOccurred()) 33 | Expect(set.Val()).To(Equal("OK")) 34 | 35 | get := client.Get("key") 36 | Expect(get.Err()).NotTo(HaveOccurred()) 37 | Expect(get.Val()).To(Equal("hello")) 38 | 39 | Expect(set.Err()).NotTo(HaveOccurred()) 40 | Expect(set.Val()).To(Equal("OK")) 41 | }) 42 | 43 | It("should convert strings via helpers", func() { 44 | set := client.Set("key", "10", 0) 45 | Expect(set.Err()).NotTo(HaveOccurred()) 46 | 47 | n, err := client.Get("key").Int64() 48 | Expect(err).NotTo(HaveOccurred()) 49 | Expect(n).To(Equal(int64(10))) 50 | 51 | un, err := client.Get("key").Uint64() 52 | Expect(err).NotTo(HaveOccurred()) 53 | Expect(un).To(Equal(uint64(10))) 54 | 55 | f, err := client.Get("key").Float64() 56 | Expect(err).NotTo(HaveOccurred()) 57 | Expect(f).To(Equal(float64(10))) 58 | }) 59 | 60 | }) 61 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package redis implements a Redis client. 3 | */ 4 | package redis 5 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/error.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net" 7 | "strings" 8 | ) 9 | 10 | // Redis nil reply, .e.g. when key does not exist. 11 | var Nil = errorf("redis: nil") 12 | 13 | // Redis transaction failed. 
14 | var TxFailedErr = errorf("redis: transaction failed") 15 | 16 | type redisError struct { 17 | s string 18 | } 19 | 20 | func errorf(s string, args ...interface{}) redisError { 21 | return redisError{s: fmt.Sprintf(s, args...)} 22 | } 23 | 24 | func (err redisError) Error() string { 25 | return err.s 26 | } 27 | 28 | func isNetworkError(err error) bool { 29 | if err == io.EOF { 30 | return true 31 | } 32 | _, ok := err.(net.Error) 33 | return ok 34 | } 35 | 36 | func isBadConn(err error, allowTimeout bool) bool { 37 | if err == nil { 38 | return false 39 | } 40 | if _, ok := err.(redisError); ok { 41 | return false 42 | } 43 | if allowTimeout { 44 | if netErr, ok := err.(net.Error); ok && netErr.Timeout() { 45 | return false 46 | } 47 | } 48 | return true 49 | } 50 | 51 | func isMovedError(err error) (moved bool, ask bool, addr string) { 52 | if _, ok := err.(redisError); !ok { 53 | return 54 | } 55 | 56 | parts := strings.SplitN(err.Error(), " ", 3) 57 | if len(parts) != 3 { 58 | return 59 | } 60 | 61 | switch parts[0] { 62 | case "MOVED": 63 | moved = true 64 | addr = parts[2] 65 | case "ASK": 66 | ask = true 67 | addr = parts[2] 68 | } 69 | 70 | return 71 | } 72 | 73 | // shouldRetry reports whether failed command should be retried. 74 | func shouldRetry(err error) bool { 75 | if err == nil { 76 | return false 77 | } 78 | return isNetworkError(err) 79 | } 80 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/example_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "sync" 7 | "time" 8 | 9 | "gopkg.in/redis.v3" 10 | ) 11 | 12 | var client *redis.Client 13 | 14 | func init() { 15 | opt := redisOptions() 16 | opt.Addr = ":6379" 17 | client = redis.NewClient(opt) 18 | client.FlushDb() 19 | } 20 | 21 | func ExampleNewClient() { 22 | client := redis.NewClient(&redis.Options{ 23 | Addr: "localhost:6379", 24 | Password: "", // no password set 25 | DB: 0, // use default DB 26 | }) 27 | 28 | pong, err := client.Ping().Result() 29 | fmt.Println(pong, err) 30 | // Output: PONG 31 | } 32 | 33 | func ExampleNewFailoverClient() { 34 | // See http://redis.io/topics/sentinel for instructions how to 35 | // setup Redis Sentinel. 36 | client := redis.NewFailoverClient(&redis.FailoverOptions{ 37 | MasterName: "master", 38 | SentinelAddrs: []string{":26379"}, 39 | }) 40 | client.Ping() 41 | } 42 | 43 | func ExampleNewClusterClient() { 44 | // See http://redis.io/topics/cluster-tutorial for instructions 45 | // how to setup Redis Cluster. 
46 | client := redis.NewClusterClient(&redis.ClusterOptions{ 47 | Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"}, 48 | }) 49 | client.Ping() 50 | } 51 | 52 | func ExampleNewRing() { 53 | client := redis.NewRing(&redis.RingOptions{ 54 | Addrs: map[string]string{ 55 | "shard1": ":7000", 56 | "shard2": ":7001", 57 | "shard3": ":7002", 58 | }, 59 | }) 60 | client.Ping() 61 | } 62 | 63 | func ExampleClient() { 64 | err := client.Set("key", "value", 0).Err() 65 | if err != nil { 66 | panic(err) 67 | } 68 | 69 | val, err := client.Get("key").Result() 70 | if err != nil { 71 | panic(err) 72 | } 73 | fmt.Println("key", val) 74 | 75 | val2, err := client.Get("key2").Result() 76 | if err == redis.Nil { 77 | fmt.Println("key2 does not exists") 78 | } else if err != nil { 79 | panic(err) 80 | } else { 81 | fmt.Println("key2", val2) 82 | } 83 | // Output: key value 84 | // key2 does not exists 85 | } 86 | 87 | func ExampleClient_Set() { 88 | // Last argument is expiration. Zero means the key has no 89 | // expiration time. 90 | err := client.Set("key", "value", 0).Err() 91 | if err != nil { 92 | panic(err) 93 | } 94 | 95 | // key2 will expire in an hour. 96 | err = client.Set("key2", "value", time.Hour).Err() 97 | if err != nil { 98 | panic(err) 99 | } 100 | } 101 | 102 | func ExampleClient_Incr() { 103 | if err := client.Incr("counter").Err(); err != nil { 104 | panic(err) 105 | } 106 | 107 | n, err := client.Get("counter").Int64() 108 | fmt.Println(n, err) 109 | // Output: 1 110 | } 111 | 112 | func ExampleClient_BLPop() { 113 | if err := client.RPush("queue", "message").Err(); err != nil { 114 | panic(err) 115 | } 116 | 117 | // use `client.BLPop(0, "queue")` for infinite waiting time 118 | result, err := client.BLPop(1*time.Second, "queue").Result() 119 | if err != nil { 120 | panic(err) 121 | } 122 | 123 | fmt.Println(result[0], result[1]) 124 | // Output: queue message 125 | } 126 | 127 | func ExampleClient_Scan() { 128 | client.FlushDb() 129 | for i := 0; i < 33; i++ { 130 | err := client.Set(fmt.Sprintf("key%d", i), "value", 0).Err() 131 | if err != nil { 132 | panic(err) 133 | } 134 | } 135 | 136 | var cursor int64 137 | var n int 138 | for { 139 | var keys []string 140 | var err error 141 | cursor, keys, err = client.Scan(cursor, "", 10).Result() 142 | if err != nil { 143 | panic(err) 144 | } 145 | n += len(keys) 146 | if cursor == 0 { 147 | break 148 | } 149 | } 150 | 151 | fmt.Printf("found %d keys\n", n) 152 | // Output: found 33 keys 153 | } 154 | 155 | func ExampleClient_Pipelined() { 156 | var incr *redis.IntCmd 157 | _, err := client.Pipelined(func(pipe *redis.Pipeline) error { 158 | incr = pipe.Incr("counter1") 159 | pipe.Expire("counter1", time.Hour) 160 | return nil 161 | }) 162 | fmt.Println(incr.Val(), err) 163 | // Output: 1 164 | } 165 | 166 | func ExamplePipeline() { 167 | pipe := client.Pipeline() 168 | defer pipe.Close() 169 | 170 | incr := pipe.Incr("counter2") 171 | pipe.Expire("counter2", time.Hour) 172 | _, err := pipe.Exec() 173 | fmt.Println(incr.Val(), err) 174 | // Output: 1 175 | } 176 | 177 | func ExampleClient_Watch() { 178 | var incr func(string) error 179 | 180 | // Transactionally increments key using GET and SET commands. 
181 | incr = func(key string) error { 182 | tx, err := client.Watch(key) 183 | if err != nil { 184 | return err 185 | } 186 | defer tx.Close() 187 | 188 | n, err := tx.Get(key).Int64() 189 | if err != nil && err != redis.Nil { 190 | return err 191 | } 192 | 193 | _, err = tx.Exec(func() error { 194 | tx.Set(key, strconv.FormatInt(n+1, 10), 0) 195 | return nil 196 | }) 197 | if err == redis.TxFailedErr { 198 | return incr(key) 199 | } 200 | return err 201 | } 202 | 203 | var wg sync.WaitGroup 204 | for i := 0; i < 100; i++ { 205 | wg.Add(1) 206 | go func() { 207 | defer wg.Done() 208 | 209 | err := incr("counter3") 210 | if err != nil { 211 | panic(err) 212 | } 213 | }() 214 | } 215 | wg.Wait() 216 | 217 | n, err := client.Get("counter3").Int64() 218 | fmt.Println(n, err) 219 | // Output: 100 220 | } 221 | 222 | func ExamplePubSub() { 223 | pubsub, err := client.Subscribe("mychannel1") 224 | if err != nil { 225 | panic(err) 226 | } 227 | defer pubsub.Close() 228 | 229 | err = client.Publish("mychannel1", "hello").Err() 230 | if err != nil { 231 | panic(err) 232 | } 233 | 234 | msg, err := pubsub.ReceiveMessage() 235 | if err != nil { 236 | panic(err) 237 | } 238 | 239 | fmt.Println(msg.Channel, msg.Payload) 240 | // Output: mychannel1 hello 241 | } 242 | 243 | func ExamplePubSub_Receive() { 244 | pubsub, err := client.Subscribe("mychannel2") 245 | if err != nil { 246 | panic(err) 247 | } 248 | defer pubsub.Close() 249 | 250 | n, err := client.Publish("mychannel2", "hello").Result() 251 | if err != nil { 252 | panic(err) 253 | } 254 | fmt.Println(n, "clients received message") 255 | 256 | for i := 0; i < 2; i++ { 257 | // ReceiveTimeout is a low level API. Use ReceiveMessage instead. 258 | msgi, err := pubsub.ReceiveTimeout(5 * time.Second) 259 | if err != nil { 260 | break 261 | } 262 | 263 | switch msg := msgi.(type) { 264 | case *redis.Subscription: 265 | fmt.Println("subscribed to", msg.Channel) 266 | case *redis.Message: 267 | fmt.Println("received", msg.Payload, "from", msg.Channel) 268 | default: 269 | panic(fmt.Errorf("unknown message: %#v", msgi)) 270 | } 271 | } 272 | 273 | // Output: 1 clients received message 274 | // subscribed to mychannel2 275 | // received hello from mychannel2 276 | } 277 | 278 | func ExampleScript() { 279 | IncrByXX := redis.NewScript(` 280 | if redis.call("GET", KEYS[1]) ~= false then 281 | return redis.call("INCRBY", KEYS[1], ARGV[1]) 282 | end 283 | return false 284 | `) 285 | 286 | n, err := IncrByXX.Run(client, []string{"xx_counter"}, []string{"2"}).Result() 287 | fmt.Println(n, err) 288 | 289 | err = client.Set("xx_counter", "40", 0).Err() 290 | if err != nil { 291 | panic(err) 292 | } 293 | 294 | n, err = IncrByXX.Run(client, []string{"xx_counter"}, []string{"2"}).Result() 295 | fmt.Println(n, err) 296 | 297 | // Output: redis: nil 298 | // 42 299 | } 300 | 301 | func Example_customCommand() { 302 | Get := func(client *redis.Client, key string) *redis.StringCmd { 303 | cmd := redis.NewStringCmd("GET", key) 304 | client.Process(cmd) 305 | return cmd 306 | } 307 | 308 | v, err := Get(client, "key_does_not_exist").Result() 309 | fmt.Printf("%q %s", v, err) 310 | // Output: "" redis: nil 311 | } 312 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/export_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "time" 5 | 6 | "gopkg.in/redis.v3/internal/pool" 7 | ) 8 | 9 | func (c *baseClient) Pool() pool.Pooler { 10 | return 
c.connPool 11 | } 12 | 13 | func (c *PubSub) Pool() pool.Pooler { 14 | return c.base.connPool 15 | } 16 | 17 | func SetReceiveMessageTimeout(d time.Duration) { 18 | receiveMessageTimeout = d 19 | } 20 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/consistenthash/consistenthash.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2013 Google Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package consistenthash provides an implementation of a ring hash. 18 | package consistenthash 19 | 20 | import ( 21 | "hash/crc32" 22 | "sort" 23 | "strconv" 24 | ) 25 | 26 | type Hash func(data []byte) uint32 27 | 28 | type Map struct { 29 | hash Hash 30 | replicas int 31 | keys []int // Sorted 32 | hashMap map[int]string 33 | } 34 | 35 | func New(replicas int, fn Hash) *Map { 36 | m := &Map{ 37 | replicas: replicas, 38 | hash: fn, 39 | hashMap: make(map[int]string), 40 | } 41 | if m.hash == nil { 42 | m.hash = crc32.ChecksumIEEE 43 | } 44 | return m 45 | } 46 | 47 | // Returns true if there are no items available. 48 | func (m *Map) IsEmpty() bool { 49 | return len(m.keys) == 0 50 | } 51 | 52 | // Adds some keys to the hash. 53 | func (m *Map) Add(keys ...string) { 54 | for _, key := range keys { 55 | for i := 0; i < m.replicas; i++ { 56 | hash := int(m.hash([]byte(strconv.Itoa(i) + key))) 57 | m.keys = append(m.keys, hash) 58 | m.hashMap[hash] = key 59 | } 60 | } 61 | sort.Ints(m.keys) 62 | } 63 | 64 | // Gets the closest item in the hash to the provided key. 65 | func (m *Map) Get(key string) string { 66 | if m.IsEmpty() { 67 | return "" 68 | } 69 | 70 | hash := int(m.hash([]byte(key))) 71 | 72 | // Binary search for appropriate replica. 73 | idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash }) 74 | 75 | // Means we have cycled back to the first replica. 76 | if idx == len(m.keys) { 77 | idx = 0 78 | } 79 | 80 | return m.hashMap[m.keys[idx]] 81 | } 82 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/consistenthash/consistenthash_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2013 Google Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package consistenthash 18 | 19 | import ( 20 | "fmt" 21 | "strconv" 22 | "testing" 23 | ) 24 | 25 | func TestHashing(t *testing.T) { 26 | 27 | // Override the hash function to return easier to reason about values. Assumes 28 | // the keys can be converted to an integer. 29 | hash := New(3, func(key []byte) uint32 { 30 | i, err := strconv.Atoi(string(key)) 31 | if err != nil { 32 | panic(err) 33 | } 34 | return uint32(i) 35 | }) 36 | 37 | // Given the above hash function, this will give replicas with "hashes": 38 | // 2, 4, 6, 12, 14, 16, 22, 24, 26 39 | hash.Add("6", "4", "2") 40 | 41 | testCases := map[string]string{ 42 | "2": "2", 43 | "11": "2", 44 | "23": "4", 45 | "27": "2", 46 | } 47 | 48 | for k, v := range testCases { 49 | if hash.Get(k) != v { 50 | t.Errorf("Asking for %s, should have yielded %s", k, v) 51 | } 52 | } 53 | 54 | // Adds 8, 18, 28 55 | hash.Add("8") 56 | 57 | // 27 should now map to 8. 58 | testCases["27"] = "8" 59 | 60 | for k, v := range testCases { 61 | if hash.Get(k) != v { 62 | t.Errorf("Asking for %s, should have yielded %s", k, v) 63 | } 64 | } 65 | 66 | } 67 | 68 | func TestConsistency(t *testing.T) { 69 | hash1 := New(1, nil) 70 | hash2 := New(1, nil) 71 | 72 | hash1.Add("Bill", "Bob", "Bonny") 73 | hash2.Add("Bob", "Bonny", "Bill") 74 | 75 | if hash1.Get("Ben") != hash2.Get("Ben") { 76 | t.Errorf("Fetching 'Ben' from both hashes should be the same") 77 | } 78 | 79 | hash2.Add("Becky", "Ben", "Bobby") 80 | 81 | if hash1.Get("Ben") != hash2.Get("Ben") || 82 | hash1.Get("Bob") != hash2.Get("Bob") || 83 | hash1.Get("Bonny") != hash2.Get("Bonny") { 84 | t.Errorf("Direct matches should always return the same entry") 85 | } 86 | 87 | } 88 | 89 | func BenchmarkGet8(b *testing.B) { benchmarkGet(b, 8) } 90 | func BenchmarkGet32(b *testing.B) { benchmarkGet(b, 32) } 91 | func BenchmarkGet128(b *testing.B) { benchmarkGet(b, 128) } 92 | func BenchmarkGet512(b *testing.B) { benchmarkGet(b, 512) } 93 | 94 | func benchmarkGet(b *testing.B, shards int) { 95 | 96 | hash := New(50, nil) 97 | 98 | var buckets []string 99 | for i := 0; i < shards; i++ { 100 | buckets = append(buckets, fmt.Sprintf("shard-%d", i)) 101 | } 102 | 103 | hash.Add(buckets...) 104 | 105 | b.ResetTimer() 106 | 107 | for i := 0; i < b.N; i++ { 108 | hash.Get(buckets[i&(shards-1)]) 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/hashtag/hashtag.go: -------------------------------------------------------------------------------- 1 | package hashtag 2 | 3 | import ( 4 | "math/rand" 5 | "strings" 6 | ) 7 | 8 | const SlotNumber = 16384 9 | 10 | // CRC16 implementation according to CCITT standards. 11 | // Copyright 2001-2010 Georges Menie (www.menie.org) 12 | // Copyright 2013 The Go Authors. All rights reserved. 
13 | // http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c 14 | var crc16tab = [256]uint16{ 15 | 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 16 | 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, 17 | 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, 18 | 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, 19 | 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, 20 | 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, 21 | 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, 22 | 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, 23 | 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, 24 | 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, 25 | 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, 26 | 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, 27 | 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, 28 | 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, 29 | 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, 30 | 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, 31 | 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, 32 | 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, 33 | 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, 34 | 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, 35 | 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, 36 | 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 37 | 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, 38 | 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, 39 | 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, 40 | 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, 41 | 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, 42 | 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, 43 | 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, 44 | 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, 45 | 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, 46 | 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, 47 | } 48 | 49 | func Key(key string) string { 50 | if s := strings.IndexByte(key, '{'); s > -1 { 51 | if e := strings.IndexByte(key[s+1:], '}'); e > 0 { 52 | return key[s+1 : s+e+1] 53 | } 54 | } 55 | return key 56 | } 57 | 58 | // hashSlot returns a consistent slot number between 0 and 16383 59 | // for any given string key. 60 | func Slot(key string) int { 61 | key = Key(key) 62 | if key == "" { 63 | return rand.Intn(SlotNumber) 64 | } 65 | return int(crc16sum(key)) % SlotNumber 66 | } 67 | 68 | func crc16sum(key string) (crc uint16) { 69 | for i := 0; i < len(key); i++ { 70 | crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff] 71 | } 72 | return 73 | } 74 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/hashtag/hashtag_test.go: -------------------------------------------------------------------------------- 1 | package hashtag 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo" 5 | . 
"github.com/onsi/gomega" 6 | ) 7 | 8 | var _ = Describe("CRC16", func() { 9 | 10 | // http://redis.io/topics/cluster-spec#keys-distribution-model 11 | It("should calculate CRC16", func() { 12 | tests := []struct { 13 | s string 14 | n uint16 15 | }{ 16 | {"123456789", 0x31C3}, 17 | {string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 21847}, 18 | } 19 | 20 | for _, test := range tests { 21 | Expect(crc16sum(test.s)).To(Equal(test.n), "for %s", test.s) 22 | } 23 | }) 24 | 25 | }) 26 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/pool/bench_test.go: -------------------------------------------------------------------------------- 1 | package pool_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | "time" 7 | 8 | "gopkg.in/redis.v3/internal/pool" 9 | ) 10 | 11 | func benchmarkPoolGetPut(b *testing.B, poolSize int) { 12 | connPool := pool.NewConnPool(dummyDialer, poolSize, time.Second, time.Hour, time.Hour) 13 | connPool.DialLimiter = nil 14 | 15 | b.ResetTimer() 16 | 17 | b.RunParallel(func(pb *testing.PB) { 18 | for pb.Next() { 19 | cn, err := connPool.Get() 20 | if err != nil { 21 | b.Fatal(err) 22 | } 23 | if err = connPool.Put(cn); err != nil { 24 | b.Fatal(err) 25 | } 26 | } 27 | }) 28 | } 29 | 30 | func BenchmarkPoolGetPut10Conns(b *testing.B) { 31 | benchmarkPoolGetPut(b, 10) 32 | } 33 | 34 | func BenchmarkPoolGetPut100Conns(b *testing.B) { 35 | benchmarkPoolGetPut(b, 100) 36 | } 37 | 38 | func BenchmarkPoolGetPut1000Conns(b *testing.B) { 39 | benchmarkPoolGetPut(b, 1000) 40 | } 41 | 42 | func benchmarkPoolGetRemove(b *testing.B, poolSize int) { 43 | connPool := pool.NewConnPool(dummyDialer, poolSize, time.Second, time.Hour, time.Hour) 44 | connPool.DialLimiter = nil 45 | removeReason := errors.New("benchmark") 46 | 47 | b.ResetTimer() 48 | 49 | b.RunParallel(func(pb *testing.PB) { 50 | for pb.Next() { 51 | cn, err := connPool.Get() 52 | if err != nil { 53 | b.Fatal(err) 54 | } 55 | if err := connPool.Remove(cn, removeReason); err != nil { 56 | b.Fatal(err) 57 | } 58 | } 59 | }) 60 | } 61 | 62 | func BenchmarkPoolGetRemove10Conns(b *testing.B) { 63 | benchmarkPoolGetRemove(b, 10) 64 | } 65 | 66 | func BenchmarkPoolGetRemove100Conns(b *testing.B) { 67 | benchmarkPoolGetRemove(b, 100) 68 | } 69 | 70 | func BenchmarkPoolGetRemove1000Conns(b *testing.B) { 71 | benchmarkPoolGetRemove(b, 1000) 72 | } 73 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/pool/conn.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "net" 7 | "time" 8 | ) 9 | 10 | const defaultBufSize = 4096 11 | 12 | var noDeadline = time.Time{} 13 | 14 | type Conn struct { 15 | NetConn net.Conn 16 | Rd *bufio.Reader 17 | Buf []byte 18 | 19 | Inited bool 20 | UsedAt time.Time 21 | 22 | ReadTimeout time.Duration 23 | WriteTimeout time.Duration 24 | } 25 | 26 | func NewConn(netConn net.Conn) *Conn { 27 | cn := &Conn{ 28 | NetConn: netConn, 29 | Buf: make([]byte, defaultBufSize), 30 | 31 | UsedAt: time.Now(), 32 | } 33 | cn.Rd = bufio.NewReader(cn) 34 | return cn 35 | } 36 | 37 | func (cn *Conn) IsStale(timeout time.Duration) bool { 38 | return timeout > 0 && time.Since(cn.UsedAt) > timeout 39 | } 40 | 41 | func (cn *Conn) Read(b []byte) (int, error) { 42 | cn.UsedAt = time.Now() 43 | if cn.ReadTimeout != 0 { 44 | cn.NetConn.SetReadDeadline(cn.UsedAt.Add(cn.ReadTimeout)) 45 | } else 
{ 46 | cn.NetConn.SetReadDeadline(noDeadline) 47 | } 48 | return cn.NetConn.Read(b) 49 | } 50 | 51 | func (cn *Conn) Write(b []byte) (int, error) { 52 | cn.UsedAt = time.Now() 53 | if cn.WriteTimeout != 0 { 54 | cn.NetConn.SetWriteDeadline(cn.UsedAt.Add(cn.WriteTimeout)) 55 | } else { 56 | cn.NetConn.SetWriteDeadline(noDeadline) 57 | } 58 | return cn.NetConn.Write(b) 59 | } 60 | 61 | func (cn *Conn) RemoteAddr() net.Addr { 62 | return cn.NetConn.RemoteAddr() 63 | } 64 | 65 | func (cn *Conn) ReadN(n int) ([]byte, error) { 66 | if d := n - cap(cn.Buf); d > 0 { 67 | cn.Buf = cn.Buf[:cap(cn.Buf)] 68 | cn.Buf = append(cn.Buf, make([]byte, d)...) 69 | } else { 70 | cn.Buf = cn.Buf[:n] 71 | } 72 | _, err := io.ReadFull(cn.Rd, cn.Buf) 73 | return cn.Buf, err 74 | } 75 | 76 | func (cn *Conn) Close() error { 77 | return cn.NetConn.Close() 78 | } 79 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/pool/main_test.go: -------------------------------------------------------------------------------- 1 | package pool_test 2 | 3 | import ( 4 | "net" 5 | "sync" 6 | "testing" 7 | 8 | . "github.com/onsi/ginkgo" 9 | . "github.com/onsi/gomega" 10 | ) 11 | 12 | func TestGinkgoSuite(t *testing.T) { 13 | RegisterFailHandler(Fail) 14 | RunSpecs(t, "pool") 15 | } 16 | 17 | func perform(n int, cbs ...func(int)) { 18 | var wg sync.WaitGroup 19 | for _, cb := range cbs { 20 | for i := 0; i < n; i++ { 21 | wg.Add(1) 22 | go func(cb func(int), i int) { 23 | defer GinkgoRecover() 24 | defer wg.Done() 25 | 26 | cb(i) 27 | }(cb, i) 28 | } 29 | } 30 | wg.Wait() 31 | } 32 | 33 | func dummyDialer() (net.Conn, error) { 34 | return &net.TCPConn{}, nil 35 | } 36 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/pool/pool.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io/ioutil" 7 | "log" 8 | "net" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | 13 | "gopkg.in/bsm/ratelimit.v1" 14 | ) 15 | 16 | var Logger = log.New(ioutil.Discard, "redis: ", log.LstdFlags) 17 | 18 | var ( 19 | ErrClosed = errors.New("redis: client is closed") 20 | ErrPoolTimeout = errors.New("redis: connection pool timeout") 21 | 22 | errConnClosed = errors.New("connection is closed") 23 | errConnStale = errors.New("connection is stale") 24 | ) 25 | 26 | var timers = sync.Pool{ 27 | New: func() interface{} { 28 | return time.NewTimer(0) 29 | }, 30 | } 31 | 32 | // PoolStats contains pool state information and accumulated stats. 
33 | // TODO: remove Waits 34 | type PoolStats struct { 35 | Requests uint32 // number of times a connection was requested by the pool 36 | Hits uint32 // number of times free connection was found in the pool 37 | Waits uint32 // number of times the pool had to wait for a connection 38 | Timeouts uint32 // number of times a wait timeout occurred 39 | 40 | TotalConns uint32 // the number of total connections in the pool 41 | FreeConns uint32 // the number of free connections in the pool 42 | } 43 | 44 | type Pooler interface { 45 | Get() (*Conn, error) 46 | Put(*Conn) error 47 | Remove(*Conn, error) error 48 | Len() int 49 | FreeLen() int 50 | Stats() *PoolStats 51 | Close() error 52 | Closed() bool 53 | } 54 | 55 | type dialer func() (net.Conn, error) 56 | 57 | type ConnPool struct { 58 | _dial dialer 59 | DialLimiter *ratelimit.RateLimiter 60 | OnClose func(*Conn) error 61 | 62 | poolTimeout time.Duration 63 | idleTimeout time.Duration 64 | 65 | queue chan struct{} 66 | 67 | connsMu sync.Mutex 68 | conns []*Conn 69 | 70 | freeConnsMu sync.Mutex 71 | freeConns []*Conn 72 | 73 | stats PoolStats 74 | 75 | _closed int32 // atomic 76 | lastErr atomic.Value 77 | } 78 | 79 | var _ Pooler = (*ConnPool)(nil) 80 | 81 | func NewConnPool(dial dialer, poolSize int, poolTimeout, idleTimeout, idleCheckFrequency time.Duration) *ConnPool { 82 | p := &ConnPool{ 83 | _dial: dial, 84 | DialLimiter: ratelimit.New(3*poolSize, time.Second), 85 | 86 | poolTimeout: poolTimeout, 87 | idleTimeout: idleTimeout, 88 | 89 | queue: make(chan struct{}, poolSize), 90 | conns: make([]*Conn, 0, poolSize), 91 | freeConns: make([]*Conn, 0, poolSize), 92 | } 93 | for i := 0; i < poolSize; i++ { 94 | p.queue <- struct{}{} 95 | } 96 | if idleTimeout > 0 && idleCheckFrequency > 0 { 97 | go p.reaper(idleCheckFrequency) 98 | } 99 | return p 100 | } 101 | 102 | func (p *ConnPool) dial() (net.Conn, error) { 103 | if p.DialLimiter != nil && p.DialLimiter.Limit() { 104 | err := fmt.Errorf( 105 | "redis: you open connections too fast (last_error=%q)", 106 | p.loadLastErr(), 107 | ) 108 | return nil, err 109 | } 110 | 111 | cn, err := p._dial() 112 | if err != nil { 113 | p.storeLastErr(err.Error()) 114 | return nil, err 115 | } 116 | return cn, nil 117 | } 118 | 119 | func (p *ConnPool) NewConn() (*Conn, error) { 120 | netConn, err := p.dial() 121 | if err != nil { 122 | return nil, err 123 | } 124 | return NewConn(netConn), nil 125 | } 126 | 127 | func (p *ConnPool) PopFree() *Conn { 128 | timer := timers.Get().(*time.Timer) 129 | if !timer.Reset(p.poolTimeout) { 130 | <-timer.C 131 | } 132 | 133 | select { 134 | case <-p.queue: 135 | timers.Put(timer) 136 | case <-timer.C: 137 | timers.Put(timer) 138 | atomic.AddUint32(&p.stats.Timeouts, 1) 139 | return nil 140 | } 141 | 142 | p.freeConnsMu.Lock() 143 | cn := p.popFree() 144 | p.freeConnsMu.Unlock() 145 | 146 | if cn == nil { 147 | p.queue <- struct{}{} 148 | } 149 | return cn 150 | } 151 | 152 | func (p *ConnPool) popFree() *Conn { 153 | if len(p.freeConns) == 0 { 154 | return nil 155 | } 156 | 157 | idx := len(p.freeConns) - 1 158 | cn := p.freeConns[idx] 159 | p.freeConns = p.freeConns[:idx] 160 | return cn 161 | } 162 | 163 | // Get returns existed connection from the pool or creates a new one. 
164 | func (p *ConnPool) Get() (*Conn, error) { 165 | if p.Closed() { 166 | return nil, ErrClosed 167 | } 168 | 169 | atomic.AddUint32(&p.stats.Requests, 1) 170 | 171 | timer := timers.Get().(*time.Timer) 172 | if !timer.Reset(p.poolTimeout) { 173 | <-timer.C 174 | } 175 | 176 | select { 177 | case <-p.queue: 178 | timers.Put(timer) 179 | case <-timer.C: 180 | timers.Put(timer) 181 | atomic.AddUint32(&p.stats.Timeouts, 1) 182 | return nil, ErrPoolTimeout 183 | } 184 | 185 | p.freeConnsMu.Lock() 186 | cn := p.popFree() 187 | p.freeConnsMu.Unlock() 188 | 189 | if cn != nil { 190 | atomic.AddUint32(&p.stats.Hits, 1) 191 | if !cn.IsStale(p.idleTimeout) { 192 | return cn, nil 193 | } 194 | _ = cn.Close() 195 | } 196 | 197 | newcn, err := p.NewConn() 198 | if err != nil { 199 | p.queue <- struct{}{} 200 | return nil, err 201 | } 202 | 203 | p.connsMu.Lock() 204 | if cn != nil { 205 | p.remove(cn, errConnStale) 206 | } 207 | p.conns = append(p.conns, newcn) 208 | p.connsMu.Unlock() 209 | 210 | return newcn, nil 211 | } 212 | 213 | func (p *ConnPool) Put(cn *Conn) error { 214 | if cn.Rd.Buffered() != 0 { 215 | b, _ := cn.Rd.Peek(cn.Rd.Buffered()) 216 | err := fmt.Errorf("connection has unread data: %q", b) 217 | Logger.Print(err) 218 | return p.Remove(cn, err) 219 | } 220 | p.freeConnsMu.Lock() 221 | p.freeConns = append(p.freeConns, cn) 222 | p.freeConnsMu.Unlock() 223 | p.queue <- struct{}{} 224 | return nil 225 | } 226 | 227 | func (p *ConnPool) Remove(cn *Conn, reason error) error { 228 | _ = cn.Close() 229 | p.connsMu.Lock() 230 | p.remove(cn, reason) 231 | p.connsMu.Unlock() 232 | p.queue <- struct{}{} 233 | return nil 234 | } 235 | 236 | func (p *ConnPool) remove(cn *Conn, reason error) { 237 | p.storeLastErr(reason.Error()) 238 | for i, c := range p.conns { 239 | if c == cn { 240 | p.conns = append(p.conns[:i], p.conns[i+1:]...) 241 | break 242 | } 243 | } 244 | } 245 | 246 | // Len returns total number of connections. 247 | func (p *ConnPool) Len() int { 248 | p.connsMu.Lock() 249 | l := len(p.conns) 250 | p.connsMu.Unlock() 251 | return l 252 | } 253 | 254 | // FreeLen returns number of free connections. 255 | func (p *ConnPool) FreeLen() int { 256 | p.freeConnsMu.Lock() 257 | l := len(p.freeConns) 258 | p.freeConnsMu.Unlock() 259 | return l 260 | } 261 | 262 | func (p *ConnPool) Stats() *PoolStats { 263 | stats := PoolStats{} 264 | stats.Requests = atomic.LoadUint32(&p.stats.Requests) 265 | stats.Hits = atomic.LoadUint32(&p.stats.Hits) 266 | stats.Waits = atomic.LoadUint32(&p.stats.Waits) 267 | stats.Timeouts = atomic.LoadUint32(&p.stats.Timeouts) 268 | stats.TotalConns = uint32(p.Len()) 269 | stats.FreeConns = uint32(p.FreeLen()) 270 | return &stats 271 | } 272 | 273 | func (p *ConnPool) Closed() bool { 274 | return atomic.LoadInt32(&p._closed) == 1 275 | } 276 | 277 | func (p *ConnPool) Close() (retErr error) { 278 | if !atomic.CompareAndSwapInt32(&p._closed, 0, 1) { 279 | return ErrClosed 280 | } 281 | 282 | p.connsMu.Lock() 283 | 284 | // Close all connections. 
285 | for _, cn := range p.conns { 286 | if cn == nil { 287 | continue 288 | } 289 | if err := p.closeConn(cn); err != nil && retErr == nil { 290 | retErr = err 291 | } 292 | } 293 | p.conns = nil 294 | p.connsMu.Unlock() 295 | 296 | p.freeConnsMu.Lock() 297 | p.freeConns = nil 298 | p.freeConnsMu.Unlock() 299 | 300 | return retErr 301 | } 302 | 303 | func (p *ConnPool) closeConn(cn *Conn) error { 304 | if p.OnClose != nil { 305 | _ = p.OnClose(cn) 306 | } 307 | return cn.Close() 308 | } 309 | 310 | func (p *ConnPool) ReapStaleConns() (n int, err error) { 311 | <-p.queue 312 | p.freeConnsMu.Lock() 313 | 314 | if len(p.freeConns) == 0 { 315 | p.freeConnsMu.Unlock() 316 | p.queue <- struct{}{} 317 | return 318 | } 319 | 320 | var idx int 321 | var cn *Conn 322 | for idx, cn = range p.freeConns { 323 | if !cn.IsStale(p.idleTimeout) { 324 | break 325 | } 326 | p.connsMu.Lock() 327 | p.remove(cn, errConnStale) 328 | p.connsMu.Unlock() 329 | n++ 330 | } 331 | if idx > 0 { 332 | p.freeConns = append(p.freeConns[:0], p.freeConns[idx:]...) 333 | } 334 | 335 | p.freeConnsMu.Unlock() 336 | p.queue <- struct{}{} 337 | return 338 | } 339 | 340 | func (p *ConnPool) reaper(frequency time.Duration) { 341 | ticker := time.NewTicker(frequency) 342 | defer ticker.Stop() 343 | 344 | for _ = range ticker.C { 345 | if p.Closed() { 346 | break 347 | } 348 | n, err := p.ReapStaleConns() 349 | if err != nil { 350 | Logger.Printf("ReapStaleConns failed: %s", err) 351 | continue 352 | } 353 | s := p.Stats() 354 | Logger.Printf( 355 | "reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)", 356 | n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts, 357 | ) 358 | } 359 | } 360 | 361 | func (p *ConnPool) storeLastErr(err string) { 362 | p.lastErr.Store(err) 363 | } 364 | 365 | func (p *ConnPool) loadLastErr() string { 366 | if v := p.lastErr.Load(); v != nil { 367 | return v.(string) 368 | } 369 | return "" 370 | } 371 | 372 | //------------------------------------------------------------------------------ 373 | 374 | var idleCheckFrequency atomic.Value 375 | 376 | func SetIdleCheckFrequency(d time.Duration) { 377 | idleCheckFrequency.Store(d) 378 | } 379 | 380 | func getIdleCheckFrequency() time.Duration { 381 | v := idleCheckFrequency.Load() 382 | if v == nil { 383 | return time.Minute 384 | } 385 | return v.(time.Duration) 386 | } 387 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/pool/pool_single.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | type SingleConnPool struct { 4 | cn *Conn 5 | } 6 | 7 | var _ Pooler = (*SingleConnPool)(nil) 8 | 9 | func NewSingleConnPool(cn *Conn) *SingleConnPool { 10 | return &SingleConnPool{ 11 | cn: cn, 12 | } 13 | } 14 | 15 | func (p *SingleConnPool) First() *Conn { 16 | return p.cn 17 | } 18 | 19 | func (p *SingleConnPool) Get() (*Conn, error) { 20 | return p.cn, nil 21 | } 22 | 23 | func (p *SingleConnPool) Put(cn *Conn) error { 24 | if p.cn != cn { 25 | panic("p.cn != cn") 26 | } 27 | return nil 28 | } 29 | 30 | func (p *SingleConnPool) Remove(cn *Conn, _ error) error { 31 | if p.cn != cn { 32 | panic("p.cn != cn") 33 | } 34 | return nil 35 | } 36 | 37 | func (p *SingleConnPool) Len() int { 38 | return 1 39 | } 40 | 41 | func (p *SingleConnPool) FreeLen() int { 42 | return 0 43 | } 44 | 45 | func (p *SingleConnPool) Stats() *PoolStats { 46 | return nil 47 | } 48 | 49 | func (p *SingleConnPool) Close() 
error { 50 | return nil 51 | } 52 | 53 | func (p *SingleConnPool) Closed() bool { 54 | return false 55 | } 56 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/pool/pool_sticky.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | ) 7 | 8 | type StickyConnPool struct { 9 | pool *ConnPool 10 | reusable bool 11 | 12 | cn *Conn 13 | closed bool 14 | mx sync.Mutex 15 | } 16 | 17 | var _ Pooler = (*StickyConnPool)(nil) 18 | 19 | func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool { 20 | return &StickyConnPool{ 21 | pool: pool, 22 | reusable: reusable, 23 | } 24 | } 25 | 26 | func (p *StickyConnPool) First() *Conn { 27 | p.mx.Lock() 28 | cn := p.cn 29 | p.mx.Unlock() 30 | return cn 31 | } 32 | 33 | func (p *StickyConnPool) Get() (*Conn, error) { 34 | defer p.mx.Unlock() 35 | p.mx.Lock() 36 | 37 | if p.closed { 38 | return nil, ErrClosed 39 | } 40 | if p.cn != nil { 41 | return p.cn, nil 42 | } 43 | 44 | cn, err := p.pool.Get() 45 | if err != nil { 46 | return nil, err 47 | } 48 | p.cn = cn 49 | return cn, nil 50 | } 51 | 52 | func (p *StickyConnPool) put() (err error) { 53 | err = p.pool.Put(p.cn) 54 | p.cn = nil 55 | return err 56 | } 57 | 58 | func (p *StickyConnPool) Put(cn *Conn) error { 59 | defer p.mx.Unlock() 60 | p.mx.Lock() 61 | if p.closed { 62 | return ErrClosed 63 | } 64 | if p.cn != cn { 65 | panic("p.cn != cn") 66 | } 67 | return nil 68 | } 69 | 70 | func (p *StickyConnPool) remove(reason error) error { 71 | err := p.pool.Remove(p.cn, reason) 72 | p.cn = nil 73 | return err 74 | } 75 | 76 | func (p *StickyConnPool) Remove(cn *Conn, reason error) error { 77 | defer p.mx.Unlock() 78 | p.mx.Lock() 79 | if p.closed { 80 | return nil 81 | } 82 | if p.cn == nil { 83 | panic("p.cn == nil") 84 | } 85 | if cn != nil && p.cn != cn { 86 | panic("p.cn != cn") 87 | } 88 | return p.remove(reason) 89 | } 90 | 91 | func (p *StickyConnPool) Len() int { 92 | defer p.mx.Unlock() 93 | p.mx.Lock() 94 | if p.cn == nil { 95 | return 0 96 | } 97 | return 1 98 | } 99 | 100 | func (p *StickyConnPool) FreeLen() int { 101 | defer p.mx.Unlock() 102 | p.mx.Lock() 103 | if p.cn == nil { 104 | return 1 105 | } 106 | return 0 107 | } 108 | 109 | func (p *StickyConnPool) Stats() *PoolStats { return nil } 110 | 111 | func (p *StickyConnPool) Close() error { 112 | defer p.mx.Unlock() 113 | p.mx.Lock() 114 | if p.closed { 115 | return ErrClosed 116 | } 117 | p.closed = true 118 | var err error 119 | if p.cn != nil { 120 | if p.reusable { 121 | err = p.put() 122 | } else { 123 | reason := errors.New("redis: sticky not reusable connection") 124 | err = p.remove(reason) 125 | } 126 | } 127 | return err 128 | } 129 | 130 | func (p *StickyConnPool) Closed() bool { 131 | p.mx.Lock() 132 | closed := p.closed 133 | p.mx.Unlock() 134 | return closed 135 | } 136 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/internal/pool/pool_test.go: -------------------------------------------------------------------------------- 1 | package pool_test 2 | 3 | import ( 4 | "errors" 5 | "net" 6 | "testing" 7 | "time" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . 
"github.com/onsi/gomega" 11 | 12 | "gopkg.in/redis.v3/internal/pool" 13 | ) 14 | 15 | var _ = Describe("ConnPool", func() { 16 | var connPool *pool.ConnPool 17 | 18 | BeforeEach(func() { 19 | connPool = pool.NewConnPool( 20 | dummyDialer, 10, time.Hour, time.Millisecond, time.Millisecond) 21 | }) 22 | 23 | AfterEach(func() { 24 | connPool.Close() 25 | }) 26 | 27 | It("rate limits dial", func() { 28 | var rateErr error 29 | for i := 0; i < 1000; i++ { 30 | cn, err := connPool.Get() 31 | if err != nil { 32 | rateErr = err 33 | break 34 | } 35 | 36 | _ = connPool.Remove(cn, errors.New("test")) 37 | } 38 | 39 | Expect(rateErr).To(MatchError(`redis: you open connections too fast (last_error="test")`)) 40 | }) 41 | 42 | It("should unblock client when conn is removed", func() { 43 | // Reserve one connection. 44 | cn, err := connPool.Get() 45 | Expect(err).NotTo(HaveOccurred()) 46 | 47 | // Reserve all other connections. 48 | var cns []*pool.Conn 49 | for i := 0; i < 9; i++ { 50 | cn, err := connPool.Get() 51 | Expect(err).NotTo(HaveOccurred()) 52 | cns = append(cns, cn) 53 | } 54 | 55 | started := make(chan bool, 1) 56 | done := make(chan bool, 1) 57 | go func() { 58 | defer GinkgoRecover() 59 | 60 | started <- true 61 | _, err := connPool.Get() 62 | Expect(err).NotTo(HaveOccurred()) 63 | done <- true 64 | 65 | err = connPool.Put(cn) 66 | Expect(err).NotTo(HaveOccurred()) 67 | }() 68 | <-started 69 | 70 | // Check that Get is blocked. 71 | select { 72 | case <-done: 73 | Fail("Get is not blocked") 74 | default: 75 | // ok 76 | } 77 | 78 | err = connPool.Remove(cn, errors.New("test")) 79 | Expect(err).NotTo(HaveOccurred()) 80 | 81 | // Check that Ping is unblocked. 82 | select { 83 | case <-done: 84 | // ok 85 | case <-time.After(time.Second): 86 | Fail("Get is not unblocked") 87 | } 88 | 89 | for _, cn := range cns { 90 | err = connPool.Put(cn) 91 | Expect(err).NotTo(HaveOccurred()) 92 | } 93 | }) 94 | }) 95 | 96 | var _ = Describe("conns reaper", func() { 97 | var connPool *pool.ConnPool 98 | 99 | BeforeEach(func() { 100 | connPool = pool.NewConnPool( 101 | dummyDialer, 10, time.Second, time.Millisecond, time.Hour) 102 | 103 | var cns []*pool.Conn 104 | 105 | // add stale connections 106 | for i := 0; i < 3; i++ { 107 | cn, err := connPool.Get() 108 | Expect(err).NotTo(HaveOccurred()) 109 | cn.UsedAt = time.Now().Add(-2 * time.Minute) 110 | cns = append(cns, cn) 111 | } 112 | 113 | // add fresh connections 114 | for i := 0; i < 3; i++ { 115 | cn := pool.NewConn(&net.TCPConn{}) 116 | cn, err := connPool.Get() 117 | Expect(err).NotTo(HaveOccurred()) 118 | cns = append(cns, cn) 119 | } 120 | 121 | for _, cn := range cns { 122 | Expect(connPool.Put(cn)).NotTo(HaveOccurred()) 123 | } 124 | 125 | Expect(connPool.Len()).To(Equal(6)) 126 | Expect(connPool.FreeLen()).To(Equal(6)) 127 | 128 | n, err := connPool.ReapStaleConns() 129 | Expect(err).NotTo(HaveOccurred()) 130 | Expect(n).To(Equal(3)) 131 | }) 132 | 133 | AfterEach(func() { 134 | connPool.Close() 135 | }) 136 | 137 | It("reaps stale connections", func() { 138 | Expect(connPool.Len()).To(Equal(3)) 139 | Expect(connPool.FreeLen()).To(Equal(3)) 140 | }) 141 | 142 | It("pool is functional", func() { 143 | for j := 0; j < 3; j++ { 144 | var freeCns []*pool.Conn 145 | for i := 0; i < 3; i++ { 146 | cn, err := connPool.Get() 147 | Expect(err).NotTo(HaveOccurred()) 148 | Expect(cn).NotTo(BeNil()) 149 | freeCns = append(freeCns, cn) 150 | } 151 | 152 | Expect(connPool.Len()).To(Equal(3)) 153 | Expect(connPool.FreeLen()).To(Equal(0)) 154 | 155 | cn, err 
:= connPool.Get() 156 | Expect(err).NotTo(HaveOccurred()) 157 | Expect(cn).NotTo(BeNil()) 158 | 159 | Expect(connPool.Len()).To(Equal(4)) 160 | Expect(connPool.FreeLen()).To(Equal(0)) 161 | 162 | err = connPool.Remove(cn, errors.New("test")) 163 | Expect(err).NotTo(HaveOccurred()) 164 | 165 | Expect(connPool.Len()).To(Equal(3)) 166 | Expect(connPool.FreeLen()).To(Equal(0)) 167 | 168 | for _, cn := range freeCns { 169 | err := connPool.Put(cn) 170 | Expect(err).NotTo(HaveOccurred()) 171 | } 172 | 173 | Expect(connPool.Len()).To(Equal(3)) 174 | Expect(connPool.FreeLen()).To(Equal(3)) 175 | } 176 | }) 177 | }) 178 | 179 | var _ = Describe("race", func() { 180 | var connPool *pool.ConnPool 181 | var C, N int 182 | 183 | BeforeEach(func() { 184 | C, N = 10, 1000 185 | if testing.Short() { 186 | C = 4 187 | N = 100 188 | } 189 | }) 190 | 191 | AfterEach(func() { 192 | connPool.Close() 193 | }) 194 | 195 | It("does not happen on Get, Put, and Remove", func() { 196 | connPool = pool.NewConnPool( 197 | dummyDialer, 10, time.Minute, time.Millisecond, time.Millisecond) 198 | connPool.DialLimiter = nil 199 | 200 | perform(C, func(id int) { 201 | for i := 0; i < N; i++ { 202 | cn, err := connPool.Get() 203 | Expect(err).NotTo(HaveOccurred()) 204 | if err == nil { 205 | Expect(connPool.Put(cn)).NotTo(HaveOccurred()) 206 | } 207 | } 208 | }, func(id int) { 209 | for i := 0; i < N; i++ { 210 | cn, err := connPool.Get() 211 | Expect(err).NotTo(HaveOccurred()) 212 | if err == nil { 213 | Expect(connPool.Remove(cn, errors.New("test"))).NotTo(HaveOccurred()) 214 | } 215 | } 216 | }) 217 | }) 218 | 219 | It("does not happen on Get and PopFree", func() { 220 | connPool = pool.NewConnPool( 221 | dummyDialer, 10, time.Minute, time.Second, time.Millisecond) 222 | connPool.DialLimiter = nil 223 | 224 | perform(C, func(id int) { 225 | for i := 0; i < N; i++ { 226 | cn, err := connPool.Get() 227 | Expect(err).NotTo(HaveOccurred()) 228 | if err == nil { 229 | Expect(connPool.Put(cn)).NotTo(HaveOccurred()) 230 | } 231 | 232 | cn = connPool.PopFree() 233 | if cn != nil { 234 | Expect(connPool.Put(cn)).NotTo(HaveOccurred()) 235 | } 236 | } 237 | }) 238 | }) 239 | }) 240 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/main_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "errors" 5 | "net" 6 | "os" 7 | "os/exec" 8 | "path/filepath" 9 | "sync" 10 | "sync/atomic" 11 | "testing" 12 | "time" 13 | 14 | . "github.com/onsi/ginkgo" 15 | . 
"github.com/onsi/gomega" 16 | 17 | "gopkg.in/redis.v3" 18 | ) 19 | 20 | const ( 21 | redisPort = "6380" 22 | redisAddr = ":" + redisPort 23 | redisSecondaryPort = "6381" 24 | ) 25 | 26 | const ( 27 | ringShard1Port = "6390" 28 | ringShard2Port = "6391" 29 | ) 30 | 31 | const ( 32 | sentinelName = "mymaster" 33 | sentinelMasterPort = "8123" 34 | sentinelSlave1Port = "8124" 35 | sentinelSlave2Port = "8125" 36 | sentinelPort = "8126" 37 | ) 38 | 39 | var ( 40 | redisMain *redisProcess 41 | ringShard1, ringShard2 *redisProcess 42 | sentinelMaster, sentinelSlave1, sentinelSlave2, sentinel *redisProcess 43 | ) 44 | 45 | var cluster = &clusterScenario{ 46 | ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"}, 47 | nodeIds: make([]string, 6), 48 | processes: make(map[string]*redisProcess, 6), 49 | clients: make(map[string]*redis.Client, 6), 50 | } 51 | 52 | var _ = BeforeSuite(func() { 53 | var err error 54 | 55 | redisMain, err = startRedis(redisPort) 56 | Expect(err).NotTo(HaveOccurred()) 57 | 58 | ringShard1, err = startRedis(ringShard1Port) 59 | Expect(err).NotTo(HaveOccurred()) 60 | 61 | ringShard2, err = startRedis(ringShard2Port) 62 | Expect(err).NotTo(HaveOccurred()) 63 | 64 | sentinelMaster, err = startRedis(sentinelMasterPort) 65 | Expect(err).NotTo(HaveOccurred()) 66 | 67 | sentinel, err = startSentinel(sentinelPort, sentinelName, sentinelMasterPort) 68 | Expect(err).NotTo(HaveOccurred()) 69 | 70 | sentinelSlave1, err = startRedis( 71 | sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort) 72 | Expect(err).NotTo(HaveOccurred()) 73 | 74 | sentinelSlave2, err = startRedis( 75 | sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort) 76 | Expect(err).NotTo(HaveOccurred()) 77 | 78 | Expect(startCluster(cluster)).NotTo(HaveOccurred()) 79 | }) 80 | 81 | var _ = AfterSuite(func() { 82 | Expect(redisMain.Close()).NotTo(HaveOccurred()) 83 | 84 | Expect(ringShard1.Close()).NotTo(HaveOccurred()) 85 | Expect(ringShard2.Close()).NotTo(HaveOccurred()) 86 | 87 | Expect(sentinel.Close()).NotTo(HaveOccurred()) 88 | Expect(sentinelSlave1.Close()).NotTo(HaveOccurred()) 89 | Expect(sentinelSlave2.Close()).NotTo(HaveOccurred()) 90 | Expect(sentinelMaster.Close()).NotTo(HaveOccurred()) 91 | 92 | Expect(stopCluster(cluster)).NotTo(HaveOccurred()) 93 | }) 94 | 95 | func TestGinkgoSuite(t *testing.T) { 96 | RegisterFailHandler(Fail) 97 | RunSpecs(t, "gopkg.in/redis.v3") 98 | } 99 | 100 | //------------------------------------------------------------------------------ 101 | 102 | func redisOptions() *redis.Options { 103 | return &redis.Options{ 104 | Addr: redisAddr, 105 | DB: 15, 106 | DialTimeout: 10 * time.Second, 107 | ReadTimeout: 30 * time.Second, 108 | WriteTimeout: 30 * time.Second, 109 | PoolSize: 10, 110 | PoolTimeout: 30 * time.Second, 111 | IdleTimeout: time.Second, 112 | IdleCheckFrequency: time.Second, 113 | } 114 | } 115 | 116 | func perform(n int, cbs ...func(int)) { 117 | var wg sync.WaitGroup 118 | for _, cb := range cbs { 119 | for i := 0; i < n; i++ { 120 | wg.Add(1) 121 | go func(cb func(int), i int) { 122 | defer GinkgoRecover() 123 | defer wg.Done() 124 | 125 | cb(i) 126 | }(cb, i) 127 | } 128 | } 129 | wg.Wait() 130 | } 131 | 132 | func eventually(fn func() error, timeout time.Duration) error { 133 | var exit int32 134 | var retErr error 135 | var mu sync.Mutex 136 | done := make(chan struct{}) 137 | 138 | go func() { 139 | for atomic.LoadInt32(&exit) == 0 { 140 | err := fn() 141 | if err == nil { 142 | close(done) 143 | return 144 | } 145 | mu.Lock() 146 
| retErr = err 147 | mu.Unlock() 148 | time.Sleep(timeout / 100) 149 | } 150 | }() 151 | 152 | select { 153 | case <-done: 154 | return nil 155 | case <-time.After(timeout): 156 | atomic.StoreInt32(&exit, 1) 157 | mu.Lock() 158 | err := retErr 159 | mu.Unlock() 160 | return err 161 | } 162 | } 163 | 164 | func execCmd(name string, args ...string) (*os.Process, error) { 165 | cmd := exec.Command(name, args...) 166 | if testing.Verbose() { 167 | cmd.Stdout = os.Stdout 168 | cmd.Stderr = os.Stderr 169 | } 170 | return cmd.Process, cmd.Start() 171 | } 172 | 173 | func connectTo(port string) (*redis.Client, error) { 174 | client := redis.NewClient(&redis.Options{ 175 | Addr: ":" + port, 176 | }) 177 | 178 | err := eventually(func() error { 179 | return client.Ping().Err() 180 | }, 30*time.Second) 181 | if err != nil { 182 | return nil, err 183 | } 184 | 185 | return client, nil 186 | } 187 | 188 | type redisProcess struct { 189 | *os.Process 190 | *redis.Client 191 | } 192 | 193 | func (p *redisProcess) Close() error { 194 | if err := p.Kill(); err != nil { 195 | return err 196 | } 197 | 198 | err := eventually(func() error { 199 | if err := p.Client.Ping().Err(); err != nil { 200 | return nil 201 | } 202 | return errors.New("client is not shutdown") 203 | }, 10*time.Second) 204 | if err != nil { 205 | return err 206 | } 207 | 208 | p.Client.Close() 209 | return nil 210 | } 211 | 212 | var ( 213 | redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) 214 | redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis.conf")) 215 | ) 216 | 217 | func redisDir(port string) (string, error) { 218 | dir, err := filepath.Abs(filepath.Join("testdata", "instances", port)) 219 | if err != nil { 220 | return "", err 221 | } 222 | if err := os.RemoveAll(dir); err != nil { 223 | return "", err 224 | } 225 | if err := os.MkdirAll(dir, 0775); err != nil { 226 | return "", err 227 | } 228 | return dir, nil 229 | } 230 | 231 | func startRedis(port string, args ...string) (*redisProcess, error) { 232 | dir, err := redisDir(port) 233 | if err != nil { 234 | return nil, err 235 | } 236 | if err = exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil { 237 | return nil, err 238 | } 239 | 240 | baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir} 241 | process, err := execCmd(redisServerBin, append(baseArgs, args...)...) 
242 | if err != nil { 243 | return nil, err 244 | } 245 | 246 | client, err := connectTo(port) 247 | if err != nil { 248 | process.Kill() 249 | return nil, err 250 | } 251 | return &redisProcess{process, client}, err 252 | } 253 | 254 | func startSentinel(port, masterName, masterPort string) (*redisProcess, error) { 255 | dir, err := redisDir(port) 256 | if err != nil { 257 | return nil, err 258 | } 259 | process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir) 260 | if err != nil { 261 | return nil, err 262 | } 263 | client, err := connectTo(port) 264 | if err != nil { 265 | process.Kill() 266 | return nil, err 267 | } 268 | for _, cmd := range []*redis.StatusCmd{ 269 | redis.NewStatusCmd("SENTINEL", "MONITOR", masterName, "127.0.0.1", masterPort, "1"), 270 | redis.NewStatusCmd("SENTINEL", "SET", masterName, "down-after-milliseconds", "500"), 271 | redis.NewStatusCmd("SENTINEL", "SET", masterName, "failover-timeout", "1000"), 272 | redis.NewStatusCmd("SENTINEL", "SET", masterName, "parallel-syncs", "1"), 273 | } { 274 | client.Process(cmd) 275 | if err := cmd.Err(); err != nil { 276 | process.Kill() 277 | return nil, err 278 | } 279 | } 280 | return &redisProcess{process, client}, nil 281 | } 282 | 283 | //------------------------------------------------------------------------------ 284 | 285 | type badConnError string 286 | 287 | func (e badConnError) Error() string { return string(e) } 288 | func (e badConnError) Timeout() bool { return false } 289 | func (e badConnError) Temporary() bool { return false } 290 | 291 | type badConn struct { 292 | net.TCPConn 293 | 294 | readDelay, writeDelay time.Duration 295 | readErr, writeErr error 296 | } 297 | 298 | var _ net.Conn = &badConn{} 299 | 300 | func (cn *badConn) Read([]byte) (int, error) { 301 | if cn.readDelay != 0 { 302 | time.Sleep(cn.readDelay) 303 | } 304 | if cn.readErr != nil { 305 | return 0, cn.readErr 306 | } 307 | return 0, badConnError("bad connection") 308 | } 309 | 310 | func (cn *badConn) Write([]byte) (int, error) { 311 | if cn.writeDelay != 0 { 312 | time.Sleep(cn.writeDelay) 313 | } 314 | if cn.writeErr != nil { 315 | return 0, cn.writeErr 316 | } 317 | return 0, badConnError("bad connection") 318 | } 319 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/multi.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "gopkg.in/redis.v3/internal/pool" 8 | ) 9 | 10 | var errDiscard = errors.New("redis: Discard can be used only inside Exec") 11 | 12 | // Multi implements Redis transactions as described in 13 | // http://redis.io/topics/transactions. It's NOT safe for concurrent use 14 | // by multiple goroutines, because Exec resets list of watched keys. 15 | // If you don't need WATCH it is better to use Pipeline. 16 | // 17 | // TODO(vmihailenco): rename to Tx and rework API 18 | type Multi struct { 19 | commandable 20 | 21 | base *baseClient 22 | 23 | cmds []Cmder 24 | closed bool 25 | } 26 | 27 | // Watch creates new transaction and marks the keys to be watched 28 | // for conditional execution of a transaction. 29 | func (c *Client) Watch(keys ...string) (*Multi, error) { 30 | tx := c.Multi() 31 | if err := tx.Watch(keys...).Err(); err != nil { 32 | tx.Close() 33 | return nil, err 34 | } 35 | return tx, nil 36 | } 37 | 38 | // Deprecated. Use Watch instead. 
39 | func (c *Client) Multi() *Multi { 40 | multi := &Multi{ 41 | base: &baseClient{ 42 | opt: c.opt, 43 | connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true), 44 | }, 45 | } 46 | multi.commandable.process = multi.process 47 | return multi 48 | } 49 | 50 | func (c *Multi) process(cmd Cmder) { 51 | if c.cmds == nil { 52 | c.base.process(cmd) 53 | } else { 54 | c.cmds = append(c.cmds, cmd) 55 | } 56 | } 57 | 58 | // Close closes the client, releasing any open resources. 59 | func (c *Multi) Close() error { 60 | c.closed = true 61 | if err := c.Unwatch().Err(); err != nil { 62 | Logger.Printf("Unwatch failed: %s", err) 63 | } 64 | return c.base.Close() 65 | } 66 | 67 | // Watch marks the keys to be watched for conditional execution 68 | // of a transaction. 69 | func (c *Multi) Watch(keys ...string) *StatusCmd { 70 | args := make([]interface{}, 1+len(keys)) 71 | args[0] = "WATCH" 72 | for i, key := range keys { 73 | args[1+i] = key 74 | } 75 | cmd := NewStatusCmd(args...) 76 | c.Process(cmd) 77 | return cmd 78 | } 79 | 80 | // Unwatch flushes all the previously watched keys for a transaction. 81 | func (c *Multi) Unwatch(keys ...string) *StatusCmd { 82 | args := make([]interface{}, 1+len(keys)) 83 | args[0] = "UNWATCH" 84 | for i, key := range keys { 85 | args[1+i] = key 86 | } 87 | cmd := NewStatusCmd(args...) 88 | c.Process(cmd) 89 | return cmd 90 | } 91 | 92 | // Discard discards queued commands. 93 | func (c *Multi) Discard() error { 94 | if c.cmds == nil { 95 | return errDiscard 96 | } 97 | c.cmds = c.cmds[:1] 98 | return nil 99 | } 100 | 101 | // Exec executes all previously queued commands in a transaction 102 | // and restores the connection state to normal. 103 | // 104 | // When using WATCH, EXEC will execute commands only if the watched keys 105 | // were not modified, allowing for a check-and-set mechanism. 106 | // 107 | // Exec always returns list of commands. If transaction fails 108 | // TxFailedErr is returned. Otherwise Exec returns error of the first 109 | // failed command or nil. 110 | func (c *Multi) Exec(f func() error) ([]Cmder, error) { 111 | if c.closed { 112 | return nil, pool.ErrClosed 113 | } 114 | 115 | c.cmds = []Cmder{NewStatusCmd("MULTI")} 116 | if err := f(); err != nil { 117 | return nil, err 118 | } 119 | c.cmds = append(c.cmds, NewSliceCmd("EXEC")) 120 | 121 | cmds := c.cmds 122 | c.cmds = nil 123 | 124 | if len(cmds) == 2 { 125 | return []Cmder{}, nil 126 | } 127 | 128 | // Strip MULTI and EXEC commands. 129 | retCmds := cmds[1 : len(cmds)-1] 130 | 131 | cn, err := c.base.conn() 132 | if err != nil { 133 | setCmdsErr(retCmds, err) 134 | return retCmds, err 135 | } 136 | 137 | err = c.execCmds(cn, cmds) 138 | c.base.putConn(cn, err, false) 139 | return retCmds, err 140 | } 141 | 142 | func (c *Multi) execCmds(cn *pool.Conn, cmds []Cmder) error { 143 | err := writeCmd(cn, cmds...) 144 | if err != nil { 145 | setCmdsErr(cmds[1:len(cmds)-1], err) 146 | return err 147 | } 148 | 149 | statusCmd := NewStatusCmd() 150 | 151 | // Omit last command (EXEC). 152 | cmdsLen := len(cmds) - 1 153 | 154 | // Parse queued replies. 155 | for i := 0; i < cmdsLen; i++ { 156 | if err := statusCmd.readReply(cn); err != nil { 157 | setCmdsErr(cmds[1:len(cmds)-1], err) 158 | return err 159 | } 160 | } 161 | 162 | // Parse number of replies. 
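// At this point the server has answered +OK for MULTI and +QUEUED for every
// queued command (read in the loop above). EXEC itself replies with a RESP
// array header ("*<n>") listing the replies of the queued commands, or with a
// nil reply when a WATCHed key was modified and the transaction was aborted;
// readLine below surfaces that nil reply as Nil, which is mapped to TxFailedErr.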
163 | line, err := readLine(cn) 164 | if err != nil { 165 | if err == Nil { 166 | err = TxFailedErr 167 | } 168 | setCmdsErr(cmds[1:len(cmds)-1], err) 169 | return err 170 | } 171 | if line[0] != '*' { 172 | err := fmt.Errorf("redis: expected '*', but got line %q", line) 173 | setCmdsErr(cmds[1:len(cmds)-1], err) 174 | return err 175 | } 176 | 177 | var firstCmdErr error 178 | 179 | // Parse replies. 180 | // Loop starts from 1 to omit MULTI cmd. 181 | for i := 1; i < cmdsLen; i++ { 182 | cmd := cmds[i] 183 | if err := cmd.readReply(cn); err != nil { 184 | if firstCmdErr == nil { 185 | firstCmdErr = err 186 | } 187 | } 188 | } 189 | 190 | return firstCmdErr 191 | } 192 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/multi_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "strconv" 5 | "sync" 6 | 7 | . "github.com/onsi/ginkgo" 8 | . "github.com/onsi/gomega" 9 | 10 | "gopkg.in/redis.v3" 11 | ) 12 | 13 | var _ = Describe("Multi", func() { 14 | var client *redis.Client 15 | 16 | BeforeEach(func() { 17 | client = redis.NewClient(redisOptions()) 18 | Expect(client.FlushDb().Err()).NotTo(HaveOccurred()) 19 | }) 20 | 21 | AfterEach(func() { 22 | Expect(client.Close()).NotTo(HaveOccurred()) 23 | }) 24 | 25 | It("should Watch", func() { 26 | var incr func(string) error 27 | 28 | // Transactionally increments key using GET and SET commands. 29 | incr = func(key string) error { 30 | tx, err := client.Watch(key) 31 | if err != nil { 32 | return err 33 | } 34 | defer tx.Close() 35 | 36 | n, err := tx.Get(key).Int64() 37 | if err != nil && err != redis.Nil { 38 | return err 39 | } 40 | 41 | _, err = tx.Exec(func() error { 42 | tx.Set(key, strconv.FormatInt(n+1, 10), 0) 43 | return nil 44 | }) 45 | if err == redis.TxFailedErr { 46 | return incr(key) 47 | } 48 | return err 49 | } 50 | 51 | var wg sync.WaitGroup 52 | for i := 0; i < 100; i++ { 53 | wg.Add(1) 54 | go func() { 55 | defer GinkgoRecover() 56 | defer wg.Done() 57 | 58 | err := incr("key") 59 | Expect(err).NotTo(HaveOccurred()) 60 | }() 61 | } 62 | wg.Wait() 63 | 64 | n, err := client.Get("key").Int64() 65 | Expect(err).NotTo(HaveOccurred()) 66 | Expect(n).To(Equal(int64(100))) 67 | }) 68 | 69 | It("should discard", func() { 70 | multi := client.Multi() 71 | defer func() { 72 | Expect(multi.Close()).NotTo(HaveOccurred()) 73 | }() 74 | 75 | cmds, err := multi.Exec(func() error { 76 | multi.Set("key1", "hello1", 0) 77 | multi.Discard() 78 | multi.Set("key2", "hello2", 0) 79 | return nil 80 | }) 81 | Expect(err).NotTo(HaveOccurred()) 82 | Expect(cmds).To(HaveLen(1)) 83 | 84 | get := client.Get("key1") 85 | Expect(get.Err()).To(Equal(redis.Nil)) 86 | Expect(get.Val()).To(Equal("")) 87 | 88 | get = client.Get("key2") 89 | Expect(get.Err()).NotTo(HaveOccurred()) 90 | Expect(get.Val()).To(Equal("hello2")) 91 | }) 92 | 93 | It("should exec empty", func() { 94 | multi := client.Multi() 95 | defer func() { 96 | Expect(multi.Close()).NotTo(HaveOccurred()) 97 | }() 98 | 99 | cmds, err := multi.Exec(func() error { return nil }) 100 | Expect(err).NotTo(HaveOccurred()) 101 | Expect(cmds).To(HaveLen(0)) 102 | 103 | ping := multi.Ping() 104 | Expect(ping.Err()).NotTo(HaveOccurred()) 105 | Expect(ping.Val()).To(Equal("PONG")) 106 | }) 107 | 108 | It("should exec empty queue", func() { 109 | multi := client.Multi() 110 | defer func() { 111 | Expect(multi.Close()).NotTo(HaveOccurred()) 112 | }() 113 | 114 | cmds, err := 
multi.Exec(func() error { return nil }) 115 | Expect(err).NotTo(HaveOccurred()) 116 | Expect(cmds).To(HaveLen(0)) 117 | }) 118 | 119 | It("should exec bulks", func() { 120 | multi := client.Multi() 121 | defer func() { 122 | Expect(multi.Close()).NotTo(HaveOccurred()) 123 | }() 124 | 125 | cmds, err := multi.Exec(func() error { 126 | for i := int64(0); i < 20000; i++ { 127 | multi.Incr("key") 128 | } 129 | return nil 130 | }) 131 | Expect(err).NotTo(HaveOccurred()) 132 | Expect(len(cmds)).To(Equal(20000)) 133 | for _, cmd := range cmds { 134 | Expect(cmd.Err()).NotTo(HaveOccurred()) 135 | } 136 | 137 | get := client.Get("key") 138 | Expect(get.Err()).NotTo(HaveOccurred()) 139 | Expect(get.Val()).To(Equal("20000")) 140 | }) 141 | 142 | It("should recover from bad connection", func() { 143 | // Put bad connection in the pool. 144 | cn, err := client.Pool().Get() 145 | Expect(err).NotTo(HaveOccurred()) 146 | 147 | cn.NetConn = &badConn{} 148 | err = client.Pool().Put(cn) 149 | Expect(err).NotTo(HaveOccurred()) 150 | 151 | multi := client.Multi() 152 | defer func() { 153 | Expect(multi.Close()).NotTo(HaveOccurred()) 154 | }() 155 | 156 | _, err = multi.Exec(func() error { 157 | multi.Ping() 158 | return nil 159 | }) 160 | Expect(err).To(MatchError("bad connection")) 161 | 162 | _, err = multi.Exec(func() error { 163 | multi.Ping() 164 | return nil 165 | }) 166 | Expect(err).NotTo(HaveOccurred()) 167 | }) 168 | 169 | It("should recover from bad connection when there are no commands", func() { 170 | // Put bad connection in the pool. 171 | cn, err := client.Pool().Get() 172 | Expect(err).NotTo(HaveOccurred()) 173 | 174 | cn.NetConn = &badConn{} 175 | err = client.Pool().Put(cn) 176 | Expect(err).NotTo(HaveOccurred()) 177 | 178 | { 179 | tx, err := client.Watch("key") 180 | Expect(err).To(MatchError("bad connection")) 181 | Expect(tx).To(BeNil()) 182 | } 183 | 184 | { 185 | tx, err := client.Watch("key") 186 | Expect(err).NotTo(HaveOccurred()) 187 | 188 | err = tx.Ping().Err() 189 | Expect(err).NotTo(HaveOccurred()) 190 | 191 | err = tx.Close() 192 | Expect(err).NotTo(HaveOccurred()) 193 | } 194 | }) 195 | }) 196 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/options.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "net" 5 | "time" 6 | 7 | "gopkg.in/redis.v3/internal/pool" 8 | ) 9 | 10 | type Options struct { 11 | // The network type, either tcp or unix. 12 | // Default is tcp. 13 | Network string 14 | // host:port address. 15 | Addr string 16 | 17 | // Dialer creates new network connection and has priority over 18 | // Network and Addr options. 19 | Dialer func() (net.Conn, error) 20 | 21 | // An optional password. Must match the password specified in the 22 | // requirepass server configuration option. 23 | Password string 24 | // A database to be selected after connecting to server. 25 | DB int64 26 | 27 | // The maximum number of retries before giving up. 28 | // Default is to not retry failed commands. 29 | MaxRetries int 30 | 31 | // Sets the deadline for establishing new connections. If reached, 32 | // dial will fail with a timeout. 33 | // Default is 5 seconds. 34 | DialTimeout time.Duration 35 | // Sets the deadline for socket reads. If reached, commands will 36 | // fail with a timeout instead of blocking. 37 | ReadTimeout time.Duration 38 | // Sets the deadline for socket writes. 
If reached, commands will 39 | // fail with a timeout instead of blocking. 40 | WriteTimeout time.Duration 41 | 42 | // The maximum number of socket connections. 43 | // Default is 10 connections. 44 | PoolSize int 45 | // Specifies amount of time client waits for connection if all 46 | // connections are busy before returning an error. 47 | // Default is 1 second. 48 | PoolTimeout time.Duration 49 | // Specifies amount of time after which client closes idle 50 | // connections. Should be less than server's timeout. 51 | // Default is to not close idle connections. 52 | IdleTimeout time.Duration 53 | // The frequency of idle checks. 54 | // Default is 1 minute. 55 | IdleCheckFrequency time.Duration 56 | } 57 | 58 | func (opt *Options) getNetwork() string { 59 | if opt.Network == "" { 60 | return "tcp" 61 | } 62 | return opt.Network 63 | } 64 | 65 | func (opt *Options) getDialer() func() (net.Conn, error) { 66 | if opt.Dialer != nil { 67 | return opt.Dialer 68 | } 69 | return func() (net.Conn, error) { 70 | return net.DialTimeout(opt.getNetwork(), opt.Addr, opt.getDialTimeout()) 71 | } 72 | } 73 | 74 | func (opt *Options) getPoolSize() int { 75 | if opt.PoolSize == 0 { 76 | return 10 77 | } 78 | return opt.PoolSize 79 | } 80 | 81 | func (opt *Options) getDialTimeout() time.Duration { 82 | if opt.DialTimeout == 0 { 83 | return 5 * time.Second 84 | } 85 | return opt.DialTimeout 86 | } 87 | 88 | func (opt *Options) getPoolTimeout() time.Duration { 89 | if opt.PoolTimeout == 0 { 90 | return 1 * time.Second 91 | } 92 | return opt.PoolTimeout 93 | } 94 | 95 | func (opt *Options) getIdleTimeout() time.Duration { 96 | return opt.IdleTimeout 97 | } 98 | 99 | func (opt *Options) getIdleCheckFrequency() time.Duration { 100 | if opt.IdleCheckFrequency == 0 { 101 | return time.Minute 102 | } 103 | return opt.IdleCheckFrequency 104 | } 105 | 106 | func newConnPool(opt *Options) *pool.ConnPool { 107 | return pool.NewConnPool( 108 | opt.getDialer(), 109 | opt.getPoolSize(), 110 | opt.getPoolTimeout(), 111 | opt.getIdleTimeout(), 112 | opt.getIdleCheckFrequency(), 113 | ) 114 | } 115 | 116 | // PoolStats contains pool state information and accumulated stats. 117 | type PoolStats struct { 118 | Requests uint32 // number of times a connection was requested by the pool 119 | Hits uint32 // number of times free connection was found in the pool 120 | Waits uint32 // number of times the pool had to wait for a connection 121 | Timeouts uint32 // number of times a wait timeout occurred 122 | 123 | TotalConns uint32 // the number of total connections in the pool 124 | FreeConns uint32 // the number of free connections in the pool 125 | } 126 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/parser.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net" 7 | "strconv" 8 | 9 | "gopkg.in/redis.v3/internal/pool" 10 | ) 11 | 12 | const ( 13 | errorReply = '-' 14 | statusReply = '+' 15 | intReply = ':' 16 | stringReply = '$' 17 | arrayReply = '*' 18 | ) 19 | 20 | type multiBulkParser func(cn *pool.Conn, n int64) (interface{}, error) 21 | 22 | var ( 23 | errReaderTooSmall = errors.New("redis: reader is too small") 24 | ) 25 | 26 | //------------------------------------------------------------------------------ 27 | 28 | // Copy of encoding.BinaryMarshaler. 
29 | type binaryMarshaler interface { 30 | MarshalBinary() (data []byte, err error) 31 | } 32 | 33 | // Copy of encoding.BinaryUnmarshaler. 34 | type binaryUnmarshaler interface { 35 | UnmarshalBinary(data []byte) error 36 | } 37 | 38 | func appendString(b []byte, s string) []byte { 39 | b = append(b, '$') 40 | b = strconv.AppendUint(b, uint64(len(s)), 10) 41 | b = append(b, '\r', '\n') 42 | b = append(b, s...) 43 | b = append(b, '\r', '\n') 44 | return b 45 | } 46 | 47 | func appendBytes(b, bb []byte) []byte { 48 | b = append(b, '$') 49 | b = strconv.AppendUint(b, uint64(len(bb)), 10) 50 | b = append(b, '\r', '\n') 51 | b = append(b, bb...) 52 | b = append(b, '\r', '\n') 53 | return b 54 | } 55 | 56 | func appendArg(b []byte, val interface{}) ([]byte, error) { 57 | switch v := val.(type) { 58 | case nil: 59 | b = appendString(b, "") 60 | case string: 61 | b = appendString(b, v) 62 | case []byte: 63 | b = appendBytes(b, v) 64 | case int: 65 | b = appendString(b, formatInt(int64(v))) 66 | case int8: 67 | b = appendString(b, formatInt(int64(v))) 68 | case int16: 69 | b = appendString(b, formatInt(int64(v))) 70 | case int32: 71 | b = appendString(b, formatInt(int64(v))) 72 | case int64: 73 | b = appendString(b, formatInt(v)) 74 | case uint: 75 | b = appendString(b, formatUint(uint64(v))) 76 | case uint8: 77 | b = appendString(b, formatUint(uint64(v))) 78 | case uint16: 79 | b = appendString(b, formatUint(uint64(v))) 80 | case uint32: 81 | b = appendString(b, formatUint(uint64(v))) 82 | case uint64: 83 | b = appendString(b, formatUint(v)) 84 | case float32: 85 | b = appendString(b, formatFloat(float64(v))) 86 | case float64: 87 | b = appendString(b, formatFloat(v)) 88 | case bool: 89 | if v { 90 | b = appendString(b, "1") 91 | } else { 92 | b = appendString(b, "0") 93 | } 94 | default: 95 | if bm, ok := val.(binaryMarshaler); ok { 96 | bb, err := bm.MarshalBinary() 97 | if err != nil { 98 | return nil, err 99 | } 100 | b = appendBytes(b, bb) 101 | } else { 102 | err := fmt.Errorf( 103 | "redis: can't marshal %T (consider implementing BinaryMarshaler)", val) 104 | return nil, err 105 | } 106 | } 107 | return b, nil 108 | } 109 | 110 | func appendArgs(b []byte, args []interface{}) ([]byte, error) { 111 | b = append(b, arrayReply) 112 | b = strconv.AppendUint(b, uint64(len(args)), 10) 113 | b = append(b, '\r', '\n') 114 | for _, arg := range args { 115 | var err error 116 | b, err = appendArg(b, arg) 117 | if err != nil { 118 | return nil, err 119 | } 120 | } 121 | return b, nil 122 | } 123 | 124 | func scan(b []byte, val interface{}) error { 125 | switch v := val.(type) { 126 | case nil: 127 | return errorf("redis: Scan(nil)") 128 | case *string: 129 | *v = bytesToString(b) 130 | return nil 131 | case *[]byte: 132 | *v = b 133 | return nil 134 | case *int: 135 | var err error 136 | *v, err = strconv.Atoi(bytesToString(b)) 137 | return err 138 | case *int8: 139 | n, err := strconv.ParseInt(bytesToString(b), 10, 8) 140 | if err != nil { 141 | return err 142 | } 143 | *v = int8(n) 144 | return nil 145 | case *int16: 146 | n, err := strconv.ParseInt(bytesToString(b), 10, 16) 147 | if err != nil { 148 | return err 149 | } 150 | *v = int16(n) 151 | return nil 152 | case *int32: 153 | n, err := strconv.ParseInt(bytesToString(b), 10, 16) 154 | if err != nil { 155 | return err 156 | } 157 | *v = int32(n) 158 | return nil 159 | case *int64: 160 | n, err := strconv.ParseInt(bytesToString(b), 10, 64) 161 | if err != nil { 162 | return err 163 | } 164 | *v = n 165 | return nil 166 | case *uint: 167 | n, err 
:= strconv.ParseUint(bytesToString(b), 10, 64) 168 | if err != nil { 169 | return err 170 | } 171 | *v = uint(n) 172 | return nil 173 | case *uint8: 174 | n, err := strconv.ParseUint(bytesToString(b), 10, 8) 175 | if err != nil { 176 | return err 177 | } 178 | *v = uint8(n) 179 | return nil 180 | case *uint16: 181 | n, err := strconv.ParseUint(bytesToString(b), 10, 16) 182 | if err != nil { 183 | return err 184 | } 185 | *v = uint16(n) 186 | return nil 187 | case *uint32: 188 | n, err := strconv.ParseUint(bytesToString(b), 10, 32) 189 | if err != nil { 190 | return err 191 | } 192 | *v = uint32(n) 193 | return nil 194 | case *uint64: 195 | n, err := strconv.ParseUint(bytesToString(b), 10, 64) 196 | if err != nil { 197 | return err 198 | } 199 | *v = n 200 | return nil 201 | case *float32: 202 | n, err := strconv.ParseFloat(bytesToString(b), 32) 203 | if err != nil { 204 | return err 205 | } 206 | *v = float32(n) 207 | return err 208 | case *float64: 209 | var err error 210 | *v, err = strconv.ParseFloat(bytesToString(b), 64) 211 | return err 212 | case *bool: 213 | *v = len(b) == 1 && b[0] == '1' 214 | return nil 215 | default: 216 | if bu, ok := val.(binaryUnmarshaler); ok { 217 | return bu.UnmarshalBinary(b) 218 | } 219 | err := fmt.Errorf( 220 | "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", val) 221 | return err 222 | } 223 | } 224 | 225 | //------------------------------------------------------------------------------ 226 | 227 | func readLine(cn *pool.Conn) ([]byte, error) { 228 | line, isPrefix, err := cn.Rd.ReadLine() 229 | if err != nil { 230 | return line, err 231 | } 232 | if isPrefix { 233 | return line, errReaderTooSmall 234 | } 235 | if isNilReply(line) { 236 | return nil, Nil 237 | } 238 | return line, nil 239 | } 240 | 241 | func isNilReply(b []byte) bool { 242 | return len(b) == 3 && 243 | (b[0] == stringReply || b[0] == arrayReply) && 244 | b[1] == '-' && b[2] == '1' 245 | } 246 | 247 | //------------------------------------------------------------------------------ 248 | 249 | func parseErrorReply(cn *pool.Conn, line []byte) error { 250 | return errorf(string(line[1:])) 251 | } 252 | 253 | func parseStatusReply(cn *pool.Conn, line []byte) ([]byte, error) { 254 | return line[1:], nil 255 | } 256 | 257 | func parseIntReply(cn *pool.Conn, line []byte) (int64, error) { 258 | n, err := strconv.ParseInt(bytesToString(line[1:]), 10, 64) 259 | if err != nil { 260 | return 0, err 261 | } 262 | return n, nil 263 | } 264 | 265 | func readIntReply(cn *pool.Conn) (int64, error) { 266 | line, err := readLine(cn) 267 | if err != nil { 268 | return 0, err 269 | } 270 | switch line[0] { 271 | case errorReply: 272 | return 0, parseErrorReply(cn, line) 273 | case intReply: 274 | return parseIntReply(cn, line) 275 | default: 276 | return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line) 277 | } 278 | } 279 | 280 | func parseBytesReply(cn *pool.Conn, line []byte) ([]byte, error) { 281 | if isNilReply(line) { 282 | return nil, Nil 283 | } 284 | 285 | replyLen, err := strconv.Atoi(bytesToString(line[1:])) 286 | if err != nil { 287 | return nil, err 288 | } 289 | 290 | b, err := cn.ReadN(replyLen + 2) 291 | if err != nil { 292 | return nil, err 293 | } 294 | 295 | return b[:replyLen], nil 296 | } 297 | 298 | func readBytesReply(cn *pool.Conn) ([]byte, error) { 299 | line, err := readLine(cn) 300 | if err != nil { 301 | return nil, err 302 | } 303 | switch line[0] { 304 | case errorReply: 305 | return nil, parseErrorReply(cn, line) 306 | case stringReply: 307 | 
return parseBytesReply(cn, line) 308 | case statusReply: 309 | return parseStatusReply(cn, line) 310 | default: 311 | return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line) 312 | } 313 | } 314 | 315 | func readStringReply(cn *pool.Conn) (string, error) { 316 | b, err := readBytesReply(cn) 317 | if err != nil { 318 | return "", err 319 | } 320 | return string(b), nil 321 | } 322 | 323 | func readFloatReply(cn *pool.Conn) (float64, error) { 324 | b, err := readBytesReply(cn) 325 | if err != nil { 326 | return 0, err 327 | } 328 | return strconv.ParseFloat(bytesToString(b), 64) 329 | } 330 | 331 | func parseArrayHeader(cn *pool.Conn, line []byte) (int64, error) { 332 | if isNilReply(line) { 333 | return 0, Nil 334 | } 335 | 336 | n, err := strconv.ParseInt(bytesToString(line[1:]), 10, 64) 337 | if err != nil { 338 | return 0, err 339 | } 340 | return n, nil 341 | } 342 | 343 | func parseArrayReply(cn *pool.Conn, p multiBulkParser, line []byte) (interface{}, error) { 344 | n, err := parseArrayHeader(cn, line) 345 | if err != nil { 346 | return nil, err 347 | } 348 | return p(cn, n) 349 | } 350 | 351 | func readArrayHeader(cn *pool.Conn) (int64, error) { 352 | line, err := readLine(cn) 353 | if err != nil { 354 | return 0, err 355 | } 356 | switch line[0] { 357 | case errorReply: 358 | return 0, parseErrorReply(cn, line) 359 | case arrayReply: 360 | return parseArrayHeader(cn, line) 361 | default: 362 | return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line) 363 | } 364 | } 365 | 366 | func readArrayReply(cn *pool.Conn, p multiBulkParser) (interface{}, error) { 367 | line, err := readLine(cn) 368 | if err != nil { 369 | return nil, err 370 | } 371 | switch line[0] { 372 | case errorReply: 373 | return nil, parseErrorReply(cn, line) 374 | case arrayReply: 375 | return parseArrayReply(cn, p, line) 376 | default: 377 | return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line) 378 | } 379 | } 380 | 381 | func readReply(cn *pool.Conn, p multiBulkParser) (interface{}, error) { 382 | line, err := readLine(cn) 383 | if err != nil { 384 | return nil, err 385 | } 386 | 387 | switch line[0] { 388 | case errorReply: 389 | return nil, parseErrorReply(cn, line) 390 | case statusReply: 391 | return parseStatusReply(cn, line) 392 | case intReply: 393 | return parseIntReply(cn, line) 394 | case stringReply: 395 | return parseBytesReply(cn, line) 396 | case arrayReply: 397 | return parseArrayReply(cn, p, line) 398 | } 399 | return nil, fmt.Errorf("redis: can't parse %.100q", line) 400 | } 401 | 402 | func readScanReply(cn *pool.Conn) ([]string, int64, error) { 403 | n, err := readArrayHeader(cn) 404 | if err != nil { 405 | return nil, 0, err 406 | } 407 | if n != 2 { 408 | return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) 409 | } 410 | 411 | b, err := readBytesReply(cn) 412 | if err != nil { 413 | return nil, 0, err 414 | } 415 | 416 | cursor, err := strconv.ParseInt(bytesToString(b), 10, 64) 417 | if err != nil { 418 | return nil, 0, err 419 | } 420 | 421 | n, err = readArrayHeader(cn) 422 | if err != nil { 423 | return nil, 0, err 424 | } 425 | 426 | keys := make([]string, n) 427 | for i := int64(0); i < n; i++ { 428 | key, err := readStringReply(cn) 429 | if err != nil { 430 | return nil, 0, err 431 | } 432 | keys[i] = key 433 | } 434 | 435 | return keys, cursor, err 436 | } 437 | 438 | func sliceParser(cn *pool.Conn, n int64) (interface{}, error) { 439 | vals := make([]interface{}, 0, n) 440 | for i := int64(0); i < n; i++ { 441 | v, 
err := readReply(cn, sliceParser) 442 | if err == Nil { 443 | vals = append(vals, nil) 444 | } else if err != nil { 445 | return nil, err 446 | } else { 447 | switch vv := v.(type) { 448 | case []byte: 449 | vals = append(vals, string(vv)) 450 | default: 451 | vals = append(vals, v) 452 | } 453 | } 454 | } 455 | return vals, nil 456 | } 457 | 458 | func intSliceParser(cn *pool.Conn, n int64) (interface{}, error) { 459 | ints := make([]int64, 0, n) 460 | for i := int64(0); i < n; i++ { 461 | n, err := readIntReply(cn) 462 | if err != nil { 463 | return nil, err 464 | } 465 | ints = append(ints, n) 466 | } 467 | return ints, nil 468 | } 469 | 470 | func boolSliceParser(cn *pool.Conn, n int64) (interface{}, error) { 471 | bools := make([]bool, 0, n) 472 | for i := int64(0); i < n; i++ { 473 | n, err := readIntReply(cn) 474 | if err != nil { 475 | return nil, err 476 | } 477 | bools = append(bools, n == 1) 478 | } 479 | return bools, nil 480 | } 481 | 482 | func stringSliceParser(cn *pool.Conn, n int64) (interface{}, error) { 483 | ss := make([]string, 0, n) 484 | for i := int64(0); i < n; i++ { 485 | s, err := readStringReply(cn) 486 | if err == Nil { 487 | ss = append(ss, "") 488 | } else if err != nil { 489 | return nil, err 490 | } else { 491 | ss = append(ss, s) 492 | } 493 | } 494 | return ss, nil 495 | } 496 | 497 | func floatSliceParser(cn *pool.Conn, n int64) (interface{}, error) { 498 | nn := make([]float64, 0, n) 499 | for i := int64(0); i < n; i++ { 500 | n, err := readFloatReply(cn) 501 | if err != nil { 502 | return nil, err 503 | } 504 | nn = append(nn, n) 505 | } 506 | return nn, nil 507 | } 508 | 509 | func stringStringMapParser(cn *pool.Conn, n int64) (interface{}, error) { 510 | m := make(map[string]string, n/2) 511 | for i := int64(0); i < n; i += 2 { 512 | key, err := readStringReply(cn) 513 | if err != nil { 514 | return nil, err 515 | } 516 | 517 | value, err := readStringReply(cn) 518 | if err != nil { 519 | return nil, err 520 | } 521 | 522 | m[key] = value 523 | } 524 | return m, nil 525 | } 526 | 527 | func stringIntMapParser(cn *pool.Conn, n int64) (interface{}, error) { 528 | m := make(map[string]int64, n/2) 529 | for i := int64(0); i < n; i += 2 { 530 | key, err := readStringReply(cn) 531 | if err != nil { 532 | return nil, err 533 | } 534 | 535 | n, err := readIntReply(cn) 536 | if err != nil { 537 | return nil, err 538 | } 539 | 540 | m[key] = n 541 | } 542 | return m, nil 543 | } 544 | 545 | func zSliceParser(cn *pool.Conn, n int64) (interface{}, error) { 546 | zz := make([]Z, n/2) 547 | for i := int64(0); i < n; i += 2 { 548 | var err error 549 | 550 | z := &zz[i/2] 551 | 552 | z.Member, err = readStringReply(cn) 553 | if err != nil { 554 | return nil, err 555 | } 556 | 557 | z.Score, err = readFloatReply(cn) 558 | if err != nil { 559 | return nil, err 560 | } 561 | } 562 | return zz, nil 563 | } 564 | 565 | func clusterSlotInfoSliceParser(cn *pool.Conn, n int64) (interface{}, error) { 566 | infos := make([]ClusterSlotInfo, 0, n) 567 | for i := int64(0); i < n; i++ { 568 | n, err := readArrayHeader(cn) 569 | if err != nil { 570 | return nil, err 571 | } 572 | if n < 2 { 573 | err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) 574 | return nil, err 575 | } 576 | 577 | start, err := readIntReply(cn) 578 | if err != nil { 579 | return nil, err 580 | } 581 | 582 | end, err := readIntReply(cn) 583 | if err != nil { 584 | return nil, err 585 | } 586 | 587 | addrsn := n - 2 588 | info := ClusterSlotInfo{ 589 | Start: int(start), 590 | 
End: int(end), 591 | Addrs: make([]string, addrsn), 592 | } 593 | 594 | for i := int64(0); i < addrsn; i++ { 595 | n, err := readArrayHeader(cn) 596 | if err != nil { 597 | return nil, err 598 | } 599 | if n != 2 && n != 3 { 600 | err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) 601 | return nil, err 602 | } 603 | 604 | ip, err := readStringReply(cn) 605 | if err != nil { 606 | return nil, err 607 | } 608 | 609 | port, err := readIntReply(cn) 610 | if err != nil { 611 | return nil, err 612 | } 613 | 614 | if n == 3 { 615 | // TODO: expose id in ClusterSlotInfo 616 | _, err := readStringReply(cn) 617 | if err != nil { 618 | return nil, err 619 | } 620 | } 621 | 622 | info.Addrs[i] = net.JoinHostPort(ip, strconv.FormatInt(port, 10)) 623 | } 624 | 625 | infos = append(infos, info) 626 | } 627 | return infos, nil 628 | } 629 | 630 | func newGeoLocationParser(q *GeoRadiusQuery) multiBulkParser { 631 | return func(cn *pool.Conn, n int64) (interface{}, error) { 632 | var loc GeoLocation 633 | 634 | var err error 635 | loc.Name, err = readStringReply(cn) 636 | if err != nil { 637 | return nil, err 638 | } 639 | if q.WithDist { 640 | loc.Dist, err = readFloatReply(cn) 641 | if err != nil { 642 | return nil, err 643 | } 644 | } 645 | if q.WithGeoHash { 646 | loc.GeoHash, err = readIntReply(cn) 647 | if err != nil { 648 | return nil, err 649 | } 650 | } 651 | if q.WithCoord { 652 | n, err := readArrayHeader(cn) 653 | if err != nil { 654 | return nil, err 655 | } 656 | if n != 2 { 657 | return nil, fmt.Errorf("got %d coordinates, expected 2", n) 658 | } 659 | 660 | loc.Longitude, err = readFloatReply(cn) 661 | if err != nil { 662 | return nil, err 663 | } 664 | loc.Latitude, err = readFloatReply(cn) 665 | if err != nil { 666 | return nil, err 667 | } 668 | } 669 | 670 | return &loc, nil 671 | } 672 | } 673 | 674 | func newGeoLocationSliceParser(q *GeoRadiusQuery) multiBulkParser { 675 | return func(cn *pool.Conn, n int64) (interface{}, error) { 676 | locs := make([]GeoLocation, 0, n) 677 | for i := int64(0); i < n; i++ { 678 | v, err := readReply(cn, newGeoLocationParser(q)) 679 | if err != nil { 680 | return nil, err 681 | } 682 | switch vv := v.(type) { 683 | case []byte: 684 | locs = append(locs, GeoLocation{ 685 | Name: string(vv), 686 | }) 687 | case *GeoLocation: 688 | locs = append(locs, *vv) 689 | default: 690 | return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) 691 | } 692 | } 693 | return locs, nil 694 | } 695 | } 696 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/parser_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "testing" 7 | 8 | "gopkg.in/redis.v3/internal/pool" 9 | ) 10 | 11 | func BenchmarkParseReplyStatus(b *testing.B) { 12 | benchmarkParseReply(b, "+OK\r\n", nil, false) 13 | } 14 | 15 | func BenchmarkParseReplyInt(b *testing.B) { 16 | benchmarkParseReply(b, ":1\r\n", nil, false) 17 | } 18 | 19 | func BenchmarkParseReplyError(b *testing.B) { 20 | benchmarkParseReply(b, "-Error message\r\n", nil, true) 21 | } 22 | 23 | func BenchmarkParseReplyString(b *testing.B) { 24 | benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false) 25 | } 26 | 27 | func BenchmarkParseReplySlice(b *testing.B) { 28 | benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", sliceParser, false) 29 | } 30 | 31 | func benchmarkParseReply(b *testing.B, reply string, p multiBulkParser, wanterr 
bool) { 32 | buf := &bytes.Buffer{} 33 | for i := 0; i < b.N; i++ { 34 | buf.WriteString(reply) 35 | } 36 | cn := &pool.Conn{ 37 | Rd: bufio.NewReader(buf), 38 | Buf: make([]byte, 4096), 39 | } 40 | 41 | b.ResetTimer() 42 | 43 | for i := 0; i < b.N; i++ { 44 | _, err := readReply(cn, p) 45 | if !wanterr && err != nil { 46 | b.Fatal(err) 47 | } 48 | } 49 | } 50 | 51 | func BenchmarkAppendArgs(b *testing.B) { 52 | buf := make([]byte, 0, 64) 53 | args := []interface{}{"hello", "world", "foo", "bar"} 54 | for i := 0; i < b.N; i++ { 55 | appendArgs(buf, args) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/pipeline.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | 7 | "gopkg.in/redis.v3/internal/pool" 8 | ) 9 | 10 | // Pipeline implements pipelining as described in 11 | // http://redis.io/topics/pipelining. It's safe for concurrent use 12 | // by multiple goroutines. 13 | type Pipeline struct { 14 | commandable 15 | 16 | client baseClient 17 | 18 | mu sync.Mutex // protects cmds 19 | cmds []Cmder 20 | 21 | closed int32 22 | } 23 | 24 | func (c *Client) Pipeline() *Pipeline { 25 | pipe := &Pipeline{ 26 | client: c.baseClient, 27 | cmds: make([]Cmder, 0, 10), 28 | } 29 | pipe.commandable.process = pipe.process 30 | return pipe 31 | } 32 | 33 | func (c *Client) Pipelined(fn func(*Pipeline) error) ([]Cmder, error) { 34 | pipe := c.Pipeline() 35 | if err := fn(pipe); err != nil { 36 | return nil, err 37 | } 38 | cmds, err := pipe.Exec() 39 | _ = pipe.Close() 40 | return cmds, err 41 | } 42 | 43 | func (pipe *Pipeline) process(cmd Cmder) { 44 | pipe.mu.Lock() 45 | pipe.cmds = append(pipe.cmds, cmd) 46 | pipe.mu.Unlock() 47 | } 48 | 49 | // Close closes the pipeline, releasing any open resources. 50 | func (pipe *Pipeline) Close() error { 51 | atomic.StoreInt32(&pipe.closed, 1) 52 | pipe.Discard() 53 | return nil 54 | } 55 | 56 | func (pipe *Pipeline) isClosed() bool { 57 | return atomic.LoadInt32(&pipe.closed) == 1 58 | } 59 | 60 | // Discard resets the pipeline and discards queued commands. 61 | func (pipe *Pipeline) Discard() error { 62 | defer pipe.mu.Unlock() 63 | pipe.mu.Lock() 64 | if pipe.isClosed() { 65 | return pool.ErrClosed 66 | } 67 | pipe.cmds = pipe.cmds[:0] 68 | return nil 69 | } 70 | 71 | // Exec executes all previously queued commands using one 72 | // client-server roundtrip. 73 | // 74 | // Exec always returns list of commands and error of the first failed 75 | // command if any. 
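// A minimal usage sketch (illustrative only; the same calls are exercised in
// pipeline_test.go):
//
//	pipe := client.Pipeline()
//	incr := pipe.Incr("counter")
//	_, err := pipe.Exec() // incr.Val() is populated once Exec returns
//	_ = pipe.Close()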
76 | func (pipe *Pipeline) Exec() (cmds []Cmder, retErr error) { 77 | if pipe.isClosed() { 78 | return nil, pool.ErrClosed 79 | } 80 | 81 | defer pipe.mu.Unlock() 82 | pipe.mu.Lock() 83 | 84 | if len(pipe.cmds) == 0 { 85 | return pipe.cmds, nil 86 | } 87 | 88 | cmds = pipe.cmds 89 | pipe.cmds = make([]Cmder, 0, 10) 90 | 91 | failedCmds := cmds 92 | for i := 0; i <= pipe.client.opt.MaxRetries; i++ { 93 | cn, err := pipe.client.conn() 94 | if err != nil { 95 | setCmdsErr(failedCmds, err) 96 | return cmds, err 97 | } 98 | 99 | if i > 0 { 100 | resetCmds(failedCmds) 101 | } 102 | failedCmds, err = execCmds(cn, failedCmds) 103 | pipe.client.putConn(cn, err, false) 104 | if err != nil && retErr == nil { 105 | retErr = err 106 | } 107 | if len(failedCmds) == 0 { 108 | break 109 | } 110 | } 111 | 112 | return cmds, retErr 113 | } 114 | 115 | func execCmds(cn *pool.Conn, cmds []Cmder) ([]Cmder, error) { 116 | if err := writeCmd(cn, cmds...); err != nil { 117 | setCmdsErr(cmds, err) 118 | return cmds, err 119 | } 120 | 121 | var firstCmdErr error 122 | var failedCmds []Cmder 123 | for _, cmd := range cmds { 124 | err := cmd.readReply(cn) 125 | if err == nil { 126 | continue 127 | } 128 | if firstCmdErr == nil { 129 | firstCmdErr = err 130 | } 131 | if shouldRetry(err) { 132 | failedCmds = append(failedCmds, cmd) 133 | } 134 | } 135 | 136 | return failedCmds, firstCmdErr 137 | } 138 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/pipeline_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "strconv" 5 | "sync" 6 | 7 | . "github.com/onsi/ginkgo" 8 | . "github.com/onsi/gomega" 9 | 10 | "gopkg.in/redis.v3" 11 | ) 12 | 13 | var _ = Describe("Pipelining", func() { 14 | var client *redis.Client 15 | 16 | BeforeEach(func() { 17 | client = redis.NewClient(redisOptions()) 18 | Expect(client.FlushDb().Err()).NotTo(HaveOccurred()) 19 | }) 20 | 21 | AfterEach(func() { 22 | Expect(client.Close()).NotTo(HaveOccurred()) 23 | }) 24 | 25 | It("should pipeline", func() { 26 | set := client.Set("key2", "hello2", 0) 27 | Expect(set.Err()).NotTo(HaveOccurred()) 28 | Expect(set.Val()).To(Equal("OK")) 29 | 30 | pipeline := client.Pipeline() 31 | set = pipeline.Set("key1", "hello1", 0) 32 | get := pipeline.Get("key2") 33 | incr := pipeline.Incr("key3") 34 | getNil := pipeline.Get("key4") 35 | 36 | cmds, err := pipeline.Exec() 37 | Expect(err).To(Equal(redis.Nil)) 38 | Expect(cmds).To(HaveLen(4)) 39 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 40 | 41 | Expect(set.Err()).NotTo(HaveOccurred()) 42 | Expect(set.Val()).To(Equal("OK")) 43 | 44 | Expect(get.Err()).NotTo(HaveOccurred()) 45 | Expect(get.Val()).To(Equal("hello2")) 46 | 47 | Expect(incr.Err()).NotTo(HaveOccurred()) 48 | Expect(incr.Val()).To(Equal(int64(1))) 49 | 50 | Expect(getNil.Err()).To(Equal(redis.Nil)) 51 | Expect(getNil.Val()).To(Equal("")) 52 | }) 53 | 54 | It("should discard", func() { 55 | pipeline := client.Pipeline() 56 | 57 | pipeline.Get("key") 58 | pipeline.Discard() 59 | cmds, err := pipeline.Exec() 60 | Expect(err).NotTo(HaveOccurred()) 61 | Expect(cmds).To(HaveLen(0)) 62 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 63 | }) 64 | 65 | It("should support block style", func() { 66 | var get *redis.StringCmd 67 | cmds, err := client.Pipelined(func(pipe *redis.Pipeline) error { 68 | get = pipe.Get("foo") 69 | return nil 70 | }) 71 | Expect(err).To(Equal(redis.Nil)) 72 | Expect(cmds).To(HaveLen(1)) 73 | 
Expect(cmds[0]).To(Equal(get)) 74 | Expect(get.Err()).To(Equal(redis.Nil)) 75 | Expect(get.Val()).To(Equal("")) 76 | }) 77 | 78 | It("should handle vals/err", func() { 79 | pipeline := client.Pipeline() 80 | 81 | get := pipeline.Get("key") 82 | Expect(get.Err()).NotTo(HaveOccurred()) 83 | Expect(get.Val()).To(Equal("")) 84 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 85 | }) 86 | 87 | It("should pipeline with empty queue", func() { 88 | pipeline := client.Pipeline() 89 | cmds, err := pipeline.Exec() 90 | Expect(err).NotTo(HaveOccurred()) 91 | Expect(cmds).To(HaveLen(0)) 92 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 93 | }) 94 | 95 | It("should increment correctly", func() { 96 | const N = 20000 97 | key := "TestPipelineIncr" 98 | pipeline := client.Pipeline() 99 | for i := 0; i < N; i++ { 100 | pipeline.Incr(key) 101 | } 102 | 103 | cmds, err := pipeline.Exec() 104 | Expect(err).NotTo(HaveOccurred()) 105 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 106 | 107 | Expect(len(cmds)).To(Equal(20000)) 108 | for _, cmd := range cmds { 109 | Expect(cmd.Err()).NotTo(HaveOccurred()) 110 | } 111 | 112 | get := client.Get(key) 113 | Expect(get.Err()).NotTo(HaveOccurred()) 114 | Expect(get.Val()).To(Equal(strconv.Itoa(N))) 115 | }) 116 | 117 | It("should PipelineEcho", func() { 118 | const N = 1000 119 | 120 | wg := &sync.WaitGroup{} 121 | wg.Add(N) 122 | for i := 0; i < N; i++ { 123 | go func(i int) { 124 | defer GinkgoRecover() 125 | defer wg.Done() 126 | 127 | pipeline := client.Pipeline() 128 | 129 | msg1 := "echo" + strconv.Itoa(i) 130 | msg2 := "echo" + strconv.Itoa(i+1) 131 | 132 | echo1 := pipeline.Echo(msg1) 133 | echo2 := pipeline.Echo(msg2) 134 | 135 | cmds, err := pipeline.Exec() 136 | Expect(err).NotTo(HaveOccurred()) 137 | Expect(cmds).To(HaveLen(2)) 138 | 139 | Expect(echo1.Err()).NotTo(HaveOccurred()) 140 | Expect(echo1.Val()).To(Equal(msg1)) 141 | 142 | Expect(echo2.Err()).NotTo(HaveOccurred()) 143 | Expect(echo2.Val()).To(Equal(msg2)) 144 | 145 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 146 | }(i) 147 | } 148 | wg.Wait() 149 | }) 150 | 151 | It("should be thread-safe", func() { 152 | const N = 1000 153 | 154 | pipeline := client.Pipeline() 155 | wg := &sync.WaitGroup{} 156 | wg.Add(N) 157 | for i := 0; i < N; i++ { 158 | go func() { 159 | pipeline.Ping() 160 | wg.Done() 161 | }() 162 | } 163 | wg.Wait() 164 | 165 | cmds, err := pipeline.Exec() 166 | Expect(err).NotTo(HaveOccurred()) 167 | Expect(cmds).To(HaveLen(N)) 168 | 169 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 170 | }) 171 | 172 | }) 173 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/pool_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo" 5 | . 
"github.com/onsi/gomega" 6 | 7 | "gopkg.in/redis.v3" 8 | "gopkg.in/redis.v3/internal/pool" 9 | ) 10 | 11 | var _ = Describe("pool", func() { 12 | var client *redis.Client 13 | 14 | BeforeEach(func() { 15 | client = redis.NewClient(redisOptions()) 16 | Expect(client.FlushDb().Err()).NotTo(HaveOccurred()) 17 | }) 18 | 19 | AfterEach(func() { 20 | Expect(client.Close()).NotTo(HaveOccurred()) 21 | }) 22 | 23 | It("should respect max size", func() { 24 | perform(1000, func(id int) { 25 | val, err := client.Ping().Result() 26 | Expect(err).NotTo(HaveOccurred()) 27 | Expect(val).To(Equal("PONG")) 28 | }) 29 | 30 | pool := client.Pool() 31 | Expect(pool.Len()).To(BeNumerically("<=", 10)) 32 | Expect(pool.FreeLen()).To(BeNumerically("<=", 10)) 33 | Expect(pool.Len()).To(Equal(pool.FreeLen())) 34 | }) 35 | 36 | It("should respect max on multi", func() { 37 | perform(1000, func(id int) { 38 | var ping *redis.StatusCmd 39 | 40 | multi := client.Multi() 41 | cmds, err := multi.Exec(func() error { 42 | ping = multi.Ping() 43 | return nil 44 | }) 45 | Expect(err).NotTo(HaveOccurred()) 46 | Expect(cmds).To(HaveLen(1)) 47 | Expect(ping.Err()).NotTo(HaveOccurred()) 48 | Expect(ping.Val()).To(Equal("PONG")) 49 | Expect(multi.Close()).NotTo(HaveOccurred()) 50 | }) 51 | 52 | pool := client.Pool() 53 | Expect(pool.Len()).To(BeNumerically("<=", 10)) 54 | Expect(pool.FreeLen()).To(BeNumerically("<=", 10)) 55 | Expect(pool.Len()).To(Equal(pool.FreeLen())) 56 | }) 57 | 58 | It("should respect max on pipelines", func() { 59 | perform(1000, func(id int) { 60 | pipe := client.Pipeline() 61 | ping := pipe.Ping() 62 | cmds, err := pipe.Exec() 63 | Expect(err).NotTo(HaveOccurred()) 64 | Expect(cmds).To(HaveLen(1)) 65 | Expect(ping.Err()).NotTo(HaveOccurred()) 66 | Expect(ping.Val()).To(Equal("PONG")) 67 | Expect(pipe.Close()).NotTo(HaveOccurred()) 68 | }) 69 | 70 | pool := client.Pool() 71 | Expect(pool.Len()).To(BeNumerically("<=", 10)) 72 | Expect(pool.FreeLen()).To(BeNumerically("<=", 10)) 73 | Expect(pool.Len()).To(Equal(pool.FreeLen())) 74 | }) 75 | 76 | It("should respect max on pubsub", func() { 77 | connPool := client.Pool() 78 | connPool.(*pool.ConnPool).DialLimiter = nil 79 | 80 | perform(1000, func(id int) { 81 | pubsub := client.PubSub() 82 | Expect(pubsub.Subscribe()).NotTo(HaveOccurred()) 83 | Expect(pubsub.Close()).NotTo(HaveOccurred()) 84 | }) 85 | 86 | Expect(connPool.Len()).To(Equal(connPool.FreeLen())) 87 | Expect(connPool.Len()).To(BeNumerically("<=", 10)) 88 | }) 89 | 90 | It("should remove broken connections", func() { 91 | cn, err := client.Pool().Get() 92 | Expect(err).NotTo(HaveOccurred()) 93 | cn.NetConn = &badConn{} 94 | Expect(client.Pool().Put(cn)).NotTo(HaveOccurred()) 95 | 96 | err = client.Ping().Err() 97 | Expect(err).To(MatchError("bad connection")) 98 | 99 | val, err := client.Ping().Result() 100 | Expect(err).NotTo(HaveOccurred()) 101 | Expect(val).To(Equal("PONG")) 102 | 103 | pool := client.Pool() 104 | Expect(pool.Len()).To(Equal(1)) 105 | Expect(pool.FreeLen()).To(Equal(1)) 106 | 107 | stats := pool.Stats() 108 | Expect(stats.Requests).To(Equal(uint32(4))) 109 | Expect(stats.Hits).To(Equal(uint32(2))) 110 | Expect(stats.Waits).To(Equal(uint32(0))) 111 | Expect(stats.Timeouts).To(Equal(uint32(0))) 112 | }) 113 | 114 | It("should reuse connections", func() { 115 | for i := 0; i < 100; i++ { 116 | val, err := client.Ping().Result() 117 | Expect(err).NotTo(HaveOccurred()) 118 | Expect(val).To(Equal("PONG")) 119 | } 120 | 121 | pool := client.Pool() 122 | Expect(pool.Len()).To(Equal(1)) 
123 | Expect(pool.FreeLen()).To(Equal(1)) 124 | 125 | stats := pool.Stats() 126 | Expect(stats.Requests).To(Equal(uint32(101))) 127 | Expect(stats.Hits).To(Equal(uint32(100))) 128 | Expect(stats.Waits).To(Equal(uint32(0))) 129 | Expect(stats.Timeouts).To(Equal(uint32(0))) 130 | }) 131 | }) 132 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/pubsub.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "time" 7 | 8 | "gopkg.in/redis.v3/internal/pool" 9 | ) 10 | 11 | var receiveMessageTimeout = 5 * time.Second 12 | 13 | // Posts a message to the given channel. 14 | func (c *Client) Publish(channel, message string) *IntCmd { 15 | req := NewIntCmd("PUBLISH", channel, message) 16 | c.Process(req) 17 | return req 18 | } 19 | 20 | // PubSub implements Pub/Sub commands as described in 21 | // http://redis.io/topics/pubsub. It's NOT safe for concurrent use by 22 | // multiple goroutines. 23 | type PubSub struct { 24 | base *baseClient 25 | 26 | channels []string 27 | patterns []string 28 | 29 | nsub int // number of active subscriptions 30 | } 31 | 32 | // Deprecated. Use Subscribe/PSubscribe instead. 33 | func (c *Client) PubSub() *PubSub { 34 | return &PubSub{ 35 | base: &baseClient{ 36 | opt: c.opt, 37 | connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), false), 38 | }, 39 | } 40 | } 41 | 42 | // Subscribes the client to the specified channels. 43 | func (c *Client) Subscribe(channels ...string) (*PubSub, error) { 44 | pubsub := c.PubSub() 45 | return pubsub, pubsub.Subscribe(channels...) 46 | } 47 | 48 | // Subscribes the client to the given patterns. 49 | func (c *Client) PSubscribe(channels ...string) (*PubSub, error) { 50 | pubsub := c.PubSub() 51 | return pubsub, pubsub.PSubscribe(channels...) 52 | } 53 | 54 | func (c *PubSub) subscribe(redisCmd string, channels ...string) error { 55 | cn, err := c.base.conn() 56 | if err != nil { 57 | return err 58 | } 59 | c.putConn(cn, err) 60 | 61 | args := make([]interface{}, 1+len(channels)) 62 | args[0] = redisCmd 63 | for i, channel := range channels { 64 | args[1+i] = channel 65 | } 66 | cmd := NewSliceCmd(args...) 67 | 68 | return writeCmd(cn, cmd) 69 | } 70 | 71 | // Subscribes the client to the specified channels. 72 | func (c *PubSub) Subscribe(channels ...string) error { 73 | err := c.subscribe("SUBSCRIBE", channels...) 74 | if err == nil { 75 | c.channels = append(c.channels, channels...) 76 | c.nsub += len(channels) 77 | } 78 | return err 79 | } 80 | 81 | // Subscribes the client to the given patterns. 82 | func (c *PubSub) PSubscribe(patterns ...string) error { 83 | err := c.subscribe("PSUBSCRIBE", patterns...) 84 | if err == nil { 85 | c.patterns = append(c.patterns, patterns...) 86 | c.nsub += len(patterns) 87 | } 88 | return err 89 | } 90 | 91 | func remove(ss []string, es ...string) []string { 92 | if len(es) == 0 { 93 | return ss[:0] 94 | } 95 | for _, e := range es { 96 | for i, s := range ss { 97 | if s == e { 98 | ss = append(ss[:i], ss[i+1:]...) 99 | break 100 | } 101 | } 102 | } 103 | return ss 104 | } 105 | 106 | // Unsubscribes the client from the given channels, or from all of 107 | // them if none is given. 108 | func (c *PubSub) Unsubscribe(channels ...string) error { 109 | err := c.subscribe("UNSUBSCRIBE", channels...) 110 | if err == nil { 111 | c.channels = remove(c.channels, channels...) 
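// Keeping c.channels in sync matters because resubscribe (below) replays
// this slice after a reconnect.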
112 | } 113 | return err 114 | } 115 | 116 | // Unsubscribes the client from the given patterns, or from all of 117 | // them if none is given. 118 | func (c *PubSub) PUnsubscribe(patterns ...string) error { 119 | err := c.subscribe("PUNSUBSCRIBE", patterns...) 120 | if err == nil { 121 | c.patterns = remove(c.patterns, patterns...) 122 | } 123 | return err 124 | } 125 | 126 | func (c *PubSub) Close() error { 127 | return c.base.Close() 128 | } 129 | 130 | func (c *PubSub) Ping(payload string) error { 131 | cn, err := c.base.conn() 132 | if err != nil { 133 | return err 134 | } 135 | 136 | args := []interface{}{"PING"} 137 | if payload != "" { 138 | args = append(args, payload) 139 | } 140 | cmd := NewCmd(args...) 141 | return writeCmd(cn, cmd) 142 | } 143 | 144 | // Message received after a successful subscription to channel. 145 | type Subscription struct { 146 | // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". 147 | Kind string 148 | // Channel name we have subscribed to. 149 | Channel string 150 | // Number of channels we are currently subscribed to. 151 | Count int 152 | } 153 | 154 | func (m *Subscription) String() string { 155 | return fmt.Sprintf("%s: %s", m.Kind, m.Channel) 156 | } 157 | 158 | // Message received as result of a PUBLISH command issued by another client. 159 | type Message struct { 160 | Channel string 161 | Pattern string 162 | Payload string 163 | } 164 | 165 | func (m *Message) String() string { 166 | return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) 167 | } 168 | 169 | // TODO: remove PMessage if favor of Message 170 | 171 | // Message matching a pattern-matching subscription received as result 172 | // of a PUBLISH command issued by another client. 173 | type PMessage struct { 174 | Channel string 175 | Pattern string 176 | Payload string 177 | } 178 | 179 | func (m *PMessage) String() string { 180 | return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload) 181 | } 182 | 183 | // Pong received as result of a PING command issued by another client. 184 | type Pong struct { 185 | Payload string 186 | } 187 | 188 | func (p *Pong) String() string { 189 | if p.Payload != "" { 190 | return fmt.Sprintf("Pong<%s>", p.Payload) 191 | } 192 | return "Pong" 193 | } 194 | 195 | func (c *PubSub) newMessage(reply []interface{}) (interface{}, error) { 196 | switch kind := reply[0].(string); kind { 197 | case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": 198 | return &Subscription{ 199 | Kind: kind, 200 | Channel: reply[1].(string), 201 | Count: int(reply[2].(int64)), 202 | }, nil 203 | case "message": 204 | return &Message{ 205 | Channel: reply[1].(string), 206 | Payload: reply[2].(string), 207 | }, nil 208 | case "pmessage": 209 | return &PMessage{ 210 | Pattern: reply[1].(string), 211 | Channel: reply[2].(string), 212 | Payload: reply[3].(string), 213 | }, nil 214 | case "pong": 215 | return &Pong{ 216 | Payload: reply[1].(string), 217 | }, nil 218 | default: 219 | return nil, fmt.Errorf("redis: unsupported pubsub notification: %q", kind) 220 | } 221 | } 222 | 223 | // ReceiveTimeout acts like Receive but returns an error if message 224 | // is not received in time. This is low-level API and most clients 225 | // should use ReceiveMessage. 
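// A typical subscriber loop, sketched from the API used in pubsub_test.go:
//
//	pubsub, err := client.Subscribe("mychannel")
//	if err != nil {
//		return err
//	}
//	defer pubsub.Close()
//	for {
//		msg, err := pubsub.ReceiveMessage()
//		if err != nil {
//			return err
//		}
//		log.Println(msg.Channel, msg.Payload)
//	}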
226 | func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) { 227 | if c.nsub == 0 { 228 | c.resubscribe() 229 | } 230 | 231 | cn, err := c.base.conn() 232 | if err != nil { 233 | return nil, err 234 | } 235 | cn.ReadTimeout = timeout 236 | 237 | cmd := NewSliceCmd() 238 | err = cmd.readReply(cn) 239 | c.putConn(cn, err) 240 | if err != nil { 241 | return nil, err 242 | } 243 | 244 | return c.newMessage(cmd.Val()) 245 | } 246 | 247 | // Receive returns a message as a Subscription, Message, PMessage, 248 | // Pong or error. See PubSub example for details. This is low-level 249 | // API and most clients should use ReceiveMessage. 250 | func (c *PubSub) Receive() (interface{}, error) { 251 | return c.ReceiveTimeout(0) 252 | } 253 | 254 | // ReceiveMessage returns a Message or error ignoring Subscription or Pong 255 | // messages. It automatically reconnects to Redis Server and resubscribes 256 | // to channels in case of network errors. 257 | func (c *PubSub) ReceiveMessage() (*Message, error) { 258 | var errNum uint 259 | for { 260 | msgi, err := c.ReceiveTimeout(receiveMessageTimeout) 261 | if err != nil { 262 | if !isNetworkError(err) { 263 | return nil, err 264 | } 265 | 266 | errNum++ 267 | if errNum < 3 { 268 | if netErr, ok := err.(net.Error); ok && netErr.Timeout() { 269 | err := c.Ping("") 270 | if err != nil { 271 | Logger.Printf("PubSub.Ping failed: %s", err) 272 | } 273 | } 274 | } else { 275 | // 3 consequent errors - connection is bad 276 | // and/or Redis Server is down. 277 | // Sleep to not exceed max number of open connections. 278 | time.Sleep(time.Second) 279 | } 280 | continue 281 | } 282 | 283 | // Reset error number, because we received a message. 284 | errNum = 0 285 | 286 | switch msg := msgi.(type) { 287 | case *Subscription: 288 | // Ignore. 289 | case *Pong: 290 | // Ignore. 291 | case *Message: 292 | return msg, nil 293 | case *PMessage: 294 | return &Message{ 295 | Channel: msg.Channel, 296 | Pattern: msg.Pattern, 297 | Payload: msg.Payload, 298 | }, nil 299 | default: 300 | return nil, fmt.Errorf("redis: unknown message: %T", msgi) 301 | } 302 | } 303 | } 304 | 305 | func (c *PubSub) putConn(cn *pool.Conn, err error) { 306 | if !c.base.putConn(cn, err, true) { 307 | c.nsub = 0 308 | } 309 | } 310 | 311 | func (c *PubSub) resubscribe() { 312 | if c.base.closed() { 313 | return 314 | } 315 | if len(c.channels) > 0 { 316 | if err := c.Subscribe(c.channels...); err != nil { 317 | Logger.Printf("Subscribe failed: %s", err) 318 | } 319 | } 320 | if len(c.patterns) > 0 { 321 | if err := c.PSubscribe(c.patterns...); err != nil { 322 | Logger.Printf("PSubscribe failed: %s", err) 323 | } 324 | } 325 | } 326 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/pubsub_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "io" 5 | "net" 6 | "sync" 7 | "time" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . 
"github.com/onsi/gomega" 11 | 12 | "gopkg.in/redis.v3" 13 | ) 14 | 15 | var _ = Describe("PubSub", func() { 16 | var client *redis.Client 17 | 18 | BeforeEach(func() { 19 | client = redis.NewClient(redisOptions()) 20 | Expect(client.FlushDb().Err()).NotTo(HaveOccurred()) 21 | }) 22 | 23 | AfterEach(func() { 24 | Expect(client.Close()).NotTo(HaveOccurred()) 25 | }) 26 | 27 | It("should support pattern matching", func() { 28 | pubsub, err := client.PSubscribe("mychannel*") 29 | Expect(err).NotTo(HaveOccurred()) 30 | defer pubsub.Close() 31 | 32 | { 33 | msgi, err := pubsub.ReceiveTimeout(time.Second) 34 | Expect(err).NotTo(HaveOccurred()) 35 | subscr := msgi.(*redis.Subscription) 36 | Expect(subscr.Kind).To(Equal("psubscribe")) 37 | Expect(subscr.Channel).To(Equal("mychannel*")) 38 | Expect(subscr.Count).To(Equal(1)) 39 | } 40 | 41 | { 42 | msgi, err := pubsub.ReceiveTimeout(time.Second) 43 | Expect(err.(net.Error).Timeout()).To(Equal(true)) 44 | Expect(msgi).To(BeNil()) 45 | } 46 | 47 | n, err := client.Publish("mychannel1", "hello").Result() 48 | Expect(err).NotTo(HaveOccurred()) 49 | Expect(n).To(Equal(int64(1))) 50 | 51 | Expect(pubsub.PUnsubscribe("mychannel*")).NotTo(HaveOccurred()) 52 | 53 | { 54 | msgi, err := pubsub.ReceiveTimeout(time.Second) 55 | Expect(err).NotTo(HaveOccurred()) 56 | subscr := msgi.(*redis.PMessage) 57 | Expect(subscr.Channel).To(Equal("mychannel1")) 58 | Expect(subscr.Pattern).To(Equal("mychannel*")) 59 | Expect(subscr.Payload).To(Equal("hello")) 60 | } 61 | 62 | { 63 | msgi, err := pubsub.ReceiveTimeout(time.Second) 64 | Expect(err).NotTo(HaveOccurred()) 65 | subscr := msgi.(*redis.Subscription) 66 | Expect(subscr.Kind).To(Equal("punsubscribe")) 67 | Expect(subscr.Channel).To(Equal("mychannel*")) 68 | Expect(subscr.Count).To(Equal(0)) 69 | } 70 | 71 | stats := client.PoolStats() 72 | Expect(stats.Requests - stats.Hits - stats.Waits).To(Equal(uint32(2))) 73 | }) 74 | 75 | It("should pub/sub channels", func() { 76 | channels, err := client.PubSubChannels("mychannel*").Result() 77 | Expect(err).NotTo(HaveOccurred()) 78 | Expect(channels).To(BeEmpty()) 79 | 80 | pubsub, err := client.Subscribe("mychannel", "mychannel2") 81 | Expect(err).NotTo(HaveOccurred()) 82 | defer pubsub.Close() 83 | 84 | channels, err = client.PubSubChannels("mychannel*").Result() 85 | Expect(err).NotTo(HaveOccurred()) 86 | Expect(channels).To(ConsistOf([]string{"mychannel", "mychannel2"})) 87 | 88 | channels, err = client.PubSubChannels("").Result() 89 | Expect(err).NotTo(HaveOccurred()) 90 | Expect(channels).To(BeEmpty()) 91 | 92 | channels, err = client.PubSubChannels("*").Result() 93 | Expect(err).NotTo(HaveOccurred()) 94 | Expect(len(channels)).To(BeNumerically(">=", 2)) 95 | }) 96 | 97 | It("should return the numbers of subscribers", func() { 98 | pubsub, err := client.Subscribe("mychannel", "mychannel2") 99 | Expect(err).NotTo(HaveOccurred()) 100 | defer pubsub.Close() 101 | 102 | channels, err := client.PubSubNumSub("mychannel", "mychannel2", "mychannel3").Result() 103 | Expect(err).NotTo(HaveOccurred()) 104 | Expect(channels).To(Equal(map[string]int64{ 105 | "mychannel": 1, 106 | "mychannel2": 1, 107 | "mychannel3": 0, 108 | })) 109 | }) 110 | 111 | It("should return the numbers of subscribers by pattern", func() { 112 | num, err := client.PubSubNumPat().Result() 113 | Expect(err).NotTo(HaveOccurred()) 114 | Expect(num).To(Equal(int64(0))) 115 | 116 | pubsub, err := client.PSubscribe("*") 117 | Expect(err).NotTo(HaveOccurred()) 118 | defer pubsub.Close() 119 | 120 | num, err = 
client.PubSubNumPat().Result() 121 | Expect(err).NotTo(HaveOccurred()) 122 | Expect(num).To(Equal(int64(1))) 123 | }) 124 | 125 | It("should pub/sub", func() { 126 | pubsub, err := client.Subscribe("mychannel", "mychannel2") 127 | Expect(err).NotTo(HaveOccurred()) 128 | defer pubsub.Close() 129 | 130 | { 131 | msgi, err := pubsub.ReceiveTimeout(time.Second) 132 | Expect(err).NotTo(HaveOccurred()) 133 | subscr := msgi.(*redis.Subscription) 134 | Expect(subscr.Kind).To(Equal("subscribe")) 135 | Expect(subscr.Channel).To(Equal("mychannel")) 136 | Expect(subscr.Count).To(Equal(1)) 137 | } 138 | 139 | { 140 | msgi, err := pubsub.ReceiveTimeout(time.Second) 141 | Expect(err).NotTo(HaveOccurred()) 142 | subscr := msgi.(*redis.Subscription) 143 | Expect(subscr.Kind).To(Equal("subscribe")) 144 | Expect(subscr.Channel).To(Equal("mychannel2")) 145 | Expect(subscr.Count).To(Equal(2)) 146 | } 147 | 148 | { 149 | msgi, err := pubsub.ReceiveTimeout(time.Second) 150 | Expect(err.(net.Error).Timeout()).To(Equal(true)) 151 | Expect(msgi).NotTo(HaveOccurred()) 152 | } 153 | 154 | n, err := client.Publish("mychannel", "hello").Result() 155 | Expect(err).NotTo(HaveOccurred()) 156 | Expect(n).To(Equal(int64(1))) 157 | 158 | n, err = client.Publish("mychannel2", "hello2").Result() 159 | Expect(err).NotTo(HaveOccurred()) 160 | Expect(n).To(Equal(int64(1))) 161 | 162 | Expect(pubsub.Unsubscribe("mychannel", "mychannel2")).NotTo(HaveOccurred()) 163 | 164 | { 165 | msgi, err := pubsub.ReceiveTimeout(time.Second) 166 | Expect(err).NotTo(HaveOccurred()) 167 | subscr := msgi.(*redis.Message) 168 | Expect(subscr.Channel).To(Equal("mychannel")) 169 | Expect(subscr.Payload).To(Equal("hello")) 170 | } 171 | 172 | { 173 | msgi, err := pubsub.ReceiveTimeout(time.Second) 174 | Expect(err).NotTo(HaveOccurred()) 175 | msg := msgi.(*redis.Message) 176 | Expect(msg.Channel).To(Equal("mychannel2")) 177 | Expect(msg.Payload).To(Equal("hello2")) 178 | } 179 | 180 | { 181 | msgi, err := pubsub.ReceiveTimeout(time.Second) 182 | Expect(err).NotTo(HaveOccurred()) 183 | subscr := msgi.(*redis.Subscription) 184 | Expect(subscr.Kind).To(Equal("unsubscribe")) 185 | Expect(subscr.Channel).To(Equal("mychannel")) 186 | Expect(subscr.Count).To(Equal(1)) 187 | } 188 | 189 | { 190 | msgi, err := pubsub.ReceiveTimeout(time.Second) 191 | Expect(err).NotTo(HaveOccurred()) 192 | subscr := msgi.(*redis.Subscription) 193 | Expect(subscr.Kind).To(Equal("unsubscribe")) 194 | Expect(subscr.Channel).To(Equal("mychannel2")) 195 | Expect(subscr.Count).To(Equal(0)) 196 | } 197 | 198 | stats := client.PoolStats() 199 | Expect(stats.Requests - stats.Hits - stats.Waits).To(Equal(uint32(2))) 200 | }) 201 | 202 | It("should ping/pong", func() { 203 | pubsub, err := client.Subscribe("mychannel") 204 | Expect(err).NotTo(HaveOccurred()) 205 | defer pubsub.Close() 206 | 207 | _, err = pubsub.ReceiveTimeout(time.Second) 208 | Expect(err).NotTo(HaveOccurred()) 209 | 210 | err = pubsub.Ping("") 211 | Expect(err).NotTo(HaveOccurred()) 212 | 213 | msgi, err := pubsub.ReceiveTimeout(time.Second) 214 | Expect(err).NotTo(HaveOccurred()) 215 | pong := msgi.(*redis.Pong) 216 | Expect(pong.Payload).To(Equal("")) 217 | }) 218 | 219 | It("should ping/pong with payload", func() { 220 | pubsub, err := client.Subscribe("mychannel") 221 | Expect(err).NotTo(HaveOccurred()) 222 | defer pubsub.Close() 223 | 224 | _, err = pubsub.ReceiveTimeout(time.Second) 225 | Expect(err).NotTo(HaveOccurred()) 226 | 227 | err = pubsub.Ping("hello") 228 | Expect(err).NotTo(HaveOccurred()) 229 | 230 | 
msgi, err := pubsub.ReceiveTimeout(time.Second) 231 | Expect(err).NotTo(HaveOccurred()) 232 | pong := msgi.(*redis.Pong) 233 | Expect(pong.Payload).To(Equal("hello")) 234 | }) 235 | 236 | It("should multi-ReceiveMessage", func() { 237 | pubsub, err := client.Subscribe("mychannel") 238 | Expect(err).NotTo(HaveOccurred()) 239 | defer pubsub.Close() 240 | 241 | err = client.Publish("mychannel", "hello").Err() 242 | Expect(err).NotTo(HaveOccurred()) 243 | 244 | err = client.Publish("mychannel", "world").Err() 245 | Expect(err).NotTo(HaveOccurred()) 246 | 247 | msg, err := pubsub.ReceiveMessage() 248 | Expect(err).NotTo(HaveOccurred()) 249 | Expect(msg.Channel).To(Equal("mychannel")) 250 | Expect(msg.Payload).To(Equal("hello")) 251 | 252 | msg, err = pubsub.ReceiveMessage() 253 | Expect(err).NotTo(HaveOccurred()) 254 | Expect(msg.Channel).To(Equal("mychannel")) 255 | Expect(msg.Payload).To(Equal("world")) 256 | }) 257 | 258 | It("should ReceiveMessage after timeout", func() { 259 | timeout := time.Second 260 | redis.SetReceiveMessageTimeout(timeout) 261 | 262 | pubsub, err := client.Subscribe("mychannel") 263 | Expect(err).NotTo(HaveOccurred()) 264 | defer pubsub.Close() 265 | 266 | done := make(chan bool, 1) 267 | go func() { 268 | defer GinkgoRecover() 269 | defer func() { 270 | done <- true 271 | }() 272 | 273 | time.Sleep(timeout + 100*time.Millisecond) 274 | n, err := client.Publish("mychannel", "hello").Result() 275 | Expect(err).NotTo(HaveOccurred()) 276 | Expect(n).To(Equal(int64(1))) 277 | }() 278 | 279 | msg, err := pubsub.ReceiveMessage() 280 | Expect(err).NotTo(HaveOccurred()) 281 | Expect(msg.Channel).To(Equal("mychannel")) 282 | Expect(msg.Payload).To(Equal("hello")) 283 | 284 | Eventually(done).Should(Receive()) 285 | 286 | stats := client.PoolStats() 287 | Expect(stats.Requests).To(Equal(uint32(3))) 288 | Expect(stats.Hits).To(Equal(uint32(1))) 289 | }) 290 | 291 | expectReceiveMessageOnError := func(pubsub *redis.PubSub) { 292 | cn1, err := pubsub.Pool().Get() 293 | Expect(err).NotTo(HaveOccurred()) 294 | cn1.NetConn = &badConn{ 295 | readErr: io.EOF, 296 | writeErr: io.EOF, 297 | } 298 | 299 | done := make(chan bool, 1) 300 | go func() { 301 | defer GinkgoRecover() 302 | defer func() { 303 | done <- true 304 | }() 305 | 306 | time.Sleep(100 * time.Millisecond) 307 | err := client.Publish("mychannel", "hello").Err() 308 | Expect(err).NotTo(HaveOccurred()) 309 | }() 310 | 311 | msg, err := pubsub.ReceiveMessage() 312 | Expect(err).NotTo(HaveOccurred()) 313 | Expect(msg.Channel).To(Equal("mychannel")) 314 | Expect(msg.Payload).To(Equal("hello")) 315 | 316 | Eventually(done).Should(Receive()) 317 | 318 | stats := client.PoolStats() 319 | Expect(stats.Requests).To(Equal(uint32(4))) 320 | Expect(stats.Hits).To(Equal(uint32(1))) 321 | } 322 | 323 | It("Subscribe should reconnect on ReceiveMessage error", func() { 324 | pubsub, err := client.Subscribe("mychannel") 325 | Expect(err).NotTo(HaveOccurred()) 326 | defer pubsub.Close() 327 | 328 | expectReceiveMessageOnError(pubsub) 329 | }) 330 | 331 | It("PSubscribe should reconnect on ReceiveMessage error", func() { 332 | pubsub, err := client.PSubscribe("mychannel") 333 | Expect(err).NotTo(HaveOccurred()) 334 | defer pubsub.Close() 335 | 336 | expectReceiveMessageOnError(pubsub) 337 | }) 338 | 339 | It("should return on Close", func() { 340 | pubsub, err := client.Subscribe("mychannel") 341 | Expect(err).NotTo(HaveOccurred()) 342 | defer pubsub.Close() 343 | 344 | var wg sync.WaitGroup 345 | wg.Add(1) 346 | go func() { 347 | defer 
GinkgoRecover() 348 | 349 | wg.Done() 350 | 351 | _, err := pubsub.ReceiveMessage() 352 | Expect(err).To(MatchError("redis: client is closed")) 353 | 354 | wg.Done() 355 | }() 356 | 357 | wg.Wait() 358 | wg.Add(1) 359 | 360 | err = pubsub.Close() 361 | Expect(err).NotTo(HaveOccurred()) 362 | 363 | wg.Wait() 364 | }) 365 | 366 | }) 367 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/race_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "net" 7 | "strconv" 8 | "testing" 9 | "time" 10 | 11 | . "github.com/onsi/ginkgo" 12 | . "github.com/onsi/gomega" 13 | 14 | "gopkg.in/redis.v3" 15 | "gopkg.in/redis.v3/internal/pool" 16 | ) 17 | 18 | var _ = Describe("races", func() { 19 | var client *redis.Client 20 | var C, N int 21 | 22 | BeforeEach(func() { 23 | client = redis.NewClient(redisOptions()) 24 | Expect(client.FlushDb().Err()).To(BeNil()) 25 | 26 | C, N = 10, 1000 27 | if testing.Short() { 28 | C = 4 29 | N = 100 30 | } 31 | }) 32 | 33 | AfterEach(func() { 34 | err := client.Close() 35 | Expect(err).NotTo(HaveOccurred()) 36 | }) 37 | 38 | It("should echo", func() { 39 | perform(C, func(id int) { 40 | for i := 0; i < N; i++ { 41 | msg := fmt.Sprintf("echo %d %d", id, i) 42 | echo, err := client.Echo(msg).Result() 43 | Expect(err).NotTo(HaveOccurred()) 44 | Expect(echo).To(Equal(msg)) 45 | } 46 | }) 47 | }) 48 | 49 | It("should incr", func() { 50 | key := "TestIncrFromGoroutines" 51 | 52 | perform(C, func(id int) { 53 | for i := 0; i < N; i++ { 54 | err := client.Incr(key).Err() 55 | Expect(err).NotTo(HaveOccurred()) 56 | } 57 | }) 58 | 59 | val, err := client.Get(key).Int64() 60 | Expect(err).NotTo(HaveOccurred()) 61 | Expect(val).To(Equal(int64(C * N))) 62 | }) 63 | 64 | It("should handle many keys", func() { 65 | perform(C, func(id int) { 66 | for i := 0; i < N; i++ { 67 | err := client.Set( 68 | fmt.Sprintf("keys.key-%d-%d", id, i), 69 | fmt.Sprintf("hello-%d-%d", id, i), 70 | 0, 71 | ).Err() 72 | Expect(err).NotTo(HaveOccurred()) 73 | } 74 | }) 75 | 76 | keys := client.Keys("keys.*") 77 | Expect(keys.Err()).NotTo(HaveOccurred()) 78 | Expect(len(keys.Val())).To(Equal(C * N)) 79 | }) 80 | 81 | It("should handle many keys 2", func() { 82 | perform(C, func(id int) { 83 | keys := []string{"non-existent-key"} 84 | for i := 0; i < N; i++ { 85 | key := fmt.Sprintf("keys.key-%d", i) 86 | keys = append(keys, key) 87 | 88 | err := client.Set(key, fmt.Sprintf("hello-%d", i), 0).Err() 89 | Expect(err).NotTo(HaveOccurred()) 90 | } 91 | keys = append(keys, "non-existent-key") 92 | 93 | vals, err := client.MGet(keys...).Result() 94 | Expect(err).NotTo(HaveOccurred()) 95 | Expect(len(vals)).To(Equal(N + 2)) 96 | 97 | for i := 0; i < N; i++ { 98 | Expect(vals[i+1]).To(Equal(fmt.Sprintf("hello-%d", i))) 99 | } 100 | 101 | Expect(vals[0]).To(BeNil()) 102 | Expect(vals[N+1]).To(BeNil()) 103 | }) 104 | }) 105 | 106 | It("should handle big vals in Get", func() { 107 | bigVal := string(bytes.Repeat([]byte{'*'}, 1<<17)) // 128kb 108 | 109 | err := client.Set("key", bigVal, 0).Err() 110 | Expect(err).NotTo(HaveOccurred()) 111 | 112 | // Reconnect to get new connection. 
113 | Expect(client.Close()).To(BeNil()) 114 | client = redis.NewClient(redisOptions()) 115 | 116 | perform(C, func(id int) { 117 | for i := 0; i < N; i++ { 118 | got, err := client.Get("key").Result() 119 | Expect(err).NotTo(HaveOccurred()) 120 | Expect(got).To(Equal(bigVal)) 121 | } 122 | }) 123 | 124 | }) 125 | 126 | It("should handle big vals in Set", func() { 127 | C, N = 4, 100 128 | bigVal := string(bytes.Repeat([]byte{'*'}, 1<<17)) // 128kb 129 | 130 | perform(C, func(id int) { 131 | for i := 0; i < N; i++ { 132 | err := client.Set("key", bigVal, 0).Err() 133 | Expect(err).NotTo(HaveOccurred()) 134 | } 135 | }) 136 | }) 137 | 138 | It("should PubSub", func() { 139 | connPool := client.Pool() 140 | connPool.(*pool.ConnPool).DialLimiter = nil 141 | 142 | perform(C, func(id int) { 143 | for i := 0; i < N; i++ { 144 | pubsub, err := client.Subscribe(fmt.Sprintf("mychannel%d", id)) 145 | Expect(err).NotTo(HaveOccurred()) 146 | 147 | go func() { 148 | defer GinkgoRecover() 149 | 150 | time.Sleep(time.Millisecond) 151 | err := pubsub.Close() 152 | Expect(err).NotTo(HaveOccurred()) 153 | }() 154 | 155 | _, err = pubsub.ReceiveMessage() 156 | Expect(err.Error()).To(ContainSubstring("closed")) 157 | 158 | val := "echo" + strconv.Itoa(i) 159 | echo, err := client.Echo(val).Result() 160 | Expect(err).NotTo(HaveOccurred()) 161 | Expect(echo).To(Equal(val)) 162 | } 163 | }) 164 | 165 | Expect(connPool.Len()).To(Equal(connPool.FreeLen())) 166 | Expect(connPool.Len()).To(BeNumerically("<=", 10)) 167 | }) 168 | 169 | It("should select db", func() { 170 | err := client.Set("db", 1, 0).Err() 171 | Expect(err).NotTo(HaveOccurred()) 172 | 173 | perform(C, func(id int) { 174 | opt := redisOptions() 175 | opt.DB = int64(id) 176 | client := redis.NewClient(opt) 177 | for i := 0; i < N; i++ { 178 | err := client.Set("db", id, 0).Err() 179 | Expect(err).NotTo(HaveOccurred()) 180 | 181 | n, err := client.Get("db").Int64() 182 | Expect(err).NotTo(HaveOccurred()) 183 | Expect(n).To(Equal(int64(id))) 184 | } 185 | err := client.Close() 186 | Expect(err).NotTo(HaveOccurred()) 187 | }) 188 | 189 | n, err := client.Get("db").Int64() 190 | Expect(err).NotTo(HaveOccurred()) 191 | Expect(n).To(Equal(int64(1))) 192 | }) 193 | 194 | It("should select DB with read timeout", func() { 195 | perform(C, func(id int) { 196 | opt := redisOptions() 197 | opt.DB = int64(id) 198 | opt.ReadTimeout = time.Nanosecond 199 | client := redis.NewClient(opt) 200 | 201 | perform(C, func(id int) { 202 | err := client.Ping().Err() 203 | Expect(err).To(HaveOccurred()) 204 | Expect(err.(net.Error).Timeout()).To(BeTrue()) 205 | }) 206 | 207 | err := client.Close() 208 | Expect(err).NotTo(HaveOccurred()) 209 | }) 210 | }) 211 | }) 212 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/redis.go: -------------------------------------------------------------------------------- 1 | package redis // import "gopkg.in/redis.v3" 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "sync/atomic" 8 | 9 | "gopkg.in/redis.v3/internal/pool" 10 | ) 11 | 12 | // Deprecated. Use SetLogger instead. 
13 | var Logger *log.Logger 14 | 15 | func init() { 16 | SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags)) 17 | } 18 | 19 | func SetLogger(logger *log.Logger) { 20 | Logger = logger 21 | pool.Logger = logger 22 | } 23 | 24 | type baseClient struct { 25 | connPool pool.Pooler 26 | opt *Options 27 | 28 | onClose func() error // hook called when client is closed 29 | } 30 | 31 | func (c *baseClient) String() string { 32 | return fmt.Sprintf("Redis<%s db:%d>", c.opt.Addr, c.opt.DB) 33 | } 34 | 35 | func (c *baseClient) conn() (*pool.Conn, error) { 36 | cn, err := c.connPool.Get() 37 | if err != nil { 38 | return nil, err 39 | } 40 | if !cn.Inited { 41 | if err := c.initConn(cn); err != nil { 42 | _ = c.connPool.Remove(cn, err) 43 | return nil, err 44 | } 45 | } 46 | return cn, err 47 | } 48 | 49 | func (c *baseClient) putConn(cn *pool.Conn, err error, allowTimeout bool) bool { 50 | if isBadConn(err, allowTimeout) { 51 | _ = c.connPool.Remove(cn, err) 52 | return false 53 | } 54 | 55 | _ = c.connPool.Put(cn) 56 | return true 57 | } 58 | 59 | func (c *baseClient) initConn(cn *pool.Conn) error { 60 | cn.Inited = true 61 | 62 | if c.opt.Password == "" && c.opt.DB == 0 { 63 | return nil 64 | } 65 | 66 | // Temp client for Auth and Select. 67 | client := newClient(c.opt, pool.NewSingleConnPool(cn)) 68 | 69 | if c.opt.Password != "" { 70 | if err := client.Auth(c.opt.Password).Err(); err != nil { 71 | return err 72 | } 73 | } 74 | 75 | if c.opt.DB > 0 { 76 | if err := client.Select(c.opt.DB).Err(); err != nil { 77 | return err 78 | } 79 | } 80 | 81 | return nil 82 | } 83 | 84 | func (c *baseClient) process(cmd Cmder) { 85 | for i := 0; i <= c.opt.MaxRetries; i++ { 86 | if i > 0 { 87 | cmd.reset() 88 | } 89 | 90 | cn, err := c.conn() 91 | if err != nil { 92 | cmd.setErr(err) 93 | return 94 | } 95 | 96 | readTimeout := cmd.readTimeout() 97 | if readTimeout != nil { 98 | cn.ReadTimeout = *readTimeout 99 | } else { 100 | cn.ReadTimeout = c.opt.ReadTimeout 101 | } 102 | cn.WriteTimeout = c.opt.WriteTimeout 103 | 104 | if err := writeCmd(cn, cmd); err != nil { 105 | c.putConn(cn, err, false) 106 | cmd.setErr(err) 107 | if shouldRetry(err) { 108 | continue 109 | } 110 | return 111 | } 112 | 113 | err = cmd.readReply(cn) 114 | c.putConn(cn, err, readTimeout != nil) 115 | if shouldRetry(err) { 116 | continue 117 | } 118 | 119 | return 120 | } 121 | } 122 | 123 | func (c *baseClient) closed() bool { 124 | return c.connPool.Closed() 125 | } 126 | 127 | // Close closes the client, releasing any open resources. 128 | // 129 | // It is rare to Close a Client, as the Client is meant to be 130 | // long-lived and shared between many goroutines. 131 | func (c *baseClient) Close() error { 132 | var retErr error 133 | if c.onClose != nil { 134 | if err := c.onClose(); err != nil && retErr == nil { 135 | retErr = err 136 | } 137 | } 138 | if err := c.connPool.Close(); err != nil && retErr == nil { 139 | retErr = err 140 | } 141 | return retErr 142 | } 143 | 144 | //------------------------------------------------------------------------------ 145 | 146 | // Client is a Redis client representing a pool of zero or more 147 | // underlying connections. It's safe for concurrent use by multiple 148 | // goroutines. 
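//
// A minimal, illustrative usage sketch (the address below is a placeholder,
// not a default):
//
//	client := NewClient(&Options{
//		Addr: "localhost:6379",
//	})
//	pong, err := client.Ping().Result()
//	// pong == "PONG" when the server is reachable.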
149 | type Client struct { 150 | baseClient 151 | commandable 152 | } 153 | 154 | func newClient(opt *Options, pool pool.Pooler) *Client { 155 | base := baseClient{opt: opt, connPool: pool} 156 | return &Client{ 157 | baseClient: base, 158 | commandable: commandable{ 159 | process: base.process, 160 | }, 161 | } 162 | } 163 | 164 | // NewClient returns a client to the Redis Server specified by Options. 165 | func NewClient(opt *Options) *Client { 166 | return newClient(opt, newConnPool(opt)) 167 | } 168 | 169 | // PoolStats returns connection pool stats. 170 | func (c *Client) PoolStats() *PoolStats { 171 | s := c.connPool.Stats() 172 | return &PoolStats{ 173 | Requests: atomic.LoadUint32(&s.Requests), 174 | Hits: atomic.LoadUint32(&s.Hits), 175 | Waits: atomic.LoadUint32(&s.Waits), 176 | Timeouts: atomic.LoadUint32(&s.Timeouts), 177 | 178 | TotalConns: atomic.LoadUint32(&s.TotalConns), 179 | FreeConns: atomic.LoadUint32(&s.FreeConns), 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/redis_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "bytes" 5 | "net" 6 | 7 | . "github.com/onsi/ginkgo" 8 | . "github.com/onsi/gomega" 9 | 10 | "gopkg.in/redis.v3" 11 | ) 12 | 13 | var _ = Describe("Client", func() { 14 | var client *redis.Client 15 | 16 | BeforeEach(func() { 17 | client = redis.NewClient(redisOptions()) 18 | Expect(client.FlushDb().Err()).To(BeNil()) 19 | }) 20 | 21 | AfterEach(func() { 22 | client.Close() 23 | }) 24 | 25 | It("should Stringer", func() { 26 | Expect(client.String()).To(Equal("Redis<:6380 db:15>")) 27 | }) 28 | 29 | It("should ping", func() { 30 | val, err := client.Ping().Result() 31 | Expect(err).NotTo(HaveOccurred()) 32 | Expect(val).To(Equal("PONG")) 33 | }) 34 | 35 | It("should return pool stats", func() { 36 | Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{})) 37 | }) 38 | 39 | It("should support custom dialers", func() { 40 | custom := redis.NewClient(&redis.Options{ 41 | Addr: ":1234", 42 | Dialer: func() (net.Conn, error) { 43 | return net.Dial("tcp", redisAddr) 44 | }, 45 | }) 46 | 47 | val, err := custom.Ping().Result() 48 | Expect(err).NotTo(HaveOccurred()) 49 | Expect(val).To(Equal("PONG")) 50 | Expect(custom.Close()).NotTo(HaveOccurred()) 51 | }) 52 | 53 | It("should close", func() { 54 | Expect(client.Close()).NotTo(HaveOccurred()) 55 | err := client.Ping().Err() 56 | Expect(err).To(MatchError("redis: client is closed")) 57 | }) 58 | 59 | It("should close pubsub without closing the client", func() { 60 | pubsub := client.PubSub() 61 | Expect(pubsub.Close()).NotTo(HaveOccurred()) 62 | 63 | _, err := pubsub.Receive() 64 | Expect(err).To(MatchError("redis: client is closed")) 65 | Expect(client.Ping().Err()).NotTo(HaveOccurred()) 66 | }) 67 | 68 | It("should close multi without closing the client", func() { 69 | multi := client.Multi() 70 | Expect(multi.Close()).NotTo(HaveOccurred()) 71 | 72 | _, err := multi.Exec(func() error { 73 | multi.Ping() 74 | return nil 75 | }) 76 | Expect(err).To(MatchError("redis: client is closed")) 77 | 78 | Expect(client.Ping().Err()).NotTo(HaveOccurred()) 79 | }) 80 | 81 | It("should close pipeline without closing the client", func() { 82 | pipeline := client.Pipeline() 83 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 84 | 85 | pipeline.Ping() 86 | _, err := pipeline.Exec() 87 | Expect(err).To(MatchError("redis: client is closed")) 88 | 89 | 
Expect(client.Ping().Err()).NotTo(HaveOccurred()) 90 | }) 91 | 92 | It("should close pubsub when client is closed", func() { 93 | pubsub := client.PubSub() 94 | Expect(client.Close()).NotTo(HaveOccurred()) 95 | Expect(pubsub.Close()).NotTo(HaveOccurred()) 96 | }) 97 | 98 | It("should close multi when client is closed", func() { 99 | multi := client.Multi() 100 | Expect(client.Close()).NotTo(HaveOccurred()) 101 | Expect(multi.Close()).NotTo(HaveOccurred()) 102 | }) 103 | 104 | It("should close pipeline when client is closed", func() { 105 | pipeline := client.Pipeline() 106 | Expect(client.Close()).NotTo(HaveOccurred()) 107 | Expect(pipeline.Close()).NotTo(HaveOccurred()) 108 | }) 109 | 110 | It("should select DB", func() { 111 | db2 := redis.NewClient(&redis.Options{ 112 | Addr: redisAddr, 113 | DB: 2, 114 | }) 115 | Expect(db2.FlushDb().Err()).NotTo(HaveOccurred()) 116 | Expect(db2.Get("db").Err()).To(Equal(redis.Nil)) 117 | Expect(db2.Set("db", 2, 0).Err()).NotTo(HaveOccurred()) 118 | 119 | n, err := db2.Get("db").Int64() 120 | Expect(err).NotTo(HaveOccurred()) 121 | Expect(n).To(Equal(int64(2))) 122 | 123 | Expect(client.Get("db").Err()).To(Equal(redis.Nil)) 124 | 125 | Expect(db2.FlushDb().Err()).NotTo(HaveOccurred()) 126 | Expect(db2.Close()).NotTo(HaveOccurred()) 127 | }) 128 | 129 | It("should process custom commands", func() { 130 | cmd := redis.NewCmd("PING") 131 | client.Process(cmd) 132 | Expect(cmd.Err()).NotTo(HaveOccurred()) 133 | Expect(cmd.Val()).To(Equal("PONG")) 134 | }) 135 | 136 | It("should retry command on network error", func() { 137 | Expect(client.Close()).NotTo(HaveOccurred()) 138 | 139 | client = redis.NewClient(&redis.Options{ 140 | Addr: redisAddr, 141 | MaxRetries: 1, 142 | }) 143 | 144 | // Put bad connection in the pool. 145 | cn, err := client.Pool().Get() 146 | Expect(err).NotTo(HaveOccurred()) 147 | 148 | cn.NetConn = &badConn{} 149 | err = client.Pool().Put(cn) 150 | Expect(err).NotTo(HaveOccurred()) 151 | 152 | err = client.Ping().Err() 153 | Expect(err).NotTo(HaveOccurred()) 154 | }) 155 | 156 | It("should update conn.UsedAt on read/write", func() { 157 | cn, err := client.Pool().Get() 158 | Expect(err).NotTo(HaveOccurred()) 159 | Expect(cn.UsedAt).NotTo(BeZero()) 160 | createdAt := cn.UsedAt 161 | 162 | err = client.Pool().Put(cn) 163 | Expect(err).NotTo(HaveOccurred()) 164 | Expect(cn.UsedAt.Equal(createdAt)).To(BeTrue()) 165 | 166 | err = client.Ping().Err() 167 | Expect(err).NotTo(HaveOccurred()) 168 | 169 | cn, err = client.Pool().Get() 170 | Expect(err).NotTo(HaveOccurred()) 171 | Expect(cn).NotTo(BeNil()) 172 | Expect(cn.UsedAt.After(createdAt)).To(BeTrue()) 173 | }) 174 | 175 | It("should escape special chars", func() { 176 | set := client.Set("key", "hello1\r\nhello2\r\n", 0) 177 | Expect(set.Err()).NotTo(HaveOccurred()) 178 | Expect(set.Val()).To(Equal("OK")) 179 | 180 | get := client.Get("key") 181 | Expect(get.Err()).NotTo(HaveOccurred()) 182 | Expect(get.Val()).To(Equal("hello1\r\nhello2\r\n")) 183 | }) 184 | 185 | It("should handle big vals", func() { 186 | bigVal := string(bytes.Repeat([]byte{'*'}, 1<<17)) // 128kb 187 | 188 | err := client.Set("key", bigVal, 0).Err() 189 | Expect(err).NotTo(HaveOccurred()) 190 | 191 | // Reconnect to get new connection. 
192 | Expect(client.Close()).To(BeNil()) 193 | client = redis.NewClient(redisOptions()) 194 | 195 | got, err := client.Get("key").Result() 196 | Expect(err).NotTo(HaveOccurred()) 197 | Expect(len(got)).To(Equal(len(bigVal))) 198 | Expect(got).To(Equal(bigVal)) 199 | }) 200 | 201 | }) 202 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/ring.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "gopkg.in/redis.v3/internal/consistenthash" 10 | "gopkg.in/redis.v3/internal/hashtag" 11 | "gopkg.in/redis.v3/internal/pool" 12 | ) 13 | 14 | var ( 15 | errRingShardsDown = errors.New("redis: all ring shards are down") 16 | ) 17 | 18 | // RingOptions are used to configure a ring client and should be 19 | // passed to NewRing. 20 | type RingOptions struct { 21 | // A map of name => host:port addresses of ring shards. 22 | Addrs map[string]string 23 | 24 | // Following options are copied from Options struct. 25 | 26 | DB int64 27 | Password string 28 | 29 | MaxRetries int 30 | 31 | DialTimeout time.Duration 32 | ReadTimeout time.Duration 33 | WriteTimeout time.Duration 34 | 35 | PoolSize int 36 | PoolTimeout time.Duration 37 | IdleTimeout time.Duration 38 | IdleCheckFrequency time.Duration 39 | } 40 | 41 | func (opt *RingOptions) clientOptions() *Options { 42 | return &Options{ 43 | DB: opt.DB, 44 | Password: opt.Password, 45 | 46 | DialTimeout: opt.DialTimeout, 47 | ReadTimeout: opt.ReadTimeout, 48 | WriteTimeout: opt.WriteTimeout, 49 | 50 | PoolSize: opt.PoolSize, 51 | PoolTimeout: opt.PoolTimeout, 52 | IdleTimeout: opt.IdleTimeout, 53 | IdleCheckFrequency: opt.IdleCheckFrequency, 54 | } 55 | } 56 | 57 | type ringShard struct { 58 | Client *Client 59 | down int 60 | } 61 | 62 | func (shard *ringShard) String() string { 63 | var state string 64 | if shard.IsUp() { 65 | state = "up" 66 | } else { 67 | state = "down" 68 | } 69 | return fmt.Sprintf("%s is %s", shard.Client, state) 70 | } 71 | 72 | func (shard *ringShard) IsDown() bool { 73 | const threshold = 5 74 | return shard.down >= threshold 75 | } 76 | 77 | func (shard *ringShard) IsUp() bool { 78 | return !shard.IsDown() 79 | } 80 | 81 | // Vote votes to set shard state and returns true if state was changed. 82 | func (shard *ringShard) Vote(up bool) bool { 83 | if up { 84 | changed := shard.IsDown() 85 | shard.down = 0 86 | return changed 87 | } 88 | 89 | if shard.IsDown() { 90 | return false 91 | } 92 | 93 | shard.down++ 94 | return shard.IsDown() 95 | } 96 | 97 | // Ring is a Redis client that uses constistent hashing to distribute 98 | // keys across multiple Redis servers (shards). It's safe for 99 | // concurrent use by multiple goroutines. 100 | // 101 | // Ring monitors the state of each shard and removes dead shards from 102 | // the ring. When shard comes online it is added back to the ring. This 103 | // gives you maximum availability and partition tolerance, but no 104 | // consistency between different shards or even clients. Each client 105 | // uses shards that are available to the client and does not do any 106 | // coordination when shard state is changed. 107 | // 108 | // Ring should be used when you use multiple Redis servers for caching 109 | // and can tolerate losing data when one of the servers dies. 110 | // Otherwise you should use Redis Cluster. 
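//
// A minimal, illustrative configuration sketch (shard names and addresses
// are placeholders):
//
//	ring := NewRing(&RingOptions{
//		Addrs: map[string]string{
//			"shard1": ":6379",
//			"shard2": ":6380",
//		},
//	})
//	if err := ring.Set("key", "value", 0).Err(); err != nil {
//		// handle error
//	}
//	val, err := ring.Get("key").Result()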
111 | type Ring struct { 112 | commandable 113 | 114 | opt *RingOptions 115 | nreplicas int 116 | 117 | mx sync.RWMutex 118 | hash *consistenthash.Map 119 | shards map[string]*ringShard 120 | 121 | closed bool 122 | } 123 | 124 | func NewRing(opt *RingOptions) *Ring { 125 | const nreplicas = 100 126 | ring := &Ring{ 127 | opt: opt, 128 | nreplicas: nreplicas, 129 | 130 | hash: consistenthash.New(nreplicas, nil), 131 | shards: make(map[string]*ringShard), 132 | } 133 | ring.commandable.process = ring.process 134 | for name, addr := range opt.Addrs { 135 | clopt := opt.clientOptions() 136 | clopt.Addr = addr 137 | ring.addClient(name, NewClient(clopt)) 138 | } 139 | go ring.heartbeat() 140 | return ring 141 | } 142 | 143 | func (ring *Ring) addClient(name string, cl *Client) { 144 | ring.mx.Lock() 145 | ring.hash.Add(name) 146 | ring.shards[name] = &ringShard{Client: cl} 147 | ring.mx.Unlock() 148 | } 149 | 150 | func (ring *Ring) getClient(key string) (*Client, error) { 151 | ring.mx.RLock() 152 | 153 | if ring.closed { 154 | return nil, pool.ErrClosed 155 | } 156 | 157 | name := ring.hash.Get(hashtag.Key(key)) 158 | if name == "" { 159 | ring.mx.RUnlock() 160 | return nil, errRingShardsDown 161 | } 162 | 163 | cl := ring.shards[name].Client 164 | ring.mx.RUnlock() 165 | return cl, nil 166 | } 167 | 168 | func (ring *Ring) process(cmd Cmder) { 169 | cl, err := ring.getClient(cmd.clusterKey()) 170 | if err != nil { 171 | cmd.setErr(err) 172 | return 173 | } 174 | cl.baseClient.process(cmd) 175 | } 176 | 177 | // rebalance removes dead shards from the ring. 178 | func (ring *Ring) rebalance() { 179 | defer ring.mx.Unlock() 180 | ring.mx.Lock() 181 | 182 | ring.hash = consistenthash.New(ring.nreplicas, nil) 183 | for name, shard := range ring.shards { 184 | if shard.IsUp() { 185 | ring.hash.Add(name) 186 | } 187 | } 188 | } 189 | 190 | // heartbeat monitors state of each shard in the ring. 191 | func (ring *Ring) heartbeat() { 192 | ticker := time.NewTicker(100 * time.Millisecond) 193 | defer ticker.Stop() 194 | for _ = range ticker.C { 195 | var rebalance bool 196 | 197 | ring.mx.RLock() 198 | 199 | if ring.closed { 200 | ring.mx.RUnlock() 201 | break 202 | } 203 | 204 | for _, shard := range ring.shards { 205 | err := shard.Client.Ping().Err() 206 | if shard.Vote(err == nil || err == pool.ErrPoolTimeout) { 207 | Logger.Printf("ring shard state changed: %s", shard) 208 | rebalance = true 209 | } 210 | } 211 | 212 | ring.mx.RUnlock() 213 | 214 | if rebalance { 215 | ring.rebalance() 216 | } 217 | } 218 | } 219 | 220 | // Close closes the ring client, releasing any open resources. 221 | // 222 | // It is rare to Close a Ring, as the Ring is meant to be long-lived 223 | // and shared between many goroutines. 224 | func (ring *Ring) Close() (retErr error) { 225 | defer ring.mx.Unlock() 226 | ring.mx.Lock() 227 | 228 | if ring.closed { 229 | return nil 230 | } 231 | ring.closed = true 232 | 233 | for _, shard := range ring.shards { 234 | if err := shard.Client.Close(); err != nil { 235 | retErr = err 236 | } 237 | } 238 | ring.hash = nil 239 | ring.shards = nil 240 | 241 | return retErr 242 | } 243 | 244 | // RingPipeline creates a new pipeline which is able to execute commands 245 | // against multiple shards. It's NOT safe for concurrent use by 246 | // multiple goroutines. 
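//
// An illustrative sketch; Ring.Pipelined creates the pipeline, runs the
// callback and calls Exec for you (keys and values are placeholders):
//
//	cmds, err := ring.Pipelined(func(pipe *RingPipeline) error {
//		for i := 0; i < 3; i++ {
//			pipe.Set(fmt.Sprintf("key%d", i), "value", 0)
//		}
//		return nil
//	})
//	// cmds holds one *StatusCmd per queued Set.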
247 | type RingPipeline struct { 248 | commandable 249 | 250 | ring *Ring 251 | 252 | cmds []Cmder 253 | closed bool 254 | } 255 | 256 | func (ring *Ring) Pipeline() *RingPipeline { 257 | pipe := &RingPipeline{ 258 | ring: ring, 259 | cmds: make([]Cmder, 0, 10), 260 | } 261 | pipe.commandable.process = pipe.process 262 | return pipe 263 | } 264 | 265 | func (ring *Ring) Pipelined(fn func(*RingPipeline) error) ([]Cmder, error) { 266 | pipe := ring.Pipeline() 267 | if err := fn(pipe); err != nil { 268 | return nil, err 269 | } 270 | cmds, err := pipe.Exec() 271 | pipe.Close() 272 | return cmds, err 273 | } 274 | 275 | func (pipe *RingPipeline) process(cmd Cmder) { 276 | pipe.cmds = append(pipe.cmds, cmd) 277 | } 278 | 279 | // Discard resets the pipeline and discards queued commands. 280 | func (pipe *RingPipeline) Discard() error { 281 | if pipe.closed { 282 | return pool.ErrClosed 283 | } 284 | pipe.cmds = pipe.cmds[:0] 285 | return nil 286 | } 287 | 288 | // Exec always returns list of commands and error of the first failed 289 | // command if any. 290 | func (pipe *RingPipeline) Exec() (cmds []Cmder, retErr error) { 291 | if pipe.closed { 292 | return nil, pool.ErrClosed 293 | } 294 | if len(pipe.cmds) == 0 { 295 | return pipe.cmds, nil 296 | } 297 | 298 | cmds = pipe.cmds 299 | pipe.cmds = make([]Cmder, 0, 10) 300 | 301 | cmdsMap := make(map[string][]Cmder) 302 | for _, cmd := range cmds { 303 | name := pipe.ring.hash.Get(hashtag.Key(cmd.clusterKey())) 304 | if name == "" { 305 | cmd.setErr(errRingShardsDown) 306 | if retErr == nil { 307 | retErr = errRingShardsDown 308 | } 309 | continue 310 | } 311 | cmdsMap[name] = append(cmdsMap[name], cmd) 312 | } 313 | 314 | for i := 0; i <= pipe.ring.opt.MaxRetries; i++ { 315 | failedCmdsMap := make(map[string][]Cmder) 316 | 317 | for name, cmds := range cmdsMap { 318 | client := pipe.ring.shards[name].Client 319 | cn, err := client.conn() 320 | if err != nil { 321 | setCmdsErr(cmds, err) 322 | if retErr == nil { 323 | retErr = err 324 | } 325 | continue 326 | } 327 | 328 | if i > 0 { 329 | resetCmds(cmds) 330 | } 331 | failedCmds, err := execCmds(cn, cmds) 332 | client.putConn(cn, err, false) 333 | if err != nil && retErr == nil { 334 | retErr = err 335 | } 336 | if len(failedCmds) > 0 { 337 | failedCmdsMap[name] = failedCmds 338 | } 339 | } 340 | 341 | if len(failedCmdsMap) == 0 { 342 | break 343 | } 344 | cmdsMap = failedCmdsMap 345 | } 346 | 347 | return cmds, retErr 348 | } 349 | 350 | // Close closes the pipeline, releasing any open resources. 351 | func (pipe *RingPipeline) Close() error { 352 | pipe.Discard() 353 | pipe.closed = true 354 | return nil 355 | } 356 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/ring_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "time" 7 | 8 | . "github.com/onsi/ginkgo" 9 | . 
"github.com/onsi/gomega" 10 | 11 | "gopkg.in/redis.v3" 12 | ) 13 | 14 | var _ = Describe("Redis ring", func() { 15 | var ring *redis.Ring 16 | 17 | setRingKeys := func() { 18 | for i := 0; i < 100; i++ { 19 | err := ring.Set(fmt.Sprintf("key%d", i), "value", 0).Err() 20 | Expect(err).NotTo(HaveOccurred()) 21 | } 22 | } 23 | 24 | BeforeEach(func() { 25 | ring = redis.NewRing(&redis.RingOptions{ 26 | Addrs: map[string]string{ 27 | "ringShardOne": ":" + ringShard1Port, 28 | "ringShardTwo": ":" + ringShard2Port, 29 | }, 30 | }) 31 | 32 | // Shards should not have any keys. 33 | Expect(ringShard1.FlushDb().Err()).NotTo(HaveOccurred()) 34 | Expect(ringShard1.Info().Val()).NotTo(ContainSubstring("keys=")) 35 | 36 | Expect(ringShard2.FlushDb().Err()).NotTo(HaveOccurred()) 37 | Expect(ringShard2.Info().Val()).NotTo(ContainSubstring("keys=")) 38 | }) 39 | 40 | AfterEach(func() { 41 | Expect(ring.Close()).NotTo(HaveOccurred()) 42 | }) 43 | 44 | It("uses both shards", func() { 45 | setRingKeys() 46 | 47 | // Both shards should have some keys now. 48 | Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=57")) 49 | Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43")) 50 | }) 51 | 52 | It("uses single shard when one of the shards is down", func() { 53 | // Stop ringShard2. 54 | Expect(ringShard2.Close()).NotTo(HaveOccurred()) 55 | 56 | // Ring needs 5 * heartbeat time to detect that node is down. 57 | // Give it more to be sure. 58 | heartbeat := 100 * time.Millisecond 59 | time.Sleep(2 * 5 * heartbeat) 60 | 61 | setRingKeys() 62 | 63 | // RingShard1 should have all keys. 64 | Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=100")) 65 | 66 | // Start ringShard2. 67 | var err error 68 | ringShard2, err = startRedis(ringShard2Port) 69 | Expect(err).NotTo(HaveOccurred()) 70 | 71 | // Wait for ringShard2 to come up. 72 | Eventually(func() error { 73 | return ringShard2.Ping().Err() 74 | }, "1s").ShouldNot(HaveOccurred()) 75 | 76 | // Ring needs heartbeat time to detect that node is up. 77 | // Give it more to be sure. 78 | time.Sleep(heartbeat + heartbeat) 79 | 80 | setRingKeys() 81 | 82 | // RingShard2 should have its keys. 83 | Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43")) 84 | }) 85 | 86 | It("supports hash tags", func() { 87 | for i := 0; i < 100; i++ { 88 | err := ring.Set(fmt.Sprintf("key%d{tag}", i), "value", 0).Err() 89 | Expect(err).NotTo(HaveOccurred()) 90 | } 91 | 92 | Expect(ringShard1.Info().Val()).ToNot(ContainSubstring("keys=")) 93 | Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=100")) 94 | }) 95 | 96 | Describe("pipelining", func() { 97 | It("returns an error when all shards are down", func() { 98 | ring := redis.NewRing(&redis.RingOptions{}) 99 | _, err := ring.Pipelined(func(pipe *redis.RingPipeline) error { 100 | pipe.Ping() 101 | return nil 102 | }) 103 | Expect(err).To(MatchError("redis: all ring shards are down")) 104 | }) 105 | 106 | It("uses both shards", func() { 107 | pipe := ring.Pipeline() 108 | for i := 0; i < 100; i++ { 109 | err := pipe.Set(fmt.Sprintf("key%d", i), "value", 0).Err() 110 | Expect(err).NotTo(HaveOccurred()) 111 | } 112 | cmds, err := pipe.Exec() 113 | Expect(err).NotTo(HaveOccurred()) 114 | Expect(cmds).To(HaveLen(100)) 115 | Expect(pipe.Close()).NotTo(HaveOccurred()) 116 | 117 | for _, cmd := range cmds { 118 | Expect(cmd.Err()).NotTo(HaveOccurred()) 119 | Expect(cmd.(*redis.StatusCmd).Val()).To(Equal("OK")) 120 | } 121 | 122 | // Both shards should have some keys now. 
123 | Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=57")) 124 | Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43")) 125 | }) 126 | 127 | It("is consistent with ring", func() { 128 | var keys []string 129 | for i := 0; i < 100; i++ { 130 | key := make([]byte, 64) 131 | _, err := rand.Read(key) 132 | Expect(err).NotTo(HaveOccurred()) 133 | keys = append(keys, string(key)) 134 | } 135 | 136 | _, err := ring.Pipelined(func(pipe *redis.RingPipeline) error { 137 | for _, key := range keys { 138 | pipe.Set(key, "value", 0).Err() 139 | } 140 | return nil 141 | }) 142 | Expect(err).NotTo(HaveOccurred()) 143 | 144 | for _, key := range keys { 145 | val, err := ring.Get(key).Result() 146 | Expect(err).NotTo(HaveOccurred()) 147 | Expect(val).To(Equal("value")) 148 | } 149 | }) 150 | 151 | It("supports hash tags", func() { 152 | _, err := ring.Pipelined(func(pipe *redis.RingPipeline) error { 153 | for i := 0; i < 100; i++ { 154 | pipe.Set(fmt.Sprintf("key%d{tag}", i), "value", 0).Err() 155 | } 156 | return nil 157 | }) 158 | Expect(err).NotTo(HaveOccurred()) 159 | 160 | Expect(ringShard1.Info().Val()).ToNot(ContainSubstring("keys=")) 161 | Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=100")) 162 | }) 163 | }) 164 | }) 165 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/safe.go: -------------------------------------------------------------------------------- 1 | // +build appengine 2 | 3 | package redis 4 | 5 | func bytesToString(b []byte) string { 6 | return string(b) 7 | } 8 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/script.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "crypto/sha1" 5 | "encoding/hex" 6 | "io" 7 | "strings" 8 | ) 9 | 10 | type scripter interface { 11 | Eval(script string, keys []string, args []string) *Cmd 12 | EvalSha(sha1 string, keys []string, args []string) *Cmd 13 | ScriptExists(scripts ...string) *BoolSliceCmd 14 | ScriptLoad(script string) *StringCmd 15 | } 16 | 17 | type Script struct { 18 | src, hash string 19 | } 20 | 21 | func NewScript(src string) *Script { 22 | h := sha1.New() 23 | io.WriteString(h, src) 24 | return &Script{ 25 | src: src, 26 | hash: hex.EncodeToString(h.Sum(nil)), 27 | } 28 | } 29 | 30 | func (s *Script) Load(c scripter) *StringCmd { 31 | return c.ScriptLoad(s.src) 32 | } 33 | 34 | func (s *Script) Exists(c scripter) *BoolSliceCmd { 35 | return c.ScriptExists(s.src) 36 | } 37 | 38 | func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd { 39 | return c.Eval(s.src, keys, args) 40 | } 41 | 42 | func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd { 43 | return c.EvalSha(s.hash, keys, args) 44 | } 45 | 46 | func (s *Script) Run(c scripter, keys []string, args []string) *Cmd { 47 | r := s.EvalSha(c, keys, args) 48 | if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { 49 | return s.Eval(c, keys, args) 50 | } 51 | return r 52 | } 53 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/sentinel.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "gopkg.in/redis.v3/internal/pool" 12 | ) 13 | 14 | 
//------------------------------------------------------------------------------ 15 | 16 | // FailoverOptions are used to configure a failover client and should 17 | // be passed to NewFailoverClient. 18 | type FailoverOptions struct { 19 | // The master name. 20 | MasterName string 21 | // A seed list of host:port addresses of sentinel nodes. 22 | SentinelAddrs []string 23 | 24 | // Following options are copied from Options struct. 25 | 26 | Password string 27 | DB int64 28 | 29 | MaxRetries int 30 | 31 | DialTimeout time.Duration 32 | ReadTimeout time.Duration 33 | WriteTimeout time.Duration 34 | 35 | PoolSize int 36 | PoolTimeout time.Duration 37 | IdleTimeout time.Duration 38 | IdleCheckFrequency time.Duration 39 | } 40 | 41 | func (opt *FailoverOptions) options() *Options { 42 | return &Options{ 43 | Addr: "FailoverClient", 44 | 45 | DB: opt.DB, 46 | Password: opt.Password, 47 | 48 | MaxRetries: opt.MaxRetries, 49 | 50 | DialTimeout: opt.DialTimeout, 51 | ReadTimeout: opt.ReadTimeout, 52 | WriteTimeout: opt.WriteTimeout, 53 | 54 | PoolSize: opt.PoolSize, 55 | PoolTimeout: opt.PoolTimeout, 56 | IdleTimeout: opt.IdleTimeout, 57 | IdleCheckFrequency: opt.IdleCheckFrequency, 58 | } 59 | } 60 | 61 | // NewFailoverClient returns a Redis client that uses Redis Sentinel 62 | // for automatic failover. It's safe for concurrent use by multiple 63 | // goroutines. 64 | func NewFailoverClient(failoverOpt *FailoverOptions) *Client { 65 | opt := failoverOpt.options() 66 | failover := &sentinelFailover{ 67 | masterName: failoverOpt.MasterName, 68 | sentinelAddrs: failoverOpt.SentinelAddrs, 69 | 70 | opt: opt, 71 | } 72 | base := baseClient{ 73 | opt: opt, 74 | connPool: failover.Pool(), 75 | 76 | onClose: func() error { 77 | return failover.Close() 78 | }, 79 | } 80 | return &Client{ 81 | baseClient: base, 82 | commandable: commandable{ 83 | process: base.process, 84 | }, 85 | } 86 | } 87 | 88 | //------------------------------------------------------------------------------ 89 | 90 | type sentinelClient struct { 91 | baseClient 92 | commandable 93 | } 94 | 95 | func newSentinel(opt *Options) *sentinelClient { 96 | base := baseClient{ 97 | opt: opt, 98 | connPool: newConnPool(opt), 99 | } 100 | return &sentinelClient{ 101 | baseClient: base, 102 | commandable: commandable{process: base.process}, 103 | } 104 | } 105 | 106 | func (c *sentinelClient) PubSub() *PubSub { 107 | return &PubSub{ 108 | base: &baseClient{ 109 | opt: c.opt, 110 | connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), false), 111 | }, 112 | } 113 | } 114 | 115 | func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd { 116 | cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name) 117 | c.Process(cmd) 118 | return cmd 119 | } 120 | 121 | func (c *sentinelClient) Sentinels(name string) *SliceCmd { 122 | cmd := NewSliceCmd("SENTINEL", "sentinels", name) 123 | c.Process(cmd) 124 | return cmd 125 | } 126 | 127 | type sentinelFailover struct { 128 | masterName string 129 | sentinelAddrs []string 130 | 131 | opt *Options 132 | 133 | pool *pool.ConnPool 134 | poolOnce sync.Once 135 | 136 | mu sync.RWMutex 137 | sentinel *sentinelClient 138 | } 139 | 140 | func (d *sentinelFailover) Close() error { 141 | return d.resetSentinel() 142 | } 143 | 144 | func (d *sentinelFailover) dial() (net.Conn, error) { 145 | addr, err := d.MasterAddr() 146 | if err != nil { 147 | return nil, err 148 | } 149 | return net.DialTimeout("tcp", addr, d.opt.DialTimeout) 150 | } 151 | 152 | func (d *sentinelFailover) Pool() 
*pool.ConnPool { 153 | d.poolOnce.Do(func() { 154 | d.opt.Dialer = d.dial 155 | d.pool = newConnPool(d.opt) 156 | }) 157 | return d.pool 158 | } 159 | 160 | func (d *sentinelFailover) MasterAddr() (string, error) { 161 | defer d.mu.Unlock() 162 | d.mu.Lock() 163 | 164 | // Try last working sentinel. 165 | if d.sentinel != nil { 166 | addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result() 167 | if err != nil { 168 | Logger.Printf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) 169 | d._resetSentinel() 170 | } else { 171 | addr := net.JoinHostPort(addr[0], addr[1]) 172 | Logger.Printf("sentinel: %q addr is %s", d.masterName, addr) 173 | return addr, nil 174 | } 175 | } 176 | 177 | for i, sentinelAddr := range d.sentinelAddrs { 178 | sentinel := newSentinel(&Options{ 179 | Addr: sentinelAddr, 180 | 181 | DialTimeout: d.opt.DialTimeout, 182 | ReadTimeout: d.opt.ReadTimeout, 183 | WriteTimeout: d.opt.WriteTimeout, 184 | 185 | PoolSize: d.opt.PoolSize, 186 | PoolTimeout: d.opt.PoolTimeout, 187 | IdleTimeout: d.opt.IdleTimeout, 188 | }) 189 | masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result() 190 | if err != nil { 191 | Logger.Printf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) 192 | sentinel.Close() 193 | continue 194 | } 195 | 196 | // Push working sentinel to the top. 197 | d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0] 198 | 199 | d.setSentinel(sentinel) 200 | addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) 201 | Logger.Printf("sentinel: %q addr is %s", d.masterName, addr) 202 | return addr, nil 203 | } 204 | 205 | return "", errors.New("redis: all sentinels are unreachable") 206 | } 207 | 208 | func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) { 209 | d.discoverSentinels(sentinel) 210 | d.sentinel = sentinel 211 | go d.listen(sentinel) 212 | } 213 | 214 | func (d *sentinelFailover) resetSentinel() error { 215 | d.mu.Lock() 216 | err := d._resetSentinel() 217 | d.mu.Unlock() 218 | return err 219 | } 220 | 221 | func (d *sentinelFailover) _resetSentinel() error { 222 | var err error 223 | if d.sentinel != nil { 224 | err = d.sentinel.Close() 225 | d.sentinel = nil 226 | } 227 | return err 228 | } 229 | 230 | func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) { 231 | sentinels, err := sentinel.Sentinels(d.masterName).Result() 232 | if err != nil { 233 | Logger.Printf("sentinel: Sentinels %q failed: %s", d.masterName, err) 234 | return 235 | } 236 | for _, sentinel := range sentinels { 237 | vals := sentinel.([]interface{}) 238 | for i := 0; i < len(vals); i += 2 { 239 | key := vals[i].(string) 240 | if key == "name" { 241 | sentinelAddr := vals[i+1].(string) 242 | if !contains(d.sentinelAddrs, sentinelAddr) { 243 | Logger.Printf( 244 | "sentinel: discovered new %q sentinel: %s", 245 | d.masterName, sentinelAddr, 246 | ) 247 | d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr) 248 | } 249 | } 250 | } 251 | } 252 | } 253 | 254 | // closeOldConns closes connections to the old master after failover switch. 255 | func (d *sentinelFailover) closeOldConns(newMaster string) { 256 | // Good connections that should be put back to the pool. They 257 | // can't be put immediately, because pool.First will return them 258 | // again on next iteration. 
259 | cnsToPut := make([]*pool.Conn, 0) 260 | 261 | for { 262 | cn := d.pool.PopFree() 263 | if cn == nil { 264 | break 265 | } 266 | if cn.RemoteAddr().String() != newMaster { 267 | err := fmt.Errorf( 268 | "sentinel: closing connection to the old master %s", 269 | cn.RemoteAddr(), 270 | ) 271 | Logger.Print(err) 272 | d.pool.Remove(cn, err) 273 | } else { 274 | cnsToPut = append(cnsToPut, cn) 275 | } 276 | } 277 | 278 | for _, cn := range cnsToPut { 279 | d.pool.Put(cn) 280 | } 281 | } 282 | 283 | func (d *sentinelFailover) listen(sentinel *sentinelClient) { 284 | var pubsub *PubSub 285 | for { 286 | if pubsub == nil { 287 | pubsub = sentinel.PubSub() 288 | if err := pubsub.Subscribe("+switch-master"); err != nil { 289 | Logger.Printf("sentinel: Subscribe failed: %s", err) 290 | d.resetSentinel() 291 | return 292 | } 293 | } 294 | 295 | msg, err := pubsub.ReceiveMessage() 296 | if err != nil { 297 | Logger.Printf("sentinel: ReceiveMessage failed: %s", err) 298 | pubsub.Close() 299 | d.resetSentinel() 300 | return 301 | } 302 | 303 | switch msg.Channel { 304 | case "+switch-master": 305 | parts := strings.Split(msg.Payload, " ") 306 | if parts[0] != d.masterName { 307 | Logger.Printf("sentinel: ignore new %s addr", parts[0]) 308 | continue 309 | } 310 | 311 | addr := net.JoinHostPort(parts[3], parts[4]) 312 | Logger.Printf( 313 | "sentinel: new %q addr is %s", 314 | d.masterName, addr, 315 | ) 316 | 317 | d.closeOldConns(addr) 318 | } 319 | } 320 | } 321 | 322 | func contains(slice []string, str string) bool { 323 | for _, s := range slice { 324 | if s == str { 325 | return true 326 | } 327 | } 328 | return false 329 | } 330 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/sentinel_test.go: -------------------------------------------------------------------------------- 1 | package redis_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo" 5 | . "github.com/onsi/gomega" 6 | 7 | "gopkg.in/redis.v3" 8 | ) 9 | 10 | var _ = Describe("Sentinel", func() { 11 | var client *redis.Client 12 | 13 | BeforeEach(func() { 14 | client = redis.NewFailoverClient(&redis.FailoverOptions{ 15 | MasterName: sentinelName, 16 | SentinelAddrs: []string{":" + sentinelPort}, 17 | }) 18 | Expect(client.FlushDb().Err()).NotTo(HaveOccurred()) 19 | }) 20 | 21 | AfterEach(func() { 22 | Expect(client.Close()).NotTo(HaveOccurred()) 23 | }) 24 | 25 | It("should facilitate failover", func() { 26 | // Set value on master, verify 27 | err := client.Set("foo", "master", 0).Err() 28 | Expect(err).NotTo(HaveOccurred()) 29 | 30 | val, err := sentinelMaster.Get("foo").Result() 31 | Expect(err).NotTo(HaveOccurred()) 32 | Expect(val).To(Equal("master")) 33 | 34 | // Wait until replicated 35 | Eventually(func() string { 36 | return sentinelSlave1.Get("foo").Val() 37 | }, "1s", "100ms").Should(Equal("master")) 38 | Eventually(func() string { 39 | return sentinelSlave2.Get("foo").Val() 40 | }, "1s", "100ms").Should(Equal("master")) 41 | 42 | // Wait until slaves are picked up by sentinel. 43 | Eventually(func() string { 44 | return sentinel.Info().Val() 45 | }, "10s", "100ms").Should(ContainSubstring("slaves=2")) 46 | 47 | // Kill master. 48 | sentinelMaster.Shutdown() 49 | Eventually(func() error { 50 | return sentinelMaster.Ping().Err() 51 | }, "5s", "100ms").Should(HaveOccurred()) 52 | 53 | // Wait for Redis sentinel to elect new master. 
54 | Eventually(func() string { 55 | return sentinelSlave1.Info().Val() + sentinelSlave2.Info().Val() 56 | }, "30s", "1s").Should(ContainSubstring("role:master")) 57 | 58 | // Check that client picked up new master. 59 | Eventually(func() error { 60 | return client.Get("foo").Err() 61 | }, "5s", "100ms").ShouldNot(HaveOccurred()) 62 | }) 63 | 64 | It("supports DB selection", func() { 65 | Expect(client.Close()).NotTo(HaveOccurred()) 66 | 67 | client = redis.NewFailoverClient(&redis.FailoverOptions{ 68 | MasterName: sentinelName, 69 | SentinelAddrs: []string{":" + sentinelPort}, 70 | DB: 1, 71 | }) 72 | err := client.Ping().Err() 73 | Expect(err).NotTo(HaveOccurred()) 74 | }) 75 | }) 76 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/testdata/redis.conf: -------------------------------------------------------------------------------- 1 | # Minimal redis.conf 2 | 3 | port 6379 4 | daemonize no 5 | dir . 6 | save "" 7 | appendonly yes 8 | cluster-config-file nodes.conf 9 | cluster-node-timeout 30000 10 | -------------------------------------------------------------------------------- /vendor/gopkg.in/redis.v3/unsafe.go: -------------------------------------------------------------------------------- 1 | // +build !appengine 2 | 3 | package redis 4 | 5 | import ( 6 | "reflect" 7 | "unsafe" 8 | ) 9 | 10 | func bytesToString(b []byte) string { 11 | bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 12 | strHeader := reflect.StringHeader{bytesHeader.Data, bytesHeader.Len} 13 | return *(*string)(unsafe.Pointer(&strHeader)) 14 | } 15 | -------------------------------------------------------------------------------- /vendor/manifest: -------------------------------------------------------------------------------- 1 | { 2 | "version": 0, 3 | "dependencies": [ 4 | { 5 | "importpath": "gopkg.in/bsm/ratelimit.v1", 6 | "repository": "https://gopkg.in/bsm/ratelimit.v1", 7 | "revision": "db14e161995a5177acef654cb0dd785e8ee8bc22", 8 | "branch": "v1" 9 | }, 10 | { 11 | "importpath": "gopkg.in/redis.v3", 12 | "repository": "https://gopkg.in/redis.v3", 13 | "revision": "d2ae7d870764d131887fa31a743bb50abbcf9384", 14 | "branch": "master" 15 | } 16 | ] 17 | } --------------------------------------------------------------------------------