├── .gitignore ├── debug_on.go ├── testdata ├── pki │ ├── .keystore │ ├── .truststore │ ├── ca.crt │ ├── gocql.key │ ├── cassandra.key │ ├── ca.key │ ├── gocql.crt │ └── cassandra.crt └── frames │ └── bench_parse_result.gz ├── debug_off.go ├── doc.go ├── fuzz.go ├── batch_test.go ├── events_test.go ├── compressor.go ├── .travis.yml ├── errors_test.go ├── framer_bench_test.go ├── internal ├── ccm │ ├── ccm_test.go │ └── ccm.go ├── lru │ ├── lru_test.go │ └── lru.go ├── murmur │ ├── murmur_test.go │ ├── murmur.go │ └── murmur_appengine.go └── streams │ ├── streams.go │ └── streams_test.go ├── ring_test.go ├── topology_test.go ├── host_source_test.go ├── compressor_test.go ├── query_executor.go ├── topology.go ├── prepared_cache.go ├── LICENSE ├── control_test.go ├── stress_test.go ├── filters.go ├── cass1batch_test.go ├── tuple_test.go ├── filters_test.go ├── integration.sh ├── errors.go ├── frame_test.go ├── ring.go ├── AUTHORS ├── common_test.go ├── CONTRIBUTING.md ├── token.go ├── uuid_test.go ├── uuid.go ├── events.go ├── session_test.go ├── helpers.go ├── events_ccm_test.go ├── wiki_test.go ├── cluster.go ├── policies_test.go ├── host_source.go ├── README.md ├── token_test.go └── control.go /.gitignore: -------------------------------------------------------------------------------- 1 | gocql-fuzz 2 | fuzz-corpus 3 | fuzz-work 4 | gocql.test 5 | .idea 6 | -------------------------------------------------------------------------------- /debug_on.go: -------------------------------------------------------------------------------- 1 | // +build gocql_debug 2 | 3 | package gocql 4 | 5 | const gocqlDebug = true 6 | -------------------------------------------------------------------------------- /testdata/pki/.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/i/gocql/master/testdata/pki/.keystore -------------------------------------------------------------------------------- /testdata/pki/.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/i/gocql/master/testdata/pki/.truststore -------------------------------------------------------------------------------- /debug_off.go: -------------------------------------------------------------------------------- 1 | // +build !gocql_debug 2 | 3 | package gocql 4 | 5 | const gocqlDebug = false 6 | -------------------------------------------------------------------------------- /testdata/frames/bench_parse_result.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/i/gocql/master/testdata/frames/bench_parse_result.gz -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2012-2015 The gocql Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // Package gocql implements a fast and robust Cassandra driver for the 6 | // Go programming language. 7 | package gocql 8 | 9 | // TODO(tux21b): write more docs. 
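//
// Until fuller docs land, a minimal usage sketch (it mirrors the README's
// quick-start and assumes only the exported cluster/session API exercised
// by the tests in this repo):
//
//	cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2")
//	cluster.Keyspace = "example"
//	cluster.Consistency = gocql.Quorum
//	session, err := cluster.CreateSession()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer session.Close()
//
//	// insert a row using a bound statement
//	if err := session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,
//		"me", gocql.TimeUUID(), "hello world").Exec(); err != nil {
//		log.Fatal(err)
//	}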
10 | -------------------------------------------------------------------------------- /fuzz.go: -------------------------------------------------------------------------------- 1 | // +build gofuzz 2 | 3 | package gocql 4 | 5 | import "bytes" 6 | 7 | func Fuzz(data []byte) int { 8 | var bw bytes.Buffer 9 | 10 | r := bytes.NewReader(data) 11 | 12 | head, err := readHeader(r, make([]byte, 9)) 13 | if err != nil { 14 | return 0 15 | } 16 | 17 | framer := newFramer(r, &bw, nil, byte(head.version)) 18 | err = framer.readFrame(&head) 19 | if err != nil { 20 | return 0 21 | } 22 | 23 | frame, err := framer.parseFrame() 24 | if err != nil { 25 | return 0 26 | } 27 | 28 | if frame != nil { 29 | return 1 30 | } 31 | 32 | return 2 33 | } 34 | -------------------------------------------------------------------------------- /batch_test.go: -------------------------------------------------------------------------------- 1 | // +build all integration 2 | 3 | package gocql 4 | 5 | import ( 6 | "testing" 7 | ) 8 | 9 | func TestBatch_Errors(t *testing.T) { 10 | if *flagProto == 1 { 11 | t.Skip("atomic batches not supported. Please use Cassandra >= 2.0") 12 | } 13 | 14 | session := createSession(t) 15 | defer session.Close() 16 | 17 | if err := createTable(session, `CREATE TABLE gocql_test.batch_errors (id int primary key, val inet)`); err != nil { 18 | t.Fatal(err) 19 | } 20 | 21 | b := session.NewBatch(LoggedBatch) 22 | b.Query("SELECT * FROM batch_errors WHERE id=2 AND val=?", nil) 23 | if err := session.ExecuteBatch(b); err == nil { 24 | t.Fatal("expected to get error for invalid query in batch") 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /events_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "net" 5 | "sync" 6 | "testing" 7 | ) 8 | 9 | func TestEventDebounce(t *testing.T) { 10 | const eventCount = 150 11 | wg := &sync.WaitGroup{} 12 | wg.Add(1) 13 | 14 | eventsSeen := 0 15 | debouncer := newEventDebouncer("testDebouncer", func(events []frame) { 16 | defer wg.Done() 17 | eventsSeen += len(events) 18 | }) 19 | defer debouncer.stop() 20 | 21 | for i := 0; i < eventCount; i++ { 22 | debouncer.debounce(&statusChangeEventFrame{ 23 | change: "UP", 24 | host: net.IPv4(127, 0, 0, 1), 25 | port: 9042, 26 | }) 27 | } 28 | 29 | wg.Wait() 30 | if eventCount != eventsSeen { 31 | t.Fatalf("expected to see %d events but got %d", eventCount, eventsSeen) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /compressor.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "github.com/golang/snappy" 5 | ) 6 | 7 | type Compressor interface { 8 | Name() string 9 | Encode(data []byte) ([]byte, error) 10 | Decode(data []byte) ([]byte, error) 11 | } 12 | 13 | // SnappyCompressor implements the Compressor interface and can be used to 14 | // compress incoming and outgoing frames. The snappy compression algorithm 15 | // aims for very high speeds and reasonable compression. 
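//
// A short enabling sketch (assuming the Compressor field on ClusterConfig,
// the same knob the integration suite drives via its -compressor=snappy
// flag):
//
//	cluster := NewCluster("127.0.0.1")
//	cluster.Compressor = SnappyCompressor{}
//	session, err := cluster.CreateSession()
//	if err != nil {
//		// handle error
//	}
//	defer session.Close()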
16 | type SnappyCompressor struct{} 17 | 18 | func (s SnappyCompressor) Name() string { 19 | return "snappy" 20 | } 21 | 22 | func (s SnappyCompressor) Encode(data []byte) ([]byte, error) { 23 | return snappy.Encode(nil, data), nil 24 | } 25 | 26 | func (s SnappyCompressor) Decode(data []byte) ([]byte, error) { 27 | return snappy.Decode(nil, data) 28 | } 29 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | sudo: required 4 | dist: trusty 5 | 6 | cache: 7 | directories: 8 | - $HOME/.ccm/repository 9 | - $HOME/.local/lib/python2.7 10 | 11 | matrix: 12 | fast_finish: true 13 | 14 | env: 15 | global: 16 | - GOMAXPROCS=2 17 | matrix: 18 | - CASS=2.1.12 19 | AUTH=false 20 | - CASS=2.2.5 21 | AUTH=true 22 | - CASS=2.2.5 23 | AUTH=false 24 | - CASS=3.0.8 25 | AUTH=false 26 | 27 | go: 28 | - 1.6 29 | - 1.7 30 | 31 | install: 32 | - pip install --user cql PyYAML six 33 | - git clone https://github.com/pcmanus/ccm.git 34 | - pushd ccm 35 | - ./setup.py install --user 36 | - popd 37 | - go get . 38 | 39 | script: 40 | - set -e 41 | - PATH=$PATH:$HOME/.local/bin bash integration.sh $CASS $AUTH 42 | - go vet . 43 | 44 | notifications: 45 | - email: false 46 | -------------------------------------------------------------------------------- /errors_test.go: -------------------------------------------------------------------------------- 1 | // +build all integration 2 | 3 | package gocql 4 | 5 | import ( 6 | "testing" 7 | ) 8 | 9 | func TestErrorsParse(t *testing.T) { 10 | session := createSession(t) 11 | defer session.Close() 12 | 13 | if err := createTable(session, `CREATE TABLE gocql_test.errors_parse (id int primary key)`); err != nil { 14 | t.Fatal("create:", err) 15 | } 16 | 17 | if err := createTable(session, `CREATE TABLE gocql_test.errors_parse (id int primary key)`); err == nil { 18 | t.Fatal("Should have gotten already exists error from cassandra server.") 19 | } else { 20 | switch e := err.(type) { 21 | case *RequestErrAlreadyExists: 22 | if e.Table != "errors_parse" { 23 | t.Fatalf("expected error table to be 'errors_parse' but was %q", e.Table) 24 | } 25 | default: 26 | t.Fatalf("expected to get RequestErrAlreadyExists instead got %T", e) 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /framer_bench_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "compress/gzip" 5 | "io/ioutil" 6 | "os" 7 | "testing" 8 | ) 9 | 10 | func readGzipData(path string) ([]byte, error) { 11 | f, err := os.Open(path) 12 | if err != nil { 13 | return nil, err 14 | } 15 | defer f.Close() 16 | 17 | r, err := gzip.NewReader(f) 18 | if err != nil { 19 | return nil, err 20 | } 21 | defer r.Close() 22 | 23 | return ioutil.ReadAll(r) 24 | } 25 | 26 | func BenchmarkParseRowsFrame(b *testing.B) { 27 | data, err := readGzipData("testdata/frames/bench_parse_result.gz") 28 | if err != nil { 29 | b.Fatal(err) 30 | } 31 | 32 | b.ResetTimer() 33 | for i := 0; i < b.N; i++ { 34 | framer := &framer{ 35 | header: &frameHeader{ 36 | version: protoVersion4 | 0x80, 37 | op: opResult, 38 | length: len(data), 39 | }, 40 | rbuf: data, 41 | } 42 | 43 | _, err = framer.parseFrame() 44 | if err != nil { 45 | b.Fatal(err) 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /internal/ccm/ccm_test.go: 
-------------------------------------------------------------------------------- 1 | // +build ccm 2 | 3 | package ccm 4 | 5 | import ( 6 | "testing" 7 | ) 8 | 9 | func TestCCM(t *testing.T) { 10 | if err := AllUp(); err != nil { 11 | t.Fatal(err) 12 | } 13 | 14 | status, err := Status() 15 | if err != nil { 16 | t.Fatal(err) 17 | } 18 | 19 | if host, ok := status["node1"]; !ok { 20 | t.Fatal("node1 not in status list") 21 | } else if !host.State.IsUp() { 22 | t.Fatal("node1 is not up") 23 | } 24 | 25 | NodeDown("node1") 26 | status, err = Status() 27 | if err != nil { 28 | t.Fatal(err) 29 | } 30 | 31 | if host, ok := status["node1"]; !ok { 32 | t.Fatal("node1 not in status list") 33 | } else if host.State.IsUp() { 34 | t.Fatal("node1 is not down") 35 | } 36 | 37 | NodeUp("node1") 38 | status, err = Status() 39 | if err != nil { 40 | t.Fatal(err) 41 | } 42 | 43 | if host, ok := status["node1"]; !ok { 44 | t.Fatal("node1 not in status list") 45 | } else if !host.State.IsUp() { 46 | t.Fatal("node1 is not up") 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /ring_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "net" 5 | "testing" 6 | ) 7 | 8 | func TestRing_AddHostIfMissing_Missing(t *testing.T) { 9 | ring := &ring{} 10 | 11 | host := &HostInfo{peer: net.IPv4(1, 1, 1, 1)} 12 | h1, ok := ring.addHostIfMissing(host) 13 | if ok { 14 | t.Fatal("host was reported as already existing") 15 | } else if !h1.Equal(host) { 16 | t.Fatalf("hosts not equal that are returned %v != %v", h1, host) 17 | } else if h1 != host { 18 | t.Fatalf("returned host same pointer: %p != %p", h1, host) 19 | } 20 | } 21 | 22 | func TestRing_AddHostIfMissing_Existing(t *testing.T) { 23 | ring := &ring{} 24 | 25 | host := &HostInfo{peer: net.IPv4(1, 1, 1, 1)} 26 | ring.addHostIfMissing(host) 27 | 28 | h2 := &HostInfo{peer: net.IPv4(1, 1, 1, 1)} 29 | 30 | h1, ok := ring.addHostIfMissing(h2) 31 | if !ok { 32 | t.Fatal("host was not reported as already existing") 33 | } else if !h1.Equal(host) { 34 | t.Fatalf("hosts not equal that are returned %v != %v", h1, host) 35 | } else if h1 != host { 36 | t.Fatalf("returned host same pointer: %p != %p", h1, host) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /topology_test.go: -------------------------------------------------------------------------------- 1 | // +build all unit 2 | 3 | package gocql 4 | 5 | import ( 6 | "testing" 7 | ) 8 | 9 | // fakeNode is used as a simple structure to test the RoundRobin API 10 | type fakeNode struct { 11 | conn *Conn 12 | closed bool 13 | } 14 | 15 | // Pick is needed to satisfy the Node interface 16 | func (n *fakeNode) Pick(qry *Query) *Conn { 17 | if n.conn == nil { 18 | n.conn = &Conn{} 19 | } 20 | return n.conn 21 | } 22 | 23 | //Close is needed to satisfy the Node interface 24 | func (n *fakeNode) Close() { 25 | n.closed = true 26 | } 27 | 28 | //TestRoundRobinAPI tests the exported methods of the RoundRobin struct 29 | //to make sure the API behaves accordingly. 
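//
// The flow under test, using the exported API defined in topology.go:
//
//	rr := NewRoundRobin()
//	rr.AddNode(node)     // register a Node implementation
//	conn := rr.Pick(qry) // round-robins over the registered nodes
//	rr.Close()           // closes every node and empties the pool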
30 | func TestRoundRobinAPI(t *testing.T) { 31 | node := &fakeNode{} 32 | rr := NewRoundRobin() 33 | rr.AddNode(node) 34 | 35 | if rr.Size() != 1 { 36 | t.Fatalf("expected size to be 1, got %v", rr.Size()) 37 | } 38 | 39 | if c := rr.Pick(nil); c != node.conn { 40 | t.Fatalf("expected conn %v, got %v", node.conn, c) 41 | } 42 | 43 | rr.Close() 44 | if rr.pool != nil { 45 | t.Fatalf("expected rr.pool to be nil, got %v", rr.pool) 46 | } 47 | 48 | if !node.closed { 49 | t.Fatal("expected node.closed to be true, got false") 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /host_source_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import "testing" 4 | 5 | func TestUnmarshalCassVersion(t *testing.T) { 6 | tests := [...]struct { 7 | data string 8 | version cassVersion 9 | }{ 10 | {"3.2", cassVersion{3, 2, 0}}, 11 | {"2.10.1-SNAPSHOT", cassVersion{2, 10, 1}}, 12 | {"1.2.3", cassVersion{1, 2, 3}}, 13 | } 14 | 15 | for i, test := range tests { 16 | v := &cassVersion{} 17 | if err := v.UnmarshalCQL(nil, []byte(test.data)); err != nil { 18 | t.Errorf("%d: %v", i, err) 19 | } else if *v != test.version { 20 | t.Errorf("%d: expected %#+v got %#+v", i, test.version, *v) 21 | } 22 | } 23 | } 24 | 25 | func TestCassVersionBefore(t *testing.T) { 26 | tests := [...]struct { 27 | version cassVersion 28 | major, minor, patch int 29 | }{ 30 | {cassVersion{1, 0, 0}, 0, 0, 0}, 31 | {cassVersion{0, 1, 0}, 0, 0, 0}, 32 | {cassVersion{0, 0, 1}, 0, 0, 0}, 33 | 34 | {cassVersion{1, 0, 0}, 0, 1, 0}, 35 | {cassVersion{0, 1, 0}, 0, 0, 1}, 36 | } 37 | 38 | for i, test := range tests { 39 | if !test.version.Before(test.major, test.minor, test.patch) { 40 | t.Errorf("%d: expected v%d.%d.%d to be before %v", i, test.major, test.minor, test.patch, test.version) 41 | } 42 | } 43 | 44 | } 45 | -------------------------------------------------------------------------------- /testdata/pki/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDLzCCAhegAwIBAgIJAIKbAXgemwsjMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV 3 | BAMTCWNhc3NhbmRyYTAeFw0xNDA5MTkyMTE4MTNaFw0yNDA5MTYyMTE4MTNaMBQx 4 | EjAQBgNVBAMTCWNhc3NhbmRyYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC 5 | ggEBAL5fX0l1WDNa+mO1krxw7k8lfUQn+Ec4L3Mqv6IstGoNdCPq4YRA+SXRD5YC 6 | k/UXrFBWh9Hbs849GiuTYMPdj9HDLYz40RaQjM9GbieS23iy3UStQ0tKhxaaG6FN 7 | 6XBypXFKCTsanu0TkEoDGhAkSzAMcCAC3gkFBzMrZ5qt4HEzjY9rasZ2gthN+xop 8 | nq3t4dDkE8HGaiFJcFvqTor7xmrnAaPjrPzUpvOF/ObIC09omwg/KXdPRx4DKPon 9 | gCMKEE3ckebKnJvbsRX3WO8H5nTHBYZ6v1JxLZz5pqmV+P0NGxldCARM0gCQUBz5 10 | wjMJkD/3e1ETC+q6uwfnAG0hlD8CAwEAAaOBgzCBgDAdBgNVHQ4EFgQUjHzn0nYF 11 | iXEaI1vUWbRR4lwKXOgwRAYDVR0jBD0wO4AUjHzn0nYFiXEaI1vUWbRR4lwKXOih 12 | GKQWMBQxEjAQBgNVBAMTCWNhc3NhbmRyYYIJAIKbAXgemwsjMAwGA1UdEwQFMAMB 13 | Af8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQBCYDdIhtf/Y12Et947 14 | am1B8TzSX+/iQ1V1J3JtvgD5F4fvNjfArat/I3D277WREUTAc76o16BCp2OBGqzO 15 | zf9MvZPkjkAUoyU0TtPUEHyqxq4gZxbWKugIZGYkmQ1hCvSIgA5UnjRL3dylMmZb 16 | Y33JJA2QY63FZwnhmWsM8FYZwh+8MzVCQx3mgXC/k/jS6OuYyIT/KjxQHHjyr5ZS 17 | zAAQln1IcZycLfh1w5MtCFahCIethFcVDnWUWYPcPGDGgMJW7WBpNZdHbLxYY8cI 18 | eCc3Hcrbdc/CG5CaLJeqUidBayjnlUIO/NNgglkJ1KhQzkM6bd+37e0AX1hLIqx7 19 | gIZR 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /compressor_test.go: -------------------------------------------------------------------------------- 1 | // +build all 
unit 2 | 3 | package gocql 4 | 5 | import ( 6 | "bytes" 7 | "testing" 8 | 9 | "github.com/golang/snappy" 10 | ) 11 | 12 | func TestSnappyCompressor(t *testing.T) { 13 | c := SnappyCompressor{} 14 | if c.Name() != "snappy" { 15 | t.Fatalf("expected name to be 'snappy', got %v", c.Name()) 16 | } 17 | 18 | str := "My Test String" 19 | //Test Encoding 20 | expected := snappy.Encode(nil, []byte(str)) 21 | if res, err := c.Encode([]byte(str)); err != nil { 22 | t.Fatalf("failed to encode '%v' with error %v", str, err) 23 | } else if bytes.Compare(expected, res) != 0 { 24 | t.Fatal("failed to match the expected encoded value with the result encoded value.") 25 | } 26 | 27 | val, err := c.Encode([]byte(str)) 28 | if err != nil { 29 | t.Fatalf("failed to encode '%v' with error '%v'", str, err) 30 | } 31 | 32 | //Test Decoding 33 | if expected, err := snappy.Decode(nil, val); err != nil { 34 | t.Fatalf("failed to decode '%v' with error %v", val, err) 35 | } else if res, err := c.Decode(val); err != nil { 36 | t.Fatalf("failed to decode '%v' with error %v", val, err) 37 | } else if bytes.Compare(expected, res) != 0 { 38 | t.Fatal("failed to match the expected decoded value with the result decoded value.") 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /query_executor.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type ExecutableQuery interface { 8 | execute(conn *Conn) *Iter 9 | attempt(time.Duration) 10 | retryPolicy() RetryPolicy 11 | GetRoutingKey() ([]byte, error) 12 | RetryableQuery 13 | } 14 | 15 | type queryExecutor struct { 16 | pool *policyConnPool 17 | policy HostSelectionPolicy 18 | } 19 | 20 | func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) { 21 | rt := qry.retryPolicy() 22 | hostIter := q.policy.Pick(qry) 23 | 24 | var iter *Iter 25 | for hostResponse := hostIter(); hostResponse != nil; hostResponse = hostIter() { 26 | host := hostResponse.Info() 27 | if host == nil || !host.IsUp() { 28 | continue 29 | } 30 | 31 | pool, ok := q.pool.getPool(host) 32 | if !ok { 33 | continue 34 | } 35 | 36 | conn := pool.Pick() 37 | if conn == nil { 38 | continue 39 | } 40 | 41 | start := time.Now() 42 | iter = qry.execute(conn) 43 | 44 | qry.attempt(time.Since(start)) 45 | 46 | // Update host 47 | hostResponse.Mark(iter.err) 48 | 49 | // Exit for loop if the query was successful 50 | if iter.err == nil { 51 | return iter, nil 52 | } 53 | 54 | if rt == nil || !rt.Attempt(qry) { 55 | // What do here? Should we just return an error here? 56 | break 57 | } 58 | } 59 | 60 | if iter == nil { 61 | return nil, ErrNoConnections 62 | } 63 | 64 | return iter, nil 65 | } 66 | -------------------------------------------------------------------------------- /topology.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2012 The gocql Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package gocql 6 | 7 | import ( 8 | "sync" 9 | "sync/atomic" 10 | ) 11 | 12 | type Node interface { 13 | Pick(qry *Query) *Conn 14 | Close() 15 | } 16 | 17 | type RoundRobin struct { 18 | pool []Node 19 | pos uint32 20 | mu sync.RWMutex 21 | } 22 | 23 | func NewRoundRobin() *RoundRobin { 24 | return &RoundRobin{} 25 | } 26 | 27 | func (r *RoundRobin) AddNode(node Node) { 28 | r.mu.Lock() 29 | r.pool = append(r.pool, node) 30 | r.mu.Unlock() 31 | } 32 | 33 | func (r *RoundRobin) RemoveNode(node Node) { 34 | r.mu.Lock() 35 | n := len(r.pool) 36 | for i := 0; i < n; i++ { 37 | if r.pool[i] == node { 38 | r.pool[i], r.pool[n-1] = r.pool[n-1], r.pool[i] 39 | r.pool = r.pool[:n-1] 40 | break 41 | } 42 | } 43 | r.mu.Unlock() 44 | } 45 | 46 | func (r *RoundRobin) Size() int { 47 | r.mu.RLock() 48 | n := len(r.pool) 49 | r.mu.RUnlock() 50 | return n 51 | } 52 | 53 | func (r *RoundRobin) Pick(qry *Query) *Conn { 54 | pos := atomic.AddUint32(&r.pos, 1) 55 | var node Node 56 | r.mu.RLock() 57 | if len(r.pool) > 0 { 58 | node = r.pool[pos%uint32(len(r.pool))] 59 | } 60 | r.mu.RUnlock() 61 | if node == nil { 62 | return nil 63 | } 64 | return node.Pick(qry) 65 | } 66 | 67 | func (r *RoundRobin) Close() { 68 | r.mu.Lock() 69 | for i := 0; i < len(r.pool); i++ { 70 | r.pool[i].Close() 71 | } 72 | r.pool = nil 73 | r.mu.Unlock() 74 | } 75 | -------------------------------------------------------------------------------- /prepared_cache.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "github.com/gocql/gocql/internal/lru" 5 | "sync" 6 | ) 7 | 8 | const defaultMaxPreparedStmts = 1000 9 | 10 | // preparedLRU is the prepared statement cache 11 | type preparedLRU struct { 12 | mu sync.Mutex 13 | lru *lru.Cache 14 | } 15 | 16 | // Max adjusts the maximum size of the cache and cleans up the oldest records if 17 | // the new max is lower than the previous value. Not concurrency safe. 18 | func (p *preparedLRU) max(max int) { 19 | p.mu.Lock() 20 | defer p.mu.Unlock() 21 | 22 | for p.lru.Len() > max { 23 | p.lru.RemoveOldest() 24 | } 25 | p.lru.MaxEntries = max 26 | } 27 | 28 | func (p *preparedLRU) clear() { 29 | p.mu.Lock() 30 | defer p.mu.Unlock() 31 | 32 | for p.lru.Len() > 0 { 33 | p.lru.RemoveOldest() 34 | } 35 | } 36 | 37 | func (p *preparedLRU) add(key string, val *inflightPrepare) { 38 | p.mu.Lock() 39 | defer p.mu.Unlock() 40 | p.lru.Add(key, val) 41 | } 42 | 43 | func (p *preparedLRU) remove(key string) bool { 44 | p.mu.Lock() 45 | defer p.mu.Unlock() 46 | return p.lru.Remove(key) 47 | } 48 | 49 | func (p *preparedLRU) execIfMissing(key string, fn func(lru *lru.Cache) *inflightPrepare) (*inflightPrepare, bool) { 50 | p.mu.Lock() 51 | defer p.mu.Unlock() 52 | 53 | val, ok := p.lru.Get(key) 54 | if ok { 55 | return val.(*inflightPrepare), true 56 | } 57 | 58 | return fn(p.lru), false 59 | } 60 | 61 | func (p *preparedLRU) keyFor(addr, keyspace, statement string) string { 62 | // TODO: maybe use []byte for keys? 63 | return addr + keyspace + statement 64 | } 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016, The Gocql authors 2 | All rights reserved. 
3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the copyright holder nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /control_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "net" 5 | "testing" 6 | ) 7 | 8 | func TestHostInfo_Lookup(t *testing.T) { 9 | hostLookupPreferV4 = true 10 | defer func() { hostLookupPreferV4 = false }() 11 | 12 | tests := [...]struct { 13 | addr string 14 | ip net.IP 15 | }{ 16 | {"127.0.0.1", net.IPv4(127, 0, 0, 1)}, 17 | {"localhost", net.IPv4(127, 0, 0, 1)}, // TODO: this may be host dependant 18 | } 19 | 20 | for i, test := range tests { 21 | host, err := hostInfo(test.addr, 1) 22 | if err != nil { 23 | t.Errorf("%d: %v", i, err) 24 | continue 25 | } 26 | 27 | if !host.peer.Equal(test.ip) { 28 | t.Errorf("expected ip %v got %v for addr %q", test.ip, host.peer, test.addr) 29 | } 30 | } 31 | } 32 | 33 | func TestParseProtocol(t *testing.T) { 34 | tests := [...]struct { 35 | err error 36 | proto int 37 | }{ 38 | { 39 | err: &protocolError{ 40 | frame: errorFrame{ 41 | code: 0x10, 42 | message: "Invalid or unsupported protocol version (5); the lowest supported version is 3 and the greatest is 4", 43 | }, 44 | }, 45 | proto: 4, 46 | }, 47 | { 48 | err: &protocolError{ 49 | frame: errorFrame{ 50 | frameHeader: frameHeader{ 51 | version: 0x83, 52 | }, 53 | code: 0x10, 54 | message: "Invalid or unsupported protocol version: 5", 55 | }, 56 | }, 57 | proto: 3, 58 | }, 59 | } 60 | 61 | for i, test := range tests { 62 | if proto := parseProtocolFromError(test.err); proto != test.proto { 63 | t.Errorf("%d: exepcted proto %d got %d", i, test.proto, proto) 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /testdata/pki/gocql.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEArun6nv3iaYUdCA81aLxje5JQf3NQ/EJDNQazXJ4nHhYFaeyI 3 | 
1ZxP7+gTaXq1s39mbRQALtavW/8skJGmEQdyXrA3wG3/e3Yr/t5M0o3OQzsaxB3e 4 | ttgmCCWJWaFLlKNXnhlGKG6XEXzmt0GWj0LdZtqG0lPd2PUgzSSLD6vfxBCyZCAd 5 | 4A/0LfbKlL6DrD6oSne2CJc6fnvgPqtoz+72oY6/7L4G0a1s7U810QSXCDOxZVth 6 | Mo1L8DA1S4trBvIacoxpvfOyxKSkcEXjZ6J6ny7LKC2faAPxx9lPg8k9jDQECjsT 7 | h5Lh9+N5fqvAJbHlOAlEPjHfEtTcew41v+4lXwIDAQABAoIBAQCCP9XSwzfwX6Fo 8 | uPqKjY5/HEs5PQPXdPha6ixyEYsLilZptCuI9adI/MZHy4q2qW36V+Ry/IcEuJXU 9 | 6cCB+cue2xYJA2A17Z+BYMRQHiy0P7UEyUFpYrefZWRMDCIeAyxhnGxz+zYfXaTo 10 | Xbzh3WbFCoFO6gjPYGoWmNm8x74PXyunNaMa/gWFECX5MMBXoOk5xSFGbHzI2Cds 11 | iT7sdCQJVbBs7yidYwNqPWQuOwrskFinPIFSc7bZ0Sx9wO3XTIrQFCE94v/AN6yR 12 | 9Q37ida54g5tgtoeg/5EGsUM++i4wqJVoT3tWUHv1jBozO4Lm65uWR/1HcrusVnr 13 | x0TM9SaBAoGBAOMeaZdUrCJXnIiSoqCGDvZmylTAeOo6n2RAiviOYxVB4GP/SSjh 14 | 8VeddFhYT1GCmZ+YjIXnRWK+dSqVukzCuf5xW5mWY7PDNGZe2P6O78lXnY4cb8Nc 15 | Uo9/S2aPnNmNHL2TYVBYUiZj+t2azIQEFvRth4Vu/AHRUG41/USxpwm/AoGBAMUo 16 | GX0xgSFAVpHnTLdzWrHNRrzHgYN8ywPKFgNOASvdgW0BFoqXEvVGc1Ak6uW82m1/ 17 | L9ChOzWjCY7CoT+LPmdUVyGT9/UAPtWeLfo8Owl4tG91jQjePmJFvLoXErryCFRt 18 | SOOvCsTTTq2gN3PREHxY3dj2kJqaCBLCEzx3cYxhAoGBAIUxdrc6/t/9BV3KsPj2 19 | 5Zt3WL0vSzoCOyut9lIiHtV+lrvOIPeK2eCKBIsy7wFcV/+SlQaKRNTN4SSiPml5 20 | 4V3o2NFPsxTfK8HFafiPluw7J7kJ0Dl/0SM6gduZ6WBkMzCyV+WohjTheWOwvrPF 21 | OjkKaunD1qKyQDsCCo/Yp589AoGAdKgnfNZf68bf8nEECcBtt6sY4fbCgYTDszhO 22 | EiKDuurT/CWaquJ9SzgmXxOZEdrO+9838aCVIkWYECrFso23nPhgnfOp0gQVKdzw 23 | o5Ij9JTBXvoVO1wVWZyd8RZZ9Nflad9IM8CNBK1rbnzQkuzvbkQ+8HPkWDYv9Ll1 24 | HGAohcECgYBQeirIumumj1B17WD/KmNe0U0qCHHp+oSW4W2r7pjlEVZzeQmggX4O 25 | anbEngyQaZKeUiUOj9snBDmzLv7S+j5p7Us4d1fbp70sCKuK6tcAnROU8gK8IGiI 26 | I01ypD8Z1Mb556qek56eRWlr71sy6wI1lbQa856cUBvePajUOKsKsw== 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /testdata/pki/cassandra.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpQIBAAKCAQEA5Zwgnt6Yc0RBDTdMYsOfh1+bT6rP9pBupeCJiHoAxrvXgIdp 3 | LvrwNVmAboIlyLNs9qSXl5OT6vBwcKTht6rawZlmm5MEOs4LgwcGIj2m239oD0mA 4 | vYaou1RtOF8PsPoblySuzJ03mH52zOMbRRshJRcCwBrF+3bDi5PXxYUUClykEucY 5 | aZj1ds14zZlaKWXxaCCX074Js2gb8qOimnNYU37thjKjWtVGA/mztOxjcbq7+2/5 6 | gmPkVUd6euR7F2vX5s87yasMMBXJ7cfW/LZyshR9x/N/ivRjcGSOD9voOkVHzbl7 7 | rsgxwVLRPjQSt3PnuomGmjbtoFpp0NTjthaFrwIDAQABAoIBAQChjdjl73kUoVGk 8 | GuSEGWCFv59nzqfEtJsl23bpr+4b5s8agCxiAe5Bm1fiaXBsZtKkN+rxm8TX6ZUz 9 | rM+ki3KgBW9Mx4SSW6d96dNHBFoC1wJAv1b2A2l1ZVHz9+7ydwgysHzNO1GC2nh8 10 | cM8fMJeBoU8uG6hx5n5wFvYa5CfVoUQh8+Oq0b+mVxEFKHmRPnWp9/jPzL5eBIdr 11 | ulbDt9S3dKJtouHgHBUNdkq/7Ex3QeHrUOahX6Y4eX1rzLnfLYY+0J4EA2PCKvgQ 12 | bfKCxVnnzL6ywviH8eS3ql6OvTfnbK9kCRw7WxX9CC50qKj3EmwC/51MPhWohWlq 13 | jw3qf38BAoGBAPPNyb3vUiyUqoErZxxIPFc2ob3vCjj06cvi7uKpOgrkdgC3iBhz 14 | aCFQ28r7LrxLAHaKvNvwp71Lc7WYo8WWkLI1DVn0dx+GiQYW3DbNcwZOS40ZQz5L 15 | zsjEcG4+cnZmuqGZBMNvQ+xUjkuucxvxPWKpEKM18GfDjgEkKbmDr+uNAoGBAPEY 16 | kVSfSZGtP0MoXIfRkrxBlhvCj9m+p60P37pyHrJBrlrwvxB7x3Oz8S70D6kV8s2g 17 | vVHgOS3VPj17VaQG8a3jBLKjzp5JLe34G8D1Ny8GqDc2wzOBtZySpJbifXuSUSPk 18 | cqF7yiu1cD/wRPlwyWxBX9ZbaxvxnIUwLLd3ygkrAoGBAKQaw42uVkCdvPr/DQOT 19 | d9I4erxO9zGJYQmU8bjtsZz9VJR89QWIQPIT7C3/zuB9F42zKxZcMXwQGo2EddAc 20 | 3b6mSRtgmwJEW10W7BmTRrZa4y3RcFqxSjoHR6pdLEyYL01woy0taqnb7H/yp5aK 21 | VghfxkwllXEyxxXrko5FnpdNAoGBANeJLBunz2BxrnW+doJhZDnytFya4nk6TbKU 22 | 12FaNoEL4PCh+12kGtogSwS74eg6m/citT2mI9gKpHrYcOaT4qmeo4uEj+nH6Eyv 23 | Gzi0wCHFZMr/pSC92/teyc+uKZo4Y1ugFq6w+Tt8GB7BERiisR+bji8XSTkRFemn 24 | +MIIUFFDAoGAM8Va2Q5aTUkfg2mYlNLqT2tUAXVEhbmzjPA6laSo25PQEYWmX7vj 25 | 
hiU0DPCDJQ/PlPI23xYtDDLNk83Zbx+Oj29GO5pawJY9NvFI8n60EFXfLbP1nEdG 26 | j077QZNZOKfcgJirWi3+RrHSAK4tFftCe7rkV8ZmlMRBY3SDxzKOGcc= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /stress_test.go: -------------------------------------------------------------------------------- 1 | // +build all integration 2 | 3 | package gocql 4 | 5 | import ( 6 | "sync/atomic" 7 | 8 | "testing" 9 | ) 10 | 11 | func BenchmarkConnStress(b *testing.B) { 12 | const workers = 16 13 | 14 | cluster := createCluster() 15 | cluster.NumConns = 1 16 | session := createSessionFromCluster(cluster, b) 17 | defer session.Close() 18 | 19 | if err := createTable(session, "CREATE TABLE IF NOT EXISTS conn_stress (id int primary key)"); err != nil { 20 | b.Fatal(err) 21 | } 22 | 23 | var seed uint64 24 | writer := func(pb *testing.PB) { 25 | seed := atomic.AddUint64(&seed, 1) 26 | var i uint64 = 0 27 | for pb.Next() { 28 | if err := session.Query("insert into conn_stress (id) values (?)", i*seed).Exec(); err != nil { 29 | b.Error(err) 30 | return 31 | } 32 | i++ 33 | } 34 | } 35 | 36 | b.SetParallelism(workers) 37 | b.RunParallel(writer) 38 | } 39 | 40 | func BenchmarkConnRoutingKey(b *testing.B) { 41 | const workers = 16 42 | 43 | cluster := createCluster() 44 | cluster.NumConns = 1 45 | cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy()) 46 | session := createSessionFromCluster(cluster, b) 47 | defer session.Close() 48 | 49 | if err := createTable(session, "CREATE TABLE IF NOT EXISTS routing_key_stress (id int primary key)"); err != nil { 50 | b.Fatal(err) 51 | } 52 | 53 | var seed uint64 54 | writer := func(pb *testing.PB) { 55 | seed := atomic.AddUint64(&seed, 1) 56 | var i uint64 = 0 57 | query := session.Query("insert into routing_key_stress (id) values (?)") 58 | 59 | for pb.Next() { 60 | if _, err := query.Bind(i * seed).GetRoutingKey(); err != nil { 61 | b.Error(err) 62 | return 63 | } 64 | i++ 65 | } 66 | } 67 | 68 | b.SetParallelism(workers) 69 | b.RunParallel(writer) 70 | } 71 | -------------------------------------------------------------------------------- /filters.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import "fmt" 4 | 5 | // HostFilter interface is used when a host is discovered via server sent events. 6 | type HostFilter interface { 7 | // Called when a new host is discovered, returning true will cause the host 8 | // to be added to the pools. 9 | Accept(host *HostInfo) bool 10 | } 11 | 12 | // HostFilterFunc converts a func(host HostInfo) bool into a HostFilter 13 | type HostFilterFunc func(host *HostInfo) bool 14 | 15 | func (fn HostFilterFunc) Accept(host *HostInfo) bool { 16 | return fn(host) 17 | } 18 | 19 | // AcceptAllFilter will accept all hosts 20 | func AcceptAllFilter() HostFilter { 21 | return HostFilterFunc(func(host *HostInfo) bool { 22 | return true 23 | }) 24 | } 25 | 26 | func DenyAllFilter() HostFilter { 27 | return HostFilterFunc(func(host *HostInfo) bool { 28 | return false 29 | }) 30 | } 31 | 32 | // DataCentreHostFilter filters all hosts such that they are in the same data centre 33 | // as the supplied data centre. 
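//
// A hypothetical wiring sketch (assuming a HostFilter field on the cluster
// configuration) that limits connection pooling to one data centre:
//
//	cluster := NewCluster("127.0.0.1")
//	cluster.HostFilter = DataCentreHostFilter("dc1")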
34 | func DataCentreHostFilter(dataCentre string) HostFilter { 35 | return HostFilterFunc(func(host *HostInfo) bool { 36 | return host.DataCenter() == dataCentre 37 | }) 38 | } 39 | 40 | // WhiteListHostFilter filters incoming hosts by checking that their address is 41 | // in the initial hosts whitelist. 42 | func WhiteListHostFilter(hosts ...string) HostFilter { 43 | hostInfos, err := addrsToHosts(hosts, 9042) 44 | if err != nil { 45 | // dont want to panic here, but rather not break the API 46 | panic(fmt.Errorf("unable to lookup host info from address: %v", err)) 47 | } 48 | 49 | m := make(map[string]bool, len(hostInfos)) 50 | for _, host := range hostInfos { 51 | m[string(host.peer)] = true 52 | } 53 | 54 | return HostFilterFunc(func(host *HostInfo) bool { 55 | return m[string(host.Peer())] 56 | }) 57 | } 58 | -------------------------------------------------------------------------------- /cass1batch_test.go: -------------------------------------------------------------------------------- 1 | // +build all integration 2 | 3 | package gocql 4 | 5 | import ( 6 | "strings" 7 | "testing" 8 | ) 9 | 10 | func TestProto1BatchInsert(t *testing.T) { 11 | session := createSession(t) 12 | defer session.Close() 13 | 14 | if err := createTable(session, "CREATE TABLE gocql_test.large (id int primary key)"); err != nil { 15 | t.Fatal(err) 16 | } 17 | 18 | begin := "BEGIN BATCH" 19 | end := "APPLY BATCH" 20 | query := "INSERT INTO large (id) VALUES (?)" 21 | fullQuery := strings.Join([]string{begin, query, end}, "\n") 22 | args := []interface{}{5} 23 | if err := session.Query(fullQuery, args...).Consistency(Quorum).Exec(); err != nil { 24 | t.Fatal(err) 25 | } 26 | } 27 | 28 | func TestShouldPrepareFunction(t *testing.T) { 29 | var shouldPrepareTests = []struct { 30 | Stmt string 31 | Result bool 32 | }{ 33 | {` 34 | BEGIN BATCH 35 | INSERT INTO users (userID, password) 36 | VALUES ('smith', 'secret') 37 | APPLY BATCH 38 | ; 39 | `, true}, 40 | {`INSERT INTO users (userID, password, name) VALUES ('user2', 'ch@ngem3b', 'second user')`, true}, 41 | {`BEGIN COUNTER BATCH UPDATE stats SET views = views + 1 WHERE pageid = 1 APPLY BATCH`, true}, 42 | {`delete name from users where userID = 'smith';`, true}, 43 | {` UPDATE users SET password = 'secret' WHERE userID = 'smith' `, true}, 44 | {`CREATE TABLE users ( 45 | user_name varchar PRIMARY KEY, 46 | password varchar, 47 | gender varchar, 48 | session_token varchar, 49 | state varchar, 50 | birth_year bigint 51 | );`, false}, 52 | } 53 | 54 | for _, test := range shouldPrepareTests { 55 | q := &Query{stmt: test.Stmt} 56 | if got := q.shouldPrepare(); got != test.Result { 57 | t.Fatalf("%q: got %v, expected %v\n", test.Stmt, got, test.Result) 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /testdata/pki/ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | Proc-Type: 4,ENCRYPTED 3 | DEK-Info: DES-EDE3-CBC,54C8072C0FF3B3A3 4 | 5 | 27eijmHdgB+s3beNPmU0+iz+muxMD0BVvWkDzyec/uawMv/Cn4c3mYXOcsFxS3BL 6 | +qLT9MEttOmjqhHSaVrDYOPKoJIMpn+bVeKiR08V89icO36shEPy1feGqanagKtw 7 | ecgzFDBTA8ZbqjAhftXlhTwxADebvNms/2aDh5Aw04vIcbo8nQ/8z1Wz8O7Firsn 8 | kaseSTMTC6lxc+pa2V1X6mN0/2UpDi55bZbx1Z/mQ3+1CsdHOx0p7m/KY2m3ysov 9 | XluaC0sqmzHkcwNgDhUs3Jh+apE33vXzLGU+W4BDOwrYJiL6KpspZW/mJj3OEx8B 10 | 8xdAZU3a/ei8NUA/lDStGmcYX+dOysExwJ6GMrCBm9iufZiefDQCQ8yRqWnr6Zop 11 | lsFd+CqHNWYxfWDI1pSUBw3bsgIjevI0f0B7PxkFEF0DmIhCgB324/uqToRzGsOF 12 | 
4MSVg6cSK7Sjo/u3r8r75A3aUAcY8NbR3peiZfAPMsTiUcfp4DoU+MJTqkX5PyQq 13 | FNxHOJoARZqjjQ2IhZiUQWfIINHvZ8F9G2K7VaES8A0EATyUghqaRyeLbyI3IYdW 14 | pGZBzrpGtdFlk9AVetHDDlY+gQiurtYhxOsxvlxJJuTj8FV+A5NWSElfPele0OiR 15 | iprE3xkFSk3whHu5L1vnzamvdSlnBWOAE7pQD7kQA6NmcEw/tqnXK0dVdAw8RIFh 16 | 4BKgv0sNrXzBgnzE8+bKLUf1a2Byc/YKuBrI7EpSZ9/VHYvOcgmOxNxMmRS6NYd1 17 | Ly+agQn0AyvsDmSlBZBp8GCzVp6JYBMDKSXyPVN8+wjK9OQM0PZdEdXouMwPCOVN 18 | oNSjhmMtfjOsnG2SZ9tRas3p0qFdfh/N/E6Q7QHG3WD3cUIEweFV9ji1FTSRUrIa 19 | shuKug8MUfNjvDJNMsdGyf6Hi/7Iik++42Rq3ZdTy0ZVkj5snv5yBN77pr2M/J4b 20 | M+dsXjyXPO4SDW3kP/e3RnLRlWmUv1PNdOmNDdjBBUTKgVZ3ur+4HmSY1iDvhlUF 21 | /hz2tz3/XUKQwYuv3KJVlBhLrniXeES36GK+JQadIszrjwb5N4q4p6xrIdIR7XgR 22 | TJCSL1NGPLeQyjK6byWLNPRcCGrvnxWs0k0ev6trMRJL1EjsIFDCJam9szhcXkZP 23 | iYl1d7ZMKPS3cAqCjdaFRSe65cZ+qI/cqxiv122orq/jkDY7ZSA9rWywY4YnYQ7A 24 | BqvcPzC/6K0bteXqmMQkIy/84aSnEts6ecb/4s5e5xXLhHe0dchG0HkasC/Gb+v/ 25 | m9NOqACTerWvSD+Ecv9OvnBjP+GTlA1g7xTiRANLXsTJuiJomtxewXcV6kGZEMmZ 26 | QWerGtPJGGUx36WRWrMiPeBfWZoIbjYGPmOO5mYNXMTjABGGWcFnKAqWUKsFihi9 27 | pC0OpZ7A0dtc9uSm0ZmsHUc3XENMHTeeEN+qgWxVKcMzRKEcnapu/0OcHrOUHDZf 28 | qPoG4EkNnG9kPMq3HzvFPx3qbQ017yl87vAkWy/Edo+ojfHoNghRBVGCw1zt/BMN 29 | eJbFFHop+rQ87omz8WIL4K+zVf91rJ0REVAJssQVDo16O5wrMo+f+c8v2GANQks5 30 | -----END RSA PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /tuple_test.go: -------------------------------------------------------------------------------- 1 | // +build all integration 2 | 3 | package gocql 4 | 5 | import "testing" 6 | 7 | func TestTupleSimple(t *testing.T) { 8 | if *flagProto < protoVersion3 { 9 | t.Skip("tuple types are only available of proto>=3") 10 | } 11 | 12 | session := createSession(t) 13 | defer session.Close() 14 | 15 | err := createTable(session, `CREATE TABLE gocql_test.tuple_test( 16 | id int, 17 | coord frozen>, 18 | 19 | primary key(id))`) 20 | if err != nil { 21 | t.Fatal(err) 22 | } 23 | 24 | err = session.Query("INSERT INTO tuple_test(id, coord) VALUES(?, (?, ?))", 1, 100, -100).Exec() 25 | if err != nil { 26 | t.Fatal(err) 27 | } 28 | 29 | var ( 30 | id int 31 | coord struct { 32 | x int 33 | y int 34 | } 35 | ) 36 | 37 | iter := session.Query("SELECT id, coord FROM tuple_test WHERE id=?", 1) 38 | if err := iter.Scan(&id, &coord.x, &coord.y); err != nil { 39 | t.Fatal(err) 40 | } 41 | 42 | if id != 1 { 43 | t.Errorf("expected to get id=1 got: %v", id) 44 | } 45 | if coord.x != 100 { 46 | t.Errorf("expected to get coord.x=100 got: %v", coord.x) 47 | } 48 | if coord.y != -100 { 49 | t.Errorf("expected to get coord.y=-100 got: %v", coord.y) 50 | } 51 | } 52 | 53 | func TestTupleMapScan(t *testing.T) { 54 | if *flagProto < protoVersion3 { 55 | t.Skip("tuple types are only available of proto>=3") 56 | } 57 | 58 | session := createSession(t) 59 | defer session.Close() 60 | 61 | err := createTable(session, `CREATE TABLE gocql_test.tuple_map_scan( 62 | id int, 63 | val frozen>, 64 | 65 | primary key(id))`) 66 | if err != nil { 67 | t.Fatal(err) 68 | } 69 | 70 | if err := session.Query(`INSERT INTO tuple_map_scan (id, val) VALUES (1, (1, 2));`).Exec(); err != nil { 71 | t.Fatal(err) 72 | } 73 | 74 | m := make(map[string]interface{}) 75 | err = session.Query(`SELECT * FROM tuple_map_scan`).MapScan(m) 76 | if err != nil { 77 | t.Fatal(err) 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /internal/lru/lru_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2015 To 
gocql authors 3 | Copyright 2013 Google Inc. 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | */ 17 | 18 | package lru 19 | 20 | import ( 21 | "testing" 22 | ) 23 | 24 | type simpleStruct struct { 25 | int 26 | string 27 | } 28 | 29 | type complexStruct struct { 30 | int 31 | simpleStruct 32 | } 33 | 34 | var getTests = []struct { 35 | name string 36 | keyToAdd string 37 | keyToGet string 38 | expectedOk bool 39 | }{ 40 | {"string_hit", "mystring", "mystring", true}, 41 | {"string_miss", "mystring", "nonsense", false}, 42 | {"simple_struct_hit", "two", "two", true}, 43 | {"simple_struct_miss", "two", "noway", false}, 44 | } 45 | 46 | func TestGet(t *testing.T) { 47 | for _, tt := range getTests { 48 | lru := New(0) 49 | lru.Add(tt.keyToAdd, 1234) 50 | val, ok := lru.Get(tt.keyToGet) 51 | if ok != tt.expectedOk { 52 | t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok) 53 | } else if ok && val != 1234 { 54 | t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val) 55 | } 56 | } 57 | } 58 | 59 | func TestRemove(t *testing.T) { 60 | lru := New(0) 61 | lru.Add("mystring", 1234) 62 | if val, ok := lru.Get("mystring"); !ok { 63 | t.Fatal("TestRemove returned no match") 64 | } else if val != 1234 { 65 | t.Fatalf("TestRemove failed. 
Expected %d, got %v", 1234, val) 66 | } 67 | 68 | lru.Remove("mystring") 69 | if _, ok := lru.Get("mystring"); ok { 70 | t.Fatal("TestRemove returned a removed entry") 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /filters_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "net" 5 | "testing" 6 | ) 7 | 8 | func TestFilter_WhiteList(t *testing.T) { 9 | f := WhiteListHostFilter("127.0.0.1", "127.0.0.2") 10 | tests := [...]struct { 11 | addr net.IP 12 | accept bool 13 | }{ 14 | {net.ParseIP("127.0.0.1"), true}, 15 | {net.ParseIP("127.0.0.2"), true}, 16 | {net.ParseIP("127.0.0.3"), false}, 17 | } 18 | 19 | for i, test := range tests { 20 | if f.Accept(&HostInfo{peer: test.addr}) { 21 | if !test.accept { 22 | t.Errorf("%d: should not have been accepted but was", i) 23 | } 24 | } else if test.accept { 25 | t.Errorf("%d: should have been accepted but wasn't", i) 26 | } 27 | } 28 | } 29 | 30 | func TestFilter_AllowAll(t *testing.T) { 31 | f := AcceptAllFilter() 32 | tests := [...]struct { 33 | addr net.IP 34 | accept bool 35 | }{ 36 | {net.ParseIP("127.0.0.1"), true}, 37 | {net.ParseIP("127.0.0.2"), true}, 38 | {net.ParseIP("127.0.0.3"), true}, 39 | } 40 | 41 | for i, test := range tests { 42 | if f.Accept(&HostInfo{peer: test.addr}) { 43 | if !test.accept { 44 | t.Errorf("%d: should not have been accepted but was", i) 45 | } 46 | } else if test.accept { 47 | t.Errorf("%d: should have been accepted but wasn't", i) 48 | } 49 | } 50 | } 51 | 52 | func TestFilter_DenyAll(t *testing.T) { 53 | f := DenyAllFilter() 54 | tests := [...]struct { 55 | addr net.IP 56 | accept bool 57 | }{ 58 | {net.ParseIP("127.0.0.1"), false}, 59 | {net.ParseIP("127.0.0.2"), false}, 60 | {net.ParseIP("127.0.0.3"), false}, 61 | } 62 | 63 | for i, test := range tests { 64 | if f.Accept(&HostInfo{peer: test.addr}) { 65 | if !test.accept { 66 | t.Errorf("%d: should not have been accepted but was", i) 67 | } 68 | } else if test.accept { 69 | t.Errorf("%d: should have been accepted but wasn't", i) 70 | } 71 | } 72 | } 73 | 74 | func TestFilter_DataCentre(t *testing.T) { 75 | f := DataCentreHostFilter("dc1") 76 | tests := [...]struct { 77 | dc string 78 | accept bool 79 | }{ 80 | {"dc1", true}, 81 | {"dc2", false}, 82 | } 83 | 84 | for i, test := range tests { 85 | if f.Accept(&HostInfo{dataCenter: test.dc}) { 86 | if !test.accept { 87 | t.Errorf("%d: should not have been accepted but was", i) 88 | } 89 | } else if test.accept { 90 | t.Errorf("%d: should have been accepted but wasn't", i) 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /integration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | function run_tests() { 6 | local clusterSize=3 7 | local version=$1 8 | local auth=$2 9 | 10 | if [ "$auth" = true ]; then 11 | clusterSize=1 12 | fi 13 | 14 | local keypath="$(pwd)/testdata/pki" 15 | 16 | local conf=( 17 | "client_encryption_options.enabled: true" 18 | "client_encryption_options.keystore: $keypath/.keystore" 19 | "client_encryption_options.keystore_password: cassandra" 20 | "client_encryption_options.require_client_auth: true" 21 | "client_encryption_options.truststore: $keypath/.truststore" 22 | "client_encryption_options.truststore_password: cassandra" 23 | "concurrent_reads: 2" 24 | "concurrent_writes: 2" 25 | "rpc_server_type: sync" 26 | 
"rpc_min_threads: 2" 27 | "rpc_max_threads: 2" 28 | "write_request_timeout_in_ms: 5000" 29 | "read_request_timeout_in_ms: 5000" 30 | ) 31 | 32 | ccm remove test || true 33 | 34 | ccm create test -v $version -n $clusterSize -d --vnodes --jvm_arg="-Xmx256m -XX:NewSize=100m" 35 | ccm updateconf "${conf[@]}" 36 | 37 | if [ "$auth" = true ] 38 | then 39 | ccm updateconf 'authenticator: PasswordAuthenticator' 'authorizer: CassandraAuthorizer' 40 | rm -rf $HOME/.ccm/test/node1/data/system_auth 41 | fi 42 | 43 | local proto=2 44 | if [[ $version == 1.2.* ]]; then 45 | proto=1 46 | elif [[ $version == 2.0.* ]]; then 47 | proto=2 48 | elif [[ $version == 2.1.* ]]; then 49 | proto=3 50 | elif [[ $version == 2.2.* || $version == 3.0.* ]]; then 51 | proto=4 52 | ccm updateconf 'enable_user_defined_functions: true' 53 | elif [[ $version == 3.*.* ]]; then 54 | proto=4 55 | ccm updateconf 'enable_user_defined_functions: true' 56 | fi 57 | 58 | sleep 1s 59 | 60 | ccm list 61 | ccm start --wait-for-binary-proto 62 | ccm status 63 | ccm node1 nodetool status 64 | 65 | local args="-gocql.timeout=60s -runssl -proto=$proto -rf=3 -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy -gocql.cversion=$version -cluster=$(ccm liveset) ./..." 66 | 67 | go test -v -tags unit 68 | 69 | if [ "$auth" = true ] 70 | then 71 | sleep 30s 72 | go test -run=TestAuthentication -tags "integration gocql_debug" -timeout=15s -runauth $args 73 | else 74 | sleep 1s 75 | go test -tags "integration gocql_debug" -timeout=5m $args 76 | 77 | ccm clear 78 | ccm start 79 | sleep 1s 80 | 81 | go test -tags "ccm gocql_debug" -timeout=5m $args 82 | fi 83 | 84 | ccm remove 85 | } 86 | 87 | run_tests $1 $2 88 | -------------------------------------------------------------------------------- /internal/murmur/murmur_test.go: -------------------------------------------------------------------------------- 1 | package murmur 2 | 3 | import ( 4 | "strconv" 5 | "testing" 6 | ) 7 | 8 | // Test the implementation of murmur3 9 | func TestMurmur3H1(t *testing.T) { 10 | // these examples are based on adding a index number to a sample string in 11 | // a loop. The expected values were generated by the java datastax murmur3 12 | // implementation. 
The number of examples here of increasing lengths ensure 13 | // test coverage of all tail-length branches in the murmur3 algorithm 14 | seriesExpected := [...]uint64{ 15 | 0x0000000000000000, // "" 16 | 0x2ac9debed546a380, // "0" 17 | 0x649e4eaa7fc1708e, // "01" 18 | 0xce68f60d7c353bdb, // "012" 19 | 0x0f95757ce7f38254, // "0123" 20 | 0x0f04e459497f3fc1, // "01234" 21 | 0x88c0a92586be0a27, // "012345" 22 | 0x13eb9fb82606f7a6, // "0123456" 23 | 0x8236039b7387354d, // "01234567" 24 | 0x4c1e87519fe738ba, // "012345678" 25 | 0x3f9652ac3effeb24, // "0123456789" 26 | 0x3f33760ded9006c6, // "01234567890" 27 | 0xaed70a6631854cb1, // "012345678901" 28 | 0x8a299a8f8e0e2da7, // "0123456789012" 29 | 0x624b675c779249a6, // "01234567890123" 30 | 0xa4b203bb1d90b9a3, // "012345678901234" 31 | 0xa3293ad698ecb99a, // "0123456789012345" 32 | 0xbc740023dbd50048, // "01234567890123456" 33 | 0x3fe5ab9837d25cdd, // "012345678901234567" 34 | 0x2d0338c1ca87d132, // "0123456789012345678" 35 | } 36 | sample := "" 37 | for i, expected := range seriesExpected { 38 | assertMurmur3H1(t, []byte(sample), expected) 39 | 40 | sample = sample + strconv.Itoa(i%10) 41 | } 42 | 43 | // Here are some test examples from other driver implementations 44 | assertMurmur3H1(t, []byte("hello"), 0xcbd8a7b341bd9b02) 45 | assertMurmur3H1(t, []byte("hello, world"), 0x342fac623a5ebc8e) 46 | assertMurmur3H1(t, []byte("19 Jan 2038 at 3:14:07 AM"), 0xb89e5988b737affc) 47 | assertMurmur3H1(t, []byte("The quick brown fox jumps over the lazy dog."), 0xcd99481f9ee902c9) 48 | } 49 | 50 | // helper function for testing the murmur3 implementation 51 | func assertMurmur3H1(t *testing.T, data []byte, expected uint64) { 52 | actual := Murmur3H1(data) 53 | if actual != expected { 54 | t.Errorf("Expected h1 = %x for data = %x, but was %x", expected, data, actual) 55 | } 56 | } 57 | 58 | // Benchmark of the performance of the murmur3 implementation 59 | func BenchmarkMurmur3H1(b *testing.B) { 60 | data := make([]byte, 1024) 61 | for i := 0; i < 1024; i++ { 62 | data[i] = byte(i) 63 | } 64 | 65 | b.ResetTimer() 66 | b.RunParallel(func(pb *testing.PB) { 67 | for pb.Next() { 68 | h1 := Murmur3H1(data) 69 | if h1 != 7627370222079200297 { 70 | b.Fatalf("expected %d got %d", 7627370222079200297, h1) 71 | } 72 | } 73 | }) 74 | } 75 | -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import "fmt" 4 | 5 | const ( 6 | errServer = 0x0000 7 | errProtocol = 0x000A 8 | errCredentials = 0x0100 9 | errUnavailable = 0x1000 10 | errOverloaded = 0x1001 11 | errBootstrapping = 0x1002 12 | errTruncate = 0x1003 13 | errWriteTimeout = 0x1100 14 | errReadTimeout = 0x1200 15 | errReadFailure = 0x1300 16 | errFunctionFailure = 0x1400 17 | errWriteFailure = 0x1500 18 | errSyntax = 0x2000 19 | errUnauthorized = 0x2100 20 | errInvalid = 0x2200 21 | errConfig = 0x2300 22 | errAlreadyExists = 0x2400 23 | errUnprepared = 0x2500 24 | ) 25 | 26 | type RequestError interface { 27 | Code() int 28 | Message() string 29 | Error() string 30 | } 31 | 32 | type errorFrame struct { 33 | frameHeader 34 | 35 | code int 36 | message string 37 | } 38 | 39 | func (e errorFrame) Code() int { 40 | return e.code 41 | } 42 | 43 | func (e errorFrame) Message() string { 44 | return e.message 45 | } 46 | 47 | func (e errorFrame) Error() string { 48 | return e.Message() 49 | } 50 | 51 | func (e errorFrame) String() string { 52 | return fmt.Sprintf("[error 
code=%x message=%q]", e.code, e.message) 53 | } 54 | 55 | type RequestErrUnavailable struct { 56 | errorFrame 57 | Consistency Consistency 58 | Required int 59 | Alive int 60 | } 61 | 62 | func (e *RequestErrUnavailable) String() string { 63 | return fmt.Sprintf("[request_error_unavailable consistency=%s required=%d alive=%d]", e.Consistency, e.Required, e.Alive) 64 | } 65 | 66 | type RequestErrWriteTimeout struct { 67 | errorFrame 68 | Consistency Consistency 69 | Received int 70 | BlockFor int 71 | WriteType string 72 | } 73 | 74 | type RequestErrWriteFailure struct { 75 | errorFrame 76 | Consistency Consistency 77 | Received int 78 | BlockFor int 79 | NumFailures int 80 | WriteType string 81 | } 82 | 83 | type RequestErrReadTimeout struct { 84 | errorFrame 85 | Consistency Consistency 86 | Received int 87 | BlockFor int 88 | DataPresent byte 89 | } 90 | 91 | type RequestErrAlreadyExists struct { 92 | errorFrame 93 | Keyspace string 94 | Table string 95 | } 96 | 97 | type RequestErrUnprepared struct { 98 | errorFrame 99 | StatementId []byte 100 | } 101 | 102 | type RequestErrReadFailure struct { 103 | errorFrame 104 | Consistency Consistency 105 | Received int 106 | BlockFor int 107 | NumFailures int 108 | DataPresent bool 109 | } 110 | 111 | type RequestErrFunctionFailure struct { 112 | errorFrame 113 | Keyspace string 114 | Function string 115 | ArgTypes []string 116 | } 117 | -------------------------------------------------------------------------------- /internal/murmur/murmur.go: -------------------------------------------------------------------------------- 1 | // +build !appengine 2 | 3 | package murmur 4 | 5 | import ( 6 | "unsafe" 7 | ) 8 | 9 | func Murmur3H1(data []byte) uint64 { 10 | length := len(data) 11 | 12 | var h1, h2, k1, k2 uint64 13 | 14 | const ( 15 | c1 = 0x87c37b91114253d5 16 | c2 = 0x4cf5ad432745937f 17 | ) 18 | 19 | // body 20 | nBlocks := length / 16 21 | for i := 0; i < nBlocks; i++ { 22 | block := (*[2]uint64)(unsafe.Pointer(&data[i*16])) 23 | 24 | k1 = block[0] 25 | k2 = block[1] 26 | 27 | k1 *= c1 28 | k1 = (k1 << 31) | (k1 >> 33) // ROTL64(k1, 31) 29 | k1 *= c2 30 | h1 ^= k1 31 | 32 | h1 = (h1 << 27) | (h1 >> 37) // ROTL64(h1, 27) 33 | h1 += h2 34 | h1 = h1*5 + 0x52dce729 35 | 36 | k2 *= c2 37 | k2 = (k2 << 33) | (k2 >> 31) // ROTL64(k2, 33) 38 | k2 *= c1 39 | h2 ^= k2 40 | 41 | h2 = (h2 << 31) | (h2 >> 33) // ROTL64(h2, 31) 42 | h2 += h1 43 | h2 = h2*5 + 0x38495ab5 44 | } 45 | 46 | // tail 47 | tail := data[nBlocks*16:] 48 | k1 = 0 49 | k2 = 0 50 | switch length & 15 { 51 | case 15: 52 | k2 ^= uint64(tail[14]) << 48 53 | fallthrough 54 | case 14: 55 | k2 ^= uint64(tail[13]) << 40 56 | fallthrough 57 | case 13: 58 | k2 ^= uint64(tail[12]) << 32 59 | fallthrough 60 | case 12: 61 | k2 ^= uint64(tail[11]) << 24 62 | fallthrough 63 | case 11: 64 | k2 ^= uint64(tail[10]) << 16 65 | fallthrough 66 | case 10: 67 | k2 ^= uint64(tail[9]) << 8 68 | fallthrough 69 | case 9: 70 | k2 ^= uint64(tail[8]) 71 | 72 | k2 *= c2 73 | k2 = (k2 << 33) | (k2 >> 31) // ROTL64(k2, 33) 74 | k2 *= c1 75 | h2 ^= k2 76 | 77 | fallthrough 78 | case 8: 79 | k1 ^= uint64(tail[7]) << 56 80 | fallthrough 81 | case 7: 82 | k1 ^= uint64(tail[6]) << 48 83 | fallthrough 84 | case 6: 85 | k1 ^= uint64(tail[5]) << 40 86 | fallthrough 87 | case 5: 88 | k1 ^= uint64(tail[4]) << 32 89 | fallthrough 90 | case 4: 91 | k1 ^= uint64(tail[3]) << 24 92 | fallthrough 93 | case 3: 94 | k1 ^= uint64(tail[2]) << 16 95 | fallthrough 96 | case 2: 97 | k1 ^= uint64(tail[1]) << 8 98 | fallthrough 99 | case 1: 
100 | k1 ^= uint64(tail[0]) 101 | 102 | k1 *= c1 103 | k1 = (k1 << 31) | (k1 >> 33) // ROTL64(k1, 31) 104 | k1 *= c2 105 | h1 ^= k1 106 | } 107 | 108 | h1 ^= uint64(length) 109 | h2 ^= uint64(length) 110 | 111 | h1 += h2 112 | h2 += h1 113 | 114 | // finalizer 115 | const ( 116 | fmix1 = 0xff51afd7ed558ccd 117 | fmix2 = 0xc4ceb9fe1a85ec53 118 | ) 119 | 120 | // fmix64(h1) 121 | h1 ^= h1 >> 33 122 | h1 *= fmix1 123 | h1 ^= h1 >> 33 124 | h1 *= fmix2 125 | h1 ^= h1 >> 33 126 | 127 | // fmix64(h2) 128 | h2 ^= h2 >> 33 129 | h2 *= fmix1 130 | h2 ^= h2 >> 33 131 | h2 *= fmix2 132 | h2 ^= h2 >> 33 133 | 134 | h1 += h2 135 | // the following is extraneous since h2 is discarded 136 | // h2 += h1 137 | 138 | return h1 139 | } 140 | -------------------------------------------------------------------------------- /internal/murmur/murmur_appengine.go: -------------------------------------------------------------------------------- 1 | // +build appengine 2 | 3 | package murmur 4 | 5 | import "encoding/binary" 6 | 7 | func Murmur3H1(data []byte) uint64 { 8 | length := len(data) 9 | 10 | var h1, h2, k1, k2 uint64 11 | 12 | const ( 13 | c1 = 0x87c37b91114253d5 14 | c2 = 0x4cf5ad432745937f 15 | ) 16 | 17 | // body 18 | nBlocks := length / 16 19 | for i := 0; i < nBlocks; i++ { 20 | // block := (*[2]uint64)(unsafe.Pointer(&data[i*16])) 21 | 22 | k1 = binary.LittleEndian.Uint64(data[i*16:]) 23 | k2 = binary.LittleEndian.Uint64(data[(i*16)+8:]) 24 | 25 | k1 *= c1 26 | k1 = (k1 << 31) | (k1 >> 33) // ROTL64(k1, 31) 27 | k1 *= c2 28 | h1 ^= k1 29 | 30 | h1 = (h1 << 27) | (h1 >> 37) // ROTL64(h1, 27) 31 | h1 += h2 32 | h1 = h1*5 + 0x52dce729 33 | 34 | k2 *= c2 35 | k2 = (k2 << 33) | (k2 >> 31) // ROTL64(k2, 33) 36 | k2 *= c1 37 | h2 ^= k2 38 | 39 | h2 = (h2 << 31) | (h2 >> 33) // ROTL64(h2, 31) 40 | h2 += h1 41 | h2 = h2*5 + 0x38495ab5 42 | } 43 | 44 | // tail 45 | tail := data[nBlocks*16:] 46 | k1 = 0 47 | k2 = 0 48 | switch length & 15 { 49 | case 15: 50 | k2 ^= uint64(tail[14]) << 48 51 | fallthrough 52 | case 14: 53 | k2 ^= uint64(tail[13]) << 40 54 | fallthrough 55 | case 13: 56 | k2 ^= uint64(tail[12]) << 32 57 | fallthrough 58 | case 12: 59 | k2 ^= uint64(tail[11]) << 24 60 | fallthrough 61 | case 11: 62 | k2 ^= uint64(tail[10]) << 16 63 | fallthrough 64 | case 10: 65 | k2 ^= uint64(tail[9]) << 8 66 | fallthrough 67 | case 9: 68 | k2 ^= uint64(tail[8]) 69 | 70 | k2 *= c2 71 | k2 = (k2 << 33) | (k2 >> 31) // ROTL64(k2, 33) 72 | k2 *= c1 73 | h2 ^= k2 74 | 75 | fallthrough 76 | case 8: 77 | k1 ^= uint64(tail[7]) << 56 78 | fallthrough 79 | case 7: 80 | k1 ^= uint64(tail[6]) << 48 81 | fallthrough 82 | case 6: 83 | k1 ^= uint64(tail[5]) << 40 84 | fallthrough 85 | case 5: 86 | k1 ^= uint64(tail[4]) << 32 87 | fallthrough 88 | case 4: 89 | k1 ^= uint64(tail[3]) << 24 90 | fallthrough 91 | case 3: 92 | k1 ^= uint64(tail[2]) << 16 93 | fallthrough 94 | case 2: 95 | k1 ^= uint64(tail[1]) << 8 96 | fallthrough 97 | case 1: 98 | k1 ^= uint64(tail[0]) 99 | 100 | k1 *= c1 101 | k1 = (k1 << 31) | (k1 >> 33) // ROTL64(k1, 31) 102 | k1 *= c2 103 | h1 ^= k1 104 | } 105 | 106 | h1 ^= uint64(length) 107 | h2 ^= uint64(length) 108 | 109 | h1 += h2 110 | h2 += h1 111 | 112 | // finalizer 113 | const ( 114 | fmix1 = 0xff51afd7ed558ccd 115 | fmix2 = 0xc4ceb9fe1a85ec53 116 | ) 117 | 118 | // fmix64(h1) 119 | h1 ^= h1 >> 33 120 | h1 *= fmix1 121 | h1 ^= h1 >> 33 122 | h1 *= fmix2 123 | h1 ^= h1 >> 33 124 | 125 | // fmix64(h2) 126 | h2 ^= h2 >> 33 127 | h2 *= fmix1 128 | h2 ^= h2 >> 33 129 | h2 *= fmix2 130 | h2 ^= h2 >> 33 131 
| 132 | h1 += h2 133 | // the following is extraneous since h2 is discarded 134 | // h2 += h1 135 | 136 | return h1 137 | } 138 | -------------------------------------------------------------------------------- /frame_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | func TestFuzzBugs(t *testing.T) { 9 | // these inputs are found using go-fuzz (https://github.com/dvyukov/go-fuzz) 10 | // and should cause a panic unless fixed. 11 | tests := [][]byte{ 12 | []byte("00000\xa0000"), 13 | []byte("\x8000\x0e\x00\x00\x00\x000"), 14 | []byte("\x8000\x00\x00\x00\x00\t0000000000"), 15 | []byte("\xa0\xff\x01\xae\xefqE\xf2\x1a"), 16 | []byte("\x8200\b\x00\x00\x00c\x00\x00\x00\x02000\x01\x00\x00\x00\x03" + 17 | "\x00\n0000000000\x00\x14000000" + 18 | "00000000000000\x00\x020000" + 19 | "\x00\a000000000\x00\x050000000" + 20 | "\xff0000000000000000000" + 21 | "0000000"), 22 | []byte("\x82\xe600\x00\x00\x00\x000"), 23 | []byte("\x8200\b\x00\x00\x00\b0\x00\x00\x00\x040000"), 24 | []byte("\x8200\x00\x00\x00\x00\x100\x00\x00\x12\x00\x00\x0000000" + 25 | "00000"), 26 | []byte("\x83000\b\x00\x00\x00\x14\x00\x00\x00\x020000000" + 27 | "000000000"), 28 | []byte("\x83000\b\x00\x00\x000\x00\x00\x00\x04\x00\x1000000" + 29 | "00000000000000e00000" + 30 | "000\x800000000000000000" + 31 | "0000000000000"), 32 | } 33 | 34 | for i, test := range tests { 35 | t.Logf("test %d input: %q", i, test) 36 | 37 | var bw bytes.Buffer 38 | 39 | r := bytes.NewReader(test) 40 | head, err := readHeader(r, make([]byte, 9)) 41 | if err != nil { 42 | continue 43 | } 44 | 45 | framer := newFramer(r, &bw, nil, byte(head.version)) 46 | err = framer.readFrame(&head) 47 | if err != nil { 48 | continue 49 | } 50 | 51 | frame, err := framer.parseFrame() 52 | if err != nil { 53 | continue 54 | } 55 | 56 | t.Errorf("(%d) expected to fail for input % X", i, test) 57 | t.Errorf("(%d) frame=%+#v", i, frame) 58 | } 59 | } 60 | 61 | func TestFrameWriteTooLong(t *testing.T) { 62 | w := &bytes.Buffer{} 63 | framer := newFramer(nil, w, nil, 2) 64 | 65 | framer.writeHeader(0, opStartup, 1) 66 | framer.writeBytes(make([]byte, maxFrameSize+1)) 67 | err := framer.finishWrite() 68 | if err != ErrFrameTooBig { 69 | t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err) 70 | } 71 | } 72 | 73 | func TestFrameReadTooLong(t *testing.T) { 74 | r := &bytes.Buffer{} 75 | r.Write(make([]byte, maxFrameSize+1)) 76 | // write a new header right after this frame to verify that we can read it 77 | r.Write([]byte{0x02, 0x00, 0x00, byte(opReady), 0x00, 0x00, 0x00, 0x00}) 78 | 79 | framer := newFramer(r, nil, nil, 2) 80 | 81 | head := frameHeader{ 82 | version: 2, 83 | op: opReady, 84 | length: r.Len() - 8, 85 | } 86 | 87 | err := framer.readFrame(&head) 88 | if err != ErrFrameTooBig { 89 | t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err) 90 | } 91 | 92 | head, err = readHeader(r, make([]byte, 8)) 93 | if err != nil { 94 | t.Fatal(err) 95 | } 96 | if head.op != opReady { 97 | t.Fatalf("expected to get header %v got %v", opReady, head.op) 98 | } 99 | } 100 | 101 | func TestParseConsistencyErrorInsteadOfPanic(t *testing.T) { 102 | _, err := ParseConsistencyWrapper("TEST") 103 | if err == nil { 104 | t.Fatal("expected ParseConsistencyWrapper error got nil") 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /ring.go: -------------------------------------------------------------------------------- 1 | 
package gocql 2 | 3 | import ( 4 | "net" 5 | "sync" 6 | "sync/atomic" 7 | ) 8 | 9 | type ring struct { 10 | // endpoints are the set of endpoints which the driver will attempt to connect 11 | // to if it cannot reach any of its hosts. They are also used to bootstrap 12 | // the initial connection. 13 | endpoints []*HostInfo 14 | 15 | // hosts are the set of all hosts in the cassandra ring that we know of 16 | mu sync.RWMutex 17 | hosts map[string]*HostInfo 18 | 19 | hostList []*HostInfo 20 | pos uint32 21 | 22 | // TODO: we should store the ring metadata here also. 23 | } 24 | 25 | func (r *ring) rrHost() *HostInfo { 26 | // TODO: should we filter hosts that get used here? These hosts will be used 27 | // for the control connection, should we also provide an iterator? 28 | r.mu.RLock() 29 | defer r.mu.RUnlock() 30 | if len(r.hostList) == 0 { 31 | return nil 32 | } 33 | 34 | pos := int(atomic.AddUint32(&r.pos, 1) - 1) 35 | return r.hostList[pos%len(r.hostList)] 36 | } 37 | 38 | func (r *ring) getHost(ip net.IP) *HostInfo { 39 | r.mu.RLock() 40 | host := r.hosts[ip.String()] 41 | r.mu.RUnlock() 42 | return host 43 | } 44 | 45 | func (r *ring) allHosts() []*HostInfo { 46 | r.mu.RLock() 47 | hosts := make([]*HostInfo, 0, len(r.hosts)) 48 | for _, host := range r.hosts { 49 | hosts = append(hosts, host) 50 | } 51 | r.mu.RUnlock() 52 | return hosts 53 | } 54 | 55 | func (r *ring) addHost(host *HostInfo) bool { 56 | ip := host.Peer().String() 57 | 58 | r.mu.Lock() 59 | if r.hosts == nil { 60 | r.hosts = make(map[string]*HostInfo) 61 | } 62 | 63 | _, ok := r.hosts[ip] 64 | if !ok { 65 | r.hostList = append(r.hostList, host) 66 | } 67 | 68 | r.hosts[ip] = host 69 | r.mu.Unlock() 70 | return ok 71 | } 72 | 73 | func (r *ring) addOrUpdate(host *HostInfo) *HostInfo { 74 | if existingHost, ok := r.addHostIfMissing(host); ok { 75 | existingHost.update(host) 76 | host = existingHost 77 | } 78 | return host 79 | } 80 | 81 | func (r *ring) addHostIfMissing(host *HostInfo) (*HostInfo, bool) { 82 | ip := host.Peer().String() 83 | 84 | r.mu.Lock() 85 | if r.hosts == nil { 86 | r.hosts = make(map[string]*HostInfo) 87 | } 88 | 89 | existing, ok := r.hosts[ip] 90 | if !ok { 91 | r.hosts[ip] = host 92 | existing = host 93 | r.hostList = append(r.hostList, host) 94 | } 95 | r.mu.Unlock() 96 | return existing, ok 97 | } 98 | 99 | func (r *ring) removeHost(ip net.IP) bool { 100 | r.mu.Lock() 101 | if r.hosts == nil { 102 | r.hosts = make(map[string]*HostInfo) 103 | } 104 | 105 | k := ip.String() 106 | _, ok := r.hosts[k] 107 | if ok { 108 | for i, host := range r.hostList { 109 | if host.Peer().Equal(ip) { 110 | r.hostList = append(r.hostList[:i], r.hostList[i+1:]...) 111 | break 112 | } 113 | } 114 | } 115 | delete(r.hosts, k) 116 | r.mu.Unlock() 117 | return ok 118 | } 119 | 120 | type clusterMetadata struct { 121 | mu sync.RWMutex 122 | partitioner string 123 | } 124 | 125 | func (c *clusterMetadata) setPartitioner(partitioner string) { 126 | c.mu.Lock() 127 | defer c.mu.Unlock() 128 | 129 | if c.partitioner != partitioner { 130 | // TODO: update other things now 131 | c.partitioner = partitioner 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | # This source file refers to The gocql Authors for copyright purposes.
2 | 3 | Christoph Hack 4 | Jonathan Rudenberg 5 | Thorsten von Eicken 6 | Matt Robenolt 7 | Phillip Couto 8 | Niklas Korz 9 | Nimi Wariboko Jr 10 | Ghais Issa 11 | Sasha Klizhentas 12 | Konstantin Cherkasov 13 | Ben Hood <0x6e6562@gmail.com> 14 | Pete Hopkins 15 | Chris Bannister 16 | Maxim Bublis 17 | Alex Zorin 18 | Kasper Middelboe Petersen 19 | Harpreet Sawhney 20 | Charlie Andrews 21 | Stanislavs Koikovs 22 | Dan Forest 23 | Miguel Serrano 24 | Stefan Radomski 25 | Josh Wright 26 | Jacob Rhoden 27 | Ben Frye 28 | Fred McCann 29 | Dan Simmons 30 | Muir Manders 31 | Sankar P 32 | Julien Da Silva 33 | Dan Kennedy 34 | Nick Dhupia 35 | Yasuharu Goto 36 | Jeremy Schlatter 37 | Matthias Kadenbach 38 | Dean Elbaz 39 | Mike Berman 40 | Dmitriy Fedorenko 41 | Zach Marcantel 42 | James Maloney 43 | Ashwin Purohit 44 | Dan Kinder 45 | Oliver Beattie 46 | Justin Corpron 47 | Miles Delahunty 48 | Zach Badgett 49 | Maciek Sakrejda 50 | Jeff Mitchell 51 | Baptiste Fontaine 52 | Matt Heath 53 | Jamie Cuthill 54 | Adrian Casajus 55 | John Weldon 56 | Adrien Bustany 57 | Andrey Smirnov 58 | Adam Weiner 59 | Daniel Cannon 60 | Johnny Bergström 61 | Adriano Orioli 62 | Claudiu Raveica 63 | Artem Chernyshev 64 | Ference Fu 65 | LOVOO 66 | nikandfor 67 | Anthony Woods 68 | Alexander Inozemtsev 69 | Rob McColl 70 | Viktor Tönköl 71 | Ian Lozinski 72 | Michael Highstead 73 | Sarah Brown 74 | Caleb Doxsey 75 | Frederic Hemery 76 | Pekka Enberg 77 | Mark M 78 | Bartosz Burclaf 79 | Marcus King 80 | Andrew de Andrade 81 | Robert Nix 82 | Nathan Youngman 83 | Charles Law 84 | Nathan Davies 85 | -------------------------------------------------------------------------------- /internal/lru/lru.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2015 The gocql Authors 3 | Copyright 2013 Google Inc. 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | */ 17 | 18 | // Package lru implements an LRU cache. 19 | package lru 20 | 21 | import "container/list" 22 | 23 | // Cache is an LRU cache. It is not safe for concurrent access. 24 | // 25 | // This cache has been forked from github.com/golang/groupcache/lru, but 26 | // specialized with string keys to avoid the allocations caused by wrapping them 27 | // in interface{}. 28 | type Cache struct { 29 | // MaxEntries is the maximum number of cache entries before 30 | // an item is evicted. Zero means no limit. 31 | MaxEntries int 32 | 33 | // OnEvicted optionally specifies a callback function to be 34 | // executed when an entry is purged from the cache. 35 | OnEvicted func(key string, value interface{}) 36 | 37 | ll *list.List 38 | cache map[string]*list.Element 39 | } 40 | 41 | type entry struct { 42 | key string 43 | value interface{} 44 | } 45 | 46 | // New creates a new Cache. 47 | // If maxEntries is zero, the cache has no limit and it's assumed 48 | // that eviction is done by the caller.
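// A typical round trip, for illustration (v can be any value):
//
//	c := lru.New(128)
//	c.Add("key", v)
//	if cached, ok := c.Get("key"); ok {
//		_ = cached // use the cached value
//	}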
49 | func New(maxEntries int) *Cache { 50 | return &Cache{ 51 | MaxEntries: maxEntries, 52 | ll: list.New(), 53 | cache: make(map[string]*list.Element), 54 | } 55 | } 56 | 57 | // Add adds a value to the cache. 58 | func (c *Cache) Add(key string, value interface{}) { 59 | if c.cache == nil { 60 | c.cache = make(map[string]*list.Element) 61 | c.ll = list.New() 62 | } 63 | if ee, ok := c.cache[key]; ok { 64 | c.ll.MoveToFront(ee) 65 | ee.Value.(*entry).value = value 66 | return 67 | } 68 | ele := c.ll.PushFront(&entry{key, value}) 69 | c.cache[key] = ele 70 | if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { 71 | c.RemoveOldest() 72 | } 73 | } 74 | 75 | // Get looks up a key's value from the cache. 76 | func (c *Cache) Get(key string) (value interface{}, ok bool) { 77 | if c.cache == nil { 78 | return 79 | } 80 | if ele, hit := c.cache[key]; hit { 81 | c.ll.MoveToFront(ele) 82 | return ele.Value.(*entry).value, true 83 | } 84 | return 85 | } 86 | 87 | // Remove removes the provided key from the cache. 88 | func (c *Cache) Remove(key string) bool { 89 | if c.cache == nil { 90 | return false 91 | } 92 | 93 | if ele, hit := c.cache[key]; hit { 94 | c.removeElement(ele) 95 | return true 96 | } 97 | 98 | return false 99 | } 100 | 101 | // RemoveOldest removes the oldest item from the cache. 102 | func (c *Cache) RemoveOldest() { 103 | if c.cache == nil { 104 | return 105 | } 106 | ele := c.ll.Back() 107 | if ele != nil { 108 | c.removeElement(ele) 109 | } 110 | } 111 | 112 | func (c *Cache) removeElement(e *list.Element) { 113 | c.ll.Remove(e) 114 | kv := e.Value.(*entry) 115 | delete(c.cache, kv.key) 116 | if c.OnEvicted != nil { 117 | c.OnEvicted(kv.key, kv.value) 118 | } 119 | } 120 | 121 | // Len returns the number of items in the cache. 122 | func (c *Cache) Len() int { 123 | if c.cache == nil { 124 | return 0 125 | } 126 | return c.ll.Len() 127 | } 128 | -------------------------------------------------------------------------------- /internal/streams/streams.go: -------------------------------------------------------------------------------- 1 | package streams 2 | 3 | import ( 4 | "math" 5 | "strconv" 6 | "sync/atomic" 7 | ) 8 | 9 | const bucketBits = 64 10 | 11 | // IDGenerator tracks and allocates streams which are in use. 12 | type IDGenerator struct { 13 | NumStreams int 14 | inuseStreams int32 15 | numBuckets uint32 16 | 17 | // streams is a bitset where each bit represents a stream, a 1 implies in use 18 | streams []uint64 19 | offset uint32 20 | } 21 | 22 | func New(protocol int) *IDGenerator { 23 | maxStreams := 128 24 | if protocol > 2 { 25 | maxStreams = 32768 26 | } 27 | 28 | buckets := maxStreams / 64 29 | // reserve stream 0 30 | streams := make([]uint64, buckets) 31 | streams[0] = 1 << 63 32 | 33 | return &IDGenerator{ 34 | NumStreams: maxStreams, 35 | streams: streams, 36 | numBuckets: uint32(buckets), 37 | offset: uint32(buckets) - 1, 38 | } 39 | } 40 | 41 | func streamFromBucket(bucket, streamInBucket int) int { 42 | return (bucket * bucketBits) + streamInBucket 43 | } 44 | 45 | func (s *IDGenerator) GetStream() (int, bool) { 46 | // based closely on the java-driver stream ID generator 47 | // avoid false sharing between subsequent requests.
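// Each bucket is a uint64 bitmap covering 64 streams (a set bit means in
// use). The CAS loop below first advances the shared scan offset so that
// concurrent callers start at different buckets; a second CAS then claims
// a free bit before the stream is handed out.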
48 | offset := atomic.LoadUint32(&s.offset) 49 | for !atomic.CompareAndSwapUint32(&s.offset, offset, (offset+1)%s.numBuckets) { 50 | offset = atomic.LoadUint32(&s.offset) 51 | } 52 | offset = (offset + 1) % s.numBuckets 53 | 54 | for i := uint32(0); i < s.numBuckets; i++ { 55 | pos := int((i + offset) % s.numBuckets) 56 | 57 | bucket := atomic.LoadUint64(&s.streams[pos]) 58 | if bucket == math.MaxUint64 { 59 | // all streams in use 60 | continue 61 | } 62 | 63 | for j := 0; j < bucketBits; j++ { 64 | mask := uint64(1 << streamOffset(j)) 65 | for bucket&mask == 0 { 66 | if atomic.CompareAndSwapUint64(&s.streams[pos], bucket, bucket|mask) { 67 | atomic.AddInt32(&s.inuseStreams, 1) 68 | return streamFromBucket(int(pos), j), true 69 | } 70 | bucket = atomic.LoadUint64(&s.streams[pos]) 71 | } 72 | } 73 | } 74 | 75 | return 0, false 76 | } 77 | 78 | func bitfmt(b uint64) string { 79 | return strconv.FormatUint(b, 16) 80 | } 81 | 82 | // returns the bucket offset of a given stream 83 | func bucketOffset(i int) int { 84 | return i / bucketBits 85 | } 86 | 87 | func streamOffset(stream int) uint64 { 88 | return bucketBits - uint64(stream%bucketBits) - 1 89 | } 90 | 91 | func isSet(bits uint64, stream int) bool { 92 | return bits>>streamOffset(stream)&1 == 1 93 | } 94 | 95 | func (s *IDGenerator) isSet(stream int) bool { 96 | bits := atomic.LoadUint64(&s.streams[bucketOffset(stream)]) 97 | return isSet(bits, stream) 98 | } 99 | 100 | func (s *IDGenerator) String() string { 101 | size := s.numBuckets * (bucketBits + 1) 102 | buf := make([]byte, 0, size) 103 | for i := 0; i < int(s.numBuckets); i++ { 104 | bits := atomic.LoadUint64(&s.streams[i]) 105 | buf = append(buf, bitfmt(bits)...) 106 | buf = append(buf, ' ') 107 | } 108 | return string(buf[:size-1 : size-1]) 109 | } 110 | 111 | func (s *IDGenerator) Clear(stream int) (inuse bool) { 112 | offset := bucketOffset(stream) 113 | bucket := atomic.LoadUint64(&s.streams[offset]) 114 | 115 | mask := uint64(1) << streamOffset(stream) 116 | if bucket&mask != mask { 117 | // already cleared 118 | return false 119 | } 120 | 121 | for !atomic.CompareAndSwapUint64(&s.streams[offset], bucket, bucket & ^mask) { 122 | bucket = atomic.LoadUint64(&s.streams[offset]) 123 | if bucket&mask != mask { 124 | // already cleared 125 | return false 126 | } 127 | } 128 | 129 | // TODO: make this account for 0 stream being reserved 130 | if atomic.AddInt32(&s.inuseStreams, -1) < 0 { 131 | // TODO(zariel): remove this 132 | panic("negative streams inuse") 133 | } 134 | 135 | return true 136 | } 137 | 138 | func (s *IDGenerator) Available() int { 139 | return s.NumStreams - int(atomic.LoadInt32(&s.inuseStreams)) - 1 140 | } 141 | -------------------------------------------------------------------------------- /internal/ccm/ccm.go: -------------------------------------------------------------------------------- 1 | // +build ccm 2 | 3 | package ccm 4 | 5 | import ( 6 | "bufio" 7 | "bytes" 8 | "errors" 9 | "fmt" 10 | "os/exec" 11 | "strings" 12 | ) 13 | 14 | func execCmd(args ...string) (*bytes.Buffer, error) { 15 | cmd := exec.Command("ccm", args...) 
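// Buffer stdout for the caller to parse; stderr is captured separately and
// becomes the returned error's message if ccm exits non-zero.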
16 | stdout := &bytes.Buffer{} 17 | cmd.Stdout = stdout 18 | cmd.Stderr = &bytes.Buffer{} 19 | if err := cmd.Run(); err != nil { 20 | return nil, errors.New(cmd.Stderr.(*bytes.Buffer).String()) 21 | } 22 | 23 | return stdout, nil 24 | } 25 | 26 | func AllUp() error { 27 | status, err := Status() 28 | if err != nil { 29 | return err 30 | } 31 | 32 | for _, host := range status { 33 | if !host.State.IsUp() { 34 | if err := NodeUp(host.Name); err != nil { 35 | return err 36 | } 37 | } 38 | } 39 | 40 | return nil 41 | } 42 | 43 | func NodeUp(node string) error { 44 | _, err := execCmd(node, "start", "--wait-for-binary-proto", "--wait-other-notice") 45 | return err 46 | } 47 | 48 | func NodeDown(node string) error { 49 | _, err := execCmd(node, "stop") 50 | return err 51 | } 52 | 53 | type Host struct { 54 | State NodeState 55 | Addr string 56 | Name string 57 | } 58 | 59 | type NodeState int 60 | 61 | func (n NodeState) String() string { 62 | if n == NodeStateUp { 63 | return "UP" 64 | } else if n == NodeStateDown { 65 | return "DOWN" 66 | } else { 67 | return fmt.Sprintf("UNKNOWN_STATE_%d", n) 68 | } 69 | } 70 | 71 | func (n NodeState) IsUp() bool { 72 | return n == NodeStateUp 73 | } 74 | 75 | const ( 76 | NodeStateUp NodeState = iota 77 | NodeStateDown 78 | ) 79 | 80 | func Status() (map[string]Host, error) { 81 | // TODO: parse into a struct to manipulate 82 | out, err := execCmd("status", "-v") 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | const ( 88 | stateCluster = iota 89 | stateCommas 90 | stateNode 91 | stateOption 92 | ) 93 | 94 | nodes := make(map[string]Host) 95 | // didn't really want to write a full state machine parser 96 | state := stateCluster 97 | sc := bufio.NewScanner(out) 98 | 99 | var host Host 100 | 101 | for sc.Scan() { 102 | switch state { 103 | case stateCluster: 104 | text := sc.Text() 105 | if !strings.HasPrefix(text, "Cluster:") { 106 | return nil, fmt.Errorf("expected 'Cluster:' got %q", text) 107 | } 108 | state = stateCommas 109 | case stateCommas: 110 | text := sc.Text() 111 | if !strings.HasPrefix(text, "-") { 112 | return nil, fmt.Errorf("expected commas got %q", text) 113 | } 114 | state = stateNode 115 | case stateNode: 116 | // assume nodes start with node 117 | text := sc.Text() 118 | if !strings.HasPrefix(text, "node") { 119 | return nil, fmt.Errorf("expected 'node' got %q", text) 120 | } 121 | line := strings.Split(text, ":") 122 | host.Name = line[0] 123 | 124 | nodeState := strings.TrimSpace(line[1]) 125 | switch nodeState { 126 | case "UP": 127 | host.State = NodeStateUp 128 | case "DOWN": 129 | host.State = NodeStateDown 130 | default: 131 | return nil, fmt.Errorf("unknown node state from ccm: %q", nodeState) 132 | } 133 | 134 | state = stateOption 135 | case stateOption: 136 | text := sc.Text() 137 | if text == "" { 138 | state = stateNode 139 | nodes[host.Name] = host 140 | host = Host{} 141 | continue 142 | } 143 | 144 | line := strings.Split(strings.TrimSpace(text), "=") 145 | k, v := line[0], line[1] 146 | if k == "binary" { 147 | // could check errors 148 | // ('127.0.0.1', 9042) 149 | v = v[2:] // ('' 150 | if i := strings.IndexByte(v, '\''); i < 0 { 151 | return nil, fmt.Errorf("invalid binary v=%q", v) 152 | } else { 153 | host.Addr = v[:i] 154 | // don't need the port 155 | } 156 | } 157 | default: 158 | return nil, fmt.Errorf("unexpected state: %d", state) 159 | } 160 | } 161 | 162 | if err := sc.Err(); err != nil { 163 | return nil, fmt.Errorf("unable to parse ccm status: %v", err) 164 | } 165 | 166 | return nodes, nil 167 | } 168 | 
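The helpers above compose naturally into fault-injection style integration tests. A minimal sketch of a rolling restart follows; the test name, package layout and error messages are illustrative assumptions, not part of this repository:

// +build ccm

package ccm_test

import (
	"testing"

	"github.com/gocql/gocql/internal/ccm"
)

// TestRollingRestart bounces each node in turn. NodeUp blocks until the
// binary protocol is reachable again ("--wait-for-binary-proto").
func TestRollingRestart(t *testing.T) {
	status, err := ccm.Status()
	if err != nil {
		t.Fatal(err)
	}

	for name := range status {
		if err := ccm.NodeDown(name); err != nil {
			t.Fatalf("stopping %s: %v", name, err)
		}
		if err := ccm.NodeUp(name); err != nil {
			t.Fatalf("restarting %s: %v", name, err)
		}
	}
}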
-------------------------------------------------------------------------------- /testdata/pki/gocql.crt: -------------------------------------------------------------------------------- 1 | Certificate: 2 | Data: 3 | Version: 3 (0x2) 4 | Serial Number: 1 (0x1) 5 | Signature Algorithm: sha256WithRSAEncryption 6 | Issuer: CN=cassandra 7 | Validity 8 | Not Before: Sep 19 21:18:33 2014 GMT 9 | Not After : Sep 16 21:18:33 2024 GMT 10 | Subject: CN=gocql 11 | Subject Public Key Info: 12 | Public Key Algorithm: rsaEncryption 13 | RSA Public Key: (2048 bit) 14 | Modulus (2048 bit): 15 | 00:ae:e9:fa:9e:fd:e2:69:85:1d:08:0f:35:68:bc: 16 | 63:7b:92:50:7f:73:50:fc:42:43:35:06:b3:5c:9e: 17 | 27:1e:16:05:69:ec:88:d5:9c:4f:ef:e8:13:69:7a: 18 | b5:b3:7f:66:6d:14:00:2e:d6:af:5b:ff:2c:90:91: 19 | a6:11:07:72:5e:b0:37:c0:6d:ff:7b:76:2b:fe:de: 20 | 4c:d2:8d:ce:43:3b:1a:c4:1d:de:b6:d8:26:08:25: 21 | 89:59:a1:4b:94:a3:57:9e:19:46:28:6e:97:11:7c: 22 | e6:b7:41:96:8f:42:dd:66:da:86:d2:53:dd:d8:f5: 23 | 20:cd:24:8b:0f:ab:df:c4:10:b2:64:20:1d:e0:0f: 24 | f4:2d:f6:ca:94:be:83:ac:3e:a8:4a:77:b6:08:97: 25 | 3a:7e:7b:e0:3e:ab:68:cf:ee:f6:a1:8e:bf:ec:be: 26 | 06:d1:ad:6c:ed:4f:35:d1:04:97:08:33:b1:65:5b: 27 | 61:32:8d:4b:f0:30:35:4b:8b:6b:06:f2:1a:72:8c: 28 | 69:bd:f3:b2:c4:a4:a4:70:45:e3:67:a2:7a:9f:2e: 29 | cb:28:2d:9f:68:03:f1:c7:d9:4f:83:c9:3d:8c:34: 30 | 04:0a:3b:13:87:92:e1:f7:e3:79:7e:ab:c0:25:b1: 31 | e5:38:09:44:3e:31:df:12:d4:dc:7b:0e:35:bf:ee: 32 | 25:5f 33 | Exponent: 65537 (0x10001) 34 | X509v3 extensions: 35 | X509v3 Basic Constraints: 36 | CA:FALSE 37 | X509v3 Subject Key Identifier: 38 | 9F:F1:B2:C4:82:34:D0:2F:FF:E9:7F:19:F1:3B:51:57:BF:E8:95:BB 39 | X509v3 Authority Key Identifier: 40 | keyid:8C:7C:E7:D2:76:05:89:71:1A:23:5B:D4:59:B4:51:E2:5C:0A:5C:E8 41 | DirName:/CN=cassandra 42 | serial:82:9B:01:78:1E:9B:0B:23 43 | 44 | X509v3 Extended Key Usage: 45 | TLS Web Client Authentication 46 | X509v3 Key Usage: 47 | Digital Signature 48 | Signature Algorithm: sha256WithRSAEncryption 49 | 12:aa:1b:a6:58:27:52:32:c9:46:19:32:d3:69:ae:95:ad:23: 50 | 55:ad:12:65:da:2c:4c:72:f3:29:bd:2b:5a:97:3b:b7:68:8b: 51 | 68:80:77:55:e6:32:81:f1:f5:20:54:ba:0e:2b:86:90:d8:44: 52 | cf:f2:9f:ec:4d:39:67:4e:36:6c:9b:49:4a:80:e6:c1:ed:a4: 53 | 41:39:19:16:d2:88:df:17:0c:46:5a:b9:88:53:f5:67:19:f0: 54 | 1f:9a:51:40:1b:40:12:bc:57:db:de:dd:d3:f5:a8:93:68:30: 55 | ac:ba:4e:ee:6b:af:f8:13:3d:11:1a:fa:90:93:d0:68:ce:77: 56 | 5f:85:8b:a4:95:2a:4c:25:7b:53:9c:44:43:b1:d9:fe:0c:83: 57 | b8:19:2a:88:cc:d8:d1:d9:b3:04:eb:45:9b:30:5e:cb:61:e0: 58 | e1:88:23:9c:b0:34:79:62:82:0d:f8:10:ed:96:bb:a0:fd:0d: 59 | 02:cb:c5:d3:47:1f:35:a7:e3:39:31:56:d5:b3:eb:2f:93:8f: 60 | 18:b4:b7:3c:00:03:a7:b4:1c:17:72:91:7e:b6:f6:36:17:3d: 61 | f6:54:3b:87:84:d1:9b:43:d1:88:42:64:20:7a:e3:cc:f7:05: 62 | 98:0e:1c:51:da:20:b7:9b:49:88:e8:c6:e1:de:0d:f5:56:4f: 63 | 79:41:d0:7f 64 | -----BEGIN CERTIFICATE----- 65 | MIIDNTCCAh2gAwIBAgIBATANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwljYXNz 66 | YW5kcmEwHhcNMTQwOTE5MjExODMzWhcNMjQwOTE2MjExODMzWjAQMQ4wDAYDVQQD 67 | EwVnb2NxbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK7p+p794mmF 68 | HQgPNWi8Y3uSUH9zUPxCQzUGs1yeJx4WBWnsiNWcT+/oE2l6tbN/Zm0UAC7Wr1v/ 69 | LJCRphEHcl6wN8Bt/3t2K/7eTNKNzkM7GsQd3rbYJggliVmhS5SjV54ZRihulxF8 70 | 5rdBlo9C3WbahtJT3dj1IM0kiw+r38QQsmQgHeAP9C32ypS+g6w+qEp3tgiXOn57 71 | 4D6raM/u9qGOv+y+BtGtbO1PNdEElwgzsWVbYTKNS/AwNUuLawbyGnKMab3zssSk 72 | pHBF42eiep8uyygtn2gD8cfZT4PJPYw0BAo7E4eS4ffjeX6rwCWx5TgJRD4x3xLU 73 | 3HsONb/uJV8CAwEAAaOBlTCBkjAJBgNVHRMEAjAAMB0GA1UdDgQWBBSf8bLEgjTQ 74 | 
L//pfxnxO1FXv+iVuzBEBgNVHSMEPTA7gBSMfOfSdgWJcRojW9RZtFHiXApc6KEY 75 | pBYwFDESMBAGA1UEAxMJY2Fzc2FuZHJhggkAgpsBeB6bCyMwEwYDVR0lBAwwCgYI 76 | KwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBCwUAA4IBAQASqhumWCdS 77 | MslGGTLTaa6VrSNVrRJl2ixMcvMpvStalzu3aItogHdV5jKB8fUgVLoOK4aQ2ETP 78 | 8p/sTTlnTjZsm0lKgObB7aRBORkW0ojfFwxGWrmIU/VnGfAfmlFAG0ASvFfb3t3T 79 | 9aiTaDCsuk7ua6/4Ez0RGvqQk9BozndfhYuklSpMJXtTnERDsdn+DIO4GSqIzNjR 80 | 2bME60WbMF7LYeDhiCOcsDR5YoIN+BDtlrug/Q0Cy8XTRx81p+M5MVbVs+svk48Y 81 | tLc8AAOntBwXcpF+tvY2Fz32VDuHhNGbQ9GIQmQgeuPM9wWYDhxR2iC3m0mI6Mbh 82 | 3g31Vk95QdB/ 83 | -----END CERTIFICATE----- 84 | -------------------------------------------------------------------------------- /testdata/pki/cassandra.crt: -------------------------------------------------------------------------------- 1 | Certificate: 2 | Data: 3 | Version: 3 (0x2) 4 | Serial Number: 2 (0x2) 5 | Signature Algorithm: sha256WithRSAEncryption 6 | Issuer: CN=cassandra 7 | Validity 8 | Not Before: Sep 19 21:18:48 2014 GMT 9 | Not After : Sep 16 21:18:48 2024 GMT 10 | Subject: CN=cassandra 11 | Subject Public Key Info: 12 | Public Key Algorithm: rsaEncryption 13 | RSA Public Key: (2048 bit) 14 | Modulus (2048 bit): 15 | 00:e5:9c:20:9e:de:98:73:44:41:0d:37:4c:62:c3: 16 | 9f:87:5f:9b:4f:aa:cf:f6:90:6e:a5:e0:89:88:7a: 17 | 00:c6:bb:d7:80:87:69:2e:fa:f0:35:59:80:6e:82: 18 | 25:c8:b3:6c:f6:a4:97:97:93:93:ea:f0:70:70:a4: 19 | e1:b7:aa:da:c1:99:66:9b:93:04:3a:ce:0b:83:07: 20 | 06:22:3d:a6:db:7f:68:0f:49:80:bd:86:a8:bb:54: 21 | 6d:38:5f:0f:b0:fa:1b:97:24:ae:cc:9d:37:98:7e: 22 | 76:cc:e3:1b:45:1b:21:25:17:02:c0:1a:c5:fb:76: 23 | c3:8b:93:d7:c5:85:14:0a:5c:a4:12:e7:18:69:98: 24 | f5:76:cd:78:cd:99:5a:29:65:f1:68:20:97:d3:be: 25 | 09:b3:68:1b:f2:a3:a2:9a:73:58:53:7e:ed:86:32: 26 | a3:5a:d5:46:03:f9:b3:b4:ec:63:71:ba:bb:fb:6f: 27 | f9:82:63:e4:55:47:7a:7a:e4:7b:17:6b:d7:e6:cf: 28 | 3b:c9:ab:0c:30:15:c9:ed:c7:d6:fc:b6:72:b2:14: 29 | 7d:c7:f3:7f:8a:f4:63:70:64:8e:0f:db:e8:3a:45: 30 | 47:cd:b9:7b:ae:c8:31:c1:52:d1:3e:34:12:b7:73: 31 | e7:ba:89:86:9a:36:ed:a0:5a:69:d0:d4:e3:b6:16: 32 | 85:af 33 | Exponent: 65537 (0x10001) 34 | X509v3 extensions: 35 | X509v3 Basic Constraints: 36 | CA:FALSE 37 | X509v3 Subject Key Identifier: 38 | 4A:D3:EC:63:07:E0:8F:1A:4E:F5:09:43:90:9F:7A:C5:31:D1:8F:D8 39 | X509v3 Authority Key Identifier: 40 | keyid:8C:7C:E7:D2:76:05:89:71:1A:23:5B:D4:59:B4:51:E2:5C:0A:5C:E8 41 | DirName:/CN=cassandra 42 | serial:82:9B:01:78:1E:9B:0B:23 43 | 44 | X509v3 Extended Key Usage: 45 | TLS Web Server Authentication 46 | X509v3 Key Usage: 47 | Digital Signature, Key Encipherment 48 | Signature Algorithm: sha256WithRSAEncryption 49 | ac:bc:80:82:2d:6d:f1:a0:46:eb:00:05:d2:25:9a:83:66:57: 50 | 40:51:6e:ff:db:e3:28:04:7b:16:63:74:ec:55:a0:c0:5b:47: 51 | 13:e1:5a:a5:6d:22:d0:e5:fe:c1:51:e8:f6:c6:9c:f9:be:b7: 52 | be:82:14:e4:a0:b2:0b:9f:ee:68:bc:ac:17:0d:13:50:c6:9e: 53 | 52:91:8c:a0:98:db:4e:2d:f6:3d:6e:85:0a:bb:b9:dd:01:bf: 54 | ad:52:dd:6e:e4:41:01:a5:93:58:dd:3f:cf:bf:15:e6:25:aa: 55 | a0:4f:98:0d:75:8a:3f:5b:ba:67:37:f6:b1:0b:3f:21:34:97: 56 | 50:9a:85:97:2b:b6:05:41:9a:f3:cf:c4:92:23:06:ab:3e:87: 57 | 98:30:eb:cb:d3:83:ab:04:7d:5c:b9:f0:12:d1:43:b3:c5:7d: 58 | 33:9a:2e:2b:80:3a:66:be:f1:8c:08:37:7a:93:9c:9b:60:60: 59 | 53:71:16:70:86:df:ca:5f:a9:0b:e2:8b:3d:af:02:62:3b:61: 60 | 30:da:53:89:e3:d8:0b:88:04:9a:93:6a:f6:28:f8:dd:0d:8f: 61 | 0c:82:5b:c0:e5:f8:0d:ad:06:76:a7:3b:4b:ae:54:37:25:15: 62 | f5:0c:67:0f:77:c5:c4:97:68:09:c3:02:a7:a0:46:10:1c:d1: 63 | 95:3a:4c:94 64 | -----BEGIN CERTIFICATE----- 65 | 
MIIDOTCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwljYXNz 66 | YW5kcmEwHhcNMTQwOTE5MjExODQ4WhcNMjQwOTE2MjExODQ4WjAUMRIwEAYDVQQD 67 | EwljYXNzYW5kcmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDlnCCe 68 | 3phzREENN0xiw5+HX5tPqs/2kG6l4ImIegDGu9eAh2ku+vA1WYBugiXIs2z2pJeX 69 | k5Pq8HBwpOG3qtrBmWabkwQ6zguDBwYiPabbf2gPSYC9hqi7VG04Xw+w+huXJK7M 70 | nTeYfnbM4xtFGyElFwLAGsX7dsOLk9fFhRQKXKQS5xhpmPV2zXjNmVopZfFoIJfT 71 | vgmzaBvyo6Kac1hTfu2GMqNa1UYD+bO07GNxurv7b/mCY+RVR3p65HsXa9fmzzvJ 72 | qwwwFcntx9b8tnKyFH3H83+K9GNwZI4P2+g6RUfNuXuuyDHBUtE+NBK3c+e6iYaa 73 | Nu2gWmnQ1OO2FoWvAgMBAAGjgZUwgZIwCQYDVR0TBAIwADAdBgNVHQ4EFgQUStPs 74 | YwfgjxpO9QlDkJ96xTHRj9gwRAYDVR0jBD0wO4AUjHzn0nYFiXEaI1vUWbRR4lwK 75 | XOihGKQWMBQxEjAQBgNVBAMTCWNhc3NhbmRyYYIJAIKbAXgemwsjMBMGA1UdJQQM 76 | MAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDANBgkqhkiG9w0BAQsFAAOCAQEArLyA 77 | gi1t8aBG6wAF0iWag2ZXQFFu/9vjKAR7FmN07FWgwFtHE+FapW0i0OX+wVHo9sac 78 | +b63voIU5KCyC5/uaLysFw0TUMaeUpGMoJjbTi32PW6FCru53QG/rVLdbuRBAaWT 79 | WN0/z78V5iWqoE+YDXWKP1u6Zzf2sQs/ITSXUJqFlyu2BUGa88/EkiMGqz6HmDDr 80 | y9ODqwR9XLnwEtFDs8V9M5ouK4A6Zr7xjAg3epOcm2BgU3EWcIbfyl+pC+KLPa8C 81 | YjthMNpTiePYC4gEmpNq9ij43Q2PDIJbwOX4Da0Gdqc7S65UNyUV9QxnD3fFxJdo 82 | CcMCp6BGEBzRlTpMlA== 83 | -----END CERTIFICATE----- 84 | -------------------------------------------------------------------------------- /common_test.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "strings" 8 | "sync" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | var ( 14 | flagCluster = flag.String("cluster", "127.0.0.1", "a comma-separated list of host:port tuples") 15 | flagProto = flag.Int("proto", 2, "protocol version") 16 | flagCQL = flag.String("cql", "3.0.0", "CQL version") 17 | flagRF = flag.Int("rf", 1, "replication factor for test keyspace") 18 | clusterSize = flag.Int("clusterSize", 1, "the expected size of the cluster") 19 | flagRetry = flag.Int("retries", 5, "number of times to retry queries") 20 | flagAutoWait = flag.Duration("autowait", 1000*time.Millisecond, "time to wait for autodiscovery to fill the host pool") 21 | flagRunSslTest = flag.Bool("runssl", false, "Set to true to run ssl test") 22 | flagRunAuthTest = flag.Bool("runauth", false, "Set to true to run authentication test") 23 | flagCompressTest = flag.String("compressor", "", "compressor to use") 24 | flagTimeout = flag.Duration("gocql.timeout", 5*time.Second, "sets the connection `timeout` for all operations") 25 | 26 | flagCassVersion cassVersion 27 | clusterHosts []string 28 | ) 29 | 30 | func init() { 31 | flag.Var(&flagCassVersion, "gocql.cversion", "the cassandra version being tested against") 32 | 33 | flag.Parse() 34 | clusterHosts = strings.Split(*flagCluster, ",") 35 | log.SetFlags(log.Lshortfile | log.LstdFlags) 36 | } 37 | 38 | func addSslOptions(cluster *ClusterConfig) *ClusterConfig { 39 | if *flagRunSslTest { 40 | cluster.SslOpts = &SslOptions{ 41 | CertPath: "testdata/pki/gocql.crt", 42 | KeyPath: "testdata/pki/gocql.key", 43 | CaPath: "testdata/pki/ca.crt", 44 | EnableHostVerification: false, 45 | } 46 | } 47 | return cluster 48 | } 49 | 50 | var initOnce sync.Once 51 | 52 | func createTable(s *Session, table string) error { 53 | // let's just be really sure 54 | if err := s.control.awaitSchemaAgreement(); err != nil { 55 | log.Printf("error waiting for schema agreement pre create table=%q err=%v\n", table, err) 56 | return err 57 | } 58 | 59 | if err := s.Query(table).RetryPolicy(nil).Exec(); err != nil { 60 | 
log.Printf("error creating table table=%q err=%v\n", table, err) 61 | return err 62 | } 63 | 64 | if err := s.control.awaitSchemaAgreement(); err != nil { 65 | log.Printf("error waiting for schema agreement post create table=%q err=%v\n", table, err) 66 | return err 67 | } 68 | 69 | return nil 70 | } 71 | 72 | func createCluster() *ClusterConfig { 73 | cluster := NewCluster(clusterHosts...) 74 | cluster.ProtoVersion = *flagProto 75 | cluster.CQLVersion = *flagCQL 76 | cluster.Timeout = *flagTimeout 77 | cluster.Consistency = Quorum 78 | cluster.MaxWaitSchemaAgreement = 2 * time.Minute // travis might be slow 79 | if *flagRetry > 0 { 80 | cluster.RetryPolicy = &SimpleRetryPolicy{NumRetries: *flagRetry} 81 | } 82 | 83 | switch *flagCompressTest { 84 | case "snappy": 85 | cluster.Compressor = &SnappyCompressor{} 86 | case "": 87 | default: 88 | panic("invalid compressor: " + *flagCompressTest) 89 | } 90 | 91 | cluster = addSslOptions(cluster) 92 | return cluster 93 | } 94 | 95 | func createKeyspace(tb testing.TB, cluster *ClusterConfig, keyspace string) { 96 | c := *cluster 97 | c.Keyspace = "system" 98 | c.Timeout = 30 * time.Second 99 | session, err := c.CreateSession() 100 | if err != nil { 101 | panic(err) 102 | } 103 | defer session.Close() 104 | defer log.Println("closing keyspace session") 105 | 106 | err = createTable(session, `DROP KEYSPACE IF EXISTS `+keyspace) 107 | if err != nil { 108 | panic(fmt.Sprintf("unable to drop keyspace: %v", err)) 109 | } 110 | 111 | err = createTable(session, fmt.Sprintf(`CREATE KEYSPACE %s 112 | WITH replication = { 113 | 'class' : 'SimpleStrategy', 114 | 'replication_factor' : %d 115 | }`, keyspace, *flagRF)) 116 | 117 | if err != nil { 118 | panic(fmt.Sprintf("unable to create keyspace: %v", err)) 119 | } 120 | } 121 | 122 | func createSessionFromCluster(cluster *ClusterConfig, tb testing.TB) *Session { 123 | // Drop and re-create the keyspace once. Different tests should use their own 124 | // individual tables, but can assume that the table does not exist before. 
125 | initOnce.Do(func() { 126 | createKeyspace(tb, cluster, "gocql_test") 127 | }) 128 | 129 | cluster.Keyspace = "gocql_test" 130 | session, err := cluster.CreateSession() 131 | if err != nil { 132 | tb.Fatal("createSession:", err) 133 | } 134 | 135 | if err := session.control.awaitSchemaAgreement(); err != nil { 136 | tb.Fatal(err) 137 | } 138 | 139 | return session 140 | } 141 | 142 | func createSession(tb testing.TB) *Session { 143 | cluster := createCluster() 144 | return createSessionFromCluster(cluster, tb) 145 | } 146 | -------------------------------------------------------------------------------- /internal/streams/streams_test.go: -------------------------------------------------------------------------------- 1 | package streams 2 | 3 | import ( 4 | "math" 5 | "strconv" 6 | "sync/atomic" 7 | "testing" 8 | ) 9 | 10 | func TestUsesAllStreams(t *testing.T) { 11 | streams := New(1) 12 | 13 | got := make(map[int]struct{}) 14 | 15 | for i := 1; i < streams.NumStreams; i++ { 16 | stream, ok := streams.GetStream() 17 | if !ok { 18 | t.Fatalf("unable to get stream %d", i) 19 | } 20 | 21 | if _, ok = got[stream]; ok { 22 | t.Fatalf("got an already allocated stream: %d", stream) 23 | } 24 | got[stream] = struct{}{} 25 | 26 | if !streams.isSet(stream) { 27 | bucket := atomic.LoadUint64(&streams.streams[bucketOffset(stream)]) 28 | t.Logf("bucket=%d: %s\n", bucket, strconv.FormatUint(bucket, 2)) 29 | t.Fatalf("stream not set: %d", stream) 30 | } 31 | } 32 | 33 | for i := 1; i < streams.NumStreams; i++ { 34 | if _, ok := got[i]; !ok { 35 | t.Errorf("did not use stream %d", i) 36 | } 37 | } 38 | if _, ok := got[0]; ok { 39 | t.Fatal("expected to not use stream 0") 40 | } 41 | 42 | for i, bucket := range streams.streams { 43 | if bucket != math.MaxUint64 { 44 | t.Errorf("did not use all streams in offset=%d bucket=%s", i, bitfmt(bucket)) 45 | } 46 | } 47 | } 48 | 49 | func TestFullStreams(t *testing.T) { 50 | streams := New(1) 51 | for i := range streams.streams { 52 | streams.streams[i] = math.MaxUint64 53 | } 54 | 55 | stream, ok := streams.GetStream() 56 | if ok { 57 | t.Fatalf("should not get stream when all in use: stream=%d", stream) 58 | } 59 | } 60 | 61 | func TestClearStreams(t *testing.T) { 62 | streams := New(1) 63 | for i := range streams.streams { 64 | streams.streams[i] = math.MaxUint64 65 | } 66 | streams.inuseStreams = int32(streams.NumStreams) 67 | 68 | for i := 0; i < streams.NumStreams; i++ { 69 | streams.Clear(i) 70 | } 71 | 72 | for i, bucket := range streams.streams { 73 | if bucket != 0 { 74 | t.Errorf("did not clear streams in offset=%d bucket=%s", i, bitfmt(bucket)) 75 | } 76 | } 77 | } 78 | 79 | func TestDoubleClear(t *testing.T) { 80 | streams := New(1) 81 | stream, ok := streams.GetStream() 82 | if !ok { 83 | t.Fatal("did not get stream") 84 | } 85 | 86 | if !streams.Clear(stream) { 87 | t.Fatalf("stream not indicated as in use: %d", stream) 88 | } 89 | if streams.Clear(stream) { 90 | t.Fatalf("stream not as in use after clear: %d", stream) 91 | } 92 | } 93 | 94 | func BenchmarkConcurrentUse(b *testing.B) { 95 | streams := New(2) 96 | 97 | b.RunParallel(func(pb *testing.PB) { 98 | for pb.Next() { 99 | stream, ok := streams.GetStream() 100 | if !ok { 101 | b.Error("unable to get stream") 102 | return 103 | } 104 | 105 | if !streams.Clear(stream) { 106 | b.Errorf("stream was already cleared: %d", stream) 107 | return 108 | } 109 | } 110 | }) 111 | } 112 | 113 | func TestStreamOffset(t *testing.T) { 114 | tests := [...]struct { 115 | n int 116 | off uint64 117 | }{ 118 
| {0, 63}, 119 | {1, 62}, 120 | {2, 61}, 121 | {3, 60}, 122 | {63, 0}, 123 | {64, 63}, 124 | 125 | {128, 63}, 126 | } 127 | 128 | for _, test := range tests { 129 | if off := streamOffset(test.n); off != test.off { 130 | t.Errorf("n=%d expected %d got %d", test.n, test.off, off) 131 | } 132 | } 133 | } 134 | 135 | func TestIsSet(t *testing.T) { 136 | tests := [...]struct { 137 | stream int 138 | bucket uint64 139 | set bool 140 | }{ 141 | {0, 0, false}, 142 | {0, 1 << 63, true}, 143 | {1, 0, false}, 144 | {1, 1 << 62, true}, 145 | {63, 1, true}, 146 | {64, 1 << 63, true}, 147 | {0, 0x8000000000000000, true}, 148 | } 149 | 150 | for i, test := range tests { 151 | if set := isSet(test.bucket, test.stream); set != test.set { 152 | t.Errorf("[%d] stream=%d expected %v got %v", i, test.stream, test.set, set) 153 | } 154 | } 155 | 156 | for i := 0; i < bucketBits; i++ { 157 | if !isSet(math.MaxUint64, i) { 158 | var shift uint64 = math.MaxUint64 >> streamOffset(i) 159 | t.Errorf("expected isSet to hold for i=%d shift=%d", i, shift) 160 | } 161 | } 162 | } 163 | 164 | func TestBucketOffset(t *testing.T) { 165 | tests := [...]struct { 166 | n int 167 | bucket int 168 | }{ 169 | {0, 0}, 170 | {1, 0}, 171 | {63, 0}, 172 | {64, 1}, 173 | } 174 | 175 | for _, test := range tests { 176 | if bucket := bucketOffset(test.n); bucket != test.bucket { 177 | t.Errorf("n=%d expected %v got %v", test.n, test.bucket, bucket) 178 | } 179 | } 180 | } 181 | 182 | func TestStreamFromBucket(t *testing.T) { 183 | tests := [...]struct { 184 | bucket int 185 | pos int 186 | stream int 187 | }{ 188 | {0, 0, 0}, 189 | {0, 1, 1}, 190 | {0, 2, 2}, 191 | {0, 63, 63}, 192 | {1, 0, 64}, 193 | {1, 1, 65}, 194 | } 195 | 196 | for _, test := range tests { 197 | if stream := streamFromBucket(test.bucket, test.pos); stream != test.stream { 198 | t.Errorf("bucket=%d pos=%d expected %v got %v", test.bucket, test.pos, test.stream, stream) 199 | } 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to gocql 2 | 3 | **TL;DR** - this manifesto sets out the bare minimum requirements for submitting a patch to gocql. 4 | 5 | This guide outlines the process of landing patches in gocql and the general approach to maintaining the code base. 6 | 7 | ## Background 8 | 9 | The goal of the gocql project is to provide a stable and robust CQL driver for Go. gocql is a community driven project that is coordinated by a small team of core developers. 10 | 11 | ## Minimum Requirement Checklist 12 | 13 | The following is a checklist of requirements that need to be satisfied in order for us to merge your patch: 14 | 15 | * You should raise a pull request to gocql/gocql on GitHub 16 | * The pull request has a title that clearly summarizes the purpose of the patch 17 | * The motivation behind the patch is clearly defined in the pull request summary 18 | * Your name and email have been added to the `AUTHORS` file (for copyright purposes) 19 | * The patch will merge cleanly 20 | * The test coverage does not fall below the critical threshold (currently 64%) 21 | * The merge commit passes the regression test suite on Travis 22 | * `go fmt` has been applied to the submitted code 23 | * Functional changes (i.e. 
new features or changed behavior) are appropriately documented, either as a godoc or in the README (non-functional changes such as bug fixes may not require documentation) 24 | 25 | If there are any requirements that can't be reasonably satisfied, please state this either on the pull request or as part of discussion on the mailing list. Where appropriate, the core team may apply discretion and make an exception to these requirements. 26 | 27 | ## Beyond The Checklist 28 | 29 | In addition to stating the hard requirements, there are a bunch of things that we consider when assessing changes to the library. These soft requirements are helpful pointers of how to get a patch landed quicker and with less fuss. 30 | 31 | ### General QA Approach 32 | 33 | The gocql team needs to consider the ongoing maintainability of the library at all times. Patches that look like they will introduce maintenance issues for the team will not be accepted. 34 | 35 | Your patch will get merged quicker if you have decent test cases that provide test coverage for the new behavior you wish to introduce. 36 | 37 | Unit tests are good, integration tests are even better. An example of a unit test is `marshal_test.go` - this tests the serialization code in isolation. `cassandra_test.go` is an integration test suite that is executed against every version of Cassandra that gocql supports as part of the CI process on Travis. 38 | 39 | That said, the point of writing tests is to provide a safety net to catch regressions, so there is no need to go overboard with tests. Remember that the more tests you write, the more code we will have to maintain. So there's a balance to strike there. 40 | 41 | ### When It's Too Difficult To Automate Testing 42 | 43 | There are legitimate examples of where it is infeasible to write a regression test for a change. Never fear, we will still consider the patch and quite possibly accept the change without a test. The gocql team takes a pragmatic approach to testing. At the end of the day, you could be addressing an issue that is too difficult to reproduce in a test suite, but still occurs in a real production app. In this case, your production app is the test case, and we will have to trust that your change is good. 44 | 45 | Examples of pull requests that have been accepted without tests include: 46 | 47 | * https://github.com/gocql/gocql/pull/181 - this patch would otherwise require a multi-node cluster to be booted as part of the CI build 48 | * https://github.com/gocql/gocql/pull/179 - this bug can only be reproduced under heavy load in certain circumstances 49 | 50 | ### Sign Off Procedure 51 | 52 | Generally speaking, a pull request can get merged by any one of the core gocql team. If your change is minor, chances are that one team member will just go ahead and merge it there and then. As stated earlier, suitable test coverage will increase the likelihood that a single reviewer will assess and merge your change. If your change has no test coverage, or looks like it may have wider implications for the health and stability of the library, the reviewer may elect to refer the change to another team member to achieve consensus before proceeding. Therefore, the tighter and cleaner your patch is, the quicker it will go through the review process. 53 | 54 | ### Supported Features 55 | 56 | gocql is a low level wire driver for Cassandra CQL. By and large, we would like to keep the functional scope of the library as narrow as possible. 
We think that gocql should be tight and focused, and we will be naturally skeptical of things that could just as easily be implemented in a higher layer. Inevitably you will come across something that could be implemented in a higher layer, save for a minor change to the core API. In this instance, please strike up a conversation with the gocql team. Chances are we will understand what you are trying to achieve and will try to accommodate this in a maintainable way. 57 | 58 | ### Longer Term Evolution 59 | 60 | There are some long term plans for gocql that have to be taken into account when assessing changes. That said, gocql is ultimately a community driven project and we don't have a massive development budget, so sometimes the long term view might need to be de-prioritized ahead of short term changes. 61 | 62 | ## Officially Supported Server Versions 63 | 64 | Currently, the officially supported versions of the Cassandra server include: 65 | 66 | * 1.2.18 67 | * 2.0.9 68 | 69 | Chances are that gocql will work with many other versions. If you would like us to support a particular version of Cassandra, please start a conversation about what version you'd like us to consider. We are more likely to accept a new version if you help out by extending the regression suite to cover the new version to be supported. 70 | 71 | ## The Core Dev Team 72 | 73 | The core development team includes: 74 | 75 | * tux21b 76 | * phillipCouto 77 | * Zariel 78 | * 0x6e6562 79 | -------------------------------------------------------------------------------- /token.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015 The gocql Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package gocql 6 | 7 | import ( 8 | "bytes" 9 | "crypto/md5" 10 | "fmt" 11 | "math/big" 12 | "sort" 13 | "strconv" 14 | "strings" 15 | 16 | "github.com/gocql/gocql/internal/murmur" 17 | ) 18 | 19 | // a token partitioner 20 | type partitioner interface { 21 | Name() string 22 | Hash([]byte) token 23 | ParseString(string) token 24 | } 25 | 26 | // a token 27 | type token interface { 28 | fmt.Stringer 29 | Less(token) bool 30 | } 31 | 32 | // murmur3 partitioner and token 33 | type murmur3Partitioner struct{} 34 | type murmur3Token int64 35 | 36 | func (p murmur3Partitioner) Name() string { 37 | return "Murmur3Partitioner" 38 | } 39 | 40 | func (p murmur3Partitioner) Hash(partitionKey []byte) token { 41 | h1 := murmur.Murmur3H1(partitionKey) 42 | return murmur3Token(int64(h1)) 43 | } 44 | 45 | // murmur3 little-endian, 128-bit hash, but returns only h1 46 | func (p murmur3Partitioner) ParseString(str string) token { 47 | val, _ := strconv.ParseInt(str, 10, 64) 48 | return murmur3Token(val) 49 | } 50 | 51 | func (m murmur3Token) String() string { 52 | return strconv.FormatInt(int64(m), 10) 53 | } 54 | 55 | func (m murmur3Token) Less(token token) bool { 56 | return m < token.(murmur3Token) 57 | } 58 | 59 | // order preserving partitioner and token 60 | type orderedPartitioner struct{} 61 | type orderedToken []byte 62 | 63 | func (p orderedPartitioner) Name() string { 64 | return "OrderedPartitioner" 65 | } 66 | 67 | func (p orderedPartitioner) Hash(partitionKey []byte) token { 68 | // the partition key is the token 69 | return orderedToken(partitionKey) 70 | } 71 | 72 | func (p orderedPartitioner) ParseString(str string) token { 73 | return orderedToken([]byte(str)) 74 | } 75 | 76 | func (o orderedToken) String() string { 77 | return string([]byte(o)) 78 | } 79 | 80 | func (o orderedToken) Less(token token) bool { 81 | return -1 == bytes.Compare(o, token.(orderedToken)) 82 | } 83 | 84 | // random partitioner and token 85 | type randomPartitioner struct{} 86 | type randomToken big.Int 87 | 88 | func (r randomPartitioner) Name() string { 89 | return "RandomPartitioner" 90 | } 91 | 92 | // 2 ** 128 93 | var maxHashInt, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10) 94 | 95 | func (p randomPartitioner) Hash(partitionKey []byte) token { 96 | sum := md5.Sum(partitionKey) 97 | val := new(big.Int) 98 | val.SetBytes(sum[:]) 99 | if sum[0] > 127 { 100 | val.Sub(val, maxHashInt) 101 | val.Abs(val) 102 | } 103 | 104 | return (*randomToken)(val) 105 | } 106 | 107 | func (p randomPartitioner) ParseString(str string) token { 108 | val := new(big.Int) 109 | val.SetString(str, 10) 110 | return (*randomToken)(val) 111 | } 112 | 113 | func (r *randomToken) String() string { 114 | return (*big.Int)(r).String() 115 | } 116 | 117 | func (r *randomToken) Less(token token) bool { 118 | return -1 == (*big.Int)(r).Cmp((*big.Int)(token.(*randomToken))) 119 | } 120 | 121 | // a data structure for organizing the relationship between tokens and hosts 122 | type tokenRing struct { 123 | partitioner partitioner 124 | tokens []token 125 | hosts []*HostInfo 126 | } 127 | 128 | func newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) { 129 | tokenRing := &tokenRing{ 130 | tokens: []token{}, 131 | hosts: []*HostInfo{}, 132 | } 133 | 134 | if strings.HasSuffix(partitioner, "Murmur3Partitioner") { 135 | tokenRing.partitioner = murmur3Partitioner{} 136 | } else if strings.HasSuffix(partitioner, "OrderedPartitioner") { 137 | tokenRing.partitioner = orderedPartitioner{} 138 | } 
else if strings.HasSuffix(partitioner, "RandomPartitioner") { 139 | tokenRing.partitioner = randomPartitioner{} 140 | } else { 141 | return nil, fmt.Errorf("Unsupported partitioner '%s'", partitioner) 142 | } 143 | 144 | for _, host := range hosts { 145 | for _, strToken := range host.Tokens() { 146 | token := tokenRing.partitioner.ParseString(strToken) 147 | tokenRing.tokens = append(tokenRing.tokens, token) 148 | tokenRing.hosts = append(tokenRing.hosts, host) 149 | } 150 | } 151 | 152 | sort.Sort(tokenRing) 153 | 154 | return tokenRing, nil 155 | } 156 | 157 | func (t *tokenRing) Len() int { 158 | return len(t.tokens) 159 | } 160 | 161 | func (t *tokenRing) Less(i, j int) bool { 162 | return t.tokens[i].Less(t.tokens[j]) 163 | } 164 | 165 | func (t *tokenRing) Swap(i, j int) { 166 | t.tokens[i], t.hosts[i], t.tokens[j], t.hosts[j] = 167 | t.tokens[j], t.hosts[j], t.tokens[i], t.hosts[i] 168 | } 169 | 170 | func (t *tokenRing) String() string { 171 | 172 | buf := &bytes.Buffer{} 173 | buf.WriteString("TokenRing(") 174 | if t.partitioner != nil { 175 | buf.WriteString(t.partitioner.Name()) 176 | } 177 | buf.WriteString("){") 178 | sep := "" 179 | for i := range t.tokens { 180 | buf.WriteString(sep) 181 | sep = "," 182 | buf.WriteString("\n\t[") 183 | buf.WriteString(strconv.Itoa(i)) 184 | buf.WriteString("]") 185 | buf.WriteString(t.tokens[i].String()) 186 | buf.WriteString(":") 187 | buf.WriteString(t.hosts[i].Peer().String()) 188 | } 189 | buf.WriteString("\n}") 190 | return string(buf.Bytes()) 191 | } 192 | 193 | func (t *tokenRing) GetHostForPartitionKey(partitionKey []byte) *HostInfo { 194 | if t == nil { 195 | return nil 196 | } 197 | 198 | token := t.partitioner.Hash(partitionKey) 199 | return t.GetHostForToken(token) 200 | } 201 | 202 | func (t *tokenRing) GetHostForToken(token token) *HostInfo { 203 | if t == nil { 204 | return nil 205 | } 206 | 207 | // find the primary replica 208 | ringIndex := sort.Search( 209 | len(t.tokens), 210 | func(i int) bool { 211 | return !t.tokens[i].Less(token) 212 | }, 213 | ) 214 | if ringIndex == len(t.tokens) { 215 | // wrap around to the first in the ring 216 | ringIndex = 0 217 | } 218 | host := t.hosts[ringIndex] 219 | return host 220 | } 221 | -------------------------------------------------------------------------------- /uuid_test.go: -------------------------------------------------------------------------------- 1 | // +build all unit 2 | 3 | package gocql 4 | 5 | import ( 6 | "bytes" 7 | "strings" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestUUIDNil(t *testing.T) { 13 | var uuid UUID 14 | want, got := "00000000-0000-0000-0000-000000000000", uuid.String() 15 | if want != got { 16 | t.Fatalf("TestNil: expected %q got %q", want, got) 17 | } 18 | } 19 | 20 | var testsUUID = []struct { 21 | input string 22 | variant int 23 | version int 24 | }{ 25 | {"b4f00409-cef8-4822-802c-deb20704c365", VariantIETF, 4}, 26 | {"B4F00409-CEF8-4822-802C-DEB20704C365", VariantIETF, 4}, //Use capital letters 27 | {"f81d4fae-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1}, 28 | {"00000000-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1}, 29 | {"3051a8d7-aea7-1801-e0bf-bc539dd60cf3", VariantFuture, 1}, 30 | {"3051a8d7-aea7-2801-e0bf-bc539dd60cf3", VariantFuture, 2}, 31 | {"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 3}, 32 | {"3051a8d7-aea7-4801-e0bf-bc539dd60cf3", VariantFuture, 4}, 33 | {"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 5}, 34 | {"d0e817e1-e4b1-1801-3fe6-b4b60ccecf9d", VariantNCSCompat, 0}, 35 | 
{"d0e817e1-e4b1-1801-bfe6-b4b60ccecf9d", VariantIETF, 1}, 36 | {"d0e817e1-e4b1-1801-dfe6-b4b60ccecf9d", VariantMicrosoft, 0}, 37 | {"d0e817e1-e4b1-1801-ffe6-b4b60ccecf9d", VariantFuture, 0}, 38 | } 39 | 40 | func TestPredefinedUUID(t *testing.T) { 41 | for i := range testsUUID { 42 | uuid, err := ParseUUID(testsUUID[i].input) 43 | if err != nil { 44 | t.Errorf("ParseUUID #%d: %v", i, err) 45 | continue 46 | } 47 | 48 | if str := uuid.String(); str != strings.ToLower(testsUUID[i].input) { 49 | t.Errorf("String #%d: expected %q got %q", i, testsUUID[i].input, str) 50 | continue 51 | } 52 | 53 | if variant := uuid.Variant(); variant != testsUUID[i].variant { 54 | t.Errorf("Variant #%d: expected %d got %d", i, testsUUID[i].variant, variant) 55 | } 56 | 57 | if testsUUID[i].variant == VariantIETF { 58 | if version := uuid.Version(); version != testsUUID[i].version { 59 | t.Errorf("Version #%d: expected %d got %d", i, testsUUID[i].version, version) 60 | } 61 | } 62 | 63 | json, err := uuid.MarshalJSON() 64 | if err != nil { 65 | t.Errorf("MarshalJSON #%d: %v", i, err) 66 | } 67 | expectedJson := `"` + strings.ToLower(testsUUID[i].input) + `"` 68 | if string(json) != expectedJson { 69 | t.Errorf("MarshalJSON #%d: expected %v got %v", i, expectedJson, string(json)) 70 | } 71 | 72 | var unmarshaled UUID 73 | err = unmarshaled.UnmarshalJSON(json) 74 | if err != nil { 75 | t.Errorf("UnmarshalJSON #%d: %v", i, err) 76 | } 77 | if unmarshaled != uuid { 78 | t.Errorf("UnmarshalJSON #%d: expected %v got %v", i, uuid, unmarshaled) 79 | } 80 | } 81 | } 82 | 83 | func TestInvalidUUIDCharacter(t *testing.T) { 84 | _, err := ParseUUID("z4f00409-cef8-4822-802c-deb20704c365") 85 | if err == nil || !strings.Contains(err.Error(), "invalid UUID") { 86 | t.Fatalf("expected invalid UUID error, got '%v' ", err) 87 | } 88 | } 89 | 90 | func TestInvalidUUIDLength(t *testing.T) { 91 | _, err := ParseUUID("4f00") 92 | if err == nil || !strings.Contains(err.Error(), "invalid UUID") { 93 | t.Fatalf("expected invalid UUID error, got '%v' ", err) 94 | } 95 | 96 | _, err = UUIDFromBytes(TimeUUID().Bytes()[:15]) 97 | if err == nil || err.Error() != "UUIDs must be exactly 16 bytes long" { 98 | t.Fatalf("expected error '%v', got '%v'", "UUIDs must be exactly 16 bytes long", err) 99 | } 100 | } 101 | 102 | func TestRandomUUID(t *testing.T) { 103 | for i := 0; i < 20; i++ { 104 | uuid, err := RandomUUID() 105 | if err != nil { 106 | t.Errorf("RandomUUID: %v", err) 107 | } 108 | if variant := uuid.Variant(); variant != VariantIETF { 109 | t.Errorf("wrong variant. expected %d got %d", VariantIETF, variant) 110 | } 111 | if version := uuid.Version(); version != 4 { 112 | t.Errorf("wrong version. expected %d got %d", 4, version) 113 | } 114 | } 115 | } 116 | 117 | func TestRandomUUIDInvalidAPICalls(t *testing.T) { 118 | uuid, err := RandomUUID() 119 | if err != nil { 120 | t.Fatalf("unexpected error %v", err) 121 | } 122 | 123 | if node := uuid.Node(); node != nil { 124 | t.Fatalf("expected nil, got %v", node) 125 | } 126 | 127 | if stamp := uuid.Timestamp(); stamp != 0 { 128 | t.Fatalf("expceted 0, got %v", stamp) 129 | } 130 | zeroT := time.Time{} 131 | if to := uuid.Time(); to != zeroT { 132 | t.Fatalf("expected %v, got %v", zeroT, to) 133 | } 134 | } 135 | 136 | func TestUUIDFromTime(t *testing.T) { 137 | date := time.Date(1982, 5, 5, 12, 34, 56, 400, time.UTC) 138 | uuid := UUIDFromTime(date) 139 | 140 | if uuid.Time() != date { 141 | t.Errorf("embedded time incorrect. 
Expected %v got %v", date, uuid.Time()) 142 | } 143 | } 144 | 145 | func TestParseUUID(t *testing.T) { 146 | uuid, _ := ParseUUID("486f3a88-775b-11e3-ae07-d231feb1dc81") 147 | if uuid.Time() != time.Date(2014, 1, 7, 5, 19, 29, 222516000, time.UTC) { 148 | t.Errorf("Expected date of 1/7/2014 at 5:19:29.222516, got %v", uuid.Time()) 149 | } 150 | } 151 | 152 | func TestTimeUUID(t *testing.T) { 153 | var node []byte 154 | timestamp := int64(0) 155 | for i := 0; i < 20; i++ { 156 | uuid := TimeUUID() 157 | 158 | if variant := uuid.Variant(); variant != VariantIETF { 159 | t.Errorf("wrong variant. expected %d got %d", VariantIETF, variant) 160 | } 161 | if version := uuid.Version(); version != 1 { 162 | t.Errorf("wrong version. expected %d got %d", 1, version) 163 | } 164 | 165 | if n := uuid.Node(); !bytes.Equal(n, node) && i > 0 { 166 | t.Errorf("wrong node. expected %x, got %x", node, n) 167 | } else if i == 0 { 168 | node = n 169 | } 170 | 171 | ts := uuid.Timestamp() 172 | if ts < timestamp { 173 | t.Errorf("timestamps must grow: timestamp=%v ts=%v", timestamp, ts) 174 | } 175 | timestamp = ts 176 | } 177 | } 178 | 179 | func TestUnmarshalJSON(t *testing.T) { 180 | var withHyphens, withoutHypens, tooLong UUID 181 | 182 | withHyphens.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81"`)) 183 | if withHyphens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) { 184 | t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withHyphens.Time()) 185 | } 186 | 187 | withoutHypens.UnmarshalJSON([]byte(`"486f3a88775b11e3ae07d231feb1dc81"`)) 188 | if withoutHypens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) { 189 | t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withoutHypens.Time()) 190 | } 191 | 192 | err := tooLong.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81486f3a88"`)) 193 | if err == nil { 194 | t.Errorf("no error for invalid JSON UUID") 195 | } 196 | 197 | } 198 | 199 | func TestMarshalText(t *testing.T) { 200 | u, err := ParseUUID("486f3a88-775b-11e3-ae07-d231feb1dc81") 201 | if err != nil { 202 | t.Fatal(err) 203 | } 204 | 205 | text, err := u.MarshalText() 206 | if err != nil { 207 | t.Fatal(err) 208 | } 209 | 210 | var u2 UUID 211 | if err := u2.UnmarshalText(text); err != nil { 212 | t.Fatal(err) 213 | } 214 | 215 | if u != u2 { 216 | t.Fatalf("uuids not equal after marshalling: before=%s after=%s", u, u2) 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /uuid.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2012 The gocql Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // The uuid package can be used to generate and parse universally unique 6 | // identifiers, a standardized format in the form of a 128 bit number. 
7 | //
8 | // http://tools.ietf.org/html/rfc4122
9 | package gocql
10 | 
11 | import (
12 | "crypto/rand"
13 | "errors"
14 | "fmt"
15 | "io"
16 | "net"
17 | "strings"
18 | "sync/atomic"
19 | "time"
20 | )
21 | 
22 | type UUID [16]byte
23 | 
24 | var hardwareAddr []byte
25 | var clockSeq uint32
26 | 
27 | const (
28 | VariantNCSCompat = 0
29 | VariantIETF = 2
30 | VariantMicrosoft = 6
31 | VariantFuture = 7
32 | )
33 | 
34 | func init() {
35 | if interfaces, err := net.Interfaces(); err == nil {
36 | for _, i := range interfaces {
37 | if i.Flags&net.FlagLoopback == 0 && len(i.HardwareAddr) > 0 {
38 | hardwareAddr = i.HardwareAddr
39 | break
40 | }
41 | }
42 | }
43 | if hardwareAddr == nil {
44 | // If we failed to obtain the MAC address of the current computer,
45 | // we will use a randomly generated 6 byte sequence instead and set
46 | // the multicast bit as recommended in RFC 4122.
47 | hardwareAddr = make([]byte, 6)
48 | _, err := io.ReadFull(rand.Reader, hardwareAddr)
49 | if err != nil {
50 | panic(err)
51 | }
52 | hardwareAddr[0] = hardwareAddr[0] | 0x01
53 | }
54 | 
55 | // initialize the clock sequence with a random number
56 | var clockSeqRand [2]byte
57 | io.ReadFull(rand.Reader, clockSeqRand[:])
58 | clockSeq = uint32(clockSeqRand[1])<<8 | uint32(clockSeqRand[0])
59 | }
60 | 
61 | // ParseUUID parses a 32 digit hexadecimal number (that might contain hyphens)
62 | // representing a UUID.
63 | func ParseUUID(input string) (UUID, error) {
64 | var u UUID
65 | j := 0
66 | for _, r := range input {
67 | switch {
68 | case r == '-' && j&1 == 0:
69 | continue
70 | case r >= '0' && r <= '9' && j < 32:
71 | u[j/2] |= byte(r-'0') << uint(4-j&1*4)
72 | case r >= 'a' && r <= 'f' && j < 32:
73 | u[j/2] |= byte(r-'a'+10) << uint(4-j&1*4)
74 | case r >= 'A' && r <= 'F' && j < 32:
75 | u[j/2] |= byte(r-'A'+10) << uint(4-j&1*4)
76 | default:
77 | return UUID{}, fmt.Errorf("invalid UUID %q", input)
78 | }
79 | j += 1
80 | }
81 | if j != 32 {
82 | return UUID{}, fmt.Errorf("invalid UUID %q", input)
83 | }
84 | return u, nil
85 | }
86 | 
87 | // UUIDFromBytes converts a raw byte slice to a UUID.
88 | func UUIDFromBytes(input []byte) (UUID, error) {
89 | var u UUID
90 | if len(input) != 16 {
91 | return u, errors.New("UUIDs must be exactly 16 bytes long")
92 | }
93 | 
94 | copy(u[:], input)
95 | return u, nil
96 | }
97 | 
98 | // RandomUUID generates a totally random UUID (version 4) as described in
99 | // RFC 4122.
100 | func RandomUUID() (UUID, error) {
101 | var u UUID
102 | _, err := io.ReadFull(rand.Reader, u[:])
103 | if err != nil {
104 | return u, err
105 | }
106 | u[6] &= 0x0F // clear version
107 | u[6] |= 0x40 // set version to 4 (random uuid)
108 | u[8] &= 0x3F // clear variant
109 | u[8] |= 0x80 // set to IETF variant
110 | return u, nil
111 | }
112 | 
113 | var timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix()
114 | 
115 | // TimeUUID generates a new time based UUID (version 1) using the current
116 | // time as the timestamp.
117 | func TimeUUID() UUID {
118 | return UUIDFromTime(time.Now())
119 | }
120 | 
121 | // UUIDFromTime generates a new time based UUID (version 1) as described in
122 | // RFC 4122. This UUID contains the MAC address of the node that generated
123 | // the UUID, the given timestamp and a sequence number.
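//
// For illustration only, a minimal sketch of typical usage of these
// constructors (this example is not part of the original source and assumes
// the package is imported as gocql):
//
//	u := gocql.TimeUUID()                // version 1 UUID from time.Now()
//	v := gocql.UUIDFromTime(time.Now())  // version 1 UUID from an explicit time
//	w, err := gocql.RandomUUID()         // version 4 UUID from crypto/rand
//	if err == nil {
//		fmt.Println(u.String(), v.Version(), w.Variant())
//	}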
124 | func UUIDFromTime(aTime time.Time) UUID {
125 | var u UUID
126 | 
127 | utcTime := aTime.In(time.UTC)
128 | t := uint64(utcTime.Unix()-timeBase)*10000000 + uint64(utcTime.Nanosecond()/100)
129 | u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t)
130 | u[4], u[5] = byte(t>>40), byte(t>>32)
131 | u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48)
132 | 
133 | clock := atomic.AddUint32(&clockSeq, 1)
134 | u[8] = byte(clock >> 8)
135 | u[9] = byte(clock)
136 | 
137 | copy(u[10:], hardwareAddr)
138 | 
139 | u[6] |= 0x10 // set version to 1 (time based uuid)
140 | u[8] &= 0x3F // clear variant
141 | u[8] |= 0x80 // set to IETF variant
142 | 
143 | return u
144 | }
145 | 
146 | // String returns the UUID in its canonical form, a 32 digit hexadecimal
147 | // number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
148 | func (u UUID) String() string {
149 | var offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}
150 | const hexString = "0123456789abcdef"
151 | r := make([]byte, 36)
152 | for i, b := range u {
153 | r[offsets[i]] = hexString[b>>4]
154 | r[offsets[i]+1] = hexString[b&0xF]
155 | }
156 | r[8] = '-'
157 | r[13] = '-'
158 | r[18] = '-'
159 | r[23] = '-'
160 | return string(r)
161 | 
162 | }
163 | 
164 | // Bytes returns the raw byte slice for this UUID. A UUID is always 128 bits
165 | // (16 bytes) long.
166 | func (u UUID) Bytes() []byte {
167 | return u[:]
168 | }
169 | 
170 | // Variant returns the variant of this UUID. This package will only generate
171 | // UUIDs in the IETF variant.
172 | func (u UUID) Variant() int {
173 | x := u[8]
174 | if x&0x80 == 0 {
175 | return VariantNCSCompat
176 | }
177 | if x&0x40 == 0 {
178 | return VariantIETF
179 | }
180 | if x&0x20 == 0 {
181 | return VariantMicrosoft
182 | }
183 | return VariantFuture
184 | }
185 | 
186 | // Version extracts the version of this UUID variant. RFC 4122 describes
187 | // five versions of UUIDs.
188 | func (u UUID) Version() int {
189 | return int(u[6] & 0xF0 >> 4)
190 | }
191 | 
192 | // Node extracts the MAC address of the node that generated this UUID. It will
193 | // return nil if the UUID is not a time based UUID (version 1).
194 | func (u UUID) Node() []byte {
195 | if u.Version() != 1 {
196 | return nil
197 | }
198 | return u[10:]
199 | }
200 | 
201 | // Timestamp extracts the timestamp information from a time based UUID
202 | // (version 1).
203 | func (u UUID) Timestamp() int64 {
204 | if u.Version() != 1 {
205 | return 0
206 | }
207 | return int64(uint64(u[0])<<24|uint64(u[1])<<16|
208 | uint64(u[2])<<8|uint64(u[3])) +
209 | int64(uint64(u[4])<<40|uint64(u[5])<<32) +
210 | int64(uint64(u[6]&0x0F)<<56|uint64(u[7])<<48)
211 | }
212 | 
213 | // Time is like Timestamp, except that it returns a time.Time.
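//
// For illustration, a sketch of the round trip this method enables (this
// example is not part of the original source and assumes the package is
// imported as gocql):
//
//	u := gocql.UUIDFromTime(time.Now())
//	t := u.Time() // recovers the embedded timestamp at 100ns precision, in UTC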
214 | func (u UUID) Time() time.Time {
215 | if u.Version() != 1 {
216 | return time.Time{}
217 | }
218 | t := u.Timestamp()
219 | sec := t / 1e7
220 | nsec := (t % 1e7) * 100
221 | return time.Unix(sec+timeBase, nsec).UTC()
222 | }
223 | 
224 | // MarshalJSON marshals the UUID as a quoted JSON string.
225 | func (u UUID) MarshalJSON() ([]byte, error) {
226 | return []byte(`"` + u.String() + `"`), nil
227 | }
228 | 
229 | // UnmarshalJSON parses a UUID from a quoted JSON string.
230 | func (u *UUID) UnmarshalJSON(data []byte) error {
231 | str := strings.Trim(string(data), `"`)
232 | if len(str) > 36 {
233 | return fmt.Errorf("invalid JSON UUID %s", str)
234 | }
235 | 
236 | parsed, err := ParseUUID(str)
237 | if err == nil {
238 | copy(u[:], parsed[:])
239 | }
240 | 
241 | return err
242 | }
243 | 
244 | func (u UUID) MarshalText() ([]byte, error) {
245 | return []byte(u.String()), nil
246 | }
247 | 
248 | func (u *UUID) UnmarshalText(text []byte) (err error) {
249 | *u, err = ParseUUID(string(text))
250 | return
251 | }
252 | 
--------------------------------------------------------------------------------
/events.go:
--------------------------------------------------------------------------------
1 | package gocql
2 | 
3 | import (
4 | "log"
5 | "net"
6 | "sync"
7 | "time"
8 | )
9 | 
10 | type eventDebouncer struct {
11 | name string
12 | timer *time.Timer
13 | mu sync.Mutex
14 | events []frame
15 | 
16 | callback func([]frame)
17 | quit chan struct{}
18 | }
19 | 
20 | func newEventDebouncer(name string, eventHandler func([]frame)) *eventDebouncer {
21 | e := &eventDebouncer{
22 | name: name,
23 | quit: make(chan struct{}),
24 | timer: time.NewTimer(eventDebounceTime),
25 | callback: eventHandler,
26 | }
27 | e.timer.Stop()
28 | go e.flusher()
29 | 
30 | return e
31 | }
32 | 
33 | func (e *eventDebouncer) stop() {
34 | e.quit <- struct{}{} // sync with flusher
35 | close(e.quit)
36 | }
37 | 
38 | func (e *eventDebouncer) flusher() {
39 | for {
40 | select {
41 | case <-e.timer.C:
42 | e.mu.Lock()
43 | e.flush()
44 | e.mu.Unlock()
45 | case <-e.quit:
46 | return
47 | }
48 | }
49 | }
50 | 
51 | const (
52 | eventBufferSize = 1000
53 | eventDebounceTime = 1 * time.Second
54 | )
55 | 
56 | // flush must be called with mu locked
57 | func (e *eventDebouncer) flush() {
58 | if len(e.events) == 0 {
59 | return
60 | }
61 | 
62 | // If flushes fire faster than the callback can run, we will end up calling
63 | // the callback multiple times concurrently, which is probably a bad idea. In
64 | // that case we could drop frames?
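// Note: the callback is handed the current events slice and a fresh buffer
// is allocated below, so the callback owns the slice it receives and can
// read it without any further locking.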
65 | go e.callback(e.events)
66 | e.events = make([]frame, 0, eventBufferSize)
67 | }
68 | 
69 | func (e *eventDebouncer) debounce(frame frame) {
70 | e.mu.Lock()
71 | e.timer.Reset(eventDebounceTime)
72 | 
73 | // TODO: probably need a warning to track if this threshold is too low
74 | if len(e.events) < eventBufferSize {
75 | e.events = append(e.events, frame)
76 | } else {
77 | log.Printf("%s: buffer full, dropping event frame: %s", e.name, frame)
78 | }
79 | 
80 | e.mu.Unlock()
81 | }
82 | 
83 | func (s *Session) handleEvent(framer *framer) {
84 | // TODO(zariel): need to debounce event frames, and possibly also events
85 | defer framerPool.Put(framer)
86 | 
87 | frame, err := framer.parseFrame()
88 | if err != nil {
89 | // TODO: logger
90 | log.Printf("gocql: unable to parse event frame: %v\n", err)
91 | return
92 | }
93 | 
94 | if gocqlDebug {
95 | log.Printf("gocql: handling frame: %v\n", frame)
96 | }
97 | 
98 | // TODO: handle metadata events
99 | switch f := frame.(type) {
100 | case *schemaChangeKeyspace, *schemaChangeFunction, *schemaChangeTable:
101 | s.schemaEvents.debounce(frame)
102 | case *topologyChangeEventFrame, *statusChangeEventFrame:
103 | s.nodeEvents.debounce(frame)
104 | default:
105 | log.Printf("gocql: invalid event frame (%T): %v\n", f, f)
106 | }
107 | }
108 | 
109 | func (s *Session) handleSchemaEvent(frames []frame) {
110 | if s.schemaDescriber == nil {
111 | return
112 | }
113 | for _, frame := range frames {
114 | switch f := frame.(type) {
115 | case *schemaChangeKeyspace:
116 | s.schemaDescriber.clearSchema(f.keyspace)
117 | case *schemaChangeTable:
118 | s.schemaDescriber.clearSchema(f.keyspace)
119 | }
120 | }
121 | }
122 | 
123 | func (s *Session) handleNodeEvent(frames []frame) {
124 | type nodeEvent struct {
125 | change string
126 | host net.IP
127 | port int
128 | }
129 | 
130 | events := make(map[string]*nodeEvent)
131 | 
132 | for _, frame := range frames {
133 | // TODO: can we be sure the order of events in the buffer is correct?
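// Coalesce events per host: the map below is keyed by the host address, so
// for a given host only the last change in the batch survives (e.g. a quick
// DOWN/UP flap collapses into a single UP event).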
134 | switch f := frame.(type) { 135 | case *topologyChangeEventFrame: 136 | event, ok := events[f.host.String()] 137 | if !ok { 138 | event = &nodeEvent{change: f.change, host: f.host, port: f.port} 139 | events[f.host.String()] = event 140 | } 141 | event.change = f.change 142 | 143 | case *statusChangeEventFrame: 144 | event, ok := events[f.host.String()] 145 | if !ok { 146 | event = &nodeEvent{change: f.change, host: f.host, port: f.port} 147 | events[f.host.String()] = event 148 | } 149 | event.change = f.change 150 | } 151 | } 152 | 153 | for _, f := range events { 154 | if gocqlDebug { 155 | log.Printf("gocql: dispatching event: %+v\n", f) 156 | } 157 | 158 | switch f.change { 159 | case "NEW_NODE": 160 | s.handleNewNode(f.host, f.port, true) 161 | case "REMOVED_NODE": 162 | s.handleRemovedNode(f.host, f.port) 163 | case "MOVED_NODE": 164 | // java-driver handles this, not mentioned in the spec 165 | // TODO(zariel): refresh token map 166 | case "UP": 167 | s.handleNodeUp(f.host, f.port, true) 168 | case "DOWN": 169 | s.handleNodeDown(f.host, f.port) 170 | } 171 | } 172 | } 173 | 174 | func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) { 175 | var hostInfo *HostInfo 176 | if s.control != nil && !s.cfg.IgnorePeerAddr { 177 | var err error 178 | hostInfo, err = s.control.fetchHostInfo(ip, port) 179 | if err != nil { 180 | log.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err) 181 | return 182 | } 183 | } else { 184 | hostInfo = &HostInfo{peer: ip, port: port} 185 | } 186 | 187 | if s.cfg.IgnorePeerAddr && hostInfo.Peer().Equal(ip) { 188 | hostInfo.setPeer(ip) 189 | } 190 | 191 | if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(hostInfo) { 192 | return 193 | } 194 | 195 | if t := hostInfo.Version().nodeUpDelay(); t > 0 && waitForBinary { 196 | time.Sleep(t) 197 | } 198 | 199 | // should this handle token moving? 200 | if existing, ok := s.ring.addHostIfMissing(hostInfo); ok { 201 | existing.update(hostInfo) 202 | hostInfo = existing 203 | } 204 | 205 | s.pool.addHost(hostInfo) 206 | s.policy.AddHost(hostInfo) 207 | hostInfo.setState(NodeUp) 208 | 209 | if s.control != nil && !s.cfg.IgnorePeerAddr { 210 | s.hostSource.refreshRing() 211 | } 212 | } 213 | 214 | func (s *Session) handleRemovedNode(ip net.IP, port int) { 215 | // we remove all nodes but only add ones which pass the filter 216 | host := s.ring.getHost(ip) 217 | if host == nil { 218 | host = &HostInfo{peer: ip, port: port} 219 | } 220 | 221 | if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { 222 | return 223 | } 224 | 225 | host.setState(NodeDown) 226 | s.policy.RemoveHost(host) 227 | s.pool.removeHost(ip) 228 | s.ring.removeHost(ip) 229 | 230 | if !s.cfg.IgnorePeerAddr { 231 | s.hostSource.refreshRing() 232 | } 233 | } 234 | 235 | func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) { 236 | if gocqlDebug { 237 | log.Printf("gocql: Session.handleNodeUp: %s:%d\n", ip.String(), port) 238 | } 239 | 240 | host := s.ring.getHost(ip) 241 | if host != nil { 242 | if s.cfg.IgnorePeerAddr && host.Peer().Equal(ip) { 243 | // TODO: how can this ever be true? 
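// (If the stored peer already equals the event IP, the setPeer below is a
// no-op, which is presumably what the TODO above is questioning.)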
244 | host.setPeer(ip)
245 | }
246 | 
247 | if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
248 | return
249 | }
250 | 
251 | if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary {
252 | time.Sleep(t)
253 | }
254 | 
255 | s.pool.hostUp(host)
256 | s.policy.HostUp(host)
257 | host.setState(NodeUp)
258 | return
259 | }
260 | 
261 | s.handleNewNode(ip, port, waitForBinary)
262 | }
263 | 
264 | func (s *Session) handleNodeDown(ip net.IP, port int) {
265 | if gocqlDebug {
266 | log.Printf("gocql: Session.handleNodeDown: %s:%d\n", ip.String(), port)
267 | }
268 | 
269 | host := s.ring.getHost(ip)
270 | if host == nil {
271 | host = &HostInfo{peer: ip, port: port}
272 | }
273 | 
274 | if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
275 | return
276 | }
277 | 
278 | host.setState(NodeDown)
279 | s.policy.HostDown(host)
280 | s.pool.hostDown(ip)
281 | }
282 | 
--------------------------------------------------------------------------------
/session_test.go:
--------------------------------------------------------------------------------
1 | // +build all integration
2 | 
3 | package gocql
4 | 
5 | import (
6 | "fmt"
7 | "testing"
8 | )
9 | 
10 | func TestSessionAPI(t *testing.T) {
11 | 
12 | cfg := &ClusterConfig{}
13 | 
14 | s := &Session{
15 | cfg: *cfg,
16 | cons: Quorum,
17 | policy: RoundRobinHostPolicy(),
18 | }
19 | 
20 | s.pool = cfg.PoolConfig.buildPool(s)
21 | s.executor = &queryExecutor{
22 | pool: s.pool,
23 | policy: s.policy,
24 | }
25 | defer s.Close()
26 | 
27 | s.SetConsistency(All)
28 | if s.cons != All {
29 | t.Fatalf("expected consistency 'All', got '%v'", s.cons)
30 | }
31 | 
32 | s.SetPageSize(100)
33 | if s.pageSize != 100 {
34 | t.Fatalf("expected pageSize 100, got %v", s.pageSize)
35 | }
36 | 
37 | s.SetPrefetch(0.75)
38 | if s.prefetch != 0.75 {
39 | t.Fatalf("expected prefetch 0.75, got %v", s.prefetch)
40 | }
41 | 
42 | trace := &traceWriter{}
43 | 
44 | s.SetTrace(trace)
45 | if s.trace != trace {
46 | t.Fatalf("expected traceWriter '%v', got '%v'", trace, s.trace)
47 | }
48 | 
49 | qry := s.Query("test", 1)
50 | if v, ok := qry.values[0].(int); !ok {
51 | t.Fatalf("expected qry.values[0] to be an int, got %v", qry.values[0])
52 | } else if v != 1 {
53 | t.Fatalf("expected qry.values[0] to be 1, got %v", v)
54 | } else if qry.stmt != "test" {
55 | t.Fatalf("expected qry.stmt to be 'test', got '%v'", qry.stmt)
56 | }
57 | 
58 | boundQry := s.Bind("test", func(q *QueryInfo) ([]interface{}, error) {
59 | return nil, nil
60 | })
61 | if boundQry.binding == nil {
62 | t.Fatal("expected qry.binding to be defined, got nil")
63 | } else if boundQry.stmt != "test" {
64 | t.Fatalf("expected qry.stmt to be 'test', got '%v'", boundQry.stmt)
65 | }
66 | 
67 | itr := s.executeQuery(qry)
68 | if itr.err != ErrNoConnections {
69 | t.Fatalf("expected itr.err to be '%v', got '%v'", ErrNoConnections, itr.err)
70 | }
71 | 
72 | testBatch := s.NewBatch(LoggedBatch)
73 | testBatch.Query("test")
74 | err := s.ExecuteBatch(testBatch)
75 | 
76 | if err != ErrNoConnections {
77 | t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrNoConnections, err)
78 | }
79 | 
80 | s.Close()
81 | if !s.Closed() {
82 | t.Fatal("expected s.Closed() to be true, got false")
83 | }
84 | // Should just return cleanly
85 | s.Close()
86 | 
87 | err = s.ExecuteBatch(testBatch)
88 | if err != ErrSessionClosed {
89 | t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrSessionClosed, err)
90 | }
91 | }
92 | 
93 | func TestQueryBasicAPI(t *testing.T) {
94 | qry := &Query{}
95 | 
96 | if qry.Latency() != 0 {
97 | t.Fatalf("expected Query.Latency() to return 0, got %v", qry.Latency())
98 | }
99 | 
100 | qry.attempts = 2
101 | qry.totalLatency = 4
102 | if qry.Attempts() != 2 {
103 | t.Fatalf("expected Query.Attempts() to return 2, got %v", qry.Attempts())
104 | }
105 | if qry.Latency() != 2 {
106 | t.Fatalf("expected Query.Latency() to return 2, got %v", qry.Latency())
107 | }
108 | 
109 | qry.Consistency(All)
110 | if qry.GetConsistency() != All {
111 | t.Fatalf("expected Query.GetConsistency to return 'All', got '%s'", qry.GetConsistency())
112 | }
113 | 
114 | trace := &traceWriter{}
115 | qry.Trace(trace)
116 | if qry.trace != trace {
117 | t.Fatalf("expected Query.Trace to be '%v', got '%v'", trace, qry.trace)
118 | }
119 | 
120 | qry.PageSize(10)
121 | if qry.pageSize != 10 {
122 | t.Fatalf("expected Query.PageSize to be 10, got %v", qry.pageSize)
123 | }
124 | 
125 | qry.Prefetch(0.75)
126 | if qry.prefetch != 0.75 {
127 | t.Fatalf("expected Query.Prefetch to be 0.75, got %v", qry.prefetch)
128 | }
129 | 
130 | rt := &SimpleRetryPolicy{NumRetries: 3}
131 | if qry.RetryPolicy(rt); qry.rt != rt {
132 | t.Fatalf("expected Query.RetryPolicy to be '%v', got '%v'", rt, qry.rt)
133 | }
134 | 
135 | qry.Bind(qry)
136 | if qry.values[0] != qry {
137 | t.Fatalf("expected Query.Values[0] to be '%v', got '%v'", qry, qry.values[0])
138 | }
139 | }
140 | 
141 | func TestQueryShouldPrepare(t *testing.T) {
142 | toPrepare := []string{"select * ", "INSERT INTO", "update table", "delete from", "begin batch"}
143 | cantPrepare := []string{"create table", "USE table", "LIST keyspaces", "alter table", "drop table", "grant user", "revoke user"}
144 | q := &Query{}
145 | 
146 | for i := 0; i < len(toPrepare); i++ {
147 | q.stmt = toPrepare[i]
148 | if !q.shouldPrepare() {
149 | t.Fatalf("expected Query.shouldPrepare to return true, got false for statement '%v'", toPrepare[i])
150 | }
151 | }
152 | 
153 | for i := 0; i < len(cantPrepare); i++ {
154 | q.stmt = cantPrepare[i]
155 | if q.shouldPrepare() {
156 | t.Fatalf("expected Query.shouldPrepare to return false, got true for statement '%v'", cantPrepare[i])
157 | }
158 | }
159 | }
160 | 
161 | func TestBatchBasicAPI(t *testing.T) {
162 | 
163 | cfg := &ClusterConfig{RetryPolicy: &SimpleRetryPolicy{NumRetries: 2}}
164 | 
165 | s := &Session{
166 | cfg: *cfg,
167 | cons: Quorum,
168 | }
169 | defer s.Close()
170 | 
171 | s.pool = cfg.PoolConfig.buildPool(s)
172 | 
173 | b := s.NewBatch(UnloggedBatch)
174 | if b.Type != UnloggedBatch {
175 | t.Fatalf("expected batch.Type to be '%v', got '%v'", UnloggedBatch, b.Type)
176 | } else if b.rt != cfg.RetryPolicy {
177 | t.Fatalf("expected batch.RetryPolicy to be '%v', got '%v'", cfg.RetryPolicy, b.rt)
178 | }
179 | 
180 | b = NewBatch(LoggedBatch)
181 | if b.Type != LoggedBatch {
182 | t.Fatalf("expected batch.Type to be '%v', got '%v'", LoggedBatch, b.Type)
183 | }
184 | 
185 | b.attempts = 1
186 | if b.Attempts() != 1 {
187 | t.Fatalf("expected batch.Attempts() to return %v, got %v", 1, b.Attempts())
188 | }
189 | 
190 | if b.Latency() != 0 {
191 | t.Fatalf("expected batch.Latency() to be 0, got %v", b.Latency())
192 | }
193 | 
194 | b.totalLatency = 4
195 | if b.Latency() != 4 {
196 | t.Fatalf("expected batch.Latency() to return %v, got %v", 4, b.Latency())
197 | }
198 | 
199 | b.Cons = One
200 | if b.GetConsistency() != One {
201 | t.Fatalf("expected batch.GetConsistency() to return 'One', got '%s'", b.GetConsistency())
202 | }
203 | 
204 | b.Query("test", 1)
205 | if b.Entries[0].Stmt != "test" {
206 | 
t.Fatalf("expected batch.Entries[0].Stmt to be 'test', got '%v'", b.Entries[0].Stmt) 207 | } else if b.Entries[0].Args[0].(int) != 1 { 208 | t.Fatalf("expected batch.Entries[0].Args[0] to be 1, got %v", b.Entries[0].Args[0]) 209 | } 210 | 211 | b.Bind("test2", func(q *QueryInfo) ([]interface{}, error) { 212 | return nil, nil 213 | }) 214 | 215 | if b.Entries[1].Stmt != "test2" { 216 | t.Fatalf("expected batch.Entries[1].Stmt to be 'test2', got '%v'", b.Entries[1].Stmt) 217 | } else if b.Entries[1].binding == nil { 218 | t.Fatal("expected batch.Entries[1].binding to be defined, got nil") 219 | } 220 | r := &SimpleRetryPolicy{NumRetries: 4} 221 | 222 | b.RetryPolicy(r) 223 | if b.rt != r { 224 | t.Fatalf("expected batch.RetryPolicy to be '%v', got '%v'", r, b.rt) 225 | } 226 | 227 | if b.Size() != 2 { 228 | t.Fatalf("expected batch.Size() to return 2, got %v", b.Size()) 229 | } 230 | 231 | } 232 | 233 | func TestConsistencyNames(t *testing.T) { 234 | names := map[fmt.Stringer]string{ 235 | Any: "ANY", 236 | One: "ONE", 237 | Two: "TWO", 238 | Three: "THREE", 239 | Quorum: "QUORUM", 240 | All: "ALL", 241 | LocalQuorum: "LOCAL_QUORUM", 242 | EachQuorum: "EACH_QUORUM", 243 | Serial: "SERIAL", 244 | LocalSerial: "LOCAL_SERIAL", 245 | LocalOne: "LOCAL_ONE", 246 | } 247 | 248 | for k, v := range names { 249 | if k.String() != v { 250 | t.Fatalf("expected '%v', got '%v'", v, k.String()) 251 | } 252 | } 253 | } 254 | -------------------------------------------------------------------------------- /helpers.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2012 The gocql Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package gocql 6 | 7 | import ( 8 | "fmt" 9 | "math/big" 10 | "reflect" 11 | "strings" 12 | "time" 13 | 14 | "gopkg.in/inf.v0" 15 | ) 16 | 17 | type RowData struct { 18 | Columns []string 19 | Values []interface{} 20 | } 21 | 22 | func goType(t TypeInfo) reflect.Type { 23 | switch t.Type() { 24 | case TypeVarchar, TypeAscii, TypeInet, TypeText: 25 | return reflect.TypeOf(*new(string)) 26 | case TypeBigInt, TypeCounter: 27 | return reflect.TypeOf(*new(int64)) 28 | case TypeTimestamp: 29 | return reflect.TypeOf(*new(time.Time)) 30 | case TypeBlob: 31 | return reflect.TypeOf(*new([]byte)) 32 | case TypeBoolean: 33 | return reflect.TypeOf(*new(bool)) 34 | case TypeFloat: 35 | return reflect.TypeOf(*new(float32)) 36 | case TypeDouble: 37 | return reflect.TypeOf(*new(float64)) 38 | case TypeInt: 39 | return reflect.TypeOf(*new(int)) 40 | case TypeSmallInt: 41 | return reflect.TypeOf(*new(int16)) 42 | case TypeTinyInt: 43 | return reflect.TypeOf(*new(int8)) 44 | case TypeDecimal: 45 | return reflect.TypeOf(*new(*inf.Dec)) 46 | case TypeUUID, TypeTimeUUID: 47 | return reflect.TypeOf(*new(UUID)) 48 | case TypeList, TypeSet: 49 | return reflect.SliceOf(goType(t.(CollectionType).Elem)) 50 | case TypeMap: 51 | return reflect.MapOf(goType(t.(CollectionType).Key), goType(t.(CollectionType).Elem)) 52 | case TypeVarint: 53 | return reflect.TypeOf(*new(*big.Int)) 54 | case TypeTuple: 55 | // what can we do here? 
all there is to do is to make a list of interface{}
56 | tuple := t.(TupleTypeInfo)
57 | return reflect.TypeOf(make([]interface{}, len(tuple.Elems)))
58 | case TypeUDT:
59 | return reflect.TypeOf(make(map[string]interface{}))
60 | default:
61 | return nil
62 | }
63 | }
64 | 
65 | func dereference(i interface{}) interface{} {
66 | return reflect.Indirect(reflect.ValueOf(i)).Interface()
67 | }
68 | 
69 | func getCassandraType(name string) Type {
70 | switch name {
71 | case "ascii":
72 | return TypeAscii
73 | case "bigint":
74 | return TypeBigInt
75 | case "blob":
76 | return TypeBlob
77 | case "boolean":
78 | return TypeBoolean
79 | case "counter":
80 | return TypeCounter
81 | case "decimal":
82 | return TypeDecimal
83 | case "double":
84 | return TypeDouble
85 | case "float":
86 | return TypeFloat
87 | case "int":
88 | return TypeInt
89 | case "timestamp":
90 | return TypeTimestamp
91 | case "uuid":
92 | return TypeUUID
93 | case "varchar", "text":
94 | return TypeVarchar
95 | case "varint":
96 | return TypeVarint
97 | case "timeuuid":
98 | return TypeTimeUUID
99 | case "inet":
100 | return TypeInet
101 | case "MapType":
102 | return TypeMap
103 | case "ListType":
104 | return TypeList
105 | case "SetType":
106 | return TypeSet
107 | case "TupleType":
108 | return TypeTuple
109 | default:
110 | if strings.HasPrefix(name, "set") {
111 | return TypeSet
112 | } else if strings.HasPrefix(name, "list") {
113 | return TypeList
114 | } else if strings.HasPrefix(name, "map") {
115 | return TypeMap
116 | } else if strings.HasPrefix(name, "tuple") {
117 | return TypeTuple
118 | }
119 | return TypeCustom
120 | }
121 | }
122 | 
123 | func getApacheCassandraType(class string) Type {
124 | switch strings.TrimPrefix(class, apacheCassandraTypePrefix) {
125 | case "AsciiType":
126 | return TypeAscii
127 | case "LongType":
128 | return TypeBigInt
129 | case "BytesType":
130 | return TypeBlob
131 | case "BooleanType":
132 | return TypeBoolean
133 | case "CounterColumnType":
134 | return TypeCounter
135 | case "DecimalType":
136 | return TypeDecimal
137 | case "DoubleType":
138 | return TypeDouble
139 | case "FloatType":
140 | return TypeFloat
141 | case "Int32Type":
142 | return TypeInt
143 | case "ShortType":
144 | return TypeSmallInt
145 | case "ByteType":
146 | return TypeTinyInt
147 | case "DateType", "TimestampType":
148 | return TypeTimestamp
149 | case "UUIDType", "LexicalUUIDType":
150 | return TypeUUID
151 | case "UTF8Type":
152 | return TypeVarchar
153 | case "IntegerType":
154 | return TypeVarint
155 | case "TimeUUIDType":
156 | return TypeTimeUUID
157 | case "InetAddressType":
158 | return TypeInet
159 | case "MapType":
160 | return TypeMap
161 | case "ListType":
162 | return TypeList
163 | case "SetType":
164 | return TypeSet
165 | case "TupleType":
166 | return TypeTuple
167 | default:
168 | return TypeCustom
169 | }
170 | }
171 | 
172 | func typeCanBeNull(typ TypeInfo) bool {
173 | switch typ.(type) {
174 | case CollectionType, UDTTypeInfo, TupleTypeInfo:
175 | return false
176 | }
177 | 
178 | return true
179 | }
180 | 
181 | func (r *RowData) rowMap(m map[string]interface{}) {
182 | for i, column := range r.Columns {
183 | val := dereference(r.Values[i])
184 | if valVal := reflect.ValueOf(val); valVal.Kind() == reflect.Slice {
185 | valCopy := reflect.MakeSlice(valVal.Type(), valVal.Len(), valVal.Cap())
186 | reflect.Copy(valCopy, valVal)
187 | m[column] = valCopy.Interface()
188 | } else {
189 | m[column] = val
190 | }
191 | }
192 | }
193 | 
194 | // TupleColumnName will return the column name of a
tuple value in a column named
195 | // c at index n. It should be used when a specific element within a tuple
196 | // needs to be extracted from a map returned by SliceMap or MapScan.
197 | func TupleColumnName(c string, n int) string {
198 | return fmt.Sprintf("%s[%d]", c, n)
199 | }
200 | 
201 | func (iter *Iter) RowData() (RowData, error) {
202 | if iter.err != nil {
203 | return RowData{}, iter.err
204 | }
205 | 
206 | columns := make([]string, 0)
207 | values := make([]interface{}, 0)
208 | 
209 | for _, column := range iter.Columns() {
210 | 
211 | switch c := column.TypeInfo.(type) {
212 | case TupleTypeInfo:
213 | for i, elem := range c.Elems {
214 | columns = append(columns, TupleColumnName(column.Name, i))
215 | values = append(values, elem.New())
216 | }
217 | default:
218 | val := column.TypeInfo.New()
219 | columns = append(columns, column.Name)
220 | values = append(values, val)
221 | }
222 | }
223 | rowData := RowData{
224 | Columns: columns,
225 | Values: values,
226 | }
227 | return rowData, nil
228 | }
229 | 
230 | // SliceMap is a helper function to make the API easier to use. It returns
231 | // the data from the query in the form of []map[string]interface{}.
232 | func (iter *Iter) SliceMap() ([]map[string]interface{}, error) {
233 | if iter.err != nil {
234 | return nil, iter.err
235 | }
236 | 
237 | // Not checking for the error because we just did
238 | rowData, _ := iter.RowData()
239 | dataToReturn := make([]map[string]interface{}, 0)
240 | for iter.Scan(rowData.Values...) {
241 | m := make(map[string]interface{})
242 | rowData.rowMap(m)
243 | dataToReturn = append(dataToReturn, m)
244 | }
245 | if iter.err != nil {
246 | return nil, iter.err
247 | }
248 | return dataToReturn, nil
249 | }
250 | 
251 | // MapScan takes a map[string]interface{} and populates it with a row
252 | // returned from Cassandra.
253 | func (iter *Iter) MapScan(m map[string]interface{}) bool {
254 | if iter.err != nil {
255 | return false
256 | }
257 | 
258 | // Not checking for the error because we just did
259 | rowData, _ := iter.RowData()
260 | 
261 | for i, col := range rowData.Columns {
262 | if dest, ok := m[col]; ok {
263 | rowData.Values[i] = dest
264 | }
265 | }
266 | 
267 | if iter.Scan(rowData.Values...)
{
268 | rowData.rowMap(m)
269 | return true
270 | }
271 | return false
272 | }
273 | 
274 | func copyBytes(p []byte) []byte {
275 | b := make([]byte, len(p))
276 | copy(b, p)
277 | return b
278 | }
279 | 
--------------------------------------------------------------------------------
/events_ccm_test.go:
--------------------------------------------------------------------------------
1 | // +build ccm, ignore
2 | 
3 | package gocql
4 | 
5 | import (
6 | "log"
7 | "testing"
8 | "time"
9 | 
10 | "github.com/gocql/gocql/internal/ccm"
11 | )
12 | 
13 | func TestEventDiscovery(t *testing.T) {
14 | t.Skip("FLAKE skipping")
15 | if err := ccm.AllUp(); err != nil {
16 | t.Fatal(err)
17 | }
18 | 
19 | session := createSession(t)
20 | defer session.Close()
21 | 
22 | status, err := ccm.Status()
23 | if err != nil {
24 | t.Fatal(err)
25 | }
26 | t.Logf("status=%+v\n", status)
27 | 
28 | session.pool.mu.RLock()
29 | poolHosts := session.pool.hostConnPools // TODO: replace with session.ring
30 | t.Logf("poolhosts=%+v\n", poolHosts)
31 | // check we discovered all the nodes in the ring
32 | for _, host := range status {
33 | if _, ok := poolHosts[host.Addr]; !ok {
34 | t.Errorf("did not discover %q", host.Addr)
35 | }
36 | }
37 | session.pool.mu.RUnlock()
38 | if t.Failed() {
39 | t.FailNow()
40 | }
41 | }
42 | 
43 | func TestEventNodeDownControl(t *testing.T) {
44 | t.Skip("FLAKE skipping")
45 | const targetNode = "node1"
46 | if err := ccm.AllUp(); err != nil {
47 | t.Fatal(err)
48 | }
49 | 
50 | status, err := ccm.Status()
51 | if err != nil {
52 | t.Fatal(err)
53 | }
54 | 
55 | cluster := createCluster()
56 | cluster.Hosts = []string{status[targetNode].Addr}
57 | session := createSessionFromCluster(cluster, t)
58 | defer session.Close()
59 | 
60 | t.Log("marking " + targetNode + " as down")
61 | if err := ccm.NodeDown(targetNode); err != nil {
62 | t.Fatal(err)
63 | }
64 | 
65 | t.Logf("status=%+v\n", status)
66 | t.Logf("marking node %q down: %v\n", targetNode, status[targetNode])
67 | 
68 | time.Sleep(5 * time.Second)
69 | 
70 | session.pool.mu.RLock()
71 | 
72 | poolHosts := session.pool.hostConnPools
73 | node := status[targetNode]
74 | t.Logf("poolhosts=%+v\n", poolHosts)
75 | 
76 | if _, ok := poolHosts[node.Addr]; ok {
77 | session.pool.mu.RUnlock()
78 | t.Fatal("node not removed after remove event")
79 | }
80 | session.pool.mu.RUnlock()
81 | 
82 | host := session.ring.getHost(node.Addr)
83 | if host == nil {
84 | t.Fatal("node not in metadata ring")
85 | } else if host.IsUp() {
86 | t.Fatalf("node not marked as down after event in metadata: %v", host)
87 | }
88 | }
89 | 
90 | func TestEventNodeDown(t *testing.T) {
91 | t.Skip("FLAKE skipping")
92 | const targetNode = "node3"
93 | if err := ccm.AllUp(); err != nil {
94 | t.Fatal(err)
95 | }
96 | 
97 | session := createSession(t)
98 | defer session.Close()
99 | 
100 | if err := ccm.NodeDown(targetNode); err != nil {
101 | t.Fatal(err)
102 | }
103 | 
104 | status, err := ccm.Status()
105 | if err != nil {
106 | t.Fatal(err)
107 | }
108 | t.Logf("status=%+v\n", status)
109 | t.Logf("marking node %q down: %v\n", targetNode, status[targetNode])
110 | 
111 | time.Sleep(5 * time.Second)
112 | 
113 | session.pool.mu.RLock()
114 | defer session.pool.mu.RUnlock()
115 | 
116 | poolHosts := session.pool.hostConnPools
117 | node := status[targetNode]
118 | t.Logf("poolhosts=%+v\n", poolHosts)
119 | 
120 | if _, ok := poolHosts[node.Addr]; ok {
121 | t.Fatal("node not removed after remove event")
122 | }
123 | 
124 | host := session.ring.getHost(node.Addr)
125 | if host == nil {
126
| t.Fatal("node not in metadata ring") 127 | } else if host.IsUp() { 128 | t.Fatalf("not not marked as down after event in metadata: %v", host) 129 | } 130 | } 131 | 132 | func TestEventNodeUp(t *testing.T) { 133 | t.Skip("FLAKE skipping") 134 | if err := ccm.AllUp(); err != nil { 135 | t.Fatal(err) 136 | } 137 | 138 | status, err := ccm.Status() 139 | if err != nil { 140 | t.Fatal(err) 141 | } 142 | log.Printf("status=%+v\n", status) 143 | 144 | session := createSession(t) 145 | defer session.Close() 146 | 147 | const targetNode = "node2" 148 | node := status[targetNode] 149 | 150 | _, ok := session.pool.getPool(node.Addr) 151 | if !ok { 152 | session.pool.mu.RLock() 153 | t.Errorf("target pool not in connection pool: addr=%q pools=%v", status[targetNode].Addr, session.pool.hostConnPools) 154 | session.pool.mu.RUnlock() 155 | t.FailNow() 156 | } 157 | 158 | if err := ccm.NodeDown(targetNode); err != nil { 159 | t.Fatal(err) 160 | } 161 | 162 | time.Sleep(5 * time.Second) 163 | 164 | _, ok = session.pool.getPool(node.Addr) 165 | if ok { 166 | t.Fatal("node not removed after remove event") 167 | } 168 | 169 | if err := ccm.NodeUp(targetNode); err != nil { 170 | t.Fatal(err) 171 | } 172 | 173 | // cassandra < 2.2 needs 10 seconds to start up the binary service 174 | time.Sleep(15 * time.Second) 175 | 176 | _, ok = session.pool.getPool(node.Addr) 177 | if !ok { 178 | t.Fatal("node not added after node added event") 179 | } 180 | 181 | host := session.ring.getHost(node.Addr) 182 | if host == nil { 183 | t.Fatal("node not in metadata ring") 184 | } else if !host.IsUp() { 185 | t.Fatalf("not not marked as UP after event in metadata: addr=%q host=%p: %v", node.Addr, host, host) 186 | } 187 | } 188 | 189 | func TestEventFilter(t *testing.T) { 190 | t.Skip("FLAKE skipping") 191 | if err := ccm.AllUp(); err != nil { 192 | t.Fatal(err) 193 | } 194 | 195 | status, err := ccm.Status() 196 | if err != nil { 197 | t.Fatal(err) 198 | } 199 | log.Printf("status=%+v\n", status) 200 | 201 | cluster := createCluster() 202 | cluster.HostFilter = WhiteListHostFilter(status["node1"].Addr) 203 | session := createSessionFromCluster(cluster, t) 204 | defer session.Close() 205 | 206 | if _, ok := session.pool.getPool(status["node1"].Addr); !ok { 207 | t.Errorf("should have %v in pool but dont", "node1") 208 | } 209 | 210 | for _, host := range [...]string{"node2", "node3"} { 211 | _, ok := session.pool.getPool(status[host].Addr) 212 | if ok { 213 | t.Errorf("should not have %v in pool", host) 214 | } 215 | } 216 | 217 | if t.Failed() { 218 | t.FailNow() 219 | } 220 | 221 | if err := ccm.NodeDown("node2"); err != nil { 222 | t.Fatal(err) 223 | } 224 | 225 | time.Sleep(5 * time.Second) 226 | 227 | if err := ccm.NodeUp("node2"); err != nil { 228 | t.Fatal(err) 229 | } 230 | 231 | time.Sleep(15 * time.Second) 232 | for _, host := range [...]string{"node2", "node3"} { 233 | _, ok := session.pool.getPool(status[host].Addr) 234 | if ok { 235 | t.Errorf("should not have %v in pool", host) 236 | } 237 | } 238 | 239 | if t.Failed() { 240 | t.FailNow() 241 | } 242 | 243 | } 244 | 245 | func TestEventDownQueryable(t *testing.T) { 246 | t.Skip("FLAKE skipping") 247 | if err := ccm.AllUp(); err != nil { 248 | t.Fatal(err) 249 | } 250 | 251 | status, err := ccm.Status() 252 | if err != nil { 253 | t.Fatal(err) 254 | } 255 | log.Printf("status=%+v\n", status) 256 | 257 | const targetNode = "node1" 258 | 259 | addr := status[targetNode].Addr 260 | 261 | cluster := createCluster() 262 | cluster.Hosts = []string{addr} 263 | 
cluster.HostFilter = WhiteListHostFilter(addr)
264 | session := createSessionFromCluster(cluster, t)
265 | defer session.Close()
266 | 
267 | if pool, ok := session.pool.getPool(addr); !ok {
268 | t.Fatalf("should have %v in pool but don't", addr)
269 | } else if !pool.host.IsUp() {
270 | t.Fatalf("host is not up %v", pool.host)
271 | }
272 | 
273 | if err := ccm.NodeDown(targetNode); err != nil {
274 | t.Fatal(err)
275 | }
276 | 
277 | time.Sleep(5 * time.Second)
278 | 
279 | if err := ccm.NodeUp(targetNode); err != nil {
280 | t.Fatal(err)
281 | }
282 | 
283 | time.Sleep(15 * time.Second)
284 | 
285 | if pool, ok := session.pool.getPool(addr); !ok {
286 | t.Fatalf("should have %v in pool but don't", addr)
287 | } else if !pool.host.IsUp() {
288 | t.Fatalf("host is not up %v", pool.host)
289 | }
290 | 
291 | var rows int
292 | if err := session.Query("SELECT COUNT(*) FROM system.local").Scan(&rows); err != nil {
293 | t.Fatal(err)
294 | } else if rows != 1 {
295 | t.Fatalf("expected to get 1 row got %d", rows)
296 | }
297 | }
298 | 
--------------------------------------------------------------------------------
/wiki_test.go:
--------------------------------------------------------------------------------
1 | // +build all integration
2 | 
3 | package gocql
4 | 
5 | import (
6 | "fmt"
7 | "reflect"
8 | "sort"
9 | "testing"
10 | "time"
11 | 
12 | "gopkg.in/inf.v0"
13 | )
14 | 
15 | type WikiPage struct {
16 | Title string
17 | RevId UUID
18 | Body string
19 | Views int64
20 | Protected bool
21 | Modified time.Time
22 | Rating *inf.Dec
23 | Tags []string
24 | Attachments map[string]WikiAttachment
25 | }
26 | 
27 | type WikiAttachment []byte
28 | 
29 | var wikiTestData = []*WikiPage{
30 | {
31 | Title: "Frontpage",
32 | RevId: TimeUUID(),
33 | Body: "Welcome to this wiki page!",
34 | Rating: inf.NewDec(131, 3),
35 | Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
36 | Tags: []string{"start", "important", "test"},
37 | Attachments: map[string]WikiAttachment{
38 | "logo": WikiAttachment("\x00company logo\x00"),
39 | "favicon": WikiAttachment("favicon.ico"),
40 | },
41 | },
42 | {
43 | Title: "Foobar",
44 | RevId: TimeUUID(),
45 | Body: "foo::Foo f = new foo::Foo(foo::Foo::INIT);",
46 | Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
47 | },
48 | }
49 | 
50 | type WikiTest struct {
51 | session *Session
52 | tb testing.TB
53 | 
54 | table string
55 | }
56 | 
57 | func CreateSchema(session *Session, tb testing.TB, table string) *WikiTest {
58 | table = "wiki_" + table
59 | if err := createTable(session, fmt.Sprintf("DROP TABLE IF EXISTS gocql_test.%s", table)); err != nil {
60 | tb.Fatal("CreateSchema:", err)
61 | }
62 | 
63 | err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (
64 | title varchar,
65 | revid timeuuid,
66 | body varchar,
67 | views bigint,
68 | protected boolean,
69 | modified timestamp,
70 | rating decimal,
71 | tags set<varchar>,
72 | attachments map<varchar, blob>,
73 | PRIMARY KEY (title, revid)
74 | )`, table))
75 | 
76 | if err != nil {
77 | tb.Fatal("CreateSchema:", err)
78 | }
79 | 
80 | return &WikiTest{
81 | session: session,
82 | tb: tb,
83 | table: table,
84 | }
85 | }
86 | 
87 | func (w *WikiTest) CreatePages(n int) {
88 | var page WikiPage
89 | t0 := time.Now()
90 | for i := 0; i < n; i++ {
91 | page.Title = fmt.Sprintf("generated_%d", (i&16)+1)
92 | page.Modified = t0.Add(time.Duration(i-n) * time.Minute)
93 | page.RevId = UUIDFromTime(page.Modified)
94 | page.Body = fmt.Sprintf("text %d", i)
95 | if err := w.InsertPage(&page); err != nil {
96 | 
w.tb.Error("CreatePages:", err) 97 | } 98 | } 99 | } 100 | 101 | func (w *WikiTest) InsertPage(page *WikiPage) error { 102 | return w.session.Query(fmt.Sprintf(`INSERT INTO %s 103 | (title, revid, body, views, protected, modified, rating, tags, attachments) 104 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, w.table), 105 | page.Title, page.RevId, page.Body, page.Views, page.Protected, 106 | page.Modified, page.Rating, page.Tags, page.Attachments).Exec() 107 | } 108 | 109 | func (w *WikiTest) SelectPage(page *WikiPage, title string, revid UUID) error { 110 | return w.session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected, 111 | modified,tags, attachments, rating 112 | FROM %s WHERE title = ? AND revid = ? LIMIT 1`, w.table), 113 | title, revid).Scan(&page.Title, &page.RevId, 114 | &page.Body, &page.Views, &page.Protected, &page.Modified, &page.Tags, 115 | &page.Attachments, &page.Rating) 116 | } 117 | 118 | func (w *WikiTest) GetPageCount() int { 119 | var count int 120 | if err := w.session.Query(fmt.Sprintf(`SELECT COUNT(*) FROM %s`, w.table)).Scan(&count); err != nil { 121 | w.tb.Error("GetPageCount", err) 122 | } 123 | return count 124 | } 125 | 126 | func TestWikiCreateSchema(t *testing.T) { 127 | session := createSession(t) 128 | defer session.Close() 129 | 130 | CreateSchema(session, t, "create") 131 | } 132 | 133 | func BenchmarkWikiCreateSchema(b *testing.B) { 134 | b.StopTimer() 135 | session := createSession(b) 136 | defer func() { 137 | b.StopTimer() 138 | session.Close() 139 | }() 140 | 141 | b.StartTimer() 142 | for i := 0; i < b.N; i++ { 143 | CreateSchema(session, b, "bench_create") 144 | } 145 | } 146 | 147 | func TestWikiCreatePages(t *testing.T) { 148 | session := createSession(t) 149 | defer session.Close() 150 | 151 | w := CreateSchema(session, t, "create_pages") 152 | 153 | numPages := 5 154 | w.CreatePages(numPages) 155 | if count := w.GetPageCount(); count != numPages { 156 | t.Errorf("expected %d pages, got %d pages.", numPages, count) 157 | } 158 | } 159 | 160 | func BenchmarkWikiCreatePages(b *testing.B) { 161 | b.StopTimer() 162 | session := createSession(b) 163 | defer func() { 164 | b.StopTimer() 165 | session.Close() 166 | }() 167 | 168 | w := CreateSchema(session, b, "bench_create_pages") 169 | 170 | b.StartTimer() 171 | 172 | w.CreatePages(b.N) 173 | } 174 | 175 | func BenchmarkWikiSelectAllPages(b *testing.B) { 176 | b.StopTimer() 177 | session := createSession(b) 178 | defer func() { 179 | b.StopTimer() 180 | session.Close() 181 | }() 182 | w := CreateSchema(session, b, "bench_select_all") 183 | 184 | w.CreatePages(100) 185 | b.StartTimer() 186 | 187 | var page WikiPage 188 | for i := 0; i < b.N; i++ { 189 | iter := session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected, 190 | modified, tags, attachments, rating 191 | FROM %s`, w.table)).Iter() 192 | for iter.Scan(&page.Title, &page.RevId, &page.Body, &page.Views, 193 | &page.Protected, &page.Modified, &page.Tags, &page.Attachments, 194 | &page.Rating) { 195 | // pass 196 | } 197 | if err := iter.Close(); err != nil { 198 | b.Error(err) 199 | } 200 | } 201 | } 202 | 203 | func BenchmarkWikiSelectSinglePage(b *testing.B) { 204 | b.StopTimer() 205 | session := createSession(b) 206 | defer func() { 207 | b.StopTimer() 208 | session.Close() 209 | }() 210 | w := CreateSchema(session, b, "bench_select_single") 211 | pages := make([]WikiPage, 100) 212 | w.CreatePages(len(pages)) 213 | iter := session.Query(fmt.Sprintf(`SELECT title, revid FROM %s`, w.table)).Iter() 214 | for i := 0; i < 
len(pages); i++ {
215 | if !iter.Scan(&pages[i].Title, &pages[i].RevId) {
216 | pages = pages[:i]
217 | break
218 | }
219 | }
220 | if err := iter.Close(); err != nil {
221 | b.Error(err)
222 | }
223 | b.StartTimer()
224 | 
225 | var page WikiPage
226 | for i := 0; i < b.N; i++ {
227 | p := &pages[i%len(pages)]
228 | if err := w.SelectPage(&page, p.Title, p.RevId); err != nil {
229 | b.Error(err)
230 | }
231 | }
232 | }
233 | 
234 | func BenchmarkWikiSelectPageCount(b *testing.B) {
235 | b.StopTimer()
236 | session := createSession(b)
237 | defer func() {
238 | b.StopTimer()
239 | session.Close()
240 | }()
241 | 
242 | w := CreateSchema(session, b, "bench_page_count")
243 | const numPages = 10
244 | w.CreatePages(numPages)
245 | b.StartTimer()
246 | for i := 0; i < b.N; i++ {
247 | if count := w.GetPageCount(); count != numPages {
248 | b.Errorf("expected %d pages, got %d pages.", numPages, count)
249 | }
250 | }
251 | }
252 | 
253 | func TestWikiTypicalCRUD(t *testing.T) {
254 | session := createSession(t)
255 | defer session.Close()
256 | 
257 | w := CreateSchema(session, t, "crud")
258 | 
259 | for _, page := range wikiTestData {
260 | if err := w.InsertPage(page); err != nil {
261 | t.Error("InsertPage:", err)
262 | }
263 | }
264 | if count := w.GetPageCount(); count != len(wikiTestData) {
265 | t.Errorf("count: expected %d, got %d\n", len(wikiTestData), count)
266 | }
267 | for _, original := range wikiTestData {
268 | page := new(WikiPage)
269 | if err := w.SelectPage(page, original.Title, original.RevId); err != nil {
270 | t.Error("SelectPage:", err)
271 | continue
272 | }
273 | sort.Sort(sort.StringSlice(page.Tags))
274 | sort.Sort(sort.StringSlice(original.Tags))
275 | if !reflect.DeepEqual(page, original) {
276 | t.Errorf("page: expected %#v, got %#v\n", original, page)
277 | }
278 | }
279 | }
280 | 
--------------------------------------------------------------------------------
/cluster.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2012 The gocql Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 | 
5 | package gocql
6 | 
7 | import (
8 | "errors"
9 | "time"
10 | )
11 | 
12 | // PoolConfig configures the connection pool used by the driver; it defaults to
13 | // using a round-robin host selection policy and a round-robin connection selection
14 | // policy for each host.
15 | type PoolConfig struct {
16 | // HostSelectionPolicy sets the policy for selecting which host to use for a
17 | // given query (default: RoundRobinHostPolicy())
18 | HostSelectionPolicy HostSelectionPolicy
19 | }
20 | 
21 | func (p PoolConfig) buildPool(session *Session) *policyConnPool {
22 | return newPolicyConnPool(session)
23 | }
24 | 
25 | // ClusterConfig is a struct to configure the default cluster implementation
26 | // of gocql. It has a variety of attributes that can be used to modify the
27 | // behavior to fit the most common use cases. Applications that require a
28 | // different setup must implement their own cluster.
29 | type ClusterConfig struct {
30 | // addresses for the initial connections. It is recommended to use the value set in
31 | // the Cassandra config for broadcast_address or listen_address, an IP address, not
32 | // a domain name. This is because events from Cassandra will use the configured IP
33 | // address, which is used to index connected hosts.
If the domain name specified
34 | // resolves to more than 1 IP address then the driver may connect multiple times to
35 | // the same host, and will not mark the node being down or up from events.
36 | Hosts []string
37 | CQLVersion string // CQL version (default: 3.0.0)
38 | 
39 | // ProtoVersion sets the version of the native protocol to use; this will
40 | // enable features in the driver for specific protocol versions. Generally this
41 | // should be set to a known version (2,3,4) for the cluster being connected to.
42 | //
43 | // If it is 0 or unset (the default) then the driver will attempt to discover the
44 | // highest supported protocol for the cluster. In clusters with nodes of different
45 | // versions the protocol selected is not defined (i.e., it can be any of those supported in the cluster)
46 | ProtoVersion int
47 | Timeout time.Duration // connection timeout (default: 600ms)
48 | Port int // port (default: 9042)
49 | Keyspace string // initial keyspace (optional)
50 | NumConns int // number of connections per host (default: 2)
51 | Consistency Consistency // default consistency level (default: Quorum)
52 | Compressor Compressor // compression algorithm (default: nil)
53 | Authenticator Authenticator // authenticator (default: nil)
54 | RetryPolicy RetryPolicy // Default retry policy to use for queries (default: nil, no retries)
55 | SocketKeepalive time.Duration // The keepalive period to use, enabled if > 0 (default: 0)
56 | MaxPreparedStmts int // Sets the maximum cache size for prepared statements globally for gocql (default: 1000)
57 | MaxRoutingKeyInfo int // Sets the maximum cache size for query info about statements for each session (default: 1000)
58 | PageSize int // Default page size to use for created sessions (default: 5000)
59 | SerialConsistency SerialConsistency // Sets the consistency for the serial part of queries, values can be either SERIAL or LOCAL_SERIAL (default: unset)
60 | SslOpts *SslOptions
61 | DefaultTimestamp bool // Sends a client side timestamp for all requests which overrides the timestamp at which it arrives at the server. (default: true, only enabled for protocol 3 and above)
62 | // PoolConfig configures the underlying connection pool, allowing the
63 | // configuration of host selection and connection selection policies.
64 | PoolConfig PoolConfig
65 | 
66 | // If not zero, gocql attempts to reconnect to known DOWN nodes every ReconnectInterval.
67 | ReconnectInterval time.Duration
68 | 
69 | // The maximum amount of time to wait for schema agreement in a cluster after
70 | // receiving a schema change frame. (default: 60s)
71 | MaxWaitSchemaAgreement time.Duration
72 | 
73 | // HostFilter will filter all incoming events for hosts; any which don't pass
74 | // the filter will be ignored. If set, it will take precedence over any options set
75 | // via Discovery
76 | HostFilter HostFilter
77 | 
78 | // If IgnorePeerAddr is true and the address in system.peers does not match
79 | // the supplied host by either initial hosts or discovered via events then the
80 | // host will be replaced with the supplied address.
81 | //
82 | // For example if an event comes in with host=10.0.0.1 but when looking up that
83 | // address in system.local or system.peers returns 127.0.0.1, the peer will be
84 | // set to 10.0.0.1, which is what will be used to connect.
85 | IgnorePeerAddr bool
86 | 
87 | // If DisableInitialHostLookup then the driver will not attempt to get host info
88 | // from the system.peers table. This means that the driver will connect only to
89 | // the hosts supplied and will not attempt to look up the host information; as a
90 | // result, data_centre, rack and token information will not be available and as
91 | // such host filtering and token aware query routing will not be available.
92 | DisableInitialHostLookup bool
93 | 
94 | // Configure events the driver will register for
95 | Events struct {
96 | // disable registering for status events (node up/down)
97 | DisableNodeStatusEvents bool
98 | // disable registering for topology events (node added/removed/moved)
99 | DisableTopologyEvents bool
100 | // disable registering for schema events (keyspace/table/function removed/created/updated)
101 | DisableSchemaEvents bool
102 | }
103 | 
104 | // DisableSkipMetadata will override the internal result metadata cache so that the driver does not
105 | // send skip_metadata for queries; this means that the result will always contain
106 | // the metadata to parse the rows and will not reuse the metadata from the prepared
107 | // statement.
108 | //
109 | // See https://issues.apache.org/jira/browse/CASSANDRA-10786
110 | DisableSkipMetadata bool
111 | 
112 | // internal config for testing
113 | disableControlConn bool
114 | }
115 | 
116 | // NewCluster generates a new config for the default cluster implementation.
117 | //
118 | // The supplied hosts are used to initially connect to the cluster then the rest of
119 | // the ring will be automatically discovered. It is recommended to use the value set in
120 | // the Cassandra config for broadcast_address or listen_address, an IP address not
121 | // a domain name. This is because events from Cassandra will use the configured IP
122 | // address, which is used to index connected hosts. If the domain name specified
123 | // resolves to more than 1 IP address then the driver may connect multiple times to
124 | // the same host, and will not mark the node being down or up from events.
125 | func NewCluster(hosts ...string) *ClusterConfig {
126 | cfg := &ClusterConfig{
127 | Hosts: hosts,
128 | CQLVersion: "3.0.0",
129 | Timeout: 600 * time.Millisecond,
130 | Port: 9042,
131 | NumConns: 2,
132 | Consistency: Quorum,
133 | MaxPreparedStmts: defaultMaxPreparedStmts,
134 | MaxRoutingKeyInfo: 1000,
135 | PageSize: 5000,
136 | DefaultTimestamp: true,
137 | MaxWaitSchemaAgreement: 60 * time.Second,
138 | ReconnectInterval: 60 * time.Second,
139 | }
140 | return cfg
141 | }
142 | 
143 | // CreateSession initializes the cluster based on this config and returns a
144 | // session object that can be used to interact with the database.
145 | func (cfg *ClusterConfig) CreateSession() (*Session, error) {
146 | return NewSession(*cfg)
147 | }
148 | 
149 | var (
150 | ErrNoHosts = errors.New("no hosts provided")
151 | ErrNoConnectionsStarted = errors.New("no connections were made when creating the session")
152 | ErrHostQueryFailed = errors.New("unable to populate Hosts")
153 | )
154 | 
--------------------------------------------------------------------------------
/policies_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2015 The gocql Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 | 5 | package gocql 6 | 7 | import ( 8 | "fmt" 9 | "net" 10 | "testing" 11 | 12 | "github.com/hailocab/go-hostpool" 13 | ) 14 | 15 | // Tests of the round-robin host selection policy implementation 16 | func TestRoundRobinHostPolicy(t *testing.T) { 17 | policy := RoundRobinHostPolicy() 18 | 19 | hosts := [...]*HostInfo{ 20 | {hostId: "0", peer: net.IPv4(0, 0, 0, 1)}, 21 | {hostId: "1", peer: net.IPv4(0, 0, 0, 2)}, 22 | } 23 | 24 | for _, host := range hosts { 25 | policy.AddHost(host) 26 | } 27 | 28 | // interleaved iteration should always increment the host 29 | iterA := policy.Pick(nil) 30 | if actual := iterA(); actual.Info() != hosts[0] { 31 | t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID()) 32 | } 33 | iterB := policy.Pick(nil) 34 | if actual := iterB(); actual.Info() != hosts[1] { 35 | t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID()) 36 | } 37 | if actual := iterB(); actual.Info() != hosts[0] { 38 | t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID()) 39 | } 40 | if actual := iterA(); actual.Info() != hosts[1] { 41 | t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID()) 42 | } 43 | 44 | iterC := policy.Pick(nil) 45 | if actual := iterC(); actual.Info() != hosts[0] { 46 | t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID()) 47 | } 48 | if actual := iterC(); actual.Info() != hosts[1] { 49 | t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID()) 50 | } 51 | } 52 | 53 | // Tests of the token-aware host selection policy implementation with a 54 | // round-robin host selection policy fallback. 55 | func TestTokenAwareHostPolicy(t *testing.T) { 56 | policy := TokenAwareHostPolicy(RoundRobinHostPolicy()) 57 | 58 | query := &Query{} 59 | 60 | iter := policy.Pick(nil) 61 | if iter == nil { 62 | t.Fatal("host iterator was nil") 63 | } 64 | actual := iter() 65 | if actual != nil { 66 | t.Fatalf("expected nil from iterator, but was %v", actual) 67 | } 68 | 69 | // set the hosts 70 | hosts := [...]*HostInfo{ 71 | {peer: net.IPv4(10, 0, 0, 1), tokens: []string{"00"}}, 72 | {peer: net.IPv4(10, 0, 0, 2), tokens: []string{"25"}}, 73 | {peer: net.IPv4(10, 0, 0, 3), tokens: []string{"50"}}, 74 | {peer: net.IPv4(10, 0, 0, 4), tokens: []string{"75"}}, 75 | } 76 | for _, host := range hosts { 77 | policy.AddHost(host) 78 | } 79 | 80 | // the token ring is not setup without the partitioner, but the fallback 81 | // should work 82 | if actual := policy.Pick(nil)(); !actual.Info().Peer().Equal(hosts[0].peer) { 83 | t.Errorf("Expected peer 0 but was %s", actual.Info().Peer()) 84 | } 85 | 86 | query.RoutingKey([]byte("30")) 87 | if actual := policy.Pick(query)(); !actual.Info().Peer().Equal(hosts[1].peer) { 88 | t.Errorf("Expected peer 1 but was %s", actual.Info().Peer()) 89 | } 90 | 91 | policy.SetPartitioner("OrderedPartitioner") 92 | 93 | // now the token ring is configured 94 | query.RoutingKey([]byte("20")) 95 | iter = policy.Pick(query) 96 | if actual := iter(); !actual.Info().Peer().Equal(hosts[1].peer) { 97 | t.Errorf("Expected peer 1 but was %s", actual.Info().Peer()) 98 | } 99 | // rest are round robin 100 | if actual := iter(); !actual.Info().Peer().Equal(hosts[2].peer) { 101 | t.Errorf("Expected peer 2 but was %s", actual.Info().Peer()) 102 | } 103 | if actual := iter(); !actual.Info().Peer().Equal(hosts[3].peer) { 104 | t.Errorf("Expected peer 3 but was %s", actual.Info().Peer()) 105 | } 106 | if actual := iter(); !actual.Info().Peer().Equal(hosts[0].peer) { 107 | 
t.Errorf("Expected peer 0 but was %s", actual.Info().Peer()) 108 | } 109 | } 110 | 111 | // Tests of the host pool host selection policy implementation 112 | func TestHostPoolHostPolicy(t *testing.T) { 113 | policy := HostPoolHostPolicy(hostpool.New(nil)) 114 | 115 | hosts := []*HostInfo{ 116 | {hostId: "0", peer: net.IPv4(10, 0, 0, 0)}, 117 | {hostId: "1", peer: net.IPv4(10, 0, 0, 1)}, 118 | } 119 | 120 | // Using set host to control the ordering of the hosts as calling "AddHost" iterates the map 121 | // which will result in an unpredictable ordering 122 | policy.(*hostPoolHostPolicy).SetHosts(hosts) 123 | 124 | // the first host selected is actually at [1], but this is ok for RR 125 | // interleaved iteration should always increment the host 126 | iter := policy.Pick(nil) 127 | actualA := iter() 128 | if actualA.Info().HostID() != "0" { 129 | t.Errorf("Expected hosts[0] but was hosts[%s]", actualA.Info().HostID()) 130 | } 131 | actualA.Mark(nil) 132 | 133 | actualB := iter() 134 | if actualB.Info().HostID() != "1" { 135 | t.Errorf("Expected hosts[1] but was hosts[%s]", actualB.Info().HostID()) 136 | } 137 | actualB.Mark(fmt.Errorf("error")) 138 | 139 | actualC := iter() 140 | if actualC.Info().HostID() != "0" { 141 | t.Errorf("Expected hosts[0] but was hosts[%s]", actualC.Info().HostID()) 142 | } 143 | actualC.Mark(nil) 144 | 145 | actualD := iter() 146 | if actualD.Info().HostID() != "0" { 147 | t.Errorf("Expected hosts[0] but was hosts[%s]", actualD.Info().HostID()) 148 | } 149 | actualD.Mark(nil) 150 | } 151 | 152 | func TestRoundRobinNilHostInfo(t *testing.T) { 153 | policy := RoundRobinHostPolicy() 154 | 155 | host := &HostInfo{hostId: "host-1"} 156 | policy.AddHost(host) 157 | 158 | iter := policy.Pick(nil) 159 | next := iter() 160 | if next == nil { 161 | t.Fatal("got nil host") 162 | } else if v := next.Info(); v == nil { 163 | t.Fatal("got nil HostInfo") 164 | } else if v.HostID() != host.HostID() { 165 | t.Fatalf("expected host %v got %v", host, v) 166 | } 167 | 168 | next = iter() 169 | if next != nil { 170 | t.Errorf("expected to get nil host got %+v", next) 171 | if next.Info() == nil { 172 | t.Fatalf("HostInfo is nil") 173 | } 174 | } 175 | } 176 | 177 | func TestTokenAwareNilHostInfo(t *testing.T) { 178 | policy := TokenAwareHostPolicy(RoundRobinHostPolicy()) 179 | 180 | hosts := [...]*HostInfo{ 181 | {peer: net.IPv4(10, 0, 0, 0), tokens: []string{"00"}}, 182 | {peer: net.IPv4(10, 0, 0, 1), tokens: []string{"25"}}, 183 | {peer: net.IPv4(10, 0, 0, 2), tokens: []string{"50"}}, 184 | {peer: net.IPv4(10, 0, 0, 3), tokens: []string{"75"}}, 185 | } 186 | for _, host := range hosts { 187 | policy.AddHost(host) 188 | } 189 | policy.SetPartitioner("OrderedPartitioner") 190 | 191 | query := &Query{} 192 | query.RoutingKey([]byte("20")) 193 | 194 | iter := policy.Pick(query) 195 | next := iter() 196 | if next == nil { 197 | t.Fatal("got nil host") 198 | } else if v := next.Info(); v == nil { 199 | t.Fatal("got nil HostInfo") 200 | } else if !v.Peer().Equal(hosts[1].peer) { 201 | t.Fatalf("expected peer 1 got %v", v.Peer()) 202 | } 203 | 204 | // Empty the hosts to trigger the panic when using the fallback. 
205 | for _, host := range hosts { 206 | policy.RemoveHost(host) 207 | } 208 | 209 | next = iter() 210 | if next != nil { 211 | t.Errorf("expected to get nil host got %+v", next) 212 | if next.Info() == nil { 213 | t.Fatalf("HostInfo is nil") 214 | } 215 | } 216 | } 217 | 218 | func TestCOWList_Add(t *testing.T) { 219 | var cow cowHostList 220 | 221 | toAdd := [...]net.IP{net.IPv4(0, 0, 0, 0), net.IPv4(1, 0, 0, 0), net.IPv4(2, 0, 0, 0)} 222 | 223 | for _, addr := range toAdd { 224 | if !cow.add(&HostInfo{peer: addr}) { 225 | t.Fatal("did not add peer which was not in the set") 226 | } 227 | } 228 | 229 | hosts := cow.get() 230 | if len(hosts) != len(toAdd) { 231 | t.Fatalf("expected to have %d hosts got %d", len(toAdd), len(hosts)) 232 | } 233 | 234 | set := make(map[string]bool) 235 | for _, host := range hosts { 236 | set[string(host.Peer())] = true 237 | } 238 | 239 | for _, addr := range toAdd { 240 | if !set[string(addr)] { 241 | t.Errorf("addr was not in the host list: %q", addr) 242 | } 243 | } 244 | } 245 | 246 | // TestSimpleRetryPolicy makes sure that we only allow 1 + numRetries attempts 247 | func TestSimpleRetryPolicy(t *testing.T) { 248 | q := &Query{} 249 | 250 | // this should allow a total of 3 tries. 251 | rt := &SimpleRetryPolicy{NumRetries: 2} 252 | 253 | cases := []struct { 254 | attempts int 255 | allow bool 256 | }{ 257 | {0, true}, 258 | {1, true}, 259 | {2, true}, 260 | {3, false}, 261 | {4, false}, 262 | {5, false}, 263 | } 264 | 265 | for _, c := range cases { 266 | q.attempts = c.attempts 267 | if c.allow && !rt.Attempt(q) { 268 | t.Fatalf("should allow retry after %d attempts", c.attempts) 269 | } 270 | if !c.allow && rt.Attempt(q) { 271 | t.Fatalf("should not allow retry after %d attempts", c.attempts) 272 | } 273 | } 274 | } 275 | -------------------------------------------------------------------------------- /host_source.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net" 7 | "strconv" 8 | "strings" 9 | "sync" 10 | "time" 11 | ) 12 | 13 | type nodeState int32 14 | 15 | func (n nodeState) String() string { 16 | if n == NodeUp { 17 | return "UP" 18 | } else if n == NodeDown { 19 | return "DOWN" 20 | } 21 | return fmt.Sprintf("UNKNOWN_%d", n) 22 | } 23 | 24 | const ( 25 | NodeUp nodeState = iota 26 | NodeDown 27 | ) 28 | 29 | type cassVersion struct { 30 | Major, Minor, Patch int 31 | } 32 | 33 | func (c *cassVersion) Set(v string) error { 34 | if v == "" { 35 | return nil 36 | } 37 | 38 | return c.UnmarshalCQL(nil, []byte(v)) 39 | } 40 | 41 | func (c *cassVersion) UnmarshalCQL(info TypeInfo, data []byte) error { 42 | return c.unmarshal(data) 43 | } 44 | 45 | func (c *cassVersion) unmarshal(data []byte) error { 46 | version := strings.TrimSuffix(string(data), "-SNAPSHOT") 47 | version = strings.TrimPrefix(version, "v") 48 | v := strings.Split(version, ".") 49 | 50 | if len(v) < 2 { 51 | return fmt.Errorf("invalid version string: %s", data) 52 | } 53 | 54 | var err error 55 | c.Major, err = strconv.Atoi(v[0]) 56 | if err != nil { 57 | return fmt.Errorf("invalid major version %v: %v", v[0], err) 58 | } 59 | 60 | c.Minor, err = strconv.Atoi(v[1]) 61 | if err != nil { 62 | return fmt.Errorf("invalid minor version %v: %v", v[1], err) 63 | } 64 | 65 | if len(v) > 2 { 66 | c.Patch, err = strconv.Atoi(v[2]) 67 | if err != nil { 68 | return fmt.Errorf("invalid patch version %v: %v", v[2], err) 69 | } 70 | } 71 | 72 | return nil 73 | } 74 | 75 | func (c cassVersion) 
Before(major, minor, patch int) bool { 76 | if c.Major > major { // compare major, then minor, then patch 77 | return true 78 | } else if c.Major == major && c.Minor > minor { 79 | return true 80 | } else if c.Major == major && c.Minor == minor && c.Patch > patch { 81 | return true 82 | } 83 | return false 84 | } 85 | 86 | func (c cassVersion) String() string { 87 | return fmt.Sprintf("v%d.%d.%d", c.Major, c.Minor, c.Patch) 88 | } 89 | 90 | func (c cassVersion) nodeUpDelay() time.Duration { 91 | if c.Major > 2 || (c.Major == 2 && c.Minor >= 2) { 92 | // CASSANDRA-8236, fixed in 2.2 93 | return 0 94 | } 95 | 96 | return 10 * time.Second 97 | } 98 | 99 | type HostInfo struct { 100 | // TODO(zariel): reduce locking maybe, not all values will change, but to ensure 101 | // that we are thread safe use a mutex to access all fields. 102 | mu sync.RWMutex 103 | peer net.IP 104 | port int 105 | dataCenter string 106 | rack string 107 | hostId string 108 | version cassVersion 109 | state nodeState 110 | tokens []string 111 | } 112 | 113 | func (h *HostInfo) Equal(host *HostInfo) bool { 114 | h.mu.RLock() 115 | defer h.mu.RUnlock() 116 | host.mu.RLock() 117 | defer host.mu.RUnlock() 118 | 119 | return h.peer.Equal(host.peer) 120 | } 121 | 122 | func (h *HostInfo) Peer() net.IP { 123 | h.mu.RLock() 124 | defer h.mu.RUnlock() 125 | return h.peer 126 | } 127 | 128 | func (h *HostInfo) setPeer(peer net.IP) *HostInfo { 129 | h.mu.Lock() 130 | defer h.mu.Unlock() 131 | h.peer = peer 132 | return h 133 | } 134 | 135 | func (h *HostInfo) DataCenter() string { 136 | h.mu.RLock() 137 | defer h.mu.RUnlock() 138 | return h.dataCenter 139 | } 140 | 141 | func (h *HostInfo) setDataCenter(dataCenter string) *HostInfo { 142 | h.mu.Lock() 143 | defer h.mu.Unlock() 144 | h.dataCenter = dataCenter 145 | return h 146 | } 147 | 148 | func (h *HostInfo) Rack() string { 149 | h.mu.RLock() 150 | defer h.mu.RUnlock() 151 | return h.rack 152 | } 153 | 154 | func (h *HostInfo) setRack(rack string) *HostInfo { 155 | h.mu.Lock() 156 | defer h.mu.Unlock() 157 | h.rack = rack 158 | return h 159 | } 160 | 161 | func (h *HostInfo) HostID() string { 162 | h.mu.RLock() 163 | defer h.mu.RUnlock() 164 | return h.hostId 165 | } 166 | 167 | func (h *HostInfo) setHostID(hostID string) *HostInfo { 168 | h.mu.Lock() 169 | defer h.mu.Unlock() 170 | h.hostId = hostID 171 | return h 172 | } 173 | 174 | func (h *HostInfo) Version() cassVersion { 175 | h.mu.RLock() 176 | defer h.mu.RUnlock() 177 | return h.version 178 | } 179 | 180 | func (h *HostInfo) setVersion(major, minor, patch int) *HostInfo { 181 | h.mu.Lock() 182 | defer h.mu.Unlock() 183 | h.version = cassVersion{major, minor, patch} 184 | return h 185 | } 186 | 187 | func (h *HostInfo) State() nodeState { 188 | h.mu.RLock() 189 | defer h.mu.RUnlock() 190 | return h.state 191 | } 192 | 193 | func (h *HostInfo) setState(state nodeState) *HostInfo { 194 | h.mu.Lock() 195 | defer h.mu.Unlock() 196 | h.state = state 197 | return h 198 | } 199 | 200 | func (h *HostInfo) Tokens() []string { 201 | h.mu.RLock() 202 | defer h.mu.RUnlock() 203 | return h.tokens 204 | } 205 | 206 | func (h *HostInfo) setTokens(tokens []string) *HostInfo { 207 | h.mu.Lock() 208 | defer h.mu.Unlock() 209 | h.tokens = tokens 210 | return h 211 | } 212 | 213 | func (h *HostInfo) Port() int { 214 | h.mu.RLock() 215 | defer h.mu.RUnlock() 216 | return h.port 217 | } 218 | 219 | func (h *HostInfo) setPort(port int) *HostInfo { 220 | h.mu.Lock() 221 | defer h.mu.Unlock() 222 | h.port = port 223 | return h 224 | } 225 | 226 | func (h *HostInfo) update(from *HostInfo) { 227 | h.mu.Lock() 228 | defer h.mu.Unlock() 229 | 230 | h.tokens = from.tokens 231 |
h.version = from.version 232 | h.hostId = from.hostId 233 | h.dataCenter = from.dataCenter 234 | } 235 | 236 | func (h *HostInfo) IsUp() bool { 237 | return h != nil && h.State() == NodeUp 238 | } 239 | 240 | func (h *HostInfo) String() string { 241 | h.mu.RLock() 242 | defer h.mu.RUnlock() 243 | return fmt.Sprintf("[hostinfo peer=%q port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.peer, h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens)) 244 | } 245 | 246 | // Polls system.peers at a specific interval to find new hosts 247 | type ringDescriber struct { 248 | dcFilter string 249 | rackFilter string 250 | session *Session 251 | closeChan chan bool 252 | // indicates that we can use system.local to get the connections remote address 253 | localHasRpcAddr bool 254 | 255 | mu sync.Mutex 256 | prevHosts []*HostInfo 257 | prevPartitioner string 258 | } 259 | 260 | func checkSystemLocal(control *controlConn) (bool, error) { 261 | iter := control.query("SELECT broadcast_address FROM system.local") 262 | if err := iter.err; err != nil { 263 | if errf, ok := err.(*errorFrame); ok { 264 | if errf.code == errSyntax { 265 | return false, nil 266 | } 267 | } 268 | 269 | return false, err 270 | } 271 | 272 | return true, nil 273 | } 274 | 275 | // Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces 276 | func checkSystemSchema(control *controlConn) (bool, error) { 277 | iter := control.query("SELECT * FROM system_schema.keyspaces") 278 | if err := iter.err; err != nil { 279 | if errf, ok := err.(*errorFrame); ok { 280 | if errf.code == errReadFailure { 281 | return false, nil 282 | } 283 | } 284 | 285 | return false, err 286 | } 287 | 288 | return true, nil 289 | } 290 | 291 | func (r *ringDescriber) GetHosts() (hosts []*HostInfo, partitioner string, err error) { 292 | r.mu.Lock() 293 | defer r.mu.Unlock() 294 | // we need conn to be the same because we need to query system.peers and system.local 295 | // on the same node to get the whole cluster 296 | 297 | const ( 298 | legacyLocalQuery = "SELECT data_center, rack, host_id, tokens, partitioner, release_version FROM system.local" 299 | // only supported in 2.2.0, 2.1.6, 2.0.16 300 | localQuery = "SELECT broadcast_address, data_center, rack, host_id, tokens, partitioner, release_version FROM system.local" 301 | ) 302 | 303 | localHost := &HostInfo{} 304 | if r.localHasRpcAddr { 305 | iter := r.session.control.query(localQuery) 306 | if iter == nil { 307 | return r.prevHosts, r.prevPartitioner, nil 308 | } 309 | 310 | iter.Scan(&localHost.peer, &localHost.dataCenter, &localHost.rack, 311 | &localHost.hostId, &localHost.tokens, &partitioner, &localHost.version) 312 | 313 | if err = iter.Close(); err != nil { 314 | return nil, "", err 315 | } 316 | } else { 317 | iter := r.session.control.withConn(func(c *Conn) *Iter { 318 | localHost = c.host 319 | return c.query(legacyLocalQuery) 320 | }) 321 | 322 | if iter == nil { 323 | return r.prevHosts, r.prevPartitioner, nil 324 | } 325 | 326 | iter.Scan(&localHost.dataCenter, &localHost.rack, &localHost.hostId, &localHost.tokens, &partitioner, &localHost.version) 327 | 328 | if err = iter.Close(); err != nil { 329 | return nil, "", err 330 | } 331 | } 332 | 333 | localHost.port = r.session.cfg.Port 334 | 335 | hosts = []*HostInfo{localHost} 336 | 337 | rows := r.session.control.query("SELECT rpc_address, data_center, rack, host_id, tokens, release_version FROM system.peers").Scanner() 338 | if rows == nil { 339 | return 
r.prevHosts, r.prevPartitioner, nil 340 | } 341 | 342 | for rows.Next() { 343 | host := &HostInfo{port: r.session.cfg.Port} 344 | err := rows.Scan(&host.peer, &host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version) 345 | if err != nil { 346 | log.Println(err) 347 | continue 348 | } 349 | 350 | if r.matchFilter(host) { 351 | hosts = append(hosts, host) 352 | } 353 | } 354 | 355 | if err = rows.Err(); err != nil { 356 | return nil, "", err 357 | } 358 | 359 | r.prevHosts = hosts 360 | r.prevPartitioner = partitioner 361 | 362 | return hosts, partitioner, nil 363 | } 364 | 365 | func (r *ringDescriber) matchFilter(host *HostInfo) bool { 366 | if r.dcFilter != "" && r.dcFilter != host.DataCenter() { 367 | return false 368 | } 369 | 370 | if r.rackFilter != "" && r.rackFilter != host.Rack() { 371 | return false 372 | } 373 | 374 | return true 375 | } 376 | 377 | func (r *ringDescriber) refreshRing() error { 378 | // if we have 0 hosts this will return the previous list of hosts in order to 379 | // attempt to reconnect to the cluster, otherwise we would never find 380 | // downed hosts again. A possible optimisation would be to only 381 | // try to add new hosts if GetHosts didn't error and the hosts didn't change. 382 | hosts, partitioner, err := r.GetHosts() 383 | if err != nil { 384 | return err 385 | } 386 | 387 | // TODO: move this to session 388 | // TODO: handle removing hosts here 389 | for _, h := range hosts { 390 | if r.session.cfg.HostFilter == nil || r.session.cfg.HostFilter.Accept(h) { 391 | if host, ok := r.session.ring.addHostIfMissing(h); !ok { 392 | r.session.pool.addHost(h) 393 | } else { 394 | host.update(h) 395 | } 396 | } 397 | } 398 | 399 | r.session.metadata.setPartitioner(partitioner) 400 | r.session.policy.SetPartitioner(partitioner) 401 | return nil 402 | } 403 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | gocql 2 | ===== 3 | 4 | [![Join the chat at https://gitter.im/gocql/gocql](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gocql/gocql?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 5 | [![Build Status](https://travis-ci.org/gocql/gocql.svg?branch=master)](https://travis-ci.org/gocql/gocql) 6 | [![GoDoc](https://godoc.org/github.com/gocql/gocql?status.svg)](https://godoc.org/github.com/gocql/gocql) 7 | 8 | Package gocql implements a fast and robust Cassandra client for the 9 | Go programming language. 10 | 11 | Project Website: https://gocql.github.io/
12 | API documentation: https://godoc.org/github.com/gocql/gocql
13 | Discussions: https://groups.google.com/forum/#!forum/gocql 14 | 15 | Supported Versions 16 | ------------------ 17 | 18 | The following matrix shows the versions of Go and Cassandra that are tested with the integration test suite as part of the CI build: 19 | 20 | Go/Cassandra | 2.1.x | 2.2.x | 3.0.x 21 | -------------| -------| ------| --------- 22 | 1.6 | yes | yes | yes 23 | 1.7 | yes | yes | yes 24 | 25 | Gocql has been tested in production against many different versions of Cassandra. Due to limits in our CI setup we only test against the latest 3 major releases, which coincide with the official support from the Apache project. 26 | 27 | Sunsetting Model 28 | ---------------- 29 | 30 | In general, the gocql team will focus on supporting the current and previous versions of Go. gocql may still work with older versions of Go, but official support for these versions will have been sunset. 31 | 32 | Installation 33 | ------------ 34 | 35 | go get github.com/gocql/gocql 36 | 37 | 38 | Features 39 | -------- 40 | 41 | * Modern Cassandra client using the native transport 42 | * Automatic type conversions between Cassandra and Go 43 | * Support for all common types including sets, lists and maps 44 | * Custom types can implement a `Marshaler` and `Unmarshaler` interface 45 | * Strict type conversions without any loss of precision 46 | * Built-in support for UUIDs (version 1 and 4) 47 | * Support for logged, unlogged and counter batches 48 | * Cluster management 49 | * Automatic reconnect on connection failures with exponential backoff 50 | * Round robin distribution of queries to different hosts 51 | * Round robin distribution of queries to different connections on a host 52 | * Each connection can execute up to n concurrent queries (where n is the limit set by the protocol version the client chooses to use) 53 | * Optional automatic discovery of nodes 54 | * Policy based connection pool with token aware and round-robin policy implementations 55 | * Support for password authentication 56 | * Iteration over paged results with configurable page size 57 | * Support for TLS/SSL 58 | * Optional frame compression (using snappy) 59 | * Automatic query preparation 60 | * Support for query tracing 61 | * Support for Cassandra 2.1+ [binary protocol version 3](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v3.spec) 62 | * Support for up to 32768 streams 63 | * Support for tuple types 64 | * Support for client side timestamps by default 65 | * Support for UDTs via a custom marshaller or struct tags 66 | * Support for Cassandra 3.0+ [binary protocol version 4](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec) 67 | * An API to access the schema metadata of a given keyspace 68 | 69 | Performance 70 | ----------- 71 | While the driver strives to be highly performant, there are cases where it is difficult to test and verify. The driver is built 72 | with maintainability and code readability in mind first, and performance and features second; as such, performance 73 | may occasionally degrade. If this occurs, please report an issue and it will be looked at and remedied. The only time the driver copies data from 74 | its read buffer is when it unmarshals data into supplied types.
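As a concrete illustration, here is a minimal sketch (assuming an existing `*gocql.Session` named `session` and the `example.tweet` table from the example further down) that applies two of the tips listed below: tuning the page size and closing iterators promptly:

```go
// Page through a large result set; rows are unmarshaled straight into the
// supplied variables, so no extra copies of the read buffer are made.
iter := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?`, "me").
	PageSize(1000). // tune to balance round-trips against memory use
	Iter()

var id gocql.UUID
var text string
for iter.Scan(&id, &text) {
	// process each row here
}

// Close reports any error hit while paging and lets the driver recycle
// the iterator's byte buffers.
if err := iter.Close(); err != nil {
	log.Fatal(err)
}
```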
75 | 76 | Some tips for getting more performance from the driver: 77 | * Use the TokenAware policy 78 | * Use many goroutines when doing inserts; the driver is asynchronous but provides a synchronous API, and it can execute many queries concurrently 79 | * Tune query page size 80 | * Reading data from the network to unmarshal will incur a large number of allocations, which can adversely affect the garbage collector; tune `GOGC` if needed 81 | * Close iterators after use to recycle byte buffers 82 | 83 | Important Default Keyspace Changes 84 | ---------------------------------- 85 | gocql no longer supports executing "use <keyspace>" statements, to simplify the library. The user still has the 86 | ability to define the default keyspace for connections, but now the keyspace can only be defined before a 87 | session is created. Queries can still access keyspaces by indicating the keyspace in the query: 88 | ```sql 89 | SELECT * FROM example2.table; 90 | ``` 91 | 92 | Example of correct usage: 93 | ```go 94 | cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3") 95 | cluster.Keyspace = "example" 96 | ... 97 | session, err := cluster.CreateSession() 98 | 99 | ``` 100 | Example of incorrect usage: 101 | ```go 102 | cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3") 103 | cluster.Keyspace = "example" 104 | ... 105 | session, err := cluster.CreateSession() 106 | 107 | if err = session.Query("use example2").Exec(); err != nil { 108 | log.Fatal(err) 109 | } 110 | ``` 111 | This will result in an error being returned from the session.Query line, as the user is trying to execute a "use" 112 | statement. 113 | 114 | Example 115 | ------- 116 | 117 | ```go 118 | /* Before you execute the program, launch `cqlsh` and execute: 119 | create keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }; 120 | create table example.tweet(timeline text, id UUID, text text, PRIMARY KEY(id)); 121 | create index on example.tweet(timeline); 122 | */ 123 | package main 124 | 125 | import ( 126 | "fmt" 127 | "log" 128 | 129 | "github.com/gocql/gocql" 130 | ) 131 | 132 | func main() { 133 | // connect to the cluster 134 | cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3") 135 | cluster.Keyspace = "example" 136 | cluster.Consistency = gocql.Quorum 137 | session, _ := cluster.CreateSession() 138 | defer session.Close() 139 | 140 | // insert a tweet 141 | if err := session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`, 142 | "me", gocql.TimeUUID(), "hello world").Exec(); err != nil { 143 | log.Fatal(err) 144 | } 145 | 146 | var id gocql.UUID 147 | var text string 148 | 149 | /* Search for a specific set of records whose 'timeline' column matches 150 | * the value 'me'. The secondary index that we created earlier will be 151 | * used for optimizing the search */ 152 | if err := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?
LIMIT 1`, 153 | "me").Consistency(gocql.One).Scan(&id, &text); err != nil { 154 | log.Fatal(err) 155 | } 156 | fmt.Println("Tweet:", id, text) 157 | 158 | // list all tweets 159 | iter := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?`, "me").Iter() 160 | for iter.Scan(&id, &text) { 161 | fmt.Println("Tweet:", id, text) 162 | } 163 | if err := iter.Close(); err != nil { 164 | log.Fatal(err) 165 | } 166 | } 167 | ``` 168 | 169 | Data Binding 170 | ------------ 171 | 172 | There are various ways to bind application level data structures to CQL statements: 173 | 174 | * You can write the data binding by hand, as outlined in the Tweet example. This provides you with the greatest flexibility, but it does mean that you need to keep your application code in sync with your Cassandra schema. 175 | * You can dynamically marshal an entire query result into an `[]map[string]interface{}` using the `SliceMap()` API. This returns a slice of row maps keyed by CQL column names. This method requires no special interaction with the gocql API, but it does require your application to be able to deal with a key value view of your data. 176 | * As a refinement on the `SliceMap()` API you can also call `MapScan()` which returns `map[string]interface{}` instances in a row-by-row fashion. 177 | * The `Bind()` API provides a client app with a low level mechanism to introspect query metadata and extract appropriate field values from application level data structures. 178 | * Building on top of the gocql driver, [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement. 179 | * Another external project that layers on top of gocql is [cqlc](http://relops.com/cqlc) which generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax. 180 | * [gocassa](https://github.com/hailocab/gocassa) is an external project that layers on top of gocql to provide convenient query building and data binding. 181 | * [gocqltable](https://github.com/kristoiv/gocqltable) provides an ORM-style convenience layer to make CRUD operations with gocql easier. 182 | 183 | Ecosystem 184 | --------- 185 | 186 | The following community maintained tools are known to integrate with gocql: 187 | 188 | * [migrate](https://github.com/mattes/migrate) is a migration handling tool written in Go with Cassandra support. 189 | * [negronicql](https://github.com/mikebthun/negronicql) is gocql middleware for Negroni. 190 | * [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement. 191 | * [cqlc](http://relops.com/cqlc) generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax. 192 | * [gocassa](https://github.com/hailocab/gocassa) provides query building, adds data binding, and provides easy-to-use "recipe" tables for common query use-cases. 193 | * [gocqltable](https://github.com/kristoiv/gocqltable) is a wrapper around gocql that aims to simplify common operations.
194 | * [gockle](https://github.com/willfaught/gockle) provides simple, mockable interfaces that wrap gocql types. 195 | * [scylladb](https://github.com/scylladb/scylla) is a fast Apache Cassandra-compatible NoSQL database. 196 | 197 | Other Projects 198 | -------------- 199 | 200 | * [gocqldriver](https://github.com/tux21b/gocqldriver) is the predecessor of gocql based on Go's `database/sql` package. This project isn't maintained anymore, because Cassandra wasn't a good fit for the traditional `database/sql` API. Use this package instead. 201 | 202 | SEO 203 | --- 204 | 205 | For some reason, when you Google `golang cassandra`, this project doesn't feature very highly in the result list. But if you Google `go cassandra`, then we're a bit higher up the list. So this is a note to try to convince Google that golang is an alias for Go. 206 | 207 | License 208 | ------- 209 | 210 | > Copyright (c) 2012-2016 The gocql Authors. All rights reserved. 211 | > Use of this source code is governed by a BSD-style 212 | > license that can be found in the LICENSE file. 213 | -------------------------------------------------------------------------------- /token_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015 The gocql Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package gocql 6 | 7 | import ( 8 | "bytes" 9 | "fmt" 10 | "math/big" 11 | "net" 12 | "sort" 13 | "strconv" 14 | "testing" 15 | ) 16 | 17 | // Tests of the murmur3Partitioner 18 | func TestMurmur3Partitioner(t *testing.T) { 19 | token := murmur3Partitioner{}.ParseString("-1053604476080545076") 20 | 21 | if "-1053604476080545076" != token.String() { 22 | t.Errorf("Expected '-1053604476080545076' but was '%s'", token) 23 | } 24 | 25 | // at least verify that the partitioner 26 | // doesn't return nil 27 | pk, _ := marshalInt(nil, 1) 28 | token = murmur3Partitioner{}.Hash(pk) 29 | if token == nil { 30 | t.Fatal("token was nil") 31 | } 32 | } 33 | 34 | // Tests of the murmur3Token 35 | func TestMurmur3Token(t *testing.T) { 36 | if murmur3Token(42).Less(murmur3Token(42)) { 37 | t.Errorf("Expected Less to return false, but was true") 38 | } 39 | if !murmur3Token(-42).Less(murmur3Token(42)) { 40 | t.Errorf("Expected Less to return true, but was false") 41 | } 42 | if murmur3Token(42).Less(murmur3Token(-42)) { 43 | t.Errorf("Expected Less to return false, but was true") 44 | } 45 | } 46 | 47 | // Tests of the orderedPartitioner 48 | func TestOrderedPartitioner(t *testing.T) { 49 | // at least verify that the partitioner 50 | // doesn't return nil 51 | p := orderedPartitioner{} 52 | pk, _ := marshalInt(nil, 1) 53 | token := p.Hash(pk) 54 | if token == nil { 55 | t.Fatal("token was nil") 56 | } 57 | 58 | str := token.String() 59 | parsedToken := p.ParseString(str) 60 | 61 | if !bytes.Equal([]byte(token.(orderedToken)), []byte(parsedToken.(orderedToken))) { 62 | t.Errorf("Failed to convert to and from a string %s expected %x but was %x", 63 | str, 64 | []byte(token.(orderedToken)), 65 | []byte(parsedToken.(orderedToken)), 66 | ) 67 | } 68 | } 69 | 70 | // Tests of the orderedToken 71 | func TestOrderedToken(t *testing.T) { 72 | if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 4, 2})) { 73 | t.Errorf("Expected Less to return false, but was true") 74 | } 75 | if !orderedToken([]byte{0, 0, 3}).Less(orderedToken([]byte{0, 0, 4, 2})) { 76 | t.Errorf("Expected Less to return true,
but was false") 77 | } 78 | if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 3})) { 79 | t.Errorf("Expected Less to return false, but was true") 80 | } 81 | } 82 | 83 | // Tests of the randomPartitioner 84 | func TestRandomPartitioner(t *testing.T) { 85 | // at least verify that the partitioner 86 | // doesn't return nil 87 | p := randomPartitioner{} 88 | pk, _ := marshalInt(nil, 1) 89 | token := p.Hash(pk) 90 | if token == nil { 91 | t.Fatal("token was nil") 92 | } 93 | 94 | str := token.String() 95 | parsedToken := p.ParseString(str) 96 | 97 | if (*big.Int)(token.(*randomToken)).Cmp((*big.Int)(parsedToken.(*randomToken))) != 0 { 98 | t.Errorf("Failed to convert to and from a string %s expected %v but was %v", 99 | str, 100 | token, 101 | parsedToken, 102 | ) 103 | } 104 | } 105 | 106 | func TestRandomPartitionerMatchesReference(t *testing.T) { 107 | // example taken from datastax python driver 108 | // >>> from cassandra.metadata import MD5Token 109 | // >>> MD5Token.hash_fn("test") 110 | // 12707736894140473154801792860916528374L 111 | var p randomPartitioner 112 | expect := "12707736894140473154801792860916528374" 113 | actual := p.Hash([]byte("test")).String() 114 | if actual != expect { 115 | t.Errorf("expected random partitioner to generate tokens in the same way as the reference"+ 116 | " python client. Expected %s, but got %s", expect, actual) 117 | } 118 | } 119 | 120 | // Tests of the randomToken 121 | func TestRandomToken(t *testing.T) { 122 | if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(42))) { 123 | t.Errorf("Expected Less to return false, but was true") 124 | } 125 | if !((*randomToken)(big.NewInt(41))).Less((*randomToken)(big.NewInt(42))) { 126 | t.Errorf("Expected Less to return true, but was false") 127 | } 128 | if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(41))) { 129 | t.Errorf("Expected Less to return false, but was true") 130 | } 131 | } 132 | 133 | type intToken int 134 | 135 | func (i intToken) String() string { 136 | return strconv.Itoa(int(i)) 137 | } 138 | 139 | func (i intToken) Less(token token) bool { 140 | return i < token.(intToken) 141 | } 142 | 143 | // Test of the token ring implementation based on example at the start of this 144 | // page of documentation: 145 | // http://www.datastax.com/docs/0.8/cluster_architecture/partitioning 146 | func TestIntTokenRing(t *testing.T) { 147 | host0 := &HostInfo{} 148 | host25 := &HostInfo{} 149 | host50 := &HostInfo{} 150 | host75 := &HostInfo{} 151 | ring := &tokenRing{ 152 | partitioner: nil, 153 | // these tokens and hosts are out of order to test sorting 154 | tokens: []token{ 155 | intToken(0), 156 | intToken(50), 157 | intToken(75), 158 | intToken(25), 159 | }, 160 | hosts: []*HostInfo{ 161 | host0, 162 | host50, 163 | host75, 164 | host25, 165 | }, 166 | } 167 | 168 | sort.Sort(ring) 169 | 170 | if ring.GetHostForToken(intToken(0)) != host0 { 171 | t.Error("Expected host 0 for token 0") 172 | } 173 | if ring.GetHostForToken(intToken(1)) != host25 { 174 | t.Error("Expected host 25 for token 1") 175 | } 176 | if ring.GetHostForToken(intToken(24)) != host25 { 177 | t.Error("Expected host 25 for token 24") 178 | } 179 | if ring.GetHostForToken(intToken(25)) != host25 { 180 | t.Error("Expected host 25 for token 25") 181 | } 182 | if ring.GetHostForToken(intToken(26)) != host50 { 183 | t.Error("Expected host 50 for token 26") 184 | } 185 | if ring.GetHostForToken(intToken(49)) != host50 { 186 | t.Error("Expected host 50 for token 49") 187 | } 188 | if 
ring.GetHostForToken(intToken(50)) != host50 { 189 | t.Error("Expected host 50 for token 50") 190 | } 191 | if ring.GetHostForToken(intToken(51)) != host75 { 192 | t.Error("Expected host 75 for token 51") 193 | } 194 | if ring.GetHostForToken(intToken(74)) != host75 { 195 | t.Error("Expected host 75 for token 74") 196 | } 197 | if ring.GetHostForToken(intToken(75)) != host75 { 198 | t.Error("Expected host 75 for token 75") 199 | } 200 | if ring.GetHostForToken(intToken(76)) != host0 { 201 | t.Error("Expected host 0 for token 76") 202 | } 203 | if ring.GetHostForToken(intToken(99)) != host0 { 204 | t.Error("Expected host 0 for token 99") 205 | } 206 | if ring.GetHostForToken(intToken(100)) != host0 { 207 | t.Error("Expected host 0 for token 100") 208 | } 209 | } 210 | 211 | // Test for the behavior of a nil pointer to tokenRing 212 | func TestNilTokenRing(t *testing.T) { 213 | var ring *tokenRing = nil 214 | 215 | if ring.GetHostForToken(nil) != nil { 216 | t.Error("Expected nil for nil token ring") 217 | } 218 | if ring.GetHostForPartitionKey(nil) != nil { 219 | t.Error("Expected nil for nil token ring") 220 | } 221 | } 222 | 223 | // Test of the recognition of the partitioner class 224 | func TestUnknownTokenRing(t *testing.T) { 225 | _, err := newTokenRing("UnknownPartitioner", nil) 226 | if err == nil { 227 | t.Error("Expected error for unknown partitioner value, but was nil") 228 | } 229 | } 230 | 231 | func hostsForTests(n int) []*HostInfo { 232 | hosts := make([]*HostInfo, n) 233 | for i := 0; i < n; i++ { 234 | host := &HostInfo{ 235 | peer: net.IPv4(1, 1, 1, byte(i)), // give each host a distinct address 236 | tokens: []string{fmt.Sprintf("%d", 25*i)}, // tokens 0, 25, 50, 75 for n == 4 237 | } 238 | 239 | hosts[i] = host 240 | } 241 | return hosts 242 | } 243 | 244 | // Test of the tokenRing with the Murmur3Partitioner 245 | func TestMurmur3TokenRing(t *testing.T) { 246 | // Note, strings are parsed directly to int64, they are not murmur3 hashed 247 | hosts := hostsForTests(4) 248 | ring, err := newTokenRing("Murmur3Partitioner", hosts) 249 | if err != nil { 250 | t.Fatalf("Failed to create token ring due to error: %v", err) 251 | } 252 | 253 | p := murmur3Partitioner{} 254 | 255 | for _, host := range hosts { 256 | actual := ring.GetHostForToken(p.ParseString(host.tokens[0])) 257 | if !actual.Peer().Equal(host.peer) { 258 | t.Errorf("Expected peer %v for token %q, but was %v", host.peer, host.tokens[0], actual.peer) 259 | } 260 | } 261 | 262 | actual := ring.GetHostForToken(p.ParseString("12")) 263 | if !actual.Peer().Equal(hosts[1].peer) { 264 | t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer()) 265 | } 266 | 267 | actual = ring.GetHostForToken(p.ParseString("24324545443332")) 268 | if !actual.Peer().Equal(hosts[0].peer) { 269 | t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer()) 270 | } 271 | } 272 | 273 | // Test of the tokenRing with the OrderedPartitioner 274 | func TestOrderedTokenRing(t *testing.T) { 275 | // Tokens here follow more or less the same layout as the int tokens above, due 276 | // to each numeric character translating to a consistently offset byte.
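// For instance the byte string "12" sorts before "25" just as 12 < 25 numerically,
// while a longer string such as "24324545443332" still sorts before "25" byte-wise,
// which is why the long token below is expected to land on peer 1 here.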
277 | hosts := hostsForTests(4) 278 | ring, err := newTokenRing("OrderedPartitioner", hosts) 279 | if err != nil { 280 | t.Fatalf("Failed to create token ring due to error: %v", err) 281 | } 282 | 283 | p := orderedPartitioner{} 284 | 285 | var actual *HostInfo 286 | for _, host := range hosts { 287 | actual = ring.GetHostForToken(p.ParseString(host.tokens[0])) 288 | if !actual.Peer().Equal(host.peer) { 289 | t.Errorf("Expected peer %v for token %q, but was %v", host.peer, host.tokens[0], actual.peer) 290 | } 291 | } 292 | 293 | actual = ring.GetHostForToken(p.ParseString("12")) 294 | if !actual.peer.Equal(hosts[1].peer) { 295 | t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer()) 296 | } 297 | 298 | actual = ring.GetHostForToken(p.ParseString("24324545443332")) 299 | if !actual.peer.Equal(hosts[1].peer) { 300 | t.Errorf("Expected peer 1 for token \"24324545443332\", but was %s", actual.Peer()) 301 | } 302 | } 303 | 304 | // Test of the tokenRing with the RandomPartitioner 305 | func TestRandomTokenRing(t *testing.T) { 306 | // String tokens are parsed into big.Int in base 10 307 | hosts := hostsForTests(4) 308 | ring, err := newTokenRing("RandomPartitioner", hosts) 309 | if err != nil { 310 | t.Fatalf("Failed to create token ring due to error: %v", err) 311 | } 312 | 313 | p := randomPartitioner{} 314 | 315 | var actual *HostInfo 316 | for _, host := range hosts { 317 | actual = ring.GetHostForToken(p.ParseString(host.tokens[0])) 318 | if !actual.Peer().Equal(host.peer) { 319 | t.Errorf("Expected peer %v for token %q, but was %v", host.peer, host.tokens[0], actual.peer) 320 | } 321 | } 322 | 323 | actual = ring.GetHostForToken(p.ParseString("12")) 324 | if !actual.peer.Equal(hosts[1].peer) { 325 | t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer()) 326 | } 327 | 328 | actual = ring.GetHostForToken(p.ParseString("24324545443332")) 329 | if !actual.peer.Equal(hosts[0].peer) { 330 | t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer()) 331 | } 332 | } 333 | -------------------------------------------------------------------------------- /control.go: -------------------------------------------------------------------------------- 1 | package gocql 2 | 3 | import ( 4 | crand "crypto/rand" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "math/rand" 9 | "net" 10 | "regexp" 11 | "strconv" 12 | "sync/atomic" 13 | "time" 14 | 15 | "golang.org/x/net/context" 16 | ) 17 | 18 | var ( 19 | randr *rand.Rand 20 | ) 21 | 22 | func init() { 23 | b := make([]byte, 4) 24 | if _, err := crand.Read(b); err != nil { 25 | panic(fmt.Sprintf("unable to seed random number generator: %v", err)) 26 | } 27 | 28 | randr = rand.New(rand.NewSource(int64(readInt(b)))) 29 | } 30 | 31 | // Ensure that the atomic variable is aligned to a 64bit boundary 32 | // so that atomic operations can be applied on 32bit architectures.
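// Go's sync/atomic package only guarantees atomicity for 64-bit values that
// are 64-bit aligned; per the sync/atomic docs the first word of an allocated
// struct is so aligned, hence atomically accessed fields lead the struct.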
33 | type controlConn struct { 34 | session *Session 35 | conn atomic.Value 36 | 37 | retry RetryPolicy 38 | 39 | started int32 40 | quit chan struct{} 41 | } 42 | 43 | func createControlConn(session *Session) *controlConn { 44 | control := &controlConn{ 45 | session: session, 46 | quit: make(chan struct{}), 47 | retry: &SimpleRetryPolicy{NumRetries: 3}, 48 | } 49 | 50 | control.conn.Store((*Conn)(nil)) 51 | 52 | return control 53 | } 54 | 55 | func (c *controlConn) heartBeat() { 56 | if !atomic.CompareAndSwapInt32(&c.started, 0, 1) { 57 | return 58 | } 59 | 60 | sleepTime := 1 * time.Second 61 | 62 | for { 63 | select { 64 | case <-c.quit: 65 | return 66 | case <-time.After(sleepTime): 67 | } 68 | 69 | resp, err := c.writeFrame(&writeOptionsFrame{}) 70 | if err != nil { 71 | goto reconn 72 | } 73 | 74 | switch resp.(type) { 75 | case *supportedFrame: 76 | // Everything ok 77 | sleepTime = 5 * time.Second 78 | continue 79 | case error: 80 | goto reconn 81 | default: 82 | panic(fmt.Sprintf("gocql: unknown frame in response to options: %T", resp)) 83 | } 84 | 85 | reconn: 86 | // try to connect a bit faster 87 | sleepTime = 1 * time.Second 88 | c.reconnect(true) 89 | continue 90 | } 91 | } 92 | 93 | var hostLookupPreferV4 = false 94 | 95 | func hostInfo(addr string, defaultPort int) (*HostInfo, error) { 96 | var port int 97 | host, portStr, err := net.SplitHostPort(addr) 98 | if err != nil { 99 | host = addr 100 | port = defaultPort 101 | } else { 102 | port, err = strconv.Atoi(portStr) 103 | if err != nil { 104 | return nil, err 105 | } 106 | } 107 | 108 | ip := net.ParseIP(host) 109 | if ip == nil { 110 | ips, err := net.LookupIP(host) 111 | if err != nil { 112 | return nil, err 113 | } else if len(ips) == 0 { 114 | return nil, fmt.Errorf("no IPs returned from DNS lookup for %q", addr) 115 | } 116 | 117 | if hostLookupPreferV4 { 118 | for _, v := range ips { 119 | if v4 := v.To4(); v4 != nil { 120 | ip = v4 121 | break 122 | } 123 | } 124 | if ip == nil { 125 | ip = ips[0] 126 | } 127 | } else { 128 | // TODO(zariel): should we check that we can connect to any of the ips? 129 | ip = ips[0] 130 | } 131 | 132 | } 133 | 134 | return &HostInfo{peer: ip, port: port}, nil 135 | } 136 | 137 | func shuffleHosts(hosts []*HostInfo) []*HostInfo { 138 | perm := randr.Perm(len(hosts)) 139 | shuffled := make([]*HostInfo, len(hosts)) 140 | 141 | for i, host := range hosts { 142 | shuffled[perm[i]] = host 143 | } 144 | 145 | return shuffled 146 | } 147 | 148 | func (c *controlConn) shuffleDial(endpoints []*HostInfo) (*Conn, error) { 149 | // shuffle endpoints so not all drivers will connect to the same initial 150 | // node. 151 | shuffled := shuffleHosts(endpoints) 152 | 153 | var err error 154 | for _, host := range shuffled { 155 | var conn *Conn 156 | conn, err = c.session.connect(host, c) 157 | if err == nil { 158 | return conn, nil 159 | } 160 | 161 | log.Printf("gocql: unable to dial control conn %v: %v\n", host.Peer(), err) 162 | } 163 | 164 | return nil, err 165 | } 166 | 167 | // this is going to be version dependent and a nightmare to maintain :( 168 | var protocolSupportRe = regexp.MustCompile(`the lowest supported version is \d+ and the greatest is (\d+)$`) 169 | 170 | func parseProtocolFromError(err error) int { 171 | // I really wish this had the actual info in the error frame...
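// The server rejects an unsupported version with a message whose suffix reads
// "... the lowest supported version is N and the greatest is M" (the leading
// wording varies by server version), so we scrape M out of that suffix.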
173 | matches := protocolSupportRe.FindAllStringSubmatch(err.Error(), -1) 174 | if len(matches) != 1 || len(matches[0]) != 2 { 175 | if verr, ok := err.(*protocolError); ok { 176 | return int(verr.frame.Header().version.version()) 177 | } 178 | return 0 179 | } 180 | 181 | max, err := strconv.Atoi(matches[0][1]) 182 | if err != nil { 183 | return 0 184 | } 185 | 186 | return max 187 | } 188 | 189 | func (c *controlConn) discoverProtocol(hosts []*HostInfo) (int, error) { 190 | hosts = shuffleHosts(hosts) 191 | 192 | connCfg := *c.session.connCfg 193 | connCfg.ProtoVersion = 4 // TODO: define maxProtocol 194 | 195 | handler := connErrorHandlerFn(func(c *Conn, err error, closed bool) { 196 | // we should never get here, but if we do it means we connected to a 197 | // host successfully which means our attempted protocol version worked 198 | }) 199 | 200 | var err error 201 | for _, host := range hosts { 202 | var conn *Conn 203 | conn, err = Connect(host, &connCfg, handler, c.session) 204 | if err == nil { 205 | conn.Close() 206 | return connCfg.ProtoVersion, nil 207 | } 208 | 209 | if proto := parseProtocolFromError(err); proto > 0 { 210 | return proto, nil 211 | } 212 | } 213 | 214 | return 0, err 215 | } 216 | 217 | func (c *controlConn) connect(hosts []*HostInfo) error { 218 | if len(hosts) == 0 { 219 | return errors.New("control: no endpoints specified") 220 | } 221 | 222 | conn, err := c.shuffleDial(hosts) 223 | if err != nil { 224 | return fmt.Errorf("control: unable to connect to initial hosts: %v", err) 225 | } 226 | 227 | if err := c.setupConn(conn); err != nil { 228 | conn.Close() 229 | return fmt.Errorf("control: unable to setup connection: %v", err) 230 | } 231 | 232 | // we could fetch the initial ring here and update initial host data. So that 233 | // when we return from here we have a ring topology ready to go. 
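// (at present the ring is populated separately: reconnect(true) and session
// startup refresh it via refreshRing once the control connection is up)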
234 | 235 | go c.heartBeat() 236 | 237 | return nil 238 | } 239 | 240 | func (c *controlConn) setupConn(conn *Conn) error { 241 | if err := c.registerEvents(conn); err != nil { 242 | conn.Close() 243 | return err 244 | } 245 | 246 | c.conn.Store(conn) 247 | 248 | host, portstr, err := net.SplitHostPort(conn.conn.RemoteAddr().String()) 249 | if err != nil { 250 | return err 251 | } 252 | port, err := strconv.Atoi(portstr) 253 | if err != nil { 254 | return err 255 | } 256 | 257 | c.session.handleNodeUp(net.ParseIP(host), port, false) 258 | 259 | return nil 260 | } 261 | 262 | func (c *controlConn) registerEvents(conn *Conn) error { 263 | var events []string 264 | 265 | if !c.session.cfg.Events.DisableTopologyEvents { 266 | events = append(events, "TOPOLOGY_CHANGE") 267 | } 268 | if !c.session.cfg.Events.DisableNodeStatusEvents { 269 | events = append(events, "STATUS_CHANGE") 270 | } 271 | if !c.session.cfg.Events.DisableSchemaEvents { 272 | events = append(events, "SCHEMA_CHANGE") 273 | } 274 | 275 | if len(events) == 0 { 276 | return nil 277 | } 278 | 279 | framer, err := conn.exec(context.Background(), 280 | &writeRegisterFrame{ 281 | events: events, 282 | }, nil) 283 | if err != nil { 284 | return err 285 | } 286 | 287 | frame, err := framer.parseFrame() 288 | if err != nil { 289 | return err 290 | } else if _, ok := frame.(*readyFrame); !ok { 291 | return fmt.Errorf("unexpected frame in response to register: got %T: %v", frame, frame) 292 | } 293 | 294 | return nil 295 | } 296 | 297 | func (c *controlConn) reconnect(refreshring bool) { 298 | // TODO: simplify this function, use session.ring to get hosts instead of the 299 | // connection pool 300 | 301 | var host *HostInfo 302 | oldConn := c.conn.Load().(*Conn) 303 | if oldConn != nil { 304 | host = oldConn.host 305 | oldConn.Close() 306 | } 307 | 308 | var newConn *Conn 309 | if host != nil { 310 | // try to connect to the old host 311 | conn, err := c.session.connect(host, c) 312 | if err != nil { 313 | // host is dead 314 | // TODO: this is replicated in a few places 315 | c.session.handleNodeDown(host.Peer(), host.Port()) 316 | } else { 317 | newConn = conn 318 | } 319 | } 320 | 321 | // TODO: should have our own round-robin for hosts so that we can try each 322 | // in succession and guarantee that we get a different host each time.
323 | if newConn == nil { 324 | host := c.session.ring.rrHost() 325 | if host == nil { 326 | c.connect(c.session.ring.endpoints) 327 | return 328 | } 329 | 330 | var err error 331 | newConn, err = c.session.connect(host, c) 332 | if err != nil { 333 | // TODO: add log handler for things like this 334 | return 335 | } 336 | } 337 | 338 | if err := c.setupConn(newConn); err != nil { 339 | newConn.Close() 340 | log.Printf("gocql: control unable to register events: %v\n", err) 341 | return 342 | } 343 | 344 | if refreshring { 345 | c.session.hostSource.refreshRing() 346 | } 347 | } 348 | 349 | func (c *controlConn) HandleError(conn *Conn, err error, closed bool) { 350 | if !closed { 351 | return 352 | } 353 | 354 | oldConn := c.conn.Load().(*Conn) 355 | if oldConn != conn { 356 | return 357 | } 358 | 359 | c.reconnect(true) 360 | } 361 | 362 | func (c *controlConn) writeFrame(w frameWriter) (frame, error) { 363 | conn := c.conn.Load().(*Conn) 364 | if conn == nil { 365 | return nil, errNoControl 366 | } 367 | 368 | framer, err := conn.exec(context.Background(), w, nil) 369 | if err != nil { 370 | return nil, err 371 | } 372 | 373 | return framer.parseFrame() 374 | } 375 | 376 | func (c *controlConn) withConn(fn func(*Conn) *Iter) *Iter { 377 | const maxConnectAttempts = 5 378 | connectAttempts := 0 379 | 380 | for i := 0; i < maxConnectAttempts; i++ { 381 | conn := c.conn.Load().(*Conn) 382 | if conn == nil { 383 | if connectAttempts > maxConnectAttempts { 384 | break 385 | } 386 | 387 | connectAttempts++ 388 | 389 | c.reconnect(false) 390 | continue 391 | } 392 | 393 | return fn(conn) 394 | } 395 | 396 | return &Iter{err: errNoControl} 397 | } 398 | 399 | // query runs the statement on the control connection, retrying per the retry policy; if no control connection is available the returned Iter has err set to errNoControl 400 | func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter) { 401 | q := c.session.Query(statement, values...).Consistency(One).RoutingKey([]byte{}) 402 | 403 | for { 404 | iter = c.withConn(func(conn *Conn) *Iter { 405 | return conn.executeQuery(q) 406 | }) 407 | 408 | if gocqlDebug && iter.err != nil { 409 | log.Printf("control: error executing %q: %v\n", statement, iter.err) 410 | } 411 | 412 | q.attempts++ 413 | if iter.err == nil || !c.retry.Attempt(q) { 414 | break 415 | } 416 | } 417 | 418 | return 419 | } 420 | 421 | func (c *controlConn) fetchHostInfo(ip net.IP, port int) (*HostInfo, error) { 422 | // TODO(zariel): we should probably move this into host_source or at least 423 | // share code with it. 424 | localHost := c.host() 425 | if localHost == nil { 426 | return nil, errors.New("unable to fetch host info, invalid conn host") 427 | } 428 | 429 | isLocal := localHost.Peer().Equal(ip) 430 | 431 | var fn func(*HostInfo) error 432 | 433 | // TODO(zariel): fetch preferred_ip address (is it >3.x only?)
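// system.local only describes the node we are connected to, so the control
// connection's own host is queried there; any other address must be looked up
// in system.peers keyed by the peer column.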
434 | if isLocal { 435 | fn = func(host *HostInfo) error { 436 | iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.local WHERE key='local'") 437 | iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version) 438 | return iter.Close() 439 | } 440 | } else { 441 | fn = func(host *HostInfo) error { 442 | iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.peers WHERE peer=?", ip) 443 | iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version) 444 | return iter.Close() 445 | } 446 | } 447 | 448 | host := &HostInfo{ 449 | port: port, 450 | peer: ip, 451 | } 452 | 453 | if err := fn(host); err != nil { 454 | return nil, err 455 | } 456 | 457 | return host, nil 458 | } 459 | 460 | func (c *controlConn) awaitSchemaAgreement() error { 461 | return c.withConn(func(conn *Conn) *Iter { 462 | return &Iter{err: conn.awaitSchemaAgreement()} 463 | }).err 464 | } 465 | 466 | func (c *controlConn) host() *HostInfo { 467 | conn := c.conn.Load().(*Conn) 468 | if conn == nil { 469 | return nil 470 | } 471 | return conn.host 472 | } 473 | 474 | func (c *controlConn) close() { 475 | if atomic.CompareAndSwapInt32(&c.started, 1, -1) { 476 | c.quit <- struct{}{} 477 | } 478 | conn := c.conn.Load().(*Conn) 479 | if conn != nil { 480 | conn.Close() 481 | } 482 | } 483 | 484 | var errNoControl = errors.New("gocql: no control connection available") 485 | --------------------------------------------------------------------------------