├── .gitignore ├── .travis.yml ├── LICENSE ├── README.markdown ├── audit.go ├── cbdatasource ├── .gitignore ├── IDEAS.md ├── README.md ├── cbdatasource.go ├── cbdatasource_test.go └── example │ ├── .gitignore │ ├── dump.go │ ├── dump_test.go │ ├── dump_windows.go │ └── main.go ├── client.go ├── client_test.go ├── conn_pool.go ├── conn_pool_test.go ├── ddocs.go ├── examples ├── basic │ └── basic.go ├── bucketlist │ └── bucketlist.go ├── cb_auth │ └── example.go ├── failoverlog │ └── failoverlog.go ├── hello │ └── hello.go ├── hello_getandtouch │ └── hello_getandtouch.go ├── hello_observe │ └── hello_observe.go ├── hello_tap │ └── hello_tap.go ├── incr │ └── incr.go ├── mb-15442 │ └── repro.go ├── observe │ └── observe.go ├── streaming │ └── example.go ├── upr_bench │ └── bench.go ├── upr_feed │ └── feed.go ├── upr_restart │ └── restart.go └── view_params │ └── view_params.go ├── go.mod ├── observe.go ├── perf ├── generate-json.go ├── perf.go └── readme.txt ├── platform ├── platform.go ├── platform_windows.go ├── sync.go ├── sync_386.go └── test │ └── test.go ├── pools.go ├── pools_test.go ├── populate └── populate.go ├── port_map.go ├── port_map_test.go ├── streaming.go ├── tap.go ├── tools ├── loadfile │ └── loadfile.go └── view2go │ └── view2go.go ├── trace ├── trace.go └── trace_test.go ├── upr.go ├── users.go ├── users_test.go ├── util.go ├── util └── viewmgmt.go ├── util_test.go ├── vbmap.go ├── vbmap_test.go ├── views.go └── views_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | #* 2 | *.6 3 | *.a 4 | *~ 5 | *.swp 6 | /examples/basic/basic 7 | /hello/hello 8 | /populate/populate 9 | /tools/view2go/view2go 10 | /tools/loadfile/loadfile 11 | gotags.files 12 | TAGS 13 | 6.out 14 | _* 15 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | install: go get -v -d 
./... && go build -v ./... 3 | script: go test -v ./... 4 | 5 | go: 1.1.1 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013 Couchbase, Inc. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 7 | of the Software, and to permit persons to whom the Software is furnished to do 8 | so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | # A smart client for couchbase in go 2 | 3 | This is an *unofficial* version of a Couchbase Golang client. If you are 4 | looking for the *Official* Couchbase Golang client please see 5 | [CB-go](https://github.com/couchbaselabs/gocb).
6 | 7 | This is an evolving package, but does provide a useful interface to a 8 | [couchbase](http://www.couchbase.com/) server including all of the 9 | pool/bucket discovery features, compatible key distribution with other 10 | clients, and vbucket motion awareness so application can continue to 11 | operate during rebalances. 12 | 13 | It also supports view querying with source node randomization so you 14 | don't bang on all one node to do all the work. 15 | 16 | ## Install 17 | 18 | go get github.com/couchbase/go-couchbase 19 | 20 | ## Example 21 | 22 | c, err := couchbase.Connect("http://dev-couchbase.example.com:8091/") 23 | if err != nil { 24 | log.Fatalf("Error connecting: %v", err) 25 | } 26 | 27 | pool, err := c.GetPool("default") 28 | if err != nil { 29 | log.Fatalf("Error getting pool: %v", err) 30 | } 31 | 32 | bucket, err := pool.GetBucket("default") 33 | if err != nil { 34 | log.Fatalf("Error getting bucket: %v", err) 35 | } 36 | 37 | bucket.Set("someKey", 0, []string{"an", "example", "list"}) 38 | -------------------------------------------------------------------------------- /audit.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import () 4 | 5 | // Sample data: 6 | // {"disabled":["12333", "22244"],"uid":"132492431","auditdEnabled":true, 7 | // "disabledUsers":[{"name":"bill","domain":"local"},{"name":"bob","domain":"local"}], 8 | // "logPath":"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs", 9 | // "rotateInterval":86400,"rotateSize":20971520} 10 | type AuditSpec struct { 11 | Disabled []uint32 `json:"disabled"` 12 | Uid string `json:"uid"` 13 | AuditdEnabled bool `json:"auditdEnabled` 14 | DisabledUsers []AuditUser `json:"disabledUsers"` 15 | LogPath string `json:"logPath"` 16 | RotateInterval int64 `json:"rotateInterval"` 17 | RotateSize int64 `json:"rotateSize"` 18 | } 19 | 20 | type AuditUser struct { 21 | Name string `json:"name"` 22 | Domain 
string `json:"domain"` 23 | } 24 | 25 | func (c *Client) GetAuditSpec() (*AuditSpec, error) { 26 | ret := &AuditSpec{} 27 | err := c.parseURLResponse("/settings/audit", ret) 28 | if err != nil { 29 | return nil, err 30 | } 31 | return ret, nil 32 | } 33 | -------------------------------------------------------------------------------- /cbdatasource/.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.out 3 | -------------------------------------------------------------------------------- /cbdatasource/IDEAS.md: -------------------------------------------------------------------------------- 1 | Feedback from Aliaksey K., based on experiences such as erlang 2 | XDCR-DCP use case... 3 | 4 | - consider adding uuid to everywhere in Receiver interface -- anywhere 5 | that there's a sequence number? 6 | 7 | - advanced option idea - how about an option to not magically heal if 8 | there's a vbucket state or cluster topology change? 9 | - some apps like xdcr might not want magically healing, and instead 10 | want vbucket stickiness to a node (like xdcr tries to maintain one 11 | connection per pair of local & remote nodes). If something 12 | happens, it doesn't want a vbucket which moved to another source 13 | server to automatically be connected to. 14 | - one approach to do this today is the application can provide its 15 | own ConnectBucket() implementation in the BucketDataSourceOptions. 16 | if there's a connection attempt to a server the application 17 | doesn't want, then the application can reject the connection and 18 | do a BucketDataSource.Close(). 19 | 20 | - feedback on high level versus low level API mixing... 21 | - why not provide full bucket url? 22 | - or, why not accept already configured and auth'ed 23 | go-couchbase connection instance? 
24 | - connection & auth approaches are varied and changing in future 25 | - if go-couchbase doesn't auto-heal, perhaps it should and not 26 | be a concern of cbdatasource? 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /cbdatasource/README.md: -------------------------------------------------------------------------------- 1 | cbdatasource 2 | ============ 3 | 4 | golang library to stream data from a Couchbase cluster 5 | 6 | The cbdatasource package is implemented using Couchbase DCP protocol 7 | and has auto-reconnecting and auto-restarting goroutines underneath 8 | the hood to provide a simple, high-level cluster-wide abstraction. By 9 | using cbdatasource, your application does not need to worry about 10 | connections or reconnections to individual server nodes or cluster 11 | topology changes, rebalance & failovers. The API starting point is 12 | NewBucketDataSource(). 13 | 14 | LICENSE: Apache 2.0 15 | 16 | ### Status & Links 17 | 18 | [![GoDoc](https://godoc.org/github.com/couchbase/go-couchbase/cbdatasource?status.svg)](https://godoc.org/github.com/steveyen/cbdatasource) 19 | -------------------------------------------------------------------------------- /cbdatasource/example/.gitignore: -------------------------------------------------------------------------------- 1 | example 2 | -------------------------------------------------------------------------------- /cbdatasource/example/dump.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 Couchbase, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); you 4 | // may not use this file except in compliance with the License. 
You 5 | // may obtain a copy of the License at 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, 9 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 | // implied. See the License for the specific language governing 11 | // permissions and limitations under the License. 12 | 13 | // +build !windows 14 | 15 | package main 16 | 17 | import ( 18 | "syscall" 19 | ) 20 | 21 | func dumpOnSignalForPlatform() { 22 | dumpOnSignal(syscall.SIGUSR2) 23 | } 24 | -------------------------------------------------------------------------------- /cbdatasource/example/dump_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 Couchbase, Inc. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the 4 | // License. You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, 7 | // software distributed under the License is distributed on an "AS 8 | // IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 9 | // express or implied. See the License for the specific language 10 | // governing permissions and limitations under the License. 11 | 12 | package main 13 | 14 | import ( 15 | "testing" 16 | ) 17 | 18 | func TestDump(t *testing.T) { 19 | // I guess we just make sure this doesn't crash. 20 | go dumpOnSignalForPlatform() 21 | } 22 | -------------------------------------------------------------------------------- /cbdatasource/example/dump_windows.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 Couchbase, Inc. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); you 4 | // may not use this file except in compliance with the License. You 5 | // may obtain a copy of the License at 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, 9 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 | // implied. See the License for the specific language governing 11 | // permissions and limitations under the License. 12 | 13 | // +build windows !darwin !freebsd !linux !openbsd !netbsd 14 | 15 | package main 16 | 17 | func dumpOnSignalForPlatform() { 18 | // No-op for windows. 19 | } 20 | -------------------------------------------------------------------------------- /cbdatasource/example/main.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 Couchbase, Inc. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the 4 | // License. You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, 7 | // software distributed under the License is distributed on an "AS 8 | // IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 9 | // express or implied. See the License for the specific language 10 | // governing permissions and limitations under the License. 
11 | 12 | package main 13 | 14 | import ( 15 | "encoding/json" 16 | "flag" 17 | "fmt" 18 | "log" 19 | "os" 20 | "os/signal" 21 | "reflect" 22 | "runtime" 23 | "runtime/pprof" 24 | "strconv" 25 | "strings" 26 | "sync" 27 | "time" 28 | 29 | "github.com/couchbase/go-couchbase" 30 | "github.com/couchbase/go-couchbase/cbdatasource" 31 | "github.com/couchbase/gomemcached" 32 | ) 33 | 34 | // Simple, memory-only sample program that uses the cbdatasource API's 35 | // to get data from a couchbase cluster using DCP. 36 | 37 | var verbose = flag.Int("verbose", 1, 38 | "verbose'ness of logging, where 0 is no logging") 39 | 40 | var serverURL = flag.String("serverURL", "http://localhost:8091", 41 | "couchbase server URL") 42 | var poolName = flag.String("poolName", "default", 43 | "pool name") 44 | var bucketName = flag.String("bucketName", "default", 45 | "bucket name") 46 | var bucketUUID = flag.String("bucketUUID", "", 47 | "bucket UUID") 48 | var vbucketIds = flag.String("vbucketIds", "", 49 | "comma separated vbucket id numbers; defaults to all vbucket id's") 50 | var authUser = flag.String("authUser", "", 51 | "auth user name (probably same as bucketName)") 52 | var authPswd = flag.String("authPswd", "", 53 | "auth password") 54 | 55 | var optionClusterManagerBackoffFactor = flag.Float64("optionClusterManagerBackoffFactor", 1.5, 56 | "factor to increase sleep time between retries to cluster manager") 57 | var optionClusterManagerSleepInitMS = flag.Int("optionClusterManagerSleepInitMS", 100, 58 | "initial sleep time for retries to cluster manager") 59 | var optionClusterManagerSleepMaxMS = flag.Int("optionClusterManagerSleepMaxMS", 1000, 60 | "max sleep time for retries to cluster manager") 61 | 62 | var optionDataManagerBackoffFactor = flag.Float64("optionDataManagerBackoffFactor", 1.5, 63 | "factor to increase sleep time between retries to data manager") 64 | var optionDataManagerSleepInitMS = flag.Int("optionDataManagerSleepInitMS", 100, 65 | "initial sleep time for 
retries to data manager") 66 | var optionDataManagerSleepMaxMS = flag.Int("optionDataManagerSleepMaxMS", 1000, 67 | "max sleep time for retries to data manager") 68 | 69 | var optionFeedBufferSizeBytes = flag.Int("optionFeedBufferSizeBytes", 20000000, 70 | "buffer size for flow control") 71 | var optionFeedBufferAckThreshold = flag.Float64("optionFeedBufferAckThreshold", 0.2, 72 | "percent (0-to-1.0) of buffer size before sending a flow control buffer-ack") 73 | 74 | var bds cbdatasource.BucketDataSource 75 | 76 | func main() { 77 | flag.Parse() 78 | 79 | go dumpOnSignalForPlatform() 80 | 81 | if *verbose > 0 { 82 | log.Printf("%s started", os.Args[0]) 83 | flag.VisitAll(func(f *flag.Flag) { log.Printf(" -%s=%s\n", f.Name, f.Value) }) 84 | log.Printf(" GOMAXPROCS=%d", runtime.GOMAXPROCS(-1)) 85 | } 86 | 87 | serverURLs := []string{*serverURL} 88 | 89 | vbucketIdsArr := []uint16(nil) // A nil means get all the vbuckets. 90 | if *vbucketIds != "" { 91 | vbucketIdsArr = []uint16{} 92 | for _, vbucketIdStr := range strings.Split(*vbucketIds, ",") { 93 | if vbucketIdStr != "" { 94 | vbucketId, err := strconv.Atoi(vbucketIdStr) 95 | if err != nil { 96 | log.Fatalf("error: could not parse vbucketId: %s", vbucketIdStr) 97 | } 98 | vbucketIdsArr = append(vbucketIdsArr, uint16(vbucketId)) 99 | } 100 | } 101 | if len(vbucketIdsArr) <= 0 { 102 | vbucketIdsArr = nil 103 | } 104 | } 105 | 106 | if *optionFeedBufferSizeBytes < 0 { 107 | log.Fatalf("error: optionFeedBufferSizeBytes must be >= 0") 108 | } 109 | 110 | options := &cbdatasource.BucketDataSourceOptions{ 111 | ClusterManagerBackoffFactor: float32(*optionClusterManagerBackoffFactor), 112 | ClusterManagerSleepInitMS: *optionClusterManagerSleepInitMS, 113 | ClusterManagerSleepMaxMS: *optionClusterManagerSleepMaxMS, 114 | 115 | DataManagerBackoffFactor: float32(*optionDataManagerBackoffFactor), 116 | DataManagerSleepInitMS: *optionDataManagerSleepInitMS, 117 | DataManagerSleepMaxMS: *optionDataManagerSleepMaxMS, 118 | 119 | 
FeedBufferSizeBytes: uint32(*optionFeedBufferSizeBytes), 120 | FeedBufferAckThreshold: float32(*optionFeedBufferAckThreshold), 121 | } 122 | 123 | var auth couchbase.AuthHandler = nil 124 | if *authUser != "" { 125 | auth = &authUserPswd{} 126 | } 127 | 128 | receiver := &ExampleReceiver{} 129 | 130 | var err error 131 | 132 | bds, err = cbdatasource.NewBucketDataSource(serverURLs, 133 | *poolName, *bucketName, *bucketUUID, vbucketIdsArr, auth, receiver, options) 134 | if err != nil { 135 | log.Fatalf(fmt.Sprintf("error: NewBucketDataSource, err: %v", err)) 136 | } 137 | 138 | if err = bds.Start(); err != nil { 139 | log.Fatalf(fmt.Sprintf("error: Start, err: %v", err)) 140 | } 141 | 142 | if *verbose > 0 { 143 | log.Printf("started bucket data source: %v", bds) 144 | } 145 | 146 | for { 147 | time.Sleep(1000 * time.Millisecond) 148 | reportStats(bds, false) 149 | } 150 | } 151 | 152 | type authUserPswd struct{} 153 | 154 | func (a authUserPswd) GetCredentials() (string, string, string) { 155 | return *authUser, *authPswd, "" 156 | } 157 | 158 | // ---------------------------------------------------------------- 159 | 160 | type ExampleReceiver struct { 161 | m sync.Mutex 162 | 163 | seqs map[uint16]uint64 // To track max seq #'s we received per vbucketId. 164 | meta map[uint16][]byte // To track metadata blob's per vbucketId. 
165 | } 166 | 167 | func (r *ExampleReceiver) OnError(err error) { 168 | if *verbose > 0 { 169 | log.Printf("error: %v", err) 170 | } 171 | reportStats(bds, true) 172 | } 173 | 174 | func (r *ExampleReceiver) DataUpdate(vbucketId uint16, key []byte, seq uint64, 175 | req *gomemcached.MCRequest) error { 176 | if *verbose > 1 { 177 | log.Printf("data-update: vbucketId: %d, key: %s, seq: %x, req: %#v", 178 | vbucketId, key, seq, req) 179 | } 180 | r.updateSeq(vbucketId, seq) 181 | return nil 182 | } 183 | 184 | func (r *ExampleReceiver) DataDelete(vbucketId uint16, key []byte, seq uint64, 185 | req *gomemcached.MCRequest) error { 186 | if *verbose > 1 { 187 | log.Printf("data-delete: vbucketId: %d, key: %s, seq: %x, req: %#v", 188 | vbucketId, key, seq, req) 189 | } 190 | r.updateSeq(vbucketId, seq) 191 | return nil 192 | } 193 | 194 | func (r *ExampleReceiver) SnapshotStart(vbucketId uint16, 195 | snapStart, snapEnd uint64, snapType uint32) error { 196 | if *verbose > 1 { 197 | log.Printf("snapshot-start: vbucketId: %d, snapStart: %x, snapEnd: %x, snapType: %x", 198 | vbucketId, snapStart, snapEnd, snapType) 199 | } 200 | return nil 201 | } 202 | 203 | func (r *ExampleReceiver) SetMetaData(vbucketId uint16, value []byte) error { 204 | if *verbose > 1 { 205 | log.Printf("set-metadata: vbucketId: %d, value: %s", vbucketId, value) 206 | } 207 | 208 | r.m.Lock() 209 | defer r.m.Unlock() 210 | 211 | if r.meta == nil { 212 | r.meta = make(map[uint16][]byte) 213 | } 214 | r.meta[vbucketId] = value 215 | 216 | return nil 217 | } 218 | 219 | func (r *ExampleReceiver) GetMetaData(vbucketId uint16) ( 220 | value []byte, lastSeq uint64, err error) { 221 | if *verbose > 1 { 222 | log.Printf("get-metadata: vbucketId: %d", vbucketId) 223 | } 224 | 225 | r.m.Lock() 226 | defer r.m.Unlock() 227 | 228 | value = []byte(nil) 229 | if r.meta != nil { 230 | value = r.meta[vbucketId] 231 | } 232 | 233 | if r.seqs != nil { 234 | lastSeq = r.seqs[vbucketId] 235 | } 236 | 237 | return value, 
lastSeq, nil 238 | } 239 | 240 | func (r *ExampleReceiver) Rollback(vbucketId uint16, rollbackSeq uint64) error { 241 | if *verbose > 0 { 242 | log.Printf("rollback: vbucketId: %d, rollbackSeq: %x", vbucketId, rollbackSeq) 243 | } 244 | 245 | return fmt.Errorf("unimpl-rollback") 246 | } 247 | 248 | // ---------------------------------------------------------------- 249 | 250 | func (r *ExampleReceiver) updateSeq(vbucketId uint16, seq uint64) { 251 | r.m.Lock() 252 | defer r.m.Unlock() 253 | 254 | if r.seqs == nil { 255 | r.seqs = make(map[uint16]uint64) 256 | } 257 | if r.seqs[vbucketId] < seq { 258 | r.seqs[vbucketId] = seq // Remember the max seq for GetMetaData(). 259 | } 260 | } 261 | 262 | // ---------------------------------------------------------------- 263 | 264 | var mutexStats sync.Mutex 265 | var lastStats = &cbdatasource.BucketDataSourceStats{} 266 | var currStats = &cbdatasource.BucketDataSourceStats{} 267 | 268 | func reportStats(b cbdatasource.BucketDataSource, force bool) { 269 | if *verbose <= 0 { 270 | return 271 | } 272 | 273 | mutexStats.Lock() 274 | defer mutexStats.Unlock() 275 | 276 | b.Stats(currStats) 277 | if force || !reflect.DeepEqual(lastStats, currStats) { 278 | buf, err := json.Marshal(currStats) 279 | if err == nil { 280 | log.Printf("%s", string(buf)) 281 | } 282 | lastStats, currStats = currStats, lastStats 283 | } 284 | } 285 | 286 | func dumpOnSignal(signals ...os.Signal) { 287 | c := make(chan os.Signal, 1) 288 | signal.Notify(c, signals...) 
289 | for _ = range c { 290 | reportStats(bds, true) 291 | 292 | log.Printf("dump: goroutine...") 293 | pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) 294 | log.Printf("dump: heap...") 295 | pprof.Lookup("heap").WriteTo(os.Stderr, 1) 296 | } 297 | } 298 | -------------------------------------------------------------------------------- /client_test.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import "testing" 4 | 5 | func TestWriteOptionsString(t *testing.T) { 6 | tests := []struct { 7 | opts WriteOptions 8 | exp string 9 | }{ 10 | {Raw, "raw"}, 11 | {AddOnly, "addonly"}, 12 | {Persist, "persist"}, 13 | {Indexable, "indexable"}, 14 | {Append, "append"}, 15 | {AddOnly | Raw, "raw|addonly"}, 16 | {0, "0x0"}, 17 | {Raw | AddOnly | Persist | Indexable | Append, 18 | "raw|addonly|persist|indexable|append"}, 19 | {Raw | 8192, "raw|0x2000"}, 20 | } 21 | 22 | for _, test := range tests { 23 | got := test.opts.String() 24 | if got != test.exp { 25 | t.Errorf("Expected %v, got %v", test.exp, got) 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /conn_pool.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "crypto/tls" 5 | "errors" 6 | "sync/atomic" 7 | "time" 8 | 9 | "github.com/couchbase/gomemcached" 10 | "github.com/couchbase/gomemcached/client" 11 | "github.com/couchbase/goutils/logging" 12 | ) 13 | 14 | // GenericMcdAuthHandler is a kind of AuthHandler that performs 15 | // special auth exchange (like non-standard auth, possibly followed by 16 | // select-bucket). 17 | type GenericMcdAuthHandler interface { 18 | AuthHandler 19 | AuthenticateMemcachedConn(host string, conn *memcached.Client) error 20 | } 21 | 22 | // Error raised when a connection can't be retrieved from a pool. 
23 | var TimeoutError = errors.New("timeout waiting to build connection") 24 | var errClosedPool = errors.New("the connection pool is closed") 25 | var errNoPool = errors.New("no connection pool") 26 | 27 | // Default timeout for retrieving a connection from the pool. 28 | var ConnPoolTimeout = time.Hour * 24 * 30 29 | 30 | // overflow connection closer cycle time 31 | var ConnCloserInterval = time.Second * 30 32 | 33 | // ConnPoolAvailWaitTime is the amount of time to wait for an existing 34 | // connection from the pool before considering the creation of a new 35 | // one. 36 | var ConnPoolAvailWaitTime = time.Millisecond 37 | 38 | type connectionPool struct { 39 | host string 40 | mkConn func(host string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error) 41 | auth AuthHandler 42 | connections chan *memcached.Client 43 | createsem chan bool 44 | bailOut chan bool 45 | poolSize int 46 | connCount uint64 47 | inUse bool 48 | encrypted bool 49 | tlsConfig *tls.Config 50 | bucket string 51 | } 52 | 53 | func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int, tlsConfig *tls.Config, bucket string, encrypted bool) *connectionPool { 54 | connSize := poolSize 55 | if closer { 56 | connSize += poolOverflow 57 | } 58 | rv := &connectionPool{ 59 | host: host, 60 | connections: make(chan *memcached.Client, connSize), 61 | createsem: make(chan bool, poolSize+poolOverflow), 62 | mkConn: defaultMkConn, 63 | auth: ah, 64 | poolSize: poolSize, 65 | bucket: bucket, 66 | encrypted: encrypted, 67 | } 68 | 69 | if encrypted { 70 | rv.tlsConfig = tlsConfig 71 | } 72 | 73 | if closer { 74 | rv.bailOut = make(chan bool, 1) 75 | go rv.connCloser() 76 | } 77 | return rv 78 | } 79 | 80 | // ConnPoolTimeout is notified whenever connections are acquired from a pool. 81 | var ConnPoolCallback func(host string, source string, start time.Time, err error) 82 | 83 | // Use regular in-the-clear connection if tlsConfig is nil. 
84 | // Use secure connection (TLS) if tlsConfig is set. 85 | func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error) { 86 | var features memcached.Features 87 | 88 | var conn *memcached.Client 89 | var err error 90 | if tlsConfig == nil { 91 | conn, err = memcached.Connect("tcp", host) 92 | } else { 93 | conn, err = memcached.ConnectTLS("tcp", host, tlsConfig) 94 | } 95 | 96 | if err != nil { 97 | return nil, err 98 | } 99 | 100 | if DefaultTimeout > 0 { 101 | conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout)) 102 | } 103 | 104 | if TCPKeepalive == true { 105 | conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second) 106 | } 107 | 108 | if EnableMutationToken == true { 109 | features = append(features, memcached.FeatureMutationToken) 110 | } 111 | if EnableDataType == true { 112 | features = append(features, memcached.FeatureDataType) 113 | } 114 | 115 | if EnableXattr == true { 116 | features = append(features, memcached.FeatureXattr) 117 | } 118 | 119 | if EnableCollections { 120 | features = append(features, memcached.FeatureCollections) 121 | } 122 | 123 | if len(features) > 0 { 124 | res, err := conn.EnableFeatures(features) 125 | if err != nil && isTimeoutError(err) { 126 | conn.Close() 127 | return nil, err 128 | } 129 | 130 | if err != nil || res.Status != gomemcached.SUCCESS { 131 | logging.Warnf("Unable to enable features %v", err) 132 | } 133 | } 134 | 135 | if gah, ok := ah.(GenericMcdAuthHandler); ok { 136 | err = gah.AuthenticateMemcachedConn(host, conn) 137 | if err != nil { 138 | conn.Close() 139 | return nil, err 140 | } 141 | 142 | if DefaultTimeout > 0 { 143 | conn.SetDeadline(noDeadline) 144 | } 145 | 146 | return conn, nil 147 | } 148 | name, pass, bucket := ah.GetCredentials() 149 | if bucket == "" { 150 | // Authenticator does not know specific bucket. 
151 | bucket = bucketName 152 | } 153 | 154 | if name != "default" { 155 | _, err = conn.Auth(name, pass) 156 | if err != nil { 157 | conn.Close() 158 | return nil, err 159 | } 160 | // Select bucket (Required for cb_auth creds) 161 | // Required when doing auth with _admin credentials 162 | if bucket != "" && bucket != name { 163 | _, err = conn.SelectBucket(bucket) 164 | if err != nil { 165 | conn.Close() 166 | return nil, err 167 | } 168 | } 169 | } 170 | 171 | if DefaultTimeout > 0 { 172 | conn.SetDeadline(noDeadline) 173 | } 174 | 175 | return conn, nil 176 | } 177 | 178 | func (cp *connectionPool) Close() (err error) { 179 | defer func() { 180 | if recover() != nil { 181 | err = errors.New("connectionPool.Close error") 182 | } 183 | }() 184 | if cp.bailOut != nil { 185 | 186 | // defensively, we won't wait if the channel is full 187 | select { 188 | case cp.bailOut <- false: 189 | default: 190 | } 191 | } 192 | close(cp.connections) 193 | for c := range cp.connections { 194 | c.Close() 195 | } 196 | return 197 | } 198 | 199 | func (cp *connectionPool) Node() string { 200 | return cp.host 201 | } 202 | 203 | func (cp *connectionPool) GetWithTimeout(d time.Duration) (rv *memcached.Client, err error) { 204 | if cp == nil { 205 | return nil, errNoPool 206 | } 207 | 208 | path := "" 209 | 210 | if ConnPoolCallback != nil { 211 | defer func(path *string, start time.Time) { 212 | ConnPoolCallback(cp.host, *path, start, err) 213 | }(&path, time.Now()) 214 | } 215 | 216 | path = "short-circuit" 217 | 218 | // short-circuit available connetions. 
219 | select { 220 | case rv, isopen := <-cp.connections: 221 | if !isopen { 222 | return nil, errClosedPool 223 | } 224 | atomic.AddUint64(&cp.connCount, 1) 225 | return rv, nil 226 | default: 227 | } 228 | 229 | t := time.NewTimer(ConnPoolAvailWaitTime) 230 | defer t.Stop() 231 | 232 | // Try to grab an available connection within 1ms 233 | select { 234 | case rv, isopen := <-cp.connections: 235 | path = "avail1" 236 | if !isopen { 237 | return nil, errClosedPool 238 | } 239 | atomic.AddUint64(&cp.connCount, 1) 240 | return rv, nil 241 | case <-t.C: 242 | // No connection came around in time, let's see 243 | // whether we can get one or build a new one first. 244 | t.Reset(d) // Reuse the timer for the full timeout. 245 | select { 246 | case rv, isopen := <-cp.connections: 247 | path = "avail2" 248 | if !isopen { 249 | return nil, errClosedPool 250 | } 251 | atomic.AddUint64(&cp.connCount, 1) 252 | return rv, nil 253 | case cp.createsem <- true: 254 | path = "create" 255 | // Build a connection if we can't get a real one. 256 | // This can potentially be an overflow connection, or 257 | // a pooled connection. 258 | rv, err := cp.mkConn(cp.host, cp.auth, cp.tlsConfig, cp.bucket) 259 | if err != nil { 260 | // On error, release our create hold 261 | <-cp.createsem 262 | } else { 263 | atomic.AddUint64(&cp.connCount, 1) 264 | } 265 | return rv, err 266 | case <-t.C: 267 | return nil, ErrTimeout 268 | } 269 | } 270 | } 271 | 272 | func (cp *connectionPool) Get() (*memcached.Client, error) { 273 | return cp.GetWithTimeout(ConnPoolTimeout) 274 | } 275 | 276 | func (cp *connectionPool) Return(c *memcached.Client) { 277 | if c == nil { 278 | return 279 | } 280 | 281 | if cp == nil { 282 | c.Close() 283 | } 284 | 285 | if c.IsHealthy() { 286 | defer func() { 287 | if recover() != nil { 288 | // This happens when the pool has already been 289 | // closed and we're trying to return a 290 | // connection to it anyway. Just close the 291 | // connection. 
292 | c.Close() 293 | } 294 | }() 295 | 296 | select { 297 | case cp.connections <- c: 298 | default: 299 | <-cp.createsem 300 | c.Close() 301 | } 302 | } else { 303 | <-cp.createsem 304 | c.Close() 305 | } 306 | } 307 | 308 | // give the ability to discard a connection from a pool 309 | // useful for ditching connections to the wrong node after a rebalance 310 | func (cp *connectionPool) Discard(c *memcached.Client) { 311 | <-cp.createsem 312 | c.Close() 313 | } 314 | 315 | // asynchronous connection closer 316 | func (cp *connectionPool) connCloser() { 317 | var connCount uint64 318 | 319 | t := time.NewTimer(ConnCloserInterval) 320 | defer t.Stop() 321 | 322 | for { 323 | connCount = cp.connCount 324 | 325 | // we don't exist anymore! bail out! 326 | select { 327 | case <-cp.bailOut: 328 | return 329 | case <-t.C: 330 | } 331 | t.Reset(ConnCloserInterval) 332 | 333 | // no overflow connections open or sustained requests for connections 334 | // nothing to do until the next cycle 335 | if len(cp.connections) <= cp.poolSize || 336 | ConnCloserInterval/ConnPoolAvailWaitTime < time.Duration(cp.connCount-connCount) { 337 | continue 338 | } 339 | 340 | // close overflow connections now that they are not needed 341 | for c := range cp.connections { 342 | select { 343 | case <-cp.bailOut: 344 | return 345 | default: 346 | } 347 | 348 | // bail out if close did not work out 349 | if !cp.connCleanup(c) { 350 | return 351 | } 352 | if len(cp.connections) <= cp.poolSize { 353 | break 354 | } 355 | } 356 | } 357 | } 358 | 359 | // close connection with recovery on error 360 | func (cp *connectionPool) connCleanup(c *memcached.Client) (rv bool) { 361 | 362 | // just in case we are closing a connection after 363 | // bailOut has been sent but we haven't yet read it 364 | defer func() { 365 | if recover() != nil { 366 | rv = false 367 | } 368 | }() 369 | rv = true 370 | 371 | c.Close() 372 | <-cp.createsem 373 | return 374 | } 375 | 376 | func (cp *connectionPool) 
StartTapFeed(args *memcached.TapArguments) (*memcached.TapFeed, error) { 377 | if cp == nil { 378 | return nil, errNoPool 379 | } 380 | mc, err := cp.Get() 381 | if err != nil { 382 | return nil, err 383 | } 384 | 385 | // A connection can't be used after TAP; Dont' count it against the 386 | // connection pool capacity 387 | <-cp.createsem 388 | 389 | return mc.StartTapFeed(*args) 390 | } 391 | 392 | const DEFAULT_WINDOW_SIZE = 20 * 1024 * 1024 // 20 Mb 393 | 394 | func (cp *connectionPool) StartUprFeed(name string, sequence uint32, dcp_buffer_size uint32, data_chan_size int) (*memcached.UprFeed, error) { 395 | if cp == nil { 396 | return nil, errNoPool 397 | } 398 | mc, err := cp.Get() 399 | if err != nil { 400 | return nil, err 401 | } 402 | 403 | // A connection can't be used after it has been allocated to UPR; 404 | // Dont' count it against the connection pool capacity 405 | <-cp.createsem 406 | 407 | uf, err := mc.NewUprFeed() 408 | if err != nil { 409 | return nil, err 410 | } 411 | 412 | if err := uf.UprOpen(name, sequence, dcp_buffer_size); err != nil { 413 | return nil, err 414 | } 415 | 416 | if err := uf.StartFeedWithConfig(data_chan_size); err != nil { 417 | return nil, err 418 | } 419 | 420 | return uf, nil 421 | } 422 | -------------------------------------------------------------------------------- /conn_pool_test.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "crypto/tls" 5 | "errors" 6 | "io" 7 | "testing" 8 | "time" 9 | 10 | "github.com/couchbase/gomemcached" 11 | "github.com/couchbase/gomemcached/client" 12 | ) 13 | 14 | type testT struct { 15 | closed bool 16 | } 17 | 18 | func (t testT) Read([]byte) (int, error) { 19 | return 0, io.EOF 20 | } 21 | 22 | func (t testT) Write([]byte) (int, error) { 23 | return 0, io.EOF 24 | } 25 | 26 | func (t testT) SetReadDeadline(time.Time) error { 27 | return nil 28 | } 29 | 30 | func (t testT) SetDeadline(time.Time) error { 31 
| return nil 32 | } 33 | 34 | var errAlreadyClosed = errors.New("already closed") 35 | 36 | func (t *testT) Close() error { 37 | if t.closed { 38 | return errAlreadyClosed 39 | } 40 | t.closed = true 41 | return nil 42 | } 43 | 44 | func testMkConn(h string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error) { 45 | return memcached.Wrap(&testT{}) 46 | } 47 | 48 | func TestConnPool(t *testing.T) { 49 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 6, nil, "", false) 50 | cp.mkConn = testMkConn 51 | 52 | seenClients := map[*memcached.Client]bool{} 53 | 54 | // build some connections 55 | 56 | for i := 0; i < 5; i++ { 57 | sc, err := cp.Get() 58 | if err != nil { 59 | t.Fatalf("Error getting connection from pool: %v", err) 60 | } 61 | seenClients[sc] = true 62 | } 63 | 64 | if len(cp.connections) != 0 { 65 | t.Errorf("Expected 0 connections after gets, got %v", 66 | len(cp.connections)) 67 | } 68 | 69 | // return them 70 | for k := range seenClients { 71 | cp.Return(k) 72 | } 73 | 74 | if len(cp.connections) != 3 { 75 | t.Errorf("Expected 3 connections after returning them, got %v", 76 | len(cp.connections)) 77 | } 78 | 79 | // Try again. 80 | matched := 0 81 | grabbed := []*memcached.Client{} 82 | for i := 0; i < 5; i++ { 83 | sc, err := cp.Get() 84 | if err != nil { 85 | t.Fatalf("Error getting connection from pool: %v", err) 86 | } 87 | if seenClients[sc] { 88 | matched++ 89 | } 90 | grabbed = append(grabbed, sc) 91 | } 92 | 93 | if matched != 3 { 94 | t.Errorf("Expected to match 3 conns, matched %v", matched) 95 | } 96 | 97 | for _, c := range grabbed { 98 | cp.Return(c) 99 | } 100 | 101 | // Connect write error. 
102 | sc, err := cp.Get() 103 | if err != nil { 104 | t.Fatalf("Error getting a connection: %v", err) 105 | } 106 | err = sc.Transmit(&gomemcached.MCRequest{}) 107 | if err == nil { 108 | t.Fatalf("Expected error sending a request") 109 | } 110 | if sc.IsHealthy() { 111 | t.Fatalf("Expected unhealthy connection") 112 | } 113 | cp.Return(sc) 114 | 115 | if len(cp.connections) != 2 { 116 | t.Errorf("Expected to have 2 conns, have %v", len(cp.connections)) 117 | } 118 | 119 | err = cp.Close() 120 | if err != nil { 121 | t.Errorf("Expected clean close, got %v", err) 122 | } 123 | 124 | err = cp.Close() 125 | if err == nil { 126 | t.Errorf("Expected error on second pool close") 127 | } 128 | } 129 | 130 | func TestConnPoolSoonAvailable(t *testing.T) { 131 | defer func(d time.Duration) { ConnPoolAvailWaitTime = d }(ConnPoolAvailWaitTime) 132 | defer func() { ConnPoolCallback = nil }() 133 | 134 | m := map[string]int{} 135 | timings := []time.Duration{} 136 | ConnPoolCallback = func(host string, source string, start time.Time, err error) { 137 | m[source] = m[source] + 1 138 | timings = append(timings, time.Since(start)) 139 | } 140 | 141 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 4, nil, "", false) 142 | cp.mkConn = testMkConn 143 | 144 | seenClients := map[*memcached.Client]bool{} 145 | 146 | // build some connections 147 | 148 | var aClient *memcached.Client 149 | for { 150 | sc, err := cp.GetWithTimeout(time.Millisecond) 151 | if err == ErrTimeout { 152 | break 153 | } 154 | if err != nil { 155 | t.Fatalf("Error getting connection from pool: %v", err) 156 | } 157 | aClient = sc 158 | seenClients[sc] = true 159 | } 160 | 161 | time.AfterFunc(time.Millisecond, func() { cp.Return(aClient) }) 162 | 163 | ConnPoolAvailWaitTime = time.Second 164 | 165 | sc, err := cp.Get() 166 | if err != nil || sc != aClient { 167 | t.Errorf("Expected a successful connection, got %v/%v", sc, err) 168 | } 169 | 170 | // Try again, but let's close it while we're stuck in 
secondary wait 171 | time.AfterFunc(time.Millisecond, func() { cp.Close() }) 172 | 173 | sc, err = cp.Get() 174 | if err != errClosedPool { 175 | t.Errorf("Expected a closed pool, got %v/%v", sc, err) 176 | } 177 | 178 | t.Logf("Callback report: %v, timings: %v", m, timings) 179 | } 180 | 181 | func TestConnPoolClosedFull(t *testing.T) { 182 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 4, nil, "", false) 183 | cp.mkConn = testMkConn 184 | 185 | seenClients := map[*memcached.Client]bool{} 186 | 187 | // build some connections 188 | 189 | for { 190 | sc, err := cp.GetWithTimeout(time.Millisecond) 191 | if err == ErrTimeout { 192 | break 193 | } 194 | if err != nil { 195 | t.Fatalf("Error getting connection from pool: %v", err) 196 | } 197 | seenClients[sc] = true 198 | } 199 | 200 | time.AfterFunc(2*time.Millisecond, func() { cp.Close() }) 201 | 202 | sc, err := cp.Get() 203 | if err != errClosedPool { 204 | t.Errorf("Expected closed pool error after closed, got %v/%v", sc, err) 205 | } 206 | } 207 | 208 | func TestConnPoolWaitFull(t *testing.T) { 209 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 4, nil, "", false) 210 | cp.mkConn = testMkConn 211 | 212 | seenClients := map[*memcached.Client]bool{} 213 | 214 | // build some connections 215 | 216 | var aClient *memcached.Client 217 | for { 218 | sc, err := cp.GetWithTimeout(time.Millisecond) 219 | if err == ErrTimeout { 220 | break 221 | } 222 | if err != nil { 223 | t.Fatalf("Error getting connection from pool: %v", err) 224 | } 225 | aClient = sc 226 | seenClients[sc] = true 227 | } 228 | 229 | time.AfterFunc(2*time.Millisecond, func() { cp.Return(aClient) }) 230 | 231 | sc, err := cp.Get() 232 | if err != nil || sc != aClient { 233 | t.Errorf("Expected a successful connection, got %v/%v", sc, err) 234 | } 235 | } 236 | 237 | func TestConnPoolWaitFailFull(t *testing.T) { 238 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 4, nil, "", false) 239 | cp.mkConn = testMkConn 240 | 241 | 
seenClients := map[*memcached.Client]bool{} 242 | 243 | // build some connections 244 | 245 | var aClient *memcached.Client 246 | for { 247 | sc, err := cp.GetWithTimeout(time.Millisecond) 248 | if err == ErrTimeout { 249 | break 250 | } 251 | if err != nil { 252 | t.Fatalf("Error getting connection from pool: %v", err) 253 | } 254 | aClient = sc 255 | seenClients[sc] = true 256 | } 257 | 258 | // causes failure 259 | aClient.Transmit(&gomemcached.MCRequest{}) 260 | time.AfterFunc(2*time.Millisecond, func() { cp.Return(aClient) }) 261 | 262 | sc, err := cp.Get() 263 | if err != nil || sc == aClient { 264 | t.Errorf("Expected a new successful connection, got %v/%v", sc, err) 265 | } 266 | } 267 | 268 | func TestConnPoolWaitDoubleFailFull(t *testing.T) { 269 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 4, nil, "", false) 270 | cp.mkConn = testMkConn 271 | 272 | seenClients := map[*memcached.Client]bool{} 273 | 274 | // build some connections 275 | 276 | var aClient *memcached.Client 277 | for { 278 | sc, err := cp.GetWithTimeout(time.Millisecond) 279 | if err == ErrTimeout { 280 | break 281 | } 282 | if err != nil { 283 | t.Fatalf("Error getting connection from pool: %v", err) 284 | } 285 | aClient = sc 286 | seenClients[sc] = true 287 | } 288 | 289 | cp.mkConn = func(h string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error) { 290 | return nil, io.EOF 291 | } 292 | 293 | // causes failure 294 | aClient.Transmit(&gomemcached.MCRequest{}) 295 | time.AfterFunc(2*time.Millisecond, func() { cp.Return(aClient) }) 296 | 297 | sc, err := cp.Get() 298 | if err != io.EOF { 299 | t.Errorf("Expected to fail getting a new connection, got %v/%v", sc, err) 300 | } 301 | } 302 | 303 | func TestConnPoolNil(t *testing.T) { 304 | var cp *connectionPool 305 | c, err := cp.Get() 306 | if err == nil { 307 | t.Errorf("Expected an error getting from nil, got %v", c) 308 | } 309 | 310 | // This just shouldn't error. 
311 | cp.Return(c) 312 | } 313 | 314 | func TestConnPoolClosed(t *testing.T) { 315 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 6, nil, "", false) 316 | cp.mkConn = testMkConn 317 | c, err := cp.Get() 318 | if err != nil { 319 | t.Fatal(err) 320 | } 321 | cp.Close() 322 | 323 | // This should cause the connection to be closed 324 | cp.Return(c) 325 | if err = c.Close(); err != errAlreadyClosed { 326 | t.Errorf("Expected to close connection, wasn't closed (%v)", err) 327 | } 328 | 329 | sc, err := cp.Get() 330 | if err != errClosedPool { 331 | t.Errorf("Expected closed pool error after closed, got %v/%v", sc, err) 332 | } 333 | } 334 | 335 | func TestConnPoolCloseWrongPool(t *testing.T) { 336 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 6, nil, "", false) 337 | cp.mkConn = testMkConn 338 | c, err := cp.Get() 339 | if err != nil { 340 | t.Fatal(err) 341 | } 342 | cp.Close() 343 | 344 | // Return to a different pool. Should still be OK. 345 | cp = newConnectionPool("h", &basicAuth{}, false, 3, 6, nil, "", false) 346 | cp.mkConn = testMkConn 347 | c, err = cp.Get() 348 | if err != nil { 349 | t.Fatal(err) 350 | } 351 | cp.Close() 352 | 353 | cp.Return(c) 354 | if err = c.Close(); err != errAlreadyClosed { 355 | t.Errorf("Expected to close connection, wasn't closed (%v)", err) 356 | } 357 | } 358 | 359 | func TestConnPoolCloseNil(t *testing.T) { 360 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 6, nil, "", false) 361 | cp.mkConn = testMkConn 362 | c, err := cp.Get() 363 | if err != nil { 364 | t.Fatal(err) 365 | } 366 | cp.Close() 367 | 368 | cp = nil 369 | cp.Return(c) 370 | if err = c.Close(); err != errAlreadyClosed { 371 | t.Errorf("Expected to close connection, wasn't closed (%v)", err) 372 | } 373 | } 374 | 375 | func TestConnPoolStartTapFeed(t *testing.T) { 376 | var cp *connectionPool 377 | args := memcached.DefaultTapArguments() 378 | tf, err := cp.StartTapFeed(&args) 379 | if err != errNoPool { 380 | t.Errorf("Expected no pool 
error with no pool, got %v/%v", tf, err) 381 | } 382 | 383 | cp = newConnectionPool("h", &basicAuth{}, false, 3, 6, nil, "", false) 384 | cp.mkConn = testMkConn 385 | 386 | tf, err = cp.StartTapFeed(&args) 387 | if err != io.EOF { 388 | t.Errorf("Expected to fail a tap feed with EOF, got %v/%v", tf, err) 389 | } 390 | 391 | cp.Close() 392 | tf, err = cp.StartTapFeed(&args) 393 | if err != errClosedPool { 394 | t.Errorf("Expected a closed pool, got %v/%v", tf, err) 395 | } 396 | } 397 | 398 | func BenchmarkBestCaseCPGet(b *testing.B) { 399 | cp := newConnectionPool("h", &basicAuth{}, false, 3, 6, nil, "", false) 400 | cp.mkConn = testMkConn 401 | 402 | for i := 0; i < b.N; i++ { 403 | c, err := cp.Get() 404 | if err != nil { 405 | b.Fatalf("Error getting from pool: %v", err) 406 | } 407 | cp.Return(c) 408 | } 409 | } 410 | -------------------------------------------------------------------------------- /ddocs.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "github.com/couchbase/goutils/logging" 8 | "io/ioutil" 9 | "net/http" 10 | ) 11 | 12 | // ViewDefinition represents a single view within a design document. 13 | type ViewDefinition struct { 14 | Map string `json:"map"` 15 | Reduce string `json:"reduce,omitempty"` 16 | } 17 | 18 | // DDoc is the document body of a design document specifying a view. 19 | type DDoc struct { 20 | Language string `json:"language,omitempty"` 21 | Views map[string]ViewDefinition `json:"views"` 22 | } 23 | 24 | // DDocsResult represents the result from listing the design 25 | // documents. 
26 | type DDocsResult struct { 27 | Rows []struct { 28 | DDoc struct { 29 | Meta map[string]interface{} 30 | JSON DDoc 31 | } `json:"doc"` 32 | } `json:"rows"` 33 | } 34 | 35 | // GetDDocs lists all design documents 36 | func (b *Bucket) GetDDocs() (DDocsResult, error) { 37 | var ddocsResult DDocsResult 38 | b.RLock() 39 | pool := b.pool 40 | uri := b.DDocs.URI 41 | b.RUnlock() 42 | 43 | // MB-23555 ephemeral buckets have no ddocs 44 | if uri == "" { 45 | return DDocsResult{}, nil 46 | } 47 | 48 | err := pool.client.parseURLResponse(uri, &ddocsResult) 49 | if err != nil { 50 | return DDocsResult{}, err 51 | } 52 | return ddocsResult, nil 53 | } 54 | 55 | func (b *Bucket) GetDDocWithRetry(docname string, into interface{}) error { 56 | ddocURI := fmt.Sprintf("/%s/_design/%s", b.GetName(), docname) 57 | err := b.parseAPIResponse(ddocURI, &into) 58 | if err != nil { 59 | return err 60 | } 61 | return nil 62 | } 63 | 64 | func (b *Bucket) GetDDocsWithRetry() (DDocsResult, error) { 65 | var ddocsResult DDocsResult 66 | b.RLock() 67 | uri := b.DDocs.URI 68 | b.RUnlock() 69 | 70 | // MB-23555 ephemeral buckets have no ddocs 71 | if uri == "" { 72 | return DDocsResult{}, nil 73 | } 74 | 75 | err := b.parseURLResponse(uri, &ddocsResult) 76 | if err != nil { 77 | return DDocsResult{}, err 78 | } 79 | return ddocsResult, nil 80 | } 81 | 82 | func (b *Bucket) ddocURL(docname string) (string, error) { 83 | u, err := b.randomBaseURL() 84 | if err != nil { 85 | return "", err 86 | } 87 | u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname) 88 | return u.String(), nil 89 | } 90 | 91 | func (b *Bucket) ddocURLNext(nodeId int, docname string) (string, int, error) { 92 | u, selected, err := b.randomNextURL(nodeId) 93 | if err != nil { 94 | return "", -1, err 95 | } 96 | u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname) 97 | return u.String(), selected, nil 98 | } 99 | 100 | const ABS_MAX_RETRIES = 10 101 | const ABS_MIN_RETRIES = 3 102 | 103 | func (b *Bucket) 
getMaxRetries() (int, error) { 104 | 105 | maxRetries := len(b.Nodes()) 106 | 107 | if maxRetries == 0 { 108 | return 0, fmt.Errorf("No available Couch rest URLs") 109 | } 110 | 111 | if maxRetries > ABS_MAX_RETRIES { 112 | maxRetries = ABS_MAX_RETRIES 113 | } else if maxRetries < ABS_MIN_RETRIES { 114 | maxRetries = ABS_MIN_RETRIES 115 | } 116 | 117 | return maxRetries, nil 118 | } 119 | 120 | // PutDDoc installs a design document. 121 | func (b *Bucket) PutDDoc(docname string, value interface{}) error { 122 | 123 | var Err error 124 | 125 | maxRetries, err := b.getMaxRetries() 126 | if err != nil { 127 | return err 128 | } 129 | 130 | lastNode := START_NODE_ID 131 | 132 | for retryCount := 0; retryCount < maxRetries; retryCount++ { 133 | 134 | Err = nil 135 | 136 | ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname) 137 | if err != nil { 138 | return err 139 | } 140 | 141 | lastNode = selectedNode 142 | 143 | logging.Infof(" Trying with selected node %d", selectedNode) 144 | j, err := json.Marshal(value) 145 | if err != nil { 146 | return err 147 | } 148 | 149 | req, err := http.NewRequest("PUT", ddocU, bytes.NewReader(j)) 150 | if err != nil { 151 | return err 152 | } 153 | req.Header.Set("Content-Type", "application/json") 154 | err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */)) 155 | if err != nil { 156 | return err 157 | } 158 | 159 | res, err := doHTTPRequest(req) 160 | if err != nil { 161 | return err 162 | } 163 | 164 | if res.StatusCode != 201 { 165 | body, _ := ioutil.ReadAll(res.Body) 166 | Err = fmt.Errorf("error installing view: %v / %s", 167 | res.Status, body) 168 | logging.Errorf(" Error in PutDDOC %v. Retrying...", Err) 169 | res.Body.Close() 170 | b.Refresh() 171 | continue 172 | } 173 | 174 | res.Body.Close() 175 | break 176 | } 177 | 178 | return Err 179 | } 180 | 181 | // GetDDoc retrieves a specific a design doc. 
182 | func (b *Bucket) GetDDoc(docname string, into interface{}) error { 183 | var Err error 184 | var res *http.Response 185 | 186 | maxRetries, err := b.getMaxRetries() 187 | if err != nil { 188 | return err 189 | } 190 | 191 | lastNode := START_NODE_ID 192 | for retryCount := 0; retryCount < maxRetries; retryCount++ { 193 | 194 | Err = nil 195 | ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname) 196 | if err != nil { 197 | return err 198 | } 199 | 200 | lastNode = selectedNode 201 | logging.Infof(" Trying with selected node %d", selectedNode) 202 | 203 | req, err := http.NewRequest("GET", ddocU, nil) 204 | if err != nil { 205 | return err 206 | } 207 | req.Header.Set("Content-Type", "application/json") 208 | err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */)) 209 | if err != nil { 210 | return err 211 | } 212 | 213 | res, err = doHTTPRequest(req) 214 | if err != nil { 215 | return err 216 | } 217 | if res.StatusCode != 200 { 218 | body, _ := ioutil.ReadAll(res.Body) 219 | Err = fmt.Errorf("error reading view: %v / %s", 220 | res.Status, body) 221 | logging.Errorf(" Error in GetDDOC %v Retrying...", Err) 222 | b.Refresh() 223 | res.Body.Close() 224 | continue 225 | } 226 | defer res.Body.Close() 227 | break 228 | } 229 | 230 | if Err != nil { 231 | return Err 232 | } 233 | 234 | d := json.NewDecoder(res.Body) 235 | return d.Decode(into) 236 | } 237 | 238 | // DeleteDDoc removes a design document. 
239 | func (b *Bucket) DeleteDDoc(docname string) error { 240 | 241 | var Err error 242 | 243 | maxRetries, err := b.getMaxRetries() 244 | if err != nil { 245 | return err 246 | } 247 | 248 | lastNode := START_NODE_ID 249 | 250 | for retryCount := 0; retryCount < maxRetries; retryCount++ { 251 | 252 | Err = nil 253 | ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname) 254 | if err != nil { 255 | return err 256 | } 257 | 258 | lastNode = selectedNode 259 | logging.Infof(" Trying with selected node %d", selectedNode) 260 | 261 | req, err := http.NewRequest("DELETE", ddocU, nil) 262 | if err != nil { 263 | return err 264 | } 265 | req.Header.Set("Content-Type", "application/json") 266 | err = maybeAddAuth(req, b.authHandler(false /* bucket not already locked */)) 267 | if err != nil { 268 | return err 269 | } 270 | 271 | res, err := doHTTPRequest(req) 272 | if err != nil { 273 | return err 274 | } 275 | if res.StatusCode != 200 { 276 | body, _ := ioutil.ReadAll(res.Body) 277 | Err = fmt.Errorf("error deleting view : %v / %s", res.Status, body) 278 | logging.Errorf(" Error in DeleteDDOC %v. Retrying ... 
", Err) 279 | b.Refresh() 280 | res.Body.Close() 281 | continue 282 | } 283 | 284 | res.Body.Close() 285 | break 286 | } 287 | return Err 288 | } 289 | -------------------------------------------------------------------------------- /examples/basic/basic.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "net/url" 8 | "os" 9 | 10 | "github.com/couchbase/go-couchbase" 11 | ) 12 | 13 | func mf(err error, msg string) { 14 | if err != nil { 15 | log.Fatalf("%v: %v", msg, err) 16 | } 17 | } 18 | 19 | func main() { 20 | bname := flag.String("bucket", "", 21 | "bucket to connect to (defaults to username)") 22 | 23 | flag.Usage = func() { 24 | fmt.Fprintf(os.Stderr, 25 | "%v [flags] http://user:pass@host:8091/\n\nFlags:\n", 26 | os.Args[0]) 27 | flag.PrintDefaults() 28 | os.Exit(64) 29 | } 30 | 31 | flag.Parse() 32 | 33 | if flag.NArg() < 1 { 34 | flag.Usage() 35 | } 36 | 37 | u, err := url.Parse(flag.Arg(0)) 38 | mf(err, "parse") 39 | 40 | if *bname == "" && u.User != nil { 41 | *bname = u.User.Username() 42 | } 43 | 44 | c, err := couchbase.Connect(u.String()) 45 | mf(err, "connect - "+u.String()) 46 | 47 | p, err := c.GetPool("default") 48 | mf(err, "pool") 49 | 50 | b, err := p.GetBucket(*bname) 51 | mf(err, "bucket") 52 | 53 | err = b.Set(",k", 90, map[string]interface{}{"x": 1}) 54 | mf(err, "set") 55 | 56 | ob := map[string]interface{}{} 57 | err = b.Get(",k", &ob) 58 | mf(err, "get") 59 | } 60 | -------------------------------------------------------------------------------- /examples/bucketlist/bucketlist.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "net/url" 8 | "os" 9 | 10 | "github.com/couchbase/go-couchbase" 11 | ) 12 | 13 | func mf(err error, msg string) { 14 | if err != nil { 15 | log.Fatalf("%v: %v", msg, err) 16 | } 17 | } 18 | 19 | func main() { 
20 | 21 | flag.Usage = func() { 22 | fmt.Fprintf(os.Stderr, 23 | "%v [flags] http://user:pass@host:8091/\n\nFlags:\n", 24 | os.Args[0]) 25 | flag.PrintDefaults() 26 | os.Exit(64) 27 | } 28 | 29 | flag.Parse() 30 | 31 | if flag.NArg() < 1 { 32 | flag.Usage() 33 | } 34 | 35 | u, err := url.Parse(flag.Arg(0)) 36 | mf(err, "parse") 37 | 38 | bucketInfo, err := couchbase.GetBucketList(u.String()) 39 | fmt.Printf("List of buckets and password %v", bucketInfo) 40 | 41 | //connect to a gamesim-sample 42 | client, err := couchbase.Connect(u.String()) 43 | if err != nil { 44 | fmt.Printf("Connect failed %v", err) 45 | return 46 | } 47 | 48 | cbpool, err := client.GetPool("default") 49 | if err != nil { 50 | fmt.Printf("Failed to connect to default pool %v", err) 51 | return 52 | } 53 | 54 | for _, bi := range bucketInfo { 55 | var cbbucket *couchbase.Bucket 56 | 57 | cbbucket, err = cbpool.GetBucketWithAuth(bi.Name, bi.User, bi.Password) 58 | 59 | if err != nil { 60 | fmt.Printf("Failed to connect to bucket %s %v", bi.Name, err) 61 | return 62 | } 63 | 64 | err = cbbucket.Set("k1", 0, "value") 65 | if err != nil { 66 | fmt.Printf("set failed error %v \n\n", err) 67 | return 68 | } 69 | 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /examples/cb_auth/example.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "github.com/couchbase/cbauth" 7 | "github.com/couchbase/go-couchbase" 8 | "log" 9 | "net/url" 10 | ) 11 | 12 | var serverURL = flag.String("serverURL", "http://localhost:9000", 13 | "couchbase server URL") 14 | var poolName = flag.String("poolName", "default", 15 | "pool name") 16 | var bucketName = flag.String("bucketName", "default", 17 | "bucket name") 18 | var authUser = flag.String("authUser", "", 19 | "auth user name (probably same as bucketName)") 20 | var authPswd = flag.String("authPswd", "", 21 | "auth password") 
22 | 23 | func main() { 24 | 25 | flag.Parse() 26 | /* 27 | NOTE. This example requires the following environment variables to be set. 28 | 29 | CBAUTH_REVRPC_URL 30 | 31 | e.g 32 | 33 | CBAUTH_REVRPC_URL="http://Administrator:asdasd@127.0.0.1:9000/_cbauth" 34 | 35 | */ 36 | 37 | url, err := url.Parse(*serverURL) 38 | if err != nil { 39 | log.Printf("Failed to parse url %v", err) 40 | return 41 | } 42 | 43 | hostPort := url.Host 44 | 45 | user, bucket_password, err := cbauth.GetHTTPServiceAuth(hostPort) 46 | if err != nil { 47 | log.Printf("Failed %v", err) 48 | return 49 | } 50 | 51 | log.Printf(" HTTP Servce username %s password %s", user, bucket_password) 52 | 53 | client, err := couchbase.ConnectWithAuthCreds(*serverURL, user, bucket_password) 54 | if err != nil { 55 | log.Printf("Connect failed %v", err) 56 | return 57 | } 58 | 59 | cbpool, err := client.GetPool("default") 60 | if err != nil { 61 | log.Printf("Failed to connect to default pool %v", err) 62 | return 63 | } 64 | 65 | mUser, mPassword, err := cbauth.GetMemcachedServiceAuth(hostPort) 66 | if err != nil { 67 | log.Printf(" failed %v", err) 68 | return 69 | } 70 | 71 | var cbbucket *couchbase.Bucket 72 | cbbucket, err = cbpool.GetBucketWithAuth(*bucketName, mUser, mPassword) 73 | 74 | if err != nil { 75 | log.Printf("Failed to connect to bucket %v", err) 76 | return 77 | } 78 | 79 | log.Printf(" Bucket name %s Bucket %v", *bucketName, cbbucket) 80 | 81 | err = cbbucket.Set("k1", 5, "value") 82 | if err != nil { 83 | log.Printf("set failed error %v", err) 84 | return 85 | } 86 | 87 | if *authUser != "" { 88 | creds, err := cbauth.Auth(*authUser, *authPswd) 89 | if err != nil { 90 | log.Printf(" failed %v", err) 91 | return 92 | } 93 | 94 | permission := fmt.Sprintf("cluster.bucket[%s].data!read", *bucketName) 95 | canAccess, err := creds.IsAllowed(permission) 96 | if err != nil { 97 | log.Printf(" error %v checking permission %v", err, permission) 98 | } else { 99 | log.Printf(" result of checking 
permission %v : %v", permission, canAccess) 100 | } 101 | } 102 | 103 | } 104 | -------------------------------------------------------------------------------- /examples/failoverlog/failoverlog.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/couchbase/go-couchbase" 5 | "log" 6 | ) 7 | 8 | const testURL = "http://localhost:9000" 9 | 10 | func main() { 11 | // get a bucket and mc.Client connection 12 | bucket, err := getTestConnection("default") 13 | if err != nil { 14 | panic(err) 15 | } 16 | 17 | // Get failover log for a vbucket 18 | flogs, err := bucket.GetFailoverLogs([]uint16{0, 1, 2, 3, 4, 5, 6, 7}) 19 | if err != nil { 20 | panic(err) 21 | } 22 | for vbno, flog := range flogs { 23 | log.Printf("Failover logs for vbucket %v: %v", vbno, flog) 24 | } 25 | } 26 | 27 | func getTestConnection(bucketname string) (*couchbase.Bucket, error) { 28 | couch, err := couchbase.Connect(testURL) 29 | if err != nil { 30 | log.Println("Make sure that couchbase is at", testURL) 31 | return nil, err 32 | } 33 | pool, err := couch.GetPool("default") 34 | if err != nil { 35 | return nil, err 36 | } 37 | bucket, err := pool.GetBucket(bucketname) 38 | return bucket, err 39 | } 40 | -------------------------------------------------------------------------------- /examples/hello/hello.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "github.com/couchbase/go-couchbase" 7 | "log" 8 | "strconv" 9 | "time" 10 | ) 11 | 12 | func maybeFatal(err error) { 13 | if err != nil { 14 | log.Fatalf("Error: %v", err) 15 | } 16 | } 17 | 18 | func doOps(b *couchbase.Bucket) { 19 | fmt.Printf("Doing some ops on %s\n", b.Name) 20 | start := time.Now() 21 | total := 2048 22 | for i := 0; i < total; i++ { 23 | k := fmt.Sprintf("k%d", i) 24 | maybeFatal(b.Set(k, 0, []string{"a", "b", "c"})) 25 | rv := make([]string, 0, 10) 26 | 
maybeFatal(b.Get(k, &rv)) 27 | if fmt.Sprintf("%#v", rv) != `[]string{"a", "b", "c"}` { 28 | log.Fatalf("Expected %#v, got %#v", 29 | []string{"a", "b", "c"}, rv) 30 | } 31 | maybeFatal(b.Delete(k)) 32 | } 33 | fmt.Printf("Did %d ops in %s\n", 34 | total*3, time.Now().Sub(start).String()) 35 | } 36 | 37 | func doMoreOps(b *couchbase.Bucket) { 38 | fmt.Printf("Doing some Cas ops on %s\n", b.Name) 39 | start := time.Now() 40 | total := 2048 41 | for i := 0; i < total; i++ { 42 | k := fmt.Sprintf("k2%d", i) 43 | maybeFatal(b.Set(k, 0, []string{"a", "b", "c"})) 44 | rv := make([]string, 0, 10) 45 | var cas uint64 46 | maybeFatal(b.Gets(k, &rv, &cas)) 47 | if fmt.Sprintf("%#v", rv) != `[]string{"a", "b", "c"}` { 48 | log.Fatalf("Expected %#v, got %#v", 49 | []string{"a", "b", "c"}, rv) 50 | } 51 | if _, err := b.Cas(k, 0, cas, []string{"a", "b", "d"}); err != nil { 52 | log.Fatalf("Error: %v", err) 53 | } 54 | maybeFatal(b.Get(k, &rv)) 55 | if fmt.Sprintf("%#v", rv) != `[]string{"a", "b", "d"}` { 56 | log.Fatalf("Expected %#v, got %#v", 57 | []string{"a", "b", "c"}, rv) 58 | } 59 | // this should fail since we don't know the latest cas value 60 | _, err := b.Cas(k, 0, cas, []string{"a", "b", "x"}) 61 | if err == nil { 62 | log.Fatalf("Expected \"Data exists for key\"") 63 | } 64 | 65 | maybeFatal(b.Delete(k)) 66 | } 67 | fmt.Printf("Did %d ops in %s\n", 68 | total*6, time.Now().Sub(start).String()) 69 | } 70 | 71 | func exploreBucket(bucket *couchbase.Bucket) { 72 | vbm := bucket.VBServerMap() 73 | fmt.Printf(" %v uses %s\n", bucket.Name, vbm.HashAlgorithm) 74 | for pos, server := range vbm.ServerList { 75 | vbs := make([]string, 0, 1024) 76 | for vb, a := range vbm.VBucketMap { 77 | if a[0] == pos { 78 | vbs = append(vbs, strconv.Itoa(vb)) 79 | } 80 | } 81 | fmt.Printf(" %s: %v\n", server, vbs) 82 | } 83 | 84 | doOps(bucket) 85 | doMoreOps(bucket) 86 | 87 | } 88 | 89 | func explorePool(pool couchbase.Pool) { 90 | for _, n := range pool.Nodes { 91 | fmt.Printf(" %v\n", 
n.Hostname) 92 | } 93 | fmt.Printf(" Buckets:\n") 94 | for n := range pool.BucketMap { 95 | bucket, err := pool.GetBucket(n) 96 | if err != nil { 97 | log.Fatalf("Error getting bucket: %v\n", err) 98 | } 99 | exploreBucket(bucket) 100 | } 101 | } 102 | 103 | func main() { 104 | flag.Parse() 105 | c, err := couchbase.Connect(flag.Arg(0)) 106 | if err != nil { 107 | log.Fatalf("Error connecting: %v", err) 108 | } 109 | fmt.Printf("Connected to ver=%s\n", c.Info.ImplementationVersion) 110 | for _, pn := range c.Info.Pools { 111 | fmt.Printf("Found pool: %s -> %s\n", pn.Name, pn.URI) 112 | p, err := c.GetPool(pn.Name) 113 | if err != nil { 114 | log.Fatalf("Can't get pool: %v", err) 115 | } 116 | explorePool(p) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /examples/hello_getandtouch/hello_getandtouch.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "time" 8 | 9 | "github.com/couchbase/go-couchbase" 10 | ) 11 | 12 | var poolName = flag.String("pool", "default", "Pool name") 13 | var writeFlag = flag.Bool("write", false, "If true, will write a value to the key") 14 | 15 | func main() { 16 | flag.Parse() 17 | 18 | if len(flag.Args()) < 3 { 19 | log.Fatalf("Usage: hello_getandtouch [-pool poolname] [-write] server bucket key") 20 | } 21 | 22 | c, err := couchbase.Connect(flag.Arg(0)) 23 | if err != nil { 24 | log.Fatalf("Error connecting: %v", err) 25 | } 26 | fmt.Printf("Connected to ver=%s\n", c.Info.ImplementationVersion) 27 | 28 | pool, err := c.GetPool(*poolName) 29 | if err != nil { 30 | log.Fatalf("Can't get pool %q: %v", *poolName, err) 31 | } 32 | 33 | bucket, err := pool.GetBucket(flag.Arg(1)) 34 | if err != nil { 35 | log.Fatalf("Can't get bucket %q: %v", flag.Arg(1), err) 36 | } 37 | 38 | key := flag.Arg(2) 39 | 40 | // Write an initial value to the key, with expiry 2s 41 | if err = bucket.Set(key, 2, 
[]string{"a", "b", "c"}); err != nil { 42 | log.Fatalf("Set returned error %v", err) 43 | } 44 | 45 | // Validate that expiry is extended when getAndTouch is called 46 | for i := 0; i < 10; i++ { 47 | result, _, err := bucket.GetAndTouchRaw(key, 3) 48 | if err != nil { 49 | log.Fatalf("GetAndTouchRaw returned error %v", err) 50 | } 51 | if len(result) == 0 { 52 | log.Fatalf("GetAndTouchRaw returned invalid content, %v", err) 53 | } 54 | log.Printf("Successful retrieval via GetAndTouchRaw after %ds", i+1) 55 | time.Sleep(1 * time.Second) 56 | } 57 | 58 | // Validate failed retrieval post-expiry. Use GetAndTouchRaw to shorten expiry, 59 | // then attempt standard retrieval via GetRaw 60 | bucket.GetAndTouchRaw(key, 1) 61 | time.Sleep(2 * time.Second) 62 | _, err = bucket.GetRaw(key) 63 | if err == nil { 64 | log.Fatalf("Retrieved document that should have expired") 65 | } 66 | 67 | } 68 | -------------------------------------------------------------------------------- /examples/hello_observe/hello_observe.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "time" 8 | 9 | "github.com/couchbase/go-couchbase" 10 | ) 11 | 12 | var poolName = flag.String("pool", "default", "Pool name") 13 | var writeFlag = flag.Bool("write", false, "If true, will write a value to the key") 14 | 15 | func main() { 16 | flag.Parse() 17 | 18 | if len(flag.Args()) < 3 { 19 | log.Fatalf("Usage: hello_observe [-pool poolname] [-write] server bucket key") 20 | } 21 | 22 | c, err := couchbase.Connect(flag.Arg(0)) 23 | if err != nil { 24 | log.Fatalf("Error connecting: %v", err) 25 | } 26 | fmt.Printf("Connected to ver=%s\n", c.Info.ImplementationVersion) 27 | 28 | pool, err := c.GetPool(*poolName) 29 | if err != nil { 30 | log.Fatalf("Can't get pool %q: %v", *poolName, err) 31 | } 32 | 33 | bucket, err := pool.GetBucket(flag.Arg(1)) 34 | if err != nil { 35 | log.Fatalf("Can't get bucket %q: %v", 
flag.Arg(1), err) 36 | } 37 | 38 | key := flag.Arg(2) 39 | 40 | result, err := bucket.Observe(key) 41 | if err != nil { 42 | log.Fatalf("Observe returned error %v", err) 43 | } 44 | log.Printf("Observe result: %+v", result) 45 | 46 | if *writeFlag { 47 | log.Printf("Now writing to key %q with persistence...", key) 48 | start := time.Now() 49 | err = bucket.Write(key, 0, 0, "observe test", couchbase.Persist) 50 | if err != nil { 51 | log.Fatalf("Write returned error %v", err) 52 | } 53 | log.Printf("Write with persistence took %s", time.Since(start)) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /examples/hello_tap/hello_tap.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | 8 | "github.com/couchbase/go-couchbase" 9 | "github.com/couchbase/gomemcached/client" 10 | ) 11 | 12 | var poolName = flag.String("pool", "default", "Pool name") 13 | var back = flag.Uint64("backfill", memcached.TapNoBackfill, "List historical values starting from here") 14 | var dump = flag.Bool("dump", false, "Stop after backfill") 15 | var raw = flag.Bool("raw", false, "Show raw event contents") 16 | var ack = flag.Bool("ack", false, "Request ACKs from server") 17 | var keysOnly = flag.Bool("keysOnly", false, "Send only keys, no values") 18 | var checkpoint = flag.Bool("checkpoint", false, "Send checkpoint events") 19 | 20 | func main() { 21 | flag.Parse() 22 | 23 | if len(flag.Args()) < 2 { 24 | log.Fatalf("Server URL and bucket name required") 25 | } 26 | 27 | c, err := couchbase.Connect(flag.Arg(0)) 28 | if err != nil { 29 | log.Fatalf("Error connecting: %v", err) 30 | } 31 | fmt.Printf("Connected to ver=%s\n", c.Info.ImplementationVersion) 32 | 33 | pool, err := c.GetPool(*poolName) 34 | if err != nil { 35 | log.Fatalf("Can't get pool %q: %v", *poolName, err) 36 | } 37 | 38 | bucket, err := pool.GetBucket(flag.Arg(1)) 39 | if err != 
nil { 40 | log.Fatalf("Can't get bucket %q: %v", flag.Arg(1), err) 41 | } 42 | 43 | args := memcached.DefaultTapArguments() 44 | args.Backfill = uint64(*back) 45 | args.Dump = *dump 46 | args.SupportAck = *ack 47 | args.KeysOnly = *keysOnly 48 | args.Checkpoint = *checkpoint 49 | feed, err := bucket.StartTapFeed(&args) 50 | if err != nil { 51 | log.Fatalf("Error starting tap feed: %v", err) 52 | } 53 | for op := range feed.C { 54 | if *raw { 55 | log.Printf("Received %#v\n", op) 56 | } else { 57 | log.Printf("Received %s\n", op.String()) 58 | if len(op.Value) > 0 && len(op.Value) < 500 { 59 | log.Printf("\tValue: %s", op.Value) 60 | } 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /examples/incr/incr.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/couchbase/go-couchbase" 6 | "log" 7 | "runtime" 8 | "time" 9 | ) 10 | 11 | func IncrementCounter(bucket *couchbase.Bucket, key string, amount uint64) { 12 | 13 | fmt.Printf("Trying to increment counter, key=%s, amount=%d\n", key, amount) 14 | 15 | value, err := bucket.Incr(key, amount, amount, 0) 16 | 17 | if err != nil { 18 | fmt.Printf("Error happened while incrementing %s\n", err) 19 | } else { 20 | 21 | fmt.Printf("Incremented counter, new value=%d\n", value) 22 | } 23 | 24 | value, err = bucket.Decr(key, amount, amount, 0) 25 | if err != nil { 26 | fmt.Printf("Error happened while decrementing %s\n", err) 27 | } else { 28 | 29 | fmt.Printf("Decremented counter, new value=%d\n", value) 30 | } 31 | 32 | } 33 | 34 | func main() { 35 | 36 | runtime.GOMAXPROCS(runtime.NumCPU()) 37 | 38 | c, err := couchbase.Connect("http://localhost:9000/") 39 | if err != nil { 40 | log.Fatalf("Error connecting: %v", err) 41 | } 42 | 43 | pool, err := c.GetPool("default") 44 | if err != nil { 45 | log.Fatalf("Error getting pool: %v", err) 46 | } 47 | 48 | bucket, err := 
pool.GetBucket("default") 49 | if err != nil { 50 | log.Fatalf("Error getting bucket: %v", err) 51 | } 52 | 53 | bucket.Delete("12345") 54 | 55 | go IncrementCounter(bucket, "12345", 2) 56 | go IncrementCounter(bucket, "12345", 2) 57 | go IncrementCounter(bucket, "12345", 2) 58 | 59 | time.Sleep(10000000) 60 | } 61 | -------------------------------------------------------------------------------- /examples/mb-15442/repro.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "flag" 6 | "fmt" 7 | "github.com/couchbase/go-couchbase" 8 | _ "github.com/couchbaselabs/go_n1ql" 9 | "log" 10 | ) 11 | 12 | var serverURL = flag.String("serverURL", "http://localhost:9000", 13 | "couchbase server URL") 14 | var poolName = flag.String("poolName", "default", 15 | "pool name") 16 | var bucketName = flag.String("bucketName", "default", 17 | "bucket name") 18 | 19 | func main() { 20 | 21 | flag.Parse() 22 | 23 | client, err := couchbase.Connect(*serverURL) 24 | if err != nil { 25 | log.Printf("Connect failed %v", err) 26 | return 27 | } 28 | 29 | cbpool, err := client.GetPool("default") 30 | if err != nil { 31 | log.Printf("Failed to connect to default pool %v", err) 32 | return 33 | } 34 | 35 | var cbbucket *couchbase.Bucket 36 | cbbucket, err = cbpool.GetBucket(*bucketName) 37 | 38 | if err != nil { 39 | log.Printf("Failed to connect to bucket %v", err) 40 | return 41 | } 42 | 43 | performOp(cbbucket) 44 | 45 | } 46 | 47 | func performOp(b *couchbase.Bucket) { 48 | 49 | key := fmt.Sprintf("odwalla-juice1") 50 | odwalla1 := map[string]interface{}{"type": "juice"} 51 | log.Printf(" setting key %v value %v", key, odwalla1) 52 | _, err := b.SetWithMeta(key, 0x1000001, 0, odwalla1) 53 | if err != nil { 54 | log.Printf("set failed error %v", err) 55 | return 56 | } 57 | 58 | _, flags, _, err := b.GetsRaw("odwalla-juice1") 59 | if err != nil { 60 | log.Fatal(err) 61 | } 62 | 63 | if flags != 0x1000001 
{ 64 | log.Fatalf("Flag mismatch %v", flags) 65 | } 66 | 67 | n1ql, err := sql.Open("n1ql", "localhost:8093") 68 | if err != nil { 69 | log.Fatal(err) 70 | } 71 | 72 | result, err := n1ql.Exec("UPDATE default USE KEYS \"odwalla-juice1\" SET type=\"product-juice\" RETURNING default.type") 73 | 74 | if err != nil { 75 | log.Fatal(err) 76 | } 77 | 78 | rowsAffected, err := result.RowsAffected() 79 | if err != nil { 80 | log.Fatal(err) 81 | } 82 | log.Printf("Rows affected %d", rowsAffected) 83 | 84 | _, flags, _, err = b.GetsRaw("odwalla-juice1") 85 | if err != nil { 86 | log.Fatal(err) 87 | } 88 | 89 | if flags != 0x1000001 { 90 | log.Fatalf("Flag mismatch %v", flags) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /examples/observe/observe.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "github.com/couchbase/go-couchbase" 7 | "log" 8 | "time" 9 | ) 10 | 11 | var serverURL = flag.String("serverURL", "http://localhost:9000", 12 | "couchbase server URL") 13 | var poolName = flag.String("poolName", "default", 14 | "pool name") 15 | var bucketName = flag.String("bucketName", "default", 16 | "bucket name") 17 | 18 | func main() { 19 | 20 | flag.Parse() 21 | 22 | couchbase.EnableMutationToken = true 23 | client, err := couchbase.Connect(*serverURL) 24 | if err != nil { 25 | log.Printf("Connect failed %v", err) 26 | return 27 | } 28 | 29 | cbpool, err := client.GetPool("default") 30 | if err != nil { 31 | log.Printf("Failed to connect to default pool %v", err) 32 | return 33 | } 34 | 35 | var cbbucket *couchbase.Bucket 36 | cbbucket, err = cbpool.GetBucket(*bucketName) 37 | 38 | if err != nil { 39 | log.Printf("Failed to connect to bucket %v", err) 40 | return 41 | } 42 | 43 | go cbbucket.StartOPPollers(4) 44 | dsSet := false 45 | 46 | err = cbbucket.SetObserveAndPersist(couchbase.PersistMaster, couchbase.ObserveReplicateTwo) 47 
| if err != nil { 48 | log.Printf("Not supported %v", err) 49 | } else { 50 | dsSet = true 51 | } 52 | 53 | if dsSet == false { 54 | err = cbbucket.SetObserveAndPersist(couchbase.PersistMaster, couchbase.ObserveReplicateOne) 55 | if err != nil { 56 | log.Printf("Not supported %v", err) 57 | } else { 58 | dsSet = true 59 | } 60 | } 61 | 62 | if dsSet == false { 63 | err = cbbucket.SetObserveAndPersist(couchbase.PersistMaster, couchbase.ObserveNone) 64 | if err != nil { 65 | log.Fatal(err) 66 | } 67 | } 68 | 69 | i := 512 70 | var mt *couchbase.MutationToken 71 | var failover bool 72 | 73 | for { 74 | key := fmt.Sprintf("key%d", i) 75 | value := fmt.Sprintf("value%d", i) 76 | mt, err = cbbucket.SetWithMeta(key, 0, 10, value) 77 | if err != nil { 78 | log.Printf(" Set operation failed for key %v. error %v", key, err) 79 | goto skip_mutation 80 | } 81 | log.Printf(" Got mutation token %v", mt) 82 | 83 | //observe persist this mutation 84 | err, failover = cbbucket.ObserveAndPersistPoll(mt.VBid, mt.Guard, mt.Value) 85 | if err != nil { 86 | log.Printf("Failure in Observe / Persist %v", err) 87 | } 88 | if failover == true { 89 | log.Printf(" Hard failover, cannot meet durablity requirements") 90 | } 91 | 92 | skip_mutation: 93 | <-time.After(1 * time.Second) 94 | i++ 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /examples/streaming/example.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "github.com/couchbase/go-couchbase" 7 | "log" 8 | "time" 9 | ) 10 | 11 | var serverURL = flag.String("serverURL", "http://localhost:9000", 12 | "couchbase server URL") 13 | var poolName = flag.String("poolName", "default", 14 | "pool name") 15 | var bucketName = flag.String("bucketName", "default", 16 | "bucket name") 17 | 18 | func main() { 19 | 20 | flag.Parse() 21 | 22 | couchbase.EnableMutationToken = true 23 | client, err := 
couchbase.Connect(*serverURL) 24 | if err != nil { 25 | log.Printf("Connect failed %v", err) 26 | return 27 | } 28 | 29 | cbpool, err := client.GetPool("default") 30 | if err != nil { 31 | log.Printf("Failed to connect to default pool %v", err) 32 | return 33 | } 34 | 35 | var cbbucket *couchbase.Bucket 36 | cbbucket, err = cbpool.GetBucket(*bucketName) 37 | 38 | if err != nil { 39 | log.Printf("Failed to connect to bucket %v", err) 40 | return 41 | } 42 | 43 | couchbase.SetConnectionPoolParams(256, 16) 44 | couchbase.SetTcpKeepalive(true, 30) 45 | 46 | go performOp(cbbucket) 47 | 48 | errCh := make(chan error) 49 | 50 | cbbucket.RunBucketUpdater(func(bucket string, err error) { 51 | log.Printf(" Updated retured err %v", err) 52 | errCh <- err 53 | }) 54 | 55 | <-errCh 56 | 57 | } 58 | 59 | func performOp(b *couchbase.Bucket) { 60 | 61 | i := 512 62 | key := fmt.Sprintf("k%d", i) 63 | value := fmt.Sprintf("value%d", i) 64 | err := b.Set(key, len(value), value) 65 | if err != nil { 66 | log.Printf("set failed error %v", err) 67 | return 68 | } 69 | var rv interface{} 70 | var cas uint64 71 | var mt *couchbase.MutationToken 72 | // get the CAS value for this key 73 | err = b.Gets(key, &rv, &cas) 74 | if err != nil { 75 | log.Printf("Gets failed. error %v", err) 76 | return 77 | } 78 | 79 | for { 80 | value = fmt.Sprintf("value%d", i) 81 | cas, mt, err = b.CasWithMeta(key, 0, 10, cas, value) 82 | if err != nil { 83 | log.Printf(" Cas2 operation failed. error %v", err) 84 | return 85 | } 86 | log.Printf(" Got new cas value %v mutation token %v", cas, mt) 87 | var flags, expiry int 88 | var seqNo uint64 89 | 90 | err = b.GetMeta(key, &flags, &expiry, &cas, &seqNo) 91 | if err != nil { 92 | log.Printf(" Failed to get meta . Error %v", err) 93 | return 94 | } 95 | 96 | log.Printf(" meta values for key %s. 
Flags %d, Expiry %v, Cas %d, Sequence %d", key, flags, time.Unix(int64(expiry), 0), cas, seqNo) 97 | 98 | <-time.After(1 * time.Second) 99 | i++ 100 | } 101 | 102 | } 103 | -------------------------------------------------------------------------------- /examples/upr_bench/bench.go: -------------------------------------------------------------------------------- 1 | // Tool receives raw events from go-couchbase UPR client. 2 | package main 3 | 4 | import ( 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "runtime/debug" 10 | "strings" 11 | "time" 12 | 13 | "github.com/couchbase/go-couchbase" 14 | mcd "github.com/couchbase/gomemcached" 15 | mc "github.com/couchbase/gomemcached/client" 16 | ) 17 | 18 | var options struct { 19 | buckets []string // buckets to connect with 20 | maxVbno int // maximum number of vbuckets 21 | stats int // periodic timeout(ms) to print stats, 0 will disable 22 | duration int 23 | printflogs bool 24 | } 25 | 26 | var done = make(chan bool, 16) 27 | var rch = make(chan []interface{}, 10000) 28 | 29 | func argParse() string { 30 | var buckets string 31 | 32 | flag.StringVar(&buckets, "buckets", "default", 33 | "buckets to listen") 34 | flag.IntVar(&options.maxVbno, "maxvb", 1024, 35 | "maximum number of vbuckets") 36 | flag.IntVar(&options.stats, "stats", 1000, 37 | "periodic timeout in mS, to print statistics, `0` will disable stats") 38 | flag.IntVar(&options.duration, "duration", 3000, 39 | "receive mutations till duration milliseconds.") 40 | flag.BoolVar(&options.printflogs, "flogs", false, 41 | "display failover logs") 42 | 43 | flag.Parse() 44 | 45 | options.buckets = strings.Split(buckets, ",") 46 | args := flag.Args() 47 | if len(args) < 1 { 48 | usage() 49 | os.Exit(1) 50 | } 51 | return args[0] 52 | } 53 | 54 | func usage() { 55 | fmt.Fprintf(os.Stderr, "Usage : %s [OPTIONS] \n", os.Args[0]) 56 | flag.PrintDefaults() 57 | } 58 | 59 | func main() { 60 | cluster := argParse() 61 | ch := make(chan *couchbase.UprFeed, 10) 62 | for _, 
bucket := range options.buckets { 63 | go startBucket(cluster, bucket, ch) 64 | } 65 | receive(ch) 66 | } 67 | 68 | func startBucket(cluster, bucketn string, ch chan *couchbase.UprFeed) int { 69 | defer func() { 70 | if r := recover(); r != nil { 71 | fmt.Printf("%s:\n%s\n", r, debug.Stack()) 72 | } 73 | }() 74 | 75 | log.Printf("Connecting with %q\n", bucketn) 76 | b, err := ConnectBucket(cluster, "default", bucketn) 77 | mf(err, "bucket") 78 | 79 | uprFeed, err := b.StartUprFeed("rawupr", uint32(0)) 80 | mf(err, "- upr") 81 | 82 | vbnos := listOfVbnos(options.maxVbno) 83 | 84 | flogs, err := b.GetFailoverLogs(vbnos) 85 | mf(err, "- upr failoverlogs") 86 | 87 | if options.printflogs { 88 | printFlogs(vbnos, flogs) 89 | } 90 | 91 | ch <- uprFeed 92 | 93 | go startUpr(uprFeed, flogs) 94 | 95 | for { 96 | e, ok := <-uprFeed.C 97 | if ok == false { 98 | log.Printf("Closing for bucket %q\n", bucketn) 99 | } 100 | rch <- []interface{}{bucketn, e} 101 | } 102 | } 103 | 104 | func startUpr(uprFeed *couchbase.UprFeed, flogs couchbase.FailoverLog) { 105 | start, end := uint64(0), uint64(0xFFFFFFFFFFFFFFFF) 106 | snapStart, snapEnd := uint64(0), uint64(0) 107 | for vbno, flog := range flogs { 108 | x := flog[len(flog)-1] // map[uint16][][2]uint64 109 | opaque, flags, vbuuid := uint16(0), uint32(0), x[0] 110 | err := uprFeed.UprRequestStream( 111 | vbno, opaque, flags, vbuuid, start, end, snapStart, snapEnd) 112 | mf(err, fmt.Sprintf("stream-req for %v failed", vbno)) 113 | } 114 | } 115 | 116 | func endUpr(uprFeed *couchbase.UprFeed, vbnos []uint16) error { 117 | for _, vbno := range vbnos { 118 | if err := uprFeed.UprCloseStream(vbno, uint16(0)); err != nil { 119 | mf(err, "- UprCloseStream()") 120 | return err 121 | } 122 | } 123 | return nil 124 | } 125 | 126 | func mf(err error, msg string) { 127 | if err != nil { 128 | log.Fatalf("%v: %v", msg, err) 129 | } 130 | } 131 | 132 | func receive(ch chan *couchbase.UprFeed) { 133 | // bucket -> Opcode -> #count 134 | counts := 
make(map[string]map[mcd.CommandCode]int) 135 | 136 | var tick <-chan time.Time 137 | if options.stats > 0 { 138 | tick = time.Tick(time.Millisecond * time.Duration(options.stats)) 139 | } 140 | 141 | finTimeout := time.After(time.Millisecond * time.Duration(options.duration)) 142 | uprFeeds := make([]*couchbase.UprFeed, 0) 143 | loop: 144 | for { 145 | select { 146 | case uprFeed := <-ch: 147 | uprFeeds = append(uprFeeds, uprFeed) 148 | 149 | case msg, ok := <-rch: 150 | if ok == false { 151 | break loop 152 | } 153 | bucket, e := msg[0].(string), msg[1].(*mc.UprEvent) 154 | if _, ok := counts[bucket]; !ok { 155 | counts[bucket] = make(map[mcd.CommandCode]int) 156 | } 157 | if _, ok := counts[bucket][e.Opcode]; !ok { 158 | counts[bucket][e.Opcode] = 0 159 | } 160 | counts[bucket][e.Opcode]++ 161 | 162 | case <-tick: 163 | for bucket, m := range counts { 164 | log.Printf("%q %s\n", bucket, sprintCounts(m)) 165 | } 166 | 167 | case <-finTimeout: 168 | for _, uprFeed := range uprFeeds { 169 | endUpr(uprFeed, listOfVbnos(options.maxVbno)) 170 | } 171 | break loop 172 | } 173 | } 174 | fmt.Println("sleep wait ....") 175 | time.Sleep(10000 * time.Millisecond) 176 | } 177 | 178 | func sprintCounts(counts map[mcd.CommandCode]int) string { 179 | line := "" 180 | for i := 0; i < 256; i++ { 181 | opcode := mcd.CommandCode(i) 182 | if n, ok := counts[opcode]; ok { 183 | line += fmt.Sprintf("%s:%v ", mcd.CommandNames[opcode], n) 184 | } 185 | } 186 | return strings.TrimRight(line, " ") 187 | } 188 | 189 | func listOfVbnos(maxVbno int) []uint16 { 190 | // list of vbuckets 191 | vbnos := make([]uint16, 0, maxVbno) 192 | for i := 0; i < maxVbno; i++ { 193 | vbnos = append(vbnos, uint16(i)) 194 | } 195 | return vbnos 196 | } 197 | 198 | func printFlogs(vbnos []uint16, flogs couchbase.FailoverLog) { 199 | for i, vbno := range vbnos { 200 | log.Printf("Failover log for vbucket %v\n", vbno) 201 | log.Printf(" %#v\n", flogs[uint16(i)]) 202 | } 203 | } 204 | 205 | func 
ConnectBucket(cluster, pooln, bucketn string) (*couchbase.Bucket, error) { 206 | couch, err := couchbase.Connect("http://" + cluster) 207 | if err != nil { 208 | return nil, err 209 | } 210 | pool, err := couch.GetPool(pooln) 211 | if err != nil { 212 | return nil, err 213 | } 214 | bucket, err := pool.GetBucket(bucketn) 215 | if err != nil { 216 | return nil, err 217 | } 218 | return bucket, err 219 | } 220 | -------------------------------------------------------------------------------- /examples/upr_feed/feed.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "github.com/couchbase/go-couchbase" 7 | "github.com/couchbase/gomemcached" 8 | "github.com/couchbase/gomemcached/client" 9 | "log" 10 | "math/rand" 11 | "net/http" 12 | _ "net/http/pprof" 13 | "net/url" 14 | "os" 15 | "runtime/pprof" 16 | "time" 17 | ) 18 | 19 | var vbcount = 64 20 | var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") 21 | var memprofile = flag.String("memprofile", "", "write memory profile to this file") 22 | 23 | func mf(err error, msg string) { 24 | if err != nil { 25 | log.Fatalf("%v: %v", msg, err) 26 | } 27 | } 28 | 29 | // Flush the bucket before trying this program 30 | func main() { 31 | 32 | //runtime.GOMAXPROCS(4) 33 | 34 | go func() { 35 | log.Println(http.ListenAndServe("localhost:6060", nil)) 36 | }() 37 | 38 | bname := flag.String("bucket", "", 39 | "bucket to connect to (defaults to username)") 40 | 41 | flag.Usage = func() { 42 | fmt.Fprintf(os.Stderr, 43 | "%v [flags] http://user:pass@host:8091/\n\nFlags:\n", 44 | os.Args[0]) 45 | flag.PrintDefaults() 46 | os.Exit(64) 47 | } 48 | 49 | flag.Parse() 50 | 51 | if flag.NArg() < 1 { 52 | flag.Usage() 53 | } 54 | 55 | if *cpuprofile != "" { 56 | f, err := os.Create(*cpuprofile) 57 | if err != nil { 58 | log.Fatal(err) 59 | } 60 | pprof.StartCPUProfile(f) 61 | defer pprof.StopCPUProfile() 62 | } 63 | 64 | if 
*memprofile != "" { 65 | f, err := os.Create(*memprofile) 66 | if err != nil { 67 | log.Fatal(err) 68 | } 69 | defer pprof.WriteHeapProfile(f) 70 | defer f.Close() 71 | } 72 | 73 | u, err := url.Parse(flag.Arg(0)) 74 | mf(err, "parse") 75 | 76 | if *bname == "" && u.User != nil { 77 | *bname = u.User.Username() 78 | } 79 | 80 | c, err := couchbase.Connect(u.String()) 81 | mf(err, "connect - "+u.String()) 82 | 83 | p, err := c.GetPool("default") 84 | mf(err, "pool") 85 | 86 | bucket, err := p.GetBucket(*bname) 87 | mf(err, "bucket") 88 | 89 | // start upr feed 90 | name := fmt.Sprintf("%v", time.Now().UnixNano()) 91 | feed, err := bucket.StartUprFeed(name, 0) 92 | if err != nil { 93 | log.Print(" Failed to start stream ", err) 94 | return 95 | } 96 | 97 | // request stream for all vbuckets 98 | for i := 0; i < vbcount; i++ { 99 | err := feed.UprRequestStream( 100 | uint16(i) /*vbno*/, uint16(0) /*opaque*/, 0 /*flag*/, 0, /*vbuuid*/ 101 | 0 /*seqStart*/, 0xFFFFFFFFFFFFFFFF /*seqEnd*/, 0 /*snaps*/, 0) 102 | if err != nil { 103 | fmt.Printf("%s", err.Error()) 104 | } 105 | } 106 | 107 | // observe the mutations from the channel. 
108 | var event *memcached.UprEvent 109 | var mutations = 0 110 | //var callOnce bool 111 | loop: 112 | for { 113 | select { 114 | case e, ok := <-feed.C: 115 | if !ok { 116 | break loop 117 | } else { 118 | event = e 119 | } 120 | case <-time.After(time.Second): 121 | break loop 122 | } 123 | if event.Opcode == gomemcached.UPR_MUTATION { 124 | //log.Printf(" got mutation %s", e.Value) 125 | mutations += 1 126 | } 127 | 128 | if event.Opcode == gomemcached.UPR_STREAMEND { 129 | log.Printf(" Received Stream end for vbucket %d", event.VBucket) 130 | } 131 | 132 | if mutations%100000 == 0 && mutations != 0 { 133 | log.Printf(" received %d mutations ", mutations) 134 | } 135 | 136 | //e.Release() 137 | } 138 | 139 | // close stream for all vbuckets 140 | for i := 0; i < vbcount; i++ { 141 | err := feed.UprCloseStream( 142 | uint16(i) /*vbno*/, uint16(0)) 143 | if err != nil { 144 | fmt.Printf("%s", err.Error()) 145 | } 146 | } 147 | 148 | feed.Close() 149 | log.Printf("Mutation count %d", mutations) 150 | 151 | } 152 | 153 | func addKVset(b *couchbase.Bucket, count int) { 154 | for i := 0; i < count; i++ { 155 | key := fmt.Sprintf("key%v", i+1000000) 156 | val_len := rand.Intn(10*1024) + rand.Intn(10*1024) 157 | value := fmt.Sprintf("This is a test key %d", val_len) 158 | err := b.Set(key, 0, value) 159 | if err != nil { 160 | panic(err) 161 | } 162 | 163 | if i%1000000 == 0 { 164 | fmt.Printf("\n Added %d keys", i) 165 | } 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /examples/upr_restart/restart.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "time" 7 | 8 | "github.com/couchbase/go-couchbase" 9 | "github.com/couchbase/gomemcached" 10 | "github.com/couchbase/gomemcached/client" 11 | ) 12 | 13 | var vbcount = 2 14 | 15 | const TESTURL = "http://localhost:9000" 16 | 17 | // Flush the bucket before trying this program 18 
| func main() { 19 | // get a bucket and mc.Client connection 20 | bucket, err := getTestConnection("default") 21 | if err != nil { 22 | panic(err) 23 | } 24 | 25 | // start upr feed 26 | feed, err := bucket.StartUprFeed("index" /*name*/, 0) 27 | if err != nil { 28 | panic(err) 29 | } 30 | 31 | for i := 0; i < vbcount; i++ { 32 | err := feed.UprRequestStream( 33 | uint16(i) /*vbno*/, uint16(0) /*opaque*/, 0 /*flag*/, 0, /*vbuuid*/ 34 | 0 /*seqStart*/, 0xFFFFFFFFFFFFFFFF /*seqEnd*/, 0 /*snaps*/, 0) 35 | if err != nil { 36 | fmt.Printf("%s", err.Error()) 37 | } 38 | } 39 | 40 | vbseqNo := receiveMutations(feed, 20000) 41 | 42 | vbList := make([]uint16, 0) 43 | for i := 0; i < vbcount; i++ { 44 | vbList = append(vbList, uint16(i)) 45 | } 46 | failoverlogMap, err := bucket.GetFailoverLogs(vbList) 47 | if err != nil { 48 | log.Printf(" error in failover log request %s", err.Error()) 49 | 50 | } 51 | 52 | // get a bucket and mc.Client connection 53 | bucket1, err := getTestConnection("default") 54 | if err != nil { 55 | panic(err) 56 | } 57 | 58 | // add mutations to the bucket 59 | var mutationCount = 5000 60 | addKVset(bucket1, mutationCount) 61 | 62 | log.Println("Restarting ....") 63 | feed, err = bucket.StartUprFeed("index" /*name*/, 0) 64 | if err != nil { 65 | panic(err) 66 | } 67 | 68 | for i := 0; i < vbcount; i++ { 69 | log.Printf("Vbucket %d High sequence number %d, Snapshot end sequence %d", i, vbseqNo[i][0], vbseqNo[i][1]) 70 | failoverLog := failoverlogMap[uint16(i)] 71 | err := feed.UprRequestStream( 72 | uint16(i) /*vbno*/, uint16(0) /*opaque*/, 0, /*flag*/ 73 | failoverLog[0][0], /*vbuuid*/ 74 | vbseqNo[i][0] /*seqStart*/, 0xFFFFFFFFFFFFFFFF, /*seqEnd*/ 75 | 0 /*snaps*/, vbseqNo[i][1]) 76 | if err != nil { 77 | fmt.Printf("%s", err.Error()) 78 | } 79 | } 80 | 81 | var e, f *memcached.UprEvent 82 | var mutations int 83 | loop: 84 | for { 85 | select { 86 | case f = <-feed.C: 87 | case <-time.After(time.Second): 88 | break loop 89 | } 90 | 91 | if f.Opcode 
== gomemcached.UPR_MUTATION { 92 | vbseqNo[f.VBucket][0] = f.Seqno 93 | e = f 94 | mutations += 1 95 | } 96 | } 97 | 98 | log.Printf(" got %d mutations", mutations) 99 | 100 | exptSeq := vbseqNo[e.VBucket][0] + 1 101 | 102 | if e.Seqno != exptSeq { 103 | fmt.Printf("Expected seqno %v, received %v", exptSeq+1, e.Seqno) 104 | //panic(err) 105 | } 106 | feed.Close() 107 | } 108 | 109 | func addKVset(b *couchbase.Bucket, count int) { 110 | for i := 0; i < count; i++ { 111 | key := fmt.Sprintf("key%v", i) 112 | value := fmt.Sprintf("Hello world%v", i) 113 | if err := b.Set(key, 0, value); err != nil { 114 | panic(err) 115 | } 116 | } 117 | } 118 | 119 | func receiveMutations(feed *couchbase.UprFeed, breakAfter int) [][2]uint64 { 120 | var vbseqNo = make([][2]uint64, vbcount) 121 | var mutations = 0 122 | var ssMarkers = 0 123 | var e *memcached.UprEvent 124 | loop: 125 | for { 126 | select { 127 | case e = <-feed.C: 128 | case <-time.After(time.Second): 129 | break loop 130 | } 131 | 132 | if e.Opcode == gomemcached.UPR_MUTATION { 133 | vbseqNo[e.VBucket][0] = e.Seqno 134 | mutations += 1 135 | } 136 | 137 | if e.Opcode == gomemcached.UPR_MUTATION { 138 | vbseqNo[e.VBucket][1] = e.SnapendSeq 139 | ssMarkers += 1 140 | } 141 | if mutations == breakAfter { 142 | break loop 143 | } 144 | } 145 | 146 | log.Printf(" Mutation count %d, Snapshot markers %d", mutations, ssMarkers) 147 | 148 | return vbseqNo 149 | } 150 | 151 | func getTestConnection(bucketname string) (*couchbase.Bucket, error) { 152 | couch, err := couchbase.Connect(TESTURL) 153 | if err != nil { 154 | fmt.Println("Make sure that couchbase is at", TESTURL) 155 | return nil, err 156 | } 157 | pool, err := couch.GetPool("default") 158 | if err != nil { 159 | return nil, err 160 | } 161 | bucket, err := pool.GetBucket(bucketname) 162 | return bucket, err 163 | } 164 | -------------------------------------------------------------------------------- /examples/view_params/view_params.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "net/url" 8 | "os" 9 | 10 | "github.com/couchbase/go-couchbase" 11 | ) 12 | 13 | func mf(err error, msg string) { 14 | if err != nil { 15 | log.Fatalf("%v: %v", msg, err) 16 | } 17 | } 18 | 19 | var updateInterval = flag.Int("updateInterval", 5000, 20 | "min update interval ms (int)") 21 | var updateMinChanges = flag.Int("updateMinChanges", 5000, 22 | "min update changes (int)") 23 | 24 | func main() { 25 | 26 | flag.Usage = func() { 27 | fmt.Fprintf(os.Stderr, 28 | "%v [flags] http://user:pass@host:8091/\n\n", 29 | os.Args[0]) 30 | flag.PrintDefaults() 31 | fmt.Fprintf(os.Stderr, "\nExample : ./view_params -updateInterval=7 -updateMinChanges=4000 http://Administrator:asdasd@localhost:9000\n") 32 | os.Exit(64) 33 | } 34 | 35 | flag.Parse() 36 | 37 | if flag.NArg() < 1 { 38 | flag.Usage() 39 | } 40 | 41 | u, err := url.Parse(flag.Arg(0)) 42 | mf(err, "parse") 43 | 44 | params := map[string]interface{}{"updateInterval": *updateInterval, "updateMinChanges": *updateMinChanges} 45 | 46 | viewParams, err := couchbase.SetViewUpdateParams(u.String(), params) 47 | if err != nil { 48 | log.Fatal(" Failed ", err) 49 | } 50 | 51 | log.Printf("Returned view params %v", viewParams) 52 | 53 | } 54 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/couchbase/go-couchbase 2 | 3 | go 1.13 4 | -------------------------------------------------------------------------------- /observe.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "fmt" 5 | "github.com/couchbase/goutils/logging" 6 | "sync" 7 | ) 8 | 9 | type PersistTo uint8 10 | 11 | const ( 12 | PersistNone = PersistTo(0x00) 13 | PersistMaster = PersistTo(0x01) 14 | 
PersistOne = PersistTo(0x02) 15 | PersistTwo = PersistTo(0x03) 16 | PersistThree = PersistTo(0x04) 17 | PersistFour = PersistTo(0x05) 18 | ) 19 | 20 | type ObserveTo uint8 21 | 22 | const ( 23 | ObserveNone = ObserveTo(0x00) 24 | ObserveReplicateOne = ObserveTo(0x01) 25 | ObserveReplicateTwo = ObserveTo(0x02) 26 | ObserveReplicateThree = ObserveTo(0x03) 27 | ObserveReplicateFour = ObserveTo(0x04) 28 | ) 29 | 30 | type JobType uint8 31 | 32 | const ( 33 | OBSERVE = JobType(0x00) 34 | PERSIST = JobType(0x01) 35 | ) 36 | 37 | type ObservePersistJob struct { 38 | vb uint16 39 | vbuuid uint64 40 | hostname string 41 | jobType JobType 42 | failover uint8 43 | lastPersistedSeqNo uint64 44 | currentSeqNo uint64 45 | resultChan chan *ObservePersistJob 46 | errorChan chan *OPErrResponse 47 | } 48 | 49 | type OPErrResponse struct { 50 | vb uint16 51 | vbuuid uint64 52 | err error 53 | job *ObservePersistJob 54 | } 55 | 56 | var ObservePersistPool = NewPool(1024) 57 | var OPJobChan = make(chan *ObservePersistJob, 1024) 58 | var OPJobDone = make(chan bool) 59 | 60 | var wg sync.WaitGroup 61 | 62 | func (b *Bucket) StartOPPollers(maxWorkers int) { 63 | 64 | for i := 0; i < maxWorkers; i++ { 65 | go b.OPJobPoll() 66 | wg.Add(1) 67 | } 68 | wg.Wait() 69 | } 70 | 71 | func (b *Bucket) SetObserveAndPersist(nPersist PersistTo, nObserve ObserveTo) (err error) { 72 | 73 | numNodes := len(b.Nodes()) 74 | if int(nPersist) > numNodes || int(nObserve) > numNodes { 75 | return fmt.Errorf("Not enough healthy nodes in the cluster") 76 | } 77 | 78 | if int(nPersist) > (b.Replicas+1) || int(nObserve) > b.Replicas { 79 | return fmt.Errorf("Not enough replicas in the cluster") 80 | } 81 | 82 | if EnableMutationToken == false { 83 | return fmt.Errorf("Mutation Tokens not enabled ") 84 | } 85 | 86 | b.ds = &DurablitySettings{Persist: PersistTo(nPersist), Observe: ObserveTo(nObserve)} 87 | return 88 | } 89 | 90 | func (b *Bucket) ObserveAndPersistPoll(vb uint16, vbuuid uint64, seqNo uint64) (err 
error, failover bool) { 91 | b.RLock() 92 | ds := b.ds 93 | b.RUnlock() 94 | 95 | if ds == nil { 96 | return 97 | } 98 | 99 | nj := 0 // total number of jobs 100 | resultChan := make(chan *ObservePersistJob, 10) 101 | errChan := make(chan *OPErrResponse, 10) 102 | 103 | nodes := b.GetNodeList(vb) 104 | if int(ds.Observe) > len(nodes) || int(ds.Persist) > len(nodes) { 105 | return fmt.Errorf("Not enough healthy nodes in the cluster"), false 106 | } 107 | 108 | logging.Infof("Node list %v", nodes) 109 | 110 | if ds.Observe >= ObserveReplicateOne { 111 | // create a job for each host 112 | for i := ObserveReplicateOne; i < ds.Observe+1; i++ { 113 | opJob := ObservePersistPool.Get() 114 | opJob.vb = vb 115 | opJob.vbuuid = vbuuid 116 | opJob.jobType = OBSERVE 117 | opJob.hostname = nodes[i] 118 | opJob.resultChan = resultChan 119 | opJob.errorChan = errChan 120 | 121 | OPJobChan <- opJob 122 | nj++ 123 | 124 | } 125 | } 126 | 127 | if ds.Persist >= PersistMaster { 128 | for i := PersistMaster; i < ds.Persist+1; i++ { 129 | opJob := ObservePersistPool.Get() 130 | opJob.vb = vb 131 | opJob.vbuuid = vbuuid 132 | opJob.jobType = PERSIST 133 | opJob.hostname = nodes[i] 134 | opJob.resultChan = resultChan 135 | opJob.errorChan = errChan 136 | 137 | OPJobChan <- opJob 138 | nj++ 139 | 140 | } 141 | } 142 | 143 | ok := true 144 | for ok { 145 | select { 146 | case res := <-resultChan: 147 | jobDone := false 148 | if res.failover == 0 { 149 | // no failover 150 | if res.jobType == PERSIST { 151 | if res.lastPersistedSeqNo >= seqNo { 152 | jobDone = true 153 | } 154 | 155 | } else { 156 | if res.currentSeqNo >= seqNo { 157 | jobDone = true 158 | } 159 | } 160 | 161 | if jobDone == true { 162 | nj-- 163 | ObservePersistPool.Put(res) 164 | } else { 165 | // requeue this job 166 | OPJobChan <- res 167 | } 168 | 169 | } else { 170 | // Not currently handling failover scenarios TODO 171 | nj-- 172 | ObservePersistPool.Put(res) 173 | failover = true 174 | } 175 | 176 | if nj == 0 { 
177 | // done with all the jobs 178 | ok = false 179 | close(resultChan) 180 | close(errChan) 181 | } 182 | 183 | case Err := <-errChan: 184 | logging.Errorf("Error in Observe/Persist %v", Err.err) 185 | err = fmt.Errorf("Error in Observe/Persist job %v", Err.err) 186 | nj-- 187 | ObservePersistPool.Put(Err.job) 188 | if nj == 0 { 189 | close(resultChan) 190 | close(errChan) 191 | ok = false 192 | } 193 | } 194 | } 195 | 196 | return 197 | } 198 | 199 | func (b *Bucket) OPJobPoll() { 200 | 201 | ok := true 202 | for ok == true { 203 | select { 204 | case job := <-OPJobChan: 205 | pool := b.getConnPoolByHost(job.hostname, false /* bucket not already locked */) 206 | if pool == nil { 207 | errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid} 208 | errRes.err = fmt.Errorf("Pool not found for host %v", job.hostname) 209 | errRes.job = job 210 | job.errorChan <- errRes 211 | continue 212 | } 213 | conn, err := pool.Get() 214 | if err != nil { 215 | errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid} 216 | errRes.err = fmt.Errorf("Unable to get connection from pool %v", err) 217 | errRes.job = job 218 | job.errorChan <- errRes 219 | continue 220 | } 221 | 222 | res, err := conn.ObserveSeq(job.vb, job.vbuuid) 223 | if err != nil { 224 | errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid} 225 | errRes.err = fmt.Errorf("Command failed %v", err) 226 | errRes.job = job 227 | job.errorChan <- errRes 228 | continue 229 | 230 | } 231 | pool.Return(conn) 232 | job.lastPersistedSeqNo = res.LastPersistedSeqNo 233 | job.currentSeqNo = res.CurrentSeqNo 234 | job.failover = res.Failover 235 | 236 | job.resultChan <- job 237 | case <-OPJobDone: 238 | logging.Infof("Observe Persist Poller exitting") 239 | ok = false 240 | } 241 | } 242 | wg.Done() 243 | } 244 | 245 | func (b *Bucket) GetNodeList(vb uint16) []string { 246 | 247 | vbm := b.VBServerMap() 248 | if len(vbm.VBucketMap) < int(vb) { 249 | logging.Infof("vbmap smaller than vblist") 250 | return nil 251 | } 252 | 
253 | nodes := make([]string, len(vbm.VBucketMap[vb])) 254 | for i := 0; i < len(vbm.VBucketMap[vb]); i++ { 255 | n := vbm.VBucketMap[vb][i] 256 | if n < 0 { 257 | continue 258 | } 259 | 260 | node := b.getMasterNode(n) 261 | if len(node) > 1 { 262 | nodes[i] = node 263 | } 264 | continue 265 | 266 | } 267 | return nodes 268 | } 269 | 270 | //pool of ObservePersist Jobs 271 | type OPpool struct { 272 | pool chan *ObservePersistJob 273 | } 274 | 275 | // NewPool creates a new pool of jobs 276 | func NewPool(max int) *OPpool { 277 | return &OPpool{ 278 | pool: make(chan *ObservePersistJob, max), 279 | } 280 | } 281 | 282 | // Borrow a Client from the pool. 283 | func (p *OPpool) Get() *ObservePersistJob { 284 | var o *ObservePersistJob 285 | select { 286 | case o = <-p.pool: 287 | default: 288 | o = &ObservePersistJob{} 289 | } 290 | return o 291 | } 292 | 293 | // Return returns a Client to the pool. 294 | func (p *OPpool) Put(o *ObservePersistJob) { 295 | select { 296 | case p.pool <- o: 297 | default: 298 | // let it go, let it go... 
299 | } 300 | } 301 | -------------------------------------------------------------------------------- /perf/generate-json.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/Pallinder/go-randomdata" 6 | ) 7 | 8 | type contacts struct { 9 | Name string 10 | Email string 11 | Age int 12 | Address string 13 | City string 14 | State string 15 | Country string 16 | } 17 | 18 | // return a json marshalled document 19 | func generateRandomDocument() ([]byte, error) { 20 | c := &contacts{} 21 | c.Name = randomdata.FullName(randomdata.RandomGender) 22 | c.Email = randomdata.Email() 23 | c.Age = randomdata.Number(20, 50) 24 | c.Address = randomdata.Address() 25 | c.City = randomdata.City() 26 | c.State = randomdata.State(randomdata.Large) 27 | c.Country = randomdata.Country(randomdata.FullCountry) 28 | 29 | return json.Marshal(c) 30 | } 31 | -------------------------------------------------------------------------------- /perf/perf.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/rand" 5 | "encoding/base64" 6 | "encoding/json" 7 | "flag" 8 | "fmt" 9 | "github.com/couchbase/go-couchbase" 10 | "log" 11 | "runtime" 12 | "sync" 13 | "time" 14 | ) 15 | 16 | func maybeFatal(err error) { 17 | if err != nil { 18 | log.Fatalf("Error: %v", err) 19 | } 20 | } 21 | 22 | var serverURL = flag.String("serverURL", "http://localhost:9000", 23 | "couchbase server URL") 24 | var poolName = flag.String("poolName", "default", 25 | "pool name") 26 | var bucketName = flag.String("bucketName", "default", 27 | "bucket name") 28 | var set = flag.Bool("set", false, "create document mode") 29 | var size = flag.Int("size", 1024, "document size") 30 | var documents = flag.Int("documents", 2000000, "total documents") 31 | var threads = flag.Int("threads", 10, "Number of threads") 32 | var quantum = flag.Int("quantum", 
1024, "Number of documents per bulkGet") 33 | var jsonDoc = flag.Bool("generate-json", false, "generate json documents") 34 | 35 | var wg sync.WaitGroup 36 | 37 | func main() { 38 | flag.Parse() 39 | 40 | runtime.GOMAXPROCS(*threads) 41 | client, err := couchbase.Connect(*serverURL) 42 | if err != nil { 43 | log.Printf("Connect failed %v", err) 44 | return 45 | } 46 | 47 | cbpool, err := client.GetPool("default") 48 | if err != nil { 49 | log.Printf("Failed to connect to default pool %v", err) 50 | return 51 | } 52 | 53 | var cbbucket *couchbase.Bucket 54 | cbbucket, err = cbpool.GetBucket(*bucketName) 55 | 56 | start := time.Now() 57 | if *set == true { 58 | var value []byte 59 | if *jsonDoc == false { 60 | value = generateRandomDoc(*size) 61 | } 62 | for i := 0; i < *threads; i++ { 63 | go doSetOps(cbbucket, i*(*documents / *threads), *documents / *threads, value) 64 | wg.Add(1) 65 | } 66 | } else { 67 | for i := 0; i < *threads; i++ { 68 | go doBulkGetOps(cbbucket, *documents / *threads, *quantum, i*(*documents / *threads)) 69 | wg.Add(1) 70 | } 71 | } 72 | 73 | wg.Wait() 74 | 75 | finish := time.Now().Sub(start) 76 | fmt.Printf("**** Did %d ops in %s. 
Ops/sec %d\n", 77 | *documents, finish.String(), int(float64(*documents)/finish.Seconds())) 78 | 79 | if err != nil { 80 | log.Printf("Failed to connect to bucket %v", err) 81 | return 82 | } 83 | 84 | } 85 | 86 | func doBulkGetOps(b *couchbase.Bucket, total int, quantum int, startNum int) { 87 | 88 | defer wg.Done() 89 | start := time.Now() 90 | iter := total / quantum 91 | currentKeyNum := startNum 92 | for i := 0; i < iter; i++ { 93 | 94 | keylist := make([]string, quantum, quantum) 95 | for j := 0; j < quantum; j++ { 96 | key := fmt.Sprintf("test%d", currentKeyNum) 97 | keylist[j] = key 98 | currentKeyNum++ 99 | 100 | } 101 | _, err := b.GetBulk(keylist, start.Add(5*time.Second), nil) 102 | if err != nil { 103 | log.Printf(" Failed to get keys startnum %s to %d", keylist[0], quantum) 104 | } 105 | } 106 | fmt.Printf("Did %d ops in %s\n", 107 | total, time.Now().Sub(start).String()) 108 | } 109 | 110 | func generateRandomDoc(size int) []byte { 111 | 112 | rb := make([]byte, size) 113 | _, err := rand.Read(rb) 114 | 115 | if err != nil { 116 | log.Fatalf("Cannot generate data %v", err) 117 | } 118 | 119 | rs := base64.URLEncoding.EncodeToString(rb) 120 | data := map[string]interface{}{"data": rs} 121 | 122 | encode, _ := json.Marshal(data) 123 | return encode 124 | 125 | } 126 | 127 | func doSetOps(b *couchbase.Bucket, startNum int, total int, data []byte) { 128 | 129 | defer wg.Done() 130 | 131 | start := time.Now() 132 | 133 | var err error 134 | for i := 0; i < total; i++ { 135 | if data == nil { 136 | data, err = generateRandomDocument() 137 | if err != nil { 138 | log.Fatal(err) 139 | } 140 | } 141 | 142 | k := fmt.Sprintf("test%d", startNum+i) 143 | maybeFatal(b.SetRaw(k, 0, data)) 144 | } 145 | fmt.Printf("Did %d ops in %s\n", 146 | total, time.Now().Sub(start).String()) 147 | } 148 | -------------------------------------------------------------------------------- /perf/readme.txt: 
--------------------------------------------------------------------------------
Two modes: create and bulkGet.

1. To create documents

./perf -set=true

The default settings create 2 million documents of 1024 bytes using 10 threads.
The following command creates 5 million documents of 512 bytes using 20 threads:

./perf -set=true -size=512 -threads=20 -documents=5000000

2. BulkGet performance

./perf

The default settings fetch 2 million documents in 10 threads with a bulkGet
batch size (quantum) of 1024.
To fetch 5 million documents in 20 threads with a quantum of 2048:

./perf -documents=5000000 -threads=20 -quantum=2048

Other options
--------------

-serverURL. Default http://localhost:9000
-bucketName. Default: default
-poolName. Default: default
--------------------------------------------------------------------------------
/platform/platform.go:
--------------------------------------------------------------------------------
// Copyright (c) 2013 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
9 | 10 | // +build !windows 11 | 12 | package platform 13 | 14 | import ( 15 | _ "net/http/pprof" 16 | "os" 17 | "os/signal" 18 | "runtime/pprof" 19 | "syscall" 20 | ) 21 | 22 | import "C" 23 | 24 | func DumpOnSignal() { 25 | c := make(chan os.Signal, 1) 26 | signal.Notify(c, syscall.SIGUSR2) 27 | for _ = range c { 28 | pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) 29 | } 30 | } 31 | 32 | func HideConsole(_ bool) { 33 | } 34 | -------------------------------------------------------------------------------- /platform/platform_windows.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013 Couchbase, Inc. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 3 | // except in compliance with the License. You may obtain a copy of the License at 4 | // http://www.apache.org/licenses/LICENSE-2.0 5 | // Unless required by applicable law or agreed to in writing, software distributed under the 6 | // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 7 | // either express or implied. See the License for the specific language governing permissions 8 | // and limitations under the License. 9 | 10 | // +build windows 11 | 12 | package platform 13 | 14 | import "syscall" 15 | 16 | func DumpOnSignal() { 17 | } 18 | 19 | // Hide console on windows without removing it unlike -H windowsgui. 
20 | func HideConsole(hide bool) { 21 | var k32 = syscall.NewLazyDLL("kernel32.dll") 22 | var cw = k32.NewProc("GetConsoleWindow") 23 | var u32 = syscall.NewLazyDLL("user32.dll") 24 | var sw = u32.NewProc("ShowWindow") 25 | hwnd, _, _ := cw.Call() 26 | if hwnd == 0 { 27 | return 28 | } 29 | if hide { 30 | var SW_HIDE uintptr = 0 31 | sw.Call(hwnd, SW_HIDE) 32 | } else { 33 | var SW_RESTORE uintptr = 9 34 | sw.Call(hwnd, SW_RESTORE) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /platform/sync.go: -------------------------------------------------------------------------------- 1 | // 2 | // This is a thin wrapper around sync/atomic to help with alignment issues. 3 | // This is for 64-bit OS and hence is a no-op effectively. 4 | // 5 | 6 | // +build !386 7 | 8 | package platform 9 | 10 | import "unsafe" 11 | import orig "sync/atomic" 12 | 13 | type AlignedInt64 int64 14 | type AlignedUint64 uint64 15 | 16 | func NewAlignedInt64(v int64) AlignedInt64 { 17 | return AlignedInt64(v) 18 | } 19 | 20 | func NewAlignedUint64(v uint64) AlignedUint64 { 21 | return AlignedUint64(v) 22 | } 23 | 24 | func SwapInt32(addr *int32, new int32) int32 { 25 | return orig.SwapInt32(addr, new) 26 | } 27 | 28 | func SwapInt64(addr *AlignedInt64, new int64) int64 { 29 | return orig.SwapInt64((*int64)(addr), new) 30 | } 31 | 32 | func SwapUint32(addr *uint32, new uint32) uint32 { 33 | return orig.SwapUint32(addr, new) 34 | } 35 | 36 | func SwapUint64(addr *AlignedUint64, new uint64) uint64 { 37 | return orig.SwapUint64((*uint64)(addr), new) 38 | } 39 | 40 | func SwapUintptr(addr *uintptr, new uintptr) uintptr { 41 | return orig.SwapUintptr(addr, new) 42 | } 43 | 44 | func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer { 45 | return orig.SwapPointer(addr, new) 46 | } 47 | 48 | func CompareAndSwapInt32(addr *int32, old, new int32) bool { 49 | return orig.CompareAndSwapInt32(addr, old, new) 50 | } 51 | 52 | func 
CompareAndSwapInt64(addr *AlignedInt64, old, new int64) bool { 53 | return orig.CompareAndSwapInt64((*int64)(addr), old, new) 54 | } 55 | 56 | func CompareAndSwapUint32(addr *uint32, old, new uint32) bool { 57 | return orig.CompareAndSwapUint32(addr, old, new) 58 | } 59 | 60 | func CompareAndSwapUint64(addr *AlignedUint64, old, new uint64) bool { 61 | return orig.CompareAndSwapUint64((*uint64)(addr), old, new) 62 | } 63 | 64 | func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) bool { 65 | return orig.CompareAndSwapUintptr(addr, old, new) 66 | } 67 | 68 | func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) bool { 69 | return orig.CompareAndSwapPointer(addr, old, new) 70 | } 71 | 72 | func AddInt32(addr *int32, delta int32) int32 { 73 | return orig.AddInt32(addr, delta) 74 | } 75 | 76 | func AddUint32(addr *uint32, delta uint32) uint32 { 77 | return orig.AddUint32(addr, delta) 78 | } 79 | 80 | func AddInt64(addr *AlignedInt64, delta int64) int64 { 81 | return orig.AddInt64((*int64)(addr), delta) 82 | } 83 | 84 | func AddUint64(addr *AlignedUint64, delta uint64) uint64 { 85 | return orig.AddUint64((*uint64)(addr), delta) 86 | } 87 | 88 | func AddUintptr(addr *uintptr, delta uintptr) uintptr { 89 | return orig.AddUintptr(addr, delta) 90 | } 91 | 92 | func LoadInt32(addr *int32) int32 { 93 | return orig.LoadInt32(addr) 94 | } 95 | 96 | func LoadInt64(addr *AlignedInt64) int64 { 97 | return orig.LoadInt64((*int64)(addr)) 98 | } 99 | 100 | func LoadUint32(addr *uint32) uint32 { 101 | return orig.LoadUint32(addr) 102 | } 103 | 104 | func LoadUint64(addr *AlignedUint64) uint64 { 105 | return orig.LoadUint64((*uint64)(addr)) 106 | } 107 | 108 | func LoadUintptr(addr *uintptr) uintptr { 109 | return orig.LoadUintptr(addr) 110 | } 111 | 112 | func LoadPointer(addr *unsafe.Pointer) unsafe.Pointer { 113 | return orig.LoadPointer(addr) 114 | } 115 | 116 | func StoreInt32(addr *int32, val int32) { 117 | orig.StoreInt32(addr, val) 118 | } 119 | 120 | 
func StoreInt64(addr *AlignedInt64, val int64) { 121 | orig.StoreInt64((*int64)(addr), val) 122 | } 123 | 124 | func StoreUint32(addr *uint32, val uint32) { 125 | orig.StoreUint32(addr, val) 126 | } 127 | 128 | func StoreUint64(addr *AlignedUint64, val uint64) { 129 | orig.StoreUint64((*uint64)(addr), val) 130 | } 131 | 132 | func StoreUintptr(addr *uintptr, val uintptr) { 133 | orig.StoreUintptr(addr, val) 134 | } 135 | 136 | func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) { 137 | orig.StorePointer(addr, val) 138 | } 139 | -------------------------------------------------------------------------------- /platform/sync_386.go: -------------------------------------------------------------------------------- 1 | // 2 | // This is a thin wrapper around sync/atomic to help with alignment issues. 3 | // 4 | 5 | // +build 386 6 | 7 | package platform 8 | 9 | import "unsafe" 10 | import orig "sync/atomic" 11 | 12 | type AlignedInt64 struct{ data int64 } 13 | type AlignedUint64 struct{ data uint64 } 14 | 15 | func NewAlignedInt64(v int64) AlignedInt64 { 16 | var nw AlignedInt64 17 | nw.data = v 18 | return nw 19 | } 20 | 21 | func NewAlignedUint64(v uint64) AlignedUint64 { 22 | var nw AlignedUint64 23 | nw.data = v 24 | return nw 25 | } 26 | 27 | func SwapInt32(addr *int32, new int32) int32 { 28 | return orig.SwapInt32(addr, new) 29 | } 30 | 31 | func SwapInt64(addr *AlignedInt64, new int64) int64 { 32 | return orig.SwapInt64(&addr.data, new) 33 | } 34 | 35 | func SwapUint32(addr *uint32, new uint32) uint32 { 36 | return orig.SwapUint32(addr, new) 37 | } 38 | 39 | func SwapUint64(addr *AlignedUint64, new uint64) uint64 { 40 | return orig.SwapUint64(&(addr.data), new) 41 | } 42 | 43 | func SwapUintptr(addr *uintptr, new uintptr) uintptr { 44 | return orig.SwapUintptr(addr, new) 45 | } 46 | 47 | func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer { 48 | return orig.SwapPointer(addr, new) 49 | } 50 | 51 | func CompareAndSwapInt32(addr *int32, 
old, new int32) bool { 52 | return orig.CompareAndSwapInt32(addr, old, new) 53 | } 54 | 55 | func CompareAndSwapInt64(addr *AlignedInt64, old, new int64) bool { 56 | return orig.CompareAndSwapInt64(&addr.data, old, new) 57 | } 58 | 59 | func CompareAndSwapUint32(addr *uint32, old, new uint32) bool { 60 | return orig.CompareAndSwapUint32(addr, old, new) 61 | } 62 | 63 | func CompareAndSwapUint64(addr *AlignedUint64, old, new uint64) bool { 64 | return orig.CompareAndSwapUint64(&addr.data, old, new) 65 | } 66 | 67 | func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) bool { 68 | return orig.CompareAndSwapUintptr(addr, old, new) 69 | } 70 | 71 | func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) bool { 72 | return orig.CompareAndSwapPointer(addr, old, new) 73 | } 74 | 75 | func AddInt32(addr *int32, delta int32) int32 { 76 | return orig.AddInt32(addr, delta) 77 | } 78 | 79 | func AddUint32(addr *uint32, delta uint32) uint32 { 80 | return orig.AddUint32(addr, delta) 81 | } 82 | 83 | func AddInt64(addr *AlignedInt64, delta int64) int64 { 84 | return orig.AddInt64(&addr.data, delta) 85 | } 86 | 87 | func AddUint64(addr *AlignedUint64, delta uint64) uint64 { 88 | return orig.AddUint64(&addr.data, delta) 89 | } 90 | 91 | func AddUintptr(addr *uintptr, delta uintptr) uintptr { 92 | return orig.AddUintptr(addr, delta) 93 | } 94 | 95 | func LoadInt32(addr *int32) int32 { 96 | return orig.LoadInt32(addr) 97 | } 98 | 99 | func LoadInt64(addr *AlignedInt64) int64 { 100 | return orig.LoadInt64(&addr.data) 101 | } 102 | 103 | func LoadUint32(addr *uint32) uint32 { 104 | return orig.LoadUint32(addr) 105 | } 106 | 107 | func LoadUint64(addr *AlignedUint64) uint64 { 108 | return orig.LoadUint64(&addr.data) 109 | } 110 | 111 | func LoadUintptr(addr *uintptr) uintptr { 112 | return orig.LoadUintptr(addr) 113 | } 114 | 115 | func LoadPointer(addr *unsafe.Pointer) unsafe.Pointer { 116 | return orig.LoadPointer(addr) 117 | } 118 | 119 | func StoreInt32(addr 
*int32, val int32) { 120 | orig.StoreInt32(addr, val) 121 | } 122 | 123 | func StoreInt64(addr *AlignedInt64, val int64) { 124 | orig.StoreInt64(&addr.data, val) 125 | } 126 | 127 | func StoreUint32(addr *uint32, val uint32) { 128 | orig.StoreUint32(addr, val) 129 | } 130 | 131 | func StoreUint64(addr *AlignedUint64, val uint64) { 132 | orig.StoreUint64(&addr.data, val) 133 | } 134 | 135 | func StoreUintptr(addr *uintptr, val uintptr) { 136 | orig.StoreUintptr(addr, val) 137 | } 138 | 139 | func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) { 140 | orig.StorePointer(addr, val) 141 | } 142 | -------------------------------------------------------------------------------- /platform/test/test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | atomic "github.com/couchbase/go-couchbase/platform" 6 | ) 7 | 8 | func main() { 9 | 10 | var someval atomic.AlignedInt64 11 | 12 | atomic.StoreInt64(&someval, int64(512)) 13 | fmt.Printf(" Value of someval %v", someval) 14 | 15 | rval := atomic.LoadInt64(&someval) 16 | 17 | fmt.Printf(" Returned val %v", rval) 18 | } 19 | -------------------------------------------------------------------------------- /pools_test.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "encoding/json" 5 | "sync" 6 | "testing" 7 | "unsafe" 8 | ) 9 | 10 | var samplePools = `{ 11 | "componentsVersion": { 12 | "ale": "eff9516", 13 | "couch": "1.2.0a-7dd003e-git", 14 | "couch_set_view": "1.2.0a-7dd003e-git", 15 | "crypto": "2.0.3", 16 | "ibrowse": "2.2.0", 17 | "inets": "5.6", 18 | "kernel": "2.14.4", 19 | "mnesia": "4.4.19", 20 | "mochiweb": "1.4.1", 21 | "ns_server": "2.0.0r-388-gf35126e-community", 22 | "oauth": "7d85d3ef", 23 | "os_mon": "2.2.6", 24 | "public_key": "0.12", 25 | "sasl": "2.1.9.4", 26 | "ssl": "4.1.5", 27 | "stdlib": "1.17.4" 28 | }, 29 | "implementationVersion": 
"2.0.0r-388-gf35126e-community", 30 | "isAdminCreds": false, 31 | "pools": [ 32 | { 33 | "name": "default", 34 | "streamingUri": "/poolsStreaming/default", 35 | "uri": "/pools/default" 36 | } 37 | ], 38 | "uuid": "debbb353c26a6ee1c9eceb748a8c6907" 39 | }` 40 | 41 | var samplePool = `{ 42 | "alerts": [], 43 | "autoCompactionSettings": { 44 | "databaseFragmentationThreshold": 80, 45 | "parallelDBAndViewCompaction": false, 46 | "viewFragmentationThreshold": 80 47 | }, 48 | "balanced": true, 49 | "buckets": { 50 | "uri": "/pools/default/buckets?v=118084983" 51 | }, 52 | "controllers": { 53 | "addNode": { 54 | "uri": "/controller/addNode" 55 | }, 56 | "ejectNode": { 57 | "uri": "/controller/ejectNode" 58 | }, 59 | "failOver": { 60 | "uri": "/controller/failOver" 61 | }, 62 | "reAddNode": { 63 | "uri": "/controller/reAddNode" 64 | }, 65 | "rebalance": { 66 | "uri": "/controller/rebalance" 67 | }, 68 | "replication": { 69 | "createURI": "/controller/createReplication", 70 | "infosURI": "/couchBase/_replicator/_design/_replicator_info/_view/infos?group_level=1", 71 | "replicatorDBURI": "/couchBase/_replicator" 72 | }, 73 | "setAutoCompaction": { 74 | "uri": "/controller/setAutoCompaction", 75 | "validateURI": "/controller/setAutoCompaction?just_validate=1" 76 | } 77 | }, 78 | "counters": { 79 | "rebalance_start": 1, 80 | "rebalance_success": 1 81 | }, 82 | "failoverWarnings": [], 83 | "name": "default", 84 | "nodeStatusesUri": "/nodeStatuses", 85 | "nodes": [ 86 | { 87 | "clusterCompatibility": 1, 88 | "clusterMembership": "active", 89 | "couchApiBase": "http://10.203.6.236:8092/", 90 | "hostname": "10.203.6.236:8091", 91 | "alternateAddresses": { 92 | "external": { 93 | "hostname": "server0.example.com" 94 | } 95 | }, 96 | "interestingStats": { 97 | "curr_items": 0, 98 | "curr_items_tot": 0, 99 | "vb_replica_curr_items": 0 100 | }, 101 | "mcdMemoryAllocated": 5978, 102 | "mcdMemoryReserved": 5978, 103 | "memoryFree": 6891118592, 104 | "memoryTotal": 7836254208, 105 | 
"os": "x86_64-unknown-linux-gnu", 106 | "ports": { 107 | "direct": 11210, 108 | "proxy": 11211 109 | }, 110 | "status": "healthy", 111 | "systemStats": { 112 | "cpu_utilization_rate": 0.5025125628140703, 113 | "swap_total": 4294963200, 114 | "swap_used": 0 115 | }, 116 | "thisNode": true, 117 | "uptime": "20516", 118 | "version": "2.0.0r-388-gf35126e-community" 119 | }, 120 | { 121 | "clusterCompatibility": 1, 122 | "clusterMembership": "active", 123 | "couchApiBase": "http://10.32.21.163:8092/", 124 | "hostname": "10.32.21.163:8091", 125 | "alternateAddresses": { 126 | "external": { 127 | "hostname": "server1.example.com" 128 | } 129 | }, 130 | "interestingStats": { 131 | "curr_items": 0, 132 | "curr_items_tot": 0, 133 | "vb_replica_curr_items": 0 134 | }, 135 | "mcdMemoryAllocated": 5978, 136 | "mcdMemoryReserved": 5978, 137 | "memoryFree": 6959566848, 138 | "memoryTotal": 7836254208, 139 | "os": "x86_64-unknown-linux-gnu", 140 | "ports": { 141 | "direct": 11210, 142 | "proxy": 11211 143 | }, 144 | "status": "healthy", 145 | "systemStats": { 146 | "cpu_utilization_rate": 0.7575757575757576, 147 | "swap_total": 4294963200, 148 | "swap_used": 0 149 | }, 150 | "uptime": "20523", 151 | "version": "2.0.0r-388-gf35126e-community" 152 | }, 153 | { 154 | "clusterCompatibility": 1, 155 | "clusterMembership": "active", 156 | "couchApiBase": "http://10.98.83.17:8092/", 157 | "hostname": "10.98.83.17:8091", 158 | "alternateAddresses": { 159 | "external": { 160 | "hostname": "server2.example.com" 161 | } 162 | }, 163 | "interestingStats": { 164 | "curr_items": 0, 165 | "curr_items_tot": 0, 166 | "vb_replica_curr_items": 0 167 | }, 168 | "mcdMemoryAllocated": 5978, 169 | "mcdMemoryReserved": 5978, 170 | "memoryFree": 6960541696, 171 | "memoryTotal": 7836254208, 172 | "os": "x86_64-unknown-linux-gnu", 173 | "ports": { 174 | "direct": 11210, 175 | "proxy": 11211 176 | }, 177 | "status": "healthy", 178 | "systemStats": { 179 | "cpu_utilization_rate": 0.24213075060532688, 180 | 
"swap_total": 4294963200, 181 | "swap_used": 0 182 | }, 183 | "uptime": "20505", 184 | "version": "2.0.0r-388-gf35126e-community" 185 | }, 186 | { 187 | "clusterCompatibility": 1, 188 | "clusterMembership": "active", 189 | "couchApiBase": "http://10.34.21.232:8092/", 190 | "hostname": "10.34.21.232:8091", 191 | "alternateAddresses": { 192 | "external": { 193 | "hostname": "server3.example.com" 194 | } 195 | }, 196 | "interestingStats": { 197 | "curr_items": 0, 198 | "curr_items_tot": 0, 199 | "vb_replica_curr_items": 0 200 | }, 201 | "mcdMemoryAllocated": 5978, 202 | "mcdMemoryReserved": 5978, 203 | "memoryFree": 6961504256, 204 | "memoryTotal": 7836254208, 205 | "os": "x86_64-unknown-linux-gnu", 206 | "ports": { 207 | "direct": 11210, 208 | "proxy": 11211 209 | }, 210 | "status": "healthy", 211 | "systemStats": { 212 | "cpu_utilization_rate": 0.7334963325183375, 213 | "swap_total": 4294963200, 214 | "swap_used": 0 215 | }, 216 | "uptime": "20528", 217 | "version": "2.0.0r-388-gf35126e-community" 218 | }, 219 | { 220 | "clusterCompatibility": 1, 221 | "clusterMembership": "active", 222 | "couchApiBase": "http://10.203.33.4:8092/", 223 | "hostname": "10.203.33.4:8091", 224 | "alternateAddresses": { 225 | "external": { 226 | "hostname": "server4.example.com" 227 | } 228 | }, 229 | "interestingStats": { 230 | "curr_items": 0, 231 | "curr_items_tot": 0, 232 | "vb_replica_curr_items": 0 233 | }, 234 | "mcdMemoryAllocated": 5978, 235 | "mcdMemoryReserved": 5978, 236 | "memoryFree": 6960599040, 237 | "memoryTotal": 7836254208, 238 | "os": "x86_64-unknown-linux-gnu", 239 | "ports": { 240 | "direct": 11210, 241 | "proxy": 11211 242 | }, 243 | "status": "healthy", 244 | "systemStats": { 245 | "cpu_utilization_rate": 0.7575757575757576, 246 | "swap_total": 4294963200, 247 | "swap_used": 0 248 | }, 249 | "uptime": "20537", 250 | "version": "2.0.0r-388-gf35126e-community" 251 | } 252 | ], 253 | "rebalanceProgressUri": "/pools/default/rebalanceProgress", 254 | "rebalanceStatus": 
"none", 255 | "remoteClusters": { 256 | "uri": "/pools/default/remoteClusters", 257 | "validateURI": "/pools/default/remoteClusters?just_validate=1" 258 | }, 259 | "stats": { 260 | "uri": "/pools/default/stats" 261 | }, 262 | "stopRebalanceUri": "/controller/stopRebalance", 263 | "storageTotals": { 264 | "hdd": { 265 | "free": 1046325215240, 266 | "quotaTotal": 1056894156800, 267 | "total": 1056894156800, 268 | "used": 10568941560, 269 | "usedByData": 12543880 270 | }, 271 | "ram": { 272 | "quotaTotal": 31341936640, 273 | "quotaUsed": 31341936640, 274 | "total": 39181271040, 275 | "used": 4447940608, 276 | "usedByData": 13557744 277 | } 278 | }, 279 | "tasksProgressUri": "/pools/default/tasksProgress", 280 | "tasksStatus": "none" 281 | }` 282 | 283 | func assert(t *testing.T, name string, got interface{}, expected interface{}) { 284 | if got != expected { 285 | t.Fatalf("Expected %v for %s, got %v", expected, name, got) 286 | } 287 | } 288 | 289 | func testParse(t *testing.T, s string, rv interface{}) { 290 | if err := json.Unmarshal([]byte(s), rv); err != nil { 291 | t.Fatalf("Error decoding: %v", err) 292 | } 293 | } 294 | 295 | func TestPoolsResponse(t *testing.T) { 296 | res := Pools{} 297 | testParse(t, samplePools, &res) 298 | 299 | assert(t, "couch", res.ComponentsVersion["couch"], 300 | "1.2.0a-7dd003e-git") 301 | assert(t, "implementationVersion", res.ImplementationVersion, 302 | "2.0.0r-388-gf35126e-community") 303 | assert(t, "uuid", res.UUID, "debbb353c26a6ee1c9eceb748a8c6907") 304 | assert(t, "IsAdmin", res.IsAdmin, false) 305 | assert(t, "pool name", res.Pools[0].Name, "default") 306 | assert(t, "pool streamingUri", res.Pools[0].StreamingURI, 307 | "/poolsStreaming/default") 308 | assert(t, "pool URI", res.Pools[0].URI, "/pools/default") 309 | } 310 | 311 | func TestPool(t *testing.T) { 312 | res := Pool{} 313 | testParse(t, samplePool, &res) 314 | assert(t, "len(pools)", 5, len(res.Nodes)) 315 | } 316 | 317 | func TestPoolAlternateNames(t *testing.T) 
{ 318 | res := Pool{} 319 | testParse(t, samplePool, &res) 320 | assert(t, "node[0].AlternateAddresses[\"external\"].Hostname", 321 | res.Nodes[0].AlternateNames["external"].Hostname, "server0.example.com") 322 | assert(t, "node[1].AlternateAddresses[\"external\"].Hostname", 323 | res.Nodes[1].AlternateNames["external"].Hostname, "server1.example.com") 324 | assert(t, "node[2].AlternateAddresses[\"external\"].Hostname", 325 | res.Nodes[2].AlternateNames["external"].Hostname, "server2.example.com") 326 | assert(t, "node[3].AlternateAddresses[\"external\"].Hostname", 327 | res.Nodes[3].AlternateNames["external"].Hostname, "server3.example.com") 328 | assert(t, "node[4].AlternateAddresses[\"external\"].Hostname", 329 | res.Nodes[4].AlternateNames["external"].Hostname, "server4.example.com") 330 | } 331 | 332 | func TestCommonAddressSuffixEmpty(t *testing.T) { 333 | b := Bucket{nodeList: mkNL([]Node{})} 334 | assert(t, "empty", "", b.CommonAddressSuffix()) 335 | } 336 | 337 | func TestCommonAddressSuffixUncommon(t *testing.T) { 338 | b := Bucket{vBucketServerMap: unsafe.Pointer(&VBucketServerMap{ 339 | ServerList: []string{"somestring", "unrelated"}}), 340 | nodeList: mkNL([]Node{}), 341 | } 342 | assert(t, "shouldn't match", "", b.CommonAddressSuffix()) 343 | } 344 | 345 | func TestCommonAddressSuffixCommon(t *testing.T) { 346 | b := Bucket{nodeList: unsafe.Pointer(&[]Node{ 347 | {Hostname: "server1.example.com:11210"}, 348 | {Hostname: "server2.example.com:11210"}, 349 | {Hostname: "server3.example.com:11210"}, 350 | {Hostname: "server4.example.com:11210"}, 351 | })} 352 | assert(t, "useful suffix", ".example.com:11210", 353 | b.CommonAddressSuffix()) 354 | } 355 | 356 | func TestBucketConnPool(t *testing.T) { 357 | b := Bucket{} 358 | b.replaceConnPools([]*connectionPool{}) 359 | p := b.getConnPool(3) 360 | if p != nil { 361 | t.Fatalf("Successfully got a pool where there was none: %v", p) 362 | } 363 | // TODO: I have a few more cases to cover here. 
364 | } 365 | 366 | // No assertions, but this is meant to be tested with the race 367 | // detector to verify the connection pool stuff is clean. 368 | func TestBucketConnPoolConcurrent(t *testing.T) { 369 | b := Bucket{} 370 | 371 | wg := sync.WaitGroup{} 372 | for i := 0; i < 16; i++ { 373 | wg.Add(1) 374 | go func() { 375 | for i := 0; i < 100; i++ { 376 | b.replaceConnPools([]*connectionPool{}) 377 | } 378 | wg.Done() 379 | }() 380 | } 381 | wg.Wait() 382 | } 383 | -------------------------------------------------------------------------------- /populate/populate.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "math/rand" 8 | "os" 9 | "text/tabwriter" 10 | "time" 11 | 12 | "github.com/couchbase/go-couchbase" 13 | ) 14 | 15 | var poolName = flag.String("pool", "default", "Pool to connect to") 16 | var bucketName = flag.String("bucket", "default", "Bucket to connect to") 17 | 18 | const myfmt = "2006-02-01-15:04:05.000000000" 19 | 20 | var names = []string{ 21 | "Jan Lehnardt", 22 | "John Christopher Anderson", 23 | "Noah Slater", 24 | "Filipe David Borba Manana", 25 | "Adam Kocoloski", 26 | "Paul Joseph Davis", 27 | "Christopher Lenz", 28 | "Damien F. 
Katz", 29 | "Robert Newson", 30 | "Benoit Chesneau", 31 | "Jason David Davies", 32 | "Mark Hammond", 33 | "Randall Leeds", 34 | "Bin Cui", 35 | "Benjamin Young", 36 | "Dustin Sallings", 37 | "Steve Yen", 38 | "Joe Schaefer", 39 | } 40 | 41 | var actions = []string{ 42 | "submitted", "aborted", "approved", "declined", 43 | } 44 | 45 | var projects = []string{ 46 | "ep-engine", "couchdb", "ns_server", "moxi", "libcouchbase", 47 | } 48 | 49 | type record struct { 50 | Author string `json:"author"` 51 | Reviewer string `json:"reviewer"` 52 | Action string `json:"action"` 53 | Project string `json:"project"` 54 | Score int `json:"score"` 55 | } 56 | 57 | func report(c *couchbase.Client, b *couchbase.Bucket) { 58 | fmt.Printf("-----------------------------------------------------\n") 59 | tr := tabwriter.NewWriter(os.Stdout, 8, 8, 1, ' ', 0) 60 | defer tr.Flush() 61 | params := map[string]interface{}{ 62 | "group_level": 1, 63 | "stale": "update_after", 64 | "connection_timeout": 60000, 65 | } 66 | vres, err := b.View("test", "test", params) 67 | if err != nil { 68 | log.Printf("Error executing view: %v", err) 69 | } 70 | 71 | for _, e := range vres.Errors { 72 | fmt.Printf(" * Error from %s: %s\n", e.From, e.Reason) 73 | } 74 | 75 | for _, r := range vres.Rows { 76 | fmt.Fprintf(tr, "%v:\t%v\n", r.Key, r.Value) 77 | } 78 | } 79 | 80 | func harass(c *couchbase.Client, b *couchbase.Bucket) { 81 | fmt.Printf("Doing stuff\n") 82 | 83 | go func() { 84 | for { 85 | time.Sleep(2 * time.Second) 86 | report(c, b) 87 | } 88 | }() 89 | 90 | for { 91 | r := record{ 92 | Author: names[rand.Intn(len(names))], 93 | Reviewer: names[rand.Intn(len(names))], 94 | Action: actions[rand.Intn(len(actions))], 95 | Project: projects[rand.Intn(len(projects))], 96 | Score: rand.Intn(4) - 2, 97 | } 98 | 99 | k := time.Now().Format(myfmt) 100 | 101 | if err := b.Set(k, 0, r); err != nil { 102 | log.Fatalf("Oops, failed a store of %s: %v", k, err) 103 | } 104 | } 105 | } 106 | 107 | func main() { 
108 | flag.Parse() 109 | bucket, err := couchbase.GetBucket(flag.Arg(0), "default", "default") 110 | if err != nil { 111 | log.Fatalf("Error getting bucket: %v", err) 112 | } 113 | defer bucket.Close() 114 | 115 | harass(bucket.GetPool().GetClient(), bucket) 116 | } 117 | -------------------------------------------------------------------------------- /port_map.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | /* 4 | 5 | The goal here is to map a hostname:port combination to another hostname:port 6 | combination. The original hostname:port gives the name and regular KV port 7 | of a couchbase server. We want to determine the corresponding SSL KV port. 8 | 9 | To do this, we have a pool services structure, as obtained from 10 | the /pools/default/nodeServices API. 11 | 12 | For a fully configured two-node system, the structure may look like this: 13 | {"rev":32,"nodesExt":[ 14 | {"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211},"hostname":"172.23.123.101"}, 15 | {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":true,"hostname":"172.23.123.102"}]} 16 | 17 | In this case, note the "hostname" fields, and the "kv" and "kvSSL" fields. 
18 | 19 | For a single-node system, perhaps brought up for testing, the structure may look like this: 20 | {"rev":66,"nodesExt":[ 21 | {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"kv":11210,"kvSSL":11207,"capi":8092,"capiSSL":18092,"projector":9999,"n1ql":8093,"n1qlSSL":18093},"thisNode":true}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}} 22 | 23 | Here, note that there is only a single entry in the "nodeExt" array and that it does not have a "hostname" field. 24 | We will assume that either hostname fields are present, or there is only a single node. 25 | */ 26 | 27 | import ( 28 | "encoding/json" 29 | "fmt" 30 | "net" 31 | "strconv" 32 | ) 33 | 34 | func ParsePoolServices(jsonInput string) (*PoolServices, error) { 35 | ps := &PoolServices{} 36 | err := json.Unmarshal([]byte(jsonInput), ps) 37 | return ps, err 38 | } 39 | 40 | // Accepts a "host:port" string representing the KV TCP port and the pools 41 | // nodeServices payload and returns a host:port string representing the KV 42 | // TLS port on the same node as the KV TCP port. 
43 | // Returns the original host:port if in case of local communication (services 44 | // on the same node as source) 45 | func MapKVtoSSL(hostport string, ps *PoolServices) (string, bool, error) { 46 | return MapKVtoSSLExt(hostport, ps, false) 47 | } 48 | 49 | func MapKVtoSSLExt(hostport string, ps *PoolServices, force bool) (string, bool, error) { 50 | host, port, err := net.SplitHostPort(hostport) 51 | if err != nil { 52 | return "", false, fmt.Errorf("Unable to split hostport %s: %v", hostport, err) 53 | } 54 | 55 | portInt, err := strconv.Atoi(port) 56 | if err != nil { 57 | return "", false, fmt.Errorf("Unable to parse host/port combination %s: %v", hostport, err) 58 | } 59 | 60 | var ns *NodeServices 61 | for i := range ps.NodesExt { 62 | hostname := ps.NodesExt[i].Hostname 63 | if len(hostname) != 0 && hostname != host { 64 | /* If the hostname is the empty string, it means the node (and by extension 65 | the cluster) is configured on the loopback. Further, it means that the client 66 | should use whatever hostname it used to get the nodeServices information in 67 | the first place to access the cluster. Thus, when the hostname is empty in 68 | the nodeService entry we can assume that client will use the hostname it used 69 | to access the KV TCP endpoint - and thus that it automatically "matches". 70 | If hostname is not empty and doesn't match then we move to the next entry. 
71 | */ 72 | continue 73 | } 74 | kvPort, found := ps.NodesExt[i].Services["kv"] 75 | if !found { 76 | /* not a node with a KV service */ 77 | continue 78 | } 79 | if kvPort == portInt { 80 | ns = &(ps.NodesExt[i]) 81 | break 82 | } 83 | } 84 | 85 | if ns == nil { 86 | return "", false, fmt.Errorf("Unable to parse host/port combination %s: no matching node found among %d", hostport, len(ps.NodesExt)) 87 | } 88 | kvSSL, found := ns.Services["kvSSL"] 89 | if !found { 90 | return "", false, fmt.Errorf("Unable to map host/port combination %s: target host has no kvSSL port listed", hostport) 91 | } 92 | 93 | //Don't encrypt for communication between local nodes 94 | if !force && (len(ns.Hostname) == 0 || ns.ThisNode) { 95 | return hostport, false, nil 96 | } 97 | 98 | ip := net.ParseIP(host) 99 | if ip != nil && ip.To4() == nil && ip.To16() != nil { // IPv6 and not a FQDN 100 | // Prefix and suffix square brackets as SplitHostPort removes them, 101 | // see: https://golang.org/pkg/net/#SplitHostPort 102 | host = "[" + host + "]" 103 | } 104 | 105 | return fmt.Sprintf("%s:%d", host, kvSSL), true, nil 106 | } 107 | -------------------------------------------------------------------------------- /port_map_test.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import "testing" 4 | 5 | func TestSingleNode(t *testing.T) { 6 | jsonInput := `{"rev":66,"nodesExt":[{"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"kv":11210,"kvSSL":11207,"capi":8092,"capiSSL":18092,"projector":9999,"n1ql":8093,"n1qlSSL":18093},"thisNode":true}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}}` 7 | 8 | poolServices, err := ParsePoolServices(jsonInput) 9 | if err != nil { 10 | t.Fatalf("Unable to parse json: %v", err) 11 | } 12 | if poolServices == nil { 13 
| t.Fatalf("Parse produced no result") 14 | } 15 | if len(poolServices.NodesExt) != 1 { 16 | t.Fatalf("Expected nodesExt of length 1, got %d", len(poolServices.NodesExt)) 17 | } 18 | if poolServices.NodesExt[0].Hostname != "" { 19 | t.Fatalf("Expected empty hostname, got %s", poolServices.NodesExt[0].Hostname) 20 | } 21 | if poolServices.NodesExt[0].Services["kv"] != 11210 { 22 | t.Fatalf("Expected kv port 11210, got %d", poolServices.NodesExt[0].Services["kv"]) 23 | } 24 | if poolServices.NodesExt[0].Services["kvSSL"] != 11207 { 25 | t.Fatalf("Expected kvSSL port 11207, got %d", poolServices.NodesExt[0].Services["kvSSL"]) 26 | } 27 | 28 | // Should succeed. 29 | target := "127.0.0.1:11210" 30 | res, _, err := MapKVtoSSL(target, poolServices) 31 | if err != nil { 32 | t.Fatalf("Mapping target %s, expected success, got error: %v", target, err) 33 | } 34 | expected := "127.0.0.1:11210" // no hostname 35 | if res != expected { 36 | t.Fatalf("Mapping target %s, expected %s, got %s", target, expected, res) 37 | } 38 | 39 | // No port. 40 | target = "127.0.0.1" 41 | res, _, err = MapKVtoSSL(target, poolServices) 42 | if err == nil { 43 | t.Fatalf("Mapping target %s, expected failure, got success: %s", target, res) 44 | } 45 | 46 | // Bad KV port. 
47 | target = "127.0.0.1:11111" 48 | res, _, err = MapKVtoSSL(target, poolServices) 49 | if err == nil { 50 | t.Fatalf("Mapping target %s, expected failure, got success: %s", target, res) 51 | } 52 | } 53 | 54 | func TestMultiNode(t *testing.T) { 55 | jsonInput := `{"rev":32,"nodesExt":[{"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11299,"projector":9999,"kv":11298,"moxi":11211},"hostname":"172.23.123.101"},{"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":false,"hostname":"172.23.123.102"}]}` 56 | 57 | poolServices, err := ParsePoolServices(jsonInput) 58 | if err != nil { 59 | t.Fatalf("Unable to parse json: %v", err) 60 | } 61 | if poolServices == nil { 62 | t.Fatalf("Parse produced no result") 63 | } 64 | if len(poolServices.NodesExt) != 2 { 65 | t.Fatalf("Expected nodesExt of length 2, got %d", len(poolServices.NodesExt)) 66 | } 67 | if poolServices.NodesExt[0].Services["kv"] != 11298 { 68 | t.Fatalf("Expected kv port 11298, got %d", poolServices.NodesExt[0].Services["kv"]) 69 | } 70 | if poolServices.NodesExt[1].Services["kvSSL"] != 11207 { 71 | t.Fatalf("Expected kvSSL port 11207, got %d", poolServices.NodesExt[1].Services["kvSSL"]) 72 | } 73 | 74 | // Should succeed. 
75 | target := "172.23.123.102:11210" 76 | res, _, err := MapKVtoSSL(target, poolServices) 77 | if err != nil { 78 | t.Fatalf("Mapping target %s, expected success, got error: %v", target, err) 79 | } 80 | expected := "172.23.123.102:11207" 81 | if res != expected { 82 | t.Fatalf("Mapping target %s, expected %s, got %s", target, expected, res) 83 | } 84 | 85 | // No such host. 86 | target = "172.23.123.999:11210" 87 | res, _, err = MapKVtoSSL(target, poolServices) 88 | if err == nil { 89 | t.Fatalf("Mapping target %s, expected failure, got success: %s", target, res) 90 | } 91 | 92 | // Bad KV port. 93 | target = "172.23.123.101:11111" 94 | res, _, err = MapKVtoSSL(target, poolServices) 95 | if err == nil { 96 | t.Fatalf("Mapping target %s, expected failure, got success: %s", target, res) 97 | } 98 | } 99 | 100 | func TestIPv6Node(t *testing.T) { 101 | jsonInput := `{"rev":32,"nodesExt":[{"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11299,"projector":9999,"kv":11298,"moxi":11211},"hostname":"DEAD::BEEF"},{"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":false,"hostname":"FEED::DEED"}]}` 102 | 103 | poolServices, err := ParsePoolServices(jsonInput) 104 | if err != nil { 105 | t.Fatalf("Unable to parse json: %v", err) 106 | } 107 | if poolServices == nil { 108 | t.Fatalf("Parse produced no result") 109 | } 110 | if len(poolServices.NodesExt) != 2 { 111 | t.Fatalf("Expected nodesExt of length 2, got %d", len(poolServices.NodesExt)) 112 | } 113 | if poolServices.NodesExt[0].Services["kv"] != 11298 { 114 | 
t.Fatalf("Expected kv port 11298, got %d", poolServices.NodesExt[0].Services["kv"]) 115 | } 116 | if poolServices.NodesExt[1].Services["kvSSL"] != 11207 { 117 | t.Fatalf("Expected kvSSL port 11207, got %d", poolServices.NodesExt[1].Services["kvSSL"]) 118 | } 119 | 120 | // Should succeed. 121 | target := "[FEED::DEED]:11210" 122 | res, _, err := MapKVtoSSL(target, poolServices) 123 | if err != nil { 124 | t.Fatalf("Mapping target %s, expected success, got error: %v", target, err) 125 | } 126 | expected := "[FEED::DEED]:11207" 127 | if res != expected { 128 | t.Fatalf("Mapping target %s, expected %s, got %s", target, expected, res) 129 | } 130 | 131 | // Bad KV port. 132 | target = "[DEAD::BEEF]:11111" 133 | res, _, err = MapKVtoSSL(target, poolServices) 134 | if err == nil { 135 | t.Fatalf("Mapping target %s, expected failure, got success: %s", target, res) 136 | } 137 | } 138 | 139 | func TestMissingIPNodes(t *testing.T) { 140 | jsonInput := `{"rev":73,"nodesExt":[{"services":{"mgmt":9000,"mgmtSSL":19000,"fts":9200,"ftsSSL":19200,"ftsGRPC":9201,"ftsGRPCSSL":19201,"kv":12000,"kvSSL":11998,"capi":9500,"capiSSL":19500,"projector":10000},"thisNode":false,"hostname":"192.168.212.71"},{"services":{"mgmt":9001,"mgmtSSL":19001,"kv":12002,"kvSSL":11994,"capi":9501,"capiSSL":19501,"projector":10001},"thisNode":false,"hostname":"192.168.212.72"}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}}` 141 | 142 | poolServices, err := ParsePoolServices(jsonInput) 143 | if err != nil { 144 | t.Fatalf("Unable to parse json: %v", err) 145 | } 146 | if poolServices == nil { 147 | t.Fatalf("Parse produced no result") 148 | } 149 | if len(poolServices.NodesExt) != 2 { 150 | t.Fatalf("Expected nodesExt of length 2, got %d", len(poolServices.NodesExt)) 151 | } 152 | if poolServices.NodesExt[0].Services["kvSSL"] != 11998 { 153 | t.Fatalf("Expected kv port 11998, got %d", poolServices.NodesExt[0].Services["kvSSL"]) 154 | } 155 | if 
poolServices.NodesExt[1].Services["kvSSL"] != 11994 { 156 | t.Fatalf("Expected kvSSL port 11994, got %d", poolServices.NodesExt[1].Services["kvSSL"]) 157 | } 158 | 159 | target := "192.168.212.71:12000" 160 | res, _, err := MapKVtoSSL(target, poolServices) 161 | if err != nil { 162 | t.Fatalf("Mapping target %s, expected success, got error: %v", target, err) 163 | } 164 | expected := "192.168.212.71:11998" 165 | if res != expected { 166 | t.Fatalf("Mapping target %s, expected %s, got %s", target, expected, res) 167 | } 168 | 169 | target = "192.168.212.72:12002" 170 | res, _, err = MapKVtoSSL(target, poolServices) 171 | if err != nil { 172 | t.Fatalf("Mapping target %s, expected success, got error: %v", target, err) 173 | } 174 | expected = "192.168.212.72:11994" 175 | if res != expected { 176 | t.Fatalf("Mapping target %s, expected %s, got %s", target, expected, res) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /streaming.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/couchbase/goutils/logging" 7 | "io" 8 | "io/ioutil" 9 | "net" 10 | "net/http" 11 | "time" 12 | "unsafe" 13 | ) 14 | 15 | // Bucket auto-updater gets the latest version of the bucket config from 16 | // the server. If the configuration has changed then updated the local 17 | // bucket information. 
If the bucket has been deleted then notify anyone 18 | // who is holding a reference to this bucket 19 | 20 | const MAX_RETRY_COUNT = 5 21 | const DISCONNECT_PERIOD = 120 * time.Second 22 | 23 | type NotifyFn func(bucket string, err error) 24 | type StreamingFn func(bucket *Bucket) 25 | 26 | // Use TCP keepalive to detect half close sockets 27 | var updaterTransport http.RoundTripper = &http.Transport{ 28 | Proxy: http.ProxyFromEnvironment, 29 | Dial: (&net.Dialer{ 30 | Timeout: 30 * time.Second, 31 | KeepAlive: 30 * time.Second, 32 | }).Dial, 33 | } 34 | 35 | var updaterHTTPClient = &http.Client{Transport: updaterTransport} 36 | 37 | func doHTTPRequestForUpdate(req *http.Request) (*http.Response, error) { 38 | 39 | var err error 40 | var res *http.Response 41 | 42 | for i := 0; i < HTTP_MAX_RETRY; i++ { 43 | res, err = updaterHTTPClient.Do(req) 44 | if err != nil && isHttpConnError(err) { 45 | continue 46 | } 47 | break 48 | } 49 | 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | return res, err 55 | } 56 | 57 | func (b *Bucket) RunBucketUpdater(notify NotifyFn) { 58 | b.RunBucketUpdater2(nil, notify) 59 | } 60 | 61 | func (b *Bucket) RunBucketUpdater2(streamingFn StreamingFn, notify NotifyFn) { 62 | go func() { 63 | err := b.UpdateBucket2(streamingFn) 64 | if err != nil { 65 | if notify != nil { 66 | notify(b.GetName(), err) 67 | } 68 | logging.Errorf(" Bucket Updater exited with err %v", err) 69 | } 70 | }() 71 | } 72 | 73 | func (b *Bucket) replaceConnPools2(with []*connectionPool, bucketLocked bool) { 74 | if !bucketLocked { 75 | b.Lock() 76 | defer b.Unlock() 77 | } 78 | old := b.connPools 79 | b.connPools = unsafe.Pointer(&with) 80 | if old != nil { 81 | for _, pool := range *(*[]*connectionPool)(old) { 82 | if pool != nil && pool.inUse == false { 83 | pool.Close() 84 | } 85 | } 86 | } 87 | return 88 | } 89 | 90 | func (b *Bucket) UpdateBucket() error { 91 | return b.UpdateBucket2(nil) 92 | } 93 | 94 | func (b *Bucket) UpdateBucket2(streamingFn 
StreamingFn) error { 95 | var failures int 96 | var returnErr error 97 | var poolServices PoolServices 98 | 99 | for { 100 | 101 | if failures == MAX_RETRY_COUNT { 102 | logging.Errorf(" Maximum failures reached. Exiting loop...") 103 | return fmt.Errorf("Max failures reached. Last Error %v", returnErr) 104 | } 105 | 106 | nodes := b.Nodes() 107 | if len(nodes) < 1 { 108 | return fmt.Errorf("No healthy nodes found") 109 | } 110 | 111 | streamUrl := fmt.Sprintf("%s/pools/default/bucketsStreaming/%s", b.pool.client.BaseURL, uriAdj(b.GetName())) 112 | logging.Infof(" Trying with %s", streamUrl) 113 | req, err := http.NewRequest("GET", streamUrl, nil) 114 | if err != nil { 115 | return err 116 | } 117 | 118 | // Lock here to avoid having pool closed under us. 119 | b.RLock() 120 | err = maybeAddAuth(req, b.pool.client.ah) 121 | b.RUnlock() 122 | if err != nil { 123 | return err 124 | } 125 | 126 | res, err := doHTTPRequestForUpdate(req) 127 | if err != nil { 128 | return err 129 | } 130 | 131 | if res.StatusCode != 200 { 132 | bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512)) 133 | logging.Errorf("Failed to connect to host, unexpected status code: %v. Body %s", res.StatusCode, bod) 134 | res.Body.Close() 135 | returnErr = fmt.Errorf("Failed to connect to host. 
Status %v Body %s", res.StatusCode, bod) 136 | failures++ 137 | continue 138 | } 139 | 140 | dec := json.NewDecoder(res.Body) 141 | 142 | tmpb := &Bucket{} 143 | for { 144 | 145 | err := dec.Decode(&tmpb) 146 | if err != nil { 147 | returnErr = err 148 | res.Body.Close() 149 | break 150 | } 151 | 152 | // if we got here, reset failure count 153 | failures = 0 154 | 155 | if b.pool.client.tlsConfig != nil { 156 | poolServices, err = b.pool.client.GetPoolServices("default") 157 | if err != nil { 158 | returnErr = err 159 | res.Body.Close() 160 | break 161 | } 162 | } 163 | 164 | b.Lock() 165 | 166 | // mark all the old connection pools for deletion 167 | pools := b.getConnPools(true /* already locked */) 168 | for _, pool := range pools { 169 | if pool != nil { 170 | pool.inUse = false 171 | } 172 | } 173 | 174 | newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList)) 175 | for i := range newcps { 176 | // get the old connection pool and check if it is still valid 177 | pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */) 178 | if pool != nil && pool.inUse == false && pool.tlsConfig == b.pool.client.tlsConfig { 179 | // if the hostname and index is unchanged then reuse this pool 180 | newcps[i] = pool 181 | pool.inUse = true 182 | continue 183 | } 184 | // else create a new pool 185 | var encrypted bool 186 | hostport := tmpb.VBSMJson.ServerList[i] 187 | if b.pool.client.tlsConfig != nil { 188 | hostport, encrypted, err = MapKVtoSSL(hostport, &poolServices) 189 | if err != nil { 190 | b.Unlock() 191 | return err 192 | } 193 | } 194 | if b.ah != nil { 195 | newcps[i] = newConnectionPool(hostport, 196 | b.ah, false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted) 197 | 198 | } else { 199 | newcps[i] = newConnectionPool(hostport, 200 | b.authHandler(true /* bucket already locked */), 201 | false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted) 202 | } 203 | } 204 | 205 | 
b.replaceConnPools2(newcps, true /* bucket already locked */) 206 | 207 | tmpb.ah = b.ah 208 | b.vBucketServerMap = unsafe.Pointer(&tmpb.VBSMJson) 209 | b.nodeList = unsafe.Pointer(&tmpb.NodesJSON) 210 | b.Unlock() 211 | 212 | if streamingFn != nil { 213 | streamingFn(tmpb) 214 | } 215 | logging.Debugf("Got new configuration for bucket %s", b.GetName()) 216 | 217 | } 218 | // we are here because of an error 219 | failures++ 220 | continue 221 | 222 | } 223 | return nil 224 | } 225 | -------------------------------------------------------------------------------- /tap.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "github.com/couchbase/gomemcached/client" 5 | "github.com/couchbase/goutils/logging" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | const initialRetryInterval = 1 * time.Second 11 | const maximumRetryInterval = 30 * time.Second 12 | 13 | // A TapFeed streams mutation events from a bucket. 14 | // 15 | // Events from the bucket can be read from the channel 'C'. Remember 16 | // to call Close() on it when you're done, unless its channel has 17 | // closed itself already. 
18 | type TapFeed struct { 19 | C <-chan memcached.TapEvent 20 | 21 | bucket *Bucket 22 | args *memcached.TapArguments 23 | nodeFeeds []*memcached.TapFeed // The TAP feeds of the individual nodes 24 | output chan memcached.TapEvent // Same as C but writeably-typed 25 | wg sync.WaitGroup 26 | quit chan bool 27 | } 28 | 29 | // StartTapFeed creates and starts a new Tap feed 30 | func (b *Bucket) StartTapFeed(args *memcached.TapArguments) (*TapFeed, error) { 31 | if args == nil { 32 | defaultArgs := memcached.DefaultTapArguments() 33 | args = &defaultArgs 34 | } 35 | 36 | feed := &TapFeed{ 37 | bucket: b, 38 | args: args, 39 | output: make(chan memcached.TapEvent, 10), 40 | quit: make(chan bool), 41 | } 42 | 43 | go feed.run() 44 | 45 | feed.C = feed.output 46 | return feed, nil 47 | } 48 | 49 | // Goroutine that runs the feed 50 | func (feed *TapFeed) run() { 51 | retryInterval := initialRetryInterval 52 | bucketOK := true 53 | for { 54 | // Connect to the TAP feed of each server node: 55 | if bucketOK { 56 | killSwitch, err := feed.connectToNodes() 57 | if err == nil { 58 | // Run until one of the sub-feeds fails: 59 | select { 60 | case <-killSwitch: 61 | case <-feed.quit: 62 | return 63 | } 64 | feed.closeNodeFeeds() 65 | retryInterval = initialRetryInterval 66 | } 67 | } 68 | 69 | // On error, try to refresh the bucket in case the list of nodes changed: 70 | logging.Infof("go-couchbase: TAP connection lost; reconnecting to bucket %q in %v", 71 | feed.bucket.Name, retryInterval) 72 | err := feed.bucket.Refresh() 73 | bucketOK = err == nil 74 | 75 | select { 76 | case <-time.After(retryInterval): 77 | case <-feed.quit: 78 | return 79 | } 80 | if retryInterval *= 2; retryInterval > maximumRetryInterval { 81 | retryInterval = maximumRetryInterval 82 | } 83 | } 84 | } 85 | 86 | func (feed *TapFeed) connectToNodes() (killSwitch chan bool, err error) { 87 | killSwitch = make(chan bool) 88 | for _, serverConn := range feed.bucket.getConnPools(false /* not already locked 
*/) { 89 | var singleFeed *memcached.TapFeed 90 | singleFeed, err = serverConn.StartTapFeed(feed.args) 91 | if err != nil { 92 | logging.Errorf("go-couchbase: Error connecting to tap feed of %s: %v", serverConn.host, err) 93 | feed.closeNodeFeeds() 94 | return 95 | } 96 | feed.nodeFeeds = append(feed.nodeFeeds, singleFeed) 97 | go feed.forwardTapEvents(singleFeed, killSwitch, serverConn.host) 98 | feed.wg.Add(1) 99 | } 100 | return 101 | } 102 | 103 | // Goroutine that forwards Tap events from a single node's feed to the aggregate feed. 104 | func (feed *TapFeed) forwardTapEvents(singleFeed *memcached.TapFeed, killSwitch chan bool, host string) { 105 | defer feed.wg.Done() 106 | for { 107 | select { 108 | case event, ok := <-singleFeed.C: 109 | if !ok { 110 | if singleFeed.Error != nil { 111 | logging.Errorf("go-couchbase: Tap feed from %s failed: %v", host, singleFeed.Error) 112 | } 113 | killSwitch <- true 114 | return 115 | } 116 | feed.output <- event 117 | case <-feed.quit: 118 | return 119 | } 120 | } 121 | } 122 | 123 | func (feed *TapFeed) closeNodeFeeds() { 124 | for _, f := range feed.nodeFeeds { 125 | f.Close() 126 | } 127 | feed.nodeFeeds = nil 128 | } 129 | 130 | // Close a Tap feed. 131 | func (feed *TapFeed) Close() error { 132 | select { 133 | case <-feed.quit: 134 | return nil 135 | default: 136 | } 137 | 138 | feed.closeNodeFeeds() 139 | close(feed.quit) 140 | feed.wg.Wait() 141 | close(feed.output) 142 | return nil 143 | } 144 | -------------------------------------------------------------------------------- /tools/loadfile/loadfile.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "path" 9 | 10 | "github.com/couchbase/go-couchbase" 11 | ) 12 | 13 | func maybeFatal(e error, f string, args ...interface{}) { 14 | if e != nil { 15 | fmt.Fprintf(os.Stderr, f, args...) 
16 | os.Exit(64) 17 | } 18 | } 19 | 20 | func main() { 21 | cbServ := flag.String("couchbase", "http://localhost:8091/", 22 | "URL to couchbase") 23 | cbBucket := flag.String("bucket", "default", "couchbase bucket") 24 | includeExt := flag.Bool("includeExt", false, "include file extension in document ID") 25 | verbose := flag.Bool("v", false, "verbose output") 26 | 27 | flag.Parse() 28 | 29 | b, err := couchbase.GetBucket(*cbServ, "default", *cbBucket) 30 | maybeFatal(err, "Error connecting to couchbase: %v\n", err) 31 | 32 | for _, filename := range flag.Args() { 33 | key := pathToID(filename, *includeExt) 34 | bytes, err := ioutil.ReadFile(filename) 35 | maybeFatal(err, "Error reading file contents: %v\n", err) 36 | b.SetRaw(key, 0, bytes) 37 | if *verbose { 38 | fmt.Printf("Loaded %s to key %s\n", filename, key) 39 | } 40 | } 41 | if *verbose { 42 | fmt.Printf("Loaded %d documents into bucket %s\n", len(flag.Args()), *cbBucket) 43 | } 44 | } 45 | 46 | func pathToID(p string, includeExt bool) string { 47 | _, file := path.Split(p) 48 | if includeExt { 49 | return file 50 | } 51 | ext := path.Ext(file) 52 | return file[0 : len(file)-len(ext)] 53 | } 54 | -------------------------------------------------------------------------------- /tools/view2go/view2go.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "flag" 7 | "fmt" 8 | "os" 9 | "strings" 10 | 11 | "github.com/couchbase/go-couchbase" 12 | ) 13 | 14 | func init() { 15 | flag.Usage = func() { 16 | fmt.Fprintf(os.Stderr, "%v [flags] ddocname\n", os.Args[0]) 17 | flag.PrintDefaults() 18 | os.Exit(64) 19 | } 20 | } 21 | 22 | func maybeFatal(e error, f string, args ...interface{}) { 23 | if e != nil { 24 | fmt.Fprintf(os.Stderr, f, args...) 
25 | os.Exit(64) 26 | } 27 | } 28 | 29 | func main() { 30 | cbServ := flag.String("couchbase", "http://localhost:8091/", 31 | "URL to couchbase") 32 | cbBucket := flag.String("bucket", "default", "couchbase bucket") 33 | objName := flag.String("objname", "designDoc", 34 | "Name of the variable to create") 35 | flag.Parse() 36 | 37 | ddocName := flag.Arg(0) 38 | if ddocName == "" { 39 | fmt.Fprintf(os.Stderr, "No ddoc given\n") 40 | flag.Usage() 41 | } 42 | 43 | b, err := couchbase.GetBucket(*cbServ, "default", *cbBucket) 44 | maybeFatal(err, "Error connecting to couchbase: %v\n", err) 45 | 46 | j := json.RawMessage{} 47 | err = b.GetDDoc(ddocName, &j) 48 | maybeFatal(err, "Error getting ddoc: %v\n", err) 49 | 50 | buf := &bytes.Buffer{} 51 | err = json.Indent(buf, []byte(j), "", " ") 52 | 53 | fmt.Printf("const %s = `%s`\n", *objName, 54 | strings.Replace(buf.String(), "`", "` + \"`\" + `", 0)) 55 | } 56 | -------------------------------------------------------------------------------- /trace/trace.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2016 Couchbase, Inc. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the 4 | // License. You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, 7 | // software distributed under the License is distributed on an "AS 8 | // IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 9 | // express or implied. See the License for the specific language 10 | // governing permissions and limitations under the License. 11 | 12 | // Package trace provides a ring buffer utility to trace events. 13 | package trace 14 | 15 | import ( 16 | "bytes" 17 | "fmt" 18 | "sync" 19 | ) 20 | 21 | // A Msg is a trace message, which might be repeated. 
type Msg struct {
	Title string
	Body  []byte `json:"Body,omitempty"`

	// Repeats will be >1 when there was a "run" of consolidated,
	// repeated trace messages.
	Repeats uint64
}

// A RingBuffer provides a ring buffer to capture trace messages,
// along with an optional consolidation func that can merge similar,
// consecutive trace messages.
type RingBuffer struct {
	consolidateFunc MsgConsolidateFunc

	m sync.Mutex // Protects the fields that follow.

	next int // Index in msgs where next entry will be written.
	msgs []Msg
}

// MsgConsolidateFunc is the func signature of an optional merge
// function, allowing for similar trace messages to be consolidated.
// For example, instead of using 216 individual slots in the ring
// buffer, in order to save space, the consolidation func can arrange
// for just a single entry of "216 repeated mutations" to be used
// instead.
//
// The next and prev Msg parameters may be modified by the
// consolidate func.  The consolidate func should return nil if it
// performed a consolidation and doesn't want a new entry written.
type MsgConsolidateFunc func(next *Msg, prev *Msg) *Msg

// ConsolidateByTitle implements the MsgConsolidateFunc signature
// by consolidating trace messages when their titles are the same:
// the previous entry's Repeats count is bumped instead of using a
// new slot.
func ConsolidateByTitle(next *Msg, prev *Msg) *Msg {
	if prev == nil || prev.Title != next.Title {
		return next
	}

	prev.Repeats++
	return nil
}

// NewRingBuffer returns a RingBuffer initialized with the
// given capacity and optional consolidateFunc.  A capacity of 0
// yields a buffer that silently drops every message.
func NewRingBuffer(
	capacity int,
	consolidateFunc MsgConsolidateFunc) *RingBuffer {
	return &RingBuffer{
		consolidateFunc: consolidateFunc,
		next:            0,
		msgs:            make([]Msg, capacity),
	}
}

// Add appends a trace message to the ring buffer, consolidating trace
// messages based on the optional consolidation function.
// (Typo fix: "appens" -> "appends".)
func (trb *RingBuffer) Add(title string, body []byte) {
	if len(trb.msgs) <= 0 {
		return // Zero-capacity buffer: drop everything.
	}

	msg := &Msg{
		Title:   title,
		Body:    body,
		Repeats: 1,
	}

	trb.m.Lock()

	if trb.consolidateFunc != nil {
		msg = trb.consolidateFunc(msg, trb.lastUNLOCKED())
		if msg == nil {
			// Consolidated into the previous entry; no new slot used.
			trb.m.Unlock()

			return
		}
	}

	trb.msgs[trb.next] = *msg

	trb.next++
	if trb.next >= len(trb.msgs) {
		trb.next = 0 // Wrap around, overwriting the oldest entry.
	}

	trb.m.Unlock()
}

// Cap returns the capacity of the ring buffer.
func (trb *RingBuffer) Cap() int {
	return len(trb.msgs)
}

// Last returns the last trace in the ring buffer, or nil if the
// buffer has zero capacity.
func (trb *RingBuffer) Last() *Msg {
	trb.m.Lock()
	last := trb.lastUNLOCKED()
	trb.m.Unlock()
	return last
}

// lastUNLOCKED returns the most recently written entry; callers must
// hold trb.m.
func (trb *RingBuffer) lastUNLOCKED() *Msg {
	if len(trb.msgs) <= 0 {
		return nil
	}
	last := trb.next - 1
	if last < 0 {
		last = len(trb.msgs) - 1
	}
	return &trb.msgs[last]
}

// Msgs returns a copy of all the trace messages, as an array with the
// oldest trace message first.  Unused slots (empty Title) are skipped.
func (trb *RingBuffer) Msgs() []Msg {
	rv := make([]Msg, 0, len(trb.msgs))

	// BUG FIX: a zero-capacity ring buffer previously panicked below
	// with an index out of range (Add guards this case but Msgs did
	// not); there is nothing to report, so return early.
	if len(trb.msgs) <= 0 {
		return rv
	}

	trb.m.Lock()

	i := trb.next // Oldest entry lives where the next write would go.
	for {
		if trb.msgs[i].Title != "" {
			rv = append(rv, trb.msgs[i])
		}

		i++
		if i >= len(trb.msgs) {
			i = 0
		}

		if i == trb.next { // We've returned to the beginning.
			break
		}
	}

	trb.m.Unlock()

	return rv
}

// MsgsToString formats a []Msg into a pretty string.
// lineSep is usually something like "\n".
// linePrefix is usually something like "  ".
// The first message gets no prefix; repeated messages are rendered
// with an "(Nx)" repeat count.
func MsgsToString(msgs []Msg, lineSep, linePrefix string) string {
	linePrefixRest := lineSep + linePrefix

	var buf bytes.Buffer

	for i := range msgs {
		msg := &msgs[i]

		body := ""
		bodySep := ""
		if msg.Body != nil {
			body = string(msg.Body)
			bodySep = " "
		}

		linePrefixCur := ""
		if i > 0 {
			linePrefixCur = linePrefixRest
		}

		if msg.Repeats > 1 {
			fmt.Fprintf(&buf, "%s%s (%dx)%s%s",
				linePrefixCur, msg.Title, msg.Repeats, bodySep, body)
		} else {
			fmt.Fprintf(&buf, "%s%s%s%s",
				linePrefixCur, msg.Title, bodySep, body)
		}
	}

	return buf.String()
}

// -------------------------------------------------------------------- /trace/trace_test.go:

// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the
// License. You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an "AS
// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language
// governing permissions and limitations under the License.

package trace

import (
	"reflect"
	"testing"
)

// TestTrace exercises the ring buffer without a consolidation
// function: every Add consumes a slot, and once the buffer is full
// the oldest entry is overwritten.
func TestTrace(t *testing.T) {
	r := NewRingBuffer(2, nil)
	if r.Cap() != 2 {
		t.Errorf("expected 2")
	}

	// A fresh buffer reports no messages.
	m := r.Msgs()
	exp := []Msg{}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	r.Add("hi", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"hi", nil, 1},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	r.Add("bye", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"hi", nil, 1},
		Msg{"bye", nil, 1},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	// Third Add wraps around and evicts "hi".
	r.Add("buh", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"bye", nil, 1},
		Msg{"buh", nil, 1},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	// Without a consolidation func, duplicate titles each use a slot.
	r.Add("buh", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"buh", nil, 1},
		Msg{"buh", nil, 1},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	if !reflect.DeepEqual(r.Last(), &Msg{"buh", nil, 1}) {
		t.Errorf("expected last to be buh")
	}

	s := MsgsToString(r.Msgs(), "\n", "")
	exps := "buh\nbuh"
	if s != exps {
		t.Errorf("expected string %q, got %q", exps, s)
	}

	// The line prefix applies only from the second message onwards.
	s = MsgsToString(r.Msgs(), "\n", "foo")
	exps = "buh\nfoobuh"
	if s != exps {
		t.Errorf("expected string %q, got %q", exps, s)
	}
}

// TestTraceConsolidateByTitle exercises the buffer with the
// ConsolidateByTitle merge func: consecutive messages with the same
// title bump Repeats on the previous entry instead of using a slot.
func TestTraceConsolidateByTitle(t *testing.T) {
	r := NewRingBuffer(2, ConsolidateByTitle)
	if r.Cap() != 2 {
		t.Errorf("expected 2")
	}

	m := r.Msgs()
	exp := []Msg{}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	r.Add("hi", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"hi", nil, 1},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	// Repeated title: consolidated into the same slot, Repeats grows.
	r.Add("hi", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"hi", nil, 2},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	r.Add("hi", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"hi", nil, 3},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	// A different title starts a new entry.
	r.Add("bye", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"hi", nil, 3},
		Msg{"bye", nil, 1},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	r.Add("bye", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"hi", nil, 3},
		Msg{"bye", nil, 2},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	// New title wraps the buffer and evicts the "hi" run.
	r.Add("buh", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"bye", nil, 2},
		Msg{"buh", nil, 1},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	r.Add("buh", nil)
	m = r.Msgs()
	exp = []Msg{
		Msg{"bye", nil, 2},
		Msg{"buh", nil, 2},
	}
	if !reflect.DeepEqual(m, exp) {
		t.Errorf("expected %#v, got %#v", exp, m)
	}

	if !reflect.DeepEqual(r.Last(), &Msg{"buh", nil, 2}) {
		t.Errorf("expected last to be buh")
	}

	// Repeated entries render with an "(Nx)" count.
	s := MsgsToString(r.Msgs(), "\n", "")
	exps := "bye (2x)\nbuh (2x)"
	if s != exps {
		t.Errorf("expected string %q, got %q", exps, s)
	}

	s = MsgsToString(r.Msgs(), "\n", "prefix")
	exps = "bye (2x)\nprefixbuh (2x)"
	if s != exps {
		t.Errorf("expected string %q, got %q", exps, s)
	}
}

// -------------------------------------------------------------------- /upr.go:
package couchbase

import (
	"log"
	"sync"
	"time"

	"fmt"
	"github.com/couchbase/gomemcached"
	"github.com/couchbase/gomemcached/client"
	"github.com/couchbase/goutils/logging"
)

// A UprFeed streams mutation events from a bucket.
//
// Events from the bucket can be read from the channel 'C'.  Remember
// to call Close() on it when you're done, unless its channel has
// closed itself already.
type UprFeed struct {
	C <-chan *memcached.UprEvent

	bucket    *Bucket
	nodeFeeds map[string]*FeedInfo     // The UPR feeds of the individual nodes
	output    chan *memcached.UprEvent // Same as C but writeably-typed
	// outputClosed marks that 'output' has been closed; it is read
	// and written from multiple goroutines without a lock — NOTE(review):
	// looks racy, relied on only for best-effort shutdown behavior.
	outputClosed bool
	quit         chan bool
	name         string // name of this UPR feed
	sequence     uint32 // sequence number for this feed
	connected    bool
	// killSwitch is signaled by a node feed goroutine when its
	// upstream channel dies, waking run() to reconnect.
	killSwitch      chan bool
	closing         bool
	wg              sync.WaitGroup
	dcp_buffer_size uint32
	data_chan_size  int
}

// UprFeed from a single connection
type FeedInfo struct {
	uprFeed   *memcached.UprFeed // UPR feed handle
	host      string             // hostname
	connected bool               // connected
	quit      chan bool          // quit channel
}

// FailoverLog maps a vbucket id to its failover log.
type FailoverLog map[uint16]memcached.FailoverLog

// GetFailoverLogs, get the failover logs for a set of vbucket ids
func (b *Bucket) GetFailoverLogs(vBuckets []uint16) (FailoverLog, error) {

	// map vbids to their corresponding hosts
	vbHostList := make(map[string][]uint16)
	vbm := b.VBServerMap()
	if len(vbm.VBucketMap) < len(vBuckets) {
		return nil, fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
			vbm.VBucketMap, vBuckets)
	}

	for _, vb := range vBuckets {
		masterID := vbm.VBucketMap[vb][0]
		master := b.getMasterNode(masterID)
		if master == "" {
			return nil, fmt.Errorf("No master found for vb %d", vb)
		}

		vbList := vbHostList[master]
		if vbList == nil {
			vbList = make([]uint16, 0)
		}
		vbList = append(vbList, vb)
		vbHostList[master] = vbList
	}

	failoverLogMap := make(FailoverLog)
	for _, serverConn := range b.getConnPools(false /* not already locked */) {

		vbList := vbHostList[serverConn.host]
		if vbList == nil {
			continue // No requested vbuckets live on this node.
		}

		mc, err := serverConn.Get()
		if err != nil {
			logging.Infof("No Free connections for vblist %v", vbList)
			return nil, fmt.Errorf("No Free connections for host %s",
				serverConn.host)

		}
		// close the connection so that it doesn't get reused for upr data
		// connection
		defer mc.Close()
		mc.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
		failoverlogs, err := mc.UprGetFailoverLog(vbList)
		if err != nil {
			return nil, fmt.Errorf("Error getting failover log %s host %s",
				err.Error(), serverConn.host)

		}

		for vb, log := range failoverlogs {
			failoverLogMap[vb] = *log
		}
	}

	return failoverLogMap, nil
}

// StartUprFeed starts a feed with default buffer sizes (data channel
// of 10, DEFAULT_WINDOW_SIZE DCP buffer).
func (b *Bucket) StartUprFeed(name string, sequence uint32) (*UprFeed, error) {
	return b.StartUprFeedWithConfig(name, sequence, 10, DEFAULT_WINDOW_SIZE)
}

// StartUprFeed creates and starts a new Upr feed
// No data will be sent on the channel unless vbuckets streams are requested
func (b *Bucket) StartUprFeedWithConfig(name string, sequence uint32, data_chan_size int, dcp_buffer_size uint32) (*UprFeed, error) {

	feed := &UprFeed{
		bucket:          b,
		output:          make(chan *memcached.UprEvent, data_chan_size),
		quit:            make(chan bool),
		nodeFeeds:       make(map[string]*FeedInfo, 0),
		name:            name,
		sequence:        sequence,
		killSwitch:      make(chan bool),
		dcp_buffer_size: dcp_buffer_size,
		data_chan_size:  data_chan_size,
	}

	err := feed.connectToNodes()
	if err != nil {
		return nil, fmt.Errorf("Cannot connect to bucket %s", err.Error())
	}
	feed.connected = true
	// run() owns reconnection for the life of the feed.
	go feed.run()

	feed.C = feed.output
	return feed, nil
}

// UprRequestStream starts a stream for a vb on a feed
func (feed *UprFeed) UprRequestStream(vb uint16, opaque uint16, flags uint32,
	vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {

	defer func() {
		// Re-panic with feed context if anything below blows up.
		if r := recover(); r != nil {
			log.Panicf("Panic in UprRequestStream. Feed %v Bucket %v", feed, feed.bucket)
		}
	}()

	vbm := feed.bucket.VBServerMap()
	if len(vbm.VBucketMap) < int(vb) {
		return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
			vb, vbm.VBucketMap)
	}

	if int(vb) >= len(vbm.VBucketMap) {
		return fmt.Errorf("Invalid vbucket id %d", vb)
	}

	masterID := vbm.VBucketMap[vb][0]
	master := feed.bucket.getMasterNode(masterID)
	if master == "" {
		return fmt.Errorf("Master node not found for vbucket %d", vb)
	}
	singleFeed := feed.nodeFeeds[master]
	if singleFeed == nil {
		return fmt.Errorf("UprFeed for this host not found")
	}

	if err := singleFeed.uprFeed.UprRequestStream(vb, opaque, flags,
		vuuid, startSequence, endSequence, snapStart, snapEnd); err != nil {
		return err
	}

	return nil
}

// UprCloseStream ends a vbucket stream.
func (feed *UprFeed) UprCloseStream(vb, opaqueMSB uint16) error {

	defer func() {
		if r := recover(); r != nil {
			log.Panicf("Panic in UprCloseStream. Feed %v Bucket %v ", feed, feed.bucket)
		}
	}()

	vbm := feed.bucket.VBServerMap()
	if len(vbm.VBucketMap) < int(vb) {
		return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
			vb, vbm.VBucketMap)
	}

	if int(vb) >= len(vbm.VBucketMap) {
		return fmt.Errorf("Invalid vbucket id %d", vb)
	}

	masterID := vbm.VBucketMap[vb][0]
	master := feed.bucket.getMasterNode(masterID)
	if master == "" {
		return fmt.Errorf("Master node not found for vbucket %d", vb)
	}
	singleFeed := feed.nodeFeeds[master]
	if singleFeed == nil {
		return fmt.Errorf("UprFeed for this host not found")
	}

	if err := singleFeed.uprFeed.CloseStream(vb, opaqueMSB); err != nil {
		return err
	}
	return nil
}

// Goroutine that runs the feed
// It blocks until a node feed fails (killSwitch) or the feed is
// closed (quit), then refreshes the bucket and reconnects with
// exponential backoff capped at maximumRetryInterval.
func (feed *UprFeed) run() {
	retryInterval := initialRetryInterval
	bucketOK := true
	for {
		// Connect to the UPR feed of each server node:
		if bucketOK {
			// Run until one of the sub-feeds fails:
			select {
			case <-feed.killSwitch:
			case <-feed.quit:
				return
			}
			//feed.closeNodeFeeds()
			retryInterval = initialRetryInterval
		}

		if feed.closing == true {
			// we have been asked to shut down
			return
		}

		// On error, try to refresh the bucket in case the list of nodes changed:
		logging.Infof("go-couchbase: UPR connection lost; reconnecting to bucket %q in %v",
			feed.bucket.Name, retryInterval)

		if err := feed.bucket.Refresh(); err != nil {
			// if we fail to refresh the bucket, exit the feed
			// MB-14917
			logging.Infof("Unable to refresh bucket %s ", err.Error())
			close(feed.output)
			feed.outputClosed = true
			feed.closeNodeFeeds()
			return
		}

		// this will only connect to nodes that are not connected or changed
		// user will have to reconnect the stream
		err := feed.connectToNodes()
		if err != nil {
			logging.Infof("Unable to connect to nodes..exit ")
			close(feed.output)
			feed.outputClosed = true
			feed.closeNodeFeeds()
			return
		}
		bucketOK = err == nil

		// Back off before the next reconnection attempt.
		select {
		case <-time.After(retryInterval):
		case <-feed.quit:
			return
		}
		if retryInterval *= 2; retryInterval > maximumRetryInterval {
			retryInterval = maximumRetryInterval
		}
	}
}

// connectToNodes opens (or re-opens) a node-level UPR feed for every
// pool host that does not already have a connected feed, and starts a
// forwarding goroutine per new feed.  Returns an error if no node at
// all could be connected.
func (feed *UprFeed) connectToNodes() (err error) {
	nodeCount := 0
	for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) {

		// this maybe a reconnection, so check if the connection to the node
		// already exists. Connect only if the node is not found in the list
		// or connected == false
		nodeFeed := feed.nodeFeeds[serverConn.host]

		if nodeFeed != nil && nodeFeed.connected == true {
			continue
		}

		var singleFeed *memcached.UprFeed
		var name string
		if feed.name == "" {
			name = "DefaultUprClient"
		} else {
			name = feed.name
		}
		singleFeed, err = serverConn.StartUprFeed(name, feed.sequence, feed.dcp_buffer_size, feed.data_chan_size)
		if err != nil {
			logging.Errorf("go-couchbase: Error connecting to upr feed of %s: %v", serverConn.host, err)
			feed.closeNodeFeeds()
			return
		}
		// add the node to the connection map
		feedInfo := &FeedInfo{
			uprFeed:   singleFeed,
			connected: true,
			host:      serverConn.host,
			quit:      make(chan bool),
		}
		feed.nodeFeeds[serverConn.host] = feedInfo
		go feed.forwardUprEvents(feedInfo, feed.killSwitch, serverConn.host)
		feed.wg.Add(1)
		nodeCount++
	}
	if nodeCount == 0 {
		return fmt.Errorf("No connection to bucket")
	}

	return nil
}

// Goroutine that forwards Upr events from a single node's feed to the aggregate feed.
// On NOT_MY_VBUCKET it refreshes the bucket and reconnects; on an
// upstream channel close it signals killSwitch so run() can recover.
func (feed *UprFeed) forwardUprEvents(nodeFeed *FeedInfo, killSwitch chan bool, host string) {
	singleFeed := nodeFeed.uprFeed

	defer func() {
		feed.wg.Done()
		if r := recover(); r != nil {
			//if feed is not closing, re-throw the panic
			if feed.outputClosed != true && feed.closing != true {
				panic(r)
			} else {
				logging.Errorf("Panic is recovered. Since feed is closed, exit gracefully")

			}
		}
	}()

	for {
		select {
		case <-nodeFeed.quit:
			nodeFeed.connected = false
			return

		case event, ok := <-singleFeed.C:
			if !ok {
				// Upstream channel closed: report the error (if any)
				// and wake run() to reconnect.
				if singleFeed.Error != nil {
					logging.Errorf("go-couchbase: Upr feed from %s failed: %v", host, singleFeed.Error)
				}
				killSwitch <- true
				return
			}
			if feed.outputClosed == true {
				// someone closed the node feed
				logging.Infof("Node need closed, returning from forwardUprEvent")
				return
			}
			feed.output <- event
			if event.Status == gomemcached.NOT_MY_VBUCKET {
				logging.Infof(" Got a not my vbucket error !! ")
				if err := feed.bucket.Refresh(); err != nil {
					logging.Errorf("Unable to refresh bucket %s ", err.Error())
					feed.closeNodeFeeds()
					return
				}
				// this will only connect to nodes that are not connected or changed
				// user will have to reconnect the stream
				if err := feed.connectToNodes(); err != nil {
					logging.Errorf("Unable to connect to nodes %s", err.Error())
					return
				}

			}
		}
	}
}

// closeNodeFeeds shuts down every per-node feed: signals its quit
// channel, closes the underlying memcached feed, and drops the map.
func (feed *UprFeed) closeNodeFeeds() {
	for _, f := range feed.nodeFeeds {
		logging.Infof(" Sending close to forwardUprEvent ")
		close(f.quit)
		f.uprFeed.Close()
	}
	feed.nodeFeeds = nil
}

// Close a Upr feed.
func (feed *UprFeed) Close() error {
	// Idempotent: if quit is already closed a second Close is a no-op.
	select {
	case <-feed.quit:
		return nil
	default:
	}

	feed.closing = true
	feed.closeNodeFeeds()
	close(feed.quit)

	// Wait for every forwardUprEvents goroutine to exit before
	// closing the output channel they write to.
	feed.wg.Wait()
	if feed.outputClosed == false {
		feed.outputClosed = true
		close(feed.output)
	}

	return nil
}

// -------------------------------------------------------------------- /users.go:

package couchbase

import (
	"bytes"
	"fmt"
)

// User describes an RBAC user as returned by /settings/rbac/users.
type User struct {
	Name   string
	Id     string
	Domain string
	Roles  []Role
}

// Role is a single role assignment, optionally scoped to a bucket,
// scope, and collection.
type Role struct {
	Role           string
	BucketName     string `json:"bucket_name"`
	ScopeName      string `json:"scope_name"`
	CollectionName string `json:"collection_name"`
}

// Sample:
// {"role":"admin","name":"Admin","desc":"Can manage ALL cluster features including security.","ce":true}
// {"role":"query_select","bucket_name":"*","name":"Query Select","desc":"Can execute SELECT statement on bucket to retrieve data"}
type RoleDescription struct {
	Role       string
	Name       string
	Desc       string
	Ce         bool
	BucketName string `json:"bucket_name"`
}

// Return user-role data, as parsed JSON.
// Sample:
// [{"id":"ivanivanov","name":"Ivan Ivanov","roles":[{"role":"cluster_admin"},{"bucket_name":"default","role":"bucket_admin"}]},
// {"id":"petrpetrov","name":"Petr Petrov","roles":[{"role":"replication_admin"}]}]
func (c *Client) GetUserRoles() ([]interface{}, error) {
	ret := make([]interface{}, 0, 1)
	err := c.parseURLResponse("/settings/rbac/users", &ret)
	if err != nil {
		return nil, err
	}

	// Get the configured administrator.
	// Expected result: {"port":8091,"username":"Administrator"}
	adminInfo := make(map[string]interface{}, 2)
	err = c.parseURLResponse("/settings/web", &adminInfo)
	if err != nil {
		return nil, err
	}

	// Create a special entry for the configured administrator, which
	// the RBAC endpoint does not list.
	adminResult := map[string]interface{}{
		"name":   adminInfo["username"],
		"id":     adminInfo["username"],
		"domain": "ns_server",
		"roles": []interface{}{
			map[string]interface{}{
				"role": "admin",
			},
		},
	}

	// Add the configured administrator to the list of results.
	ret = append(ret, adminResult)

	return ret, nil
}

// GetUserInfoAll returns all RBAC users decoded into User structs.
func (c *Client) GetUserInfoAll() ([]User, error) {
	ret := make([]User, 0, 16)
	err := c.parseURLResponse("/settings/rbac/users", &ret)
	if err != nil {
		return nil, err
	}
	return ret, nil
}

// rolesToParamFormat serializes roles into the comma-separated
// "role[bucket]" form the REST API expects, e.g.
// "cluster_admin,bucket_admin[default]".
// NOTE(review): ScopeName/CollectionName are not emitted here —
// confirm whether scoped roles need to be supported by callers.
func rolesToParamFormat(roles []Role) string {
	var buffer bytes.Buffer
	for i, role := range roles {
		if i > 0 {
			buffer.WriteString(",")
		}
		buffer.WriteString(role.Role)
		if role.BucketName != "" {
			buffer.WriteString("[")
			buffer.WriteString(role.BucketName)
			buffer.WriteString("]")
		}
	}
	return buffer.String()
}

// PutUserInfo creates or updates an RBAC user via the REST API.
// u.Domain must be "external" or "local"; any other value is an error.
func (c *Client) PutUserInfo(u *User) error {
	params := map[string]interface{}{
		"name":  u.Name,
		"roles": rolesToParamFormat(u.Roles),
	}
	var target string
	switch u.Domain {
	case "external":
		target = "/settings/rbac/users/" + u.Id
	case "local":
		target = "/settings/rbac/users/local/" + u.Id
	default:
		return fmt.Errorf("Unknown user type: %s", u.Domain)
	}
	var ret string // PUT returns an empty string. We ignore it.
	err := c.parsePutURLResponse(target, params, &ret)
	return err
}

// GetRolesAll returns the descriptions of every role the server knows.
func (c *Client) GetRolesAll() ([]RoleDescription, error) {
	ret := make([]RoleDescription, 0, 32)
	err := c.parseURLResponse("/settings/rbac/roles", &ret)
	if err != nil {
		return nil, err
	}
	return ret, nil
}

// -------------------------------------------------------------------- /users_test.go:

package couchbase

import (
	"encoding/json"
	"reflect"
	"testing"
)

// TestGetRolesAll checks GetRolesAll against a live server; it is
// skipped by default because it requires a running cluster.
func TestGetRolesAll(t *testing.T) {
	t.Skip("Skip this test, as it needs a live connection")

	client, err := ConnectWithAuthCreds("http://localhost:8091", "Administrator", "password")
	if err != nil {
		t.Fatalf("Unable to connect: %v", err)
	}
	roles, err := client.GetRolesAll()
	if err != nil {
		t.Fatalf("Unable to get roles: %v", err)
	}

	cases := make(map[string]RoleDescription, 2)
	cases["admin"] = RoleDescription{Role: "admin", Name: "Full Admin",
		Desc: "Can manage all cluster features (including security). This user can access the web console. This user can read and write all data.",
		Ce:   true}
	cases["query_select"] = RoleDescription{Role: "query_select", BucketName: "*", Name: "Query Select",
		Desc: "Can execute a SELECT statement on a given bucket to retrieve data. This user can access the web console and can read data, but not write it."}
	for roleName, expectedValue := range cases {
		foundThisRole := false
		for _, foundValue := range roles {
			if foundValue.Role == roleName {
				foundThisRole = true
				if expectedValue == foundValue {
					break // OK for this role
				}
				t.Fatalf("Unexpected value for role %s. Expected %+v, got %+v", roleName, expectedValue, foundValue)
			}
		}
		if !foundThisRole {
			t.Fatalf("Could not find role %s", roleName)
		}
	}
}

// TestUserUnmarshal verifies JSON decoding of users and the
// round-trip through rolesToParamFormat.
func TestUserUnmarshal(t *testing.T) {
	text := `[{"id":"ivanivanov","name":"Ivan Ivanov","roles":[{"role":"cluster_admin"},{"bucket_name":"default","role":"bucket_admin"}]},
	{"id":"petrpetrov","name":"Petr Petrov","roles":[{"role":"replication_admin"}]}]`
	users := make([]User, 0)

	err := json.Unmarshal([]byte(text), &users)
	if err != nil {
		t.Fatalf("Unable to unmarshal: %v", err)
	}

	expected := []User{
		User{Id: "ivanivanov", Name: "Ivan Ivanov", Roles: []Role{
			Role{Role: "cluster_admin"},
			Role{Role: "bucket_admin", BucketName: "default"}}},
		User{Id: "petrpetrov", Name: "Petr Petrov", Roles: []Role{
			Role{Role: "replication_admin"}}},
	}
	if !reflect.DeepEqual(users, expected) {
		t.Fatalf("Unexpected unmarshalled result. Expected %v, got %v.", expected, users)
	}

	ivanRoles := rolesToParamFormat(users[0].Roles)
	ivanRolesExpected := "cluster_admin,bucket_admin[default]"
	if ivanRolesExpected != ivanRoles {
		t.Errorf("Unexpected param for Ivan. Expected %v, got %v.", ivanRolesExpected, ivanRoles)
	}
	petrRoles := rolesToParamFormat(users[1].Roles)
	petrRolesExpected := "replication_admin"
	if petrRolesExpected != petrRoles {
		t.Errorf("Unexpected param for Petr. Expected %v, got %v.", petrRolesExpected, petrRoles)
	}

}

// -------------------------------------------------------------------- /util.go:

package couchbase

import (
	"fmt"
	"net/url"
	"strings"
)

// CleanupHost returns the hostname with the given suffix removed.
10 | func CleanupHost(h, commonSuffix string) string { 11 | if strings.HasSuffix(h, commonSuffix) { 12 | return h[:len(h)-len(commonSuffix)] 13 | } 14 | return h 15 | } 16 | 17 | // FindCommonSuffix returns the longest common suffix from the given 18 | // strings. 19 | func FindCommonSuffix(input []string) string { 20 | rv := "" 21 | if len(input) < 2 { 22 | return "" 23 | } 24 | from := input 25 | for i := len(input[0]); i > 0; i-- { 26 | common := true 27 | suffix := input[0][i:] 28 | for _, s := range from { 29 | if !strings.HasSuffix(s, suffix) { 30 | common = false 31 | break 32 | } 33 | } 34 | if common { 35 | rv = suffix 36 | } 37 | } 38 | return rv 39 | } 40 | 41 | // ParseURL is a wrapper around url.Parse with some sanity-checking 42 | func ParseURL(urlStr string) (result *url.URL, err error) { 43 | result, err = url.Parse(urlStr) 44 | if result != nil && result.Scheme == "" { 45 | result = nil 46 | err = fmt.Errorf("invalid URL <%s>", urlStr) 47 | } 48 | return 49 | } 50 | -------------------------------------------------------------------------------- /util/viewmgmt.go: -------------------------------------------------------------------------------- 1 | // Package couchbaseutil offers some convenience functions for apps 2 | // that use couchbase. 3 | package couchbaseutil 4 | 5 | import ( 6 | "encoding/json" 7 | "log" 8 | "time" 9 | 10 | "github.com/couchbase/go-couchbase" 11 | ) 12 | 13 | // A ViewMarker is stored in your DB to mark a particular view 14 | // version. 15 | type ViewMarker struct { 16 | Version int `json:"version"` 17 | Timestamp time.Time `json:"timestamp"` 18 | Type string `json:"type"` 19 | } 20 | 21 | // UpdateView installs or updates a view. 22 | // 23 | // This creates a document that tracks the version of design document 24 | // in couchbase and updates it if it's behind the version specified. 
25 | // 26 | // A ViewMarker is stored with a type of "viewmarker" under the key 27 | // specified by `markerKey` to keep up with the view info. 28 | func UpdateView(d *couchbase.Bucket, 29 | ddocName, markerKey, ddocBody string, version int) error { 30 | 31 | marker := ViewMarker{} 32 | err := d.Get(markerKey, &marker) 33 | if err != nil { 34 | log.Printf("Error checking view version: %v", err) 35 | } 36 | if marker.Version < version { 37 | log.Printf("Installing new version of views (old version=%v)", 38 | marker.Version) 39 | doc := json.RawMessage([]byte(ddocBody)) 40 | err = d.PutDDoc(ddocName, &doc) 41 | if err != nil { 42 | return err 43 | } 44 | marker.Version = version 45 | marker.Timestamp = time.Now().UTC() 46 | marker.Type = "viewmarker" 47 | 48 | return d.Set(markerKey, 0, &marker) 49 | } 50 | return nil 51 | } 52 | -------------------------------------------------------------------------------- /util_test.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestCleanupHost(t *testing.T) { 8 | tests := []struct { 9 | name, full, suffix, exp string 10 | }{ 11 | {"empty", "", "", ""}, 12 | {"empty suffix", "aprefix", "", "aprefix"}, 13 | {"empty host", "", "asuffix", ""}, 14 | {"matched suffix", "server1.example.com:11210", ".example.com:11210", "server1"}, 15 | } 16 | 17 | for _, test := range tests { 18 | got := CleanupHost(test.full, test.suffix) 19 | if got != test.exp { 20 | t.Errorf("Error on %v: got %q, expected %q", 21 | test.name, got, test.exp) 22 | } 23 | } 24 | } 25 | 26 | func TestFindCommonSuffix(t *testing.T) { 27 | tests := []struct { 28 | name, exp string 29 | strings []string 30 | }{ 31 | {"empty", "", nil}, 32 | {"one", "", []string{"blah"}}, 33 | {"two", ".com", []string{"blah.com", "foo.com"}}, 34 | } 35 | 36 | for _, test := range tests { 37 | got := FindCommonSuffix(test.strings) 38 | if got != test.exp { 39 | t.Errorf("Error on 
%v: got %q, expected %q", 40 | test.name, got, test.exp) 41 | } 42 | } 43 | } 44 | 45 | func TestParseURL(t *testing.T) { 46 | tests := []struct { 47 | in string 48 | works bool 49 | }{ 50 | {"", false}, 51 | {"http://whatever/", true}, 52 | {"http://%/", false}, 53 | } 54 | 55 | for _, test := range tests { 56 | got, err := ParseURL(test.in) 57 | switch { 58 | case err == nil && test.works, 59 | !(err == nil || test.works): 60 | case err == nil && !test.works: 61 | t.Errorf("Expected failure on %v, got %v", test.in, got) 62 | case test.works && err != nil: 63 | t.Errorf("Expected success on %v, got %v", test.in, err) 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /vbmap.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | var crc32tab = []uint32{ 4 | 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 5 | 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 6 | 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 7 | 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 8 | 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 9 | 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 10 | 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 11 | 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 12 | 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 13 | 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 14 | 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 15 | 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 16 | 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 17 | 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, 18 | 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 19 | 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 20 | 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 21 | 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, 22 | 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 23 | 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 24 | 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 25 | 
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 26 | 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 27 | 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 28 | 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 29 | 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 30 | 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 31 | 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 32 | 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 33 | 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, 34 | 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 35 | 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 36 | 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 37 | 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 38 | 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 39 | 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 40 | 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 41 | 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 42 | 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 43 | 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 44 | 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 45 | 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 46 | 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 47 | 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 48 | 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 49 | 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 50 | 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 51 | 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 52 | 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 53 | 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 54 | 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 55 | 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 56 | 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 57 | 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 58 | 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 59 | 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 60 | 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 61 | 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 62 | 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 
0x37d83bf0, 63 | 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 64 | 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 65 | 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 66 | 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 67 | 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d} 68 | 69 | // VBHash finds the vbucket for the given key. 70 | func (b *Bucket) VBHash(key string) uint32 { 71 | crc := uint32(0xffffffff) 72 | for x := 0; x < len(key); x++ { 73 | crc = (crc >> 8) ^ crc32tab[(uint64(crc)^uint64(key[x]))&0xff] 74 | } 75 | vbm := b.VBServerMap() 76 | return ((^crc) >> 16) & 0x7fff & (uint32(len(vbm.VBucketMap)) - 1) 77 | } 78 | -------------------------------------------------------------------------------- /vbmap_test.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "testing" 5 | "unsafe" 6 | ) 7 | 8 | func testBucket() Bucket { 9 | b := Bucket{vBucketServerMap: unsafe.Pointer(&VBucketServerMap{ 10 | VBucketMap: make([][]int, 256), 11 | })} 12 | return b 13 | } 14 | 15 | /* 16 | key: k0 master: 10.1.7.1:11210 vBucketId: 9 couchApiBase: http://10.1.7.1:8092/default replicas: 10.1.7.2:11210 17 | key: k1 master: 10.1.7.1:11210 vBucketId: 14 couchApiBase: http://10.1.7.1:8092/default replicas: 10.1.7.3:11210 18 | key: k2 master: 10.1.7.1:11210 vBucketId: 7 couchApiBase: http://10.1.7.1:8092/default replicas: 10.1.7.2:11210 19 | key: k3 master: 10.1.7.1:11210 vBucketId: 0 couchApiBase: http://10.1.7.1:8092/default replicas: 10.1.7.2:11210 20 | key: k4 master: 10.1.7.2:11210 vBucketId: 100 couchApiBase: http://10.1.7.2:8092/default replicas: 10.1.7.5:11210 21 | key: k5 master: 10.1.7.2:11210 vBucketId: 99 couchApiBase: http://10.1.7.2:8092/default replicas: 10.1.7.5:11210 22 | */ 23 | 24 | func TestVBHash(t *testing.T) { 25 | b := testBucket() 26 | m := map[string]uint32{ 27 | "k0": 9, 28 | "k1": 14, 29 | "k2": 7, 30 | "k3": 0, 31 | "k4": 100, 32 | "k5": 99, 33 | } 34 | 35 | for k, v := 
range m { 36 | assert(t, k, b.VBHash(k), v) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /views.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "io/ioutil" 8 | "math/rand" 9 | "net/http" 10 | "net/url" 11 | "time" 12 | ) 13 | 14 | // ViewRow represents a single result from a view. 15 | // 16 | // Doc is present only if include_docs was set on the request. 17 | type ViewRow struct { 18 | ID string 19 | Key interface{} 20 | Value interface{} 21 | Doc *interface{} 22 | } 23 | 24 | // A ViewError is a node-specific error indicating a partial failure 25 | // within a view result. 26 | type ViewError struct { 27 | From string 28 | Reason string 29 | } 30 | 31 | func (ve ViewError) Error() string { 32 | return "Node: " + ve.From + ", reason: " + ve.Reason 33 | } 34 | 35 | // ViewResult holds the entire result set from a view request, 36 | // including the rows and the errors. 
37 | type ViewResult struct { 38 | TotalRows int `json:"total_rows"` 39 | Rows []ViewRow 40 | Errors []ViewError 41 | } 42 | 43 | func (b *Bucket) randomBaseURL() (*url.URL, error) { 44 | nodes := b.HealthyNodes() 45 | if len(nodes) == 0 { 46 | return nil, errors.New("no available couch rest URLs") 47 | } 48 | nodeNo := rand.Intn(len(nodes)) 49 | node := nodes[nodeNo] 50 | 51 | b.RLock() 52 | name := b.Name 53 | pool := b.pool 54 | b.RUnlock() 55 | 56 | u, err := ParseURL(node.CouchAPIBase) 57 | if err != nil { 58 | return nil, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v", 59 | name, nodeNo, node.CouchAPIBase, err) 60 | } else if pool != nil { 61 | u.User = pool.client.BaseURL.User 62 | } 63 | return u, err 64 | } 65 | 66 | const START_NODE_ID = -1 67 | 68 | func (b *Bucket) randomNextURL(lastNode int) (*url.URL, int, error) { 69 | nodes := b.HealthyNodes() 70 | if len(nodes) == 0 { 71 | return nil, -1, errors.New("no available couch rest URLs") 72 | } 73 | 74 | var nodeNo int 75 | if lastNode == START_NODE_ID || lastNode >= len(nodes) { 76 | // randomly select a node if the value of lastNode is invalid 77 | nodeNo = rand.Intn(len(nodes)) 78 | } else { 79 | // wrap around the node list 80 | nodeNo = (lastNode + 1) % len(nodes) 81 | } 82 | 83 | b.RLock() 84 | name := b.Name 85 | pool := b.pool 86 | b.RUnlock() 87 | 88 | node := nodes[nodeNo] 89 | u, err := ParseURL(node.CouchAPIBase) 90 | if err != nil { 91 | return nil, -1, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v", 92 | name, nodeNo, node.CouchAPIBase, err) 93 | } else if pool != nil { 94 | u.User = pool.client.BaseURL.User 95 | } 96 | return u, nodeNo, err 97 | } 98 | 99 | // DocID is the document ID type for the startkey_docid parameter in 100 | // views. 
101 | type DocID string 102 | 103 | func qParam(k, v string) string { 104 | format := `"%s"` 105 | switch k { 106 | case "startkey_docid", "endkey_docid", "stale": 107 | format = "%s" 108 | } 109 | return fmt.Sprintf(format, v) 110 | } 111 | 112 | // ViewURL constructs a URL for a view with the given ddoc, view name, 113 | // and parameters. 114 | func (b *Bucket) ViewURL(ddoc, name string, 115 | params map[string]interface{}) (string, error) { 116 | u, err := b.randomBaseURL() 117 | if err != nil { 118 | return "", err 119 | } 120 | 121 | values := url.Values{} 122 | for k, v := range params { 123 | switch t := v.(type) { 124 | case DocID: 125 | values[k] = []string{string(t)} 126 | case string: 127 | values[k] = []string{qParam(k, t)} 128 | case int: 129 | values[k] = []string{fmt.Sprintf(`%d`, t)} 130 | case bool: 131 | values[k] = []string{fmt.Sprintf(`%v`, t)} 132 | default: 133 | b, err := json.Marshal(v) 134 | if err != nil { 135 | return "", fmt.Errorf("unsupported value-type %T in Query, "+ 136 | "json encoder said %v", t, err) 137 | } 138 | values[k] = []string{fmt.Sprintf(`%v`, string(b))} 139 | } 140 | } 141 | 142 | if ddoc == "" && name == "_all_docs" { 143 | u.Path = fmt.Sprintf("/%s/_all_docs", b.GetName()) 144 | } else { 145 | u.Path = fmt.Sprintf("/%s/_design/%s/_view/%s", b.GetName(), ddoc, name) 146 | } 147 | u.RawQuery = values.Encode() 148 | 149 | return u.String(), nil 150 | } 151 | 152 | // ViewCallback is called for each view invocation. 153 | var ViewCallback func(ddoc, name string, start time.Time, err error) 154 | 155 | // ViewCustom performs a view request that can map row values to a 156 | // custom type. 157 | // 158 | // See the source to View for an example usage. 
159 | func (b *Bucket) ViewCustom(ddoc, name string, params map[string]interface{}, 160 | vres interface{}) (err error) { 161 | if SlowServerCallWarningThreshold > 0 { 162 | defer slowLog(time.Now(), "call to ViewCustom(%q, %q)", ddoc, name) 163 | } 164 | 165 | if ViewCallback != nil { 166 | defer func(t time.Time) { ViewCallback(ddoc, name, t, err) }(time.Now()) 167 | } 168 | 169 | u, err := b.ViewURL(ddoc, name, params) 170 | if err != nil { 171 | return err 172 | } 173 | 174 | req, err := http.NewRequest("GET", u, nil) 175 | if err != nil { 176 | return err 177 | } 178 | 179 | ah := b.authHandler(false /* bucket not yet locked */) 180 | maybeAddAuth(req, ah) 181 | 182 | res, err := doHTTPRequest(req) 183 | if err != nil { 184 | return fmt.Errorf("error starting view req at %v: %v", u, err) 185 | } 186 | defer res.Body.Close() 187 | 188 | if res.StatusCode != 200 { 189 | bod := make([]byte, 512) 190 | l, _ := res.Body.Read(bod) 191 | return fmt.Errorf("error executing view req at %v: %v - %s", 192 | u, res.Status, bod[:l]) 193 | } 194 | 195 | body, err := ioutil.ReadAll(res.Body) 196 | if err := json.Unmarshal(body, vres); err != nil { 197 | return nil 198 | } 199 | 200 | return nil 201 | } 202 | 203 | // View executes a view. 204 | // 205 | // The ddoc parameter is just the bare name of your design doc without 206 | // the "_design/" prefix. 207 | // 208 | // Parameters are string keys with values that correspond to couchbase 209 | // view parameters. Primitive should work fairly naturally (booleans, 210 | // ints, strings, etc...) and other values will attempt to be JSON 211 | // marshaled (useful for array indexing on on view keys, for example). 
212 | // 213 | // Example: 214 | // 215 | // res, err := couchbase.View("myddoc", "myview", map[string]interface{}{ 216 | // "group_level": 2, 217 | // "startkey_docid": []interface{}{"thing"}, 218 | // "endkey_docid": []interface{}{"thing", map[string]string{}}, 219 | // "stale": false, 220 | // }) 221 | func (b *Bucket) View(ddoc, name string, params map[string]interface{}) (ViewResult, error) { 222 | vres := ViewResult{} 223 | 224 | if err := b.ViewCustom(ddoc, name, params, &vres); err != nil { 225 | //error in accessing views. Retry once after a bucket refresh 226 | b.Refresh() 227 | return vres, b.ViewCustom(ddoc, name, params, &vres) 228 | } else { 229 | return vres, nil 230 | } 231 | } 232 | -------------------------------------------------------------------------------- /views_test.go: -------------------------------------------------------------------------------- 1 | package couchbase 2 | 3 | import ( 4 | "testing" 5 | "unsafe" 6 | ) 7 | 8 | func TestViewError(t *testing.T) { 9 | e := ViewError{"f", "r"} 10 | exp := `Node: f, reason: r` 11 | if e.Error() != exp { 12 | t.Errorf("Expected %q, got %q", exp, e.Error()) 13 | } 14 | } 15 | 16 | func mkNL(in []Node) unsafe.Pointer { 17 | return unsafe.Pointer(&in) 18 | } 19 | 20 | func TestViewURL(t *testing.T) { 21 | // Missing URL 22 | b := Bucket{nodeList: mkNL([]Node{{}})} 23 | v, err := b.ViewURL("a", "b", nil) 24 | if err == nil { 25 | t.Errorf("Expected error on missing URL, got %v", v) 26 | } 27 | 28 | // Invalidish URL 29 | b = Bucket{nodeList: mkNL([]Node{{CouchAPIBase: "::gopher:://localhost:80x92/"}})} 30 | v, err = b.ViewURL("a", "b", nil) 31 | if err == nil { 32 | t.Errorf("Expected error on broken URL, got %v", v) 33 | } 34 | 35 | // Unmarshallable parameter 36 | b = Bucket{nodeList: mkNL([]Node{{CouchAPIBase: "http:://localhost:8092/"}})} 37 | v, err = b.ViewURL("a", "b", 38 | map[string]interface{}{"ch": make(chan bool)}) 39 | if err == nil { 40 | t.Errorf("Expected error on unmarshalable 
param, got %v", v) 41 | } 42 | 43 | tests := []struct { 44 | ddoc, name string 45 | params map[string]interface{} 46 | exppath string 47 | exp map[string]string 48 | }{ 49 | {"a", "b", 50 | map[string]interface{}{"i": 1, "b": true, "s": "ess"}, 51 | "/x/_design/a/_view/b", 52 | map[string]string{"i": "1", "b": "true", "s": `"ess"`}}, 53 | {"a", "b", 54 | map[string]interface{}{"unk": DocID("le"), "startkey_docid": "ess"}, 55 | "/x/_design/a/_view/b", 56 | map[string]string{"unk": "le", "startkey_docid": "ess"}}, 57 | {"a", "b", 58 | map[string]interface{}{"stale": "update_after"}, 59 | "/x/_design/a/_view/b", 60 | map[string]string{"stale": "update_after"}}, 61 | {"a", "b", 62 | map[string]interface{}{"startkey": []string{"a"}}, 63 | "/x/_design/a/_view/b", 64 | map[string]string{"startkey": `["a"]`}}, 65 | {"", "_all_docs", nil, "/x/_all_docs", map[string]string{}}, 66 | } 67 | 68 | b = Bucket{Name: "x", 69 | nodeList: mkNL([]Node{{CouchAPIBase: "http://localhost:8092/", Status: "healthy"}})} 70 | for _, test := range tests { 71 | us, err := b.ViewURL(test.ddoc, test.name, test.params) 72 | if err != nil { 73 | t.Errorf("Failed on %v: %v", test, err) 74 | continue 75 | } 76 | 77 | u, err := ParseURL(us) 78 | if err != nil { 79 | t.Errorf("Failed on %v", test) 80 | continue 81 | } 82 | 83 | if u.Path != test.exppath { 84 | t.Errorf("Expected path of %v to be %v, got %v", 85 | test, test.exppath, u.Path) 86 | } 87 | 88 | got := u.Query() 89 | 90 | if len(got) != len(test.exp) { 91 | t.Errorf("Expected %v, got %v", test.exp, got) 92 | continue 93 | } 94 | 95 | for k, v := range test.exp { 96 | if len(got[k]) != 1 || got.Get(k) != v { 97 | t.Errorf("Expected param %v to be %q on %v, was %#q", 98 | k, v, test, got[k]) 99 | } 100 | } 101 | } 102 | } 103 | 104 | func TestBadViewParam(t *testing.T) { 105 | b := Bucket{Name: "x", 106 | nodeList: mkNL([]Node{{CouchAPIBase: "http://localhost:8092/", 107 | Status: "healthy"}})} 108 | thing, err := b.ViewURL("adoc", "aview", 
map[string]interface{}{ 109 | "aparam": make(chan bool), 110 | }) 111 | if err == nil { 112 | t.Errorf("Failed to build a view with a bad param, got %v", 113 | thing) 114 | } 115 | 116 | } 117 | --------------------------------------------------------------------------------