├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── _conf └── riak.conf ├── _examples └── multi_async.go ├── bucket.go ├── bucket_type.go ├── bucket_type_test.go ├── buf.go ├── build_bench_test.go ├── changeset.go ├── client.go ├── client_benchmode.go ├── client_nobench.go ├── client_norace.go ├── client_race.go ├── client_test.go ├── counter.go ├── counter_test.go ├── delete.go ├── delete_test.go ├── fetch.go ├── fetch_test.go ├── index.go ├── index_test.go ├── object.go ├── object_test.go ├── rpbc ├── descriptor.proto ├── gogo.proto ├── riak.pb.go ├── riak.proto ├── riak_dt.pb.go ├── riak_dt.proto ├── riak_dtpb_test.go ├── riak_kv.pb.go ├── riak_kv.proto ├── riak_kvpb_test.go ├── riak_search.pb.go ├── riak_search.proto ├── riak_searchpb_test.go ├── riak_yokozuna.pb.go ├── riak_yokozuna.proto ├── riak_yokozunapb_test.go └── riakpb_test.go ├── store.go ├── store_test.go └── wercker.yml /.gitignore: -------------------------------------------------------------------------------- 1 | riak/* 2 | *~ 3 | coverage_profile.out 4 | .zedstate -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | go: 3 | - 1.3 4 | services: 5 | - riak 6 | install: 7 | - go get gopkg.in/check.v1 8 | - go get code.google.com/p/gogoprotobuf/proto 9 | - go install ./... 
10 | script: 11 | - go test -v -tags 'riak' -check.vv -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2014 Philip Hofer 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | install: 2 | @go install ./... 3 | 4 | test: 5 | @go test -v 6 | 7 | test-all: 8 | @go get gopkg.in/check.v1 9 | @go test -v -tags 'riak' -check.vv 10 | 11 | ci-deps: 12 | go get gopkg.in/check.v1 13 | go get code.google.com/p/gogoprotobuf/proto 14 | go get -d ./... 
15 | 16 | continuous: install test-all 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Riak for Go 2 | ================ 3 | [![Build Status](https://travis-ci.org/philhofer/rkive.svg?branch=master)](https://travis-ci.org/philhofer/rkive) 4 | [![docs examples](https://sourcegraph.com/api/repos/github.com/philhofer/rkive/.badges/docs-examples.png)](https://sourcegraph.com/github.com/philhofer/rkive) 5 | 6 | A Riak client for Go(1.3+). 7 | 8 | [![Riak](http://basho.com/design-assets/Riak-Product-Logos/png/riak-logo-color.png)](http://basho.com/riak) 9 | 10 | Complete documentation is available at [godoc](http://godoc.org/github.com/philhofer/rkive). 11 | 12 | ## Status 13 | 14 | Core functionality (fetch, store, secondary indexes, links) is complete, but many advanced features (MapReduce, Yokozuna search) are still on the way. There is no short-term guarantee that the API will remain stable. (We are shooting for a beta release in Nov. '14, followed by a "stable" 1.0 in December.) That being said, this code is already being actively tested in some production applications. 15 | 16 | ## Features 17 | 18 | - Efficient connection pooling and re-dialing. 19 | - Asynchronous batch fetches (see `FetchAsync` and `MultiFetchAsync`). 20 | - Easy RAM-backed caching (see `MakeCache`). 21 | - Transparent sibling conflict resolution. 22 | - Compare-and-swap (see: `PushChangeset`). 23 | - Low per-operation heap allocation overhead. 24 | 25 | 26 | ## Usage 27 | 28 | Satisfy the `Object` interface and you're off to the races. The included 'Blob' object is the simplest possible Object implementation. 29 | 30 | ```go 31 | import ( 32 | "github.com/philhofer/rkive" 33 | ) 34 | 35 | // Open up one connection 36 | riak, err := rkive.DialOne("127.0.0.1:8087", "test-Client-ID") 37 | // handle err... 
38 | 39 | blobs := riak.Bucket("blob_bucket") 40 | 41 | 42 | // let's make an object 43 | myBlob := &rkive.Blob{ Data: []byte("Hello World!") } 44 | 45 | // now let's put it in the database 46 | err = blobs.New(myBlob, nil) 47 | // handle err... 48 | 49 | 50 | // since we didn't specify a key, riak assigned 51 | // an available key 52 | fmt.Printf("Our blob key is %s\n", myBlob.Info().Key()) 53 | 54 | // Let's make a change to the object... 55 | myBlob.Data = []byte("MOAR DATA") 56 | 57 | // ... and store it! 58 | err = blobs.Push(myBlob) 59 | // riak.Push will return an error (riakpb.ErrModified) if 60 | // the object has been modified since the last 61 | // time you called New(), Push(), Store(), Fetch(), etc. 62 | // You can retreive the latest copy of the object with: 63 | updated, err := blobs.Update(myBlob) 64 | // handle err 65 | if updated { /* the object has been changed! */ } 66 | 67 | // you can also fetch a new copy 68 | // of the object like so: 69 | 70 | newBlob := &rkive.Blob{} 71 | 72 | err = blobs.Fetch(newBlob, myBlob.Info().Key()) 73 | // handle err... 74 | 75 | ``` 76 | 77 | For more worked examples, check out the `/_examples` folder. 78 | 79 | For automatic code generation that implements `Marshal` and `Unmarshal`, [check this out.](http://github.com/philhofer/msgp) 80 | 81 | If you want to run Riak with `allow_mult=true` (which you should *strongly* consider), take a look 82 | at the `ObjectM` interface, which allows you to specify a `Merge()` operation to be used for 83 | your object when multiple values are encountered on a read or write operation. If you have `allow_mult=true` 84 | and your object does not satisfy the `ObjectM` interface, then read and write operations on a key/bucket 85 | pair with siblings will return a `*ErrMultipleResponses`. (In the degenerate case where 10 consecutive merge 86 | conflict resolution attempts fail, `*ErrMultipleResponses` will be returned for `ObjectM` operations. 
This is to 87 | avoid "sibling explosion.") 88 | 89 | As an example, here's what the `Blob` type would have to define (internally) if it were 90 | to satisfy the `ObjectM` interface: 91 | 92 | ```go 93 | // NewEmpty should always return a properly-initialized 94 | // zero value for the type in question. The client 95 | // will marshal data into this object and pass it to 96 | // Merge(). 97 | func (b *Blob) NewEmpty() Object { 98 | return &Blob{} 99 | } 100 | 101 | // Merge should make a best-effort attempt to merge 102 | // data from its argument into the method receiver. 103 | // It should be prepared to handle nil/zero values 104 | // for either object. 105 | func (b *Blob) Merge(o Object) { 106 | // you can always type-assert the argument 107 | // to Merge() to be the same type returned 108 | // by NewEmtpy(), which should also be the 109 | // same type as the method receiver 110 | nb := o.(*Blob) 111 | 112 | // we don't really have a good way of handling 113 | // this conflict, so we'll set the content 114 | // to be the combination of both 115 | b.Content = append(b.Content, nb.Content...) 116 | } 117 | ``` 118 | 119 | ## Performance 120 | 121 | This client library was built with performance in mind. 122 | 123 | To run benchmarks, start up Riak locally and run: 124 | `go test -v -tags 'riak' -check.vv -bench .` 125 | 126 | You will need to be running Riak 2.0+ using the configuration file found at `$GOPATH/github.com/philhofer/rkive/_conf/riak.conf`. 127 | 128 | Here's what I get on my MacBook Pro, keeping in mind that time/op and iowait/op vary by +/- 10% on every benchmark run. (Client time per operation is more consistent between benchmark runs.) Memory allocations are perfectly consistent between benchmark runs. 
129 | 130 | | Operation | time/op | iowait/op | client time / op | allocs | heap/op | 131 | |:---------:|:-------:|:---------:|:----------------:|:------:|:-------:| 132 | | Fetch | 418598ns| 413398ns | 5200ns | 6 | 550B | 133 | | Store | 782187ns| 775353ns | 6834ns | 5 | 750B | 134 | 135 | 136 | ## Design & TODOs 137 | 138 | This package is focused on using Riak the way it was intended: with `allow_mult` set to `true`. This library will *always* use vclocks when getting and setting values. Additionally, this library adheres strictly to Riak's read-before-write policy. 139 | 140 | Internally, Return-Head is always set to `true`, so every `Push()` or `Store()` operation updates the local object's metadata. Consequently, you can carry out a series of transactions on an object in parallel and still avoid conflicts. (`PushChangeset()` is particularly useful in this regard.) You can retreive the latest version of an object by calling Update(). 141 | 142 | The core "verbs" of this library (New, Fetch, Store, Push, Update, Delete) are meant to have intuitive and sane default behavior. For instance, New always asks Riak to abort the transaction if an object already exists at the given key, and Update doesn't return the whole body of the object back from the database if it hasn't been modified. 143 | 144 | Since garbage collection time bottlenecks many Go applications, a lot of effort was put into reducing memory allocations on database reads and writes. The implementation can only become more memory efficient when Go's escape analysis becomes less pessimistic about escaping pointers. 145 | 146 | There is an open issue for cache buckets, which has the potential to dramatically improve performance in query-heavy (2i, map-reduce, Yokozuna) use cases. There are also some open issues related to implementing Riak 2.0 features. 147 | 148 | 149 | ## License 150 | 151 | This code is MIT licensed. You may use it however you see fit. 
However, I would very much appreciate it if you create PRs in this repo for patches and improvements! 152 | 153 | -------------------------------------------------------------------------------- /_conf/riak.conf: -------------------------------------------------------------------------------- 1 | ## Where to emit the default log messages (typically at 'info' 2 | ## severity): 3 | ## off: disabled 4 | ## file: the file specified by log.console.file 5 | ## console: to standard output (seen when using `riak attach-direct`) 6 | ## both: log.console.file and standard out. 7 | ## 8 | ## Default: file 9 | ## 10 | ## Acceptable values: 11 | ## - one of: off, file, console, both 12 | log.console = file 13 | 14 | ## The severity level of the console log, default is 'info'. 15 | ## 16 | ## Default: info 17 | ## 18 | ## Acceptable values: 19 | ## - one of: debug, info, warning, error 20 | log.console.level = info 21 | 22 | ## When 'log.console' is set to 'file' or 'both', the file where 23 | ## console messages will be logged. 24 | ## 25 | ## Default: $(platform_log_dir)/console.log 26 | ## 27 | ## Acceptable values: 28 | ## - the path to a file 29 | log.console.file = $(platform_log_dir)/console.log 30 | 31 | ## The file where error messages will be logged. 32 | ## 33 | ## Default: $(platform_log_dir)/error.log 34 | ## 35 | ## Acceptable values: 36 | ## - the path to a file 37 | log.error.file = $(platform_log_dir)/error.log 38 | 39 | ## When set to 'on', enables log output to syslog. 40 | ## 41 | ## Default: off 42 | ## 43 | ## Acceptable values: 44 | ## - on or off 45 | log.syslog = off 46 | 47 | ## Whether to enable the crash log. 48 | ## 49 | ## Default: on 50 | ## 51 | ## Acceptable values: 52 | ## - on or off 53 | log.crash = on 54 | 55 | ## If the crash log is enabled, the file where its messages will 56 | ## be written. 
57 | ## 58 | ## Default: $(platform_log_dir)/crash.log 59 | ## 60 | ## Acceptable values: 61 | ## - the path to a file 62 | log.crash.file = $(platform_log_dir)/crash.log 63 | 64 | ## Maximum size in bytes of individual messages in the crash log 65 | ## 66 | ## Default: 64KB 67 | ## 68 | ## Acceptable values: 69 | ## - a byte size with units, e.g. 10GB 70 | log.crash.maximum_message_size = 64KB 71 | 72 | ## Maximum size of the crash log in bytes, before it is rotated 73 | ## 74 | ## Default: 10MB 75 | ## 76 | ## Acceptable values: 77 | ## - a byte size with units, e.g. 10GB 78 | log.crash.size = 10MB 79 | 80 | ## The schedule on which to rotate the crash log. For more 81 | ## information see: 82 | ## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation 83 | ## 84 | ## Default: $D0 85 | ## 86 | ## Acceptable values: 87 | ## - text 88 | log.crash.rotation = $D0 89 | 90 | ## The number of rotated crash logs to keep. When set to 91 | ## 'current', only the current open log file is kept. 92 | ## 93 | ## Default: 5 94 | ## 95 | ## Acceptable values: 96 | ## - an integer 97 | ## - the text "current" 98 | log.crash.rotation.keep = 5 99 | 100 | ## Name of the Erlang node 101 | ## 102 | ## Default: riak@127.0.0.1 103 | ## 104 | ## Acceptable values: 105 | ## - text 106 | nodename = riak@127.0.0.1 107 | 108 | ## Cookie for distributed node communication. All nodes in the 109 | ## same cluster should use the same cookie or they will not be able to 110 | ## communicate. 111 | ## 112 | ## Default: riak 113 | ## 114 | ## Acceptable values: 115 | ## - text 116 | distributed_cookie = riak 117 | 118 | ## Sets the number of threads in async thread pool, valid range 119 | ## is 0-1024. If thread support is available, the default is 64. 
120 | ## More information at: http://erlang.org/doc/man/erl.html 121 | ## 122 | ## Default: 64 123 | ## 124 | ## Acceptable values: 125 | ## - an integer 126 | erlang.async_threads = 64 127 | 128 | ## The number of concurrent ports/sockets 129 | ## Valid range is 1024-134217727 130 | ## 131 | ## Default: 65536 132 | ## 133 | ## Acceptable values: 134 | ## - an integer 135 | erlang.max_ports = 65536 136 | 137 | ## Number of partitions in the cluster (only valid when first 138 | ## creating the cluster). Must be a power of 2, minimum 8 and maximum 139 | ## 1024. 140 | ## 141 | ## Default: 64 142 | ## 143 | ## Acceptable values: 144 | ## - an integer 145 | ## ring_size = 64 146 | 147 | ## Number of concurrent node-to-node transfers allowed. 148 | ## 149 | ## Default: 2 150 | ## 151 | ## Acceptable values: 152 | ## - an integer 153 | ## transfer_limit = 2 154 | 155 | ## Default cert location for https can be overridden 156 | ## with the ssl config variable, for example: 157 | ## 158 | ## Acceptable values: 159 | ## - the path to a file 160 | ## ssl.certfile = $(platform_etc_dir)/cert.pem 161 | 162 | ## Default key location for https can be overridden with the ssl 163 | ## config variable, for example: 164 | ## 165 | ## Acceptable values: 166 | ## - the path to a file 167 | ## ssl.keyfile = $(platform_etc_dir)/key.pem 168 | 169 | ## Default signing authority location for https can be overridden 170 | ## with the ssl config variable, for example: 171 | ## 172 | ## Acceptable values: 173 | ## - the path to a file 174 | ## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem 175 | 176 | ## DTrace support Do not enable 'dtrace' unless your Erlang/OTP 177 | ## runtime is compiled to support DTrace. DTrace is available in 178 | ## R15B01 (supported by the Erlang/OTP official source package) and in 179 | ## R14B04 via a custom source repository & branch. 
180 | ## 181 | ## Default: off 182 | ## 183 | ## Acceptable values: 184 | ## - on or off 185 | dtrace = off 186 | 187 | ## Platform-specific installation paths (substituted by rebar) 188 | ## 189 | ## Default: ./bin 190 | ## 191 | ## Acceptable values: 192 | ## - the path to a directory 193 | platform_bin_dir = ./bin 194 | 195 | ## 196 | ## Default: ./data 197 | ## 198 | ## Acceptable values: 199 | ## - the path to a directory 200 | platform_data_dir = ./data 201 | 202 | ## 203 | ## Default: ./etc 204 | ## 205 | ## Acceptable values: 206 | ## - the path to a directory 207 | platform_etc_dir = ./etc 208 | 209 | ## 210 | ## Default: ./lib 211 | ## 212 | ## Acceptable values: 213 | ## - the path to a directory 214 | platform_lib_dir = ./lib 215 | 216 | ## 217 | ## Default: ./log 218 | ## 219 | ## Acceptable values: 220 | ## - the path to a directory 221 | platform_log_dir = ./log 222 | 223 | ## Enable consensus subsystem. Set to 'on' to enable the 224 | ## consensus subsystem used for strongly consistent Riak operations. 225 | ## 226 | ## Default: off 227 | ## 228 | ## Acceptable values: 229 | ## - on or off 230 | ## strong_consistency = on 231 | 232 | ## listener.http. is an IP address and TCP port that the Riak 233 | ## HTTP interface will bind. 234 | ## 235 | ## Default: 127.0.0.1:8098 236 | ## 237 | ## Acceptable values: 238 | ## - an IP/port pair, e.g. 127.0.0.1:10011 239 | listener.http.internal = 127.0.0.1:8098 240 | 241 | ## listener.protobuf. is an IP address and TCP port that the Riak 242 | ## Protocol Buffers interface will bind. 243 | ## 244 | ## Default: 127.0.0.1:8087 245 | ## 246 | ## Acceptable values: 247 | ## - an IP/port pair, e.g. 127.0.0.1:10011 248 | listener.protobuf.internal = 127.0.0.1:8087 249 | 250 | ## The maximum length to which the queue of pending connections 251 | ## may grow. If set, it must be an integer > 0. If you anticipate a 252 | ## huge number of connections being initialized *simultaneously*, set 253 | ## this number higher. 
254 | ## 255 | ## Default: 128 256 | ## 257 | ## Acceptable values: 258 | ## - an integer 259 | ## protobuf.backlog = 128 260 | 261 | ## listener.https. is an IP address and TCP port that the Riak 262 | ## HTTPS interface will bind. 263 | ## 264 | ## Acceptable values: 265 | ## - an IP/port pair, e.g. 127.0.0.1:10011 266 | ## listener.https.internal = 127.0.0.1:8098 267 | 268 | ## How Riak will repair out-of-sync keys. Some features require 269 | ## this to be set to 'active', including search. 270 | ## * active: out-of-sync keys will be repaired in the background 271 | ## * passive: out-of-sync keys are only repaired on read 272 | ## * active-debug: like active, but outputs verbose debugging 273 | ## information 274 | ## 275 | ## Default: active 276 | ## 277 | ## Acceptable values: 278 | ## - one of: active, passive, active-debug 279 | anti_entropy = active 280 | 281 | ## Specifies the storage engine used for Riak's key-value data 282 | ## and secondary indexes (if supported). 283 | ## 284 | ## Default: bitcask 285 | ## 286 | ## Acceptable values: 287 | ## - one of: bitcask, leveldb, memory, multi 288 | storage_backend = multi 289 | 290 | ## Controls which binary representation of a riak value is stored 291 | ## on disk. 292 | ## * 0: Original erlang:term_to_binary format. Higher space overhead. 293 | ## * 1: New format for more compact storage of small values. 294 | ## 295 | ## Default: 1 296 | ## 297 | ## Acceptable values: 298 | ## - the integer 1 299 | ## - the integer 0 300 | object.format = 1 301 | 302 | ## Reading or writing objects bigger than this size will write a 303 | ## warning in the logs. 304 | ## 305 | ## Default: 5MB 306 | ## 307 | ## Acceptable values: 308 | ## - a byte size with units, e.g. 10GB 309 | object.size.warning_threshold = 5MB 310 | 311 | ## Writing an object bigger than this will send a failure to the 312 | ## client. 313 | ## 314 | ## Default: 50MB 315 | ## 316 | ## Acceptable values: 317 | ## - a byte size with units, e.g. 
10GB 318 | object.size.maximum = 50MB 319 | 320 | ## Writing an object with more than this number of siblings will 321 | ## generate a warning in the logs. 322 | ## 323 | ## Default: 25 324 | ## 325 | ## Acceptable values: 326 | ## - an integer 327 | object.siblings.warning_threshold = 25 328 | 329 | ## Writing an object with more than this number of siblings will 330 | ## send a failure to the client. 331 | ## 332 | ## Default: 100 333 | ## 334 | ## Acceptable values: 335 | ## - an integer 336 | object.siblings.maximum = 100 337 | 338 | ## A path under which bitcask data files will be stored. 339 | ## 340 | ## Default: $(platform_data_dir)/bitcask 341 | ## 342 | ## Acceptable values: 343 | ## - the path to a directory 344 | bitcask.data_root = $(platform_data_dir)/bitcask 345 | 346 | ## Configure how Bitcask writes data to disk. 347 | ## erlang: Erlang's built-in file API 348 | ## nif: Direct calls to the POSIX C API 349 | ## The NIF mode provides higher throughput for certain 350 | ## workloads, but has the potential to negatively impact 351 | ## the Erlang VM, leading to higher worst-case latencies 352 | ## and possible throughput collapse. 353 | ## 354 | ## Default: erlang 355 | ## 356 | ## Acceptable values: 357 | ## - one of: erlang, nif 358 | bitcask.io_mode = erlang 359 | 360 | ## Set to 'off' to disable the admin panel. 361 | ## 362 | ## Default: off 363 | ## 364 | ## Acceptable values: 365 | ## - on or off 366 | riak_control = off 367 | 368 | ## Authentication mode used for access to the admin panel. 369 | ## 370 | ## Default: off 371 | ## 372 | ## Acceptable values: 373 | ## - one of: off, userlist 374 | riak_control.auth.mode = off 375 | 376 | ## If riak control's authentication mode (riak_control.auth.mode) 377 | ## is set to 'userlist' then this is the list of usernames and 378 | ## passwords for access to the admin panel. 
379 | ## 380 | ## Acceptable values: 381 | ## - text 382 | ## riak_control.auth.user.user.password = pass 383 | 384 | ## This parameter defines the percentage of total server memory 385 | ## to assign to LevelDB. LevelDB will dynamically adjust its internal 386 | ## cache sizes to stay within this size. The memory size can 387 | ## alternately be assigned as a byte count via leveldb.maximum_memory 388 | ## instead. 389 | ## 390 | ## Default: 70 391 | ## 392 | ## Acceptable values: 393 | ## - an integer 394 | leveldb.maximum_memory.percent = 70 395 | 396 | ## To enable Search set this 'on'. 397 | ## 398 | ## Default: off 399 | ## 400 | ## Acceptable values: 401 | ## - on or off 402 | search = off 403 | 404 | ## How long Riak will wait for Solr to start. The start sequence 405 | ## will be tried twice. If both attempts timeout, then the Riak node 406 | ## will be shutdown. This may need to be increased as more data is 407 | ## indexed and Solr takes longer to start. Values lower than 1s will 408 | ## be rounded up to the minimum 1s. 409 | ## 410 | ## Default: 30s 411 | ## 412 | ## Acceptable values: 413 | ## - a time duration with units, e.g. '10s' for 10 seconds 414 | search.solr.start_timeout = 30s 415 | 416 | ## The port number which Solr binds to. 417 | ## NOTE: Binds on every interface. 418 | ## 419 | ## Default: 8093 420 | ## 421 | ## Acceptable values: 422 | ## - an integer 423 | search.solr.port = 8093 424 | 425 | ## The port number which Solr JMX binds to. 426 | ## NOTE: Binds on every interface. 427 | ## 428 | ## Default: 8985 429 | ## 430 | ## Acceptable values: 431 | ## - an integer 432 | search.solr.jmx_port = 8985 433 | 434 | ## The options to pass to the Solr JVM. Non-standard options, 435 | ## i.e. -XX, may not be portable across JVM implementations. 436 | ## E.g. 
-XX:+UseCompressedStrings 437 | ## 438 | ## Default: -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops 439 | ## 440 | ## Acceptable values: 441 | ## - text 442 | search.solr.jvm_options = -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops 443 | 444 | ## allow multiple response 445 | buckets.default.allow_mult = true 446 | 447 | ## no lww 448 | buckets.default.last_write_wins = false 449 | 450 | ## default backend is leveldb 451 | multi_backend.default = std 452 | 453 | ## name 'cache' and 'std' backends 454 | multi_backend.cache.storage_backend = memory 455 | multi_backend.std.storage_backend = leveldb 456 | 457 | ## memory backend ttl 458 | memory_backend.ttl = 30s 459 | 460 | ## memory backend mem per vnode - adjust this in production 461 | memory_backend.max_memory_per_vnode = 64MB -------------------------------------------------------------------------------- /_examples/multi_async.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/philhofer/rkive" 7 | ) 8 | 9 | // This example demonstrates 10 | // how to use the Duplicator 11 | // interface to use the "Async" methods 12 | // defined by the client. 13 | 14 | // "Person" will be the data-type used for this example 15 | type Person struct { 16 | // we need to embed the 'Info' field 17 | // in order to fulfill the rkive.Object interface (see person.Info()) 18 | info rkive.Info 19 | 20 | First string `json:"first_name"` // first name 21 | Last string `json:"last_name"` // last name 22 | } 23 | 24 | // Implementing the Info method is as simple as 25 | // returning an inline reference to the embedded field 26 | func (p *Person) Info() *rkive.Info { return &p.info } 27 | 28 | // Marshal needs to marshal an object into bytes. 
29 | // Here we simply use json.Marshal 30 | func (p *Person) Marshal() ([]byte, error) { 31 | return json.Marshal(p) 32 | } 33 | 34 | // Unmarshal needs to unmarshal an object from bytes. 35 | // Since we used json.Marshal, we need json.Unmarshal here. 36 | func (p *Person) Unmarshal(b []byte) error { 37 | return json.Unmarshal(b, p) 38 | } 39 | 40 | // NewEmpty is the method required to satisfy the Duplicator 41 | // interface. It is almost always as simple as returning a reference 42 | // to a zero-initialized value of the same type as the method receiver. 43 | func (p *Person) NewEmpty() rkive.Object { return &Person{} } 44 | 45 | func main() { 46 | 47 | // first, we need to set up a client. make sure 48 | // you have the database running locally first. 49 | riak, err := rkive.DialOne("localhost:8087", "demo-client") 50 | 51 | // we'll use the "people" bucket for Person structs 52 | people := riak.Bucket("people") 53 | 54 | // let's make some people 55 | // and put them in the database 56 | bob := &Person{ 57 | First: "Bob", 58 | Last: "Johnson", 59 | } 60 | 61 | joe := &Person{ 62 | First: "Joe", 63 | Last: "Johnson", 64 | } 65 | 66 | // here we're putting "bob" in the "people" 67 | // bucket under the key "bob" 68 | err = people.New(bob, &bob.First) 69 | if err != nil { 70 | panic(err) 71 | } 72 | 73 | // ... and we'll do the same with joe 74 | err = people.New(joe, &joe.First) 75 | if err != nil { 76 | panic(err) 77 | } 78 | 79 | // Now this is where things get more interesting. 80 | // We can retrieve both the "bob" and "joe" objects 81 | // asynchronously: 82 | results := people.MultiFetchAsync(&Person{}, 2, "Bob", "Joe") 83 | 84 | // and now we can iterate through 85 | // the results and print them out. 86 | for res := range results { 87 | // results return a "Value" field 88 | // and an "Error" field. 
89 | if res.Error != nil { 90 | panic(err) 91 | } 92 | 93 | // res.Value can always be type-asserted 94 | // to the same type as the result returned 95 | // from NewEmpty() 96 | person := res.Value.(*Person) 97 | fmt.Printf("%s %s\n", person.First, person.Last) 98 | } 99 | 100 | // Here's another trick: let's make 101 | // people query-able by their last name. 102 | // We'll add a secondary index field called 103 | // "lastname" that contains the last name of the person. 104 | bob.Info().AddIndex("lastname", bob.Last) 105 | joe.Info().AddIndex("lastname", joe.Last) 106 | 107 | // now we need to push the changes 108 | // to those objects back to the database 109 | err = people.Push(bob) 110 | if err != nil { 111 | panic(err) 112 | } 113 | err = people.Push(joe) 114 | if err != nil { 115 | panic(err) 116 | } 117 | 118 | // now we can fetch all people 119 | // with the last name "Johnson": 120 | res, err := people.IndexLookup("lastname", "Johnson") 121 | if err != nil { 122 | panic(err) 123 | } 124 | 125 | // Riak only returns keys for secondary 126 | // index queries. However, rkive gives you 127 | // a quick way to fetch all of them that 128 | // looks a lot like the one we used before: 129 | stream := res.FetchAsync(&Person{}, 2) 130 | 131 | // ... and we can use it the same way: 132 | fmt.Print("\n") 133 | fmt.Println("All the Johnsons: ") 134 | fmt.Println("------------------") 135 | for v := range stream { 136 | person := v.Value.(*Person) 137 | fmt.Printf("%s %s\n", person.First, person.Last) 138 | } 139 | 140 | // now let's do some cleanup. 
141 | riak.Delete(bob, nil) 142 | riak.Delete(joe, nil) 143 | } 144 | -------------------------------------------------------------------------------- /bucket.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "github.com/philhofer/rkive/rpbc" 5 | "sync" 6 | ) 7 | 8 | // Bucket represents a Riak bucket 9 | type Bucket struct { 10 | c *Client 11 | nm string 12 | } 13 | 14 | // Bucket returns a Riak bucket 15 | // with the provided name 16 | func (c *Client) Bucket(name string) *Bucket { return &Bucket{c: c, nm: name} } 17 | 18 | // Fetch performs a fetch with the bucket's default properties 19 | func (b *Bucket) Fetch(o Object, key string) error { return b.c.Fetch(o, b.nm, key, nil) } 20 | 21 | // New performs a new store with the bucket's default properties 22 | func (b *Bucket) New(o Object, key *string) error { return b.c.New(o, b.nm, key, nil) } 23 | 24 | // Push pushes an object with a bucket's default properties 25 | func (b *Bucket) Push(o Object) error { return b.c.Push(o, nil) } 26 | 27 | // Store stores an object with a bucket's default properties 28 | func (b *Bucket) Store(o Object) error { return b.c.Store(o, nil) } 29 | 30 | // Update updates an object in a bucket 31 | func (b *Bucket) Update(o Object) (bool, error) { return b.c.Update(o, nil) } 32 | 33 | // Overwrite performs an overwrite on the specified key 34 | func (b *Bucket) Overwrite(o Object, key string) error { return b.c.Overwrite(o, b.nm, key, nil) } 35 | 36 | // IndexLookup performs a secondary index query on the bucket 37 | func (b *Bucket) IndexLookup(idx string, val string) (*IndexQueryRes, error) { 38 | return b.c.IndexLookup(b.nm, idx, val, nil) 39 | } 40 | 41 | // IndexRange performs a secondary index range query on the bucket 42 | func (b *Bucket) IndexRange(idx string, min int64, max int64) (*IndexQueryRes, error) { 43 | return b.c.IndexRange(b.nm, idx, min, max, nil) 44 | } 45 | 46 | // GetProperties 
retreives the properties of the bucket 47 | func (b *Bucket) GetProperties() (*rpbc.RpbBucketProps, error) { 48 | req := &rpbc.RpbGetBucketReq{ 49 | Bucket: []byte(b.nm), 50 | } 51 | res := &rpbc.RpbGetBucketResp{} 52 | _, err := b.c.req(req, 19, res) 53 | return res.GetProps(), err 54 | } 55 | 56 | // SetProperties sets the properties of the bucket 57 | func (b *Bucket) SetProperties(props *rpbc.RpbBucketProps) error { 58 | req := &rpbc.RpbSetBucketReq{ 59 | Bucket: ustr(b.nm), 60 | Props: props, 61 | } 62 | _, err := b.c.req(req, 21, nil) 63 | return err 64 | } 65 | 66 | var ( 67 | // properties for memory-backed cache bucket 68 | cacheProps = rpbc.RpbBucketProps{ 69 | Backend: []byte("cache"), // this has to come from the riak.conf 70 | NotfoundOk: &ptrTrue, 71 | AllowMult: &ptrFalse, 72 | LastWriteWins: &ptrFalse, 73 | BasicQuorum: &ptrFalse, 74 | NVal: &ptrOne, 75 | R: &ptrOne, 76 | W: &ptrOne, 77 | } 78 | ) 79 | 80 | // MakeCache makes a memory-backed cache bucket. You will 81 | // most likely need the following options enabled in your riak.conf: 82 | // 83 | // # this enables multiple backends 84 | // storage_backend = multi 85 | // 86 | // # this creates a backend called 'cache' backed by RAM 87 | // multi_backend.cache.storage_backend = memory 88 | // 89 | // # this makes a backend called 'std' and sets its storage backend 90 | // # (you can name this one whatever you would like) 91 | // multi_backend.std.storage_backend = 92 | // multi_backend.default = std 93 | // 94 | // MakeCache will error if your configuration is incorrect. 95 | // 96 | // NB: keep in mind that this bucket will only be backed by RAM and 97 | // uses no replication. This bucket should only be used to store 98 | // ephemeral objects. 
99 | func (b *Bucket) MakeCache() error { 100 | return b.SetProperties(&cacheProps) 101 | } 102 | 103 | // Reset resets the bucket's properties 104 | func (b *Bucket) Reset() error { 105 | req := &rpbc.RpbResetBucketReq{ 106 | Bucket: ustr(b.nm), 107 | } 108 | code, err := b.c.req(req, 29, nil) 109 | if err != nil { 110 | return err 111 | } 112 | if code != 30 { 113 | return ErrUnexpectedResponse 114 | } 115 | return nil 116 | } 117 | 118 | // MultiFetchAsync returns fetch results as a future. Results 119 | // may return in any order. Every result on the channel will 120 | // have its "Value" field type-assertable to the underlying type of 'o'. 121 | // 'procs' goroutines will be used for fetching. 122 | func (b *Bucket) MultiFetchAsync(o Duplicator, procs int, keys ...string) <-chan *AsyncFetch { 123 | if procs <= 0 { 124 | procs = 1 125 | } 126 | kc := make(chan string, len(keys)) 127 | out := make(chan *AsyncFetch, len(keys)) 128 | 129 | wg := new(sync.WaitGroup) 130 | for i := 0; i < procs; i++ { 131 | wg.Add(1) 132 | go func() { 133 | for key := range kc { 134 | v := o.NewEmpty() 135 | err := b.Fetch(v, key) 136 | out <- &AsyncFetch{v, err} 137 | } 138 | wg.Done() 139 | }() 140 | } 141 | go func() { 142 | wg.Wait() 143 | close(out) 144 | }() 145 | for _, k := range keys { 146 | kc <- k 147 | } 148 | close(kc) 149 | 150 | return out 151 | } 152 | -------------------------------------------------------------------------------- /bucket_type.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "github.com/philhofer/rkive/rpbc" 5 | ) 6 | 7 | // GetBucketTypeProperties gets the bucket properties 8 | // associated with a given bucket type. 9 | // *NOTE* bucket types are a Riak 2.0 feature. 
10 | func (c *Client) GetBucketTypeProperties(typeName string) (*rpbc.RpbBucketProps, error) { 11 | req := &rpbc.RpbGetBucketTypeReq{} 12 | // unsafe string is allowed b/c typeName 13 | // it is not referenced outside of this scope 14 | req.Type = ustr(typeName) 15 | res := &rpbc.RpbBucketProps{} 16 | _, err := c.req(req, 31, res) 17 | return res, err 18 | } 19 | 20 | // SetBucketTypeProperties sets the bucket properties 21 | // associated with a given bucket type. 22 | // *NOTE* bucket types are a Riak 2.0 feature. 23 | func (c *Client) SetBucketTypeProperties(typeName string, props *rpbc.RpbBucketProps) error { 24 | req := &rpbc.RpbSetBucketReq{} 25 | req.Props = props 26 | _, err := c.req(req, 32, nil) 27 | return err 28 | } 29 | -------------------------------------------------------------------------------- /bucket_type_test.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | "bytes" 7 | check "gopkg.in/check.v1" 8 | "time" 9 | ) 10 | 11 | func (s *riakSuite) TestGetBucketTypeProperties(c *check.C) { 12 | c.Skip("not implemented") 13 | } 14 | 15 | func (s *riakSuite) TestCache(c *check.C) { 16 | startt := time.Now() 17 | 18 | cache := s.cl.Bucket("test-cache") 19 | err := cache.MakeCache() 20 | if err != nil { 21 | c.Fatal(err) 22 | } 23 | props, err := cache.GetProperties() 24 | if err != nil { 25 | c.Error(err) 26 | } 27 | if !bytes.Equal(props.GetBackend(), []byte("cache")) { 28 | c.Errorf("Expected backend %q; got %q", "cache", props.GetBackend()) 29 | } 30 | 31 | // cache buckets are the only place 32 | // in which Overwrite is safe... 
33 | 34 | ob := &TestObject{ 35 | Data: []byte("Save this."), 36 | } 37 | 38 | err = cache.New(ob, nil) 39 | if err != nil { 40 | c.Error(err) 41 | } 42 | 43 | ob2 := &TestObject{ 44 | Data: []byte("overwrite!"), 45 | } 46 | 47 | err = cache.Overwrite(ob2, ob.Info().Key()) 48 | if err != nil { 49 | c.Error(err) 50 | } 51 | 52 | var upd bool 53 | upd, err = cache.Update(ob) 54 | if err != nil { 55 | c.Error(err) 56 | } 57 | if !upd { 58 | c.Error("Expected update.") 59 | } 60 | 61 | if !bytes.Equal(ob.Data, []byte("overwrite!")) { 62 | c.Errorf("Expected body %q; got %q", []byte("overwrite!"), ob.Data) 63 | } 64 | 65 | s.runtime += time.Since(startt) 66 | } 67 | -------------------------------------------------------------------------------- /buf.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "encoding/binary" 5 | "sync" 6 | ) 7 | 8 | var bufPool *sync.Pool 9 | 10 | func init() { 11 | bufPool = new(sync.Pool) 12 | bufPool.New = func() interface{} { 13 | return &buf{Body: make([]byte, 512)} 14 | } 15 | } 16 | 17 | type buf struct { 18 | Body []byte 19 | } 20 | 21 | // proto message w/ Size and MarshalTo 22 | type protom interface { 23 | MarshalTo([]byte) (n int, err error) 24 | Size() int 25 | } 26 | 27 | // opportunistic MarshalTo; leaves Body[4] open for code 28 | func (b *buf) Set(p protom) error { 29 | sz := p.Size() 30 | bsz := sz + 5 31 | if cap(b.Body) >= bsz { 32 | b.Body = b.Body[0:bsz] 33 | } else { 34 | b.Body = make([]byte, bsz) 35 | } 36 | binary.BigEndian.PutUint32(b.Body, uint32(sz+1)) 37 | _, err := p.MarshalTo(b.Body[5:]) 38 | return err 39 | } 40 | 41 | func (b *buf) setSz(n int) { 42 | if cap(b.Body) >= n { 43 | b.Body = b.Body[0:n] 44 | } else { 45 | b.Body = make([]byte, n) 46 | } 47 | } 48 | 49 | func getBuf() *buf { 50 | return bufPool.Get().(*buf) 51 | } 52 | 53 | func putBuf(b *buf) { b.Body = b.Body[:cap(b.Body)]; bufPool.Put(b) } 54 | 
-------------------------------------------------------------------------------- /build_bench_test.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | "runtime" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func BenchmarkStore(b *testing.B) { 12 | cl, err := DialOne("localhost:8087", "bench-client") 13 | if err != nil { 14 | b.Fatal(err) 15 | } 16 | 17 | b.N /= 10 18 | ob := &TestObject{ 19 | Data: []byte("Hello World"), 20 | } 21 | 22 | err = cl.New(ob, "tesbucket", nil, nil) 23 | if err != nil { 24 | b.Fatal(err) 25 | } 26 | 27 | b.ReportAllocs() 28 | b.ResetTimer() 29 | for i := 0; i < b.N; i++ { 30 | err = cl.Store(ob, nil) 31 | if err != nil { 32 | b.Fatal(err) 33 | } 34 | } 35 | b.StopTimer() 36 | b.Logf("Avg iowait: %s", time.Duration(cl.AvgWait())) 37 | cl.Close() 38 | } 39 | 40 | func BenchmarkParallelStore(b *testing.B) { 41 | b.Skip("Doesn't run by default.") 42 | cl, err := DialOne("localhost:8087", "bench-client") 43 | if err != nil { 44 | b.Fatal(err) 45 | } 46 | defer cl.Close() 47 | b.N /= 10 48 | b.SetParallelism(maxConns / runtime.GOMAXPROCS(0)) 49 | b.ResetTimer() 50 | b.RunParallel(func(pb *testing.PB) { 51 | obj := &TestObject{ 52 | Data: []byte("Hello World"), 53 | } 54 | err = cl.New(obj, "testbucket", nil, nil) 55 | if err != nil { 56 | b.Fatal(err) 57 | } 58 | for pb.Next() { 59 | cl.Store(obj, nil) 60 | } 61 | }) 62 | b.StopTimer() 63 | } 64 | 65 | func BenchmarkFetch(b *testing.B) { 66 | cl, err := DialOne("localhost:8087", "bench-client") 67 | if err != nil { 68 | b.Fatal(err) 69 | } 70 | 71 | b.N /= 10 72 | ob := &TestObject{ 73 | Data: []byte("Hello World"), 74 | } 75 | 76 | err = cl.New(ob, "testbucket", nil, nil) 77 | if err != nil { 78 | b.Fatal(err) 79 | } 80 | 81 | key := ob.Info().Key() 82 | 83 | b.ReportAllocs() 84 | b.ResetTimer() 85 | for i := 0; i < b.N; i++ { 86 | err = cl.Fetch(ob, "testbucket", key, nil) 87 | if err != nil { 88 | 
b.Fatal(err) 89 | } 90 | } 91 | b.StopTimer() 92 | b.Logf("Avg iowait: %s", time.Duration(cl.AvgWait())) 93 | cl.Close() 94 | } 95 | 96 | func BenchmarkParallelFetch(b *testing.B) { 97 | b.Skip("Doesn't run by default.") 98 | cl, err := DialOne("localhost:8087", "bench-client") 99 | if err != nil { 100 | b.Fatal(err) 101 | } 102 | defer cl.Close() 103 | b.N /= 10 104 | b.SetParallelism(maxConns / runtime.GOMAXPROCS(0)) 105 | b.ResetTimer() 106 | b.RunParallel(func(pb *testing.PB) { 107 | obj := &TestObject{ 108 | Data: []byte("Hello World"), 109 | } 110 | err = cl.New(obj, "testbucket", nil, nil) 111 | if err != nil { 112 | b.Fatal(err) 113 | } 114 | for pb.Next() { 115 | cl.Fetch(obj, "testbucket", obj.Info().Key(), nil) 116 | } 117 | }) 118 | b.StopTimer() 119 | } 120 | 121 | func BenchmarkCacheFetch(b *testing.B) { 122 | cl, err := DialOne("localhost:8087", "bench-client") 123 | if err != nil { 124 | b.Fatal(err) 125 | } 126 | defer cl.Close() 127 | b.N /= 10 128 | cache := cl.Bucket("test-cache") 129 | err = cache.MakeCache() 130 | if err != nil { 131 | b.Fatal(err) 132 | } 133 | 134 | // make test object 135 | ob := &TestObject{ 136 | Data: []byte("Hello, World!"), 137 | } 138 | 139 | err = cache.New(ob, nil) 140 | if err != nil { 141 | b.Fatal(err) 142 | } 143 | 144 | key := ob.Info().Key() 145 | 146 | b.ReportAllocs() 147 | b.ResetTimer() 148 | for i := 0; i < b.N; i++ { 149 | err = cache.Fetch(ob, key) 150 | if err != nil { 151 | b.Fatal(err) 152 | } 153 | } 154 | } 155 | 156 | func BenchmarkCounterInc(b *testing.B) { 157 | b.Skip("Doesn't run by default.") 158 | 159 | cl, err := DialOne("localhost:8087", "bench-client") 160 | if err != nil { 161 | b.Fatal(err) 162 | } 163 | defer cl.Close() 164 | b.N /= 10 165 | ctr, err := cl.Bucket("testbucket").NewCounter("bench-counter", 0) 166 | if err != nil { 167 | b.Fatal(err) 168 | } 169 | 170 | b.ReportAllocs() 171 | b.ResetTimer() 172 | for i := 0; i < b.N; i++ { 173 | err = ctr.Add(1) 174 | if err != nil { 175 
| b.Fatal(err) 176 | } 177 | } 178 | b.StopTimer() 179 | ctr.Destroy() 180 | } 181 | -------------------------------------------------------------------------------- /changeset.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | var ( 9 | ErrDone = errors.New("done") 10 | ) 11 | 12 | // PushChangeset pushes a changeset to an object, handling the case 13 | // in which the object has been updated in the database since the last 14 | // local fetch. The 'chng' function should check if the change that it wanted 15 | // already happened, and return ErrDone in that case. The 'chng' function is allowed 16 | // to type-assert its argument to the underlying type of 'o'. 17 | func (c *Client) PushChangeset(o Object, chng func(Object) error, opts *WriteOpts) error { 18 | err := chng(o) 19 | if err != nil { 20 | return err 21 | } 22 | nmerge := 0 23 | push: 24 | err = c.Push(o, opts) 25 | if err == ErrModified { 26 | var upd bool 27 | nmerge++ 28 | if nmerge > maxMerges { 29 | return fmt.Errorf("exceeded max merges: %s", err) 30 | } 31 | upd, err = c.Update(o, nil) 32 | if err != nil { 33 | return err 34 | } 35 | if !upd { 36 | return errors.New("failure updating...") 37 | } 38 | err = chng(o) 39 | if err == ErrDone { 40 | return nil 41 | } 42 | goto push 43 | } 44 | return err 45 | } 46 | -------------------------------------------------------------------------------- /client.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "fmt" 7 | "github.com/philhofer/rkive/rpbc" 8 | "io" 9 | "log" 10 | "math/rand" 11 | "net" 12 | "os" 13 | "runtime" 14 | "sync" 15 | "sync/atomic" 16 | "time" 17 | ) 18 | 19 | var ( 20 | // ErrClosed is returned when the 21 | // an attempt is made to make a request 22 | // with a closed clinet 23 | ErrClosed = errors.New("client closed") 24 | 25 
| // ErrUnavail is returned when the client 26 | // is unable to successfully dial any 27 | // Riak node. 28 | ErrUnavail = errors.New("no connection to could be established") 29 | 30 | logger = log.New(os.Stderr, "[RKIVE] ", log.LstdFlags) 31 | 32 | // since protocol buffers 33 | // use pointers for optional fields, 34 | // let's create some static references: 35 | ptrTrue = true 36 | ptrFalse = false 37 | ptrOne uint32 = 1 38 | ptrZero uint32 = 0 39 | ) 40 | 41 | // read timeout (ms) 42 | const readTimeout = 1000 43 | 44 | // write timeout (ms) 45 | const writeTimeout = 1000 46 | 47 | // max connection limit 48 | const maxConns = 30 49 | 50 | // RiakError is an error 51 | // returned from the Riak server 52 | // iteself. 53 | type RiakError struct { 54 | res *rpbc.RpbErrorResp 55 | } 56 | 57 | func (r RiakError) Error() string { 58 | return fmt.Sprintf("riak error (0): %s", r.res.GetErrmsg()) 59 | } 60 | 61 | // ErrMultipleResponses is the type 62 | // of error returned when multiple 63 | // siblings are retrieved for an object. 64 | type ErrMultipleResponses struct { 65 | Bucket string 66 | Key string 67 | NumSiblings int 68 | } 69 | 70 | func (m *ErrMultipleResponses) Error() string { 71 | return fmt.Sprintf("%d siblings found", m.NumSiblings) 72 | } 73 | 74 | // Blob is a generic riak key/value container that 75 | // implements the Object interface. 76 | type Blob struct { 77 | RiakInfo Info 78 | Content []byte 79 | } 80 | 81 | // generate *ErrMultipleResponses from multiple contents 82 | func handleMultiple(n int, key, bucket string) *ErrMultipleResponses { 83 | return &ErrMultipleResponses{ 84 | Bucket: bucket, 85 | Key: key, 86 | NumSiblings: n, 87 | } 88 | } 89 | 90 | // Info implements part of the Object interface. 
91 | func (r *Blob) Info() *Info { return &r.RiakInfo } 92 | 93 | // Unmarshal implements part of the Object interface 94 | func (r *Blob) Unmarshal(b []byte) error { r.Content = b; return nil } 95 | 96 | // Marshal implements part of the Object interface 97 | func (r *Blob) Marshal() ([]byte, error) { return r.Content, nil } 98 | 99 | // conn is a connection 100 | type conn struct { 101 | *net.TCPConn // underlying connection 102 | parent *Client // parent Client 103 | isClosed bool // has Close() been called? 104 | } 105 | 106 | // write wraps the TCP write 107 | func (c *conn) Write(b []byte) (int, error) { 108 | c.SetWriteDeadline(time.Now().Add(writeTimeout * time.Millisecond)) 109 | return c.TCPConn.Write(b) 110 | } 111 | 112 | // read wraps the TCP read 113 | func (c *conn) Read(b []byte) (int, error) { 114 | c.SetReadDeadline(time.Now().Add(readTimeout * time.Millisecond)) 115 | return c.TCPConn.Read(b) 116 | } 117 | 118 | // Close idempotently closes 119 | // the connection and decrements 120 | // the parent conn counter 121 | func (c *conn) Close() { 122 | if c.isClosed { 123 | return 124 | } 125 | c.isClosed = true 126 | logger.Printf("closing TCP connection to %s", c.RemoteAddr().String()) 127 | c.Close() 128 | c.parent.dec() 129 | } 130 | 131 | // Dial creates a client connected to one 132 | // or many Riak nodes. The client will attempt 133 | // to avoid using downed nodes. Dial returns an error 134 | // if it is unable to reach a good node. 
135 | func Dial(addrs []string, clientID string) (*Client, error) { 136 | naddrs := make([]*net.TCPAddr, len(addrs)) 137 | 138 | var err error 139 | for i, node := range addrs { 140 | naddrs[i], err = net.ResolveTCPAddr("tcp", node) 141 | if err != nil { 142 | return nil, err 143 | } 144 | } 145 | 146 | cl := &Client{ 147 | tag: 0, 148 | id: []byte(clientID), 149 | addrs: naddrs, 150 | } 151 | 152 | // fail on no dial-able nodes 153 | err = cl.Ping() 154 | if err != nil { 155 | cl.Close() 156 | return nil, err 157 | } 158 | 159 | return cl, nil 160 | } 161 | 162 | // DialOne returns a client that 163 | // always dials the same node. (See: Dial) 164 | func DialOne(addr string, clientID string) (*Client, error) { 165 | return Dial([]string{addr}, clientID) 166 | } 167 | 168 | // Close() idempotently closes the client. 169 | func (c *Client) Close() { 170 | if !atomic.CompareAndSwapInt32(&c.tag, 0, 1) { 171 | return 172 | } 173 | 174 | // wait for all connetions to end 175 | // up in the pool 176 | for atomic.LoadInt32(&c.inuse) > 0 { 177 | time.Sleep(2 * time.Millisecond) 178 | } 179 | 180 | // we'll hang if we don't make 181 | // the connection pool immediately 182 | // GC-able 183 | c.pool = sync.Pool{} 184 | 185 | runtime.GC() 186 | nspin := 0 187 | maxspin := 50 188 | // give up after 100ms just in case GC 189 | // fails to work as desired. 190 | for ; atomic.LoadInt32(&c.conns) > 0 && nspin < maxspin; nspin++ { 191 | time.Sleep(2 * time.Millisecond) 192 | } 193 | nc := atomic.LoadInt32(&c.conns) 194 | if nc > 0 { 195 | logger.Printf("unable to close %d conns after 100ms", nc) 196 | } 197 | } 198 | 199 | func (c *Client) closed() bool { 200 | return atomic.LoadInt32(&c.tag) == 1 201 | } 202 | 203 | // can we add another connection? 
204 | // if so, increment 205 | func (c *Client) try() bool { 206 | new := atomic.AddInt32(&c.conns, 1) 207 | if new > maxConns { 208 | atomic.AddInt32(&c.conns, -1) 209 | return false 210 | } 211 | return true 212 | } 213 | 214 | // decrement conn counter 215 | // MUST BE CALLED WHENEVER A CONNECTION 216 | // IS CLOSED, OR WE WILL HAVE PROBLEMS. 217 | func (c *Client) dec() { atomic.AddInt32(&c.conns, -1) } 218 | 219 | // newconn tries to return a valid 220 | // tcp connection to a node, dropping 221 | // failed connections. it should only 222 | // be called by popConn(). 223 | func (c *Client) newconn() (*conn, error) { 224 | 225 | // randomly shuffle the list 226 | // of addresses and then dial 227 | // them in (shuffled) order until 228 | // success 229 | perm := rand.Perm(len(c.addrs)) 230 | 231 | for _, v := range perm { 232 | addr := c.addrs[v] 233 | logger.Printf("dialing TCP %s", addr) 234 | tcpconn, err := net.DialTCP("tcp", nil, addr) 235 | if err != nil { 236 | logger.Printf("error dialing %s: %s", addr, err) 237 | continue 238 | } 239 | tcpconn.SetKeepAlive(true) 240 | tcpconn.SetNoDelay(true) 241 | out := &conn{ 242 | TCPConn: tcpconn, 243 | parent: c, 244 | isClosed: false, 245 | } 246 | err = c.writeClientID(out) 247 | if err != nil { 248 | // call the tcp connection's 249 | // close method, because otherwise 250 | // the client conn counter will 251 | // be decremented 252 | out.TCPConn.Close() 253 | logger.Printf("error writing client ID: %s", err) 254 | continue 255 | } 256 | runtime.SetFinalizer(out, (*conn).Close) 257 | return out, nil 258 | } 259 | c.dec() 260 | return nil, ErrUnavail 261 | } 262 | 263 | // pop connection 264 | func (c *Client) popConn() (*conn, error) { 265 | // spinlock (sort of) 266 | // on acquiring a connection 267 | for { 268 | if c.closed() { 269 | return nil, ErrClosed 270 | } 271 | cn, ok := c.pool.Get().(*conn) 272 | if ok && cn != nil { 273 | atomic.AddInt32(&c.inuse, 1) 274 | return cn, nil 275 | } 276 | if c.try() 
{ 277 | cn, err := c.newconn() 278 | if err != nil { 279 | return nil, err 280 | } 281 | atomic.AddInt32(&c.inuse, 1) 282 | return cn, nil 283 | } 284 | runtime.Gosched() 285 | } 286 | } 287 | 288 | func (c *Client) writeClientID(cn *conn) error { 289 | if c.id == nil { 290 | // writeClientID is used 291 | // to test if a node is actually 292 | // live, so we need to do *something* 293 | return ping(cn) 294 | } 295 | req := &rpbc.RpbSetClientIdReq{ 296 | ClientId: c.id, 297 | } 298 | bts, err := req.Marshal() 299 | if err != nil { 300 | return err 301 | } 302 | msglen := len(bts) + 1 303 | msg := make([]byte, msglen+4) 304 | binary.BigEndian.PutUint32(msg, uint32(msglen)) 305 | msg[4] = 5 // code for RpbSetClientIdReq 306 | copy(msg[5:], bts) 307 | _, err = cn.Write(msg) 308 | if err != nil { 309 | return err 310 | } 311 | _, err = io.ReadFull(cn, msg[:5]) 312 | if err != nil { 313 | return err 314 | } 315 | // expect response code 6 316 | if msg[4] != 6 { 317 | return ErrUnexpectedResponse 318 | } 319 | return nil 320 | } 321 | 322 | // readLead reads the size of the inbound message 323 | func readLead(n *conn) (int, byte, error) { 324 | var lead [5]byte 325 | _, err := io.ReadFull(n, lead[:]) 326 | if err != nil { 327 | return 0, lead[4], err 328 | } 329 | msglen := binary.BigEndian.Uint32(lead[:4]) - 1 330 | rescode := lead[4] 331 | return int(msglen), rescode, nil 332 | } 333 | 334 | // read response into 'b'; truncate or append if necessary. 
335 | // this is analagous to ReadFull into 'b', except that the buffer 336 | // may be extended, and is returned 337 | func readResponse(c *conn, b []byte) ([]byte, byte, error) { 338 | var n int 339 | var nn int 340 | b = b[:cap(b)] 341 | nn, err := c.Read(b) 342 | n += nn 343 | if err != nil { 344 | return nil, 0, err 345 | } 346 | b = b[:n] 347 | mlen := int(binary.BigEndian.Uint32(b[:4]) - 1) 348 | var scratch [512]byte 349 | for n < (mlen + 5) { 350 | nn, err = c.Read(scratch[:]) 351 | n += nn 352 | if err != nil { 353 | return b, b[4], err 354 | } 355 | b = append(b, scratch[:nn]...) 356 | } 357 | return b[5:n], b[4], err 358 | } 359 | 360 | func (c *Client) req(msg protom, code byte, res unmarshaler) (byte, error) { 361 | buf := getBuf() // maybe we've already allocated 362 | err := buf.Set(msg) 363 | if err != nil { 364 | return 0, fmt.Errorf("rkive: client.Req marshal err: %s", err) 365 | } 366 | resbts, rescode, err := c.doBuf(code, buf.Body) 367 | buf.Body = resbts // save the returned slice 368 | if err != nil { 369 | putBuf(buf) 370 | return 0, fmt.Errorf("rkive: doBuf err: %s", err) 371 | } 372 | if rescode == 0 { 373 | riakerr := new(rpbc.RpbErrorResp) 374 | err = riakerr.Unmarshal(resbts) 375 | putBuf(buf) 376 | if err != nil { 377 | return 0, err 378 | } 379 | return 0, RiakError{res: riakerr} 380 | } 381 | if res != nil { 382 | // expected response body, 383 | // but we got none 384 | if len(resbts) == 0 { 385 | putBuf(buf) 386 | return 0, ErrNotFound 387 | } 388 | err = res.Unmarshal(resbts) 389 | if err != nil { 390 | err = fmt.Errorf("rkive: unmarshal err: %s", err) 391 | } 392 | } 393 | putBuf(buf) // save the bytes we allocated 394 | return rescode, err 395 | } 396 | 397 | type protoStream interface { 398 | Unmarshal([]byte) error 399 | GetDone() bool 400 | } 401 | 402 | type unmarshaler interface { 403 | Unmarshal([]byte) error 404 | ProtoMessage() 405 | } 406 | 407 | // streaming response - 408 | // returns a primed connection 409 | type 
streamRes struct { 410 | c *Client 411 | node *conn 412 | } 413 | 414 | // unmarshals; returns done / code / error 415 | func (s *streamRes) unmarshal(res protoStream) (bool, byte, error) { 416 | var msglen int 417 | var code byte 418 | var err error 419 | 420 | msglen, code, err = readLead(s.node) 421 | if err != nil { 422 | s.c.err(s.node) 423 | return true, code, err 424 | } 425 | 426 | buf := getBuf() 427 | buf.setSz(msglen) 428 | 429 | // read into s.bts 430 | _, err = io.ReadFull(s.node, buf.Body) 431 | 432 | if err != nil { 433 | s.c.err(s.node) 434 | putBuf(buf) 435 | return true, code, err 436 | } 437 | // handle a code 0 438 | if code == 0 { 439 | // we're done 440 | s.close() 441 | 442 | riakerr := new(rpbc.RpbErrorResp) 443 | err = riakerr.Unmarshal(buf.Body) 444 | putBuf(buf) 445 | if err != nil { 446 | return true, 0, err 447 | } 448 | return true, 0, RiakError{res: riakerr} 449 | } 450 | 451 | err = res.Unmarshal(buf.Body) 452 | putBuf(buf) 453 | if err != nil { 454 | s.close() 455 | return true, code, err 456 | } 457 | done := res.GetDone() 458 | if done { 459 | s.close() 460 | } 461 | return done, code, nil 462 | } 463 | 464 | // return the connection to the client 465 | func (s *streamRes) close() { s.c.done(s.node) } 466 | 467 | func (c *Client) streamReq(req protom, code byte) (*streamRes, error) { 468 | 469 | buf := getBuf() 470 | err := buf.Set(req) 471 | if err != nil { 472 | putBuf(buf) 473 | return nil, err 474 | } 475 | node, err := c.popConn() 476 | if err != nil { 477 | return nil, err 478 | } 479 | 480 | buf.Body[4] = code 481 | _, err = node.Write(buf.Body) 482 | putBuf(buf) 483 | if err != nil { 484 | c.err(node) 485 | return nil, err 486 | } 487 | return &streamRes{c: c, node: node}, nil 488 | } 489 | 490 | // Ping pings a random node. 
491 | func (c *Client) Ping() error { 492 | conn, err := c.popConn() 493 | if err != nil { 494 | return err 495 | } 496 | err = ping(conn) 497 | if err != nil { 498 | conn.Close() 499 | return err 500 | } 501 | c.done(conn) 502 | return nil 503 | } 504 | 505 | func ping(cn *conn) error { 506 | _, err := cn.Write([]byte{0, 0, 0, 1, 1}) 507 | if err != nil { 508 | return err 509 | } 510 | var res [5]byte 511 | _, err = io.ReadFull(cn, res[:]) 512 | return err 513 | } 514 | -------------------------------------------------------------------------------- /client_benchmode.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | "fmt" 7 | "net" 8 | "sync" 9 | "sync/atomic" 10 | "time" 11 | ) 12 | 13 | func init() { 14 | fmt.Println("RKIVE: BENCHMARK MODE") 15 | } 16 | 17 | // Client represents a pool of connections 18 | // to a Riak cluster. 19 | type Client struct { 20 | conns int32 // total live conns 21 | pad1 [4]byte // 22 | inuse int32 // conns in use 23 | pad2 [4]byte // 24 | tag int32 // 0 = open; 1 = closed 25 | pad3 [4]byte // 26 | 27 | // BENCH-SPECIFIC 28 | nwait uint64 // number of roundtrips 29 | twait uint64 // time between Write() and Read() 30 | 31 | id []byte // client ID 32 | pool sync.Pool // connection pool 33 | addrs []*net.TCPAddr // node addrs 34 | } 35 | 36 | func (c *Client) doBuf(code byte, msg []byte) ([]byte, byte, error) { 37 | var retried bool 38 | try: 39 | node, err := c.popConn() 40 | if err != nil { 41 | return nil, 0, err 42 | } 43 | 44 | msg[4] = code 45 | 46 | // this is testing-specific in order to 47 | // time network i/o 48 | // BENCHMARKING 49 | if !retried { 50 | atomic.AddUint64(&c.nwait, 1) 51 | } 52 | startwrite := time.Now() 53 | // BENCHMARKING 54 | 55 | _, err = node.Write(msg) 56 | if err != nil { 57 | go c.err(node) 58 | 59 | // it could be the case that we pulled 60 | // a bad connection from the pool - we'll 61 | // attempt one 
retry 62 | if !retried { 63 | retried = true 64 | goto try 65 | } 66 | 67 | return nil, 0, err 68 | } 69 | msg, code, err = readResponse(node, msg) 70 | 71 | // testing-specific, again 72 | // BENCHMARKING 73 | atomic.AddUint64(&c.twait, uint64(time.Since(startwrite).Nanoseconds())) 74 | // BENCHMARKING 75 | 76 | if err == nil { 77 | c.done(node) 78 | } else { 79 | c.err(node) 80 | } 81 | return msg, code, nil 82 | } 83 | 84 | func (c *Client) AvgWait() uint64 { return atomic.LoadUint64(&c.twait) / atomic.LoadUint64(&c.nwait) } 85 | func (c *Client) TimerReset() { atomic.StoreUint64(&c.twait, 0); atomic.StoreUint64(&c.nwait, 0) } 86 | -------------------------------------------------------------------------------- /client_nobench.go: -------------------------------------------------------------------------------- 1 | // +build !riak 2 | 3 | package rkive 4 | 5 | import ( 6 | "net" 7 | "sync" 8 | ) 9 | 10 | // Client represents a pool of connections 11 | // to a Riak cluster. 12 | type Client struct { 13 | conns int32 // total live conns 14 | pad1 [4]byte // 15 | inuse int32 // conns in use 16 | pad2 [4]byte // 17 | tag int32 // 0 = open; 1 = closed; others reserved 18 | pad3 [4]byte // 19 | id []byte // client ID for writeClientID 20 | pool sync.Pool // connection pool 21 | addrs []*net.TCPAddr // addresses to dial 22 | } 23 | 24 | func (c *Client) doBuf(code byte, msg []byte) ([]byte, byte, error) { 25 | var retried bool 26 | try: 27 | node, err := c.popConn() 28 | if err != nil { 29 | return nil, 0, err 30 | } 31 | 32 | msg[4] = code 33 | _, err = node.Write(msg) 34 | if err != nil { 35 | go c.err(node) 36 | 37 | // it could be the case that we pulled 38 | // a bad connection from the pool - we'll 39 | // attempt one retry 40 | if !retried { 41 | retried = true 42 | goto try 43 | } 44 | 45 | return nil, 0, err 46 | } 47 | if err != nil { 48 | c.err(node) 49 | return nil, 0, err 50 | } 51 | msg, code, err = readResponse(node, msg) 52 | if err == nil { 53 | 
c.done(node) 54 | } else { 55 | c.err(node) 56 | } 57 | return msg, code, nil 58 | } 59 | -------------------------------------------------------------------------------- /client_norace.go: -------------------------------------------------------------------------------- 1 | // +build race 2 | 3 | package rkive 4 | 5 | import "sync/atomic" 6 | 7 | // finish node (success) 8 | func (c *Client) done(n *conn) { 9 | n.Close() 10 | atomic.AddInt32(&c.inuse, -1) 11 | } 12 | 13 | // finish node (err) 14 | func (c *Client) err(n *conn) { 15 | n.Close() 16 | atomic.AddInt32(&c.inuse, -1) 17 | } 18 | -------------------------------------------------------------------------------- /client_race.go: -------------------------------------------------------------------------------- 1 | // +build !race 2 | 3 | package rkive 4 | 5 | import "sync/atomic" 6 | 7 | // finish node (success) 8 | func (c *Client) done(n *conn) { 9 | if c.closed() { 10 | n.Close() 11 | } else { 12 | c.pool.Put(n) 13 | } 14 | atomic.AddInt32(&c.inuse, -1) 15 | } 16 | 17 | // finish node (err) 18 | func (c *Client) err(n *conn) { 19 | if c.closed() { 20 | n.Close() 21 | } else { 22 | err := ping(n) 23 | if err != nil { 24 | n.Close() 25 | } else { 26 | c.pool.Put(n) 27 | } 28 | } 29 | atomic.AddInt32(&c.inuse, -1) 30 | } 31 | -------------------------------------------------------------------------------- /client_test.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | check "gopkg.in/check.v1" 7 | "sync" 8 | ) 9 | 10 | func (s *riakAsync) TestRiakPing(c *check.C) { 11 | c.Log("Performing 4 x 50 pings...") 12 | 13 | wg := new(sync.WaitGroup) 14 | lock := new(sync.Mutex) 15 | wg.Add(4) 16 | for g := 0; g < 4; g++ { 17 | go func(c *check.C) { 18 | for i := 0; i < 50; i++ { 19 | err := s.cl.Ping() 20 | if err != nil { 21 | lock.Lock() 22 | c.Fatal(err) 23 | lock.Unlock() 24 | } 25 | } 26 | wg.Done() 27 | }(c) 28 | } 29 
| wg.Wait() 30 | } 31 | -------------------------------------------------------------------------------- /counter.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "github.com/philhofer/rkive/rpbc" 5 | ) 6 | 7 | // Counter is a Riak CRDT that 8 | // acts as a distributed counter. Counters 9 | // only work in buckets with 'allow_mult' turned on. 10 | type Counter struct { 11 | key []byte 12 | bucket []byte 13 | val int64 14 | parent *Client 15 | } 16 | 17 | // Val is the value of the counter 18 | func (c *Counter) Val() int64 { return c.val } 19 | 20 | // Bucket is the bucket of the counter 21 | func (c *Counter) Bucket() string { return string(c.bucket) } 22 | 23 | // Key is the key of the counter 24 | func (c *Counter) Key() string { return string(c.key) } 25 | 26 | // Add adds the value 'v' to the counter. 27 | func (c *Counter) Add(v int64) error { 28 | req := rpbc.RpbCounterUpdateReq{ 29 | Amount: &v, // new value 30 | Returnvalue: &ptrTrue, // return new value 31 | Key: c.key, // key 32 | Bucket: c.bucket, // bucket 33 | } 34 | res := rpbc.RpbCounterUpdateResp{} 35 | code, err := c.parent.req(&req, 50, &res) 36 | if err != nil { 37 | return err 38 | } 39 | if code != 51 { 40 | return ErrUnexpectedResponse 41 | } 42 | c.val = res.GetValue() 43 | return nil 44 | } 45 | 46 | // Refresh gets the latest value of the counter 47 | // from the database. 48 | func (c *Counter) Refresh() error { 49 | req := rpbc.RpbCounterGetReq{ 50 | Key: c.key, 51 | Bucket: c.bucket, 52 | } 53 | res := rpbc.RpbCounterGetResp{} 54 | code, err := c.parent.req(&req, 52, &res) 55 | if err != nil { 56 | return err 57 | } 58 | if code != 53 { 59 | return ErrUnexpectedResponse 60 | } 61 | c.val = res.GetValue() 62 | return nil 63 | } 64 | 65 | // Destroy deletes the counter. 
66 | func (c *Counter) Destroy() error { 67 | req := rpbc.RpbDelReq{ 68 | Bucket: c.bucket, 69 | Key: c.key, 70 | } 71 | _, err := c.parent.req(&req, 13, nil) 72 | return err 73 | } 74 | 75 | // NewCounter creates a new counter with 76 | // an optional starting value. If the counter 77 | // already exists, the value returned will be 78 | // the existing value plus "start". 79 | func (b *Bucket) NewCounter(name string, start int64) (*Counter, error) { 80 | req := rpbc.RpbCounterUpdateReq{ 81 | Amount: &start, 82 | Returnvalue: &ptrTrue, 83 | Key: []byte(name), 84 | Bucket: []byte(b.nm), 85 | } 86 | res := rpbc.RpbCounterUpdateResp{} 87 | code, err := b.c.req(&req, 50, &res) 88 | if err != nil { 89 | return nil, err 90 | } 91 | if code != 51 { 92 | return nil, ErrUnexpectedResponse 93 | } 94 | return &Counter{ 95 | key: req.Key, 96 | bucket: req.Bucket, 97 | val: res.GetValue(), 98 | parent: b.c, 99 | }, nil 100 | } 101 | 102 | // GetCounter gets a counter. 103 | func (b *Bucket) GetCounter(name string) (*Counter, error) { 104 | req := rpbc.RpbCounterGetReq{ 105 | Key: []byte(name), 106 | Bucket: []byte(b.nm), 107 | } 108 | res := rpbc.RpbCounterGetResp{} 109 | code, err := b.c.req(&req, 52, &res) 110 | if err != nil { 111 | return nil, err 112 | } 113 | if code != 53 { 114 | return nil, ErrUnexpectedResponse 115 | } 116 | return &Counter{ 117 | key: req.Key, 118 | bucket: req.Bucket, 119 | val: res.GetValue(), 120 | parent: b.c, 121 | }, nil 122 | } 123 | -------------------------------------------------------------------------------- /counter_test.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | check "gopkg.in/check.v1" 5 | "os" 6 | "time" 7 | ) 8 | 9 | func (s *riakSuite) TestCounter(c *check.C) { 10 | travis := os.Getenv("TRAVIS") 11 | werck := os.Getenv("WERCKER") 12 | if travis != "" || werck != "" { 13 | c.Skip(`The CI environment does not have "allow_mult" set to 'true'`) 14 | } 15 | 
16 | startt := time.Now() 17 | 18 | var ct *Counter 19 | var err error 20 | ct, err = s.cl.Bucket("testbucket").NewCounter("test-counter", 0) 21 | if err != nil { 22 | c.Fatal(err) 23 | } 24 | 25 | start := ct.Val() 26 | 27 | err = ct.Add(5) 28 | if err != nil { 29 | c.Error(err) 30 | } 31 | 32 | if ct.Val() != start+5 { 33 | c.Errorf("Expected value %d; got %d", start+5, ct.Val()) 34 | } 35 | 36 | err = ct.Refresh() 37 | if err != nil { 38 | c.Error(err) 39 | } 40 | 41 | if ct.Val() != start+5 { 42 | c.Errorf("Expected value %d; got %d", start+5, ct.Val()) 43 | } 44 | 45 | nct, err := s.cl.Bucket("testbucket").GetCounter("test-counter") 46 | if err != nil { 47 | c.Fatal(err) 48 | } 49 | 50 | if nct.Val() != start+5 { 51 | c.Errorf("Expected value %d; got %d", start+5, nct.Val()) 52 | } 53 | 54 | err = ct.Destroy() 55 | if err != nil { 56 | c.Error(err) 57 | } 58 | 59 | nct, err = s.cl.Bucket("testbucket").GetCounter("test-counter") 60 | if err != ErrNotFound { 61 | c.Errorf("Expected ErrNotFound (%q); got %q", ErrNotFound, err) 62 | } 63 | 64 | s.runtime += time.Since(startt) 65 | } 66 | -------------------------------------------------------------------------------- /delete.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "github.com/philhofer/rkive/rpbc" 5 | ) 6 | 7 | // DelOpts are options available on delete 8 | // operations. All values are optional. 
9 | type DelOpts struct { 10 | R *uint32 // required reads 11 | W *uint32 // required writes 12 | PR *uint32 // required primary node reads 13 | PW *uint32 // required primary node writes 14 | RW *uint32 // required replica deletions 15 | DW *uint32 // required durable (to disk) writes 16 | } 17 | 18 | // parseDelOpts copies every non-nil option from opts into the delete request. 19 | func parseDelOpts(opts *DelOpts, req *rpbc.RpbDelReq) { 19 | if opts == nil || req == nil { 20 | return 21 | } 22 | if opts.W != nil { 23 | req.W = opts.W 24 | } 25 | if opts.DW != nil { 26 | req.Dw = opts.DW 27 | } 28 | if opts.PW != nil { 29 | req.Pw = opts.PW 30 | } 31 | if opts.R != nil { 32 | req.R = opts.R 33 | } 34 | if opts.PR != nil { 35 | req.Pr = opts.PR // fix: was opts.R — PR (primary reads) must not be populated from R 36 | } 37 | if opts.RW != nil { 38 | req.Rw = opts.RW 39 | } 40 | 41 | } 42 | 43 | func (c *Client) Delete(o Object, opts *DelOpts) error { 44 | if o.Info().bucket == nil || o.Info().key == nil { 45 | return ErrNoPath 46 | } 47 | req := &rpbc.RpbDelReq{ 48 | Bucket: o.Info().bucket, 49 | Key: o.Info().key, 50 | Vclock: o.Info().vclock, 51 | } 52 | 53 | parseDelOpts(opts, req) 54 | 55 | _, err := c.req(req, 13, nil) 56 | return err 57 | } 58 | -------------------------------------------------------------------------------- /delete_test.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | check "gopkg.in/check.v1" 7 | "time" 8 | ) 9 | 10 | func (s *riakSuite) TestDelete(c *check.C) { 11 | startt := time.Now() 12 | ob := &TestObject{ 13 | Data: []byte("Blah."), 14 | } 15 | 16 | err := s.cl.New(ob, "testbucket", nil, nil) 17 | if err != nil { 18 | c.Fatal(err) 19 | } 20 | 21 | err = s.cl.Delete(ob, nil) 22 | if err != nil { 23 | c.Fatal(err) 24 | } 25 | 26 | err = s.cl.Fetch(ob, ob.Info().Bucket(), ob.Info().Key(), nil) 27 | if err != ErrNotFound { 28 | c.Fatalf("Expected ErrNotFound; got %s", err) 29 | } 30 | 31 | s.runtime += time.Since(startt) 32 | } 33 | 
-------------------------------------------------------------------------------- /fetch.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "errors" 5 | "github.com/philhofer/rkive/rpbc" 6 | "sync" 7 | ) 8 | 9 | const ( 10 | DefaultReqTimeout = 500 11 | ) 12 | 13 | var ( 14 | // ErrUnexpectedResponse is returned when riak returns the wrong 15 | // message type 16 | ErrUnexpectedResponse = errors.New("unexpected response") 17 | 18 | // ErrNotFound is returned when 19 | // no objects are returned for 20 | // a read operation 21 | ErrNotFound = errors.New("not found") 22 | 23 | // ErrDeleted is returned 24 | // when the object has been marked 25 | // as deleted, but has not yet been reaped 26 | ErrDeleted = errors.New("object deleted") 27 | 28 | // default timeout on a request is 500ms 29 | dfltreq uint32 = DefaultReqTimeout 30 | 31 | // RpbGetResponse pool 32 | gresPool *sync.Pool 33 | ) 34 | 35 | func init() { 36 | gresPool = new(sync.Pool) 37 | gresPool.New = func() interface{} { return &rpbc.RpbGetResp{} } 38 | } 39 | 40 | // pop response from cache 41 | func gresPop() *rpbc.RpbGetResp { 42 | return gresPool.Get().(*rpbc.RpbGetResp) 43 | } 44 | 45 | // push response to cache 46 | func gresPush(r *rpbc.RpbGetResp) { 47 | r.Content = r.Content[0:0] 48 | r.Vclock = r.Vclock[0:0] 49 | r.Unchanged = nil 50 | gresPool.Put(r) 51 | } 52 | 53 | // ReadOpts are read options 54 | // that can be specified when 55 | // doing a read operation. All 56 | // of these default to the default 57 | // bucket properties. 
58 | type ReadOpts struct { 59 | R *uint32 // number of reads 60 | Pr *uint32 // number of primary replica reads 61 | BasicQuorum *bool // basic quorum required 62 | SloppyQuorum *bool // sloppy quorum required 63 | NotfoundOk *bool // treat not-found as a read for 'R' 64 | NVal *uint32 // 'n_val' 65 | } 66 | 67 | // parse read options; every non-nil field in opts is copied into req 68 | func parseROpts(req *rpbc.RpbGetReq, opts *ReadOpts) { 69 | if opts != nil { 70 | if opts.R != nil { 71 | req.R = opts.R 72 | } 73 | if opts.Pr != nil { 74 | req.Pr = opts.Pr 75 | } 76 | if opts.BasicQuorum != nil { 77 | req.BasicQuorum = opts.BasicQuorum 78 | } 79 | if opts.SloppyQuorum != nil { req.SloppyQuorum = opts.SloppyQuorum // fix: SloppyQuorum was declared but silently ignored 80 | } 79 | if opts.NVal != nil { 80 | req.NVal = opts.NVal 81 | } 82 | if opts.NotfoundOk != nil { 83 | req.NotfoundOk = opts.NotfoundOk 84 | } 85 | } 86 | } 87 | 88 | // Fetch puts whatever exists at the provided bucket+key 89 | // into the provided Object. It has undefined behavior 90 | // if the object supplied does not know how to unmarshal 91 | // the bytes returned from riak. 92 | func (c *Client) Fetch(o Object, bucket string, key string, opts *ReadOpts) error { 93 | // make request object 94 | req := &rpbc.RpbGetReq{ 95 | Bucket: []byte(bucket), 96 | Key: []byte(key), 97 | } 98 | // set 500ms request timeout 99 | req.Timeout = &dfltreq 100 | // get opts 101 | parseROpts(req, opts) 102 | 103 | res := gresPop() 104 | rescode, err := c.req(req, 9, res) 105 | if err != nil { gresPush(res) // fix: return pooled response on error, consistent with FetchHead 106 | return err 107 | } 108 | if rescode != 10 { gresPush(res) 109 | return ErrUnexpectedResponse 110 | } 111 | // this *should* be handled by req(), 112 | // but just in case: 113 | if len(res.GetContent()) == 0 { gresPush(res) 114 | return ErrNotFound 115 | } 116 | if len(res.GetContent()) > 1 { 117 | // merge objects; repair happens 118 | // on write to prevent sibling 119 | // explosion 120 | if om, ok := o.(ObjectM); ok { 121 | om.Info().key = append(om.Info().key[0:0], req.Key...) 122 | om.Info().bucket = append(om.Info().bucket[0:0], req.Bucket...) 123 | om.Info().vclock = append(om.Info().vclock[0:0], res.Vclock...)
124 | return handleMerge(om, res.Content) 125 | } else { 126 | return handleMultiple(len(res.Content), key, bucket) 127 | } 128 | } 129 | err = readContent(o, res.Content[0]) 130 | o.Info().key = append(o.Info().key[0:0], req.Key...) 131 | o.Info().bucket = append(o.Info().bucket[0:0], req.Bucket...) 132 | o.Info().vclock = append(o.Info().vclock[0:0], res.Vclock...) 133 | gresPush(res) 134 | return err 135 | } 136 | 137 | // Update conditionally fetches the object in question 138 | // based on whether or not it has been modified in the database. 139 | // If the object has been changed, the object will be modified 140 | // and Update() will return true. (The object must have a well-defined) 141 | // key, bucket, and vclock.) 142 | func (c *Client) Update(o Object, opts *ReadOpts) (bool, error) { 143 | if len(o.Info().key) == 0 { 144 | return false, ErrNoPath 145 | } 146 | req := &rpbc.RpbGetReq{ 147 | Bucket: o.Info().bucket, 148 | Key: o.Info().key, 149 | Timeout: &dfltreq, 150 | IfModified: o.Info().vclock, 151 | } 152 | 153 | parseROpts(req, opts) 154 | 155 | res := gresPop() 156 | rescode, err := c.req(req, 9, res) 157 | if err != nil { 158 | return false, err 159 | } 160 | if rescode != 10 { 161 | return false, ErrUnexpectedResponse 162 | } 163 | if res.Unchanged != nil && *res.Unchanged { 164 | return false, nil 165 | } 166 | if len(res.GetContent()) == 0 { 167 | return false, ErrNotFound 168 | } 169 | if len(res.GetContent()) > 1 { 170 | if om, ok := o.(ObjectM); ok { 171 | // like Fetch, we merge the results 172 | // here and hope for reconciliation 173 | // on write 174 | om.Info().vclock = append(o.Info().vclock[0:0], res.GetVclock()...) 175 | err = handleMerge(om, res.Content) 176 | return true, err 177 | } 178 | return false, handleMultiple(len(res.Content), o.Info().Key(), o.Info().Bucket()) 179 | } 180 | err = readContent(o, res.Content[0]) 181 | o.Info().vclock = append(o.Info().vclock[0:0], res.Vclock...) 
182 | gresPush(res) 183 | return true, err 184 | } 185 | 186 | // FetchHead returns the head (*Info) of an object 187 | // stored in Riak. This is the least expensive way 188 | // to check for the existence of an object. 189 | func (c *Client) FetchHead(bucket string, key string) (*Info, error) { 190 | req := &rpbc.RpbGetReq{ 191 | Key: []byte(key), 192 | Bucket: []byte(bucket), 193 | Timeout: &dfltreq, 194 | Head: &ptrTrue, 195 | } 196 | res := gresPop() 197 | rescode, err := c.req(req, 9, res) 198 | if err != nil { 199 | gresPush(res) 200 | return nil, err 201 | } 202 | if rescode != 10 { 203 | gresPush(res) 204 | return nil, ErrUnexpectedResponse 205 | } 206 | // NotFound is supposed to be handled by 207 | // c.req, but just in case: 208 | if len(res.Content) == 0 { 209 | gresPush(res) 210 | return nil, ErrNotFound 211 | } 212 | if len(res.Content) > 1 { 213 | gresPush(res) 214 | return nil, handleMultiple(len(res.Content), key, bucket) 215 | } 216 | bl := &Blob{} 217 | readHeader(bl, res.Content[0]) 218 | bl.RiakInfo.vclock = append(bl.Info().vclock[0:0], res.Vclock...) 219 | bl.RiakInfo.key = append(bl.Info().key[0:0], req.Key...) 220 | bl.RiakInfo.bucket = append(bl.Info().bucket[0:0], req.Bucket...) 221 | gresPush(res) 222 | return bl.Info(), err 223 | } 224 | 225 | // PullHead pulls the latest object metadata into the object. 226 | // The Info() pointed to by the object will be changed if the 227 | // object has been changed in Riak since the last read. If you 228 | // want to read the entire object, use Update() instead. 
229 | // PullHead pulls the latest object metadata into the object (see doc above). 229 | func (c *Client) PullHead(o Object) error { 230 | if len(o.Info().key) == 0 { 231 | return ErrNoPath 232 | } 233 | req := &rpbc.RpbGetReq{ 234 | Key: o.Info().key, 235 | Bucket: o.Info().bucket, 236 | Timeout: &dfltreq, 237 | Head: &ptrTrue, 238 | IfModified: o.Info().vclock, 239 | } 240 | res := gresPop() 241 | code, err := c.req(req, 9, res) 242 | if err != nil { 243 | gresPush(res) 244 | return err 245 | } 246 | if code != 10 { gresPush(res) // fix: pooled response leaked on this path 247 | return ErrUnexpectedResponse 248 | } 249 | if res.GetUnchanged() { 250 | gresPush(res) 251 | return nil 252 | } 253 | if len(res.Content) == 0 { gresPush(res) // fix: pooled response leaked on this path 254 | return ErrNotFound 255 | } 256 | if len(res.Content) > 1 { 257 | gresPush(res) 258 | return handleMultiple(len(res.Content), o.Info().Key(), o.Info().Bucket()) 259 | } 260 | readHeader(o, res.Content[0]) 261 | o.Info().vclock = append(o.Info().vclock[0:0], res.Vclock...) 262 | gresPush(res) 263 | return nil 264 | } 265 | -------------------------------------------------------------------------------- /fetch_test.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | "bytes" 7 | "fmt" 8 | check "gopkg.in/check.v1" 9 | "os" 10 | "sync" 11 | "testing" 12 | "time" 13 | ) 14 | 15 | // timed suite - CAN ONLY USE 1 CONNECTION, otherwise timing is useless 16 | type riakSuite struct { 17 | cl *Client 18 | runtime time.Duration // needs to be incremented on tests 19 | } 20 | 21 | // untimed suite - can use many connections 22 | type riakAsync struct { 23 | cl *Client 24 | } 25 | 26 | func TestAll(t *testing.T) { 27 | check.Suite(&riakAsync{}) 28 | check.Suite(&riakSuite{}) 29 | check.TestingT(t) 30 | } 31 | 32 | func (s *riakAsync) SetUpSuite(c *check.C) { 33 | addr := os.Getenv("RIAK_PB_URL") 34 | if addr == "" { 35 | addr = "localhost:8087" 36 | } 37 | var err error 38 | s.cl, err = DialOne(addr, "testClient") 39 | if err != nil { 40 | fmt.Printf("Couldn't connect to Riak: %s\n", err) 41 | 
os.Exit(1) 42 | } 43 | err = s.cl.Ping() 44 | if err != nil { 45 | c.Fatalf("Error on ping: %s", err) 46 | } 47 | } 48 | 49 | func (s *riakSuite) SetUpSuite(c *check.C) { 50 | addr := os.Getenv("RIAK_PB_URL") 51 | if addr == "" { 52 | addr = "localhost:8087" 53 | } 54 | var err error 55 | s.cl, err = DialOne(addr, "testClient") 56 | if err != nil { 57 | fmt.Printf("Couldn't connect to Riak: %s\n", err) 58 | os.Exit(1) 59 | } 60 | err = s.cl.Ping() 61 | if err != nil { 62 | c.Fatalf("Error on ping: %s", err) 63 | } 64 | } 65 | 66 | func (s *riakAsync) TearDownSuite(c *check.C) { 67 | s.cl.Close() 68 | } 69 | 70 | func (s *riakSuite) TearDownSuite(c *check.C) { 71 | s.cl.Close() 72 | c.Log("------------ STATS -----------") 73 | c.Logf("Total elapsed time: %s", s.runtime) 74 | c.Logf("total iowait time: %s", time.Duration(s.cl.twait)) 75 | c.Logf("total client time: %s", s.runtime-time.Duration(s.cl.twait)) 76 | c.Logf("non-iowait %%: %.2f%%", 100*float64((s.runtime-time.Duration(s.cl.twait)))/float64(s.runtime)) 77 | c.Logf("request count: %d", s.cl.nwait) 78 | c.Logf("avg request time: %s", time.Duration(s.cl.AvgWait())) 79 | c.Logf("nowait rate: %d req/s", (uint64(time.Second))*(s.cl.nwait)/uint64(s.runtime-time.Duration(s.cl.twait))) 80 | c.Log("------------------------------") 81 | } 82 | 83 | type TestObject struct { 84 | info Info 85 | Data []byte 86 | } 87 | 88 | func (t *TestObject) Unmarshal(b []byte) error { 89 | t.Data = b 90 | return nil 91 | } 92 | 93 | func (t *TestObject) Marshal() ([]byte, error) { 94 | return t.Data, nil 95 | } 96 | 97 | func (t *TestObject) Info() *Info { return &t.info } 98 | 99 | func (t *TestObject) NewEmpty() Object { return &TestObject{} } 100 | 101 | // naive merge 102 | func (t *TestObject) Merge(o Object) { 103 | tn := o.(*TestObject) 104 | if len(tn.Data) > len(t.Data) { 105 | t.Data = tn.Data 106 | } 107 | } 108 | 109 | //func TestMultipleVclocks(t *testing.T) { 110 | func (s *riakSuite) TestMultipleVclocks(c *check.C) { 
111 | startt := time.Now() 112 | travis := os.Getenv("TRAVIS") 113 | wercker := os.Getenv("WERCKER") 114 | if travis != "" || wercker != "" { 115 | c.Skip("The service doesn't have allow_mult set to true") 116 | } 117 | oba := &TestObject{ 118 | Data: []byte("Body 1"), 119 | } 120 | 121 | obb := &TestObject{ 122 | Data: []byte("Body 2..."), 123 | } 124 | 125 | // manually create conflict - a user can't ordinarily do this 126 | oba.info.bucket, oba.info.key = []byte("testbucket"), []byte("conflict") 127 | obb.info.bucket, obb.info.key = []byte("testbucket"), []byte("conflict") 128 | 129 | // The store operations should not error, 130 | // because we are doing a fetch and merge 131 | // when we detect multiple responses on 132 | // Store() 133 | err := s.cl.Store(obb, nil) 134 | if err != nil { 135 | c.Fatal(err) 136 | } 137 | err = s.cl.Store(oba, nil) 138 | if err != nil { 139 | c.Fatal(err) 140 | } 141 | 142 | // Since our Merge() function takes the longer of the 143 | // two Data fields, the body should always be "Body 2..." 
144 | err = s.cl.Fetch(oba, "testbucket", "conflict", nil) 145 | if err != nil { 146 | c.Fatal(err) 147 | } 148 | 149 | if !bytes.Equal(oba.Data, []byte("Body 2...")) { 150 | c.Errorf("Data should be %q; got %q", "Body 2...", oba.Data) 151 | } 152 | s.runtime += time.Since(startt) 153 | } 154 | 155 | func (s *riakSuite) TestFetchNotFound(c *check.C) { 156 | startt := time.Now() 157 | ob := &TestObject{} 158 | 159 | err := s.cl.Fetch(ob, "anybucket", "dne", nil) 160 | if err == nil { 161 | c.Error("'err' should not be nil") 162 | } 163 | if err != ErrNotFound { 164 | c.Errorf("err is not ErrNotFound: %q", err) 165 | } 166 | s.runtime += time.Since(startt) 167 | } 168 | 169 | func (s *riakSuite) TestUpdate(c *check.C) { 170 | startt := time.Now() 171 | test := s.cl.Bucket("testbucket") 172 | 173 | lb := &TestObject{ 174 | Data: []byte("flibbertyibbitygibbit"), 175 | } 176 | 177 | err := test.New(lb, nil) 178 | if err != nil { 179 | c.Fatal(err) 180 | } 181 | 182 | newlb := &TestObject{} 183 | 184 | err = test.Fetch(newlb, lb.Info().Key()) 185 | if err != nil { 186 | c.Fatal(err) 187 | } 188 | 189 | if !bytes.Equal(newlb.Data, lb.Data) { 190 | c.Logf("Object 1 data: %q", lb.Data) 191 | c.Logf("Object 2 data: %q", newlb.Data) 192 | c.Errorf("Objects don't have the same body") 193 | } 194 | 195 | // make a modification 196 | newlb.Data = []byte("new data.") 197 | err = test.Push(newlb) 198 | if err != nil { 199 | c.Fatal(err) 200 | } 201 | 202 | // this should return true 203 | upd, err := test.Update(lb) 204 | if err != nil { 205 | c.Fatal(err) 206 | } 207 | 208 | if !upd { 209 | c.Error("Object was not updated.") 210 | } 211 | 212 | if !bytes.Equal(lb.Data, newlb.Data) { 213 | c.Error("Objects are not equal after update.") 214 | } 215 | 216 | // this should return false 217 | upd, err = test.Update(newlb) 218 | if err != nil { 219 | c.Fatal(err) 220 | } 221 | 222 | if upd { 223 | c.Error("Object was spuriously updated...?") 224 | } 225 | s.runtime += 
time.Since(startt) 226 | } 227 | 228 | func (s *riakSuite) TestHead(c *check.C) { 229 | startt := time.Now() 230 | tests := s.cl.Bucket("testbucket") 231 | 232 | ob := &TestObject{ 233 | Data: []byte("exists."), 234 | } 235 | 236 | err := tests.New(ob, nil) 237 | if err != nil { 238 | c.Fatal(err) 239 | } 240 | 241 | // fetch head exists 242 | var info *Info 243 | info, err = s.cl.FetchHead("testbucket", ob.Info().Key()) 244 | if err != nil { 245 | c.Fatal(err) 246 | } 247 | 248 | if !bytes.Equal(info.vclock, ob.info.vclock) { 249 | c.Errorf("vclocks not equal: %q and %q", info.vclock, ob.info.vclock) 250 | } 251 | 252 | // fetch dne 253 | _, err = s.cl.FetchHead("testbucket", "dne") 254 | if err != ErrNotFound { 255 | c.Errorf("expected ErrNotFound, got: %q", err) 256 | } 257 | s.runtime += time.Since(startt) 258 | } 259 | 260 | func (s *riakAsync) TestGoFlood(c *check.C) { 261 | c.Skip("This isn't necessary unless the connection handler changes.") 262 | 263 | // flood with goroutines 264 | // to test the stability 265 | // of the connection cap 266 | 267 | ob := &TestObject{ 268 | Data: []byte("Here's a body."), 269 | } 270 | tests := s.cl.Bucket("testbucket") 271 | err := tests.New(ob, nil) 272 | if err != nil { 273 | c.Fatal(err) 274 | } 275 | 276 | key := ob.Info().Key() 277 | NGO := 200 278 | wg := new(sync.WaitGroup) 279 | lock := new(sync.Mutex) 280 | for i := 0; i < NGO; i++ { 281 | wg.Add(1) 282 | go func(key string, wg *sync.WaitGroup) { 283 | nob := &TestObject{} 284 | err := tests.Fetch(nob, key) 285 | if err != nil { 286 | lock.Lock() 287 | c.Error(err) 288 | lock.Unlock() 289 | } 290 | wg.Done() 291 | }(key, wg) 292 | } 293 | wg.Wait() 294 | } 295 | -------------------------------------------------------------------------------- /index.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "bytes" 5 | "github.com/philhofer/rkive/rpbc" 6 | "io" 7 | "strconv" 8 | "sync" 9 | ) 10 | 11 
| // IndexQueryRes is the response to a secondary index query. 12 | type IndexQueryRes struct { 13 | c *Client 14 | ftchd int 15 | bucket []byte 16 | keys [][]byte 17 | } 18 | 19 | // Contains returns whether or not the query 20 | // response contains this particular key 21 | func (i *IndexQueryRes) Contains(key string) bool { 22 | kb := ustr(key) 23 | for _, kv := range i.keys { 24 | if bytes.Equal(kv, kb) { 25 | return true 26 | } 27 | } 28 | return false 29 | } 30 | 31 | // Len returns the number of keys in the response 32 | func (i *IndexQueryRes) Len() int { return len(i.keys) } 33 | 34 | // Keys returns the complete list of response keys 35 | func (i *IndexQueryRes) Keys() []string { 36 | out := make([]string, i.Len()) 37 | for i, kv := range i.keys { 38 | out[i] = string(kv) 39 | } 40 | return out 41 | } 42 | 43 | // Fetch fetches the next object in the query. Fetch 44 | // returns whether or not there are objects remaining 45 | // in the query result, and any error encountered in 46 | // fetching that object. 47 | func (i *IndexQueryRes) FetchNext(o Object) (done bool, err error) { 48 | if i.ftchd >= len(i.keys) { 49 | return true, io.EOF 50 | } 51 | 52 | err = i.c.Fetch(o, string(i.bucket), string(i.keys[i.ftchd]), nil) 53 | i.ftchd++ 54 | if i.ftchd == len(i.keys) { 55 | done = true 56 | } 57 | return 58 | } 59 | 60 | // Which searches within the query result for objects that satisfy 61 | // the given condition functions. 
62 | func (i *IndexQueryRes) Which(o Object, conds ...func(Object) bool) ([]string, error) { 63 | var out []string 64 | bckt := string(i.bucket) 65 | search: 66 | for j := 0; j < i.Len(); j++ { 67 | key := string(i.keys[j]) 68 | err := i.c.Fetch(o, bckt, key, nil) 69 | if err != nil { 70 | return out, err 71 | } 72 | for _, cond := range conds { 73 | if !cond(o) { 74 | continue search 75 | } 76 | } 77 | out = append(out, key) 78 | } 79 | return out, nil 80 | } 81 | 82 | // AsyncFetch represents the output of an 83 | // asynchronous fetch operation. 'Value' is 84 | // never nil, but 'Error' may or may not be nil. 85 | // If 'Error' is non-nil, then 'Value' is usually 86 | // the zero value of the underlying object. 87 | type AsyncFetch struct { 88 | Value Object 89 | Error error 90 | } 91 | 92 | // FetchAsync returns a channel on which all of the objects 93 | // in the query are returned. 'procs' determines the 94 | // number of goroutines actively fetching. The channel will be closed once 95 | // all the objects have been returned. Objects are fetched 96 | // asynchronously. The (underlying) type of every object returned in each 97 | // AsyncFetch is the same as returned from o.NewEmpty(). 
98 | func (i *IndexQueryRes) FetchAsync(o Duplicator, procs int) <-chan *AsyncFetch { 99 | nw := procs 100 | if i.Len() < nw || nw <= 0 { 101 | nw = i.Len() 102 | } 103 | // keys to fetch 104 | keys := make(chan string, nw) 105 | 106 | // responses from fetch 107 | outs := make(chan *AsyncFetch, i.Len()) 108 | 109 | // start 'nw' workers 110 | wg := new(sync.WaitGroup) 111 | for j := 0; j < nw; j++ { 112 | wg.Add(1) 113 | go func(ks chan string, outs chan *AsyncFetch, o Duplicator, wg *sync.WaitGroup) { 114 | for key := range ks { 115 | ob := o.NewEmpty() 116 | err := i.c.Fetch(ob, string(i.bucket), key, nil) 117 | outs <- &AsyncFetch{Value: ob, Error: err} 118 | } 119 | wg.Done() 120 | }(keys, outs, o, wg) 121 | 122 | } 123 | 124 | // close 'outs' when all 125 | // workers have exited. 126 | go func(wg *sync.WaitGroup, os chan *AsyncFetch) { 127 | wg.Wait() 128 | close(os) 129 | }(wg, outs) 130 | 131 | for _, key := range i.Keys() { 132 | keys <- key 133 | } 134 | close(keys) 135 | 136 | return outs 137 | } 138 | 139 | // IndexLookup returns the keys that match the index-value pair specified. You 140 | // can specify the maximum number of returned keys ('max'). Index queries are 141 | // performed in "streaming" mode. 
142 | func (c *Client) IndexLookup(bucket string, index string, value string, max *int) (*IndexQueryRes, error) { 143 | bckt := []byte(bucket) 144 | idx := make([]byte, len(index)+4) 145 | copy(idx[0:], index) 146 | copy(idx[len(index):], []byte("_bin")) 147 | kv := []byte(value) 148 | var qtype rpbc.RpbIndexReq_IndexQueryType = 0 149 | req := &rpbc.RpbIndexReq{ 150 | Bucket: bckt, 151 | Index: idx, 152 | Key: kv, 153 | Qtype: &qtype, 154 | Stream: &ptrTrue, 155 | } 156 | 157 | if max != nil { 158 | mxr := uint32(*max) 159 | req.MaxResults = &mxr 160 | } 161 | 162 | queryres := &IndexQueryRes{ 163 | c: c, 164 | bucket: bckt, 165 | } 166 | 167 | res := &rpbc.RpbIndexResp{} 168 | 169 | // make a stream request 170 | stream, err := c.streamReq(req, 25) 171 | if err != nil { 172 | return nil, err 173 | } 174 | 175 | // Retrieve streaming responses 176 | done := false 177 | for !done { 178 | var code byte 179 | done, code, err = stream.unmarshal(res) 180 | if err != nil { 181 | return queryres, err 182 | } 183 | if code != 26 { 184 | return queryres, ErrUnexpectedResponse 185 | } 186 | 187 | queryres.keys = append(queryres.keys, res.Keys...) 188 | res.Reset() 189 | } 190 | return queryres, nil 191 | } 192 | 193 | // IndexRange returns the keys that match the index range query. You can specify 194 | // the maximum number of returned results ('max'). Index queries are performed in 195 | // "streaming" mode. 
196 | func (c *Client) IndexRange(bucket string, index string, min int64, max int64, maxret *int) (*IndexQueryRes, error) { 197 | bckt := []byte(bucket) 198 | idx := make([]byte, len(index)+4) 199 | copy(idx[0:], index) 200 | copy(idx[len(index):], []byte("_int")) 201 | var qtype rpbc.RpbIndexReq_IndexQueryType = 1 202 | req := &rpbc.RpbIndexReq{ 203 | Bucket: bckt, 204 | Index: idx, 205 | Qtype: &qtype, 206 | Stream: &ptrTrue, 207 | RangeMin: strconv.AppendInt([]byte{}, min, 10), 208 | RangeMax: strconv.AppendInt([]byte{}, max, 10), 209 | } 210 | if maxret != nil { 211 | msr := uint32(*maxret) 212 | req.MaxResults = &msr 213 | } 214 | 215 | queryres := &IndexQueryRes{ c: c, // fix: client was never set (unlike IndexLookup); FetchNext/Which/FetchAsync on the result would nil-deref 216 | bucket: bckt, 217 | } 218 | 219 | res := &rpbc.RpbIndexResp{} 220 | stream, err := c.streamReq(req, 25) 221 | if err != nil { 222 | return nil, err 223 | } 224 | 225 | done := false 226 | for !done { 227 | var code byte 228 | done, code, err = stream.unmarshal(res) 229 | if err != nil { 230 | return queryres, err 231 | } 232 | if code != 26 { 233 | return queryres, ErrUnexpectedResponse 234 | } 235 | queryres.keys = append(queryres.keys, res.Keys...)
236 | res.Reset() 237 | } 238 | return queryres, nil 239 | } 240 | -------------------------------------------------------------------------------- /index_test.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | check "gopkg.in/check.v1" 7 | "time" 8 | ) 9 | 10 | func (s *riakSuite) TestIndexLookup(c *check.C) { 11 | startt := time.Now() 12 | ob := &TestObject{ 13 | Data: []byte("Hello world!"), 14 | } 15 | 16 | ob.Info().AddIndex("testIdx", "myValue") 17 | 18 | bucket := s.cl.Bucket("testbucket") 19 | 20 | err := bucket.New(ob, nil) 21 | if err != nil { 22 | c.Fatal(err) 23 | } 24 | 25 | res, err := bucket.IndexLookup("testIdx", "myValue") 26 | if err != nil { 27 | c.Fatal(err) 28 | } 29 | 30 | if res.Len() < 1 { 31 | c.Fatalf("Expected multiple keys; got %d.", res.Len()) 32 | } 33 | if !res.Contains(ob.Info().Key()) { 34 | c.Errorf("Response doesn't contain original key...?") 35 | } 36 | 37 | hasCorrectIndex := func(o Object) bool { 38 | val := o.Info().GetIndex("testIdx") 39 | if val != "myValue" { 40 | c.Logf("Found incorrect: %v", o.Info().idxs) 41 | return false 42 | } 43 | return true 44 | } 45 | 46 | ncorrect, err := res.Which(ob, hasCorrectIndex) 47 | if err != nil { 48 | c.Fatal(err) 49 | } 50 | if len(ncorrect) != res.Len() { 51 | c.Errorf("Ncorrect is %d; response length is %d", len(ncorrect), res.Len()) 52 | } 53 | 54 | c.Logf("Found %d keys.", res.Len()) 55 | s.runtime += time.Since(startt) 56 | } 57 | 58 | func (s *riakAsync) TestFetchAsync(c *check.C) { 59 | ob := &TestObject{ 60 | Data: []byte("Hello world!"), 61 | } 62 | 63 | ob.Info().AddIndex("testIdx", "myValue") 64 | 65 | bucket := s.cl.Bucket("testbucket") 66 | 67 | err := bucket.New(ob, nil) 68 | if err != nil { 69 | c.Fatal(err) 70 | } 71 | 72 | res, err := bucket.IndexLookup("testIdx", "myValue") 73 | if err != nil { 74 | c.Fatal(err) 75 | } 76 | 77 | async := res.FetchAsync(ob, 4) 78 | count := 
0 79 | ts := time.Now() 80 | for fres := range async { 81 | count++ 82 | if fres.Error != nil { 83 | c.Errorf("received error %q", err) 84 | } 85 | if fres.Value == nil { 86 | c.Error("received nil value") 87 | } 88 | if val, ok := fres.Value.(*TestObject); !ok { 89 | c.Error("value cannot be type-asserted to *TestObject") 90 | } else { 91 | if val.Info().GetIndex("testIdx") != "myValue" { 92 | c.Errorf("Expected %q; got %q", "myValue", val.Info().GetIndex("testIdx")) 93 | } 94 | } 95 | } 96 | elapsed := time.Since(ts) 97 | c.Logf("AsyncFetch: %d fetches in %s", count, elapsed) 98 | if count != res.Len() { 99 | c.Errorf("Expected %d responses; got %d", res.Len(), count) 100 | } 101 | 102 | } 103 | 104 | func (s *riakSuite) TestIndexRange(c *check.C) { 105 | startt := time.Now() 106 | ob := &TestObject{ 107 | Data: []byte("Hello world!"), 108 | } 109 | 110 | ob.Info().AddIndexInt("testNum", 35) 111 | 112 | err := s.cl.New(ob, "testbucket", nil, nil) 113 | if err != nil { 114 | c.Fatal(err) 115 | } 116 | 117 | res, err := s.cl.Bucket("testbucket").IndexRange("testNum", 30, 40) 118 | if err != nil { 119 | c.Fatal(err) 120 | } 121 | 122 | if res.Len() < 1 { 123 | c.Fatalf("Expected multiple keys; got %d", res.Len()) 124 | } 125 | 126 | if !res.Contains(ob.Info().Key()) { 127 | c.Errorf("Response doesn't contain original key...?") 128 | } 129 | c.Logf("Found %d keys.", res.Len()) 130 | s.runtime += time.Since(startt) 131 | } 132 | -------------------------------------------------------------------------------- /object.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "bytes" 5 | "github.com/philhofer/rkive/rpbc" 6 | "strconv" 7 | "unsafe" 8 | ) 9 | 10 | // unsafe string-to-byte 11 | // only use this when 's' has the same scope 12 | // as the returned byte slice, and there are guarantees 13 | // that the slice will not be mutated. 
14 | func ustr(s string) []byte { return *(*[]byte)(unsafe.Pointer(&s)) } 15 | 16 | // Object is the interface that must 17 | // be satisfied in order to fetch or 18 | // store an object in Riak. 19 | type Object interface { 20 | // Objects must maintain 21 | // a reference to an Info 22 | // struct, which contains 23 | // this object's riak 24 | // metadata. Info() must 25 | // never return nil, or it 26 | // will cause a panic. 27 | Info() *Info 28 | 29 | // Marshal should return the encoded 30 | // value of the object, and any 31 | // relevant errors. 32 | Marshal() ([]byte, error) 33 | 34 | // Unmarshal should unmarshal the object 35 | // from a []byte. It can safely use 36 | // zero-copy methods, as the byte slice 37 | // passed to it will "belong" to the 38 | // object. 39 | Unmarshal([]byte) error 40 | } 41 | 42 | // Duplicator types know how to return 43 | // an empty copy of themselves, on top of 44 | // fulfilling the Object interface. 45 | type Duplicator interface { 46 | Object 47 | // Empty should return an initialized 48 | // (zero-value) object of the same underlying 49 | // type as the parent. 50 | NewEmpty() Object 51 | } 52 | 53 | // ObjectM is an object that also knows how to merge 54 | // itself with siblings. If an object has this interface 55 | // defined, this package will use the Merge method to transparently 56 | // handle siblings returned from Riak. 57 | type ObjectM interface { 58 | Duplicator 59 | 60 | // Merge should merge the argument object into the method receiver. It 61 | // is safe to type-assert the argument of Merge to the same type 62 | // as the type of the object satisfying the inteface. (Under the hood, 63 | // the argument passed to Merge is simply the value of NewEmpty() after 64 | // data has been read into it.) Merge is used to iteratively merge many sibling objects. 
65 | Merge(o Object) 66 | } 67 | 68 | // sibling merge - object should be Store()d after call 69 | func handleMerge(om ObjectM, ct []*rpbc.RpbContent) error { 70 | var err error 71 | for i, ctt := range ct { 72 | if i == 0 { 73 | err = readContent(om, ctt) 74 | if err != nil { 75 | return err 76 | } 77 | continue 78 | } 79 | 80 | // read into new empty 81 | nom := om.NewEmpty() 82 | err = readContent(nom, ctt) 83 | nom.Info().vclock = append(nom.Info().vclock[0:0], ctt.Vtag...) 84 | if err != nil { 85 | return err 86 | } 87 | om.Merge(nom) 88 | 89 | // transfer vclocks if we didn't have one before 90 | if len(om.Info().vclock) == 0 && len(nom.Info().vclock) > 0 { 91 | om.Info().vclock = append(om.Info().vclock, nom.Info().vclock...) 92 | } 93 | } 94 | return nil 95 | } 96 | 97 | // Info contains information 98 | // about a specific Riak object. You can use 99 | // it to satisfy the Object interface. 100 | // Info's zero value (Info{}) is valid. 101 | // You can use the Info object to add 102 | // links, seconary indexes, and user metadata 103 | // to the object referencing this Info object. 104 | // Calls to Fetch(), Push(), Store(), New(), 105 | // etc. will changes the contents of this struct. 106 | type Info struct { 107 | key []byte // key 108 | bucket []byte // bucket 109 | links []*rpbc.RpbLink // Links 110 | idxs []*rpbc.RpbPair // Indexes 111 | meta []*rpbc.RpbPair // Meta 112 | ctype []byte // Content-Type 113 | vclock []byte // Vclock 114 | } 115 | 116 | func readHeader(o Object, ctnt *rpbc.RpbContent) { 117 | o.Info().ctype = append(o.Info().ctype[0:0], ctnt.ContentType...) 118 | o.Info().links = append(o.Info().links[0:0], ctnt.Links...) 119 | o.Info().idxs = append(o.Info().idxs[0:0], ctnt.Indexes...) 120 | o.Info().meta = append(o.Info().meta[0:0], ctnt.Usermeta...) 
121 | } 122 | 123 | // read into 'o' from content 124 | func readContent(o Object, ctnt *rpbc.RpbContent) error { 125 | if ctnt.GetDeleted() { 126 | return ErrDeleted 127 | } 128 | readHeader(o, ctnt) 129 | return o.Unmarshal(ctnt.Value) 130 | } 131 | 132 | // write into content from 'o' 133 | func writeContent(o Object, ctnt *rpbc.RpbContent) error { 134 | var err error 135 | ctnt.Value, err = o.Marshal() 136 | if err != nil { 137 | return err 138 | } 139 | ctnt.ContentType = append(ctnt.ContentType[0:0], o.Info().ctype...) 140 | ctnt.Links = append(ctnt.Links[0:0], o.Info().links...) 141 | ctnt.Usermeta = append(ctnt.Usermeta[0:0], o.Info().meta...) 142 | ctnt.Indexes = append(ctnt.Indexes[0:0], o.Info().idxs...) 143 | return nil 144 | } 145 | 146 | func set(l *[]*rpbc.RpbPair, key, value []byte) { 147 | if l == nil || len(*l) == 0 { 148 | goto add 149 | } 150 | for _, item := range *l { 151 | if bytes.Equal(key, item.Key) { 152 | item.Key = key 153 | item.Value = value 154 | return 155 | } 156 | } 157 | add: 158 | *l = append(*l, &rpbc.RpbPair{ 159 | Key: key, 160 | Value: value, 161 | }) 162 | return 163 | } 164 | 165 | func get(l *[]*rpbc.RpbPair, key []byte) []byte { 166 | if l == nil || len(*l) == 0 { 167 | return nil 168 | } 169 | for _, item := range *l { 170 | if bytes.Equal(key, item.Key) { 171 | return item.Value 172 | } 173 | } 174 | return nil 175 | } 176 | 177 | func add(l *[]*rpbc.RpbPair, key, value []byte) bool { 178 | if l == nil || len(*l) == 0 { 179 | goto add 180 | } 181 | for _, item := range *l { 182 | if bytes.Equal(key, item.Key) { 183 | if bytes.Equal(value, item.Value) { 184 | return true 185 | } 186 | return false 187 | } 188 | } 189 | add: 190 | *l = append(*l, &rpbc.RpbPair{ 191 | Key: key, 192 | Value: value, 193 | }) 194 | return true 195 | } 196 | 197 | func del(l *[]*rpbc.RpbPair, key []byte) { 198 | if l == nil || len(*l) == 0 { 199 | return 200 | } 201 | nl := len(*l) 202 | for i, item := range *l { 203 | if bytes.Equal(key, 
item.Key) { 204 | (*l)[i], (*l)[nl-1], *l = (*l)[nl-1], nil, (*l)[:nl-1] 205 | return 206 | } 207 | } 208 | } 209 | 210 | func all(l *[]*rpbc.RpbPair) [][2]string { 211 | nl := len(*l) 212 | if nl == 0 { 213 | return nil 214 | } 215 | out := make([][2]string, nl) 216 | for i, item := range *l { 217 | out[i] = [2]string{string(item.Key), string(item.Value)} 218 | } 219 | return out 220 | } 221 | 222 | // Key is the canonical riak key 223 | func (in *Info) Key() string { return string(in.key) } 224 | 225 | // Bucket is the canonical riak bucket 226 | func (in *Info) Bucket() string { return string(in.bucket) } 227 | 228 | // ContentType is the content-type 229 | func (in *Info) ContentType() string { return string(in.ctype) } 230 | 231 | // SetContentType sets the content-type 232 | // to 's'. 233 | func (in *Info) SetContentType(s string) { in.ctype = []byte(s) } 234 | 235 | // Vclock is the vector clock value as a string 236 | func (in *Info) Vclock() string { return string(in.vclock) } 237 | 238 | // format key as key_bin 239 | func fmtbin(key string) []byte { 240 | kl := len(key) 241 | kv := make([]byte, kl+4) 242 | copy(kv[0:], key) 243 | copy(kv[kl:], []byte("_bin")) 244 | kv = bytes.ToLower(kv) 245 | return kv 246 | } 247 | 248 | // format key as key_int 249 | func fmtint(key string) []byte { 250 | kl := len(key) 251 | kv := make([]byte, kl+4) 252 | copy(kv[0:], key) 253 | copy(kv[kl:], []byte("_int")) 254 | kv = bytes.ToLower(kv) 255 | return kv 256 | } 257 | 258 | // Add adds a key-value pair to an Indexes 259 | // object, but returns false if a key already 260 | // exists under that name and has a different value. 261 | // Returns true if the index already has this exact key-value 262 | // pair, or if the pair is written in with no conflicts. 263 | // (All XxxIndex operations append "_bin" to key values 264 | // internally in order to comply with the Riak secondary 265 | // index specification, so the user does not have to 266 | // include it.) 
267 | func (in *Info) AddIndex(key string, value string) bool { 268 | return add(&in.idxs, fmtbin(key), []byte(value)) 269 | } 270 | 271 | // AddIndexInt sets an integer secondary index value 272 | // using the same conditional rules as AddIndex 273 | func (in *Info) AddIndexInt(key string, value int64) bool { 274 | return add(&in.idxs, fmtint(key), ustr(strconv.FormatInt(value, 10))) 275 | } 276 | 277 | // Set sets a key-value pair in an Indexes object 278 | func (in *Info) SetIndex(key string, value string) { 279 | set(&in.idxs, fmtbin(key), []byte(value)) 280 | } 281 | 282 | // SetIndexInt sets a integer secondary index value 283 | func (in *Info) SetIndexInt(key string, value int64) { 284 | set(&in.idxs, fmtint(key), ustr(strconv.FormatInt(value, 10))) 285 | } 286 | 287 | // Get gets a key-value pair in an indexes object 288 | func (in *Info) GetIndex(key string) (val string) { 289 | return string(get(&in.idxs, fmtbin(key))) 290 | } 291 | 292 | // GetIndexInt gets an integer index value 293 | func (in *Info) GetIndexInt(key string) *int64 { 294 | bts := get(&in.idxs, fmtint(key)) 295 | if bts == nil { 296 | return nil 297 | } 298 | val, _ := strconv.ParseInt(string(bts), 10, 64) 299 | return &val 300 | } 301 | 302 | // RemoveIndex removes a key from the object 303 | func (in *Info) RemoveIndex(key string) { 304 | del(&in.idxs, fmtbin(key)) 305 | } 306 | 307 | // RemoveIndexInt removes an integer index key 308 | // from an object 309 | func (in *Info) RemoveIndexInt(key string) { 310 | del(&in.idxs, fmtint(key)) 311 | } 312 | 313 | // Indexes returns a list of all of the 314 | // key-value pairs in this object. (Key first, 315 | // then value.) Note that string-string 316 | // indexes will have keys postfixed with 317 | // "_bin", and string-int indexes will 318 | // have keys postfixed with "_int", per the 319 | // Riak secondary index specification. 
320 | func (in *Info) Indexes() [][2]string { 321 | return all(&in.idxs) 322 | } 323 | 324 | // AddMeta conditionally adds a key-value pair 325 | // if it didn't exist already 326 | func (in *Info) AddMeta(key string, value string) bool { 327 | return add(&in.meta, []byte(key), []byte(value)) 328 | } 329 | 330 | // SetMeta sets a key-value pair 331 | func (in *Info) SetMeta(key string, value string) { 332 | set(&in.meta, []byte(key), []byte(value)) 333 | } 334 | 335 | // GetMeta gets a meta value 336 | func (in *Info) GetMeta(key string) (val string) { 337 | return string(get(&in.meta, []byte(key))) 338 | } 339 | 340 | // RemoveMeta deletes the meta value 341 | // at a key 342 | func (in *Info) RemoveMeta(key string) { 343 | del(&in.meta, []byte(key)) 344 | } 345 | 346 | // Metas returns all of the metadata 347 | // key-value pairs. (Key first, then value.) 348 | func (in *Info) Metas() [][2]string { 349 | return all(&in.idxs) 350 | } 351 | 352 | // AddLink adds a link conditionally. It returns true 353 | // if the value was already set to this bucket-key pair, 354 | // or if no value existed at 'name'. It returns false otherwise. 
355 | func (in *Info) AddLink(name string, bucket string, key string) bool { 356 | nm := []byte(name) 357 | 358 | // don't duplicate 359 | for _, link := range in.links { 360 | if bytes.Equal(nm, link.GetTag()) { 361 | return false 362 | } 363 | } 364 | 365 | in.links = append(in.links, &rpbc.RpbLink{ 366 | Bucket: []byte(bucket), 367 | Key: []byte(key), 368 | Tag: nm, 369 | }) 370 | return true 371 | } 372 | 373 | // SetLink sets a link for an object 374 | func (in *Info) SetLink(name string, bucket string, key string) { 375 | nm := []byte(name) 376 | for _, link := range in.links { 377 | if bytes.Equal(nm, link.GetTag()) { 378 | link.Bucket = []byte(bucket) 379 | link.Key = []byte(key) 380 | return 381 | } 382 | } 383 | in.links = append(in.links, &rpbc.RpbLink{ 384 | Bucket: []byte(bucket), 385 | Key: []byte(key), 386 | Tag: nm, 387 | }) 388 | return 389 | } 390 | 391 | // RemoveLink removes a link (if it exists) 392 | func (in *Info) RemoveLink(name string) { 393 | nm := []byte(name) 394 | nl := len(in.links) 395 | if nl == 0 { 396 | return 397 | } 398 | for i, link := range in.links { 399 | if bytes.Equal(nm, link.GetTag()) { 400 | // swap and don't preserve order 401 | in.links[i], in.links[nl-1], in.links = in.links[nl-1], nil, in.links[:nl-1] 402 | } 403 | } 404 | } 405 | 406 | // GetLink gets a link from the object 407 | func (in *Info) GetLink(name string) (bucket string, key string) { 408 | nm := []byte(name) 409 | 410 | for _, link := range in.links { 411 | if bytes.Equal(nm, link.GetTag()) { 412 | bucket = string(link.GetBucket()) 413 | key = string(link.GetKey()) 414 | return 415 | } 416 | } 417 | return 418 | } 419 | -------------------------------------------------------------------------------- /object_test.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "testing" 5 | "unsafe" 6 | ) 7 | 8 | func TestClientAlignment(t *testing.T) { 9 | // we're doing atomic operations 10 | 
// on 'conns', 'inuse', and 'tag', so 11 | // let's keep them 8-byte aligned 12 | 13 | cl := Client{} 14 | 15 | t.Logf("Client alignment: %d", unsafe.Alignof(cl)) 16 | if (unsafe.Alignof(cl) % 8) != 0 { 17 | t.Errorf("Wanted 8-byte alignment; addr%8 = %d", unsafe.Alignof(cl)%8) 18 | } 19 | 20 | t.Logf("'conns' offset: %d", unsafe.Offsetof(cl.conns)) 21 | if (unsafe.Offsetof(cl.conns) % 8) != 0 { 22 | t.Errorf("Wanted 8-byte alignment; addr%8 = %d", unsafe.Offsetof(cl.conns)%8) 23 | } 24 | 25 | t.Logf("'inuse' offset: %d", unsafe.Offsetof(cl.inuse)) 26 | if (unsafe.Offsetof(cl.inuse) % 8) != 0 { 27 | t.Errorf("Wanted 8-byte alignment; addr%8 = %d", unsafe.Offsetof(cl.inuse)%8) 28 | } 29 | 30 | t.Logf("'tag' offset: %d", unsafe.Offsetof(cl.tag)) 31 | if (unsafe.Offsetof(cl.tag) % 8) != 0 { 32 | t.Errorf("Wanted 8-byte alignment; addr%8 = %d", unsafe.Offsetof(cl.tag)%8) 33 | } 34 | 35 | } 36 | 37 | func TestAddRemoveLink(t *testing.T) { 38 | info := Info{} 39 | 40 | info.AddLink("testlink", "testbucket", "k") 41 | 42 | bucket, key := info.GetLink("testlink") 43 | if bucket != "testbucket" || key != "k" { 44 | t.Errorf("Bucket: %q; key: %q", bucket, key) 45 | } 46 | 47 | info.RemoveLink("testlink") 48 | bucket, key = info.GetLink("testlink") 49 | if bucket != "" || key != "" { 50 | t.Errorf("Bucket: %q; key: %q", bucket, key) 51 | } 52 | 53 | info.AddLink("testlink", "testbucket", "k1") 54 | info.SetLink("testlink", "newbucket", "k2") 55 | 56 | bucket, key = info.GetLink("testlink") 57 | if bucket != "newbucket" || key != "k2" { 58 | t.Errorf("Bucket: %q; key: %q", bucket, key) 59 | } 60 | } 61 | 62 | func TestAddRemoveIndex(t *testing.T) { 63 | info := Info{} 64 | 65 | info.AddIndex("testidx", "blah") 66 | 67 | val := info.GetIndex("testidx") 68 | if val != "blah" { 69 | t.Errorf("Val: %q", val) 70 | t.Errorf("Indexes: %v", info.idxs) 71 | } 72 | 73 | info.SetIndex("testidx", "newblah") 74 | val = info.GetIndex("testidx") 75 | if val != "newblah" { 76 | t.Errorf("Val: 
%q", val) 77 | } 78 | 79 | info.RemoveIndex("testidx") 80 | val = info.GetIndex("testidx") 81 | if val != "" { 82 | t.Errorf("Val: %q", val) 83 | } 84 | 85 | info.AddIndexInt("myNum", 300) 86 | 87 | ival := info.GetIndexInt("myNum") 88 | if ival == nil || *ival != 300 { 89 | t.Errorf("Ival is %d; expected %d", *ival, 300) 90 | } 91 | 92 | info.SetIndexInt("myNum", -84) 93 | ival = info.GetIndexInt("myNum") 94 | if ival == nil || *ival != -84 { 95 | t.Errorf("Ival is %d; expected %d", *ival, -84) 96 | } 97 | 98 | info.RemoveIndexInt("myNum") 99 | ival = info.GetIndexInt("myNum") 100 | if ival != nil { 101 | t.Errorf("Expected nil; got %d", *ival) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /rpbc/descriptor.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // http://code.google.com/p/protobuf/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 
18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | // Author: kenton@google.com (Kenton Varda) 32 | // Based on original Protocol Buffers design by 33 | // Sanjay Ghemawat, Jeff Dean, and others. 34 | // 35 | // The messages in this file describe the definitions found in .proto files. 36 | // A valid .proto file can be translated directly to a FileDescriptorProto 37 | // without any other information (e.g. without reading its imports). 38 | 39 | 40 | 41 | package google.protobuf; 42 | option java_package = "com.google.protobuf"; 43 | option java_outer_classname = "DescriptorProtos"; 44 | 45 | // descriptor.proto must be optimized for speed because reflection-based 46 | // algorithms don't work during bootstrapping. 47 | option optimize_for = SPEED; 48 | 49 | // The protocol compiler can output a FileDescriptorSet containing the .proto 50 | // files it parses. 51 | message FileDescriptorSet { 52 | repeated FileDescriptorProto file = 1; 53 | } 54 | 55 | // Describes a complete .proto file. 56 | message FileDescriptorProto { 57 | optional string name = 1; // file name, relative to root of source tree 58 | optional string package = 2; // e.g. "foo", "foo.bar", etc. 
59 | 60 | // Names of files imported by this file. 61 | repeated string dependency = 3; 62 | // Indexes of the public imported files in the dependency list above. 63 | repeated int32 public_dependency = 10; 64 | // Indexes of the weak imported files in the dependency list. 65 | // For Google-internal migration only. Do not use. 66 | repeated int32 weak_dependency = 11; 67 | 68 | // All top-level definitions in this file. 69 | repeated DescriptorProto message_type = 4; 70 | repeated EnumDescriptorProto enum_type = 5; 71 | repeated ServiceDescriptorProto service = 6; 72 | repeated FieldDescriptorProto extension = 7; 73 | 74 | optional FileOptions options = 8; 75 | 76 | // This field contains optional information about the original source code. 77 | // You may safely remove this entire field whithout harming runtime 78 | // functionality of the descriptors -- the information is needed only by 79 | // development tools. 80 | optional SourceCodeInfo source_code_info = 9; 81 | } 82 | 83 | // Describes a message type. 84 | message DescriptorProto { 85 | optional string name = 1; 86 | 87 | repeated FieldDescriptorProto field = 2; 88 | repeated FieldDescriptorProto extension = 6; 89 | 90 | repeated DescriptorProto nested_type = 3; 91 | repeated EnumDescriptorProto enum_type = 4; 92 | 93 | message ExtensionRange { 94 | optional int32 start = 1; 95 | optional int32 end = 2; 96 | } 97 | repeated ExtensionRange extension_range = 5; 98 | 99 | optional MessageOptions options = 7; 100 | } 101 | 102 | // Describes a field within a message. 103 | message FieldDescriptorProto { 104 | enum Type { 105 | // 0 is reserved for errors. 106 | // Order is weird for historical reasons. 107 | TYPE_DOUBLE = 1; 108 | TYPE_FLOAT = 2; 109 | // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if 110 | // negative values are likely. 111 | TYPE_INT64 = 3; 112 | TYPE_UINT64 = 4; 113 | // Not ZigZag encoded. Negative numbers take 10 bytes. 
Use TYPE_SINT32 if 114 | // negative values are likely. 115 | TYPE_INT32 = 5; 116 | TYPE_FIXED64 = 6; 117 | TYPE_FIXED32 = 7; 118 | TYPE_BOOL = 8; 119 | TYPE_STRING = 9; 120 | TYPE_GROUP = 10; // Tag-delimited aggregate. 121 | TYPE_MESSAGE = 11; // Length-delimited aggregate. 122 | 123 | // New in version 2. 124 | TYPE_BYTES = 12; 125 | TYPE_UINT32 = 13; 126 | TYPE_ENUM = 14; 127 | TYPE_SFIXED32 = 15; 128 | TYPE_SFIXED64 = 16; 129 | TYPE_SINT32 = 17; // Uses ZigZag encoding. 130 | TYPE_SINT64 = 18; // Uses ZigZag encoding. 131 | }; 132 | 133 | enum Label { 134 | // 0 is reserved for errors 135 | LABEL_OPTIONAL = 1; 136 | LABEL_REQUIRED = 2; 137 | LABEL_REPEATED = 3; 138 | // TODO(sanjay): Should we add LABEL_MAP? 139 | }; 140 | 141 | optional string name = 1; 142 | optional int32 number = 3; 143 | optional Label label = 4; 144 | 145 | // If type_name is set, this need not be set. If both this and type_name 146 | // are set, this must be either TYPE_ENUM or TYPE_MESSAGE. 147 | optional Type type = 5; 148 | 149 | // For message and enum types, this is the name of the type. If the name 150 | // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping 151 | // rules are used to find the type (i.e. first the nested types within this 152 | // message are searched, then within the parent, on up to the root 153 | // namespace). 154 | optional string type_name = 6; 155 | 156 | // For extensions, this is the name of the type being extended. It is 157 | // resolved in the same manner as type_name. 158 | optional string extendee = 2; 159 | 160 | // For numeric types, contains the original text representation of the value. 161 | // For booleans, "true" or "false". 162 | // For strings, contains the default text contents (not escaped in any way). 163 | // For bytes, contains the C escaped value. All bytes >= 128 are escaped. 164 | // TODO(kenton): Base-64 encode? 
165 | optional string default_value = 7; 166 | 167 | optional FieldOptions options = 8; 168 | } 169 | 170 | // Describes an enum type. 171 | message EnumDescriptorProto { 172 | optional string name = 1; 173 | 174 | repeated EnumValueDescriptorProto value = 2; 175 | 176 | optional EnumOptions options = 3; 177 | } 178 | 179 | // Describes a value within an enum. 180 | message EnumValueDescriptorProto { 181 | optional string name = 1; 182 | optional int32 number = 2; 183 | 184 | optional EnumValueOptions options = 3; 185 | } 186 | 187 | // Describes a service. 188 | message ServiceDescriptorProto { 189 | optional string name = 1; 190 | repeated MethodDescriptorProto method = 2; 191 | 192 | optional ServiceOptions options = 3; 193 | } 194 | 195 | // Describes a method of a service. 196 | message MethodDescriptorProto { 197 | optional string name = 1; 198 | 199 | // Input and output type names. These are resolved in the same way as 200 | // FieldDescriptorProto.type_name, but must refer to a message type. 201 | optional string input_type = 2; 202 | optional string output_type = 3; 203 | 204 | optional MethodOptions options = 4; 205 | } 206 | 207 | 208 | // =================================================================== 209 | // Options 210 | 211 | // Each of the definitions above may have "options" attached. These are 212 | // just annotations which may cause code to be generated slightly differently 213 | // or may contain hints for code that manipulates protocol messages. 214 | // 215 | // Clients may define custom options as extensions of the *Options messages. 216 | // These extensions may not yet be known at parsing time, so the parser cannot 217 | // store the values in them. Instead it stores them in a field in the *Options 218 | // message called uninterpreted_option. This field must have the same name 219 | // across all *Options messages. 
We then use this field to populate the 220 | // extensions when we build a descriptor, at which point all protos have been 221 | // parsed and so all extensions are known. 222 | // 223 | // Extension numbers for custom options may be chosen as follows: 224 | // * For options which will only be used within a single application or 225 | // organization, or for experimental options, use field numbers 50000 226 | // through 99999. It is up to you to ensure that you do not use the 227 | // same number for multiple options. 228 | // * For options which will be published and used publicly by multiple 229 | // independent entities, e-mail protobuf-global-extension-registry@google.com 230 | // to reserve extension numbers. Simply provide your project name (e.g. 231 | // Object-C plugin) and your porject website (if available) -- there's no need 232 | // to explain how you intend to use them. Usually you only need one extension 233 | // number. You can declare multiple options with only one extension number by 234 | // putting them in a sub-message. See the Custom Options section of the docs 235 | // for examples: 236 | // http://code.google.com/apis/protocolbuffers/docs/proto.html#options 237 | // If this turns out to be popular, a web service will be set up 238 | // to automatically assign option numbers. 239 | 240 | 241 | message FileOptions { 242 | 243 | // Sets the Java package where classes generated from this .proto will be 244 | // placed. By default, the proto package is used, but this is often 245 | // inappropriate because proto packages do not normally start with backwards 246 | // domain names. 247 | optional string java_package = 1; 248 | 249 | 250 | // If set, all the classes from the .proto file are wrapped in a single 251 | // outer class with the given name. 
This applies to both Proto1 252 | // (equivalent to the old "--one_java_file" option) and Proto2 (where 253 | // a .proto always translates to a single class, but you may want to 254 | // explicitly choose the class name). 255 | optional string java_outer_classname = 8; 256 | 257 | // If set true, then the Java code generator will generate a separate .java 258 | // file for each top-level message, enum, and service defined in the .proto 259 | // file. Thus, these types will *not* be nested inside the outer class 260 | // named by java_outer_classname. However, the outer class will still be 261 | // generated to contain the file's getDescriptor() method as well as any 262 | // top-level extensions defined in the file. 263 | optional bool java_multiple_files = 10 [default=false]; 264 | 265 | // If set true, then the Java code generator will generate equals() and 266 | // hashCode() methods for all messages defined in the .proto file. This is 267 | // purely a speed optimization, as the AbstractMessage base class includes 268 | // reflection-based implementations of these methods. 269 | optional bool java_generate_equals_and_hash = 20 [default=false]; 270 | 271 | // Generated classes can be optimized for speed or code size. 272 | enum OptimizeMode { 273 | SPEED = 1; // Generate complete code for parsing, serialization, 274 | // etc. 275 | CODE_SIZE = 2; // Use ReflectionOps to implement these methods. 276 | LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. 277 | } 278 | optional OptimizeMode optimize_for = 9 [default=SPEED]; 279 | 280 | // Sets the Go package where structs generated from this .proto will be 281 | // placed. There is no default. 282 | optional string go_package = 11; 283 | 284 | 285 | 286 | // Should generic services be generated in each language? "Generic" services 287 | // are not specific to any particular RPC system. They are generated by the 288 | // main code generators in each language (without additional plugins). 
289 | // Generic services were the only kind of service generation supported by 290 | // early versions of proto2. 291 | // 292 | // Generic services are now considered deprecated in favor of using plugins 293 | // that generate code specific to your particular RPC system. Therefore, 294 | // these default to false. Old code which depends on generic services should 295 | // explicitly set them to true. 296 | optional bool cc_generic_services = 16 [default=false]; 297 | optional bool java_generic_services = 17 [default=false]; 298 | optional bool py_generic_services = 18 [default=false]; 299 | 300 | // The parser stores options it doesn't recognize here. See above. 301 | repeated UninterpretedOption uninterpreted_option = 999; 302 | 303 | // Clients can define custom options in extensions of this message. See above. 304 | extensions 1000 to max; 305 | } 306 | 307 | message MessageOptions { 308 | // Set true to use the old proto1 MessageSet wire format for extensions. 309 | // This is provided for backwards-compatibility with the MessageSet wire 310 | // format. You should not use this for any other reason: It's less 311 | // efficient, has fewer features, and is more complicated. 312 | // 313 | // The message must be defined exactly as follows: 314 | // message Foo { 315 | // option message_set_wire_format = true; 316 | // extensions 4 to max; 317 | // } 318 | // Note that the message cannot have any defined fields; MessageSets only 319 | // have extensions. 320 | // 321 | // All extensions of your type must be singular messages; e.g. they cannot 322 | // be int32s, enums, or repeated messages. 323 | // 324 | // Because this is an option, the above two restrictions are not enforced by 325 | // the protocol compiler. 326 | optional bool message_set_wire_format = 1 [default=false]; 327 | 328 | // Disables the generation of the standard "descriptor()" accessor, which can 329 | // conflict with a field of the same name. 
This is meant to make migration 330 | // from proto1 easier; new code should avoid fields named "descriptor". 331 | optional bool no_standard_descriptor_accessor = 2 [default=false]; 332 | 333 | // The parser stores options it doesn't recognize here. See above. 334 | repeated UninterpretedOption uninterpreted_option = 999; 335 | 336 | // Clients can define custom options in extensions of this message. See above. 337 | extensions 1000 to max; 338 | } 339 | 340 | message FieldOptions { 341 | // The ctype option instructs the C++ code generator to use a different 342 | // representation of the field than it normally would. See the specific 343 | // options below. This option is not yet implemented in the open source 344 | // release -- sorry, we'll try to include it in a future version! 345 | optional CType ctype = 1 [default = STRING]; 346 | enum CType { 347 | // Default mode. 348 | STRING = 0; 349 | 350 | CORD = 1; 351 | 352 | STRING_PIECE = 2; 353 | } 354 | // The packed option can be enabled for repeated primitive fields to enable 355 | // a more efficient representation on the wire. Rather than repeatedly 356 | // writing the tag and type for each element, the entire array is encoded as 357 | // a single length-delimited blob. 358 | optional bool packed = 2; 359 | 360 | 361 | 362 | // Should this field be parsed lazily? Lazy applies only to message-type 363 | // fields. It means that when the outer message is initially parsed, the 364 | // inner message's contents will not be parsed but instead stored in encoded 365 | // form. The inner message will actually be parsed when it is first accessed. 366 | // 367 | // This is only a hint. Implementations are free to choose whether to use 368 | // eager or lazy parsing regardless of the value of this option. 
However, 369 | // setting this option true suggests that the protocol author believes that 370 | // using lazy parsing on this field is worth the additional bookkeeping 371 | // overhead typically needed to implement it. 372 | // 373 | // This option does not affect the public interface of any generated code; 374 | // all method signatures remain the same. Furthermore, thread-safety of the 375 | // interface is not affected by this option; const methods remain safe to 376 | // call from multiple threads concurrently, while non-const methods continue 377 | // to require exclusive access. 378 | // 379 | // 380 | // Note that implementations may choose not to check required fields within 381 | // a lazy sub-message. That is, calling IsInitialized() on the outher message 382 | // may return true even if the inner message has missing required fields. 383 | // This is necessary because otherwise the inner message would have to be 384 | // parsed in order to perform the check, defeating the purpose of lazy 385 | // parsing. An implementation which chooses not to check required fields 386 | // must be consistent about it. That is, for any particular sub-message, the 387 | // implementation must either *always* check its required fields, or *never* 388 | // check its required fields, regardless of whether or not the message has 389 | // been parsed. 390 | optional bool lazy = 5 [default=false]; 391 | 392 | // Is this field deprecated? 393 | // Depending on the target platform, this can emit Deprecated annotations 394 | // for accessors, or it will be completely ignored; in the very least, this 395 | // is a formalization for deprecating fields. 396 | optional bool deprecated = 3 [default=false]; 397 | 398 | // EXPERIMENTAL. DO NOT USE. 399 | // For "map" fields, the name of the field in the enclosed type that 400 | // is the key for this map. 
For example, suppose we have: 401 | // message Item { 402 | // required string name = 1; 403 | // required string value = 2; 404 | // } 405 | // message Config { 406 | // repeated Item items = 1 [experimental_map_key="name"]; 407 | // } 408 | // In this situation, the map key for Item will be set to "name". 409 | // TODO: Fully-implement this, then remove the "experimental_" prefix. 410 | optional string experimental_map_key = 9; 411 | 412 | // For Google-internal migration only. Do not use. 413 | optional bool weak = 10 [default=false]; 414 | 415 | // The parser stores options it doesn't recognize here. See above. 416 | repeated UninterpretedOption uninterpreted_option = 999; 417 | 418 | // Clients can define custom options in extensions of this message. See above. 419 | extensions 1000 to max; 420 | } 421 | 422 | message EnumOptions { 423 | 424 | // Set this option to false to disallow mapping different tag names to a same 425 | // value. 426 | optional bool allow_alias = 2 [default=true]; 427 | 428 | // The parser stores options it doesn't recognize here. See above. 429 | repeated UninterpretedOption uninterpreted_option = 999; 430 | 431 | // Clients can define custom options in extensions of this message. See above. 432 | extensions 1000 to max; 433 | } 434 | 435 | message EnumValueOptions { 436 | // The parser stores options it doesn't recognize here. See above. 437 | repeated UninterpretedOption uninterpreted_option = 999; 438 | 439 | // Clients can define custom options in extensions of this message. See above. 440 | extensions 1000 to max; 441 | } 442 | 443 | message ServiceOptions { 444 | 445 | // Note: Field numbers 1 through 32 are reserved for Google's internal RPC 446 | // framework. We apologize for hoarding these numbers to ourselves, but 447 | // we were already using them long before we decided to release Protocol 448 | // Buffers. 449 | 450 | // The parser stores options it doesn't recognize here. See above. 
451 | repeated UninterpretedOption uninterpreted_option = 999; 452 | 453 | // Clients can define custom options in extensions of this message. See above. 454 | extensions 1000 to max; 455 | } 456 | 457 | message MethodOptions { 458 | 459 | // Note: Field numbers 1 through 32 are reserved for Google's internal RPC 460 | // framework. We apologize for hoarding these numbers to ourselves, but 461 | // we were already using them long before we decided to release Protocol 462 | // Buffers. 463 | 464 | // The parser stores options it doesn't recognize here. See above. 465 | repeated UninterpretedOption uninterpreted_option = 999; 466 | 467 | // Clients can define custom options in extensions of this message. See above. 468 | extensions 1000 to max; 469 | } 470 | 471 | 472 | // A message representing a option the parser does not recognize. This only 473 | // appears in options protos created by the compiler::Parser class. 474 | // DescriptorPool resolves these when building Descriptor objects. Therefore, 475 | // options protos in descriptor objects (e.g. returned by Descriptor::options(), 476 | // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions 477 | // in them. 478 | message UninterpretedOption { 479 | // The name of the uninterpreted option. Each string represents a segment in 480 | // a dot-separated name. is_extension is true iff a segment represents an 481 | // extension (denoted with parentheses in options specs in .proto files). 482 | // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents 483 | // "foo.(bar.baz).qux". 484 | message NamePart { 485 | required string name_part = 1; 486 | required bool is_extension = 2; 487 | } 488 | repeated NamePart name = 2; 489 | 490 | // The value of the uninterpreted option, in whatever type the tokenizer 491 | // identified it as during parsing. Exactly one of these should be set. 
492 | optional string identifier_value = 3; 493 | optional uint64 positive_int_value = 4; 494 | optional int64 negative_int_value = 5; 495 | optional double double_value = 6; 496 | optional bytes string_value = 7; 497 | optional string aggregate_value = 8; 498 | } 499 | 500 | // =================================================================== 501 | // Optional source code info 502 | 503 | // Encapsulates information about the original source file from which a 504 | // FileDescriptorProto was generated. 505 | message SourceCodeInfo { 506 | // A Location identifies a piece of source code in a .proto file which 507 | // corresponds to a particular definition. This information is intended 508 | // to be useful to IDEs, code indexers, documentation generators, and similar 509 | // tools. 510 | // 511 | // For example, say we have a file like: 512 | // message Foo { 513 | // optional string foo = 1; 514 | // } 515 | // Let's look at just the field definition: 516 | // optional string foo = 1; 517 | // ^ ^^ ^^ ^ ^^^ 518 | // a bc de f ghi 519 | // We have the following locations: 520 | // span path represents 521 | // [a,i) [ 4, 0, 2, 0 ] The whole field definition. 522 | // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). 523 | // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). 524 | // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). 525 | // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). 526 | // 527 | // Notes: 528 | // - A location may refer to a repeated field itself (i.e. not to any 529 | // particular index within it). This is used whenever a set of elements are 530 | // logically enclosed in a single code segment. For example, an entire 531 | // extend block (possibly containing multiple extension definitions) will 532 | // have an outer location whose path refers to the "extensions" repeated 533 | // field without an index. 534 | // - Multiple locations may have the same path. This happens when a single 535 | // logical declaration is spread out across multiple places. 
The most 536 | // obvious example is the "extend" block again -- there may be multiple 537 | // extend blocks in the same scope, each of which will have the same path. 538 | // - A location's span is not always a subset of its parent's span. For 539 | // example, the "extendee" of an extension declaration appears at the 540 | // beginning of the "extend" block and is shared by all extensions within 541 | // the block. 542 | // - Just because a location's span is a subset of some other location's span 543 | // does not mean that it is a descendent. For example, a "group" defines 544 | // both a type and a field in a single declaration. Thus, the locations 545 | // corresponding to the type and field and their components will overlap. 546 | // - Code which tries to interpret locations should probably be designed to 547 | // ignore those that it doesn't understand, as more types of locations could 548 | // be recorded in the future. 549 | repeated Location location = 1; 550 | message Location { 551 | // Identifies which part of the FileDescriptorProto was defined at this 552 | // location. 553 | // 554 | // Each element is a field number or an index. They form a path from 555 | // the root FileDescriptorProto to the place where the definition. For 556 | // example, this path: 557 | // [ 4, 3, 2, 7, 1 ] 558 | // refers to: 559 | // file.message_type(3) // 4, 3 560 | // .field(7) // 2, 7 561 | // .name() // 1 562 | // This is because FileDescriptorProto.message_type has field number 4: 563 | // repeated DescriptorProto message_type = 4; 564 | // and DescriptorProto.field has field number 2: 565 | // repeated FieldDescriptorProto field = 2; 566 | // and FieldDescriptorProto.name has field number 1: 567 | // optional string name = 1; 568 | // 569 | // Thus, the above path gives the location of a field name. 
If we removed 570 | // the last element: 571 | // [ 4, 3, 2, 7 ] 572 | // this path refers to the whole field declaration (from the beginning 573 | // of the label to the terminating semicolon). 574 | repeated int32 path = 1 [packed=true]; 575 | 576 | // Always has exactly three or four elements: start line, start column, 577 | // end line (optional, otherwise assumed same as start line), end column. 578 | // These are packed into a single field for efficiency. Note that line 579 | // and column numbers are zero-based -- typically you will want to add 580 | // 1 to each before displaying to a user. 581 | repeated int32 span = 2 [packed=true]; 582 | 583 | // If this SourceCodeInfo represents a complete declaration, these are any 584 | // comments appearing before and after the declaration which appear to be 585 | // attached to the declaration. 586 | // 587 | // A series of line comments appearing on consecutive lines, with no other 588 | // tokens appearing on those lines, will be treated as a single comment. 589 | // 590 | // Only the comment content is provided; comment markers (e.g. //) are 591 | // stripped out. For block comments, leading whitespace and an asterisk 592 | // will be stripped from the beginning of each line other than the first. 593 | // Newlines are included in the output. 594 | // 595 | // Examples: 596 | // 597 | // optional int32 foo = 1; // Comment attached to foo. 598 | // // Comment attached to bar. 599 | // optional int32 bar = 2; 600 | // 601 | // optional string baz = 3; 602 | // // Comment attached to baz. 603 | // // Another line attached to baz. 604 | // 605 | // // Comment attached to qux. 606 | // // 607 | // // Another line attached to qux. 608 | // optional double qux = 4; 609 | // 610 | // optional string corge = 5; 611 | // /* Block comment attached 612 | // * to corge. Leading asterisks 613 | // * will be removed. */ 614 | // /* Block comment attached to 615 | // * grault. 
*/ 616 | // optional int32 grault = 6; 617 | optional string leading_comments = 3; 618 | optional string trailing_comments = 4; 619 | } 620 | } 621 | -------------------------------------------------------------------------------- /rpbc/gogo.proto: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 2 | // http://code.google.com/p/gogoprotobuf/gogoproto 3 | // 4 | // Redistribution and use in source and binary forms, with or without 5 | // modification, are permitted provided that the following conditions are 6 | // met: 7 | // 8 | // * Redistributions of source code must retain the above copyright 9 | // notice, this list of conditions and the following disclaimer. 10 | // * Redistributions in binary form must reproduce the above 11 | // copyright notice, this list of conditions and the following disclaimer 12 | // in the documentation and/or other materials provided with the 13 | // distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 | 27 | package gogoproto; 28 | 29 | import "descriptor.proto"; 30 | 31 | extend google.protobuf.EnumOptions { 32 | optional bool goproto_enum_prefix = 62001; 33 | optional bool goproto_enum_stringer = 62021; 34 | optional bool enum_stringer = 62022; 35 | } 36 | 37 | extend google.protobuf.FileOptions { 38 | optional bool goproto_getters_all = 63001; 39 | optional bool goproto_enum_prefix_all = 63002; 40 | optional bool goproto_stringer_all = 63003; 41 | optional bool verbose_equal_all = 63004; 42 | optional bool face_all = 63005; 43 | optional bool gostring_all = 63006; 44 | optional bool populate_all = 63007; 45 | optional bool stringer_all = 63008; 46 | optional bool onlyone_all = 63009; 47 | 48 | optional bool equal_all = 63013; 49 | optional bool description_all = 63014; 50 | optional bool testgen_all = 63015; 51 | optional bool benchgen_all = 63016; 52 | optional bool marshaler_all = 63017; 53 | optional bool unmarshaler_all = 63018; 54 | optional bool bufferto_all = 63019; 55 | optional bool sizer_all = 63020; 56 | 57 | optional bool goproto_enum_stringer_all = 63021; 58 | optional bool enum_stringer_all = 63022; 59 | 60 | optional bool unsafe_marshaler_all = 63023; 61 | optional bool unsafe_unmarshaler_all = 63024; 62 | 63 | optional bool goproto_extensions_map_all = 63025; 64 | } 65 | 66 | extend google.protobuf.MessageOptions { 67 | optional bool goproto_getters = 64001; 68 | optional bool goproto_stringer = 64003; 69 | optional bool verbose_equal = 64004; 70 | optional bool face = 64005; 71 | optional bool gostring = 64006; 72 | optional bool populate = 64007; 73 | optional bool stringer = 67008; 74 | optional bool onlyone = 64009; 75 | 76 | optional bool equal = 64013; 77 | optional bool description = 64014; 78 | optional bool testgen = 64015; 79 | optional bool benchgen = 64016; 80 | optional bool marshaler = 64017; 81 | optional bool unmarshaler = 64018; 82 | optional bool bufferto = 64019; 83 | optional bool sizer = 64020; 84 | 85 | optional bool 
unsafe_marshaler = 64023; 86 | optional bool unsafe_unmarshaler = 64024; 87 | 88 | optional bool goproto_extensions_map = 64025; 89 | } 90 | 91 | extend google.protobuf.FieldOptions { 92 | optional bool nullable = 65001; 93 | optional bool embed = 65002; 94 | optional string customtype = 65003; 95 | optional string customname = 65004; 96 | } 97 | 98 | -------------------------------------------------------------------------------- /rpbc/riak.proto: -------------------------------------------------------------------------------- 1 | /* ------------------------------------------------------------------- 2 | ** 3 | ** riak.proto: Protocol buffers for Riak 4 | ** 5 | ** Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. 6 | ** 7 | ** This file is provided to you under the Apache License, 8 | ** Version 2.0 (the "License"); you may not use this file 9 | ** except in compliance with the License. You may obtain 10 | ** a copy of the License at 11 | ** 12 | ** http://www.apache.org/licenses/LICENSE-2.0 13 | ** 14 | ** Unless required by applicable law or agreed to in writing, 15 | ** software distributed under the License is distributed on an 16 | ** "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | ** KIND, either express or implied. See the License for the 18 | ** specific language governing permissions and limitations 19 | ** under the License. 
20 | ** 21 | ** ------------------------------------------------------------------- 22 | */ 23 | 24 | /* 25 | ** Revision: 1.4 26 | */ 27 | 28 | package rpbc; 29 | 30 | import "gogo.proto"; 31 | 32 | option (gogoproto.unsafe_unmarshaler_all) = true; 33 | option (gogoproto.unsafe_marshaler_all) = true; 34 | option (gogoproto.equal_all) = true; 35 | option (gogoproto.testgen_all) = true; 36 | option (gogoproto.populate_all) = true; 37 | option (gogoproto.benchgen_all) = true; 38 | option (gogoproto.sizer_all) = true; 39 | option (gogoproto.goproto_stringer_all) = true; 40 | 41 | 42 | // Error response - may be generated for any Req 43 | message RpbErrorResp { 44 | required bytes errmsg = 1; 45 | required uint32 errcode = 2; 46 | } 47 | 48 | // Get server info request - no message defined, just send RpbGetServerInfoReq message code 49 | message RpbGetServerInfoResp { 50 | optional bytes node = 1; 51 | optional bytes server_version = 2; 52 | } 53 | 54 | // Key/value pair - used for user metadata, indexes, search doc fields 55 | message RpbPair { 56 | required bytes key = 1; 57 | optional bytes value = 2; 58 | } 59 | 60 | 61 | // Get bucket properties request 62 | message RpbGetBucketReq { 63 | required bytes bucket = 1; 64 | optional bytes type = 2; 65 | } 66 | 67 | // Get bucket properties response 68 | message RpbGetBucketResp { 69 | required RpbBucketProps props = 1; 70 | } 71 | 72 | // Set bucket properties request 73 | message RpbSetBucketReq { 74 | required bytes bucket = 1; 75 | required RpbBucketProps props = 2; 76 | optional bytes type = 3; 77 | } 78 | 79 | // Set bucket properties response - no message defined, just send 80 | // RpbSetBucketResp 81 | 82 | // Reset bucket properties request 83 | message RpbResetBucketReq { 84 | required bytes bucket = 1; 85 | optional bytes type = 2; 86 | } 87 | 88 | // Get bucket properties request 89 | message RpbGetBucketTypeReq { 90 | required bytes type = 1; 91 | } 92 | 93 | // Set bucket properties request 94 | message 
RpbSetBucketTypeReq { 95 | required bytes type = 1; 96 | required RpbBucketProps props = 2; 97 | } 98 | 99 | // Set bucket properties response - no message defined, just send 100 | // RpbSetBucketResp 101 | 102 | // Module-Function pairs for commit hooks and other bucket properties 103 | // that take functions 104 | message RpbModFun { 105 | required bytes module = 1; 106 | required bytes function = 2; 107 | } 108 | 109 | // A commit hook, which may either be a modfun or a JavaScript named 110 | // function 111 | message RpbCommitHook { 112 | optional RpbModFun modfun = 1; 113 | optional bytes name = 2; 114 | } 115 | 116 | // Bucket properties 117 | message RpbBucketProps { 118 | // Declared in riak_core_app 119 | optional uint32 n_val = 1; 120 | optional bool allow_mult = 2; 121 | optional bool last_write_wins = 3; 122 | repeated RpbCommitHook precommit = 4; 123 | optional bool has_precommit = 5 [default = false]; 124 | repeated RpbCommitHook postcommit = 6; 125 | optional bool has_postcommit = 7 [default = false]; 126 | optional RpbModFun chash_keyfun = 8; 127 | 128 | // Declared in riak_kv_app 129 | optional RpbModFun linkfun = 9; 130 | optional uint32 old_vclock = 10; 131 | optional uint32 young_vclock = 11; 132 | optional uint32 big_vclock = 12; 133 | optional uint32 small_vclock = 13; 134 | optional uint32 pr = 14; 135 | optional uint32 r = 15; 136 | optional uint32 w = 16; 137 | optional uint32 pw = 17; 138 | optional uint32 dw = 18; 139 | optional uint32 rw = 19; 140 | optional bool basic_quorum = 20; 141 | optional bool notfound_ok = 21; 142 | 143 | // Used by riak_kv_multi_backend 144 | optional bytes backend = 22; 145 | 146 | // Used by riak_search bucket fixup 147 | optional bool search = 23; 148 | 149 | // Used by riak_repl bucket fixup 150 | enum RpbReplMode { 151 | FALSE = 0; 152 | REALTIME = 1; 153 | FULLSYNC = 2; 154 | TRUE = 3; 155 | } 156 | optional RpbReplMode repl = 24; 157 | 158 | // Search index 159 | optional bytes search_index = 25; 160 | 
161 | // KV Datatypes 162 | optional bytes datatype = 26; 163 | 164 | // KV strong consistency 165 | optional bool consistent = 27; 166 | } 167 | 168 | // Authentication request 169 | message RpbAuthReq { 170 | required bytes user = 1; 171 | required bytes password = 2; 172 | } 173 | -------------------------------------------------------------------------------- /rpbc/riak_dt.proto: -------------------------------------------------------------------------------- 1 | /* ------------------------------------------------------------------- 2 | ** 3 | ** riak_dt.proto: Protocol buffers for Riak data structures/types 4 | ** 5 | ** Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved. 6 | ** 7 | ** This file is provided to you under the Apache License, 8 | ** Version 2.0 (the "License"); you may not use this file 9 | ** except in compliance with the License. You may obtain 10 | ** a copy of the License at 11 | ** 12 | ** http://www.apache.org/licenses/LICENSE-2.0 13 | ** 14 | ** Unless required by applicable law or agreed to in writing, 15 | ** software distributed under the License is distributed on an 16 | ** "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | ** KIND, either express or implied. See the License for the 18 | ** specific language governing permissions and limitations 19 | ** under the License. 
20 | ** 21 | ** ------------------------------------------------------------------- 22 | */ 23 | 24 | /* 25 | ** Revision: 2.0 26 | */ 27 | 28 | package rpbc; 29 | 30 | import "gogo.proto"; 31 | 32 | option (gogoproto.unsafe_unmarshaler_all) = true; 33 | option (gogoproto.unsafe_marshaler_all) = true; 34 | option (gogoproto.equal_all) = true; 35 | option (gogoproto.testgen_all) = true; 36 | option (gogoproto.populate_all) = true; 37 | option (gogoproto.benchgen_all) = true; 38 | option (gogoproto.sizer_all) = true; 39 | option (gogoproto.goproto_stringer_all) = true; 40 | 41 | 42 | import "riak.proto"; 43 | 44 | /* 45 | * =============== DATA STRUCTURES ================= 46 | */ 47 | 48 | /* 49 | * Field names in maps are composed of a binary identifier and a type. 50 | * This is so that two clients can create fields with the same name 51 | * but different types, and they converge independently. 52 | */ 53 | message MapField { 54 | /* 55 | * The types that can be stored in a map are limited to counters, 56 | * sets, registers, flags, and maps. 57 | */ 58 | enum MapFieldType { 59 | COUNTER = 1; 60 | SET = 2; 61 | REGISTER = 3; 62 | FLAG = 4; 63 | MAP = 5; 64 | } 65 | 66 | required bytes name = 1; 67 | required MapFieldType type = 2; 68 | } 69 | 70 | 71 | /* 72 | * An entry in a map is a pair of a field-name and value. The type 73 | * defined in the field determines which value type is expected. 74 | */ 75 | message MapEntry { 76 | required MapField field = 1; 77 | optional sint64 counter_value = 2; 78 | repeated bytes set_value = 3; 79 | optional bytes register_value = 4; 80 | optional bool flag_value = 5; 81 | repeated MapEntry map_value = 6; 82 | } 83 | 84 | /* 85 | * =============== FETCH ================= 86 | */ 87 | 88 | /* 89 | * The equivalent of KV's "RpbGetReq", results in a DtFetchResp. The 90 | * request-time options are limited to ones that are relevant to 91 | * structured data-types. 
92 | */ 93 | message DtFetchReq { 94 | // The identifier: bucket, key and bucket-type 95 | required bytes bucket = 1; 96 | required bytes key = 2; 97 | required bytes type = 3; 98 | 99 | // Request options 100 | optional uint32 r = 4; 101 | optional uint32 pr = 5; 102 | optional bool basic_quorum = 6; 103 | optional bool notfound_ok = 7; 104 | optional uint32 timeout = 8; 105 | optional bool sloppy_quorum = 9; // Experimental, may change/disappear 106 | optional uint32 n_val = 10; // Experimental, may change/disappear 107 | 108 | // For read-only requests or context-free operations, you can set 109 | // this to false to reduce the size of the response payload. 110 | optional bool include_context = 11 [default=true]; 111 | } 112 | 113 | 114 | /* 115 | * The value of the fetched data type. If present in the response, 116 | * then empty values (sets, maps) should be treated as such. 117 | */ 118 | message DtValue { 119 | optional sint64 counter_value = 1; 120 | repeated bytes set_value = 2; 121 | repeated MapEntry map_value = 3; 122 | } 123 | 124 | 125 | /* 126 | * The response to a "Fetch" request. If the `include_context` option 127 | * is specified, an opaque "context" value will be returned along with 128 | * the user-friendly data. When sending an "Update" request, the 129 | * client should send this context as well, similar to how one would 130 | * send a vclock for KV updates. The `type` field indicates which 131 | * value type to expect. When the `value` field is missing from the 132 | * message, the client should interpret it as a "not found". 133 | */ 134 | message DtFetchResp { 135 | enum DataType { 136 | COUNTER = 1; 137 | SET = 2; 138 | MAP = 3; 139 | } 140 | 141 | optional bytes context = 1; 142 | required DataType type = 2; 143 | optional DtValue value = 3; 144 | } 145 | 146 | /* 147 | * =============== UPDATE ================= 148 | */ 149 | 150 | /* 151 | * An operation to update a Counter, either on its own or inside a 152 | * Map. 
The `increment` field can be positive or negative. When absent, 153 | * the meaning is an increment by 1. 154 | */ 155 | message CounterOp { 156 | optional sint64 increment = 1; 157 | } 158 | 159 | /* 160 | * An operation to update a Set, either on its own or inside a Map. 161 | * Set members are opaque binary values, you can only add or remove 162 | * them from a Set. 163 | */ 164 | message SetOp { 165 | repeated bytes adds = 1; 166 | repeated bytes removes = 2; 167 | } 168 | 169 | /* 170 | * An operation to be applied to a value stored in a Map -- the 171 | * contents of an UPDATE operation. The operation field that is 172 | * present depends on the type of the field to which it is applied. 173 | */ 174 | message MapUpdate { 175 | /* 176 | * Flags only exist inside Maps and can only be enabled or 177 | * disabled, and there are no arguments to the operations. 178 | */ 179 | enum FlagOp { 180 | ENABLE = 1; 181 | DISABLE = 2; 182 | } 183 | 184 | required MapField field = 1; 185 | 186 | optional CounterOp counter_op = 2; 187 | optional SetOp set_op = 3; 188 | 189 | /* 190 | * There is only one operation on a register, which is to set its 191 | * value, therefore the "operation" is the new value. 192 | */ 193 | optional bytes register_op = 4; 194 | optional FlagOp flag_op = 5; 195 | optional MapOp map_op = 6; 196 | } 197 | 198 | /* 199 | * An operation to update a Map. All operations apply to individual 200 | * fields in the Map. 201 | */ 202 | message MapOp { 203 | /* 204 | * REMOVE removes a field and value from the Map. 205 | * UPDATE applies type-specific 206 | * operations to the values stored in the Map. 207 | */ 208 | repeated MapField removes = 1; 209 | repeated MapUpdate updates = 2; 210 | } 211 | 212 | /* 213 | * A "union" type for update operations. The included operation 214 | * depends on the datatype being updated. 
215 | */ 216 | message DtOp { 217 | optional CounterOp counter_op = 1; 218 | optional SetOp set_op = 2; 219 | optional MapOp map_op = 3; 220 | } 221 | 222 | /* 223 | * The equivalent of KV's "RpbPutReq", results in an empty response or 224 | * "DtUpdateResp" if `return_body` is specified, or the key is 225 | * assigned by the server. The request-time options are limited to 226 | * ones that are relevant to structured data-types. 227 | */ 228 | message DtUpdateReq { 229 | // The identifier 230 | required bytes bucket = 1; 231 | optional bytes key = 2; // missing key results in server-assigned key, like KV 232 | required bytes type = 3; // bucket type, not data-type (but the data-type is constrained per bucket-type) 233 | 234 | // Opaque update-context 235 | optional bytes context = 4; 236 | 237 | // The operations 238 | required DtOp op = 5; 239 | 240 | // Request options 241 | optional uint32 w = 6; 242 | optional uint32 dw = 7; 243 | optional uint32 pw = 8; 244 | optional bool return_body = 9 [default=false]; 245 | optional uint32 timeout = 10; 246 | optional bool sloppy_quorum = 11; // Experimental, may change/disappear 247 | optional uint32 n_val = 12; // Experimental, may change/disappear 248 | optional bool include_context = 13 [default=true]; // When return_body is true, should the context be returned too? 249 | } 250 | 251 | 252 | /* 253 | * The equivalent of KV's "RpbPutResp", contains the assigned key if 254 | * it was assigned by the server, and the resulting value and context 255 | * if return_body was set. 256 | */ 257 | message DtUpdateResp { 258 | // The key, if assigned by the server 259 | optional bytes key = 1; 260 | 261 | // The opaque update context and value, if return_body was set. 
262 | optional bytes context = 2; 263 | optional sint64 counter_value = 3; 264 | repeated bytes set_value = 4; 265 | repeated MapEntry map_value = 5; 266 | } -------------------------------------------------------------------------------- /rpbc/riak_kv.proto: -------------------------------------------------------------------------------- 1 | /* ------------------------------------------------------------------- 2 | ** 3 | ** riak_kv.proto: Protocol buffers for riak KV 4 | ** 5 | ** Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. 6 | ** 7 | ** This file is provided to you under the Apache License, 8 | ** Version 2.0 (the "License"); you may not use this file 9 | ** except in compliance with the License. You may obtain 10 | ** a copy of the License at 11 | ** 12 | ** http://www.apache.org/licenses/LICENSE-2.0 13 | ** 14 | ** Unless required by applicable law or agreed to in writing, 15 | ** software distributed under the License is distributed on an 16 | ** "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | ** KIND, either express or implied. See the License for the 18 | ** specific language governing permissions and limitations 19 | ** under the License. 
20 | ** 21 | ** ------------------------------------------------------------------- 22 | */ 23 | 24 | /* 25 | ** Revision: 1.4 26 | */ 27 | 28 | package rpbc; 29 | 30 | import "gogo.proto"; 31 | 32 | option (gogoproto.unsafe_unmarshaler_all) = true; 33 | option (gogoproto.unsafe_marshaler_all) = true; 34 | option (gogoproto.equal_all) = true; 35 | option (gogoproto.testgen_all) = true; 36 | option (gogoproto.populate_all) = true; 37 | option (gogoproto.benchgen_all) = true; 38 | option (gogoproto.sizer_all) = true; 39 | option (gogoproto.goproto_stringer_all) = true; 40 | 41 | 42 | import "riak.proto"; 43 | 44 | 45 | // Get ClientId Request - no message defined, just send RpbGetClientIdReq message code 46 | message RpbGetClientIdResp { 47 | required bytes client_id = 1; // Client id in use for this connection 48 | } 49 | 50 | message RpbSetClientIdReq { 51 | required bytes client_id = 1; // Client id to use for this connection 52 | } 53 | // Set ClientId Request - no message defined, just send RpbSetClientIdReq message code 54 | 55 | 56 | // Get Request - retrieve bucket/key 57 | message RpbGetReq { 58 | required bytes bucket = 1; 59 | required bytes key = 2; 60 | optional uint32 r = 3; 61 | optional uint32 pr = 4; 62 | optional bool basic_quorum = 5; 63 | optional bool notfound_ok = 6; 64 | optional bytes if_modified = 7; // fail if the supplied vclock does not match 65 | optional bool head = 8; // return everything but the value 66 | optional bool deletedvclock = 9; // return the tombstone's vclock, if applicable 67 | optional uint32 timeout = 10; 68 | optional bool sloppy_quorum = 11; // Experimental, may change/disappear 69 | optional uint32 n_val = 12; // Experimental, may change/disappear 70 | optional bytes type = 13; // Bucket type, if not set we assume the 'default' type 71 | } 72 | 73 | // Get Response - if the record was not found there will be no content/vclock 74 | message RpbGetResp { 75 | repeated RpbContent content = 1; 76 | optional bytes vclock = 
2; // the opaque vector clock for the object 77 | optional bool unchanged = 3; 78 | } 79 | 80 | 81 | // Put request - if options.return_body is set then the updated metadata/data for 82 | // the key will be returned. 83 | message RpbPutReq { 84 | required bytes bucket = 1; 85 | optional bytes key = 2; 86 | optional bytes vclock = 3; 87 | required RpbContent content = 4; 88 | optional uint32 w = 5; 89 | optional uint32 dw = 6; 90 | optional bool return_body = 7; 91 | optional uint32 pw = 8; 92 | optional bool if_not_modified = 9; 93 | optional bool if_none_match = 10; 94 | optional bool return_head = 11; 95 | optional uint32 timeout = 12; 96 | optional bool asis = 13; 97 | optional bool sloppy_quorum = 14; // Experimental, may change/disappear 98 | optional uint32 n_val = 15; // Experimental, may change/disappear 99 | optional bytes type = 16; // Bucket type, if not set we assume the 'default' type 100 | } 101 | 102 | // Put response - same as get response with optional key if one was generated 103 | message RpbPutResp { 104 | repeated RpbContent content = 1; 105 | optional bytes vclock = 2; // the opaque vector clock for the object 106 | optional bytes key = 3; // the key generated, if any 107 | } 108 | 109 | 110 | // Delete request 111 | message RpbDelReq { 112 | required bytes bucket = 1; 113 | required bytes key = 2; 114 | optional uint32 rw = 3; 115 | optional bytes vclock = 4; 116 | optional uint32 r = 5; 117 | optional uint32 w = 6; 118 | optional uint32 pr = 7; 119 | optional uint32 pw = 8; 120 | optional uint32 dw = 9; 121 | optional uint32 timeout = 10; 122 | optional bool sloppy_quorum = 11; // Experimental, may change/disappear 123 | optional uint32 n_val = 12; // Experimental, may change/disappear 124 | optional bytes type = 13; // Bucket type, if not set we assume the 'default' type 125 | } 126 | 127 | // Delete response - not defined, will return a RpbDelResp on success or RpbErrorResp on failure 128 | 129 | // List buckets request 130 | message 
RpbListBucketsReq { 131 | optional uint32 timeout = 1; 132 | optional bool stream = 2; 133 | optional bytes type = 3; // Bucket type, if not set we assume the 'default' type 134 | } 135 | 136 | // List buckets response - one or more of these packets will be sent 137 | // the last one will have done set true (and may not have any buckets in it) 138 | message RpbListBucketsResp { 139 | repeated bytes buckets = 1; 140 | optional bool done = 2; 141 | } 142 | 143 | 144 | // List keys in bucket request 145 | message RpbListKeysReq { 146 | required bytes bucket = 1; 147 | optional uint32 timeout = 2; 148 | optional bytes type = 3; // Bucket type, if not set we assume the 'default' type 149 | } 150 | 151 | // List keys in bucket response - one or more of these packets will be sent 152 | // the last one will have done set true (and may not have any keys in it) 153 | message RpbListKeysResp { 154 | repeated bytes keys = 1; 155 | optional bool done = 2; 156 | } 157 | 158 | 159 | // Map/Reduce request 160 | message RpbMapRedReq { 161 | required bytes request = 1; 162 | required bytes content_type = 2; 163 | } 164 | 165 | // Map/Reduce response 166 | // one or more of these packets will be sent the last one will have done set 167 | // true (and may not have phase/data in it) 168 | message RpbMapRedResp { 169 | optional uint32 phase = 1; 170 | optional bytes response = 2; 171 | optional bool done = 3; 172 | } 173 | 174 | // Secondary Index query request 175 | message RpbIndexReq { 176 | enum IndexQueryType { 177 | eq = 0; 178 | range = 1; 179 | } 180 | 181 | required bytes bucket = 1; 182 | required bytes index = 2; 183 | required IndexQueryType qtype = 3; 184 | optional bytes key = 4; // key here means equals value for index? 
185 | optional bytes range_min = 5; 186 | optional bytes range_max = 6; 187 | optional bool return_terms = 7; 188 | optional bool stream = 8; 189 | optional uint32 max_results = 9; 190 | optional bytes continuation = 10; 191 | optional uint32 timeout = 11; 192 | optional bytes type = 12; // Bucket type, if not set we assume the 'default' type 193 | optional bytes term_regex = 13; 194 | // Whether to use pagination sort for non-paginated queries 195 | optional bool pagination_sort = 14; 196 | } 197 | 198 | // Secondary Index query response 199 | message RpbIndexResp { 200 | repeated bytes keys = 1; 201 | repeated RpbPair results = 2; 202 | optional bytes continuation = 3; 203 | optional bool done = 4; 204 | } 205 | 206 | // added solely for riak_cs currently 207 | // for folding over a bucket and returning 208 | // objects. 209 | message RpbCSBucketReq { 210 | required bytes bucket = 1; 211 | required bytes start_key = 2; 212 | optional bytes end_key = 3; 213 | optional bool start_incl = 4 [default = true]; 214 | optional bool end_incl = 5 [default = false]; 215 | optional bytes continuation = 6; 216 | optional uint32 max_results = 7; 217 | optional uint32 timeout = 8; 218 | optional bytes type = 9; // Bucket type, if not set we assume the 'default' type 219 | } 220 | 221 | // return for CS bucket fold 222 | message RpbCSBucketResp { 223 | repeated RpbIndexObject objects = 1; 224 | optional bytes continuation = 2; 225 | optional bool done = 3; 226 | } 227 | 228 | message RpbIndexObject { 229 | required bytes key = 1; 230 | required RpbGetResp object = 2; 231 | } 232 | 233 | // Content message included in get/put responses 234 | // Holds the value and associated metadata 235 | message RpbContent { 236 | required bytes value = 1; 237 | optional bytes content_type = 2; // the media type/format 238 | optional bytes charset = 3; 239 | optional bytes content_encoding = 4; 240 | optional bytes vtag = 5; 241 | repeated RpbLink links = 6; // links to other resources 242 | 
optional uint32 last_mod = 7; 243 | optional uint32 last_mod_usecs = 8; 244 | repeated RpbPair usermeta = 9; // user metadata stored with the object 245 | repeated RpbPair indexes = 10; // user metadata stored with the object 246 | optional bool deleted = 11; 247 | } 248 | 249 | // Link metadata 250 | message RpbLink { 251 | optional bytes bucket = 1; 252 | optional bytes key = 2; 253 | optional bytes tag = 3; 254 | } 255 | 256 | // Counter update request 257 | message RpbCounterUpdateReq { 258 | required bytes bucket = 1; 259 | required bytes key = 2; 260 | required sint64 amount = 3; 261 | optional uint32 w = 4; 262 | optional uint32 dw = 5; 263 | optional uint32 pw = 6; 264 | optional bool returnvalue = 7; 265 | } 266 | 267 | // Counter update response? No message | error response 268 | message RpbCounterUpdateResp { 269 | optional sint64 value = 1; 270 | } 271 | 272 | // counter value 273 | message RpbCounterGetReq { 274 | required bytes bucket = 1; 275 | required bytes key = 2; 276 | optional uint32 r = 3; 277 | optional uint32 pr = 4; 278 | optional bool basic_quorum = 5; 279 | optional bool notfound_ok = 6; 280 | } 281 | 282 | // Counter value response 283 | message RpbCounterGetResp { 284 | optional sint64 value = 1; 285 | } 286 | -------------------------------------------------------------------------------- /rpbc/riak_search.proto: -------------------------------------------------------------------------------- 1 | /* ------------------------------------------------------------------- 2 | ** 3 | ** riak_search.proto: Protocol buffers for Riak Search 4 | ** 5 | ** Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. 6 | ** 7 | ** This file is provided to you under the Apache License, 8 | ** Version 2.0 (the "License"); you may not use this file 9 | ** except in compliance with the License. 
You may obtain 10 | ** a copy of the License at 11 | ** 12 | ** http://www.apache.org/licenses/LICENSE-2.0 13 | ** 14 | ** Unless required by applicable law or agreed to in writing, 15 | ** software distributed under the License is distributed on an 16 | ** "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | ** KIND, either express or implied. See the License for the 18 | ** specific language governing permissions and limitations 19 | ** under the License. 20 | ** 21 | ** ------------------------------------------------------------------- 22 | */ 23 | 24 | /* 25 | ** Revision: 1.4 26 | */ 27 | 28 | package rpbc; 29 | 30 | import "gogo.proto"; 31 | 32 | option (gogoproto.unsafe_unmarshaler_all) = true; 33 | option (gogoproto.unsafe_marshaler_all) = true; 34 | option (gogoproto.equal_all) = true; 35 | option (gogoproto.testgen_all) = true; 36 | option (gogoproto.populate_all) = true; 37 | option (gogoproto.benchgen_all) = true; 38 | option (gogoproto.sizer_all) = true; 39 | option (gogoproto.goproto_stringer_all) = true; 40 | 41 | 42 | import "riak.proto"; 43 | 44 | message RpbSearchDoc { 45 | repeated RpbPair fields = 1; 46 | } 47 | 48 | message RpbSearchQueryReq { 49 | required bytes q = 1; // Query string 50 | required bytes index = 2; // Index 51 | optional uint32 rows = 3; // Limit rows 52 | optional uint32 start = 4; // Starting offset 53 | optional bytes sort = 5; // Sort order 54 | optional bytes filter = 6; // Inline fields filtering query 55 | optional bytes df = 7; // Default field 56 | optional bytes op = 8; // Default op 57 | repeated bytes fl = 9; // Return fields limit (for ids only, generally) 58 | optional bytes presort = 10; // Presort (key / score) 59 | } 60 | 61 | message RpbSearchQueryResp { 62 | repeated RpbSearchDoc docs = 1; // Result documents 63 | optional float max_score = 2; // Maximum score 64 | optional uint32 num_found = 3; // Number of results 65 | } 66 | 
-------------------------------------------------------------------------------- /rpbc/riak_searchpb_test.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-gogo. 2 | // source: riak_search.proto 3 | // DO NOT EDIT! 4 | 5 | /* 6 | Package rpbc is a generated protocol buffer package. 7 | 8 | It is generated from these files: 9 | riak_search.proto 10 | 11 | It has these top-level messages: 12 | RpbSearchDoc 13 | RpbSearchQueryReq 14 | RpbSearchQueryResp 15 | */ 16 | package rpbc 17 | 18 | import testing4 "testing" 19 | import math_rand4 "math/rand" 20 | import time4 "time" 21 | import code_google_com_p_gogoprotobuf_proto3 "code.google.com/p/gogoprotobuf/proto" 22 | import testing5 "testing" 23 | import math_rand5 "math/rand" 24 | import time5 "time" 25 | import encoding_json1 "encoding/json" 26 | import testing6 "testing" 27 | import math_rand6 "math/rand" 28 | import time6 "time" 29 | import code_google_com_p_gogoprotobuf_proto4 "code.google.com/p/gogoprotobuf/proto" 30 | import math_rand7 "math/rand" 31 | import time7 "time" 32 | import testing7 "testing" 33 | import code_google_com_p_gogoprotobuf_proto5 "code.google.com/p/gogoprotobuf/proto" 34 | 35 | func TestRpbSearchDocProto(t *testing4.T) { 36 | popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano())) 37 | p := NewPopulatedRpbSearchDoc(popr, false) 38 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(p) 39 | if err != nil { 40 | panic(err) 41 | } 42 | msg := &RpbSearchDoc{} 43 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(data, msg); err != nil { 44 | panic(err) 45 | } 46 | for i := range data { 47 | data[i] = byte(popr.Intn(256)) 48 | } 49 | if !p.Equal(msg) { 50 | t.Fatalf("%#v !Proto %#v", msg, p) 51 | } 52 | } 53 | 54 | func TestRpbSearchDocMarshalTo(t *testing4.T) { 55 | popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano())) 56 | p := NewPopulatedRpbSearchDoc(popr, false) 57 | size := 
p.Size() 58 | data := make([]byte, size) 59 | for i := range data { 60 | data[i] = byte(popr.Intn(256)) 61 | } 62 | _, err := p.MarshalTo(data) 63 | if err != nil { 64 | panic(err) 65 | } 66 | msg := &RpbSearchDoc{} 67 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(data, msg); err != nil { 68 | panic(err) 69 | } 70 | for i := range data { 71 | data[i] = byte(popr.Intn(256)) 72 | } 73 | if !p.Equal(msg) { 74 | t.Fatalf("%#v !Proto %#v", msg, p) 75 | } 76 | } 77 | 78 | func BenchmarkRpbSearchDocProtoMarshal(b *testing4.B) { 79 | popr := math_rand4.New(math_rand4.NewSource(616)) 80 | total := 0 81 | pops := make([]*RpbSearchDoc, 10000) 82 | for i := 0; i < 10000; i++ { 83 | pops[i] = NewPopulatedRpbSearchDoc(popr, false) 84 | } 85 | b.ResetTimer() 86 | for i := 0; i < b.N; i++ { 87 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(pops[i%10000]) 88 | if err != nil { 89 | panic(err) 90 | } 91 | total += len(data) 92 | } 93 | b.SetBytes(int64(total / b.N)) 94 | } 95 | 96 | func BenchmarkRpbSearchDocProtoUnmarshal(b *testing4.B) { 97 | popr := math_rand4.New(math_rand4.NewSource(616)) 98 | total := 0 99 | datas := make([][]byte, 10000) 100 | for i := 0; i < 10000; i++ { 101 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(NewPopulatedRpbSearchDoc(popr, false)) 102 | if err != nil { 103 | panic(err) 104 | } 105 | datas[i] = data 106 | } 107 | msg := &RpbSearchDoc{} 108 | b.ResetTimer() 109 | for i := 0; i < b.N; i++ { 110 | total += len(datas[i%10000]) 111 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(datas[i%10000], msg); err != nil { 112 | panic(err) 113 | } 114 | } 115 | b.SetBytes(int64(total / b.N)) 116 | } 117 | 118 | func TestRpbSearchQueryReqProto(t *testing4.T) { 119 | popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano())) 120 | p := NewPopulatedRpbSearchQueryReq(popr, false) 121 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(p) 122 | if err != nil { 123 | panic(err) 124 | } 125 | msg 
:= &RpbSearchQueryReq{} 126 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(data, msg); err != nil { 127 | panic(err) 128 | } 129 | for i := range data { 130 | data[i] = byte(popr.Intn(256)) 131 | } 132 | if !p.Equal(msg) { 133 | t.Fatalf("%#v !Proto %#v", msg, p) 134 | } 135 | } 136 | 137 | func TestRpbSearchQueryReqMarshalTo(t *testing4.T) { 138 | popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano())) 139 | p := NewPopulatedRpbSearchQueryReq(popr, false) 140 | size := p.Size() 141 | data := make([]byte, size) 142 | for i := range data { 143 | data[i] = byte(popr.Intn(256)) 144 | } 145 | _, err := p.MarshalTo(data) 146 | if err != nil { 147 | panic(err) 148 | } 149 | msg := &RpbSearchQueryReq{} 150 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(data, msg); err != nil { 151 | panic(err) 152 | } 153 | for i := range data { 154 | data[i] = byte(popr.Intn(256)) 155 | } 156 | if !p.Equal(msg) { 157 | t.Fatalf("%#v !Proto %#v", msg, p) 158 | } 159 | } 160 | 161 | func BenchmarkRpbSearchQueryReqProtoMarshal(b *testing4.B) { 162 | popr := math_rand4.New(math_rand4.NewSource(616)) 163 | total := 0 164 | pops := make([]*RpbSearchQueryReq, 10000) 165 | for i := 0; i < 10000; i++ { 166 | pops[i] = NewPopulatedRpbSearchQueryReq(popr, false) 167 | } 168 | b.ResetTimer() 169 | for i := 0; i < b.N; i++ { 170 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(pops[i%10000]) 171 | if err != nil { 172 | panic(err) 173 | } 174 | total += len(data) 175 | } 176 | b.SetBytes(int64(total / b.N)) 177 | } 178 | 179 | func BenchmarkRpbSearchQueryReqProtoUnmarshal(b *testing4.B) { 180 | popr := math_rand4.New(math_rand4.NewSource(616)) 181 | total := 0 182 | datas := make([][]byte, 10000) 183 | for i := 0; i < 10000; i++ { 184 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(NewPopulatedRpbSearchQueryReq(popr, false)) 185 | if err != nil { 186 | panic(err) 187 | } 188 | datas[i] = data 189 | } 190 | msg := &RpbSearchQueryReq{} 191 
| b.ResetTimer() 192 | for i := 0; i < b.N; i++ { 193 | total += len(datas[i%10000]) 194 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(datas[i%10000], msg); err != nil { 195 | panic(err) 196 | } 197 | } 198 | b.SetBytes(int64(total / b.N)) 199 | } 200 | 201 | func TestRpbSearchQueryRespProto(t *testing4.T) { 202 | popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano())) 203 | p := NewPopulatedRpbSearchQueryResp(popr, false) 204 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(p) 205 | if err != nil { 206 | panic(err) 207 | } 208 | msg := &RpbSearchQueryResp{} 209 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(data, msg); err != nil { 210 | panic(err) 211 | } 212 | for i := range data { 213 | data[i] = byte(popr.Intn(256)) 214 | } 215 | if !p.Equal(msg) { 216 | t.Fatalf("%#v !Proto %#v", msg, p) 217 | } 218 | } 219 | 220 | func TestRpbSearchQueryRespMarshalTo(t *testing4.T) { 221 | popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano())) 222 | p := NewPopulatedRpbSearchQueryResp(popr, false) 223 | size := p.Size() 224 | data := make([]byte, size) 225 | for i := range data { 226 | data[i] = byte(popr.Intn(256)) 227 | } 228 | _, err := p.MarshalTo(data) 229 | if err != nil { 230 | panic(err) 231 | } 232 | msg := &RpbSearchQueryResp{} 233 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(data, msg); err != nil { 234 | panic(err) 235 | } 236 | for i := range data { 237 | data[i] = byte(popr.Intn(256)) 238 | } 239 | if !p.Equal(msg) { 240 | t.Fatalf("%#v !Proto %#v", msg, p) 241 | } 242 | } 243 | 244 | func BenchmarkRpbSearchQueryRespProtoMarshal(b *testing4.B) { 245 | popr := math_rand4.New(math_rand4.NewSource(616)) 246 | total := 0 247 | pops := make([]*RpbSearchQueryResp, 10000) 248 | for i := 0; i < 10000; i++ { 249 | pops[i] = NewPopulatedRpbSearchQueryResp(popr, false) 250 | } 251 | b.ResetTimer() 252 | for i := 0; i < b.N; i++ { 253 | data, err := 
code_google_com_p_gogoprotobuf_proto3.Marshal(pops[i%10000]) 254 | if err != nil { 255 | panic(err) 256 | } 257 | total += len(data) 258 | } 259 | b.SetBytes(int64(total / b.N)) 260 | } 261 | 262 | func BenchmarkRpbSearchQueryRespProtoUnmarshal(b *testing4.B) { 263 | popr := math_rand4.New(math_rand4.NewSource(616)) 264 | total := 0 265 | datas := make([][]byte, 10000) 266 | for i := 0; i < 10000; i++ { 267 | data, err := code_google_com_p_gogoprotobuf_proto3.Marshal(NewPopulatedRpbSearchQueryResp(popr, false)) 268 | if err != nil { 269 | panic(err) 270 | } 271 | datas[i] = data 272 | } 273 | msg := &RpbSearchQueryResp{} 274 | b.ResetTimer() 275 | for i := 0; i < b.N; i++ { 276 | total += len(datas[i%10000]) 277 | if err := code_google_com_p_gogoprotobuf_proto3.Unmarshal(datas[i%10000], msg); err != nil { 278 | panic(err) 279 | } 280 | } 281 | b.SetBytes(int64(total / b.N)) 282 | } 283 | 284 | func TestRpbSearchDocJSON(t *testing5.T) { 285 | popr := math_rand5.New(math_rand5.NewSource(time5.Now().UnixNano())) 286 | p := NewPopulatedRpbSearchDoc(popr, true) 287 | jsondata, err := encoding_json1.Marshal(p) 288 | if err != nil { 289 | panic(err) 290 | } 291 | msg := &RpbSearchDoc{} 292 | err = encoding_json1.Unmarshal(jsondata, msg) 293 | if err != nil { 294 | panic(err) 295 | } 296 | if !p.Equal(msg) { 297 | t.Fatalf("%#v !Json Equal %#v", msg, p) 298 | } 299 | } 300 | func TestRpbSearchQueryReqJSON(t *testing5.T) { 301 | popr := math_rand5.New(math_rand5.NewSource(time5.Now().UnixNano())) 302 | p := NewPopulatedRpbSearchQueryReq(popr, true) 303 | jsondata, err := encoding_json1.Marshal(p) 304 | if err != nil { 305 | panic(err) 306 | } 307 | msg := &RpbSearchQueryReq{} 308 | err = encoding_json1.Unmarshal(jsondata, msg) 309 | if err != nil { 310 | panic(err) 311 | } 312 | if !p.Equal(msg) { 313 | t.Fatalf("%#v !Json Equal %#v", msg, p) 314 | } 315 | } 316 | func TestRpbSearchQueryRespJSON(t *testing5.T) { 317 | popr := 
math_rand5.New(math_rand5.NewSource(time5.Now().UnixNano())) 318 | p := NewPopulatedRpbSearchQueryResp(popr, true) 319 | jsondata, err := encoding_json1.Marshal(p) 320 | if err != nil { 321 | panic(err) 322 | } 323 | msg := &RpbSearchQueryResp{} 324 | err = encoding_json1.Unmarshal(jsondata, msg) 325 | if err != nil { 326 | panic(err) 327 | } 328 | if !p.Equal(msg) { 329 | t.Fatalf("%#v !Json Equal %#v", msg, p) 330 | } 331 | } 332 | func TestRpbSearchDocProtoText(t *testing6.T) { 333 | popr := math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano())) 334 | p := NewPopulatedRpbSearchDoc(popr, true) 335 | data := code_google_com_p_gogoprotobuf_proto4.MarshalTextString(p) 336 | msg := &RpbSearchDoc{} 337 | if err := code_google_com_p_gogoprotobuf_proto4.UnmarshalText(data, msg); err != nil { 338 | panic(err) 339 | } 340 | if !p.Equal(msg) { 341 | t.Fatalf("%#v !Proto %#v", msg, p) 342 | } 343 | } 344 | 345 | func TestRpbSearchDocProtoCompactText(t *testing6.T) { 346 | popr := math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano())) 347 | p := NewPopulatedRpbSearchDoc(popr, true) 348 | data := code_google_com_p_gogoprotobuf_proto4.CompactTextString(p) 349 | msg := &RpbSearchDoc{} 350 | if err := code_google_com_p_gogoprotobuf_proto4.UnmarshalText(data, msg); err != nil { 351 | panic(err) 352 | } 353 | if !p.Equal(msg) { 354 | t.Fatalf("%#v !Proto %#v", msg, p) 355 | } 356 | } 357 | 358 | func TestRpbSearchQueryReqProtoText(t *testing6.T) { 359 | popr := math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano())) 360 | p := NewPopulatedRpbSearchQueryReq(popr, true) 361 | data := code_google_com_p_gogoprotobuf_proto4.MarshalTextString(p) 362 | msg := &RpbSearchQueryReq{} 363 | if err := code_google_com_p_gogoprotobuf_proto4.UnmarshalText(data, msg); err != nil { 364 | panic(err) 365 | } 366 | if !p.Equal(msg) { 367 | t.Fatalf("%#v !Proto %#v", msg, p) 368 | } 369 | } 370 | 371 | func TestRpbSearchQueryReqProtoCompactText(t *testing6.T) { 372 | popr := 
math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano())) 373 | p := NewPopulatedRpbSearchQueryReq(popr, true) 374 | data := code_google_com_p_gogoprotobuf_proto4.CompactTextString(p) 375 | msg := &RpbSearchQueryReq{} 376 | if err := code_google_com_p_gogoprotobuf_proto4.UnmarshalText(data, msg); err != nil { 377 | panic(err) 378 | } 379 | if !p.Equal(msg) { 380 | t.Fatalf("%#v !Proto %#v", msg, p) 381 | } 382 | } 383 | 384 | func TestRpbSearchQueryRespProtoText(t *testing6.T) { 385 | popr := math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano())) 386 | p := NewPopulatedRpbSearchQueryResp(popr, true) 387 | data := code_google_com_p_gogoprotobuf_proto4.MarshalTextString(p) 388 | msg := &RpbSearchQueryResp{} 389 | if err := code_google_com_p_gogoprotobuf_proto4.UnmarshalText(data, msg); err != nil { 390 | panic(err) 391 | } 392 | if !p.Equal(msg) { 393 | t.Fatalf("%#v !Proto %#v", msg, p) 394 | } 395 | } 396 | 397 | func TestRpbSearchQueryRespProtoCompactText(t *testing6.T) { 398 | popr := math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano())) 399 | p := NewPopulatedRpbSearchQueryResp(popr, true) 400 | data := code_google_com_p_gogoprotobuf_proto4.CompactTextString(p) 401 | msg := &RpbSearchQueryResp{} 402 | if err := code_google_com_p_gogoprotobuf_proto4.UnmarshalText(data, msg); err != nil { 403 | panic(err) 404 | } 405 | if !p.Equal(msg) { 406 | t.Fatalf("%#v !Proto %#v", msg, p) 407 | } 408 | } 409 | 410 | func TestRpbSearchDocSize(t *testing7.T) { 411 | popr := math_rand7.New(math_rand7.NewSource(time7.Now().UnixNano())) 412 | p := NewPopulatedRpbSearchDoc(popr, true) 413 | size2 := code_google_com_p_gogoprotobuf_proto5.Size(p) 414 | data, err := code_google_com_p_gogoprotobuf_proto5.Marshal(p) 415 | if err != nil { 416 | panic(err) 417 | } 418 | size := p.Size() 419 | if len(data) != size { 420 | t.Fatalf("size %v != marshalled size %v", size, len(data)) 421 | } 422 | if size2 != size { 423 | t.Fatalf("size %v != before marshal proto.Size %v", 
size, size2) 424 | } 425 | size3 := code_google_com_p_gogoprotobuf_proto5.Size(p) 426 | if size3 != size { 427 | t.Fatalf("size %v != after marshal proto.Size %v", size, size3) 428 | } 429 | } 430 | 431 | func BenchmarkRpbSearchDocSize(b *testing7.B) { 432 | popr := math_rand7.New(math_rand7.NewSource(616)) 433 | total := 0 434 | pops := make([]*RpbSearchDoc, 1000) 435 | for i := 0; i < 1000; i++ { 436 | pops[i] = NewPopulatedRpbSearchDoc(popr, false) 437 | } 438 | b.ResetTimer() 439 | for i := 0; i < b.N; i++ { 440 | total += pops[i%1000].Size() 441 | } 442 | b.SetBytes(int64(total / b.N)) 443 | } 444 | 445 | func TestRpbSearchQueryReqSize(t *testing7.T) { 446 | popr := math_rand7.New(math_rand7.NewSource(time7.Now().UnixNano())) 447 | p := NewPopulatedRpbSearchQueryReq(popr, true) 448 | size2 := code_google_com_p_gogoprotobuf_proto5.Size(p) 449 | data, err := code_google_com_p_gogoprotobuf_proto5.Marshal(p) 450 | if err != nil { 451 | panic(err) 452 | } 453 | size := p.Size() 454 | if len(data) != size { 455 | t.Fatalf("size %v != marshalled size %v", size, len(data)) 456 | } 457 | if size2 != size { 458 | t.Fatalf("size %v != before marshal proto.Size %v", size, size2) 459 | } 460 | size3 := code_google_com_p_gogoprotobuf_proto5.Size(p) 461 | if size3 != size { 462 | t.Fatalf("size %v != after marshal proto.Size %v", size, size3) 463 | } 464 | } 465 | 466 | func BenchmarkRpbSearchQueryReqSize(b *testing7.B) { 467 | popr := math_rand7.New(math_rand7.NewSource(616)) 468 | total := 0 469 | pops := make([]*RpbSearchQueryReq, 1000) 470 | for i := 0; i < 1000; i++ { 471 | pops[i] = NewPopulatedRpbSearchQueryReq(popr, false) 472 | } 473 | b.ResetTimer() 474 | for i := 0; i < b.N; i++ { 475 | total += pops[i%1000].Size() 476 | } 477 | b.SetBytes(int64(total / b.N)) 478 | } 479 | 480 | func TestRpbSearchQueryRespSize(t *testing7.T) { 481 | popr := math_rand7.New(math_rand7.NewSource(time7.Now().UnixNano())) 482 | p := NewPopulatedRpbSearchQueryResp(popr, true) 483 | 
size2 := code_google_com_p_gogoprotobuf_proto5.Size(p) 484 | data, err := code_google_com_p_gogoprotobuf_proto5.Marshal(p) 485 | if err != nil { 486 | panic(err) 487 | } 488 | size := p.Size() 489 | if len(data) != size { 490 | t.Fatalf("size %v != marshalled size %v", size, len(data)) 491 | } 492 | if size2 != size { 493 | t.Fatalf("size %v != before marshal proto.Size %v", size, size2) 494 | } 495 | size3 := code_google_com_p_gogoprotobuf_proto5.Size(p) 496 | if size3 != size { 497 | t.Fatalf("size %v != after marshal proto.Size %v", size, size3) 498 | } 499 | } 500 | 501 | func BenchmarkRpbSearchQueryRespSize(b *testing7.B) { 502 | popr := math_rand7.New(math_rand7.NewSource(616)) 503 | total := 0 504 | pops := make([]*RpbSearchQueryResp, 1000) 505 | for i := 0; i < 1000; i++ { 506 | pops[i] = NewPopulatedRpbSearchQueryResp(popr, false) 507 | } 508 | b.ResetTimer() 509 | for i := 0; i < b.N; i++ { 510 | total += pops[i%1000].Size() 511 | } 512 | b.SetBytes(int64(total / b.N)) 513 | } 514 | 515 | //These tests are generated by code.google.com/p/gogoprotobuf/plugin/testgen 516 | -------------------------------------------------------------------------------- /rpbc/riak_yokozuna.proto: -------------------------------------------------------------------------------- 1 | /* ------------------------------------------------------------------- 2 | ** 3 | ** riak_yokozuna.proto: Protocol buffers for Yokozuna 4 | ** 5 | ** Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved. 6 | ** 7 | ** This file is provided to you under the Apache License, 8 | ** Version 2.0 (the "License"); you may not use this file 9 | ** except in compliance with the License. 
You may obtain 10 | ** a copy of the License at 11 | ** 12 | ** http://www.apache.org/licenses/LICENSE-2.0 13 | ** 14 | ** Unless required by applicable law or agreed to in writing, 15 | ** software distributed under the License is distributed on an 16 | ** "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | ** KIND, either express or implied. See the License for the 18 | ** specific language governing permissions and limitations 19 | ** under the License. 20 | ** 21 | ** ------------------------------------------------------------------- 22 | */ 23 | 24 | /* 25 | ** Revision: 2.0 26 | */ 27 | 28 | package rpbc; 29 | 30 | import "gogo.proto"; 31 | 32 | option (gogoproto.unsafe_unmarshaler_all) = true; 33 | option (gogoproto.unsafe_marshaler_all) = true; 34 | option (gogoproto.equal_all) = true; 35 | option (gogoproto.testgen_all) = true; 36 | option (gogoproto.populate_all) = true; 37 | option (gogoproto.benchgen_all) = true; 38 | option (gogoproto.sizer_all) = true; 39 | option (gogoproto.goproto_stringer_all) = true; 40 | 41 | 42 | import "riak.proto"; 43 | 44 | // Index queries 45 | 46 | message RpbYokozunaIndex { 47 | required bytes name = 1; // Index name 48 | optional bytes schema = 2; // Schema name 49 | optional uint32 n_val = 3; // N value 50 | } 51 | 52 | // GET request - If a name is given, return matching index, else return all 53 | message RpbYokozunaIndexGetReq { 54 | optional bytes name = 1; // Index name 55 | } 56 | 57 | message RpbYokozunaIndexGetResp { 58 | repeated RpbYokozunaIndex index = 1; 59 | } 60 | 61 | // PUT request - Create a new index 62 | message RpbYokozunaIndexPutReq { 63 | required RpbYokozunaIndex index = 1; 64 | } 65 | 66 | // DELETE request - Remove an index 67 | message RpbYokozunaIndexDeleteReq { 68 | required bytes name = 1; // Index name 69 | } 70 | 71 | // Schema queries 72 | 73 | message RpbYokozunaSchema { 74 | required bytes name = 1; // Index name 75 | optional bytes content = 2; // Schema data 76 | } 77 | 78 | 
// PUT request - create or potentially update a new schema 79 | message RpbYokozunaSchemaPutReq { 80 | required RpbYokozunaSchema schema = 1; 81 | } 82 | 83 | // GET request - Return matching schema by name 84 | message RpbYokozunaSchemaGetReq { 85 | required bytes name = 1; // Schema name 86 | } 87 | 88 | message RpbYokozunaSchemaGetResp { 89 | required RpbYokozunaSchema schema = 1; 90 | } 91 | -------------------------------------------------------------------------------- /store.go: -------------------------------------------------------------------------------- 1 | package rkive 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "github.com/philhofer/rkive/rpbc" 7 | "sync" 8 | ) 9 | 10 | const ( 11 | // maximum number of times we attempt 12 | // to handle contended stores, either 13 | // via conflicting vclocks or modifications 14 | maxMerges = 10 15 | ) 16 | 17 | var ( 18 | ErrNoPath = errors.New("bucket and/or key not defined") 19 | ErrModified = errors.New("object has been modified since last read") 20 | ErrExists = errors.New("object already exists") 21 | ctntPool *sync.Pool // pool for RpbContent 22 | hdrPool *sync.Pool // pool for RpbPutResp 23 | ) 24 | 25 | func init() { 26 | ctntPool = new(sync.Pool) 27 | ctntPool.New = func() interface{} { return &rpbc.RpbContent{} } 28 | hdrPool = new(sync.Pool) 29 | hdrPool.New = func() interface{} { return &rpbc.RpbPutResp{} } 30 | } 31 | 32 | // push content 33 | func ctput(c *rpbc.RpbContent) { 34 | ctntPool.Put(c) 35 | } 36 | 37 | // create RpbContent from object 38 | func ctpop(o Object) (*rpbc.RpbContent, error) { 39 | ctnt := ctntPool.Get().(*rpbc.RpbContent) 40 | err := writeContent(o, ctnt) 41 | return ctnt, err 42 | } 43 | 44 | // pop putresp 45 | func hdrpop() *rpbc.RpbPutResp { 46 | return hdrPool.Get().(*rpbc.RpbPutResp) 47 | } 48 | 49 | // put putresp; zeros fields 50 | func hdrput(r *rpbc.RpbPutResp) { 51 | r.Content = r.Content[0:0] 52 | r.Key = r.Key[0:0] 53 | r.Vclock = r.Vclock[0:0] 54 | hdrPool.Put(r) 
55 | } 56 | 57 | // WriteOpts are options available 58 | // for all write opertations. 59 | type WriteOpts struct { 60 | W *uint32 // Required write acknowledgements 61 | DW *uint32 // 'Durable' (to disk) write 62 | PW *uint32 // Primary replica writes 63 | } 64 | 65 | // put options into request 66 | func parseOpts(opts *WriteOpts, req *rpbc.RpbPutReq) { 67 | if opts == nil || req == nil { 68 | return 69 | } 70 | if opts.W != nil { 71 | req.W = opts.W 72 | } 73 | if opts.DW != nil { 74 | req.Dw = opts.DW 75 | } 76 | if opts.PW != nil { 77 | req.Pw = opts.PW 78 | } 79 | } 80 | 81 | // New writes a new object into the database. If 'key' 82 | // is non-nil, New will attempt to use that key, and return 83 | // ErrExists if an object already exists at that key-bucket pair. 84 | // Riak will assign this object a key if 'key' is nil. 85 | func (c *Client) New(o Object, bucket string, key *string, opts *WriteOpts) error { 86 | req := rpbc.RpbPutReq{ 87 | Bucket: []byte(bucket), 88 | } 89 | 90 | // return head 91 | req.ReturnHead = &ptrTrue 92 | 93 | // set keys if specified 94 | if key != nil { 95 | req.Key = ustr(*key) 96 | req.IfNoneMatch = &ptrTrue 97 | o.Info().key = append(o.Info().key[0:0], req.Key...) 98 | } 99 | var err error 100 | req.Content, err = ctpop(o) 101 | if err != nil { 102 | return err 103 | } 104 | // parse options 105 | parseOpts(opts, &req) 106 | res := hdrpop() 107 | rescode, err := c.req(&req, 11, res) 108 | ctput(req.Content) 109 | if err != nil { 110 | hdrput(res) 111 | // riak returns "match_found" on failure 112 | if rke, ok := err.(RiakError); ok { 113 | if bytes.Contains(rke.res.GetErrmsg(), []byte("match_found")) { 114 | return ErrExists 115 | } 116 | } 117 | return err 118 | } 119 | // not what we expected... 
120 | if rescode != 12 { 121 | return ErrUnexpectedResponse 122 | } 123 | // multiple content items 124 | if len(res.GetContent()) > 1 { 125 | return handleMultiple(len(res.GetContent()), string(req.Key), string(req.Bucket)) 126 | } 127 | // pull info from content 128 | readHeader(o, res.GetContent()[0]) 129 | // set data 130 | o.Info().vclock = append(o.Info().vclock[0:0], res.Vclock...) 131 | o.Info().bucket = append(o.Info().bucket[0:0], req.Bucket...) 132 | if len(res.Key) > 0 { 133 | o.Info().key = append(o.Info().key[0:0], res.Key...) 134 | } 135 | hdrput(res) 136 | return err 137 | } 138 | 139 | // Store makes a basic write to the database. Store 140 | // will return ErrNoPath if the object does not already 141 | // have a key and bucket defined. (Use New() if this object 142 | // isn't already in the database.) 143 | func (c *Client) Store(o Object, opts *WriteOpts) error { 144 | if o.Info().bucket == nil || o.Info().key == nil { 145 | return ErrNoPath 146 | } 147 | ntry := 0 // merge attempts 148 | 149 | dostore: 150 | req := rpbc.RpbPutReq{ 151 | Bucket: o.Info().bucket, 152 | Key: o.Info().key, 153 | Vclock: o.Info().vclock, 154 | } 155 | 156 | req.ReturnHead = &ptrTrue 157 | if o.Info().vclock != nil { 158 | req.Vclock = append(req.Vclock, o.Info().vclock...) 
159 | } 160 | parseOpts(opts, &req) 161 | 162 | // write content 163 | var err error 164 | req.Content, err = ctpop(o) 165 | if err != nil { 166 | return err 167 | } 168 | res := hdrpop() 169 | rescode, err := c.req(&req, 11, res) 170 | ctput(req.Content) 171 | if err != nil { 172 | return err 173 | } 174 | if rescode != 12 { 175 | return ErrUnexpectedResponse 176 | } 177 | if len(res.GetContent()) > 1 { 178 | if ntry > maxMerges { 179 | return handleMultiple(len(res.GetContent()), o.Info().Key(), o.Info().Bucket()) 180 | } 181 | // repair if possible 182 | if om, ok := o.(ObjectM); ok { 183 | hdrput(res) 184 | // load the old value(s) into nom 185 | nom := om.NewEmpty() 186 | err = c.Fetch(nom, om.Info().Bucket(), om.Info().Key(), nil) 187 | if err != nil { 188 | return err 189 | } 190 | // merge old values 191 | om.Merge(nom) 192 | om.Info().vclock = nom.Info().vclock 193 | ntry++ 194 | // retry the store 195 | goto dostore 196 | } else { 197 | return handleMultiple(len(res.GetContent()), o.Info().Key(), o.Info().Bucket()) 198 | } 199 | } 200 | readHeader(o, res.GetContent()[0]) 201 | o.Info().vclock = append(o.Info().vclock[0:0], res.Vclock...) 202 | hdrput(res) 203 | return nil 204 | } 205 | 206 | // Push makes a conditional (if-not-modified) write 207 | // to the database. This is the recommended way of making 208 | // writes to the database, as it minimizes the chances 209 | // of producing sibling objects. 
210 | func (c *Client) Push(o Object, opts *WriteOpts) error { 211 | if o.Info().bucket == nil || o.Info().key == nil || o.Info().vclock == nil { 212 | return ErrNoPath 213 | } 214 | 215 | req := rpbc.RpbPutReq{ 216 | Bucket: o.Info().bucket, 217 | Key: o.Info().key, 218 | Vclock: o.Info().vclock, 219 | } 220 | 221 | // Return-Head = true; If-Not-Modified = true 222 | req.ReturnHead = &ptrTrue 223 | req.IfNotModified = &ptrTrue 224 | parseOpts(opts, &req) 225 | ntry := 0 226 | 227 | dopush: 228 | var err error 229 | req.Content, err = ctpop(o) 230 | if err != nil { 231 | return err 232 | } 233 | res := hdrpop() 234 | rescode, err := c.req(&req, 11, res) 235 | ctput(req.Content) 236 | if err != nil { 237 | hdrput(res) 238 | if rke, ok := err.(RiakError); ok { 239 | if bytes.Contains(rke.res.Errmsg, []byte("modified")) { 240 | return ErrModified 241 | } 242 | } 243 | return err 244 | } 245 | if rescode != 12 { 246 | hdrput(res) 247 | return ErrUnexpectedResponse 248 | } 249 | if res.Vclock == nil || len(res.Content) == 0 { 250 | hdrput(res) 251 | return ErrNotFound 252 | } 253 | if len(res.Content) > 1 { 254 | // repair if possible 255 | if om, ok := o.(ObjectM); ok { 256 | if ntry > maxMerges { 257 | return handleMultiple(len(res.Content), o.Info().Key(), o.Info().Bucket()) 258 | } 259 | nom := om.NewEmpty() 260 | // fetch carries out the local merge on read 261 | err = c.Fetch(nom, om.Info().Bucket(), om.Info().Key(), nil) 262 | if err != nil { 263 | return err 264 | } 265 | om.Merge(nom) 266 | om.Info().vclock = append(om.Info().vclock[0:0], nom.Info().vclock...) 267 | ntry++ 268 | goto dopush 269 | } else { 270 | return handleMultiple(len(res.Content), o.Info().Key(), o.Info().Bucket()) 271 | } 272 | } 273 | o.Info().vclock = append(o.Info().vclock[0:0], res.Vclock...) 274 | readHeader(o, res.GetContent()[0]) 275 | hdrput(res) 276 | return nil 277 | } 278 | 279 | // Overwrite performs a store operation on an arbitrary 280 | // location. 
It does not send a vclock, and the object itself 281 | // is not modified. Overwrite ignores NotFound errors. 282 | // This function is only safe to use with buckets in which "last_write_wins" is turned on. 283 | // Ideally, this function is only used for caches. 284 | func (c *Client) Overwrite(o Object, bucket string, key string, opts *WriteOpts) error { 285 | req := rpbc.RpbPutReq{ 286 | Bucket: ustr(bucket), 287 | Key: ustr(key), 288 | ReturnBody: &ptrFalse, 289 | } 290 | 291 | parseOpts(opts, &req) 292 | 293 | var err error 294 | req.Content, err = ctpop(o) 295 | if err != nil { 296 | return err 297 | } 298 | 299 | res := hdrpop() 300 | 301 | var code byte 302 | code, err = c.req(&req, 11, res) 303 | ctput(req.Content) 304 | hdrput(res) 305 | if err != nil { 306 | if err == ErrNotFound { 307 | return nil 308 | } 309 | return err 310 | } 311 | if code != 12 { 312 | return ErrUnexpectedResponse 313 | } 314 | return nil 315 | } 316 | -------------------------------------------------------------------------------- /store_test.go: -------------------------------------------------------------------------------- 1 | // +build riak 2 | 3 | package rkive 4 | 5 | import ( 6 | "bytes" 7 | check "gopkg.in/check.v1" 8 | "time" 9 | ) 10 | 11 | func (s *riakSuite) TestNewObject(c *check.C) { 12 | startt := time.Now() 13 | ob := &TestObject{ 14 | Data: []byte("Hello World"), 15 | } 16 | 17 | // random key assignment 18 | err := s.cl.New(ob, "testbucket", nil, nil) 19 | if err != nil { 20 | c.Fatal(err) 21 | } 22 | if string(ob.Data) != "Hello World" { 23 | c.Error("Object lost its data") 24 | } 25 | if ob.Info().Vclock() == "" { 26 | c.Error("object didn't get assigned a vclock") 27 | } 28 | 29 | nob := &TestObject{Data: []byte("Blah.")} 30 | key := "testkey" 31 | err = s.cl.New(nob, "testbucket", &key, nil) 32 | if err != nil { 33 | // we'll allow ErrExists 34 | // b/c of prior test runs 35 | if err != ErrExists { 36 | c.Fatal(err) 37 | } 38 | } 39 | if ob.Info().Vclock() == 
"" { 40 | c.Error("Object didn't get assigned a vclock") 41 | } 42 | if ob.Info().Key() == "" { 43 | c.Errorf("object didn't get assigned a key") 44 | } 45 | s.runtime += time.Since(startt) 46 | } 47 | 48 | func (s *riakSuite) TestPushObject(c *check.C) { 49 | startt := time.Now() 50 | ob := &TestObject{ 51 | Data: []byte("Hello World"), 52 | } 53 | // make new 54 | err := s.cl.New(ob, "testbucket", nil, nil) 55 | if err != nil { 56 | c.Fatal(err) 57 | } 58 | 59 | // fetch 'n store 60 | newob := &TestObject{ 61 | Data: nil, 62 | } 63 | // fetch the same 64 | err = s.cl.Fetch(newob, "testbucket", ob.Info().Key(), nil) 65 | if err != nil { 66 | c.Fatal(err) 67 | } 68 | // modify the data 69 | newob.Data = []byte("new conflicting data!") 70 | // this should work 71 | err = s.cl.Push(newob, nil) 72 | if err != nil { 73 | c.Fatal(err) 74 | } 75 | 76 | // modify the old 77 | ob.Data = []byte("blah blah blah") 78 | 79 | err = s.cl.Push(ob, nil) 80 | if err != ErrModified { 81 | c.Fatalf("Expected ErrModified; got %q", err) 82 | } 83 | s.runtime += time.Since(startt) 84 | } 85 | 86 | func (s *riakSuite) TestStoreObject(c *check.C) { 87 | startt := time.Now() 88 | ob := &TestObject{ 89 | Data: []byte("Hello World"), 90 | } 91 | 92 | // random key assignment 93 | err := s.cl.New(ob, "testbucket", nil, nil) 94 | if err != nil { 95 | c.Fatal(err) 96 | } 97 | if ob.Info().Vclock() == "" { 98 | c.Error("object didn't get assigned a vclock") 99 | } 100 | if string(ob.Data) != "Hello World" { 101 | c.Fatal("Object lost its data!") 102 | } 103 | 104 | // fetch the same object 105 | nob := &TestObject{} 106 | err = s.cl.Fetch(nob, "testbucket", ob.Info().Key(), nil) 107 | if err != nil { 108 | c.Fatal(err) 109 | } 110 | 111 | if !bytes.Equal(ob.Data, nob.Data) { 112 | c.Logf("Sent: %q", ob.Data) 113 | c.Logf("Returned : %q", nob.Data) 114 | c.Fatal("Objects' 'data' field differs") 115 | } 116 | 117 | // make a change 118 | nob.Data = []byte("new information!") 119 | err = 
s.cl.Store(nob, nil) 120 | if err != nil { 121 | c.Fatal(err) 122 | } 123 | s.runtime += time.Since(startt) 124 | } 125 | 126 | func (s *riakSuite) TestPushChangeset(c *check.C) { 127 | startt := time.Now() 128 | ob := &TestObject{ 129 | Data: []byte("Here's a body."), 130 | } 131 | nob := &TestObject{} 132 | 133 | err := s.cl.New(ob, "testbucket", nil, nil) 134 | if err != nil { 135 | c.Fatal(err) 136 | } 137 | 138 | err = s.cl.Fetch(nob, "testbucket", ob.Info().Key(), nil) 139 | if err != nil { 140 | c.Fatal(err) 141 | } 142 | 143 | nob.Data = []byte("Intermediate") 144 | err = s.cl.Push(nob, nil) 145 | if err != nil { 146 | c.Fatal(err) 147 | } 148 | 149 | // this should fail 150 | err = s.cl.Push(ob, nil) 151 | if err != ErrModified { 152 | c.Fatalf("Expected \"modified\", got %q", err) 153 | } 154 | 155 | chng := func(o Object) error { 156 | v := o.(*TestObject) 157 | if bytes.Equal(v.Data, []byte("New Body")) { 158 | return ErrDone 159 | } 160 | v.Data = []byte("New Body") 161 | return nil 162 | } 163 | 164 | // ... 
and then this should pass 165 | err = s.cl.PushChangeset(ob, chng, nil) 166 | if err != nil { 167 | c.Fatal(err) 168 | } 169 | 170 | // the other object should reflect the changes 171 | chngd, err := s.cl.Update(nob, nil) 172 | if err != nil { 173 | c.Fatal(err) 174 | } 175 | if !chngd { 176 | c.Error("Expected change; didn't get it") 177 | } 178 | if !bytes.Equal(nob.Data, []byte("New Body")) { 179 | c.Errorf("Wanted data \"New Body\"; got %q", nob.Data) 180 | } 181 | 182 | if !bytes.Equal(ob.Data, []byte("New Body")) { 183 | c.Error("ob.Data didn't retain the appropriate value") 184 | } 185 | 186 | err = s.cl.Fetch(ob, "testbucket", ob.Info().Key(), nil) 187 | if err != nil { 188 | c.Fatal(err) 189 | } 190 | if !bytes.Equal(ob.Data, []byte("New Body")) { 191 | c.Errorf(`Expected "New Body"; got %q`, ob.Data) 192 | } 193 | s.runtime += time.Since(startt) 194 | } 195 | -------------------------------------------------------------------------------- /wercker.yml: -------------------------------------------------------------------------------- 1 | box: wercker/golang@1.2.0 2 | services: 3 | - packrat386/riak@0.0.3 4 | build: 5 | steps: 6 | - script: 7 | name: install-dep 8 | code: go get gopkg.in/check.v1 && go get code.google.com/p/gogoprotobuf/proto && go get -d ./... 9 | - script: 10 | name: test 11 | code: go test -v -tags 'riak' -check.v 12 | after-steps: 13 | - wantedly/pretty-slack-notify: 14 | team: $SLACK_TEAM 15 | token: $SLACK_API_TOKEN 16 | channel: $SLACK_CHANNEL 17 | username: werckerbot --------------------------------------------------------------------------------