├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── .golangci.yml ├── LICENSE ├── README.md ├── bucket.go ├── collate.go ├── collate_raw.go ├── collate_test.go ├── datastore_name.go ├── datastore_name_test.go ├── design_doc.go ├── go.mod ├── go.sum ├── js_map_fn.go ├── js_map_fn_test.go ├── js_runner.go ├── js_server.go ├── licenses ├── APL2.txt ├── BSL-Couchbase.txt └── addlicense.tmpl ├── logg.go ├── pipeline.go ├── queries.go ├── tap.go ├── tap_test.go ├── util_test.go ├── vb.go └── views.go /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2024-Present Couchbase, Inc. 2 | # 3 | # Use of this software is governed by the Business Source License included 4 | # in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | # in that file, in accordance with the Business Source License, use of this 6 | # software will be governed by the Apache License, Version 2.0, included in 7 | # the file licenses/APL2.txt. 8 | 9 | version: 2 10 | updates: 11 | - package-ecosystem: "gomod" # See documentation for possible values 12 | directory: "/" # Location of package manifests 13 | schedule: 14 | interval: "weekly" 15 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2023-Present Couchbase, Inc. 2 | # 3 | # Use of this software is governed by the Business Source License included 4 | # in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | # in that file, in accordance with the Business Source License, use of this 6 | # software will be governed by the Apache License, Version 2.0, included in 7 | # the file licenses/APL2.txt. 
8 | 9 | name: ci 10 | 11 | on: 12 | push: 13 | branches: 14 | - 'main' 15 | - 'release/*' 16 | - 'CBG*' 17 | - 'ci-*' 18 | - 'feature*' 19 | pull_request: 20 | branches: 21 | - 'main' 22 | - 'release/*' 23 | 24 | jobs: 25 | addlicense: 26 | name: addlicense 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v4 30 | - uses: actions/setup-go@v5 31 | with: 32 | go-version: 1.21.9 33 | - run: go install github.com/google/addlicense@latest 34 | - run: addlicense -check -f licenses/addlicense.tmpl . 35 | 36 | test: 37 | runs-on: ${{ matrix.os }} 38 | strategy: 39 | fail-fast: false 40 | matrix: 41 | os: [macos-latest, windows-latest, ubuntu-latest] 42 | steps: 43 | - uses: actions/checkout@v4 44 | - uses: actions/setup-go@v5 45 | with: 46 | go-version: 1.21.9 47 | - name: Build 48 | run: go build -v "./..." 49 | - name: Run Tests 50 | run: go test -timeout=30m -count=1 -json -v "./..." | tee test.json | jq -s -jr 'sort_by(.Package,.Time) | .[].Output | select (. != null )' 51 | shell: bash 52 | - name: Annotate Failures 53 | if: always() 54 | uses: guyarb/golang-test-annotations@v0.8.0 55 | with: 56 | test-results: test.json 57 | 58 | golangci: 59 | name: lint 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v4 63 | - uses: actions/setup-go@v5 64 | with: 65 | go-version: 1.21.9 66 | - name: golangci-lint 67 | uses: golangci/golangci-lint-action@v4 68 | with: 69 | version: v1.57.2 70 | 71 | test-race: 72 | runs-on: ${{ matrix.os }} 73 | strategy: 74 | fail-fast: false 75 | matrix: 76 | os: [macos-latest, windows-latest, ubuntu-latest] 77 | steps: 78 | - uses: actions/checkout@v4 79 | - uses: actions/setup-go@v5 80 | with: 81 | go-version: 1.21.9 82 | - name: Run Tests 83 | run: go test -race -timeout=30m -count=1 -json -v "./..." | tee test.json | jq -s -jr 'sort_by(.Package,.Time) | .[].Output | select (. 
!= null )' 84 | shell: bash 85 | - name: Annotate Failures 86 | if: always() 87 | uses: guyarb/golang-test-annotations@v0.8.0 88 | with: 89 | test-results: test.json 90 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020-Present Couchbase, Inc. 2 | # 3 | # Use of this software is governed by the Business Source License included in 4 | # the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that 5 | # file, in accordance with the Business Source License, use of this software 6 | # will be governed by the Apache License, Version 2.0, included in the file 7 | # licenses/APL2.txt. 8 | 9 | # config file for golangci-lint 10 | 11 | linters: 12 | enable: 13 | #- bodyclose # checks whether HTTP response body is closed successfully 14 | #- dupl # Tool for code clone detection 15 | - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases 16 | #- goconst # Finds repeated strings that could be replaced by a constant 17 | #- gocritic # The most opinionated Go source code linter 18 | - goimports # Goimports does everything that gofmt does. 
Additionally it checks unused imports 19 | #- goprintffuncname # Checks that printf-like functions are named with `f` at the end 20 | #- gosec # (gas) Inspects source code for security problems 21 | #- gosimple # (megacheck) Linter for Go source code that specializes in simplifying a code 22 | - govet # (vet, vetshadow) Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string 23 | #- ineffassign # Detects when assignments to existing variables are not used 24 | - misspell # Finds commonly misspelled English words in comments 25 | #- nakedret # Finds naked returns in functions greater than a specified function length 26 | #- prealloc # Finds slice declarations that could potentially be preallocated 27 | #- revive # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes 28 | #- staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks 29 | #- structcheck # Finds unused struct fields - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code 30 | #- unconvert # Remove unnecessary type conversions 31 | #- unparam # Reports unused function parameters 32 | #- unused # (megacheck) Checks Go code for unused constants, variables, functions and types 33 | disable: 34 | - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers 35 | - depguard # Go linter that checks if package imports are in a list of acceptable packages 36 | - dogsled # Checks assignments with too many blank identifiers # (e.g. 
x, _, _, _, := f()) 37 | - funlen # Tool for detection of long functions 38 | - gochecknoglobals # Checks that no globals are present in Go code 39 | - gochecknoinits # Checks that no init functions are present in Go code 40 | - gocognit # Computes and checks the cognitive complexity of functions 41 | - gocyclo # Computes and checks the cyclomatic complexity of functions 42 | - godot # Check if comments end in a period 43 | - godox # Tool for detection of FIXME, TODO and other comment keywords 44 | - goerr113 # Golang linter to check the errors handling expressions 45 | - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification 46 | - gomnd # An analyzer to detect magic numbers. 47 | - gomodguard # Allow and block list linter for direct Go module dependencies. 48 | - interfacer # Linter that suggests narrower interface types 49 | - lll # Reports long lines 50 | - nestif # Reports deeply nested if statements 51 | - nolintlint # Reports ill-formed or insufficient nolint directives 52 | - rowserrcheck # checks whether Err of rows is checked successfully 53 | - scopelint # Scopelint checks for unpinned variables in go programs 54 | - stylecheck # Stylecheck is a replacement for golint 55 | - testpackage # linter that makes you use a separate _test package 56 | - unused # (megacheck) Checks Go code for unused constants, variables, functions and types 57 | - whitespace # Tool for detection of leading and trailing whitespace 58 | - wsl # Whitespace Linter - Forces you to use empty lines! 
59 | # Once fixed, should enable 60 | - bodyclose # checks whether HTTP response body is closed successfully 61 | - deadcode # Finds unused code 62 | - dupl # Tool for code clone detection 63 | - goconst # Finds repeated strings that could be replaced by a constant 64 | - gocritic # The most opinionated Go source code linter 65 | - goprintffuncname # Checks that printf-like functions are named with `f` at the end 66 | - gosec # (gas) Inspects source code for security problems 67 | - gosimple # (megacheck) Linter for Go source code that specializes in simplifying a code 68 | - ineffassign # Detects when assignments to existing variables are not used 69 | - nakedret # Finds naked returns in functions greater than a specified function length 70 | - prealloc # Finds slice declarations that could potentially be preallocated 71 | - revive # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes 72 | - staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks 73 | - structcheck # Finds unused struct fields 74 | - unconvert # Remove unnecessary type conversions 75 | - unparam # Reports unused function parameters 76 | - varcheck # Finds unused global variables and constants 77 | 78 | # Don't enable fieldalignment, changing the field alignment requires checking to see if anyone uses constructors 79 | # without names. If there is a memory issue on a specific field, that is best found with a heap profile. 80 | #linters-settings: 81 | # govet: 82 | # enable: 83 | # - fieldalignment # detect Go structs that would take less memory if their fields were sorted 84 | 85 | # Disable goconst in test files, often we have duplicated strings across tests, but don't make sense as constants. 
86 | issues: 87 | exclude-rules: 88 | - path: (_test\.go|utilities_testing\.go) 89 | linters: 90 | - goconst 91 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Source code in this repository is licensed under various licenses. The 2 | Business Source License 1.1 (BSL) is one such license. Each file indicates in 3 | a section at the beginning of the file the name of the license that applies to 4 | it. All licenses used in this repository can be found in the top-level 5 | licenses directory. 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![GoDoc](https://godoc.org/github.com/couchbase/sg-bucket?status.png)](https://godoc.org/github.com/couchbase/sg-bucket) [![Sourcegraph](https://sourcegraph.com/github.com/couchbase/sg-bucket/-/badge.svg)](https://sourcegraph.com/github.com/couchbase/sg-bucket?badge) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) 2 | 3 | 4 | # sg-bucket 5 | 6 | This repo contains: 7 | 8 | - Interfaces needed by all concrete implementations of the `sgbucket.Bucket` interface, as well as by Sync Gateway itself. 9 | - Common code used by certain sg-bucket concrete implementations ([walrus](https://github.com/couchbaselabs/walrus), [forestdb-bucket](https://github.com/couchbaselabs/forestdb-bucket/)) and to a lesser extent by Sync Gateway itself. 10 | -------------------------------------------------------------------------------- /bucket.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. 
As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 8 | 9 | package sgbucket 10 | 11 | import ( 12 | "context" 13 | "errors" 14 | "expvar" 15 | "fmt" 16 | ) 17 | 18 | // BucketDocument is a raw representation of a document, body and xattrs as bytes, along with cas. 19 | type BucketDocument struct { 20 | Body []byte 21 | Xattrs map[string][]byte 22 | Cas uint64 23 | Expiry uint32 // Item expiration time (UNIX Epoch time) 24 | IsTombstone bool // IsTombstone is true if the document is a tombstone 25 | } 26 | 27 | // BucketStoreFeature can be tested for with BucketStoreFeatureIsSupported.IsSupported. 28 | type BucketStoreFeature int 29 | 30 | const ( 31 | BucketStoreFeatureXattrs = BucketStoreFeature(iota) 32 | BucketStoreFeatureN1ql 33 | BucketStoreFeatureCrc32cMacroExpansion 34 | BucketStoreFeatureCreateDeletedWithXattr 35 | BucketStoreFeatureSubdocOperations 36 | BucketStoreFeaturePreserveExpiry 37 | BucketStoreFeatureCollections 38 | BucketStoreFeatureSystemCollections 39 | BucketStoreFeatureMobileXDCR 40 | BucketStoreFeatureMultiXattrSubdocOperations 41 | BucketStoreFeatureN1qlIfNotExistsDDL 42 | ) 43 | 44 | // BucketStore is a basic interface that describes a bucket - with one or many underlying DataStore. 45 | type BucketStore interface { 46 | GetName() string // The bucket's name 47 | UUID() (string, error) // The bucket's UUID 48 | Close(context.Context) // Closes the bucket 49 | 50 | // A list of all DataStore names in the bucket. 51 | ListDataStores() ([]DataStoreName, error) 52 | 53 | // The default data store of the bucket (always exists.) 54 | DefaultDataStore() DataStore 55 | 56 | // Returns a named data store in the bucket, or an error if it doesn't exist. 
57 | NamedDataStore(DataStoreName) (DataStore, error) 58 | 59 | MutationFeedStore 60 | BucketStoreFeatureIsSupported 61 | } 62 | 63 | // DynamicDataStoreBucket is an interface that describes a bucket that can change its set of DataStores. 64 | type DynamicDataStoreBucket interface { 65 | CreateDataStore(context.Context, DataStoreName) error // CreateDataStore creates a new DataStore in the bucket 66 | DropDataStore(DataStoreName) error // DropDataStore drops a DataStore from the bucket 67 | } 68 | 69 | // MutationFeedStore is a DataStore that supports a DCP or TAP streaming mutation feed. 70 | type MutationFeedStore interface { 71 | // GetMaxVbno returns the number of vBuckets of this store; usually 1024. 72 | GetMaxVbno() (uint16, error) 73 | 74 | // StartDCPFeed starts a new DCP event feed. Events will be passed to the callback function. 75 | // To close the feed, pass a channel in args.Terminator and close that channel. The callback will be called for each event processed. dbStats are optional to provide metrics. 76 | StartDCPFeed(ctx context.Context, args FeedArguments, callback FeedEventCallbackFunc, dbStats *expvar.Map) error 77 | } 78 | 79 | // BucketStoreFeatureIsSupported allows a BucketStore to be tested for support for various features. 80 | type BucketStoreFeatureIsSupported interface { 81 | IsSupported(feature BucketStoreFeature) bool // IsSupported reports whether the bucket/datastore supports a given feature 82 | } 83 | 84 | // DataStore is a basic key-value store with extended attributes and subdoc operations. 85 | // A Couchbase Server collection within a bucket is an example of a DataStore. 86 | // The expiry field (exp) can take offsets or UNIX Epoch times. See https://developer.couchbase.com/documentation/server/3.x/developer/dev-guide-3.0/doc-expiration.html 87 | type DataStore interface { 88 | // GetName returns bucket.scope.collection 89 | GetName() string 90 | 91 | // An integer that uniquely identifies this Collection in its Bucket. 
92 | // The default collection always has the ID zero. 93 | GetCollectionID() uint32 94 | 95 | KVStore 96 | XattrStore 97 | SubdocStore 98 | BucketStoreFeatureIsSupported 99 | DataStoreName 100 | } 101 | 102 | // UpsertOptions are the options to use with the set operations 103 | type UpsertOptions struct { 104 | PreserveExpiry bool // PreserveExpiry will keep the existing expiry of an existing document if available 105 | } 106 | 107 | // MutateInOptions is a struct of options for mutate in operations, to be used by both sync gateway and rosmar 108 | type MutateInOptions struct { 109 | PreserveExpiry bool // PreserveExpiry will keep the existing document expiry on modification 110 | MacroExpansion []MacroExpansionSpec 111 | } 112 | 113 | // MacroExpansionSpec is a path, value pair where the path is a xattr path and the macro to be used to populate that path 114 | type MacroExpansionSpec struct { 115 | Path string 116 | Type MacroExpansionType 117 | } 118 | 119 | // MacroExpansionType defines the macro expansion types used by Sync Gateway and supported by CBS and rosmar 120 | type MacroExpansionType int 121 | 122 | const ( 123 | MacroCas MacroExpansionType = iota // Document CAS 124 | MacroCrc32c // crc32c hash of the document body 125 | ) 126 | 127 | var ( 128 | macroExpansionTypeStrings = []string{"CAS", "crc32c"} 129 | ) 130 | 131 | func (t MacroExpansionType) String() string { 132 | return macroExpansionTypeStrings[t] 133 | } 134 | 135 | func NewMacroExpansionSpec(specPath string, macro MacroExpansionType) MacroExpansionSpec { 136 | return MacroExpansionSpec{ 137 | Path: specPath, 138 | Type: macro, 139 | } 140 | } 141 | 142 | // A KVStore implements the basic key-value CRUD operations. 143 | type KVStore interface { 144 | // Get retrieves a document value of a key and unmarshals it. 145 | // Parameters: 146 | // - k: The key (document ID) 147 | // - rv: The value, if any, is stored here. Must be a pointer.
148 | // If it is a `*[]byte` the raw value will be stored in it. 149 | // Otherwise it's written to by json.Unmarshal; the usual type is `*map[string]any`. 150 | // If the document is a tombstone, nothing is stored. 151 | // Return values: 152 | // - cas: The document's current CAS (sequence) number. 153 | // - err: Error, if any. Returns an error if the key does not exist. 154 | Get(k string, rv interface{}) (cas uint64, err error) 155 | 156 | // GetRaw returns value of a key as a raw byte array. 157 | // Parameters: 158 | // - k: The key (document ID) 159 | // Return values: 160 | // - rv: The raw value. Nil if the document is a tombstone. 161 | // - cas: The document's current CAS (sequence) number. 162 | // - err: Error, if any. Returns an error if the key does not exist. 163 | GetRaw(k string) (rv []byte, cas uint64, err error) 164 | 165 | // GetAndTouchRaw is like GetRaw, but also sets the document's expiration time. 166 | // Since this changes the document, it generates a new CAS value and posts an event. 167 | GetAndTouchRaw(k string, exp uint32) (rv []byte, cas uint64, err error) 168 | 169 | // Touch is equivalent to GetAndTouchRaw, but does not return the value. 170 | Touch(k string, exp uint32) (cas uint64, err error) 171 | 172 | // Add creates a document; similar to Set but gives up if the key exists with a non-nil value. 173 | // Parameters: 174 | // - k: The key (document ID) 175 | // - exp: Expiration timestamp (0 for never) 176 | // - v: The value to set. Will be marshaled to JSON unless it is a `[]byte` or `*[]byte`. 177 | // Return values: 178 | // - added: True if the document was added, false if it already has a value. 179 | // - err: Error, if any. Does not return ErrKeyExists. 180 | Add(k string, exp uint32, v interface{}) (added bool, err error) 181 | 182 | // AddRaw creates a document; similar to SetRaw but gives up if the key exists with a non-nil value. 
183 | // Parameters: 184 | // - k: The key (document ID) 185 | // - exp: Expiration timestamp (0 for never) 186 | // - v: The raw value to set. 187 | // Return values: 188 | // - added: True if the document was added, false if it already has a value. 189 | // - err: Error, if any. Does not return ErrKeyExists. 190 | AddRaw(k string, exp uint32, v []byte) (added bool, err error) 191 | 192 | // Set upserts a document, creating it if it doesn't exist. 193 | // Parameters: 194 | // - k: The key (document ID) 195 | // - exp: Expiration timestamp (0 for never) 196 | // - opts: Options. Use PreserveExpiry=true to leave the expiration alone 197 | // - v: The value to set. Will be marshaled to JSON unless it is a `[]byte` or `*[]byte` 198 | // Return values: 199 | // - err: Error, if any 200 | Set(k string, exp uint32, opts *UpsertOptions, v interface{}) error 201 | 202 | // SetRaw upserts a document, creating it if it doesn't exist. 203 | // Parameters: 204 | // - k: The key (document ID) 205 | // - exp: Expiration timestamp (0 for never) 206 | // - opts: Options. Use PreserveExpiry=true to leave the expiration alone 207 | // - v: The raw value to set 208 | // Return values: 209 | // - err: Error, if any. Does not return ErrKeyExists 210 | SetRaw(k string, exp uint32, opts *UpsertOptions, v []byte) error 211 | 212 | // WriteCas is the most general write method. Sets the value of a document, creating it if it doesn't 213 | // exist, but checks for CAS conflicts: 214 | // If the document has a value, and its CAS differs from the input `cas` parameter, the method 215 | // fails and returns a CasMismatchErr. 216 | // Parameters: 217 | // - k: The key (document ID) 218 | // - exp: Expiration timestamp (0 for never) 219 | // - cas: Expected CAS value 220 | // - v: The value to set.
Will be marshaled to JSON unless it is a `[]byte` or `*[]byte` 221 | // - opt: Options; see WriteOptions for details 222 | // Return values: 223 | // - casOut: The new CAS value 224 | // - err: Error, if any. May be CasMismatchErr 225 | WriteCas(k string, exp uint32, cas uint64, v interface{}, opt WriteOptions) (casOut uint64, err error) 226 | 227 | // Delete removes a document by setting its value to nil, making it a tombstone. 228 | // System xattrs are preserved but user xattrs are removed. 229 | // Returns an error if the document doesn't exist or has no value. 230 | Delete(k string) error 231 | 232 | // Remove a document if its CAS matches the given value. 233 | // System xattrs are preserved but user xattrs are removed. 234 | // Returns an error if the document doesn't exist or has no value. Returns a CasMismatchErr if the CAS doesn't match. 235 | Remove(k string, cas uint64) (casOut uint64, err error) 236 | 237 | // Update interactively updates a document. The document's current value (nil if none) is passed to 238 | // the callback, then the result of the callback is used to update the value. 239 | // 240 | // Warning: If the document's CAS changes between the read and the write, the method retries; 241 | // therefore you must be prepared for your callback to be called multiple times. 242 | // 243 | // Note: The new value is assumed to be JSON, i.e. when the document is updated its "is JSON" 244 | // flag is set. The UpdateFunc callback unfortunately has no way to override this. 245 | // 246 | // Parameters: 247 | // - k: The key (document ID) 248 | // - exp: Expiration timestamp to set (0 for never) 249 | // - callback: Will be called to compute the new value 250 | // Return values: 251 | // - casOut: The document's new CAS 252 | // - err: Error, if any (including an error returned by the callback) 253 | Update(k string, exp uint32, callback UpdateFunc) (casOut uint64, err error) 254 | 255 | // Incr adds a number to a document serving as a counter.
256 | // The document's value must be an ASCII decimal integer. 257 | // Parameters: 258 | // - k: The key (document ID) 259 | // - amt: The amount to add to the existing value 260 | // - def: The number to store if there is no existing value 261 | // - exp: Expiration timestamp to set (0 for never) 262 | // Return values: 263 | // - casOut: The document's new CAS 264 | // - err: Error, if any 265 | Incr(k string, amt, def uint64, exp uint32) (casOut uint64, err error) 266 | 267 | // GetExpiry returns the document's current expiration timestamp. 268 | GetExpiry(ctx context.Context, k string) (expiry uint32, err error) 269 | 270 | // Exists tests whether a document exists. 271 | // A tombstone with a nil value is still considered to exist. 272 | Exists(k string) (exists bool, err error) 273 | } 274 | 275 | // SubdocStore is an extension of KVStore that allows individual properties in a document to be accessed. 276 | // Documents accessed through this API must have values that are JSON objects. 277 | // Properties are specified by SQL++ paths that look like "foo.bar.baz" or "foo.bar[3].baz". 278 | type SubdocStore interface { 279 | // SubdocInsert adds an individual JSON property to a document. The document must exist. 280 | // If the property already exists, returns `ErrPathExists`. 281 | // If the parent property doesn't exist, returns `ErrPathNotFound`. 282 | // If a parent property has the wrong type, returns ErrPathMismatch. 283 | // Parameters: 284 | // - k: The key (document ID) 285 | // - subdocPath: The JSON path of the property to set 286 | // - cas: Expected CAS value, or 0 to ignore CAS conflicts 287 | // - value: The value to set. Will be marshaled to JSON. 288 | SubdocInsert(ctx context.Context, k string, subdocPath string, cas uint64, value interface{}) error 289 | 290 | // GetSubDocRaw returns the raw JSON value of a document property. 291 | // If the property doesn't exist, returns ErrPathNotFound. 
292 | // If a parent property has the wrong type, returns ErrPathMismatch. 293 | // Parameters: 294 | // - k: The key (document ID) 295 | // - subdocPath: The JSON path of the property to get 296 | // Return values: 297 | // - value: The property value as JSON 298 | // - casOut: The document's current CAS (sequence) number. 299 | // - err: Error, if any. 300 | GetSubDocRaw(ctx context.Context, k string, subdocPath string) (value []byte, casOut uint64, err error) 301 | 302 | // WriteSubDoc sets an individual JSON property in a document. 303 | // Creates the document if it didn't exist. 304 | // If the parent property doesn't exist, returns `ErrPathNotFound`. 305 | // If a parent property has the wrong type, returns ErrPathMismatch. 306 | // Parameters: 307 | // - docID: The document ID or key 308 | // - subdocPath: The JSON path of the property to set 309 | // - cas: Expected CAS value, or 0 to ignore CAS conflicts 310 | // - value: The raw value to set. Must be valid JSON. 311 | // Return values: 312 | // - casOut: The document's new CAS 313 | // - err: Error, if any 314 | WriteSubDoc(ctx context.Context, k string, subdocPath string, cas uint64, value []byte) (casOut uint64, err error) 315 | } 316 | 317 | // XattrStore is a data store that supports extended attributes, i.e. document metadata. 318 | type XattrStore interface { 319 | 320 | // Writes a document and updates xattr values. Fails on a CAS mismatch. 321 | // Parameters: 322 | // - k: The key (document ID) 323 | // - exp: Expiration timestamp (0 for never) 324 | // - cas: Expected CAS value 325 | // - opts: Options; use PreserveExpiry to avoid setting expiry 326 | // - value: The raw value to set, or nil to *leave unchanged* 327 | // - xattrValues: Each key represent a raw xattrs value to set, setting any of these values to nil will result in an error. 328 | // - xattrsToDelete: The names of xattrs to delete. 
329 | WriteWithXattrs(ctx context.Context, k string, exp uint32, cas uint64, value []byte, xattrsValues map[string][]byte, xattrsToDelete []string, opts *MutateInOptions) (casOut uint64, err error) 330 | 331 | // WriteTombstoneWithXattrs is used when writing a tombstone. This is used when creating a tombstone for an existing document, modifying a tombstone, or creating a tombstone from no document. If deleteBody=true, will delete an existing body. 332 | WriteTombstoneWithXattrs(ctx context.Context, k string, exp uint32, cas uint64, xattrValue map[string][]byte, xattrsToDelete []string, deleteBody bool, opts *MutateInOptions) (casOut uint64, err error) 333 | 334 | // WriteResurrectionWithXattrs is used when resurrecting a tombstone. Any existing xattrs on a document will be overwritten. 335 | WriteResurrectionWithXattrs(ctx context.Context, k string, exp uint32, body []byte, xattrs map[string][]byte, opts *MutateInOptions) (casOut uint64, err error) 336 | 337 | // SetXattrs updates xattrs of a document. 338 | // Parameters: 339 | // - k: The key (document ID) 340 | // - xattrs: Each xattr value is stored as a key with the raw value to set or nil to delete. 341 | SetXattrs(ctx context.Context, k string, xattrs map[string][]byte) (casOut uint64, err error) 342 | 343 | // RemoveXattrs removes xattrs by name. Fails on a CAS mismatch. 344 | // - k: The key (document ID) 345 | // - xattrKeys: The names of the xattrs to remove 346 | // - cas: Expected CAS value 347 | RemoveXattrs(ctx context.Context, k string, xattrKeys []string, cas uint64) (err error) 348 | 349 | // DeleteSubDocPaths removes any SQL++ subdoc paths from a document. 350 | DeleteSubDocPaths(ctx context.Context, k string, paths ...string) (err error) 351 | 352 | // GetXattrs returns the xattrs with the following keys. If the key is not present, it will not be present in the returned map.
353 | GetXattrs(ctx context.Context, k string, xattrKeys []string) (xattrs map[string][]byte, casOut uint64, err error) 354 | 355 | // GetWithXattrs returns a document's value as well as its xattrs. 356 | GetWithXattrs(ctx context.Context, k string, xattrKeys []string) (v []byte, xv map[string][]byte, cas uint64, err error) 357 | 358 | // DeleteWithXattrs removes a document and its named xattrs. User xattrs will always be deleted, but system xattrs must be manually removed when a document becomes a tombstone. 359 | DeleteWithXattrs(ctx context.Context, k string, xattrKeys []string) error 360 | 361 | // WriteUpdateWithXattrs performs an interactive update of a document with MVCC. 362 | // See the documentation of WriteUpdateWithXattrsFunc for details. 363 | // - k: The key (document ID) 364 | // - xattrs: The name of the xattrs to view or update. 365 | // - exp: Expiration timestamp (0 for never) 366 | // - cas: Expected CAS value 367 | // - opts: Options; use PreserveExpiry to avoid setting expiry 368 | // - previous: The current document, if known. Will be used in place of the initial Get 369 | // - callback: The callback that mutates the document 370 | WriteUpdateWithXattrs(ctx context.Context, k string, xattrs []string, exp uint32, previous *BucketDocument, opts *MutateInOptions, callback WriteUpdateWithXattrsFunc) (casOut uint64, err error) 371 | 372 | // UpdateXattrs will update the xattrs for a document. Use MutateInOptions to preserve the expiry value of a document. This operation returns an error on a CAS mismatch. 373 | UpdateXattrs(ctx context.Context, k string, exp uint32, cas uint64, xv map[string][]byte, opts *MutateInOptions) (casOut uint64, err error) 374 | } 375 | 376 | // DeleteableStore is a data store that supports deletion of the underlying persistent storage. 377 | type DeleteableStore interface { 378 | // CloseAndDelete closes the store and removes its persistent storage.
379 | CloseAndDelete(ctx context.Context) error 380 | } 381 | 382 | type DeletableBucket = DeleteableStore 383 | 384 | // FlushableStore is a data store that supports flush. 385 | type FlushableStore interface { 386 | Flush() error 387 | } 388 | 389 | // WriteOptions are option flags for the Write method. 390 | type WriteOptions int 391 | 392 | const ( 393 | Raw = WriteOptions(1 << iota) // Value is raw []byte; don't JSON-encode it 394 | AddOnly // Fail with ErrKeyExists if key already has a value 395 | Persist // After write, wait until it's written to disk 396 | Indexable // After write, wait until it's ready for views to index 397 | Append // Appends to value instead of replacing it 398 | ) 399 | 400 | // MissingError is returned by Bucket API when a document is missing 401 | type MissingError struct { 402 | Key string // The document's ID 403 | } 404 | 405 | func (err MissingError) Error() string { 406 | return fmt.Sprintf("key %q missing", err.Key) 407 | } 408 | 409 | // XattrMissingError is returned by Bucket API when an Xattr is missing 410 | type XattrMissingError struct { 411 | Key string // The document ID 412 | Xattrs []string // missing xattrs 413 | } 414 | 415 | func (err XattrMissingError) Error() string { 416 | return fmt.Sprintf("key %q's xattr %q missing", err.Key, err.Xattrs) 417 | } 418 | 419 | // ErrKeyExists is returned from Write with AddOnly flag, when key already exists in the bucket. 420 | // (This is *not* returned from the Add method! Add has an extra boolean parameter to 421 | // indicate this state, so it returns (false,nil).) 422 | var ErrKeyExists = errors.New("Key exists") 423 | 424 | // ErrTimeout returned from Write with Persist or Indexable flags, if the value doesn't become 425 | // persistent or indexable within the timeout period. 426 | var ErrTimeout = errors.New("Timeout") 427 | 428 | // ErrCasFailureShouldRetry is returned from an update callback to cause the function to re-fetch the doc and try again.
429 | var ErrCasFailureShouldRetry = errors.New("CAS failure should retry") 430 | 431 | // DocTooBigErr is returned when trying to store a document value larger than the limit (usually 20MB.) 432 | type DocTooBigErr struct{} 433 | 434 | func (err DocTooBigErr) Error() string { 435 | return "document value too large" 436 | } 437 | 438 | // CasMismatchErr is returned when the input CAS does not match the document's current CAS. 439 | type CasMismatchErr struct { 440 | Expected, Actual uint64 441 | } 442 | 443 | func (err CasMismatchErr) Error() string { 444 | return fmt.Sprintf("cas mismatch: expected %x, really %x", err.Expected, err.Actual) 445 | } 446 | 447 | // ErrPathNotFound is returned by subdoc operations when the path is not found. 448 | var ErrPathNotFound = errors.New("subdocument path not found in document") 449 | 450 | // ErrPathExists is returned by subdoc operations when the path already exists, and is expected to not exist. 451 | var ErrPathExists = errors.New("subdocument path already exists in document") 452 | 453 | // ErrPathMismatch is returned by subdoc operations when the path exists but has the wrong type. 454 | var ErrPathMismatch = errors.New("type mismatch in subdocument path") 455 | 456 | // ErrDeleteXattrOnTombstone is returned when trying to delete an xattr on a tombstone document. 457 | var ErrDeleteXattrOnTombstone = errors.New("cannot delete xattr on tombstone") 458 | 459 | // ErrDeleteXattrOnTombstoneResurrection is returned when trying to delete an xattr on a resurrection of a tombstone document. 460 | var ErrDeleteXattrOnTombstoneResurrection = errors.New("cannot delete xattr on resurrection of a tombstone") 461 | 462 | // ErrDeleteXattrOnDocumentInsert is returned when trying to specify xattrs to delete on a document insert, which is invalid. 
463 | var ErrDeleteXattrOnDocumentInsert = errors.New("cannot delete xattrs on document insert") 464 | 465 | // ErrUpsertAndDeleteSameXattr is returned when trying to upsert and delete the same xattr in the same operation. 466 | var ErrUpsertAndDeleteSameXattr = errors.New("cannot upsert and delete the same xattr in the same operation") 467 | 468 | // ErrNilXattrValue is returned when trying to set a named xattr to a nil value. This is allowed in Couchbase Server, but not rosmar, and has no use in Sync Gateway. 469 | var ErrNilXattrValue = errors.New("nil xattr value not allowed") 470 | 471 | // ErrDocumentExistsOnResurrection is returned when trying to resurrect a document that already exists in a live form. 472 | var ErrDocumentExistsOnResurrection = errors.New("document already exists on resurrection") 473 | 474 | // ErrNeedXattrs is returned when xattrs are not specified. 475 | var ErrNeedXattrs = errors.New("xattrs must be specified to update or to delete") 476 | 477 | // ErrNeedBody is returned when a function requires a non-nil body. 478 | var ErrNeedBody = errors.New("body must be specified") 479 | 480 | // UpdateFunc is a callback passed to KVStore.Update. 481 | // Parameters: 482 | // - current: The document's current raw value. nil if it's a tombstone or doesn't exist. 483 | // Results: 484 | // - updated: The new value to store, or nil to leave the value alone. 485 | // - expiry: Nil to leave expiry alone, else a pointer to a new timestamp. 486 | // - delete: If true, the document will be deleted. 487 | // - err: Returning an error aborts the update. 488 | type UpdateFunc func(current []byte) (updated []byte, expiry *uint32, delete bool, err error) 489 | 490 | // UpdatedDoc is returned by WriteUpdateWithXattrsFunc, to indicate the new document value and xattrs. 491 | type UpdatedDoc struct { 492 | Doc []byte // Raw value of the document 493 | Xattrs map[string][]byte // Each xattr found with its value. If the xattr is specified, it will be preserved. 
494 | XattrsToDelete []string // xattrs to delete. This must be empty if the updated document will be a resurrection of a tombstone. 495 | IsTombstone bool // IsTombstone is true if the document is a tombstone 496 | Expiry *uint32 // Expiry is non-nil to set an expiry 497 | Spec []MacroExpansionSpec // Spec represents which macros to expand 498 | } 499 | 500 | // WriteUpdateWithXattrsFunc is used by XattrStore.WriteUpdateWithXattrs, used to transform the doc in preparation for update. 501 | // Parameters: 502 | // - doc: Current document raw value 503 | // - xattrs: Current value of xattrs 504 | // - cas: Document's current CAS 505 | // Return values: 506 | // - UpdatedDoc: New value to store (or nil to leave unchanged) 507 | // - err: If non-nil, cancels update. 508 | type WriteUpdateWithXattrsFunc func(doc []byte, xattrs map[string][]byte, cas uint64) (UpdatedDoc, error) 509 | -------------------------------------------------------------------------------- /collate.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 8 | 9 | package sgbucket 10 | 11 | import ( 12 | "encoding/json" 13 | "fmt" 14 | "reflect" 15 | 16 | "golang.org/x/text/collate" 17 | "golang.org/x/text/language" 18 | ) 19 | 20 | // Context for JSON collation. This struct is not thread-safe (or rather, its embedded string 21 | // collator isn't) so it should only be used on one goroutine at a time. 22 | type JSONCollator struct { 23 | stringCollator *collate.Collator 24 | } 25 | 26 | // A predigested form of a collatable value; it's faster to compare two of these. 
27 | type preCollated struct { 28 | tok token // type identifier 29 | val any // canonical form of value: float64, string, or []any 30 | } 31 | 32 | func defaultLocale() language.Tag { 33 | l, e := language.Parse("icu") 34 | if e != nil { 35 | return language.Und 36 | } 37 | return l 38 | } 39 | 40 | func CollateJSON(key1, key2 any) int { 41 | var collator JSONCollator 42 | return collator.Collate(key1, key2) 43 | } 44 | 45 | func (c *JSONCollator) Clear() { 46 | c.stringCollator = nil 47 | } 48 | 49 | // CouchDB-compatible collation/comparison of JSON values. 50 | // See: http://wiki.apache.org/couchdb/View_collation#Collation_Specification 51 | func (c *JSONCollator) Collate(key1, key2 any) int { 52 | pc1 := preCollate(key1) 53 | pc2 := preCollate(key2) 54 | return c.collate(&pc1, &pc2) 55 | } 56 | 57 | func (c *JSONCollator) collate(key1, key2 *preCollated) int { 58 | if key1.tok != key2.tok { 59 | return compareTokens(key1.tok, key2.tok) 60 | } 61 | switch key1.tok { 62 | case kNull, kFalse, kTrue: 63 | return 0 64 | case kNumber: 65 | return compareNumbers(key1.val.(float64), key2.val.(float64)) 66 | case kString: 67 | return c.compareStrings(key1.val.(string), key2.val.(string)) 68 | case kArray: 69 | // Handle the case where a walrus bucket is returning a []float64 70 | array1 := key1.val.([]any) 71 | array2 := key2.val.([]any) 72 | for i, item1 := range array1 { 73 | if i >= len(array2) { 74 | return 1 75 | } 76 | if cmp := c.Collate(item1, array2[i]); cmp != 0 { 77 | return cmp 78 | } 79 | } 80 | return compareNumbers(len(array1), len(array2)) 81 | case kObject: 82 | return 0 // ignore ordering for catch-all stuff 83 | default: 84 | panic("bogus collationType") 85 | } 86 | } 87 | 88 | // Converts an arbitrary value into a form that's faster to use in collations. 
89 | func preCollate(value any) (result preCollated) { 90 | if value == nil { 91 | return preCollated{kNull, nil} 92 | } 93 | 94 | ref := reflect.ValueOf(value) 95 | switch ref.Kind() { 96 | case reflect.Bool: 97 | if ref.Bool() { 98 | result.tok = kTrue 99 | } else { 100 | result.tok = kFalse 101 | } 102 | case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: 103 | result.tok = kNumber 104 | result.val = float64(ref.Int()) 105 | case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: 106 | result.tok = kNumber 107 | result.val = float64(ref.Uint()) 108 | case reflect.Float64, reflect.Float32: 109 | result.tok = kNumber 110 | result.val = ref.Float() 111 | case reflect.String: 112 | result.tok = kString 113 | result.val = value 114 | if jnum, ok := value.(json.Number); ok { 115 | // json.Number is actually a string, but can be parsed to a number 116 | if f, err := jnum.Float64(); err == nil { 117 | result.tok = kNumber 118 | result.val = f 119 | } else if i, err := jnum.Int64(); err == nil { 120 | result.tok = kNumber 121 | result.val = float64(i) 122 | } 123 | } 124 | case reflect.Slice: 125 | slice, ok := value.([]any) 126 | if !ok { // not []any (e.g. []float64 from walrus); convert element-wise 127 | n := ref.Len() 128 | slice = make([]any, n) // assign the outer slice (a `:=` here shadowed it, discarding the conversion) 129 | for i := 0; i < n; i++ { 130 | slice[i] = ref.Index(i).Interface() 131 | } 132 | } 133 | result.tok = kArray 134 | result.val = slice 135 | case reflect.Map: 136 | result.tok = kObject 137 | default: 138 | panic(fmt.Sprintf("collation doesn't understand %+v (%T)", value, value)) 139 | } 140 | return 141 | } 142 | 143 | func compareNumbers[N ~int | ~int8 | ~int64 | ~float64](n1 N, n2 N) int { 144 | if n1 < n2 { 145 | return -1 146 | } else if n1 > n2 { 147 | return 1 148 | } 149 | return 0 150 | } 151 | 152 | func (c *JSONCollator) compareStrings(s1, s2 string) int { 153 | stringCollator := c.stringCollator 154 | if stringCollator == nil { 155 | stringCollator = collate.New(defaultLocale()) 156 | c.stringCollator = 
stringCollator 157 | } 158 | return stringCollator.CompareString(s1, s2) 159 | } 160 | -------------------------------------------------------------------------------- /collate_raw.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2013-Present Couchbase, Inc. 3 | 4 | Use of this software is governed by the Business Source License included in 5 | the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that 6 | file, in accordance with the Business Source License, use of this software will 7 | be governed by the Apache License, Version 2.0, included in the file 8 | licenses/APL2.txt. 9 | */ 10 | 11 | package sgbucket 12 | 13 | import ( 14 | "fmt" 15 | "strconv" 16 | "unicode/utf8" 17 | ) 18 | 19 | type token int8 20 | 21 | // JSON input tokens. The order is significant: it's the collation ordering defined by CouchDB. 22 | const ( 23 | kEndArray = token(iota) 24 | kEndObject 25 | kComma 26 | kColon 27 | kNull 28 | kFalse 29 | kTrue 30 | kNumber 31 | kString 32 | kArray 33 | kObject 34 | ) 35 | 36 | // Collates raw JSON data without unmarshaling it. 37 | // THE INPUTS MUST BE VALID JSON, WITH NO WHITESPACE! 38 | // Invalid input will result in a panic, or perhaps just bogus output. 
39 | func (c *JSONCollator) CollateRaw(key1, key2 []byte) int { 40 | depth := 0 41 | for { 42 | c1 := key1[0] 43 | c2 := key2[0] 44 | tok1 := tokenize(c1) 45 | tok2 := tokenize(c2) 46 | 47 | // If token types don't match, stop and return their relative ordering: 48 | if tok1 != tok2 { 49 | return compareTokens(tok1, tok2) 50 | } else { 51 | switch tok1 { 52 | case kNull, kTrue: 53 | advance(&key1, 4) 54 | advance(&key2, 4) 55 | case kFalse: 56 | advance(&key1, 5) 57 | advance(&key2, 5) 58 | case kNumber: 59 | if diff := compareNumbers(readNumber(&key1), readNumber(&key2)); diff != 0 { 60 | return diff 61 | } 62 | case kString: 63 | if diff := c.compareStrings(c.readString(&key1), c.readString(&key2)); diff != 0 { 64 | return diff 65 | } 66 | case kArray, kObject: 67 | advance(&key1, 1) 68 | advance(&key2, 1) 69 | depth++ 70 | case kEndArray, kEndObject: 71 | advance(&key1, 1) 72 | advance(&key2, 1) 73 | depth-- 74 | case kComma, kColon: 75 | advance(&key1, 1) 76 | advance(&key2, 1) 77 | } 78 | } 79 | if depth == 0 { 80 | return 0 81 | } 82 | } 83 | } 84 | 85 | func tokenize(c byte) token { 86 | switch c { 87 | case 'n': 88 | return kNull 89 | case 'f': 90 | return kFalse 91 | case 't': 92 | return kTrue 93 | case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': 94 | return kNumber 95 | case '"': 96 | return kString 97 | case ']': 98 | return kEndArray 99 | case '}': 100 | return kEndObject 101 | case ',': 102 | return kComma 103 | case ':': 104 | return kColon 105 | case '[': 106 | return kArray 107 | case '{': 108 | return kObject 109 | default: 110 | panic(fmt.Sprintf("Unexpected character '%c' parsing JSON", c)) 111 | } 112 | } 113 | 114 | // Removes n bytes from the start of the slice 115 | func advance(s *[]byte, n int) { 116 | *s = (*s)[n:] 117 | } 118 | 119 | // Simple byte comparison 120 | func compareTokens(a, b token) int { 121 | if a < b { 122 | return -1 123 | } else if a > b { 124 | return 1 125 | } 126 | return 0 127 | } 128 | 129 | // Parse a 
JSON number from the input stream 130 | func readNumber(input *[]byte) float64 { 131 | // Look for the end of the number, either at a delimiter or at end of input: 132 | end := len(*input) 133 | for i, c := range *input { 134 | if c == ',' || c == ']' || c == '}' { 135 | end = i 136 | break 137 | } 138 | } 139 | numPart := string((*input)[0:end]) 140 | result, _ := strconv.ParseFloat(numPart, 64) 141 | *input = (*input)[end:] 142 | return result 143 | } 144 | 145 | // Parse a JSON string from the input stream (starting at the opening quote) 146 | func (c *JSONCollator) readString(input *[]byte) string { 147 | // Look for the quote marking the end of the string. Count up escape sequence: 148 | i := 1 149 | escapes := 0 150 | for { 151 | c := (*input)[i] 152 | if c == '"' { 153 | break 154 | } else if c == '\\' { 155 | escapes++ 156 | i++ 157 | if (*input)[i] == 'u' { 158 | i += 4 // skip past Unicode escape /uxxxx 159 | } 160 | } 161 | i++ 162 | } 163 | 164 | var str string 165 | if escapes > 0 { 166 | str = c.readEscapedString((*input)[1:i], i-escapes) // slower case 167 | } else { 168 | str = string((*input)[1:i]) 169 | } 170 | *input = (*input)[i+1:] // Skip the closing quote as well 171 | return str 172 | } 173 | 174 | // Parse a string, interpreting JSON escape sequences: 175 | func (c *JSONCollator) readEscapedString(input []byte, bufSize int) string { 176 | decoded := make([]byte, 0, bufSize) 177 | for i := 0; i < len(input); i++ { 178 | c := input[i] 179 | if c == '\\' { 180 | i++ 181 | c = input[i] 182 | if c == 'u' { 183 | // Decode a Unicode escape: 184 | r, _ := strconv.ParseUint(string(input[i+1:i+5]), 16, 32) 185 | i += 4 186 | var utf [8]byte 187 | size := utf8.EncodeRune(utf[0:], rune(r)) 188 | decoded = append(decoded, utf[0:size]...) 
189 | } else { 190 | switch c { 191 | case 'b': 192 | c = '\b' 193 | case 'n': 194 | c = '\n' 195 | case 'r': 196 | c = '\r' 197 | case 't': 198 | c = '\t' 199 | } 200 | decoded = append(decoded, c) 201 | } 202 | } else { 203 | decoded = append(decoded, c) 204 | } 205 | } 206 | return string(decoded) 207 | // This can be optimized by scanning through input for the next backslash, 208 | // then appending all the chars up to it in one append() call. 209 | } 210 | -------------------------------------------------------------------------------- /collate_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 
8 | 9 | package sgbucket 10 | 11 | import ( 12 | "encoding/json" 13 | "testing" 14 | 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | type collateTest struct { 19 | left interface{} 20 | right interface{} 21 | result int 22 | } 23 | type collateTestList []collateTest 24 | 25 | var collateTests, collateRawTests collateTestList 26 | 27 | func init() { 28 | collateTests = collateTestList{ 29 | // scalars 30 | {true, false, 1}, 31 | {false, true, -1}, 32 | {nil, float64(17), -1}, 33 | {4321, 4321, 0}, 34 | {int32(4321), uint16(4000), 1}, 35 | {float64(1), float64(1), 0}, 36 | {float64(123), float64(1), 1}, 37 | {float64(123), 0123.0, 0}, 38 | {float64(123), "123", -1}, 39 | {"1234", "123", 1}, 40 | {"1234", "1235", -1}, 41 | {"1234", "1234", 0}, 42 | 43 | // verify unicode collation 44 | {"a", "A", -1}, 45 | {"A", "aa", -1}, 46 | {"B", "aa", 1}, 47 | 48 | // arrays 49 | {[]interface{}{}, "foo", 1}, 50 | {[]interface{}{}, []interface{}{}, 0}, 51 | {[]interface{}{true}, []interface{}{true}, 0}, 52 | {[]interface{}{false}, []interface{}{nil}, 1}, 53 | {[]interface{}{}, []interface{}{nil}, -1}, 54 | {[]interface{}{float64(123)}, []interface{}{float64(45)}, 1}, 55 | {[]interface{}{float64(123)}, []interface{}{float64(45), float64(67)}, 1}, 56 | {[]interface{}{123.4, "wow"}, []interface{}{123.40, float64(789)}, 1}, 57 | {[]interface{}{float64(5), "wow"}, []interface{}{float64(5), "wow"}, 0}, 58 | {[]interface{}{float64(5), "wow"}, float64(1), 1}, 59 | {[]interface{}{float64(1)}, []interface{}{float64(5), "wow"}, -1}, 60 | 61 | // nested arrays 62 | {[]interface{}{[]interface{}{}}, []interface{}{}, 1}, 63 | {[]interface{}{float64(1), []interface{}{float64(2), float64(3)}, float64(4)}, 64 | []interface{}{float64(1), []interface{}{float64(2), 3.1}, float64(4), float64(5), float64(6)}, -1}, 65 | 66 | // unicode strings 67 | {"fréd", "fréd", 0}, 68 | {"ømø", "omo", 1}, 69 | {"\t", " ", -1}, 70 | {"\001", " ", -1}, 71 | } 72 | 73 | for _, test := range collateTests { 74 
| jsonLeft, _ := json.Marshal(test.left) 75 | jsonRight, _ := json.Marshal(test.right) 76 | collateRawTests = append(collateRawTests, collateTest{ 77 | left: jsonLeft, 78 | right: jsonRight, 79 | result: test.result, 80 | }) 81 | } 82 | } 83 | 84 | func TestCollateJSON(t *testing.T) { 85 | var collator JSONCollator 86 | for _, test := range collateTests { 87 | result := collator.Collate(test.left, test.right) 88 | if result != test.result { 89 | t.Errorf("Comparing %v with %v, expected %v, got %v", test.left, test.right, test.result, result) 90 | } 91 | } 92 | } 93 | 94 | func TestCollateJSONRaw(t *testing.T) { 95 | var collator JSONCollator 96 | for _, test := range collateRawTests { 97 | result := collator.CollateRaw(test.left.([]byte), test.right.([]byte)) 98 | if result != test.result { 99 | t.Errorf("CollateRawJSON `%v` with `%v`, expected %v, got %v", test.left, test.right, test.result, result) 100 | } 101 | } 102 | } 103 | 104 | func TestReadNumber(t *testing.T) { 105 | tests := []struct { 106 | input string 107 | value float64 108 | }{ 109 | {"0", 0}, 110 | {"1234", 1234}, 111 | {"-1", -1}, 112 | {"3.14159", 3.14159}, 113 | {"1.7e26", 1.7e26}, 114 | {"1.7e-10", 1.7e-10}, 115 | {"1.7e+6", 1.7e+6}, 116 | } 117 | for _, test := range tests { 118 | input := []byte(test.input) 119 | value := readNumber(&input) 120 | if value != test.value { 121 | t.Errorf("readNumber(%q) -> %f, should be %f", test.input, value, test.value) 122 | } 123 | if len(input) != 0 { 124 | t.Errorf("readNumber(%q), remainder is %q, should be empty", test.input, string(input)) 125 | } 126 | 127 | test.input += "," 128 | input = []byte(test.input) 129 | value = readNumber(&input) 130 | if value != test.value { 131 | t.Errorf("readNumber(%q) -> %f, should be %f", test.input, value, test.value) 132 | } 133 | if string(input) != "," { 134 | t.Errorf("readNumber(%q), remainder is %q, should be \",\"", test.input, string(input)) 135 | } 136 | } 137 | } 138 | 139 | func TestReadString(t 
*testing.T) { 140 | var collator JSONCollator 141 | tests := []struct { 142 | input string 143 | value string 144 | }{ 145 | {"", ""}, 146 | {"X", "X"}, 147 | {"xyzzy", "xyzzy"}, 148 | {`J.R. \"Bob\" Dobbs`, `J.R. "Bob" Dobbs`}, 149 | {`\bFoo\t\tbar\n`, "\bFoo\t\tbar\n"}, 150 | {`X\\y`, `X\y`}, 151 | {`x\u0020y`, `x y`}, 152 | {`\ufade\u0123`, "\ufade\u0123"}, 153 | } 154 | for _, test := range tests { 155 | input := []byte("\"" + test.input + "\"") 156 | value := collator.readString(&input) 157 | if value != test.value { 158 | t.Errorf("readString(`%s`) -> `%s`, should be `%s`", test.input, value, test.value) 159 | } 160 | } 161 | } 162 | 163 | // Collate already-parsed values (this is the fastest option) 164 | func BenchmarkCollate(b *testing.B) { 165 | for i := 0; i < b.N; i++ { 166 | var collator JSONCollator 167 | for _, test := range collateTests { 168 | result := collator.Collate(test.left, test.right) 169 | if result != test.result { 170 | panic("wrong result") 171 | } 172 | } 173 | } 174 | } 175 | 176 | // Collate raw JSON data (about 3.5x faster than parse-and-collate) 177 | func BenchmarkCollateRaw(b *testing.B) { 178 | for i := 0; i < b.N; i++ { 179 | var collator JSONCollator 180 | for _, test := range collateRawTests { 181 | result := collator.CollateRaw(test.left.([]byte), test.right.([]byte)) 182 | if result != test.result { 183 | panic("wrong result") 184 | } 185 | } 186 | } 187 | } 188 | 189 | // Parse raw JSON and collate the values (this is the slowest) 190 | func BenchmarkParseAndCollate(b *testing.B) { 191 | for i := 0; i < b.N; i++ { 192 | var collator JSONCollator 193 | for _, test := range collateRawTests { 194 | var left, right interface{} 195 | require.NoError(b, json.Unmarshal(test.left.([]byte), &left)) 196 | require.NoError(b, json.Unmarshal(test.right.([]byte), &right)) 197 | result := collator.Collate(left, right) 198 | if result != test.result { 199 | panic("wrong result") 200 | } 201 | } 202 | } 203 | } 204 | 
-------------------------------------------------------------------------------- /datastore_name.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 8 | 9 | package sgbucket 10 | 11 | import ( 12 | "fmt" 13 | "regexp" 14 | ) 15 | 16 | // DataStoreName provides the methods that can give you each part of a data store. 17 | // 18 | // Each implementation is free to decide how to store the data store name, to avoid both sgbucket leaking into implementations, 19 | // and also reduce duplication for storing these values, in the event SDKs already hold copies of names internally. 20 | type DataStoreName interface { 21 | ScopeName() string 22 | CollectionName() string 23 | } 24 | 25 | // Simple struct implementation of DataStoreName. 26 | type DataStoreNameImpl struct { 27 | Scope, Collection string 28 | } 29 | 30 | const ( 31 | DefaultCollection = "_default" // Name of the default collection 32 | DefaultScope = "_default" // Name of the default collection 33 | ScopeCollectionSeparator = "." 
// Delimiter between scope & collection names 34 | ) 35 | 36 | var dsNameRegexp = regexp.MustCompile("^[a-zA-Z0-9-][a-zA-Z0-9%_-]{0,250}$") 37 | 38 | func (sc DataStoreNameImpl) ScopeName() string { 39 | return sc.Scope 40 | } 41 | 42 | func (sc DataStoreNameImpl) CollectionName() string { 43 | return sc.Collection 44 | } 45 | 46 | func (sc DataStoreNameImpl) String() string { 47 | return sc.Scope + ScopeCollectionSeparator + sc.Collection 48 | } 49 | 50 | func (sc DataStoreNameImpl) IsDefault() bool { 51 | return sc.Scope == DefaultScope && sc.Collection == DefaultCollection 52 | } 53 | 54 | // Validates the names and creates new scope and collection pair 55 | func NewValidDataStoreName(scope, collection string) (id DataStoreNameImpl, err error) { 56 | if IsValidDataStoreName(scope, collection) { 57 | id = DataStoreNameImpl{scope, collection} 58 | } else { 59 | err = fmt.Errorf("invalid scope/collection name '%s.%s'", scope, collection) 60 | } 61 | return 62 | } 63 | 64 | // Returns true if scope.coll is a valid data store name. 65 | func IsValidDataStoreName(scope, coll string) bool { 66 | scopeIsDefault := (scope == DefaultScope) 67 | collIsDefault := (coll == DefaultCollection) 68 | return (scopeIsDefault || dsNameRegexp.MatchString(scope)) && 69 | ((collIsDefault && scopeIsDefault) || dsNameRegexp.MatchString(coll)) 70 | } 71 | 72 | var ( 73 | // Enforce interface conformance: 74 | _ DataStoreName = &DataStoreNameImpl{"a", "b"} 75 | ) 76 | -------------------------------------------------------------------------------- /datastore_name_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. 
As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 8 | 9 | package sgbucket 10 | 11 | import ( 12 | "testing" 13 | 14 | "github.com/stretchr/testify/assert" 15 | ) 16 | 17 | func TestValidDataStoreName(t *testing.T) { 18 | 19 | validDataStoreNames := [][2]string{ 20 | {"myScope", "myCollection"}, 21 | {"ABCabc123_-%", "ABCabc123_-%"}, 22 | {"_default", "myCollection"}, 23 | {"_default", "_default"}, 24 | } 25 | 26 | invalidDataStoreNames := [][2]string{ 27 | {"a:1", "a:1"}, 28 | {"_a", "b"}, 29 | {"a", "_b"}, 30 | {"%a", "b"}, 31 | {"%a", "b"}, 32 | {"a", "%b"}, 33 | {"myScope", "_default"}, 34 | {"_default", "a:1"}, 35 | } 36 | 37 | for _, validPair := range validDataStoreNames { 38 | assert.True(t, IsValidDataStoreName(validPair[0], validPair[1]), 39 | "(%q, %q) should be valid", validPair[0], validPair[1]) 40 | } 41 | for _, invalidPair := range invalidDataStoreNames { 42 | assert.False(t, IsValidDataStoreName(invalidPair[0], invalidPair[1]), 43 | "(%q, %q) should be invalid", invalidPair[0], invalidPair[1]) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /design_doc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 
8 | 9 | package sgbucket 10 | 11 | type ViewDef struct { 12 | Map string `json:"map"` 13 | Reduce string `json:"reduce,omitempty"` 14 | } 15 | 16 | type ViewMap map[string]ViewDef 17 | 18 | type DesignDocOptions struct { 19 | LocalSeq bool `json:"local_seq,omitempty"` 20 | IncludeDesign bool `json:"include_design,omitempty"` 21 | Raw bool `json:"raw,omitempty"` 22 | IndexXattrOnTombstones bool `json:"index_xattr_on_deleted_docs,omitempty"` 23 | } 24 | 25 | // A Couchbase design document, which stores map/reduce function definitions. 26 | type DesignDoc struct { 27 | Language string `json:"language,omitempty"` 28 | Views ViewMap `json:"views,omitempty"` 29 | Options *DesignDocOptions `json:"options,omitempty"` 30 | } 31 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/couchbase/sg-bucket 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/robertkrimen/otto v0.0.0-20211024170158-b87d35c0b86f 7 | github.com/stretchr/testify v1.9.0 8 | golang.org/x/text v0.17.0 9 | ) 10 | 11 | require ( 12 | github.com/davecgh/go-spew v1.1.1 // indirect 13 | github.com/pmezard/go-difflib v1.0.0 // indirect 14 | gopkg.in/sourcemap.v1 v1.0.5 // indirect 15 | gopkg.in/yaml.v3 v3.0.1 // indirect 16 | ) 17 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 2 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 3 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 5 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 7 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 8 | github.com/robertkrimen/otto v0.0.0-20211024170158-b87d35c0b86f h1:a7clxaGmmqtdNTXyvrp/lVO/Gnkzlhc/+dLs5v965GM= 9 | github.com/robertkrimen/otto v0.0.0-20211024170158-b87d35c0b86f/go.mod h1:/mK7FZ3mFYEn9zvNPhpngTyatyehSwte5bJZ4ehL5Xw= 10 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 11 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 12 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 13 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 14 | golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= 15 | golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= 16 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 17 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 18 | gopkg.in/readline.v1 v1.0.0-20160726135117-62c6fe619375/go.mod h1:lNEQeAhU009zbRxng+XOj5ITVgY24WcbNnQopyfKoYQ= 19 | gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= 20 | gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= 21 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 22 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 23 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 24 | -------------------------------------------------------------------------------- /js_map_fn.go: -------------------------------------------------------------------------------- 1 | // Copyright 2012-Present Couchbase, Inc. 
2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 8 | 9 | package sgbucket 10 | 11 | import ( 12 | "context" 13 | "encoding/json" 14 | "fmt" 15 | "time" 16 | 17 | "github.com/robertkrimen/otto" 18 | ) 19 | 20 | const kTaskCacheSize = 4 21 | 22 | // A compiled JavaScript 'map' function, API-compatible with Couchbase Server 2.0. 23 | // Based on JSRunner, so this is not thread-safe; use its wrapper JSMapFunction for that. 24 | type jsMapTask struct { 25 | JSRunner 26 | output []*ViewRow 27 | } 28 | 29 | // Compiles a JavaScript map function to a jsMapTask object. 30 | func newJsMapTask(funcSource string, timeout time.Duration) (JSServerTask, error) { 31 | mapper := &jsMapTask{} 32 | err := mapper.Init(funcSource, timeout) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | // Implementation of the 'emit()' callback: 38 | mapper.DefineNativeFunction("emit", func(call otto.FunctionCall) otto.Value { 39 | key, err1 := call.ArgumentList[0].Export() 40 | value, err2 := call.ArgumentList[1].Export() 41 | if err1 != nil || err2 != nil { 42 | panic(fmt.Sprintf("Unsupported key or value types: emit(%#v,%#v): %v %v", key, value, err1, err2)) 43 | } 44 | mapper.output = append(mapper.output, &ViewRow{Key: key, Value: value}) 45 | return otto.UndefinedValue() 46 | }) 47 | 48 | mapper.Before = func() { 49 | mapper.output = []*ViewRow{} 50 | } 51 | mapper.After = func(result otto.Value, err error) (interface{}, error) { 52 | output := mapper.output 53 | mapper.output = nil 54 | return output, err 55 | } 56 | return mapper, nil 57 | } 58 | 59 | // JSMapFunction is a thread-safe wrapper around a jsMapTask, i.e. 
a Couchbase-Server-compatible JavaScript 60 | // 'map' function. 61 | type JSMapFunction struct { 62 | *JSServer 63 | } 64 | 65 | type JSMapFunctionInput struct { 66 | Doc string // Doc body 67 | DocID string // Doc ID 68 | VbNo uint32 // Vbucket number 69 | VbSeq uint64 // Sequence (CAS) in Vbucket 70 | Xattrs map[string][]byte // Xattrs, each value marshaled to JSON 71 | } 72 | 73 | func NewJSMapFunction(ctx context.Context, fnSource string, timeout time.Duration) *JSMapFunction { 74 | return &JSMapFunction{ 75 | JSServer: NewJSServer(ctx, fnSource, timeout, kTaskCacheSize, 76 | func(ctx context.Context, fnSource string, timeout time.Duration) (JSServerTask, error) { 77 | return newJsMapTask(fnSource, timeout) 78 | }), 79 | } 80 | } 81 | 82 | // CallFunction calls a jsMapTask. 83 | func (mapper *JSMapFunction) CallFunction(ctx context.Context, input *JSMapFunctionInput) ([]*ViewRow, error) { 84 | result1, err := mapper.Call(ctx, JSONString(input.Doc), MakeMeta(input)) 85 | if err != nil { 86 | return nil, err 87 | } 88 | rows := result1.([]*ViewRow) 89 | for i := range rows { 90 | rows[i].ID = input.DocID 91 | } 92 | return rows, nil 93 | } 94 | 95 | // MakeMeta returns a Couchbase-compatible 'meta' object, given a document ID 96 | func MakeMeta(input *JSMapFunctionInput) map[string]interface{} { 97 | meta := map[string]interface{}{ 98 | "id": input.DocID, 99 | "vb": input.VbNo, 100 | "seq": input.VbSeq, 101 | } 102 | if len(input.Xattrs) > 0 { 103 | xattrs := map[string]any{} 104 | for key, data := range input.Xattrs { 105 | var value any 106 | err := json.Unmarshal(data, &value) 107 | if err != nil { 108 | panic("Can't unmarshal xattrs") 109 | } 110 | xattrs[key] = value 111 | } 112 | meta["xattrs"] = xattrs 113 | } 114 | return meta 115 | } 116 | -------------------------------------------------------------------------------- /js_map_fn_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013-Present 
// testCtx returns the context used by all tests in this file.
func testCtx(t *testing.T) context.Context {
	return context.Background() // return background for tests, to match sync_gateway interfaces
}

// Just verify that the calls to the emit() fn show up in the output.
func TestEmitFunction(t *testing.T) {
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, `function(doc) {emit("key", "value"); emit("k2","v2")}`, 0)
	rows, err := mapper.CallFunction(ctx, &JSMapFunctionInput{`{}`, "doc1", 0, 0, nil})
	assertNoError(t, err, "CallFunction failed")
	assert.Equal(t, 2, len(rows))
	// Rows come back in emit order, with the doc ID filled in by CallFunction:
	assert.Equal(t, &ViewRow{ID: "doc1", Key: "key", Value: "value"}, rows[0])
	assert.Equal(t, &ViewRow{ID: "doc1", Key: "k2", Value: "v2"}, rows[1])
}

// TestTimeout verifies that an infinite JS loop is interrupted with ErrJSTimeout.
// The timeout argument is a time.Duration, so 1 means one nanosecond — i.e. the
// call is effectively guaranteed to time out.
func TestTimeout(t *testing.T) {
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, `function(doc) {while(true) {}}`, 1)
	_, err := mapper.CallFunction(ctx, &JSMapFunctionInput{`{}`, "doc1", 0, 0, nil})
	assert.ErrorIs(t, err, ErrJSTimeout)
}

// testMap is a helper: compiles mapFn, runs it once on doc (as "doc1"), and
// fails the test if the call errors.
func testMap(t *testing.T, mapFn string, doc string) []*ViewRow {
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, mapFn, 0)
	rows, err := mapper.CallFunction(ctx, &JSMapFunctionInput{doc, "doc1", 0, 0, nil})
	assertNoError(t, err, fmt.Sprintf("CallFunction failed on %s", doc))
	return rows
}

// Now just make sure the input comes through intact
func TestInputParse(t *testing.T) {
	rows := testMap(t, `function(doc) {emit(doc.key, doc.value);}`,
		`{"key": "k", "value": "v"}`)
	assert.Equal(t, 1, len(rows))
	assert.Equal(t, &ViewRow{ID: "doc1", Key: "k", Value: "v"}, rows[0])
}

// Test different types of keys/values:
func TestKeyTypes(t *testing.T) {
	rows := testMap(t, `function(doc) {emit(doc.key, doc.value);}`,
		`{"ID": "doc1", "key": true, "value": false}`)
	assert.Equal(t, &ViewRow{ID: "doc1", Key: true, Value: false}, rows[0])
	rows = testMap(t, `function(doc) {emit(doc.key, doc.value);}`,
		`{"ID": "doc1", "key": null, "value": 0}`)
	// JSON numbers round-trip through the JS runtime as float64:
	assert.Equal(t, &ViewRow{ID: "doc1", Key: nil, Value: float64(0)}, rows[0])
	rows = testMap(t, `function(doc) {emit(doc.key, doc.value);}`,
		`{"ID": "doc1", "key": ["foo", 23, []], "value": [null]}`)
	assert.Equal(t, &ViewRow{
		ID:    "doc1",
		Key:   []interface{}{"foo", 23.0, []interface{}{}},
		Value: []interface{}{nil},
	}, rows[0])

}

// Empty/no-op map fn
func TestEmptyJSMapFunction(t *testing.T) {
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, `function(doc) {}`, 0)
	rows, err := mapper.CallFunction(ctx, &JSMapFunctionInput{`{"key": "k", "value": "v"}`, "doc1", 0, 0, nil})
	assertNoError(t, err, "CallFunction failed")
	assert.Equal(t, 0, len(rows))
}

// Test meta object
func TestMeta(t *testing.T) {
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, `function(doc,meta) {if (meta.id!="doc1") throw("bad ID");}`, 0)
	rows, err := mapper.CallFunction(ctx, &JSMapFunctionInput{`{"key": "k", "value": "v"}`, "doc1", 0, 0, nil})
	assertNoError(t, err, "CallFunction failed")
	assert.Equal(t, 0, len(rows))
}

// TestXattrs verifies that xattr bytes are unmarshaled and visible to the map
// function under meta.xattrs.<name>.
func TestXattrs(t *testing.T) {
	xattrs := map[string][]byte{
		"_sync": []byte(`{"hey":"hey"}`),
		"user":  []byte(`{"a":1}`),
	}
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, `function(doc,meta) {if (meta.xattrs._sync.hey != "hey") throw("bad xattrs");}`, 0)
	rows, err := mapper.CallFunction(ctx, &JSMapFunctionInput{`{"key": "k", "value": "v"}`, "doc1", 0, 0, xattrs})
	assertNoError(t, err, "CallFunction failed")
	assert.Equal(t, 0, len(rows))
}

// TestNoXattrs verifies meta.xattrs is absent (undefined) when no xattrs are supplied.
func TestNoXattrs(t *testing.T) {
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, `function(doc,meta) {if (meta.xattrs !== undefined) throw("unexpected xattrs");}`, 0)
	rows, err := mapper.CallFunction(ctx, &JSMapFunctionInput{`{"key": "k", "value": "v"}`, "doc1", 0, 0, nil})
	assertNoError(t, err, "CallFunction failed")
	assert.Equal(t, 0, len(rows))
}

// Test the public API
func TestPublicJSMapFunction(t *testing.T) {
	ctx := testCtx(t)
	mapper := NewJSMapFunction(ctx, `function(doc) {emit(doc.key, doc.value);}`, 0)
	rows, err := mapper.CallFunction(ctx, &JSMapFunctionInput{`{"key": "k", "value": "v"}`, "doc1", 0, 0, nil})
	assertNoError(t, err, "CallFunction failed")
	assert.Equal(t, 1, len(rows))
	assert.Equal(t, &ViewRow{ID: "doc1", Key: "k", Value: "v"}, rows[0])
}
package sgbucket

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/robertkrimen/otto"
)

// jsStackDepthLimit defines an upper-limit for how deep the JavaScript stack can go before Otto returns an error.
//
// The value 10,000 aligns with the maximum depth of Go's stdlib JSON library (golang/go#31789)
// which is a good match for the worst-case of users recursing into nested document properties.
const jsStackDepthLimit = 10_000

// JSONString is an alternate type to wrap a Go string in to mark that Call() should
// interpret it as JSON. That is, when Call() sees a parameter of type JSONString it
// will parse the JSON and use the result as the parameter value, instead of just
// converting it to a JS string.
type JSONString string

// NativeFunction is the signature of Go callbacks that can be installed in the JS
// runtime's global namespace (see JSRunner.DefineNativeFunction).
type NativeFunction func(otto.FunctionCall) otto.Value

// ErrJSTimeout is returned if a call times out. This specific instance will be returned,
// so callers can match it with errors.Is.
var ErrJSTimeout = errors.New("javascript function timed out")

// JSRunner is a Go interface to a JavaScript function (like a map/reduce/channelmap/validation function.)
// Each JSRunner object compiles a single function into a JavaScript runtime, and lets you
// call that function.
// JSRunner is NOT thread-safe! For that, use JSServer, a wrapper around it.
type JSRunner struct {
	js       *otto.Otto    // embedded otto JS interpreter
	fn       otto.Value    // compiled function; otto undefined when unset
	fnSource string        // source fn was compiled from (used to skip no-op recompiles)
	timeout  time.Duration // max wall-clock time per call; 0 = no limit

	// Optional function that will be called just before the JS function.
	Before func()

	// Optional function that will be called after the JS function returns, and can convert
	// its output from JS (Otto) values to Go values.
	After func(otto.Value, error) (interface{}, error)
}
}" 57 | func NewJSRunner(funcSource string, timeout time.Duration) (*JSRunner, error) { 58 | runner := &JSRunner{} 59 | if err := runner.Init(funcSource, timeout); err != nil { 60 | return nil, err 61 | } 62 | return runner, nil 63 | } 64 | 65 | // Initializes a JSRunner. 66 | func (runner *JSRunner) Init(funcSource string, timeout time.Duration) error { 67 | return runner.InitWithLogging(funcSource, timeout, defaultLogFunction, defaultLogFunction) 68 | } 69 | 70 | func (runner *JSRunner) InitWithLogging(funcSource string, timeout time.Duration, consoleErrorFunc func(string), consoleLogFunc func(string)) error { 71 | runner.js = otto.New() 72 | runner.js.SetStackDepthLimit(jsStackDepthLimit) 73 | runner.fn = otto.UndefinedValue() 74 | runner.timeout = timeout 75 | 76 | runner.DefineNativeFunction("log", func(call otto.FunctionCall) otto.Value { 77 | var output string 78 | for _, arg := range call.ArgumentList { 79 | str, _ := arg.ToString() 80 | output += str + " " 81 | } 82 | logg("JS: %s", output) 83 | return otto.UndefinedValue() 84 | }) 85 | 86 | if _, err := runner.SetFunction(funcSource); err != nil { 87 | return err 88 | } 89 | 90 | return runner.js.Set("console", map[string]interface{}{ 91 | "error": consoleErrorFunc, 92 | "log": consoleLogFunc, 93 | }) 94 | 95 | } 96 | 97 | func defaultLogFunction(s string) { 98 | fmt.Println(s) 99 | } 100 | 101 | // Sets the JavaScript function the runner executes. 
// SetFunction sets the JavaScript function the runner executes.
// Returns true if the function changed; passing the current source is a no-op,
// and an empty source clears the function (calls will then return undefined).
func (runner *JSRunner) SetFunction(funcSource string) (bool, error) {
	if funcSource == runner.fnSource {
		return false, nil // no-op
	}
	if funcSource == "" {
		runner.fn = otto.UndefinedValue()
	} else {
		// Parenthesize so the source is parsed as a function *expression*:
		fnobj, err := runner.js.Object("(" + funcSource + ")")
		if err != nil {
			return false, err
		}
		if fnobj.Class() != "Function" {
			return false, errors.New("JavaScript source does not evaluate to a function")
		}
		runner.fn = fnobj.Value()
	}
	runner.fnSource = funcSource
	return true, nil
}

// SetTimeout sets the runner's timeout. A value of 0 removes any timeout.
func (runner *JSRunner) SetTimeout(timeout time.Duration) {
	runner.timeout = timeout
}

// DefineNativeFunction lets you define native helper functions (for example, the "emit"
// function to be called by JS map functions) in the main namespace of the JS runtime.
// This method is not thread-safe and should only be called before making any calls to the
// main JS function.
func (runner *JSRunner) DefineNativeFunction(name string, function NativeFunction) {
	_ = runner.js.Set(name, (func(otto.FunctionCall) otto.Value)(function))
}

// jsonToValue parses a JSON string into a value suitable for passing to the JS fn.
// An empty string maps to JS null. Note the asymmetric returns: an otto.Value in
// the empty case, a plain unmarshaled Go value otherwise — both forms are accepted
// by otto's ToValue conversion downstream.
func (runner *JSRunner) jsonToValue(jsonStr string) (interface{}, error) {
	if jsonStr == "" {
		return otto.NullValue(), nil
	}
	var parsed interface{}
	if err := json.Unmarshal([]byte(jsonStr), &parsed); err != nil {
		return nil, fmt.Errorf("Unparseable JSRunner input: %s", jsonStr)
	}
	return parsed, nil
}

// ToValue calls ToValue on the otto instance. Required for conversion of
// complex types to otto Values.
func (runner *JSRunner) ToValue(value interface{}) (otto.Value, error) {
	return runner.js.ToValue(value)
}
// CallWithJSON invokes the JS function with JSON string inputs.
// The result is nil unless an After hook is installed, in which case it is
// whatever After returns.
func (runner *JSRunner) CallWithJSON(inputs ...string) (interface{}, error) {
	if runner.Before != nil {
		runner.Before()
	}
	var result otto.Value
	var err error
	if runner.fn.IsUndefined() {
		result = otto.UndefinedValue()
	} else {
		inputJS := make([]interface{}, len(inputs))
		for i, inputStr := range inputs {
			inputJS[i], err = runner.jsonToValue(inputStr)
			if err != nil {
				return nil, err
			}
		}
		result, err = runner.fn.Call(runner.fn, inputJS...)
	}
	if runner.After != nil {
		return runner.After(result, err)
	}
	return nil, err
}

// Call invokes the JS function with Go inputs. Values of type JSONString are parsed
// as JSON first; everything else is converted via otto's ToValue.
// The result is nil unless an After hook is installed (see CallWithJSON).
//
// Timeout handling: if runner.timeout > 0, a watchdog goroutine arms otto's
// Interrupt channel after the deadline; the interrupt panics with ErrJSTimeout,
// which the deferred recover converts into an error return. The 'completed'
// channel stops the watchdog when the call finishes first.
func (runner *JSRunner) Call(ctx context.Context, inputs ...interface{}) (_ interface{}, err error) {
	if runner.Before != nil {
		runner.Before()
	}

	var result otto.Value
	if runner.fn.IsUndefined() {
		result = otto.UndefinedValue()
	} else {
		inputJS := make([]interface{}, len(inputs))
		for i, input := range inputs {
			if jsonStr, ok := input.(JSONString); ok {
				if input, err = runner.jsonToValue(string(jsonStr)); err != nil {
					return nil, err
				}
			}
			inputJS[i], err = runner.js.ToValue(input)
			if err != nil {
				return nil, fmt.Errorf("Couldn't convert %#v to JS: %s", input, err)
			}
		}

		var completed chan struct{}
		timeout := runner.timeout
		if timeout > 0 {
			completed = make(chan struct{})
			// Recover the ErrJSTimeout panic raised by the interrupt function;
			// any other panic is re-raised untouched.
			defer func() {
				if caught := recover(); caught != nil {
					if caught == ErrJSTimeout {
						err = ErrJSTimeout
						return
					}
					panic(caught)
				}
			}()

			runner.js.Interrupt = make(chan func(), 1)
			timer := time.NewTimer(timeout)
			go func() {
				defer timer.Stop()

				select {
				case <-completed:
					return
				case <-timer.C:
					// Executed by otto on its own goroutine at the next
					// interpreter checkpoint:
					runner.js.Interrupt <- func() {
						panic(ErrJSTimeout)
					}
				}
			}()
		}

		result, err = runner.fn.Call(runner.fn, inputJS...)
		if completed != nil {
			close(completed)
		}
	}
	if runner.After != nil {
		return runner.After(result, err)
	}
	return nil, err
}

// JSServer is a thread-safe wrapper around a JSRunner, pooling up to cap(tasks)
// compiled task instances for concurrent use.
type JSServer struct {
	factory  JSServerTaskFactory
	tasks    chan JSServerTask
	fnSource string
	lock     sync.RWMutex  // Protects access to .fnSource
	timeout  time.Duration // Maximum time to allow the js func to run
}

// JSServerTask is an abstract interface for a callable interpreted function.
// JSRunner implements this.
type JSServerTask interface {
	SetFunction(funcSource string) (bool, error)
	Call(ctx context.Context, inputs ...interface{}) (interface{}, error)
}

// JSServerTaskFactory is a factory function that creates JSServerTasks.
type JSServerTaskFactory func(ctx context.Context, fnSource string, timeout time.Duration) (JSServerTask, error)
}" 37 | func NewJSServer(ctx context.Context, funcSource string, timeout time.Duration, maxTasks int, factory JSServerTaskFactory) *JSServer { 38 | if factory == nil { 39 | factory = func(ctx context.Context, fnSource string, timeout time.Duration) (JSServerTask, error) { 40 | return NewJSRunner(fnSource, timeout) 41 | } 42 | } 43 | server := &JSServer{ 44 | factory: factory, 45 | fnSource: funcSource, 46 | tasks: make(chan JSServerTask, maxTasks), 47 | timeout: timeout, 48 | } 49 | return server 50 | } 51 | 52 | func (server *JSServer) Function() (fn string) { 53 | server.lock.RLock() 54 | defer server.lock.RUnlock() 55 | return server.fnSource 56 | } 57 | 58 | // Public thread-safe entry point for changing the JS function. 59 | func (server *JSServer) SetFunction(fnSource string) (bool, error) { 60 | server.lock.Lock() 61 | defer server.lock.Unlock() 62 | if fnSource == server.fnSource { 63 | return false, nil 64 | } 65 | server.fnSource = fnSource 66 | return true, nil 67 | } 68 | 69 | func (server *JSServer) getTask(ctx context.Context) (task JSServerTask, err error) { 70 | fnSource := server.Function() 71 | select { 72 | case task = <-server.tasks: 73 | _, err = task.SetFunction(fnSource) 74 | default: 75 | task, err = server.factory(ctx, fnSource, server.timeout) 76 | } 77 | return 78 | } 79 | 80 | func (server *JSServer) returnTask(task JSServerTask) { 81 | select { 82 | case server.tasks <- task: 83 | default: 84 | // Drop it on the floor if the pool is already full 85 | } 86 | } 87 | 88 | type WithTaskFunc func(JSServerTask) (interface{}, error) 89 | 90 | func (server *JSServer) WithTask(ctx context.Context, fn WithTaskFunc) (interface{}, error) { 91 | task, err := server.getTask(ctx) 92 | if err != nil { 93 | return nil, err 94 | } 95 | defer server.returnTask(task) 96 | return fn(task) 97 | } 98 | 99 | // Public thread-safe entry point for invoking the JS function. 
100 | // The input parameters are JavaScript expressions (most likely JSON) that will be parsed and 101 | // passed as parameters to the function. 102 | // The output value will be nil unless a custom 'After' function has been installed, in which 103 | // case it'll be the result of that function. 104 | func (server *JSServer) CallWithJSON(ctx context.Context, jsonParams ...string) (interface{}, error) { 105 | goParams := make([]JSONString, len(jsonParams)) 106 | for i, str := range jsonParams { 107 | goParams[i] = JSONString(str) 108 | } 109 | return server.Call(ctx, goParams) 110 | } 111 | 112 | // Public thread-safe entry point for invoking the JS function. 113 | // The input parameters are Go values that will be converted to JavaScript values. 114 | // JSON can be passed in as a value of type JSONString (a wrapper type for string.) 115 | // The output value will be nil unless a custom 'After' function has been installed, in which 116 | // case it'll be the result of that function. 117 | func (server *JSServer) Call(ctx context.Context, goParams ...interface{}) (interface{}, error) { 118 | return server.WithTask(ctx, func(task JSServerTask) (interface{}, error) { 119 | return task.Call(ctx, goParams...) 120 | }) 121 | } 122 | -------------------------------------------------------------------------------- /licenses/APL2.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /licenses/BSL-Couchbase.txt: -------------------------------------------------------------------------------- 1 | COUCHBASE BUSINESS SOURCE LICENSE AGREEMENT 2 | 3 | Business Source License 1.1 4 | Licensor: Couchbase, Inc. 5 | Licensed Work: Couchbase Sync Gateway 3.0 6 | The Licensed Work is © 2021-Present Couchbase, Inc. 
7 | 8 | Additional Use Grant: You may make production use of the Licensed Work, provided 9 | you comply with the following conditions: 10 | 11 | (i) You may not prepare a derivative work based upon the Licensed Work and 12 | distribute or otherwise offer such derivative work, whether on a standalone 13 | basis or in combination with other products, applications, or services 14 | (including in any "as-a-service" offering, such as, by way of example, a 15 | software-as-a-service, database-as-a-service, or infrastructure-as-a-service 16 | offering, or any other offering based on a cloud computing or other type of 17 | hosted distribution model (collectively, "Hosted Offerings")), for a fee or 18 | otherwise on a commercial or other for-profit basis. 19 | 20 | (ii) You may not link the Licensed Work to, or otherwise include the Licensed 21 | Work in or with, any product, application, or service (including in any Hosted 22 | Offering) that is distributed or otherwise offered, whether on a standalone 23 | basis or in combination with other products, applications, or services for a fee 24 | or otherwise on a commercial or other for-profit basis. Condition (ii) shall not 25 | limit the generality of condition (i) above. 26 | 27 | 28 | Change Date: July 1, 2025 29 | 30 | Change License: Apache License, Version 2.0 31 | 32 | 33 | Notice 34 | 35 | The Business Source License (this document, or the "License") is not an Open 36 | Source license. However, the Licensed Work will eventually be made available 37 | under an Open Source License, as stated in this License. License text copyright 38 | © 2017 MariaDB Corporation Ab, All Rights Reserved. "Business Source License" is 39 | a trademark of MariaDB Corporation Ab. 40 | 41 | Terms 42 | 43 | The Licensor hereby grants You the right to copy, modify, create derivative 44 | works, redistribute, and make non-production use of the Licensed Work. 
The 45 | Licensor may make an Additional Use Grant, above, permitting limited production 46 | use. 47 | 48 | Effective on the Change Date, or the fourth anniversary of the first publicly 49 | available distribution of a specific version of the Licensed Work under this 50 | License, whichever comes first, the Licensor hereby grants you rights under the 51 | terms of the Change License, and the rights granted in the paragraph above 52 | terminate. 53 | 54 | If your use of the Licensed Work does not comply with the requirements currently 55 | in effect as described in this License, you must purchase a commercial license 56 | from the Licensor, its affiliated entities, or authorized resellers, or you must 57 | refrain from using the Licensed Work. 58 | 59 | All copies of the original and modified Licensed Work, and derivative works of 60 | the Licensed Work, are subject to this License. This License applies separately 61 | for each version of the Licensed Work and the Change Date may vary for each 62 | version of the Licensed Work released by Licensor. 63 | 64 | You must conspicuously display this License on each original or modified copy of 65 | the Licensed Work. If you receive the Licensed Work in original or modified form 66 | from a third party, the terms and conditions set forth in this License apply to 67 | your use of that work. 68 | 69 | Any use of the Licensed Work in violation of this License will automatically 70 | terminate your rights under this License for the current and all other versions 71 | of the Licensed Work. 72 | 73 | This License does not grant you any right in any trademark or logo of Licensor 74 | or its affiliates (provided that you may use a trademark or logo of Licensor as 75 | expressly required by this License). 76 | 77 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN 78 | "AS IS" BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS 79 | OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, 80 | FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 81 | 82 | MariaDB hereby grants you permission to use this License's text to license your 83 | works, and to refer to it using the trademark "Business Source License", as long 84 | as you comply with the Covenants of Licensor below. 85 | 86 | Covenants of Licensor 87 | 88 | In consideration of the right to use this License's text and the "Business 89 | Source License" name and trademark, Licensor covenants to MariaDB, and to all 90 | other recipients of the licensed work to be provided by Licensor: 91 | 92 | 1. To specify as the Change License the GPL Version 2.0 or any later version, or 93 | a license that is compatible with GPL Version 2.0 or a later version, where 94 | "compatible" means that software provided under the Change License can be 95 | included in a program with software provided under GPL Version 2.0 or a later 96 | version. Licensor may specify additional Change Licenses without limitation. 97 | 98 | 2. To either: (a) specify an additional grant of rights to use that does not 99 | impose any additional restriction on the right granted in this License, as the 100 | Additional Use Grant; or (b) insert the text "None". 101 | 102 | 3. To specify a Change Date. 103 | 104 | 4. Not to modify this License in any other way. -------------------------------------------------------------------------------- /licenses/addlicense.tmpl: -------------------------------------------------------------------------------- 1 | Copyright {{.Year}}-Present Couchbase, Inc. 2 | 3 | Use of this software is governed by the Business Source License included 4 | in the file licenses/BSL-Couchbase.txt. 
As of the Change Date specified 5 | in that file, in accordance with the Business Source License, use of this 6 | software will be governed by the Apache License, Version 2.0, included in 7 | the file licenses/APL2.txt. 8 | -------------------------------------------------------------------------------- /logg.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2015-Present Couchbase, Inc. 3 | 4 | Use of this software is governed by the Business Source License included in 5 | the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that 6 | file, in accordance with the Business Source License, use of this software will 7 | be governed by the Apache License, Version 2.0, included in the file 8 | licenses/APL2.txt. 9 | */ 10 | 11 | package sgbucket 12 | 13 | import ( 14 | "log" 15 | "sync/atomic" 16 | ) 17 | 18 | // Set this to true to enable logging, 0 == false, >0 == true 19 | var logging uint32 = 0 20 | 21 | func logg(fmt string, args ...interface{}) { 22 | loggingEnabled := atomic.LoadUint32(&logging) 23 | if loggingEnabled > 0 { 24 | log.Printf("SG-Bucket: "+fmt, args...) 25 | } 26 | } 27 | 28 | func SetLogging(setLogging bool) { 29 | if setLogging { 30 | atomic.StoreUint32(&logging, 1) 31 | } else { 32 | atomic.StoreUint32(&logging, 0) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /pipeline.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2013-Present Couchbase, Inc. 3 | 4 | Use of this software is governed by the Business Source License included in 5 | the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that 6 | file, in accordance with the Business Source License, use of this software will 7 | be governed by the Apache License, Version 2.0, included in the file 8 | licenses/APL2.txt. 
9 | */ 10 | 11 | package sgbucket 12 | 13 | import ( 14 | "runtime" 15 | "sync" 16 | ) 17 | 18 | type PipelineFunc func(input interface{}, output chan<- interface{}) 19 | 20 | type Pipeline struct { 21 | funcs []PipelineFunc 22 | input <-chan interface{} 23 | } 24 | 25 | func NewPipeline(chanSize int, parallelism int, funcs ...PipelineFunc) { 26 | p := Pipeline{ 27 | funcs: funcs, 28 | input: make(chan interface{}, chanSize), 29 | } 30 | var input <-chan interface{} = p.input 31 | for _, f := range funcs { 32 | input = Parallelize(f, parallelism, input) 33 | } 34 | } 35 | 36 | // Feeds the input channel through a number of copies of the function in parallel. 37 | // This call is asynchronous. Output can be read from the returned channel. 38 | func Parallelize(f PipelineFunc, parallelism int, input <-chan interface{}) <-chan interface{} { 39 | if parallelism == 0 { 40 | parallelism = runtime.GOMAXPROCS(0) 41 | } 42 | output := make(chan interface{}, len(input)) 43 | var waiter sync.WaitGroup 44 | for j := 0; j < parallelism; j++ { 45 | waiter.Add(1) 46 | go func() { 47 | defer waiter.Done() 48 | for item := range input { 49 | f(item, output) 50 | } 51 | }() 52 | } 53 | go func() { 54 | waiter.Wait() 55 | close(output) 56 | }() 57 | return output 58 | } 59 | -------------------------------------------------------------------------------- /queries.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 
// QueryLanguage identifies query languages understood by QueryableStore objects.
type QueryLanguage string

const (
	SQLppLanguage  QueryLanguage = "SQL++"  // SQL++ as implemented by Couchbase Server
	SQLiteLanguage QueryLanguage = "SQLite" // SQLite's dialect of SQL (including JSON syntax)

	// N1QLLanguage is an alias for SQLppLanguage (N1QL was the earlier name for SQL++).
	N1QLLanguage QueryLanguage = SQLppLanguage
)

// ConsistencyMode specifies what level of data consistency is required in a query.
type ConsistencyMode int

const (
	// NotBounded indicates no data consistency is required.
	NotBounded = ConsistencyMode(1)
	// RequestPlus indicates that request-level data consistency is required.
	RequestPlus = ConsistencyMode(2)
)

// KeyspaceQueryToken is the token used for keyspace name replacement in a query
// statement, e.g. `SELECT ... FROM $_keyspace WHERE ...`.
// It will be replaced with the escaped keyspace name.
const KeyspaceQueryToken = "$_keyspace"

// ErrNoRows is returned from QueryResultIterator.One if there are no rows.
var ErrNoRows = fmt.Errorf("no rows in query result")

// ErrIndexExists is returned from QueryableStore.CreateIndex if an index with that name exists.
var ErrIndexExists = fmt.Errorf("index already exists")

// QueryableStore can run queries in some query language(s).
type QueryableStore interface {
	// CanQueryIn returns true if the given query language is supported.
	CanQueryIn(language QueryLanguage) bool

	// Query runs a query.
	//   language:    the language the statement is written in
	//   statement:   the query text (may contain KeyspaceQueryToken)
	//   args:        named parameters referenced by the statement
	//   consistency: required data-consistency level
	//   adhoc:       presumably marks a one-off (unprepared) query — TODO confirm
	//                against implementations; semantics are not defined here
	Query(
		language QueryLanguage,
		statement string,
		args map[string]any,
		consistency ConsistencyMode,
		adhoc bool,
	) (QueryResultIterator, error)

	// CreateIndex creates an index.
	// Returns ErrIndexExists if an index with that name already exists.
	CreateIndex(indexName string, expression string, filterExpression string) error

	// ExplainQuery returns an object containing an explanation of the database's query plan.
	ExplainQuery(statement string, params map[string]any) (plan map[string]any, err error)
}

// QueryResultIterator is a common query iterator interface,
// implemented by sgbucket.ViewResult, gocb.ViewResults, and gocb.QueryResults.
type QueryResultIterator interface {
	// One unmarshals a single result row into valuePtr, and then closes the iterator.
	// Returns ErrNoRows if there are no rows.
	One(ctx context.Context, valuePtr any) error
	// Next unmarshals the next result row into valuePtr.
	// Returns false when reaching end of result set.
	Next(ctx context.Context, valuePtr any) bool
	// NextBytes retrieves raw JSON bytes for the next result row.
	NextBytes() []byte
	// Close closes the iterator. Returns any row-level errors seen during iteration.
	Close() error
}
// FeedOpcode represents the type of operation carried by a FeedEvent.
type FeedOpcode uint8

const (
	FeedOpBeginBackfill = FeedOpcode(iota) // Start of prior events
	FeedOpEndBackfill                      // End of prior events
	FeedOpMutation                         // A document was modified
	FeedOpDeletion                         // A document was deleted
)

// feedOpcodeNames holds the display name for each known opcode, indexed by value.
var feedOpcodeNames = [...]string{
	FeedOpBeginBackfill: "BeginBackfill",
	FeedOpEndBackfill:   "EndBackfill",
	FeedOpMutation:      "Mutation",
	FeedOpDeletion:      "Deletion",
}

// String returns a human-readable name for the opcode,
// or "Opcode(n)" for values outside the known set.
func (o FeedOpcode) String() string {
	if int(o) < len(feedOpcodeNames) {
		return feedOpcodeNames[o]
	}
	return fmt.Sprintf("Opcode(%d)", o)
}

// FeedDataType represents the type of data in a FeedEvent (a flag bitmask).
type FeedDataType = uint8

// FeedDataTypeRaw denotes a raw (binary) document — no flags set.
const FeedDataTypeRaw FeedDataType = 0

const (
	FeedDataTypeJSON   FeedDataType = 1 << iota // JSON document
	FeedDataTypeSnappy                          // Snappy compression
	FeedDataTypeXattr                           // Document has Xattrs
)

// FeedEvent is a notification of a change in a data store.
type FeedEvent struct {
	TimeReceived time.Time    // Used for latency calculations
	Key          []byte       // Item key
	Value        []byte       // Item value
	Cas          uint64       // Cas of document
	RevNo        uint64       // Server revision number of document
	Flags        uint32       // Item flags
	Expiry       uint32       // Item expiration time (UNIX Epoch time)
	CollectionID uint32       // ID of the item's collection - 0x0 for the default collection
	VbNo         uint16       // Vbucket of the document
	Opcode       FeedOpcode   // Type of event
	DataType     FeedDataType // Datatype of document
	Synchronous  bool         // When true, requires that event is processed synchronously
}

// MutationFeed delivers events from the bucket on the channel returned by
// Events(). Remember to call Close() when you're done, unless its channel has
// closed itself already.
type MutationFeed interface {
	Events() <-chan FeedEvent      // Read-only channel of feed events
	WriteEvents() chan<- FeedEvent // Write-only channel to inject feed events
	Close() error                  // Closes the feed
}

// FeedArguments are options for starting a MutationFeed.
type FeedArguments struct {
	ID               string              // Feed ID, used to build unique identifier for DCP feed
	Backfill         uint64              // Timestamp of oldest item to send. Use FeedNoBackfill to suppress all past items.
	Dump             bool                // If set, feed will stop after sending existing items.
	KeysOnly         bool                // If true, events will not contain values or xattrs.
	Terminator       chan bool           // Feed will stop when this channel is closed (DCP Only)
	DoneChan         chan struct{}       // DoneChan is closed when the mutation feed terminates.
	CheckpointPrefix string              // Key of checkpoint doc to save state in, if non-empty
	Scopes           map[string][]string // Collection names to stream - map keys are scopes
}
FeedNoBackfill value 95 | // used as actual value for walrus, go-couchbase bucket, these event types aren't defined using usual approach 96 | const FeedNoBackfill = math.MaxUint64 97 | 98 | // Value for FeedArguments.Backfill denoting that the feed should resume from where it left off 99 | // previously, or start from the beginning if there's no previous checkpoint. 100 | // Requires that CheckpointPrefix is set. 101 | const FeedResume = 1 102 | 103 | // FeedEventCallbackFunc performs mutation processing. Return value indicates whether the mutation should trigger 104 | // checkpoint persistence (used to avoid recursive checkpoint document processing) 105 | type FeedEventCallbackFunc func(event FeedEvent) bool 106 | 107 | // ErrXattrInvalidLen is returned if the xattr is corrupt. 108 | var ErrXattrInvalidLen = errors.New("Xattr stream length") 109 | 110 | // ErrEmptyMetadata is returned when there is no Sync Gateway metadata 111 | var ErrEmptyMetadata = errors.New("Empty Sync Gateway metadata") 112 | 113 | // The name and value of an extended attribute (xattr) 114 | type Xattr struct { 115 | Name string 116 | Value []byte 117 | } 118 | 119 | // EncodeValueWithXattrs encodes a document value and Xattrs into DCP data format. 120 | // Set the FeedDataTypeXattr flag if you store a value of this format. 121 | func EncodeValueWithXattrs(body []byte, xattrs ...Xattr) []byte { 122 | /* Details on DCP data format taken from https://docs.google.com/document/d/18UVa5j8KyufnLLy29VObbWRtoBn9vs8pcxttuMt6rz8/edit#heading=h.caqiui1pmmmb. : 123 | 124 | When the XATTR bit is set the first 4 bytes of the body contain the size of the entire XATTR 125 | section, in network byte order (big-endian). 
126 | 127 | Following the length you'll find an iovector-style encoding of all of the XATTR key-value 128 | pairs, each with the following encoding: 129 | 130 | uint32_t length of next xattr pair (network byte order) 131 | (bytes) xattr key in modified UTF-8 132 | 0x00 end-of-string marker 133 | (bytes) xattr value in modified UTF-8 134 | 0x00 end-of-string marker 135 | */ 136 | xattrLen := func(xattr Xattr) uint32 { 137 | return uint32(len(xattr.Name) + 1 + len(xattr.Value) + 1) 138 | } 139 | 140 | var totalSize uint32 141 | for _, xattr := range xattrs { 142 | totalSize += 4 + xattrLen(xattr) 143 | } 144 | 145 | var out bytes.Buffer 146 | _ = binary.Write(&out, binary.BigEndian, totalSize) 147 | for _, xattr := range xattrs { 148 | _ = binary.Write(&out, binary.BigEndian, xattrLen(xattr)) 149 | out.WriteString(xattr.Name) 150 | out.WriteByte(0) 151 | out.Write(xattr.Value) 152 | out.WriteByte(0) 153 | } 154 | out.Write(body) 155 | return out.Bytes() 156 | } 157 | 158 | // DecodeValueWithXattrs converts DCP Xattrs value format into a body and zero or more Xattrs. Only the xattrs passed into the function will be decoded. 159 | func DecodeValueWithXattrs(xattrNames []string, data []byte) (body []byte, xattrs map[string][]byte, err error) { 160 | return decodeValueWithXattrs(data, xattrNames, false) 161 | } 162 | 163 | // DecodeValueWithXattrs converts DCP Xattrs value format into a body and xattrs. All xattrs found will be returned. 164 | func DecodeValueWithAllXattrs(data []byte) (body []byte, xattrs map[string][]byte, err error) { 165 | return decodeValueWithXattrs(data, nil, true) 166 | } 167 | 168 | // decodeValueWithXattrs will turn DCP byte stream into xattrs and a body. It is safe to call if the DCP event DataType has the FeedDataTypeXattr flag. 
169 | 170 | // Details on format (taken from https://docs.google.com/document/d/18UVa5j8KyufnLLy29VObbWRtoBn9vs8pcxttuMt6rz8/edit#heading=h.caqiui1pmmmb.): 171 | /* 172 | When the XATTR bit is set the first uint32_t in the body contains the size of the entire XATTR section. 173 | 174 | 175 | Byte/ 0 | 1 | 2 | 3 | 176 | / | | | | 177 | |0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7| 178 | +---------------+---------------+---------------+---------------+ 179 | 0| Total xattr length in network byte order | 180 | +---------------+---------------+---------------+---------------+ 181 | 182 | Following the length you'll find an iovector-style encoding of all of the XATTR key-value pairs with the following encoding: 183 | 184 | uint32_t length of next xattr pair (network order) 185 | xattr key in modified UTF-8 186 | 0x00 187 | xattr value in modified UTF-8 188 | 0x00 189 | 190 | The 0x00 byte after the key saves us from storing a key length, and the trailing 0x00 is just for convenience to allow us to use string functions to search in them. 191 | */ 192 | func decodeValueWithXattrs(data []byte, xattrNames []string, allXattrs bool) (body []byte, xattrs map[string][]byte, err error) { 193 | if allXattrs && len(xattrNames) > 0 { 194 | return nil, nil, fmt.Errorf("can not specify specific xattrs and allXattrs simultaneously") 195 | } 196 | if len(data) < 4 { 197 | return nil, nil, fmt.Errorf("invalid DCP xattr data: %w truncated (%d bytes)", ErrEmptyMetadata, len(data)) 198 | } 199 | 200 | xattrsLen := binary.BigEndian.Uint32(data[0:4]) 201 | if int(xattrsLen)+4 > len(data) { 202 | return nil, nil, fmt.Errorf("invalid DCP xattr data: %w length %d (data is only %d bytes)", ErrXattrInvalidLen, xattrsLen, len(data)) 203 | } 204 | body = data[xattrsLen+4:] 205 | if xattrsLen == 0 { 206 | return body, nil, nil 207 | } 208 | 209 | // In the xattr key/value pairs, key and value are both terminated by 0x00 (byte(0)). 
Use this as a separator to split the byte slice 210 | separator := []byte("\x00") 211 | 212 | xattrs = make(map[string][]byte, len(xattrNames)) 213 | // Iterate over xattr key/value pairs 214 | pos := uint32(4) 215 | for pos < xattrsLen { 216 | pairLen := binary.BigEndian.Uint32(data[pos : pos+4]) 217 | if pairLen == 0 || int(pos+pairLen) > len(data) { 218 | return nil, nil, fmt.Errorf("invalid DCP xattr data: unexpected xattr pair length (%d)", pairLen) 219 | } 220 | pos += 4 221 | pairBytes := data[pos : pos+pairLen] 222 | components := bytes.Split(pairBytes, separator) 223 | // xattr pair has the format [key]0x00[value]0x00, and so should split into three components 224 | if len(components) != 3 { 225 | return nil, nil, fmt.Errorf("Unexpected number of components found in xattr pair: %s", pairBytes) 226 | } 227 | xattrKey := string(components[0]) 228 | if allXattrs { 229 | xattrs[xattrKey] = components[1] 230 | } else { 231 | for _, xattrName := range xattrNames { 232 | if xattrName == xattrKey { 233 | xattrs[xattrName] = components[1] 234 | break 235 | } 236 | } 237 | // Exit if we have all xattrs we want 238 | if !allXattrs && len(xattrs) == len(xattrNames) { 239 | return body, xattrs, nil 240 | } 241 | } 242 | pos += pairLen 243 | } 244 | return body, xattrs, nil 245 | } 246 | 247 | // DecodeXattrNames extracts only the xattr names from a DCP value. When systemOnly is true, only 248 | // returns system xattrs 249 | func DecodeXattrNames(data []byte, systemOnly bool) (xattrKeys []string, err error) { 250 | 251 | if len(data) < 4 { 252 | return nil, nil 253 | } 254 | 255 | xattrsLen := binary.BigEndian.Uint32(data[0:4]) 256 | if int(xattrsLen)+4 > len(data) { 257 | return nil, nil 258 | } 259 | 260 | if xattrsLen == 0 { 261 | return nil, nil 262 | } 263 | 264 | // In the xattr key/value pairs, key and value are both terminated by 0x00 (byte(0)). 
Use this as a separator to split the byte slice 265 | separator := []byte("\x00") 266 | 267 | // Iterate over xattr key/value pairs 268 | xattrKeys = make([]string, 0) 269 | pos := uint32(4) 270 | for pos < xattrsLen { 271 | pairLen := binary.BigEndian.Uint32(data[pos : pos+4]) 272 | if pairLen == 0 || int(pos+pairLen) > len(data) { 273 | return nil, fmt.Errorf("invalid DCP xattr data: unexpected xattr pair length (%d)", pairLen) 274 | } 275 | pos += 4 276 | pairBytes := data[pos : pos+pairLen] 277 | components := bytes.Split(pairBytes, separator) 278 | // xattr pair has the format [key]0x00[value]0x00, and so should split into three components 279 | if len(components) != 3 { 280 | return nil, fmt.Errorf("Unexpected number of components found in xattr pair: %s", pairBytes) 281 | } 282 | xattrName := string(components[0]) 283 | if !systemOnly || strings.HasPrefix(xattrName, "_") { 284 | xattrKeys = append(xattrKeys, xattrName) 285 | } 286 | pos += pairLen 287 | } 288 | return xattrKeys, nil 289 | } 290 | -------------------------------------------------------------------------------- /tap_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2023-Present Couchbase, Inc. 2 | // 3 | // Use of this software is governed by the Business Source License included 4 | // in the file licenses/BSL-Couchbase.txt. As of the Change Date specified 5 | // in that file, in accordance with the Business Source License, use of this 6 | // software will be governed by the Apache License, Version 2.0, included in 7 | // the file licenses/APL2.txt. 
// TestDCPEncodeXattrs round-trips document bodies and xattr sets through
// EncodeValueWithXattrs / DecodeValueWithXattrs / DecodeValueWithAllXattrs,
// and verifies name-only extraction via DecodeXattrNames.
func TestDCPEncodeXattrs(t *testing.T) {
	allXattrs := []Xattr{
		{Name: "_sync", Value: []byte(`{"rev":1234}`)},
		{Name: "swim", Value: []byte(`{"stroke":"dolphin"}`)},
		// Zero-length xattr value must survive the round trip.
		{Name: "empty", Value: []byte(``)},
	}

	xattrNames := []string{"_sync", "swim", "empty"}
	tests := []struct {
		name   string
		body   []byte
		xattrs []Xattr
	}{
		{
			name:   "normal body",
			body:   []byte(`{"name":"the document body"}`),
			xattrs: allXattrs,
		},
		{
			name:   "empty body",
			body:   []byte{},
			xattrs: allXattrs,
		},
		{
			name:   "no xattrs",
			body:   []byte(`{"name":"the document body"}`),
			xattrs: nil,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			value := EncodeValueWithXattrs(test.body, test.xattrs...)
			// Decode requesting the known names explicitly.
			gotBody, gotXattrs, err := DecodeValueWithXattrs(xattrNames, value)
			require.NoError(t, err)
			assert.Equal(t, test.body, gotBody)
			requireXattrsEqual(t, test.xattrs, gotXattrs)

			// Decode requesting everything; results must match.
			gotBody, gotXattrs, err = DecodeValueWithAllXattrs(value)
			require.NoError(t, err)
			require.Equal(t, test.body, gotBody)
			requireXattrsEqual(t, test.xattrs, gotXattrs)

			// Verify name-only retrieval
			// NOTE(review): testify's require.Equal convention is (expected, actual);
			// several calls below pass (actual, expected). Comparison still works,
			// but failure messages would be misleading.
			decodedXattrNames, err := DecodeXattrNames(value, false)
			require.NoError(t, err)
			if test.xattrs == nil {
				require.Len(t, decodedXattrNames, 0)
			} else {
				require.Equal(t, decodedXattrNames, xattrNames)
			}

			// Verify name-only retrieval, system-only (leading-underscore names)
			decodedSystemXattrNames, err := DecodeXattrNames(value, true)
			require.NoError(t, err)
			if test.xattrs == nil {
				require.Len(t, decodedSystemXattrNames, 0)
			} else {
				require.Equal(t, decodedSystemXattrNames, []string{"_sync"})
			}
		})
	}
}

// TestDCPDecodeValue exercises DecodeValueWithXattrs error handling on
// truncated/corrupt streams, plus a valid single-pair stream.
func TestDCPDecodeValue(t *testing.T) {
	testCases := []struct {
		name              string
		body              []byte
		expectedErr       error
		expectedBody      []byte
		expectedSyncXattr []byte
	}{
		{
			// Too short for a plausible section, and length word is garbage.
			name:        "bad value",
			body:        []byte("abcde"),
			expectedErr: ErrXattrInvalidLen,
		},
		{
			// Declared section length exceeds the available bytes.
			name:        "xattr length 4, overflow",
			body:        []byte{0x00, 0x00, 0x00, 0x04, 0x01},
			expectedErr: ErrXattrInvalidLen,
		},
		{
			// No data at all: reported as missing metadata.
			name:        "empty",
			body:        nil,
			expectedErr: ErrEmptyMetadata,
		},
		{
			name:              "single xattr pair and body",
			body:              getSingleXattrDCPBytes(),
			expectedBody:      []byte(`{"value":"ABC"}`),
			expectedSyncXattr: []byte(`{"seq":1}`),
		},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			// DecodeValueWithXattrs is the underlying function
			body, xattrs, err := DecodeValueWithXattrs([]string{"_sync"}, test.body)
			require.ErrorIs(t, err, test.expectedErr)
			require.Equal(t, test.expectedBody, body)
			if test.expectedSyncXattr != nil {
				require.Len(t, xattrs, 1)
				require.Equal(t, test.expectedSyncXattr, xattrs["_sync"])
			} else {
				require.Nil(t, xattrs)
			}
		})
	}
}

// TestInvalidXattrStreamEmptyBody tests is a bit different than cases in
// TestDCPDecodeValue since DecodeValueWithXattrs will pass but
// UnmarshalDocumentSyncDataFromFeed would fail due to invalid json.
func TestInvalidXattrStreamEmptyBody(t *testing.T) {
	inputStream := []byte{0x00, 0x00, 0x00, 0x01, 0x01}
	emptyBody := []byte{}

	var xattrNames []string
	body, xattrs, err := DecodeValueWithXattrs(xattrNames, inputStream)
	require.NoError(t, err)
	require.Empty(t, xattrs)
	require.Equal(t, emptyBody, body)
}

// getSingleXattrDCPBytes returns a DCP body with a single xattr pair and body.
// Layout: [4-byte total][4-byte pair len]["_sync"\x00 value \x00][body].
func getSingleXattrDCPBytes() []byte {
	zeroByte := byte(0)
	// Build payload for single xattr pair and body
	xattrValue := `{"seq":1}`
	// pair = 4-byte length prefix + key + value + two NUL terminators
	xattrPairLength := 4 + len("_sync") + len(xattrValue) + 2
	xattrTotalLength := xattrPairLength
	body := `{"value":"ABC"}`

	// Build up the dcp Body
	dcpBody := make([]byte, 8)
	binary.BigEndian.PutUint32(dcpBody[0:4], uint32(xattrTotalLength))
	binary.BigEndian.PutUint32(dcpBody[4:8], uint32(xattrPairLength))
	dcpBody = append(dcpBody, "_sync"...)
	dcpBody = append(dcpBody, zeroByte)
	dcpBody = append(dcpBody, xattrValue...)
	dcpBody = append(dcpBody, zeroByte)
	dcpBody = append(dcpBody, body...)
	return dcpBody
}

// requireXattrsEqual asserts that the decoded xattr map matches the expected
// Xattr slice by name; JSON values are compared structurally, empty values
// byte-for-byte.
func requireXattrsEqual(t *testing.T, expected []Xattr, actual map[string][]byte) {
	require.Len(t, actual, len(expected), "expected xattrs %+v to match actual length xattrs %+v", expected, actual)
	for _, expectedXattr := range expected {
		actualValue, ok := actual[expectedXattr.Name]
		require.True(t, ok, "expected xattr key %s not found in actual xattrs %+v", expectedXattr.Name, actual)
		if string(expectedXattr.Value) == "" {
			require.Equal(t, string(expectedXattr.Value), string(actualValue))
		} else {
			require.JSONEq(t, string(expectedXattr.Value), string(actualValue))
		}
	}
}

//////// HELPERS:

// assertNoError fails the test immediately if err is non-nil, prefixing the
// failure with message. (Predates testify usage in this package.)
func assertNoError(t *testing.T, err error, message string) {
	if err != nil {
		t.Fatalf("%s: %v", message, err)
	}
}
// VBHash returns the vbucket number for a document key, produced by hashing
// the key string with a table-driven CRC-32.
//   - key:   the document ID
//   - numVb: the total number of vbuckets
//
// NOTE(review): the final `& (numVb - 1)` mask only behaves as `mod numVb`
// when numVb is a power of two; numVb == 0 would wrap the mask to 0xffffffff.
// Callers are assumed to pass a power-of-two vbucket count — TODO confirm.
func VBHash(key string, numVb uint16) uint32 {
	// Reflected, table-driven CRC-32 update over the key's bytes.
	crc := uint32(0xffffffff)
	for x := 0; x < len(key); x++ {
		crc = (crc >> 8) ^ crc32tab[(uint64(crc)^uint64(key[x]))&0xff]
	}
	// Take 15 bits from the upper half of the inverted CRC, then mask down to
	// the vbucket range.
	vbNo := ((^crc) >> 16) & 0x7fff & (uint32(numVb) - 1)
	return vbNo
}

// Cloned from go-couchbase.
// NOTE(review): this looks like the standard IEEE CRC-32 lookup table
// (reversed polynomial 0xedb88320, cf. hash/crc32) — confirm before ever
// replacing it with the standard library's table.
var crc32tab = []uint32{
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
	0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
	0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
	0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
	0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
	0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
	0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
	0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
	0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
	0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
	0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
	0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
	0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
	0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
	0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
	0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
	0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
	0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
	0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
	0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
	0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
	0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
	0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
	0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
	0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
	0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
	0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
	0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
	0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
	0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
	0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
	0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
	0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
	0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
	0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}

// A ViewStore is a data store with a map-reduce query interface compatible with CouchDB.
// Query parameters are a subset of CouchDB's: https://docs.couchdb.org/en/stable/api/ddoc/views.html
// Supported parameters are: descending, endkey, group, group_level, include_docs, inclusive_end,
// key, keys, limit, reduce, stale, startkey
type ViewStore interface {
	GetDDoc(docname string) (DesignDoc, error)                           // Gets a DesignDoc given its name.
	GetDDocs() (map[string]DesignDoc, error)                             // Gets all the DesignDocs.
	PutDDoc(ctx context.Context, docname string, value *DesignDoc) error // Stores a design doc. (Must not be nil.)
	DeleteDDoc(docname string) error                                     // Deletes a design doc.

	// View issues a view query, and returns the results all at once.
	// Parameters:
	//   - ddoc:   The view's design doc's name
	//   - name:   The view's name
	//   - params: Parameters defining the query
	View(ctx context.Context, ddoc, name string, params map[string]interface{}) (ViewResult, error)

	// ViewQuery issues a view query, and returns an iterator over result rows.
	// Depending on the implementation this may have lower latency and use less memory.
	ViewQuery(ctx context.Context, ddoc, name string, params map[string]interface{}) (QueryResultIterator, error)
}

// ViewResult is the result of a view query.
type ViewResult struct {
	TotalRows int         `json:"total_rows"`       // Number of rows
	Rows      ViewRows    `json:"rows"`             // The rows. NOTE: Treat this as read-only.
	Errors    []ViewError `json:"errors,omitempty"` // Any errors

	Collator      JSONCollator // Performs Unicode string comparisons
	iterIndex     int          // Used to support iterator interface
	iterErr       error        // Error encountered during iteration
	collationKeys []preCollated // Parallel array of cached collation hints for ViewRows
	reversed      bool          // True if the rows have been reversed and are no longer sorted
}

// ViewRows is the set of rows in a ViewResult.
type ViewRows []*ViewRow

// ViewRow is a single result row from a view query.
type ViewRow struct {
	ID    string       `json:"id"`            // The source document's ID
	Key   interface{}  `json:"key"`           // The emitted key
	Value interface{}  `json:"value"`         // The emitted value
	Doc   *interface{} `json:"doc,omitempty"` // Document body, if requested with `include_docs`
}

// ViewError describes a failure in a view's map function.
type ViewError struct {
	From   string
	Reason string
}

// Error implements the error interface.
func (ve ViewError) Error() string {
	return fmt.Sprintf("Node: %v, reason: %v", ve.From, ve.Reason)
}

//////// VIEW IMPLEMENTATION UTILITIES:

// Validates a design document.
80 | func CheckDDoc(value interface{}) (*DesignDoc, error) { 81 | source, err := json.Marshal(value) 82 | if err != nil { 83 | return nil, err 84 | } 85 | 86 | var design DesignDoc 87 | if err := json.Unmarshal(source, &design); err != nil { 88 | return nil, err 89 | } 90 | 91 | if design.Language != "" && design.Language != "javascript" { 92 | return nil, fmt.Errorf("walrus design docs don't support language %q", 93 | design.Language) 94 | } 95 | 96 | return &design, nil 97 | } 98 | 99 | // Parsed view parameters 100 | type ViewParams struct { 101 | MinKey any // Minimum key, if non-nil 102 | MaxKey any // Maximum key, if non-nil 103 | IncludeMinKey bool // Should key equal to MinKey be included? 104 | IncludeMaxKey bool // Should key equal to MaxKey be included? 105 | Keys []any // Specific keys, if non-nil 106 | Descending bool // Results in descending order? 107 | Limit *int // Maximum number of rows, if non-nil 108 | IncludeDocs bool // Put doc body in `Document` field? 109 | Reduce bool // Skip reduce? 110 | GroupLevel *int // Level of grouping to apply, if non-nil 111 | } 112 | 113 | // Interprets parameters from a JSON map and returns a ViewParams struct. 
114 | func ParseViewParams(jsonParams map[string]any) (params ViewParams, err error) { 115 | params = ViewParams{ 116 | IncludeMinKey: true, 117 | IncludeMaxKey: true, 118 | Reduce: true, 119 | } 120 | if jsonParams != nil { 121 | if keys, _ := jsonParams["keys"].([]any); keys != nil { 122 | params.Keys = keys 123 | } else if key := jsonParams["key"]; key != nil { 124 | params.MinKey = key 125 | params.MaxKey = key 126 | } else { 127 | params.MinKey = jsonParams["startkey"] 128 | if params.MinKey == nil { 129 | params.MinKey = jsonParams["start_key"] // older synonym 130 | } 131 | params.MaxKey = jsonParams["endkey"] 132 | if params.MaxKey == nil { 133 | params.MaxKey = jsonParams["end_key"] 134 | } 135 | if value, ok := jsonParams["inclusive_end"].(bool); ok { 136 | params.IncludeMaxKey = value 137 | } 138 | } 139 | 140 | params.Descending, _ = jsonParams["descending"].(bool) 141 | if params.Descending { 142 | // Swap min/max if descending order 143 | temp := params.MinKey 144 | params.MinKey = params.MaxKey 145 | params.MaxKey = temp 146 | params.IncludeMinKey = params.IncludeMaxKey 147 | params.IncludeMaxKey = true 148 | } 149 | 150 | if plimit, ok := jsonParams["limit"]; ok { 151 | if limit, limiterr := interfaceToInt(plimit); limiterr == nil && limit > 0 { 152 | params.Limit = &limit 153 | } else { 154 | err = fmt.Errorf("invalid limit parameter in view query: %v", jsonParams["limit"]) 155 | return 156 | } 157 | } 158 | 159 | params.IncludeDocs, _ = jsonParams["include_docs"].(bool) 160 | 161 | if reduceParam, found := jsonParams["reduce"].(bool); found { 162 | params.Reduce = reduceParam 163 | } 164 | if params.Reduce { 165 | if jsonParams["group"] != nil && jsonParams["group"].(bool) { 166 | var groupLevel int = 0 167 | params.GroupLevel = &groupLevel 168 | } else if jsonParams["group_level"] != nil { 169 | groupLevel, groupErr := interfaceToInt(jsonParams["group_level"]) 170 | if groupErr == nil && groupLevel >= 0 { 171 | params.GroupLevel = &groupLevel 172 
| } else { 173 | err = fmt.Errorf("invalid group_level parameter in view query: %v", jsonParams["group_level"]) 174 | return 175 | } 176 | } 177 | } 178 | } 179 | return 180 | } 181 | 182 | // Applies view params (startkey/endkey, limit, etc) to a ViewResult. 183 | func (result *ViewResult) Process(jsonParams map[string]interface{}, ds DataStore, reduceFunction string) error { 184 | params, err := ParseViewParams(jsonParams) 185 | if err != nil { 186 | return err 187 | } 188 | return result.ProcessParsed(params, ds, reduceFunction) 189 | } 190 | 191 | func (result *ViewResult) ProcessParsed(params ViewParams, ds DataStore, reduceFunction string) error { 192 | 193 | if params.Keys != nil { 194 | result.FilterKeys(params.Keys) 195 | } 196 | 197 | result.SetStartKey(params.MinKey, params.IncludeMinKey) 198 | 199 | if params.Limit != nil && *params.Limit < len(result.Rows) { 200 | result.Rows = result.Rows[:*params.Limit] 201 | } 202 | 203 | result.SetEndKey(params.MaxKey, params.IncludeMaxKey) 204 | 205 | if params.IncludeDocs { 206 | // Make a new Rows array since the current one may be shared 207 | newRows := make(ViewRows, len(result.Rows)) 208 | for i, rowPtr := range result.Rows { 209 | if rowPtr.Doc == nil { 210 | //OPT: This may unmarshal the same doc more than once 211 | newRow := *rowPtr 212 | _, err := ds.Get(newRow.ID, &newRow.Doc) 213 | if err != nil { 214 | return err 215 | } 216 | newRows[i] = &newRow 217 | } else { 218 | newRows[i] = rowPtr 219 | } 220 | } 221 | result.Rows = newRows 222 | result.collationKeys = nil 223 | } 224 | 225 | if params.Reduce && reduceFunction != "" { 226 | if err := result.ReduceAndGroup(reduceFunction, params.GroupLevel); err != nil { 227 | return err 228 | } 229 | } 230 | 231 | if params.Descending { 232 | result.ReverseRows() 233 | } 234 | 235 | result.TotalRows = len(result.Rows) 236 | result.Collator.Clear() 237 | result.collationKeys = nil // not needed any more 238 | logg("\t... 
view returned %d rows", result.TotalRows) 239 | return nil 240 | } 241 | 242 | // Applies a reduce function to a view result, modifying it in place. 243 | func (result *ViewResult) Reduce(reduceFunction string, jsonParams map[string]interface{}) error { 244 | params, err := ParseViewParams(jsonParams) 245 | if err != nil { 246 | return err 247 | } 248 | return result.ReduceAndGroup(reduceFunction, params.GroupLevel) 249 | } 250 | 251 | // Applies a reduce function to a view result, modifying it in place. 252 | // If the group level is non-nil, results will be grouped. 253 | // Group level 0 groups by the entire key; higher levels group by components of an array key. 254 | func (result *ViewResult) ReduceAndGroup(reduceFunction string, groupLevelOrNil *int) error { 255 | reduceFun, compileErr := ReduceFunc(reduceFunction) 256 | if compileErr != nil { 257 | return compileErr 258 | } 259 | if len(result.Rows) == 0 { 260 | return nil 261 | } 262 | if groupLevelOrNil != nil { 263 | groupLevel := *groupLevelOrNil 264 | var collator JSONCollator 265 | key := result.Rows[0].Key 266 | if groupLevel > 0 { 267 | // don't try to cast key as a slice if group=true 268 | key = keyPrefix(groupLevel, key) 269 | } 270 | inRows := []*ViewRow{} 271 | outRows := []*ViewRow{} 272 | for _, row := range result.Rows { 273 | inKey := row.Key 274 | if groupLevel > 0 { 275 | // don't try to cast key as a slice if group=true 276 | inKey = keyPrefix(groupLevel, inKey) 277 | } 278 | collated := collator.Collate(inKey, key) 279 | if collated == 0 { 280 | inRows = append(inRows, row) 281 | } else { 282 | outRow, outErr := reduceFun(inRows) 283 | if outErr != nil { 284 | return outErr 285 | } 286 | outRow.Key = key 287 | outRows = append(outRows, outRow) 288 | // reset for next key 289 | inRows = []*ViewRow{row} 290 | key = inKey 291 | } 292 | } 293 | // do last key 294 | outRow, outErr := reduceFun(inRows) 295 | if outErr != nil { 296 | return outErr 297 | } 298 | outRow.Key = key 299 | 
result.Rows = append(outRows, outRow) 300 | result.collationKeys = nil 301 | } else { 302 | row, err := reduceFun(result.Rows) 303 | if err != nil { 304 | return err 305 | } 306 | result.Rows = []*ViewRow{row} 307 | result.collationKeys = nil 308 | } 309 | return nil 310 | } 311 | 312 | func keyPrefix(groupLevel int, key interface{}) []interface{} { 313 | return key.([]interface{})[0:groupLevel] 314 | } 315 | 316 | func ReduceFunc(reduceFunction string) (func([]*ViewRow) (*ViewRow, error), error) { 317 | switch reduceFunction { 318 | case "_count": 319 | return func(rows []*ViewRow) (*ViewRow, error) { 320 | return &ViewRow{Value: float64(len(rows))}, nil 321 | }, nil 322 | case "_sum": 323 | return func(rows []*ViewRow) (*ViewRow, error) { 324 | total := float64(0) 325 | for _, row := range rows { 326 | // This could theoretically know how to unwrap our [channels, value] 327 | // design_doc emit wrapper, but even so reduce would remain admin only. 328 | if n, err := interfaceToFloat64(row.Value); err == nil { 329 | total += n 330 | } else { 331 | return nil, err 332 | } 333 | } 334 | return &ViewRow{Value: total}, nil 335 | }, nil 336 | default: 337 | // TODO: Implement other reduce functions! 
338 | return nil, fmt.Errorf("sgbucket only supports _count and _sum reduce functions") 339 | } 340 | } 341 | 342 | func interfaceToInt(value interface{}) (i int, err error) { 343 | ref := reflect.ValueOf(value) 344 | if ref.CanInt() { 345 | i = int(ref.Int()) 346 | } else if ref.CanFloat() { 347 | i = int(ref.Float()) 348 | } else if ref.CanUint() { 349 | i = int(ref.Uint()) 350 | } else { 351 | err = fmt.Errorf("unable to convert %v (%T) to int", value, value) 352 | } 353 | return 354 | } 355 | 356 | func interfaceToFloat64(value any) (f float64, err error) { 357 | ref := reflect.ValueOf(value) 358 | if ref.CanInt() { 359 | f = float64(ref.Int()) 360 | } else if ref.CanFloat() { 361 | f = ref.Float() 362 | } else if ref.CanUint() { 363 | f = float64(ref.Uint()) 364 | } else { 365 | err = fmt.Errorf("unable to convert %v (%T) to float64", value, value) 366 | } 367 | return 368 | } 369 | 370 | // Removes all the rows whose keys do not appear in the array. 371 | func (result *ViewResult) FilterKeys(keys []any) { 372 | if keys != nil { 373 | result.makeCollationKeys() 374 | filteredRows := make(ViewRows, 0, len(keys)) 375 | filteredCollationKeys := make([]preCollated, 0, len(keys)) 376 | for _, targetKey := range keys { 377 | targetColl := preCollate(targetKey) 378 | i, found := sort.Find(len(result.Rows), func(i int) int { 379 | return result.Collator.collate(&targetColl, &result.collationKeys[i]) 380 | }) 381 | if found { 382 | filteredRows = append(filteredRows, result.Rows[i]) 383 | filteredCollationKeys = append(filteredCollationKeys, result.collationKeys[i]) 384 | } 385 | } 386 | result.Rows = filteredRows 387 | result.collationKeys = filteredCollationKeys 388 | } 389 | } 390 | 391 | // Removes all the rows whose keys are less than `startkey` 392 | // If `inclusive` is false, it also removes rows whose keys are equal to `startkey`. 
func (result *ViewResult) SetStartKey(startkey any, inclusive bool) {
	if startkey != nil {
		result.makeCollationKeys()
		startColl := preCollate(startkey)
		// collate returns <0/0/>0; with limit=-1 the predicate admits rows
		// equal to startkey, with limit=0 only rows strictly greater.
		limit := 0
		if inclusive {
			limit = -1
		}
		// Binary-search for the first row at/after startkey, then drop everything before it.
		i := sort.Search(len(result.Rows), func(i int) bool {
			return result.Collator.collate(&result.collationKeys[i], &startColl) > limit
		})
		result.Rows = result.Rows[i:]
		result.collationKeys = result.collationKeys[i:]
	}
}

// Removes all the rows whose keys are greater than `endkey`.
// If `inclusive` is false, it also removes rows whose keys are equal to `endkey`.
func (result *ViewResult) SetEndKey(endkey any, inclusive bool) {
	if endkey != nil {
		result.makeCollationKeys()
		endColl := preCollate(endkey)
		// Mirror of SetStartKey: limit=-1 excludes rows equal to endkey.
		limit := 0
		if !inclusive {
			limit = -1
		}
		// Binary-search for the first row past endkey, then truncate there.
		i := sort.Search(len(result.Rows), func(i int) bool {
			return result.Collator.collate(&result.collationKeys[i], &endColl) > limit
		})
		result.Rows = result.Rows[:i]
		result.collationKeys = result.collationKeys[:i]
	}
}

// ReverseRows reverses the order of the rows, copying into a fresh slice.
func (result *ViewResult) ReverseRows() {
	// Note: Can't reverse result.Rows in place because it'd mess with any other copy of this
	// ViewResult (they share the same underlying array.)
	n := len(result.Rows)
	newRows := make([]*ViewRow, n)
	for i, row := range result.Rows {
		newRows[n-1-i] = row
	}
	result.Rows = newRows
	result.reversed = !result.reversed
	// Cached collation keys are ordered to match the old row order; drop them.
	result.collationKeys = nil
}

// makeCollationKeys lazily builds the cached collation hints, one per row.
// Skipped when rows have been reversed, since the binary searches that use
// these keys require ascending sorted order.
func (result *ViewResult) makeCollationKeys() {
	if result.collationKeys == nil && !result.reversed {
		keys := make([]preCollated, len(result.Rows))
		for i, row := range result.Rows {
			keys[i] = preCollate(row.Key)
		}
		result.collationKeys = keys
	}
}

//////// ViewResult: implementation of sort.Interface interface

// Sort orders the rows ascending by collated key.
func (result *ViewResult) Sort() {
	result.makeCollationKeys()
	sort.Sort(result)
}

func (result *ViewResult) Len() int {
	return len(result.Rows)
}

// Swap exchanges rows i and j, keeping the parallel collationKeys array in sync.
func (result *ViewResult) Swap(i, j int) {
	temp := result.Rows[i]
	result.Rows[i] = result.Rows[j]
	result.Rows[j] = temp
	if result.collationKeys != nil {
		temp := result.collationKeys[i]
		result.collationKeys[i] = result.collationKeys[j]
		result.collationKeys[j] = temp
	}
}

func (result *ViewResult) Less(i, j int) bool {
	return result.Collator.collate(&result.collationKeys[i], &result.collationKeys[j]) < 0
}

//////// ViewResult: Implementation of QueryResultIterator interface

// Note: iterIndex is a 1-based counter, for consistent error handling w/ gocb's iterators

// NextBytes returns the next row marshaled as JSON, or nil when iteration is
// exhausted, a marshaling error occurs (recorded in iterErr), or the result
// already carries errors.
func (r *ViewResult) NextBytes() []byte {

	if len(r.Errors) > 0 || r.iterErr != nil {
		return nil
	}

	if r.iterIndex >= len(r.Rows) {
		return nil
	}
	r.iterIndex++

	var rowBytes []byte
	rowBytes, r.iterErr = json.Marshal(r.Rows[r.iterIndex-1])
	if r.iterErr != nil {
		return nil
	}

	return rowBytes

}

// Next unmarshals the next row into valuePtr, returning false at the end of
// iteration or on any (recorded) error.
func (r *ViewResult) Next(_ context.Context, valuePtr interface{}) bool {
	if len(r.Errors) > 0 || r.iterErr != nil {
		return false
	}

	row := r.NextBytes()
	if row == nil {
		return false
	}

	r.iterErr = json.Unmarshal(row, valuePtr)
	return r.iterErr == nil
}

// Close ends iteration, returning any error recorded during it (or the first
// view error, if any).
func (r *ViewResult) Close() error {
	if r.iterErr != nil {
		return r.iterErr
	}

	if len(r.Errors) > 0 {
		return r.Errors[0]
	}

	return nil
}

// One unmarshals the single expected result row into valuePtr, returning an
// error if there are no rows. Extra rows beyond the first are ignored.
func (r *ViewResult) One(ctx context.Context, valuePtr interface{}) error {
	if !r.Next(ctx, valuePtr) {
		err := r.Close()
		if err != nil {
			return err
		}
		return errors.New("No results returned.")
	}

	// Ignore any errors occurring after we already have our result
	_ = r.Close()
	return nil
}
--------------------------------------------------------------------------------