├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── ci.yml │ ├── docker.yml │ ├── lint.yml │ └── stale.yml ├── .gitignore ├── .gitpod.yml ├── .golangci.yml ├── .mergify.yml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── backend_test.go ├── badger_db.go ├── boltdb.go ├── boltdb_batch.go ├── boltdb_iterator.go ├── boltdb_test.go ├── cleveldb.go ├── cleveldb_batch.go ├── cleveldb_iterator.go ├── cleveldb_test.go ├── codecov.yml ├── common_test.go ├── db.go ├── db_test.go ├── docs └── how_to_release.md ├── go.mod ├── go.sum ├── goleveldb.go ├── goleveldb_batch.go ├── goleveldb_iterator.go ├── goleveldb_test.go ├── makefile ├── memdb.go ├── memdb_batch.go ├── memdb_iterator.go ├── memdb_test.go ├── prefixdb.go ├── prefixdb_batch.go ├── prefixdb_iterator.go ├── prefixdb_test.go ├── remotedb ├── batch.go ├── doc.go ├── grpcdb │ ├── client.go │ ├── doc.go │ ├── example_test.go │ └── server.go ├── iterator.go ├── proto │ ├── defs.pb.go │ ├── defs.proto │ └── defspb_test.go ├── remotedb.go ├── remotedb_test.go ├── test.crt └── test.key ├── rocksdb.go ├── rocksdb_batch.go ├── rocksdb_iterator.go ├── rocksdb_test.go ├── test_helpers.go ├── tools └── Dockerfile ├── types.go ├── util.go └── util_test.go /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS: https://help.github.com/articles/about-codeowners/ 2 | 3 | * @alexanderbez @cmwaters @marbar3778 @tychoish @williambanfield 4 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "11:00" 8 | open-pull-requests-limit: 10 9 | - package-ecosystem: gomod 10 | directory: "/" 11 | schedule: 12 | interval: daily 13 | time: "11:00" 14 | open-pull-requests-limit: 10 15 | 
labels: 16 | - T:dependencies 17 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | jobs: 8 | cleanup-runs: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: rokroskar/workflow-run-cleanup-action@master 12 | env: 13 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 14 | if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'" 15 | 16 | Test: 17 | # The custom image here contains pre-built libraries for leveldb and 18 | # rocksdb, which are needed to build and test those modules. 19 | # To update the container image, see docker.yml. 20 | runs-on: ubuntu-latest 21 | container: tendermintdev/docker-tm-db-testing 22 | steps: 23 | - uses: actions/checkout@v3 24 | - name: test & coverage report creation 25 | run: | 26 | CGO_ENABLED=1 go test ./... -mod=readonly -timeout 8m -race -coverprofile=coverage.txt -covermode=atomic -tags=memdb,goleveldb,cleveldb,boltdb,rocksdb,badgerdb -v 27 | - uses: codecov/codecov-action@v3 28 | with: 29 | file: ./coverage.txt 30 | fail_ci_if_error: true 31 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | # This workflow builds and pushes a new version of the build container image 2 | # when the tools directory changes on master. Edit tools/Dockerfile. 3 | # 4 | # This workflow does not push a new image until it is merged, so tests that 5 | # depend on changes in this image will not pass until this workflow succeeds. 6 | # For that reason, changes here should be done in a separate PR in advance of 7 | # work that depends on them. 
8 | 9 | name: Build & Push TM-DB-Testing 10 | on: 11 | pull_request: 12 | paths: 13 | - "tools/*" 14 | push: 15 | branches: 16 | - master 17 | paths: 18 | - "tools/*" 19 | 20 | jobs: 21 | build: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Prepare 26 | id: prep 27 | run: | 28 | DOCKER_IMAGE=tendermintdev/docker-tm-db-testing 29 | VERSION=noop 30 | if [[ $GITHUB_REF == refs/tags/* ]]; then 31 | VERSION=${GITHUB_REF#refs/tags/} 32 | elif [[ $GITHUB_REF == refs/heads/* ]]; then 33 | VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') 34 | if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then 35 | VERSION=latest 36 | fi 37 | fi 38 | TAGS="${DOCKER_IMAGE}:${VERSION}" 39 | echo ::set-output name=tags::${TAGS} 40 | 41 | - name: Set up Docker Buildx 42 | uses: docker/setup-buildx-action@v2 43 | 44 | - name: Login to DockerHub 45 | uses: docker/login-action@v2 46 | with: 47 | username: ${{ secrets.DOCKERHUB_USERNAME }} 48 | password: ${{ secrets.DOCKERHUB_TOKEN }} 49 | 50 | - name: Publish to Docker Hub 51 | uses: docker/build-push-action@v3 52 | with: 53 | context: ./tools 54 | file: ./tools/Dockerfile 55 | push: ${{ github.event_name != 'pull_request' }} 56 | tags: ${{ steps.prep.outputs.tags }} 57 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | 8 | jobs: 9 | golangci: 10 | # We need to run the linter on the same image we use for building, since it 11 | # needs the C libraries installed for the dependencies to typecheck. 
12 | runs-on: ubuntu-latest 13 | container: tendermintdev/docker-tm-db-testing 14 | steps: 15 | - uses: actions/checkout@v3 16 | - uses: golangci/golangci-lint-action@v3.2.0 17 | with: 18 | args: --timeout 10m 19 | github-token: ${{ secrets.github_token }} 20 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: "Close stale pull requests" 2 | on: 3 | schedule: 4 | - cron: "0 0 * * *" 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v5 11 | with: 12 | repo-token: ${{ secrets.GITHUB_TOKEN }} 13 | stale-pr-message: "This pull request has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions." 16 | days-before-stale: -1 17 | days-before-close: -1 18 | days-before-pr-stale: 20 19 | days-before-pr-close: 10 20 | exempt-pr-labels: "pinned, security, proposal, blocked" 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | .idea 15 | .vscode 16 | vendor/* 17 | -------------------------------------------------------------------------------- /.gitpod.yml: -------------------------------------------------------------------------------- 1 | # This configuration file was automatically generated by Gitpod. 2 | # Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) 3 | # and commit this file to your remote git repository to share the goodness with others. 
4 | 5 | image: tendermintdev/docker-tm-db-testing 6 | 7 | # this means that there's a one-click known good environment available to developers. 8 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters: 2 | disable-all: true 3 | enable: 4 | - deadcode 5 | - depguard 6 | - dogsled 7 | - dupl 8 | - errcheck 9 | - exportloopref 10 | - goconst 11 | - gocritic 12 | - gofumpt 13 | - revive 14 | - gosec 15 | - gosimple 16 | - govet 17 | - ineffassign 18 | - lll 19 | - misspell 20 | - nakedret 21 | - prealloc 22 | - staticcheck 23 | - stylecheck 24 | - typecheck 25 | - revive 26 | - unconvert 27 | - unused 28 | - varcheck 29 | - nolintlint 30 | 31 | run: 32 | build-tags: 33 | - cleveldb 34 | - rocksdb 35 | - badgerdb 36 | - boltdb 37 | 38 | issues: 39 | exclude-rules: 40 | - path: _test\.go 41 | linters: 42 | - gosec 43 | linters-settings: 44 | maligned: 45 | suggest-new: true 46 | errcheck: 47 | # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; 48 | # default is false: such cases aren't reported by default. 
49 | check-blank: true 50 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | queue_rules: 2 | - name: default 3 | conditions: 4 | - base=master 5 | - label=S:automerge 6 | 7 | pull_request_rules: 8 | - name: automerge to master with label S:automerge and branch protection passing 9 | conditions: 10 | - base=master 11 | - label=S:automerge 12 | actions: 13 | queue: 14 | method: squash 15 | name: default 16 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Unreleased 4 | 5 | - remove mutex from prefixdb 6 | 7 | ## 0.6.7 8 | 9 | **2022-2-21** 10 | 11 | - Use cosmos/gorocksdb instead of tecbot/gorocksdb 12 | - Add `ForceCompact` to goleveldb database 13 | 14 | ## 0.6.6 15 | 16 | **2021-11-08** 17 | 18 | **Important note:** Version v0.6.5 was accidentally tagged and should be 19 | avoided. This version is identical to v0.6.4 in package structure and API, but 20 | has updated the version marker so that normal `go get` upgrades will not 21 | require modifying existing use of v0.6.4. 22 | 23 | ### Version bumps (since v0.6.4) 24 | 25 | - Bump grpc to 1.42.0. 26 | - Bump dgraph/badger to v2 2.2007.3. 27 | - Bump go.etcd.io/bbolt to 1.3.6. 28 | 29 | ## 0.6.5 30 | 31 | **2021-08-04** 32 | 33 | **Important note**: This version was tagged by accident, and should not be 34 | used. The tag now points to the [package-reorg 35 | branch](https://github.com/tendermint/tm-db/tree/package-reorg) so that 36 | any existing dependencies should not break. 37 | 38 | ## 0.6.4 39 | 40 | **2021-02-09** 41 | 42 | Bump protobuf to 1.3.2 and grpc to 1.35.0. 
43 | 44 | ## 0.6.3 45 | 46 | **2020-11-10** 47 | 48 | ### Improvements 49 | 50 | - [goleveldb] [\#134](https://github.com/tendermint/tm-db/pull/134) Improve iterator performance by bounding underlying iterator range (@klim0v) 51 | 52 | ## 0.6.2 53 | 54 | **2020-08-27** 55 | 56 | Bump grpc, badger and goleveldb (see versions in go.mod file) 57 | 58 | ## 0.6.1 59 | 60 | **2020-08-12** 61 | 62 | ### Improvements 63 | 64 | - [\#115](https://github.com/tendermint/tm-db/pull/115) Add a `BadgerDB` backend with build tag `badgerdb` (@mvdan) 65 | 66 | ## 0.6.0 67 | 68 | **2020-06-24** 69 | 70 | ### Breaking Changes 71 | 72 | - [\#99](https://github.com/tendermint/tm-db/pull/99) Keys can no longer be `nil` or empty, and values can no longer be `nil` (@erikgrinaker) 73 | 74 | - [\#98](https://github.com/tendermint/tm-db/pull/98) `NewDB` now returns an error instead of panicking (@erikgrinaker) 75 | 76 | - [\#96](https://github.com/tendermint/tm-db/pull/96) `Batch.Set()`, `Delete()`, and `Close()` may now error (@erikgrinaker) 77 | 78 | - [\#97](https://github.com/tendermint/tm-db/pull/97) `Iterator.Close()` may now error (@erikgrinaker) 79 | 80 | - [\#97](https://github.com/tendermint/tm-db/pull/97) Many iterator panics are now exposed via `Error()` instead (@erikgrinaker) 81 | 82 | - [\#96](https://github.com/tendermint/tm-db/pull/96) The `SetDeleter` interface has been removed (@erikgrinaker) 83 | 84 | ### Bug Fixes 85 | 86 | - [\#97](https://github.com/tendermint/tm-db/pull/97) `RemoteDB` iterators are now correctly primed with the first item when created, without calling `Next()` (@erikgrinaker) 87 | 88 | ## 0.5.2 89 | 90 | **2020-11-10** 91 | 92 | ### Improvements 93 | 94 | - [goleveldb] [\#134](https://github.com/tendermint/tm-db/pull/134) Improve iterator performance by bounding underlying iterator range (@klim0v) 95 | 96 | ## 0.5.1 97 | 98 | **2020-03-30** 99 | 100 | ### Bug Fixes 101 | 102 | - [boltdb] [\#81](https://github.com/tendermint/tm-db/pull/81) Use correct 
import path go.etcd.io/bbolt 103 | 104 | ## 0.5.0 105 | 106 | **2020-03-11** 107 | 108 | ### Breaking Changes 109 | 110 | - [\#71](https://github.com/tendermint/tm-db/pull/71) Closed or written batches can no longer be reused, all non-`Close()` calls will panic 111 | 112 | - [memdb] [\#74](https://github.com/tendermint/tm-db/pull/74) `Iterator()` and `ReverseIterator()` now take out database read locks for the duration of the iteration 113 | 114 | - [memdb] [\#56](https://github.com/tendermint/tm-db/pull/56) Removed some exported methods that were mainly meant for internal use: `Mutex()`, `SetNoLock()`, `SetNoLockSync()`, `DeleteNoLock()`, and `DeleteNoLockSync()` 115 | 116 | ### Improvements 117 | 118 | - [memdb] [\#53](https://github.com/tendermint/tm-db/pull/53) Use a B-tree for storage, which significantly improves range scan performance 119 | 120 | - [memdb] [\#56](https://github.com/tendermint/tm-db/pull/56) Use an RWMutex for improved performance with highly concurrent read-heavy workloads 121 | 122 | ### Bug Fixes 123 | 124 | - [boltdb] [\#69](https://github.com/tendermint/tm-db/pull/69) Properly handle blank keys in iterators 125 | 126 | - [cleveldb] [\#65](https://github.com/tendermint/tm-db/pull/65) Fix handling of empty keys as iterator endpoints 127 | 128 | ## 0.4.1 129 | 130 | **2020-2-26** 131 | 132 | ### Breaking Changes 133 | 134 | - [fsdb] [\#43](https://github.com/tendermint/tm-db/pull/43) Remove FSDB 135 | 136 | ### Bug Fixes 137 | 138 | - [boltdb] [\#45](https://github.com/tendermint/tm-db/pull/45) Bring BoltDB to adhere to the db interfaces 139 | 140 | ## 0.4 141 | 142 | **2020-1-7** 143 | 144 | ### BREAKING CHANGES 145 | 146 | - [\#30](https://github.com/tendermint/tm-db/pull/30) Interface Breaking, Interfaces return errors instead of panic: 147 | - Changes to function signatures: 148 | - DB interface: 149 | - `Get([]byte) ([]byte, error)` 150 | - `Has(key []byte) (bool, error)` 151 | - `Set([]byte, []byte) error` 152 | - `SetSync([]byte, 
[]byte) error` 153 | - `Delete([]byte) error` 154 | - `DeleteSync([]byte) error` 155 | - `Iterator(start, end []byte) (Iterator, error)` 156 | - `ReverseIterator(start, end []byte) (Iterator, error)` 157 | - `Close() error` 158 | - `Print() error` 159 | - Batch interface: 160 | - `Write() error` 161 | - `WriteSync() error` 162 | - Iterator interface: 163 | - `Error() error` 164 | 165 | ### IMPROVEMENTS 166 | 167 | - [remotedb] [\#34](https://github.com/tendermint/tm-db/pull/34) Add proto file tests and regenerate remotedb.pb.go 168 | 169 | ## 0.3 170 | 171 | **2019-11-18** 172 | 173 | Special thanks to external contributors on this release: 174 | 175 | ### BREAKING CHANGES 176 | 177 | - [\#26](https://github.com/tendermint/tm-db/pull/26/files) Rename `DBBackendtype` to `BackendType` 178 | 179 | ## 0.2 180 | 181 | **2019-09-19** 182 | 183 | Special thanks to external contributors on this release: @stumble 184 | 185 | ### Features 186 | 187 | - [\#12](https://github.com/tendermint/tm-db/pull/12) Add `RocksDB` (@stumble) 188 | 189 | ## 0.1 190 | 191 | **2019-07-16** 192 | 193 | Special thanks to external contributors on this release: 194 | 195 | ### Initial Release 196 | 197 | - `db`, `random.go`, `bytes.go` and `os.go` from the tendermint repo. 198 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thank you for your interest in contributing to tm-db! 4 | This repository follows the [contribution guidelines] of tendermint and the corresponding [coding repo]. 5 | Please take a look if you are not already familiar with those. 
6 | 7 | [contribution guidelines]: https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md 8 | [coding repo]: https://github.com/tendermint/coding 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2016 All in Bits, Inc 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tendermint DB 2 | 3 | [![version](https://img.shields.io/github/tag/tendermint/tm-db.svg)](https://github.com/tendermint/tm-db/releases/latest) 4 | [![license](https://img.shields.io/github/license/tendermint/tm-db.svg)](https://github.com/tendermint/tm-db/blob/master/LICENSE) 5 | [![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tm-db) 6 | [![codecov](https://codecov.io/gh/tendermint/tm-db/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tm-db) 7 | ![Lint](https://github.com/tendermint/tm-db/workflows/Lint/badge.svg?branch=master) 8 | ![Test](https://github.com/tendermint/tm-db/workflows/Test/badge.svg?branch=master) 9 | [![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/AzefAFd) 10 | 11 | Common database interface 
for various database backends. Primarily meant for applications built on [Tendermint](https://github.com/tendermint/tendermint), such as the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk), but can be used independently of these as well. 12 | 13 | ### Minimum Go Version 14 | 15 | Go 1.13+ 16 | 17 | ## Supported Database Backends 18 | 19 | - **[GoLevelDB](https://github.com/syndtr/goleveldb) [stable]**: A pure Go implementation of [LevelDB](https://github.com/google/leveldb) (see below). Currently the default on-disk database used in the Cosmos SDK. 20 | 21 | - **MemDB [stable]:** An in-memory database using [Google's B-tree package](https://github.com/google/btree). Has very high performance both for reads, writes, and range scans, but is not durable and will lose all data on process exit. Does not support transactions. Suitable for e.g. caches, working sets, and tests. Used for [IAVL](https://github.com/tendermint/iavl) working sets when the pruning strategy allows it. 22 | 23 | - **[LevelDB](https://github.com/google/leveldb) [experimental]:** A [Go wrapper](https://github.com/jmhodges/levigo) around [LevelDB](https://github.com/google/leveldb). Uses LSM-trees for on-disk storage, which have good performance for write-heavy workloads, particularly on spinning disks, but requires periodic compaction to maintain decent read performance and reclaim disk space. Does not support transactions. 24 | 25 | - **[BoltDB](https://github.com/etcd-io/bbolt) [experimental]:** A [fork](https://github.com/etcd-io/bbolt) of [BoltDB](https://github.com/boltdb/bolt). Uses B+trees for on-disk storage, which have good performance for read-heavy workloads and range scans. Supports serializable ACID transactions. 26 | 27 | - **[RocksDB](https://github.com/cosmos/gorocksdb) [experimental]:** A [Go wrapper](https://github.com/cosmos/gorocksdb) around [RocksDB](https://rocksdb.org). 
Similarly to LevelDB (above) it uses LSM-trees for on-disk storage, but is optimized for fast storage media such as SSDs and memory. Supports atomic transactions, but not full ACID transactions. 28 | 29 | - **[BadgerDB](https://github.com/dgraph-io/badger) [experimental]:** A key-value database written as a pure-Go alternative to e.g. LevelDB and RocksDB, with LSM-tree storage. Makes use of multiple goroutines for performance, and includes advanced features such as serializable ACID transactions, write batches, compression, and more. 30 | 31 | ## Meta-databases 32 | 33 | - **PrefixDB [stable]:** A database which wraps another database and uses a static prefix for all keys. This allows multiple logical databases to be stored in a common underlying databases by using different namespaces. Used by the Cosmos SDK to give different modules their own namespaced database in a single application database. 34 | 35 | - **RemoteDB [experimental]:** A database that connects to distributed Tendermint db instances via [gRPC](https://grpc.io/). This can help with detaching difficult deployments such as LevelDB, and can also ease dependency management for Tendermint developers. 36 | 37 | ## Tests 38 | 39 | To test common databases, run `make test`. If all databases are available on the local machine, use `make test-all` to test them all. 
40 | -------------------------------------------------------------------------------- /backend_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | // Register a test backend for PrefixDB as well, with some unrelated junk data 15 | func init() { 16 | // nolint: errcheck 17 | registerDBCreator("prefixdb", func(name, dir string) (DB, error) { 18 | mdb := NewMemDB() 19 | mdb.Set([]byte("a"), []byte{1}) 20 | mdb.Set([]byte("b"), []byte{2}) 21 | mdb.Set([]byte("t"), []byte{20}) 22 | mdb.Set([]byte("test"), []byte{0}) 23 | mdb.Set([]byte("u"), []byte{21}) 24 | mdb.Set([]byte("z"), []byte{26}) 25 | return NewPrefixDB(mdb, []byte("test/")), nil 26 | }, false) 27 | } 28 | 29 | func cleanupDBDir(dir, name string) { 30 | err := os.RemoveAll(filepath.Join(dir, name) + ".db") 31 | if err != nil { 32 | panic(err) 33 | } 34 | } 35 | 36 | func testBackendGetSetDelete(t *testing.T, backend BackendType) { 37 | // Default 38 | dirname, err := ioutil.TempDir("", fmt.Sprintf("test_backend_%s_", backend)) 39 | require.Nil(t, err) 40 | db, err := NewDB("testdb", backend, dirname) 41 | require.NoError(t, err) 42 | defer cleanupDBDir(dirname, "testdb") 43 | 44 | // A nonexistent key should return nil. 45 | value, err := db.Get([]byte("a")) 46 | require.NoError(t, err) 47 | require.Nil(t, value) 48 | 49 | ok, err := db.Has([]byte("a")) 50 | require.NoError(t, err) 51 | require.False(t, ok) 52 | 53 | // Set and get a value. 
54 | err = db.Set([]byte("a"), []byte{0x01}) 55 | require.NoError(t, err) 56 | 57 | ok, err = db.Has([]byte("a")) 58 | require.NoError(t, err) 59 | require.True(t, ok) 60 | 61 | value, err = db.Get([]byte("a")) 62 | require.NoError(t, err) 63 | require.Equal(t, []byte{0x01}, value) 64 | 65 | err = db.SetSync([]byte("b"), []byte{0x02}) 66 | require.NoError(t, err) 67 | 68 | value, err = db.Get([]byte("b")) 69 | require.NoError(t, err) 70 | require.Equal(t, []byte{0x02}, value) 71 | 72 | // Deleting a non-existent value is fine. 73 | err = db.Delete([]byte("x")) 74 | require.NoError(t, err) 75 | 76 | err = db.DeleteSync([]byte("x")) 77 | require.NoError(t, err) 78 | 79 | // Delete a value. 80 | err = db.Delete([]byte("a")) 81 | require.NoError(t, err) 82 | 83 | value, err = db.Get([]byte("a")) 84 | require.NoError(t, err) 85 | require.Nil(t, value) 86 | 87 | err = db.DeleteSync([]byte("b")) 88 | require.NoError(t, err) 89 | 90 | value, err = db.Get([]byte("b")) 91 | require.NoError(t, err) 92 | require.Nil(t, value) 93 | 94 | // Setting, getting, and deleting an empty key should error. 
95 | _, err = db.Get([]byte{}) 96 | require.Equal(t, errKeyEmpty, err) 97 | _, err = db.Get(nil) 98 | require.Equal(t, errKeyEmpty, err) 99 | 100 | _, err = db.Has([]byte{}) 101 | require.Equal(t, errKeyEmpty, err) 102 | _, err = db.Has(nil) 103 | require.Equal(t, errKeyEmpty, err) 104 | 105 | err = db.Set([]byte{}, []byte{0x01}) 106 | require.Equal(t, errKeyEmpty, err) 107 | err = db.Set(nil, []byte{0x01}) 108 | require.Equal(t, errKeyEmpty, err) 109 | err = db.SetSync([]byte{}, []byte{0x01}) 110 | require.Equal(t, errKeyEmpty, err) 111 | err = db.SetSync(nil, []byte{0x01}) 112 | require.Equal(t, errKeyEmpty, err) 113 | 114 | err = db.Delete([]byte{}) 115 | require.Equal(t, errKeyEmpty, err) 116 | err = db.Delete(nil) 117 | require.Equal(t, errKeyEmpty, err) 118 | err = db.DeleteSync([]byte{}) 119 | require.Equal(t, errKeyEmpty, err) 120 | err = db.DeleteSync(nil) 121 | require.Equal(t, errKeyEmpty, err) 122 | 123 | // Setting a nil value should error, but an empty value is fine. 124 | err = db.Set([]byte("x"), nil) 125 | require.Equal(t, errValueNil, err) 126 | err = db.SetSync([]byte("x"), nil) 127 | require.Equal(t, errValueNil, err) 128 | 129 | err = db.Set([]byte("x"), []byte{}) 130 | require.NoError(t, err) 131 | err = db.SetSync([]byte("x"), []byte{}) 132 | require.NoError(t, err) 133 | value, err = db.Get([]byte("x")) 134 | require.NoError(t, err) 135 | require.Equal(t, []byte{}, value) 136 | } 137 | 138 | func TestBackendsGetSetDelete(t *testing.T) { 139 | for dbType := range backends { 140 | t.Run(string(dbType), func(t *testing.T) { 141 | testBackendGetSetDelete(t, dbType) 142 | }) 143 | } 144 | } 145 | 146 | func TestGoLevelDBBackend(t *testing.T) { 147 | name := fmt.Sprintf("test_%x", randStr(12)) 148 | db, err := NewDB(name, GoLevelDBBackend, "") 149 | require.NoError(t, err) 150 | defer cleanupDBDir("", name) 151 | 152 | _, ok := db.(*GoLevelDB) 153 | assert.True(t, ok) 154 | } 155 | 156 | func TestDBIterator(t *testing.T) { 157 | for dbType := 
range backends { 158 | t.Run(string(dbType), func(t *testing.T) { 159 | testDBIterator(t, dbType) 160 | }) 161 | } 162 | } 163 | 164 | func testDBIterator(t *testing.T, backend BackendType) { 165 | name := fmt.Sprintf("test_%x", randStr(12)) 166 | dir := os.TempDir() 167 | db, err := NewDB(name, backend, dir) 168 | require.NoError(t, err) 169 | defer cleanupDBDir(dir, name) 170 | 171 | for i := 0; i < 10; i++ { 172 | if i != 6 { // but skip 6. 173 | err := db.Set(int642Bytes(int64(i)), []byte{}) 174 | require.NoError(t, err) 175 | } 176 | } 177 | 178 | // Blank iterator keys should error 179 | _, err = db.Iterator([]byte{}, nil) 180 | require.Equal(t, errKeyEmpty, err) 181 | _, err = db.Iterator(nil, []byte{}) 182 | require.Equal(t, errKeyEmpty, err) 183 | _, err = db.ReverseIterator([]byte{}, nil) 184 | require.Equal(t, errKeyEmpty, err) 185 | _, err = db.ReverseIterator(nil, []byte{}) 186 | require.Equal(t, errKeyEmpty, err) 187 | 188 | itr, err := db.Iterator(nil, nil) 189 | require.NoError(t, err) 190 | verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") 191 | 192 | ritr, err := db.ReverseIterator(nil, nil) 193 | require.NoError(t, err) 194 | verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") 195 | 196 | itr, err = db.Iterator(nil, int642Bytes(0)) 197 | require.NoError(t, err) 198 | verifyIterator(t, itr, []int64(nil), "forward iterator to 0") 199 | 200 | ritr, err = db.ReverseIterator(int642Bytes(10), nil) 201 | require.NoError(t, err) 202 | verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)") 203 | 204 | itr, err = db.Iterator(int642Bytes(0), nil) 205 | require.NoError(t, err) 206 | verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") 207 | 208 | itr, err = db.Iterator(int642Bytes(1), nil) 209 | require.NoError(t, err) 210 | verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") 211 | 212 | ritr, err = db.ReverseIterator(nil, 
int642Bytes(10)) 213 | require.NoError(t, err) 214 | verifyIterator(t, ritr, 215 | []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") 216 | 217 | ritr, err = db.ReverseIterator(nil, int642Bytes(9)) 218 | require.NoError(t, err) 219 | verifyIterator(t, ritr, 220 | []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") 221 | 222 | ritr, err = db.ReverseIterator(nil, int642Bytes(8)) 223 | require.NoError(t, err) 224 | verifyIterator(t, ritr, 225 | []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") 226 | 227 | itr, err = db.Iterator(int642Bytes(5), int642Bytes(6)) 228 | require.NoError(t, err) 229 | verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6") 230 | 231 | itr, err = db.Iterator(int642Bytes(5), int642Bytes(7)) 232 | require.NoError(t, err) 233 | verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7") 234 | 235 | itr, err = db.Iterator(int642Bytes(5), int642Bytes(8)) 236 | require.NoError(t, err) 237 | verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8") 238 | 239 | itr, err = db.Iterator(int642Bytes(6), int642Bytes(7)) 240 | require.NoError(t, err) 241 | verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7") 242 | 243 | itr, err = db.Iterator(int642Bytes(6), int642Bytes(8)) 244 | require.NoError(t, err) 245 | verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8") 246 | 247 | itr, err = db.Iterator(int642Bytes(7), int642Bytes(8)) 248 | require.NoError(t, err) 249 | verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8") 250 | 251 | ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5)) 252 | require.NoError(t, err) 253 | verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4") 254 | 255 | ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6)) 256 | require.NoError(t, err) 257 | verifyIterator(t, ritr, 258 | []int64{5, 4}, "reverse iterator from 6 (ex) to 4") 259 | 260 | ritr, err = db.ReverseIterator(int642Bytes(4), 
int642Bytes(7)) 261 | require.NoError(t, err) 262 | verifyIterator(t, ritr, 263 | []int64{5, 4}, "reverse iterator from 7 (ex) to 4") 264 | 265 | ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6)) 266 | require.NoError(t, err) 267 | verifyIterator(t, ritr, []int64{5}, "reverse iterator from 6 (ex) to 5") 268 | 269 | ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7)) 270 | require.NoError(t, err) 271 | verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5") 272 | 273 | ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7)) 274 | require.NoError(t, err) 275 | verifyIterator(t, ritr, 276 | []int64(nil), "reverse iterator from 7 (ex) to 6") 277 | 278 | ritr, err = db.ReverseIterator(int642Bytes(10), nil) 279 | require.NoError(t, err) 280 | verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10") 281 | 282 | ritr, err = db.ReverseIterator(int642Bytes(6), nil) 283 | require.NoError(t, err) 284 | verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6") 285 | 286 | ritr, err = db.ReverseIterator(int642Bytes(5), nil) 287 | require.NoError(t, err) 288 | verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5") 289 | 290 | ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9)) 291 | require.NoError(t, err) 292 | verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8") 293 | 294 | ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4)) 295 | require.NoError(t, err) 296 | verifyIterator(t, ritr, 297 | []int64{3, 2}, "reverse iterator from 4 (ex) to 2") 298 | 299 | ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2)) 300 | require.NoError(t, err) 301 | verifyIterator(t, ritr, 302 | []int64(nil), "reverse iterator from 2 (ex) to 4") 303 | 304 | // Ensure that the iterators don't panic with an empty database. 
305 | dir2, err := ioutil.TempDir("", "tm-db-test") 306 | require.NoError(t, err) 307 | db2, err := NewDB(name, backend, dir2) 308 | require.NoError(t, err) 309 | defer cleanupDBDir(dir2, name) 310 | 311 | itr, err = db2.Iterator(nil, nil) 312 | require.NoError(t, err) 313 | verifyIterator(t, itr, nil, "forward iterator with empty db") 314 | 315 | ritr, err = db2.ReverseIterator(nil, nil) 316 | require.NoError(t, err) 317 | verifyIterator(t, ritr, nil, "reverse iterator with empty db") 318 | } 319 | 320 | func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) { 321 | var list []int64 322 | for itr.Valid() { 323 | key := itr.Key() 324 | list = append(list, bytes2Int64(key)) 325 | itr.Next() 326 | } 327 | assert.Equal(t, expected, list, msg) 328 | } 329 | 330 | func TestDBBatch(t *testing.T) { 331 | for dbType := range backends { 332 | t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { 333 | testDBBatch(t, dbType) 334 | }) 335 | } 336 | } 337 | 338 | func testDBBatch(t *testing.T, backend BackendType) { 339 | name := fmt.Sprintf("test_%x", randStr(12)) 340 | dir := os.TempDir() 341 | db, err := NewDB(name, backend, dir) 342 | require.NoError(t, err) 343 | defer cleanupDBDir(dir, name) 344 | 345 | // create a new batch, and some items - they should not be visible until we write 346 | batch := db.NewBatch() 347 | require.NoError(t, batch.Set([]byte("a"), []byte{1})) 348 | require.NoError(t, batch.Set([]byte("b"), []byte{2})) 349 | require.NoError(t, batch.Set([]byte("c"), []byte{3})) 350 | assertKeyValues(t, db, map[string][]byte{}) 351 | 352 | err = batch.Write() 353 | require.NoError(t, err) 354 | assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) 355 | 356 | // trying to modify or rewrite a written batch should error, but closing it should work 357 | require.Error(t, batch.Set([]byte("a"), []byte{9})) 358 | require.Error(t, batch.Delete([]byte("a"))) 359 | require.Error(t, batch.Write()) 360 | require.Error(t, 
batch.WriteSync()) 361 | require.NoError(t, batch.Close()) 362 | 363 | // batches should write changes in order 364 | batch = db.NewBatch() 365 | require.NoError(t, batch.Delete([]byte("a"))) 366 | require.NoError(t, batch.Set([]byte("a"), []byte{1})) 367 | require.NoError(t, batch.Set([]byte("b"), []byte{1})) 368 | require.NoError(t, batch.Set([]byte("b"), []byte{2})) 369 | require.NoError(t, batch.Set([]byte("c"), []byte{3})) 370 | require.NoError(t, batch.Delete([]byte("c"))) 371 | require.NoError(t, batch.Write()) 372 | require.NoError(t, batch.Close()) 373 | assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) 374 | 375 | // empty and nil keys, as well as nil values, should be disallowed 376 | batch = db.NewBatch() 377 | err = batch.Set([]byte{}, []byte{0x01}) 378 | require.Equal(t, errKeyEmpty, err) 379 | err = batch.Set(nil, []byte{0x01}) 380 | require.Equal(t, errKeyEmpty, err) 381 | err = batch.Set([]byte("a"), nil) 382 | require.Equal(t, errValueNil, err) 383 | 384 | err = batch.Delete([]byte{}) 385 | require.Equal(t, errKeyEmpty, err) 386 | err = batch.Delete(nil) 387 | require.Equal(t, errKeyEmpty, err) 388 | 389 | err = batch.Close() 390 | require.NoError(t, err) 391 | 392 | // it should be possible to write an empty batch 393 | batch = db.NewBatch() 394 | err = batch.Write() 395 | require.NoError(t, err) 396 | assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) 397 | 398 | // it should be possible to close an empty batch, and to re-close a closed batch 399 | batch = db.NewBatch() 400 | batch.Close() 401 | batch.Close() 402 | 403 | // all other operations on a closed batch should error 404 | require.Error(t, batch.Set([]byte("a"), []byte{9})) 405 | require.Error(t, batch.Delete([]byte("a"))) 406 | require.Error(t, batch.Write()) 407 | require.Error(t, batch.WriteSync()) 408 | } 409 | 410 | func assertKeyValues(t *testing.T, db DB, expect map[string][]byte) { 411 | iter, err := db.Iterator(nil, nil) 412 | require.NoError(t, err) 413 | 
defer iter.Close() 414 | 415 | actual := make(map[string][]byte) 416 | for ; iter.Valid(); iter.Next() { 417 | require.NoError(t, iter.Error()) 418 | actual[string(iter.Key())] = iter.Value() 419 | } 420 | 421 | assert.Equal(t, expect, actual) 422 | } 423 | -------------------------------------------------------------------------------- /badger_db.go: -------------------------------------------------------------------------------- 1 | //go:build badgerdb 2 | // +build badgerdb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | "fmt" 9 | "os" 10 | "path/filepath" 11 | 12 | "github.com/dgraph-io/badger/v3" 13 | ) 14 | 15 | func init() { registerDBCreator(BadgerDBBackend, badgerDBCreator, true) } 16 | 17 | func badgerDBCreator(dbName, dir string) (DB, error) { 18 | return NewBadgerDB(dbName, dir) 19 | } 20 | 21 | // NewBadgerDB creates a Badger key-value store backed to the 22 | // directory dir supplied. If dir does not exist, it will be created. 23 | func NewBadgerDB(dbName, dir string) (*BadgerDB, error) { 24 | // Since Badger doesn't support database names, we join both to obtain 25 | // the final directory to use for the database. 26 | path := filepath.Join(dir, dbName) 27 | 28 | if err := os.MkdirAll(path, 0o755); err != nil { 29 | return nil, err 30 | } 31 | opts := badger.DefaultOptions(path) 32 | opts.SyncWrites = false // note that we have Sync methods 33 | opts.Logger = nil // badger is too chatty by default 34 | return NewBadgerDBWithOptions(opts) 35 | } 36 | 37 | // NewBadgerDBWithOptions creates a BadgerDB key value store 38 | // gives the flexibility of initializing a database with the 39 | // respective options. 
40 | func NewBadgerDBWithOptions(opts badger.Options) (*BadgerDB, error) { 41 | db, err := badger.Open(opts) 42 | if err != nil { 43 | return nil, err 44 | } 45 | return &BadgerDB{db: db}, nil 46 | } 47 | 48 | type BadgerDB struct { 49 | db *badger.DB 50 | } 51 | 52 | var _ DB = (*BadgerDB)(nil) 53 | 54 | func (b *BadgerDB) Get(key []byte) ([]byte, error) { 55 | if len(key) == 0 { 56 | return nil, errKeyEmpty 57 | } 58 | var val []byte 59 | err := b.db.View(func(txn *badger.Txn) error { 60 | item, err := txn.Get(key) 61 | if err == badger.ErrKeyNotFound { 62 | return nil 63 | } else if err != nil { 64 | return err 65 | } 66 | val, err = item.ValueCopy(nil) 67 | if err == nil && val == nil { 68 | val = []byte{} 69 | } 70 | return err 71 | }) 72 | return val, err 73 | } 74 | 75 | func (b *BadgerDB) Has(key []byte) (bool, error) { 76 | if len(key) == 0 { 77 | return false, errKeyEmpty 78 | } 79 | var found bool 80 | err := b.db.View(func(txn *badger.Txn) error { 81 | _, err := txn.Get(key) 82 | if err != nil && err != badger.ErrKeyNotFound { 83 | return err 84 | } 85 | found = (err != badger.ErrKeyNotFound) 86 | return nil 87 | }) 88 | return found, err 89 | } 90 | 91 | func (b *BadgerDB) Set(key, value []byte) error { 92 | if len(key) == 0 { 93 | return errKeyEmpty 94 | } 95 | if value == nil { 96 | return errValueNil 97 | } 98 | return b.db.Update(func(txn *badger.Txn) error { 99 | return txn.Set(key, value) 100 | }) 101 | } 102 | 103 | func withSync(db *badger.DB, err error) error { 104 | if err != nil { 105 | return err 106 | } 107 | return db.Sync() 108 | } 109 | 110 | func (b *BadgerDB) SetSync(key, value []byte) error { 111 | return withSync(b.db, b.Set(key, value)) 112 | } 113 | 114 | func (b *BadgerDB) Delete(key []byte) error { 115 | if len(key) == 0 { 116 | return errKeyEmpty 117 | } 118 | return b.db.Update(func(txn *badger.Txn) error { 119 | return txn.Delete(key) 120 | }) 121 | } 122 | 123 | func (b *BadgerDB) DeleteSync(key []byte) error { 124 | return 
withSync(b.db, b.Delete(key)) 125 | } 126 | 127 | func (b *BadgerDB) Close() error { 128 | return b.db.Close() 129 | } 130 | 131 | func (b *BadgerDB) Print() error { 132 | return nil 133 | } 134 | 135 | func (b *BadgerDB) iteratorOpts(start, end []byte, opts badger.IteratorOptions) (*badgerDBIterator, error) { 136 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 137 | return nil, errKeyEmpty 138 | } 139 | txn := b.db.NewTransaction(false) 140 | iter := txn.NewIterator(opts) 141 | iter.Rewind() 142 | iter.Seek(start) 143 | if opts.Reverse && iter.Valid() && bytes.Equal(iter.Item().Key(), start) { 144 | // If we're going in reverse, our starting point was "end", 145 | // which is exclusive. 146 | iter.Next() 147 | } 148 | return &badgerDBIterator{ 149 | reverse: opts.Reverse, 150 | start: start, 151 | end: end, 152 | 153 | txn: txn, 154 | iter: iter, 155 | }, nil 156 | } 157 | 158 | func (b *BadgerDB) Iterator(start, end []byte) (Iterator, error) { 159 | opts := badger.DefaultIteratorOptions 160 | return b.iteratorOpts(start, end, opts) 161 | } 162 | 163 | func (b *BadgerDB) ReverseIterator(start, end []byte) (Iterator, error) { 164 | opts := badger.DefaultIteratorOptions 165 | opts.Reverse = true 166 | return b.iteratorOpts(end, start, opts) 167 | } 168 | 169 | func (b *BadgerDB) Stats() map[string]string { 170 | return nil 171 | } 172 | 173 | func (b *BadgerDB) NewBatch() Batch { 174 | wb := &badgerDBBatch{ 175 | db: b.db, 176 | wb: b.db.NewWriteBatch(), 177 | firstFlush: make(chan struct{}, 1), 178 | } 179 | wb.firstFlush <- struct{}{} 180 | return wb 181 | } 182 | 183 | var _ Batch = (*badgerDBBatch)(nil) 184 | 185 | type badgerDBBatch struct { 186 | db *badger.DB 187 | wb *badger.WriteBatch 188 | 189 | // Calling db.Flush twice panics, so we must keep track of whether we've 190 | // flushed already on our own. If Write can receive from the firstFlush 191 | // channel, then it's the first and only Flush call we should do. 
192 | // 193 | // Upstream bug report: 194 | // https://github.com/dgraph-io/badger/issues/1394 195 | firstFlush chan struct{} 196 | } 197 | 198 | func (b *badgerDBBatch) Set(key, value []byte) error { 199 | if len(key) == 0 { 200 | return errKeyEmpty 201 | } 202 | if value == nil { 203 | return errValueNil 204 | } 205 | return b.wb.Set(key, value) 206 | } 207 | 208 | func (b *badgerDBBatch) Delete(key []byte) error { 209 | if len(key) == 0 { 210 | return errKeyEmpty 211 | } 212 | return b.wb.Delete(key) 213 | } 214 | 215 | func (b *badgerDBBatch) Write() error { 216 | select { 217 | case <-b.firstFlush: 218 | return b.wb.Flush() 219 | default: 220 | return fmt.Errorf("batch already flushed") 221 | } 222 | } 223 | 224 | func (b *badgerDBBatch) WriteSync() error { 225 | return withSync(b.db, b.Write()) 226 | } 227 | 228 | func (b *badgerDBBatch) Close() error { 229 | select { 230 | case <-b.firstFlush: // a Flush after Cancel panics too 231 | default: 232 | } 233 | b.wb.Cancel() 234 | return nil 235 | } 236 | 237 | type badgerDBIterator struct { 238 | reverse bool 239 | start, end []byte 240 | 241 | txn *badger.Txn 242 | iter *badger.Iterator 243 | 244 | lastErr error 245 | } 246 | 247 | func (i *badgerDBIterator) Close() error { 248 | i.iter.Close() 249 | i.txn.Discard() 250 | return nil 251 | } 252 | 253 | func (i *badgerDBIterator) Domain() (start, end []byte) { return i.start, i.end } 254 | func (i *badgerDBIterator) Error() error { return i.lastErr } 255 | 256 | func (i *badgerDBIterator) Next() { 257 | if !i.Valid() { 258 | panic("iterator is invalid") 259 | } 260 | i.iter.Next() 261 | } 262 | 263 | func (i *badgerDBIterator) Valid() bool { 264 | if !i.iter.Valid() { 265 | return false 266 | } 267 | if len(i.end) > 0 { 268 | key := i.iter.Item().Key() 269 | if c := bytes.Compare(key, i.end); (!i.reverse && c >= 0) || (i.reverse && c < 0) { 270 | // We're at the end key, or past the end. 
271 | return false 272 | } 273 | } 274 | return true 275 | } 276 | 277 | func (i *badgerDBIterator) Key() []byte { 278 | if !i.Valid() { 279 | panic("iterator is invalid") 280 | } 281 | // Note that we use KeyCopy here, so the returned key remains valid even 282 | // after the next call to Next. 283 | return i.iter.Item().KeyCopy(nil) 284 | } 285 | 286 | func (i *badgerDBIterator) Value() []byte { 287 | if !i.Valid() { 288 | panic("iterator is invalid") 289 | } 290 | val, err := i.iter.Item().ValueCopy(nil) 291 | if err != nil { 292 | i.lastErr = err 293 | } 294 | return val 295 | } 296 | -------------------------------------------------------------------------------- /boltdb.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import ( 7 | "errors" 8 | "fmt" 9 | "os" 10 | "path/filepath" 11 | 12 | "go.etcd.io/bbolt" 13 | ) 14 | 15 | var bucket = []byte("tm") 16 | 17 | func init() { 18 | registerDBCreator(BoltDBBackend, NewBoltDB, false) 19 | } 20 | 21 | // BoltDB is a wrapper around etcd's fork of bolt (https://github.com/etcd-io/bbolt). 22 | // 23 | // NOTE: All operations (including Set, Delete) are synchronous by default. One 24 | // can globally turn it off by using NoSync config option (not recommended). 25 | // 26 | // A single bucket ([]byte("tm")) is used per a database instance. This could 27 | // lead to performance issues when/if there will be lots of keys. 28 | type BoltDB struct { 29 | db *bbolt.DB 30 | } 31 | 32 | var _ DB = (*BoltDB)(nil) 33 | 34 | // NewBoltDB returns a BoltDB with default options. 35 | func NewBoltDB(name, dir string) (DB, error) { 36 | return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions) 37 | } 38 | 39 | // NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not 40 | // supported because NewBoltDBWithOpts creates a global bucket.
41 | func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) { 42 | if opts.ReadOnly { 43 | return nil, errors.New("ReadOnly: true is not supported") 44 | } 45 | 46 | dbPath := filepath.Join(dir, name+".db") 47 | db, err := bbolt.Open(dbPath, os.ModePerm, opts) 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | // create a global bucket 53 | err = db.Update(func(tx *bbolt.Tx) error { 54 | _, err := tx.CreateBucketIfNotExists(bucket) 55 | return err 56 | }) 57 | if err != nil { 58 | return nil, err 59 | } 60 | 61 | return &BoltDB{db: db}, nil 62 | } 63 | 64 | // Get implements DB. 65 | func (bdb *BoltDB) Get(key []byte) (value []byte, err error) { 66 | if len(key) == 0 { 67 | return nil, errKeyEmpty 68 | } 69 | err = bdb.db.View(func(tx *bbolt.Tx) error { 70 | b := tx.Bucket(bucket) 71 | if v := b.Get(key); v != nil { 72 | value = append([]byte{}, v...) 73 | } 74 | return nil 75 | }) 76 | if err != nil { 77 | return nil, err 78 | } 79 | return 80 | } 81 | 82 | // Has implements DB. 83 | func (bdb *BoltDB) Has(key []byte) (bool, error) { 84 | bytes, err := bdb.Get(key) 85 | if err != nil { 86 | return false, err 87 | } 88 | return bytes != nil, nil 89 | } 90 | 91 | // Set implements DB. 92 | func (bdb *BoltDB) Set(key, value []byte) error { 93 | if len(key) == 0 { 94 | return errKeyEmpty 95 | } 96 | if value == nil { 97 | return errValueNil 98 | } 99 | err := bdb.db.Update(func(tx *bbolt.Tx) error { 100 | b := tx.Bucket(bucket) 101 | return b.Put(key, value) 102 | }) 103 | if err != nil { 104 | return err 105 | } 106 | return nil 107 | } 108 | 109 | // SetSync implements DB. 110 | func (bdb *BoltDB) SetSync(key, value []byte) error { 111 | return bdb.Set(key, value) 112 | } 113 | 114 | // Delete implements DB. 
115 | func (bdb *BoltDB) Delete(key []byte) error { 116 | if len(key) == 0 { 117 | return errKeyEmpty 118 | } 119 | err := bdb.db.Update(func(tx *bbolt.Tx) error { 120 | return tx.Bucket(bucket).Delete(key) 121 | }) 122 | if err != nil { 123 | return err 124 | } 125 | return nil 126 | } 127 | 128 | // DeleteSync implements DB. 129 | func (bdb *BoltDB) DeleteSync(key []byte) error { 130 | return bdb.Delete(key) 131 | } 132 | 133 | // Close implements DB. 134 | func (bdb *BoltDB) Close() error { 135 | return bdb.db.Close() 136 | } 137 | 138 | // Print implements DB. 139 | // nolint: errcheck 140 | func (bdb *BoltDB) Print() error { 141 | stats := bdb.db.Stats() 142 | fmt.Printf("%v\n", stats) 143 | 144 | err := bdb.db.View(func(tx *bbolt.Tx) error { 145 | tx.Bucket(bucket).ForEach(func(k, v []byte) error { 146 | fmt.Printf("[%X]:\t[%X]\n", k, v) 147 | return nil 148 | }) 149 | return nil 150 | }) 151 | if err != nil { 152 | return err 153 | } 154 | return nil 155 | } 156 | 157 | // Stats implements DB. 158 | func (bdb *BoltDB) Stats() map[string]string { 159 | stats := bdb.db.Stats() 160 | m := make(map[string]string) 161 | 162 | // Freelist stats 163 | m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN) 164 | m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN) 165 | m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc) 166 | m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse) 167 | 168 | // Transaction stats 169 | m["TxN"] = fmt.Sprintf("%v", stats.TxN) 170 | m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN) 171 | 172 | return m 173 | } 174 | 175 | // NewBatch implements DB. 176 | func (bdb *BoltDB) NewBatch() Batch { 177 | return newBoltDBBatch(bdb) 178 | } 179 | 180 | // WARNING: Any concurrent writes or reads will block until the iterator is 181 | // closed. 
182 | func (bdb *BoltDB) Iterator(start, end []byte) (Iterator, error) { 183 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 184 | return nil, errKeyEmpty 185 | } 186 | tx, err := bdb.db.Begin(false) 187 | if err != nil { 188 | return nil, err 189 | } 190 | return newBoltDBIterator(tx, start, end, false), nil 191 | } 192 | 193 | // WARNING: Any concurrent writes or reads will block until the iterator is 194 | // closed. 195 | func (bdb *BoltDB) ReverseIterator(start, end []byte) (Iterator, error) { 196 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 197 | return nil, errKeyEmpty 198 | } 199 | tx, err := bdb.db.Begin(false) 200 | if err != nil { 201 | return nil, err 202 | } 203 | return newBoltDBIterator(tx, start, end, true), nil 204 | } 205 | -------------------------------------------------------------------------------- /boltdb_batch.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import "go.etcd.io/bbolt" 7 | 8 | // boltDBBatch stores operations internally and dumps them to BoltDB on Write(). 9 | type boltDBBatch struct { 10 | db *BoltDB 11 | ops []operation 12 | } 13 | 14 | var _ Batch = (*boltDBBatch)(nil) 15 | 16 | func newBoltDBBatch(db *BoltDB) *boltDBBatch { 17 | return &boltDBBatch{ 18 | db: db, 19 | ops: []operation{}, 20 | } 21 | } 22 | 23 | // Set implements Batch. 24 | func (b *boltDBBatch) Set(key, value []byte) error { 25 | if len(key) == 0 { 26 | return errKeyEmpty 27 | } 28 | if value == nil { 29 | return errValueNil 30 | } 31 | if b.ops == nil { 32 | return errBatchClosed 33 | } 34 | b.ops = append(b.ops, operation{opTypeSet, key, value}) 35 | return nil 36 | } 37 | 38 | // Delete implements Batch. 
39 | func (b *boltDBBatch) Delete(key []byte) error { 40 | if len(key) == 0 { 41 | return errKeyEmpty 42 | } 43 | if b.ops == nil { 44 | return errBatchClosed 45 | } 46 | b.ops = append(b.ops, operation{opTypeDelete, key, nil}) 47 | return nil 48 | } 49 | 50 | // Write implements Batch. 51 | func (b *boltDBBatch) Write() error { 52 | if b.ops == nil { 53 | return errBatchClosed 54 | } 55 | err := b.db.db.Batch(func(tx *bbolt.Tx) error { 56 | bkt := tx.Bucket(bucket) 57 | for _, op := range b.ops { 58 | switch op.opType { 59 | case opTypeSet: 60 | if err := bkt.Put(op.key, op.value); err != nil { 61 | return err 62 | } 63 | case opTypeDelete: 64 | if err := bkt.Delete(op.key); err != nil { 65 | return err 66 | } 67 | } 68 | } 69 | return nil 70 | }) 71 | if err != nil { 72 | return err 73 | } 74 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 75 | return b.Close() 76 | } 77 | 78 | // WriteSync implements Batch. 79 | func (b *boltDBBatch) WriteSync() error { 80 | return b.Write() 81 | } 82 | 83 | // Close implements Batch. 84 | func (b *boltDBBatch) Close() error { 85 | b.ops = nil 86 | return nil 87 | } 88 | -------------------------------------------------------------------------------- /boltdb_iterator.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | 9 | "go.etcd.io/bbolt" 10 | ) 11 | 12 | // boltDBIterator allows you to iterate on range of keys/values given some 13 | // start / end keys (nil & nil will result in doing full scan). 14 | type boltDBIterator struct { 15 | tx *bbolt.Tx 16 | 17 | itr *bbolt.Cursor 18 | start []byte 19 | end []byte 20 | 21 | currentKey []byte 22 | currentValue []byte 23 | 24 | isInvalid bool 25 | isReverse bool 26 | } 27 | 28 | var _ Iterator = (*boltDBIterator)(nil) 29 | 30 | // newBoltDBIterator creates a new boltDBIterator. 
31 | func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator { 32 | itr := tx.Bucket(bucket).Cursor() 33 | 34 | var ck, cv []byte 35 | if isReverse { 36 | switch { 37 | case end == nil: 38 | ck, cv = itr.Last() 39 | default: 40 | _, _ = itr.Seek(end) // after key 41 | ck, cv = itr.Prev() // return to end key 42 | } 43 | } else { 44 | switch { 45 | case start == nil: 46 | ck, cv = itr.First() 47 | default: 48 | ck, cv = itr.Seek(start) 49 | } 50 | } 51 | 52 | return &boltDBIterator{ 53 | tx: tx, 54 | itr: itr, 55 | start: start, 56 | end: end, 57 | currentKey: ck, 58 | currentValue: cv, 59 | isReverse: isReverse, 60 | isInvalid: false, 61 | } 62 | } 63 | 64 | // Domain implements Iterator. 65 | func (itr *boltDBIterator) Domain() ([]byte, []byte) { 66 | return itr.start, itr.end 67 | } 68 | 69 | // Valid implements Iterator. 70 | func (itr *boltDBIterator) Valid() bool { 71 | if itr.isInvalid { 72 | return false 73 | } 74 | 75 | if itr.Error() != nil { 76 | itr.isInvalid = true 77 | return false 78 | } 79 | 80 | // iterated to the end of the cursor 81 | if itr.currentKey == nil { 82 | itr.isInvalid = true 83 | return false 84 | } 85 | 86 | if itr.isReverse { 87 | if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 { 88 | itr.isInvalid = true 89 | return false 90 | } 91 | } else { 92 | if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 { 93 | itr.isInvalid = true 94 | return false 95 | } 96 | } 97 | 98 | // Valid 99 | return true 100 | } 101 | 102 | // Next implements Iterator. 103 | func (itr *boltDBIterator) Next() { 104 | itr.assertIsValid() 105 | if itr.isReverse { 106 | itr.currentKey, itr.currentValue = itr.itr.Prev() 107 | } else { 108 | itr.currentKey, itr.currentValue = itr.itr.Next() 109 | } 110 | } 111 | 112 | // Key implements Iterator. 113 | func (itr *boltDBIterator) Key() []byte { 114 | itr.assertIsValid() 115 | return append([]byte{}, itr.currentKey...) 
116 | } 117 | 118 | // Value implements Iterator. 119 | func (itr *boltDBIterator) Value() []byte { 120 | itr.assertIsValid() 121 | var value []byte 122 | if itr.currentValue != nil { 123 | value = append([]byte{}, itr.currentValue...) 124 | } 125 | return value 126 | } 127 | 128 | // Error implements Iterator. 129 | func (itr *boltDBIterator) Error() error { 130 | return nil 131 | } 132 | 133 | // Close implements Iterator. 134 | func (itr *boltDBIterator) Close() error { 135 | return itr.tx.Rollback() 136 | } 137 | 138 | func (itr *boltDBIterator) assertIsValid() { 139 | if !itr.Valid() { 140 | panic("iterator is invalid") 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /boltdb_test.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "path/filepath" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestBoltDBNewBoltDB(t *testing.T) { 16 | name := fmt.Sprintf("test_%x", randStr(12)) 17 | dir := os.TempDir() 18 | defer cleanupDBDir(dir, name) 19 | 20 | db, err := NewBoltDB(name, dir) 21 | require.NoError(t, err) 22 | db.Close() 23 | } 24 | 25 | func TestWithBoltDB(t *testing.T) { 26 | dir := t.TempDir() 27 | path := filepath.Join(dir, "boltdb") 28 | 29 | db, err := NewBoltDB(path, "") 30 | require.NoError(t, err) 31 | 32 | t.Run("BoltDB", func(t *testing.T) { Run(t, db) }) 33 | } 34 | 35 | func BenchmarkBoltDBRandomReadsWrites(b *testing.B) { 36 | name := fmt.Sprintf("test_%x", randStr(12)) 37 | db, err := NewBoltDB(name, "") 38 | if err != nil { 39 | b.Fatal(err) 40 | } 41 | defer func() { 42 | db.Close() 43 | cleanupDBDir("", name) 44 | }() 45 | 46 | benchmarkRandomReadsWrites(b, db) 47 | } 48 | -------------------------------------------------------------------------------- /cleveldb.go: 
-------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "path/filepath" 9 | 10 | "github.com/jmhodges/levigo" 11 | ) 12 | 13 | func init() { 14 | dbCreator := func(name string, dir string) (DB, error) { 15 | return NewCLevelDB(name, dir) 16 | } 17 | registerDBCreator(CLevelDBBackend, dbCreator, false) 18 | } 19 | 20 | // CLevelDB uses the C LevelDB database via a Go wrapper. 21 | type CLevelDB struct { 22 | db *levigo.DB 23 | ro *levigo.ReadOptions 24 | wo *levigo.WriteOptions 25 | woSync *levigo.WriteOptions 26 | } 27 | 28 | var _ DB = (*CLevelDB)(nil) 29 | 30 | // NewCLevelDB creates a new CLevelDB. 31 | func NewCLevelDB(name string, dir string) (*CLevelDB, error) { 32 | dbPath := filepath.Join(dir, name+".db") 33 | 34 | opts := levigo.NewOptions() 35 | opts.SetCache(levigo.NewLRUCache(1 << 30)) 36 | opts.SetCreateIfMissing(true) 37 | db, err := levigo.Open(dbPath, opts) 38 | if err != nil { 39 | return nil, err 40 | } 41 | ro := levigo.NewReadOptions() 42 | wo := levigo.NewWriteOptions() 43 | woSync := levigo.NewWriteOptions() 44 | woSync.SetSync(true) 45 | database := &CLevelDB{ 46 | db: db, 47 | ro: ro, 48 | wo: wo, 49 | woSync: woSync, 50 | } 51 | return database, nil 52 | } 53 | 54 | // Get implements DB. 55 | func (db *CLevelDB) Get(key []byte) ([]byte, error) { 56 | if len(key) == 0 { 57 | return nil, errKeyEmpty 58 | } 59 | res, err := db.db.Get(db.ro, key) 60 | if err != nil { 61 | return nil, err 62 | } 63 | return res, nil 64 | } 65 | 66 | // Has implements DB. 67 | func (db *CLevelDB) Has(key []byte) (bool, error) { 68 | bytes, err := db.Get(key) 69 | if err != nil { 70 | return false, err 71 | } 72 | return bytes != nil, nil 73 | } 74 | 75 | // Set implements DB. 
76 | func (db *CLevelDB) Set(key []byte, value []byte) error { 77 | if len(key) == 0 { 78 | return errKeyEmpty 79 | } 80 | if value == nil { 81 | return errValueNil 82 | } 83 | if err := db.db.Put(db.wo, key, value); err != nil { 84 | return err 85 | } 86 | return nil 87 | } 88 | 89 | // SetSync implements DB. 90 | func (db *CLevelDB) SetSync(key []byte, value []byte) error { 91 | if len(key) == 0 { 92 | return errKeyEmpty 93 | } 94 | if value == nil { 95 | return errValueNil 96 | } 97 | if err := db.db.Put(db.woSync, key, value); err != nil { 98 | return err 99 | } 100 | return nil 101 | } 102 | 103 | // Delete implements DB. 104 | func (db *CLevelDB) Delete(key []byte) error { 105 | if len(key) == 0 { 106 | return errKeyEmpty 107 | } 108 | if err := db.db.Delete(db.wo, key); err != nil { 109 | return err 110 | } 111 | return nil 112 | } 113 | 114 | // DeleteSync implements DB. 115 | func (db *CLevelDB) DeleteSync(key []byte) error { 116 | if len(key) == 0 { 117 | return errKeyEmpty 118 | } 119 | if err := db.db.Delete(db.woSync, key); err != nil { 120 | return err 121 | } 122 | return nil 123 | } 124 | 125 | // FIXME This should not be exposed 126 | func (db *CLevelDB) DB() *levigo.DB { 127 | return db.db 128 | } 129 | 130 | // Close implements DB. 131 | func (db *CLevelDB) Close() error { 132 | db.db.Close() 133 | db.ro.Close() 134 | db.wo.Close() 135 | db.woSync.Close() 136 | return nil 137 | } 138 | 139 | // Print implements DB. 140 | func (db *CLevelDB) Print() error { 141 | itr, err := db.Iterator(nil, nil) 142 | if err != nil { 143 | return err 144 | } 145 | defer itr.Close() 146 | for ; itr.Valid(); itr.Next() { 147 | key := itr.Key() 148 | value := itr.Value() 149 | fmt.Printf("[%X]:\t[%X]\n", key, value) 150 | } 151 | return nil 152 | } 153 | 154 | // Stats implements DB. 
155 | func (db *CLevelDB) Stats() map[string]string { 156 | keys := []string{ 157 | "leveldb.num-files-at-level{n}", 158 | "leveldb.sstables", 159 | "leveldb.stats", 160 | "leveldb.approximate-memory-usage", 161 | } 162 | 163 | stats := make(map[string]string, len(keys)) 164 | for _, key := range keys { 165 | str := db.db.PropertyValue(key) 166 | stats[key] = str 167 | } 168 | return stats 169 | } 170 | 171 | // NewBatch implements DB. 172 | func (db *CLevelDB) NewBatch() Batch { 173 | return newCLevelDBBatch(db) 174 | } 175 | 176 | // Iterator implements DB. 177 | func (db *CLevelDB) Iterator(start, end []byte) (Iterator, error) { 178 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 179 | return nil, errKeyEmpty 180 | } 181 | itr := db.db.NewIterator(db.ro) 182 | return newCLevelDBIterator(itr, start, end, false), nil 183 | } 184 | 185 | // ReverseIterator implements DB. 186 | func (db *CLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { 187 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 188 | return nil, errKeyEmpty 189 | } 190 | itr := db.db.NewIterator(db.ro) 191 | return newCLevelDBIterator(itr, start, end, true), nil 192 | } 193 | -------------------------------------------------------------------------------- /cleveldb_batch.go: -------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import "github.com/jmhodges/levigo" 7 | 8 | // cLevelDBBatch is a LevelDB batch. 9 | type cLevelDBBatch struct { 10 | db *CLevelDB 11 | batch *levigo.WriteBatch 12 | } 13 | 14 | func newCLevelDBBatch(db *CLevelDB) *cLevelDBBatch { 15 | return &cLevelDBBatch{ 16 | db: db, 17 | batch: levigo.NewWriteBatch(), 18 | } 19 | } 20 | 21 | // Set implements Batch. 
22 | func (b *cLevelDBBatch) Set(key, value []byte) error { 23 | if len(key) == 0 { 24 | return errKeyEmpty 25 | } 26 | if value == nil { 27 | return errValueNil 28 | } 29 | if b.batch == nil { 30 | return errBatchClosed 31 | } 32 | b.batch.Put(key, value) 33 | return nil 34 | } 35 | 36 | // Delete implements Batch. 37 | func (b *cLevelDBBatch) Delete(key []byte) error { 38 | if len(key) == 0 { 39 | return errKeyEmpty 40 | } 41 | if b.batch == nil { 42 | return errBatchClosed 43 | } 44 | b.batch.Delete(key) 45 | return nil 46 | } 47 | 48 | // Write implements Batch. 49 | func (b *cLevelDBBatch) Write() error { 50 | if b.batch == nil { 51 | return errBatchClosed 52 | } 53 | err := b.db.db.Write(b.db.wo, b.batch) 54 | if err != nil { 55 | return err 56 | } 57 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 58 | return b.Close() 59 | } 60 | 61 | // WriteSync implements Batch. 62 | func (b *cLevelDBBatch) WriteSync() error { 63 | if b.batch == nil { 64 | return errBatchClosed 65 | } 66 | err := b.db.db.Write(b.db.woSync, b.batch) 67 | if err != nil { 68 | return err 69 | } 70 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 71 | b.Close() 72 | return nil 73 | } 74 | 75 | // Close implements Batch. 76 | func (b *cLevelDBBatch) Close() error { 77 | if b.batch != nil { 78 | b.batch.Close() 79 | b.batch = nil 80 | } 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /cleveldb_iterator.go: -------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | 9 | "github.com/jmhodges/levigo" 10 | ) 11 | 12 | // cLevelDBIterator is a cLevelDB iterator. 
13 | type cLevelDBIterator struct { 14 | source *levigo.Iterator 15 | start, end []byte 16 | isReverse bool 17 | isInvalid bool 18 | } 19 | 20 | var _ Iterator = (*cLevelDBIterator)(nil) 21 | 22 | func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { 23 | if isReverse { 24 | if len(end) == 0 { 25 | source.SeekToLast() 26 | } else { 27 | source.Seek(end) 28 | if source.Valid() { 29 | eoakey := source.Key() // end or after key 30 | if bytes.Compare(end, eoakey) <= 0 { 31 | source.Prev() 32 | } 33 | } else { 34 | source.SeekToLast() 35 | } 36 | } 37 | } else { 38 | if len(start) == 0 { 39 | source.SeekToFirst() 40 | } else { 41 | source.Seek(start) 42 | } 43 | } 44 | return &cLevelDBIterator{ 45 | source: source, 46 | start: start, 47 | end: end, 48 | isReverse: isReverse, 49 | isInvalid: false, 50 | } 51 | } 52 | 53 | // Domain implements Iterator. 54 | func (itr cLevelDBIterator) Domain() ([]byte, []byte) { 55 | return itr.start, itr.end 56 | } 57 | 58 | // Valid implements Iterator. 59 | func (itr cLevelDBIterator) Valid() bool { 60 | // Once invalid, forever invalid. 61 | if itr.isInvalid { 62 | return false 63 | } 64 | 65 | // If source errors, invalid. 66 | if itr.source.GetError() != nil { 67 | return false 68 | } 69 | 70 | // If source is invalid, invalid. 71 | if !itr.source.Valid() { 72 | return false 73 | } 74 | 75 | // If key is end or past it, invalid. 76 | start := itr.start 77 | end := itr.end 78 | key := itr.source.Key() 79 | if itr.isReverse { 80 | if start != nil && bytes.Compare(key, start) < 0 { 81 | return false 82 | } 83 | } else { 84 | if end != nil && bytes.Compare(end, key) <= 0 { 85 | return false 86 | } 87 | } 88 | 89 | // It's valid. 90 | return true 91 | } 92 | 93 | // Key implements Iterator. 94 | func (itr cLevelDBIterator) Key() []byte { 95 | itr.assertIsValid() 96 | return itr.source.Key() 97 | } 98 | 99 | // Value implements Iterator. 
100 | func (itr cLevelDBIterator) Value() []byte { 101 | itr.assertIsValid() 102 | return itr.source.Value() 103 | } 104 | 105 | // Next implements Iterator. 106 | func (itr cLevelDBIterator) Next() { 107 | itr.assertIsValid() 108 | if itr.isReverse { 109 | itr.source.Prev() 110 | } else { 111 | itr.source.Next() 112 | } 113 | } 114 | 115 | // Error implements Iterator. 116 | func (itr cLevelDBIterator) Error() error { 117 | return itr.source.GetError() 118 | } 119 | 120 | // Close implements Iterator. 121 | func (itr cLevelDBIterator) Close() error { 122 | itr.source.Close() 123 | return nil 124 | } 125 | 126 | func (itr cLevelDBIterator) assertIsValid() { 127 | if !itr.Valid() { 128 | panic("iterator is invalid") 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /cleveldb_test.go: -------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | "fmt" 9 | "math/rand" 10 | "os" 11 | "path/filepath" 12 | "testing" 13 | 14 | "github.com/stretchr/testify/assert" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | func TestWithClevelDB(t *testing.T) { 19 | dir := t.TempDir() 20 | path := filepath.Join(dir, "cleveldb") 21 | 22 | db, err := NewCLevelDB(path, "") 23 | require.NoError(t, err) 24 | 25 | t.Run("ClevelDB", func(t *testing.T) { Run(t, db) }) 26 | } 27 | 28 | //nolint: errcheck 29 | func BenchmarkRandomReadsWrites2(b *testing.B) { 30 | b.StopTimer() 31 | 32 | numItems := int64(1000000) 33 | internal := map[int64]int64{} 34 | for i := 0; i < int(numItems); i++ { 35 | internal[int64(i)] = int64(0) 36 | } 37 | db, err := NewCLevelDB(fmt.Sprintf("test_%x", randStr(12)), "") 38 | if err != nil { 39 | b.Fatal(err.Error()) 40 | return 41 | } 42 | 43 | fmt.Println("ok, starting") 44 | b.StartTimer() 45 | 46 | for i := 0; i < b.N; i++ { 47 | // Write something 48 | { 49 | idx := (int64(rand.Int()) 
% numItems) 50 | internal[idx]++ 51 | val := internal[idx] 52 | idxBytes := int642Bytes(idx) 53 | valBytes := int642Bytes(val) 54 | db.Set( 55 | idxBytes, 56 | valBytes, 57 | ) 58 | } 59 | // Read something 60 | { 61 | idx := (int64(rand.Int()) % numItems) 62 | val := internal[idx] 63 | idxBytes := int642Bytes(idx) 64 | valBytes, err := db.Get(idxBytes) 65 | if err != nil { 66 | b.Error(err) 67 | } 68 | // fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) 69 | if val == 0 { 70 | if !bytes.Equal(valBytes, nil) { 71 | b.Errorf("Expected %v for %v, got %X", 72 | nil, idx, valBytes) 73 | break 74 | } 75 | } else { 76 | if len(valBytes) != 8 { 77 | b.Errorf("Expected length 8 for %v, got %X", 78 | idx, valBytes) 79 | break 80 | } 81 | valGot := bytes2Int64(valBytes) 82 | if val != valGot { 83 | b.Errorf("Expected %v for %v, got %v", 84 | val, idx, valGot) 85 | break 86 | } 87 | } 88 | } 89 | } 90 | 91 | db.Close() 92 | } 93 | 94 | func TestCLevelDBBackend(t *testing.T) { 95 | name := fmt.Sprintf("test_%x", randStr(12)) 96 | // Can't use "" (current directory) or "./" here because levigo.Open returns: 97 | // "Error initializing DB: IO error: test_XXX.db: Invalid argument" 98 | dir := os.TempDir() 99 | db, err := NewDB(name, CLevelDBBackend, dir) 100 | require.NoError(t, err) 101 | defer cleanupDBDir(dir, name) 102 | 103 | _, ok := db.(*CLevelDB) 104 | assert.True(t, ok) 105 | } 106 | 107 | func TestCLevelDBStats(t *testing.T) { 108 | name := fmt.Sprintf("test_%x", randStr(12)) 109 | dir := os.TempDir() 110 | db, err := NewDB(name, CLevelDBBackend, dir) 111 | require.NoError(t, err) 112 | defer cleanupDBDir(dir, name) 113 | 114 | assert.NotEmpty(t, db.Stats()) 115 | } 116 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | # 2 | # This codecov.yml is the default configuration for 3 | # all repositories on Codecov. 
You may adjust the settings 4 | # below in your own codecov.yml in your repository. 5 | # 6 | codecov: 7 | require_ci_to_pass: yes 8 | 9 | coverage: 10 | precision: 2 11 | round: down 12 | range: 70...100 13 | 14 | status: 15 | # Learn more at https://docs.codecov.io/docs/commit-status 16 | project: 17 | default: 18 | threshold: 1% # allow this much decrease on project 19 | 20 | comment: 21 | layout: "reach, diff, files, tree" 22 | behavior: default # update if exists else create new 23 | require_changes: true 24 | 25 | ignore: 26 | - "remotedb/proto" 27 | -------------------------------------------------------------------------------- /common_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "io/ioutil" 7 | "math/rand" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | //---------------------------------------- 15 | // Helper functions. 
16 | 
17 | func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) {
18 | 	valueGot, err := db.Get(key)
19 | 	assert.NoError(t, err)
20 | 	assert.Equal(t, valueWanted, valueGot)
21 | }
22 | 
23 | func checkValid(t *testing.T, itr Iterator, expected bool) {
24 | 	valid := itr.Valid()
25 | 	require.Equal(t, expected, valid)
26 | }
27 | 
28 | func checkNext(t *testing.T, itr Iterator, expected bool) {
29 | 	itr.Next()
30 | 	// assert.NoError(t, err) TODO: look at fixing this
31 | 	valid := itr.Valid()
32 | 	require.Equal(t, expected, valid)
33 | }
34 | 
35 | func checkNextPanics(t *testing.T, itr Iterator) {
36 | 	assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected an error but didn't")
37 | }
38 | 
39 | func checkDomain(t *testing.T, itr Iterator, start, end []byte) {
40 | 	ds, de := itr.Domain()
41 | 	assert.Equal(t, start, ds, "checkDomain domain start incorrect")
42 | 	assert.Equal(t, end, de, "checkDomain domain end incorrect")
43 | }
44 | 
45 | func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) {
46 | 	v := itr.Value()
47 | 
48 | 	k := itr.Key()
49 | 
50 | 	assert.Exactly(t, key, k)
51 | 	assert.Exactly(t, value, v)
52 | }
53 | 
54 | func checkInvalid(t *testing.T, itr Iterator) {
55 | 	checkValid(t, itr, false)
56 | 	checkKeyPanics(t, itr)
57 | 	checkValuePanics(t, itr)
58 | 	checkNextPanics(t, itr)
59 | }
60 | 
61 | func checkKeyPanics(t *testing.T, itr Iterator) {
62 | 	assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
63 | }
64 | 
65 | func checkValuePanics(t *testing.T, itr Iterator) {
66 | 	assert.Panics(t, func() { itr.Value() })
67 | }
68 | 
69 | func newTempDB(t *testing.T, backend BackendType) (db DB, dbDir string) {
70 | 	dirname, err := ioutil.TempDir("", "db_common_test")
71 | 	require.NoError(t, err)
72 | 	db, err = NewDB("testdb", backend, dirname)
73 | 	require.NoError(t, err)
74 | 	return db, dirname
75 | }
76 | // benchmarkRangeScans fills db with dbSize sequential int64 keys, then times iteration over random rangeSize-key windows.
77 | func benchmarkRangeScans(b *testing.B, db DB, dbSize int64) {
78 | 	b.StopTimer()
79 | 
80 | 	rangeSize := int64(10000)
81 | 	if dbSize <= rangeSize { // rand.Int63n(dbSize-rangeSize) below panics unless the difference is positive
82 | 		b.Fatalf("db size %v must be greater than range size %v", dbSize, rangeSize)
83 | 	}
84 | 
85 | 	for i := int64(0); i < dbSize; i++ {
86 | 		bytes := int642Bytes(i)
87 | 		err := db.Set(bytes, bytes)
88 | 		if err != nil {
89 | 			// require.NoError() is very expensive (according to profiler), so check manually
90 | 			b.Fatal(err)
91 | 		}
92 | 	}
93 | 	b.StartTimer()
94 | 
95 | 	for i := 0; i < b.N; i++ {
96 | 		start := rand.Int63n(dbSize - rangeSize)
97 | 		end := start + rangeSize
98 | 		iter, err := db.Iterator(int642Bytes(start), int642Bytes(end))
99 | 		require.NoError(b, err)
100 | 		count := 0
101 | 		for ; iter.Valid(); iter.Next() {
102 | 			count++
103 | 		}
104 | 		iter.Close()
105 | 		require.EqualValues(b, rangeSize, count)
106 | 	}
107 | }
108 | // benchmarkRandomReadsWrites interleaves random Set/Get calls over a million-key keyspace, checking reads against an in-memory mirror.
109 | func benchmarkRandomReadsWrites(b *testing.B, db DB) {
110 | 	b.StopTimer()
111 | 
112 | 	// create dummy data
113 | 	const numItems = int64(1000000)
114 | 	internal := map[int64]int64{}
115 | 	for i := 0; i < int(numItems); i++ {
116 | 		internal[int64(i)] = int64(0)
117 | 	}
118 | 
119 | 	// fmt.Println("ok, starting")
120 | 	b.StartTimer()
121 | 
122 | 	for i := 0; i < b.N; i++ {
123 | 		// Write something
124 | 		{
125 | 			idx := rand.Int63n(numItems)
126 | 			internal[idx]++
127 | 			val := internal[idx]
128 | 			idxBytes := int642Bytes(idx)
129 | 			valBytes := int642Bytes(val)
130 | 			err := db.Set(idxBytes, valBytes)
131 | 			if err != nil {
132 | 				// require.NoError() is very expensive (according to profiler), so check manually
133 | 				b.Fatal(err)
134 | 			}
135 | 		}
136 | 
137 | 		// Read something
138 | 		{
139 | 			idx := rand.Int63n(numItems)
140 | 			valExp := internal[idx]
141 | 			idxBytes := int642Bytes(idx)
142 | 			valBytes, err := db.Get(idxBytes)
143 | 			if err != nil {
144 | 				// require.NoError() is very expensive (according to profiler), so check manually
145 | 				b.Fatal(err)
146 | 			}
147 | 			if valExp == 0 {
148 | 				if !bytes.Equal(valBytes, nil) {
149 | 					b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
150 | 					break
151 | 				}
152 | 			} else {
153 | 				if len(valBytes) != 8 {
154 | 					b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
155 | 					break
156 | 				}
157 | 				valGot := bytes2Int64(valBytes)
158 | 				if valExp != valGot {
159 | 					b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
160 | 					break
161 | 				}
162 | 			}
163 | 		}
164 | 
165 | 	}
166 | }
167 | 
168 | func int642Bytes(i int64) []byte {
169 | 	buf := make([]byte, 8)
170 | 	binary.BigEndian.PutUint64(buf, uint64(i))
171 | 	return buf
172 | }
173 | 
174 | func bytes2Int64(buf []byte) int64 {
175 | 	return int64(binary.BigEndian.Uint64(buf))
176 | }
177 | 
--------------------------------------------------------------------------------
/db.go:
--------------------------------------------------------------------------------
1 | package db
2 | 
3 | import (
4 | 	"fmt"
5 | 	"strings"
6 | )
7 | 
8 | type BackendType string
9 | 
10 | // These are valid backend types.
11 | const (
12 | 	// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
13 | 	// popular implementation)
14 | 	//   - pure go
15 | 	//   - stable
16 | 	GoLevelDBBackend BackendType = "goleveldb"
17 | 	// CLevelDBBackend represents cleveldb (uses levigo wrapper)
18 | 	//   - fast
19 | 	//   - requires gcc
20 | 	//   - use cleveldb build tag (go build -tags cleveldb)
21 | 	CLevelDBBackend BackendType = "cleveldb"
22 | 	// MemDBBackend represents in-memory key value store, which is mostly used
23 | 	// for testing.
24 | 	MemDBBackend BackendType = "memdb"
25 | 	// BoltDBBackend represents bolt (uses etcd's fork of bolt -
26 | 	// github.com/etcd-io/bbolt)
27 | 	//   - EXPERIMENTAL
28 | 	//   - may be faster in some use-cases (random reads - indexer)
29 | 	//   - use boltdb build tag (go build -tags boltdb)
30 | 	BoltDBBackend BackendType = "boltdb"
31 | 	// RocksDBBackend represents rocksdb (uses github.com/cosmos/gorocksdb)
32 | 	//   - EXPERIMENTAL
33 | 	//   - requires gcc
34 | 	//   - use rocksdb build tag (go build -tags rocksdb)
35 | 	RocksDBBackend BackendType = "rocksdb"
36 | 	// BadgerDBBackend represents badgerdb (uses github.com/dgraph-io/badger/v3) - use badgerdb build tag (go build -tags badgerdb)
37 | 	BadgerDBBackend BackendType = "badgerdb"
38 | )
39 | 
40 | type dbCreator func(name string, dir string) (DB, error)
41 | 
42 | var backends = map[BackendType]dbCreator{}
43 | // registerDBCreator registers creator under backend. Unless force is true, an existing registration is left in place.
44 | func registerDBCreator(backend BackendType, creator dbCreator, force bool) {
45 | 	_, ok := backends[backend]
46 | 	if !force && ok {
47 | 		return
48 | 	}
49 | 	backends[backend] = creator
50 | }
51 | 
52 | // NewDB creates a new database of type backend with the given name.
53 | func NewDB(name string, backend BackendType, dir string) (DB, error) { 54 | dbCreator, ok := backends[backend] 55 | if !ok { 56 | keys := make([]string, 0, len(backends)) 57 | for k := range backends { 58 | keys = append(keys, string(k)) 59 | } 60 | return nil, fmt.Errorf("unknown db_backend %s, expected one of %v", 61 | backend, strings.Join(keys, ",")) 62 | } 63 | 64 | db, err := dbCreator(name, dir) 65 | if err != nil { 66 | return nil, fmt.Errorf("failed to initialize database: %w", err) 67 | } 68 | return db, nil 69 | } 70 | -------------------------------------------------------------------------------- /db_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestDBIteratorSingleKey(t *testing.T) { 12 | for backend := range backends { 13 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 14 | db, dir := newTempDB(t, backend) 15 | defer os.RemoveAll(dir) 16 | 17 | err := db.SetSync(bz("1"), bz("value_1")) 18 | assert.NoError(t, err) 19 | itr, err := db.Iterator(nil, nil) 20 | assert.NoError(t, err) 21 | 22 | checkValid(t, itr, true) 23 | checkNext(t, itr, false) 24 | checkValid(t, itr, false) 25 | checkNextPanics(t, itr) 26 | 27 | // Once invalid... 
28 | checkInvalid(t, itr) 29 | }) 30 | } 31 | } 32 | 33 | func TestDBIteratorTwoKeys(t *testing.T) { 34 | for backend := range backends { 35 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 36 | db, dir := newTempDB(t, backend) 37 | defer os.RemoveAll(dir) 38 | 39 | err := db.SetSync(bz("1"), bz("value_1")) 40 | assert.NoError(t, err) 41 | 42 | err = db.SetSync(bz("2"), bz("value_1")) 43 | assert.NoError(t, err) 44 | 45 | { // Fail by calling Next too much 46 | itr, err := db.Iterator(nil, nil) 47 | assert.NoError(t, err) 48 | checkValid(t, itr, true) 49 | 50 | checkNext(t, itr, true) 51 | checkValid(t, itr, true) 52 | 53 | checkNext(t, itr, false) 54 | checkValid(t, itr, false) 55 | 56 | checkNextPanics(t, itr) 57 | 58 | // Once invalid... 59 | checkInvalid(t, itr) 60 | } 61 | }) 62 | } 63 | } 64 | 65 | func TestDBIteratorMany(t *testing.T) { 66 | for backend := range backends { 67 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 68 | db, dir := newTempDB(t, backend) 69 | defer os.RemoveAll(dir) 70 | 71 | keys := make([][]byte, 100) 72 | for i := 0; i < 100; i++ { 73 | keys[i] = []byte{byte(i)} 74 | } 75 | 76 | value := []byte{5} 77 | for _, k := range keys { 78 | err := db.Set(k, value) 79 | assert.NoError(t, err) 80 | } 81 | 82 | itr, err := db.Iterator(nil, nil) 83 | assert.NoError(t, err) 84 | 85 | defer itr.Close() 86 | for ; itr.Valid(); itr.Next() { 87 | key := itr.Key() 88 | value = itr.Value() 89 | value1, err := db.Get(key) 90 | assert.NoError(t, err) 91 | assert.Equal(t, value1, value) 92 | } 93 | }) 94 | } 95 | } 96 | 97 | func TestDBIteratorEmpty(t *testing.T) { 98 | for backend := range backends { 99 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 100 | db, dir := newTempDB(t, backend) 101 | defer os.RemoveAll(dir) 102 | 103 | itr, err := db.Iterator(nil, nil) 104 | assert.NoError(t, err) 105 | 106 | checkInvalid(t, itr) 107 | }) 108 | } 109 | } 110 | 111 | func TestDBIteratorEmptyBeginAfter(t 
*testing.T) {
112 | 	for backend := range backends {
113 | 		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
114 | 			db, dir := newTempDB(t, backend)
115 | 			defer os.RemoveAll(dir)
116 | 
117 | 			itr, err := db.Iterator(bz("1"), nil)
118 | 			assert.NoError(t, err)
119 | 
120 | 			checkInvalid(t, itr)
121 | 		})
122 | 	}
123 | }
124 | 
125 | func TestDBIteratorNonemptyBeginAfter(t *testing.T) {
126 | 	for backend := range backends {
127 | 		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
128 | 			db, dir := newTempDB(t, backend)
129 | 			defer os.RemoveAll(dir)
130 | 
131 | 			err := db.SetSync(bz("1"), bz("value_1"))
132 | 			assert.NoError(t, err)
133 | 			itr, err := db.Iterator(bz("2"), nil)
134 | 			assert.NoError(t, err)
135 | 
136 | 			checkInvalid(t, itr)
137 | 		})
138 | 	}
139 | }
140 | 
--------------------------------------------------------------------------------
/docs/how_to_release.md:
--------------------------------------------------------------------------------
1 | # How To Release tm-db
2 | 
3 | This document provides a step-by-step guide for creating a release of tm-db.
4 | 
5 | ## 1. Update the changelog
6 | 
7 | Open the `CHANGELOG.md` at the root of the repository.
8 | Amend the top of this file with a section for the latest version (0.6.x etc).
9 | Be sure to include any bug fixes, improvements, dependency upgrades, and breaking changes included in this version.
10 | (It's OK to exclude changes to tooling dependencies, like updates to Github Actions.)
11 | Finally, create a pull request for the changelog update.
12 | Once the tests pass and the pull request is approved, merge the change into master.
13 | 
14 | ## 2. Tag the latest commit with the latest version
15 | 
16 | tm-db is provided as a golang [module](https://blog.golang.org/publishing-go-modules), which relies on git tags for versioning information.
17 | 
18 | Tag the changelog commit in master created in step 1 with the latest version.
19 | Be sure to prefix the version tag with `v`. For example, `v0.6.5` for version 0.6.5.
20 | This tagging can be done [using github](https://docs.github.com/en/desktop/contributing-and-collaborating-using-github-desktop/managing-commits/managing-tags#creating-a-tag) or [using git](https://git-scm.com/book/en/v2/Git-Basics-Tagging) on the command line.
21 | 
22 | Note that the golang modules tooling expects tags to be immutable.
23 | If you make a mistake after pushing a tag, make a new tag and start over rather than fix and re-push the old tag.
24 | ## 3. Create a github release
25 | 
26 | Finally, create a github release.
27 | To create a github release, follow the steps in the [github release documentation](https://docs.github.com/en/github/administering-a-repository/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release).
28 | 
29 | When creating the github release, select the `Tag version` created in step 2.
30 | Use the version tag as the release title and paste in the changelog information for this release in the `Describe this release` section.
31 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tendermint/tm-db 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/cosmos/gorocksdb v1.2.0 7 | github.com/dgraph-io/badger/v3 v3.2103.2 8 | github.com/gogo/protobuf v1.3.2 9 | github.com/google/btree v1.1.2 10 | github.com/jmhodges/levigo v1.0.0 11 | github.com/stretchr/testify v1.8.0 12 | github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca 13 | go.etcd.io/bbolt v1.3.6 14 | google.golang.org/grpc v1.50.1 15 | ) 16 | 17 | require ( 18 | github.com/cespare/xxhash v1.1.0 // indirect 19 | github.com/cespare/xxhash/v2 v2.1.1 // indirect 20 | github.com/davecgh/go-spew v1.1.1 // indirect 21 | github.com/dgraph-io/ristretto v0.1.0 // indirect 22 | github.com/dustin/go-humanize v1.0.0 // indirect 23 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect 24 | github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect 25 | github.com/golang/protobuf v1.5.2 // indirect 26 | github.com/golang/snappy v0.0.3 // indirect 27 | github.com/google/flatbuffers v1.12.1 // indirect 28 | github.com/klauspost/compress v1.12.3 // indirect 29 | github.com/pkg/errors v0.9.1 // indirect 30 | github.com/pmezard/go-difflib v1.0.0 // indirect 31 | go.opencensus.io v0.22.5 // indirect 32 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect 33 | golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 // indirect 34 | golang.org/x/text v0.3.3 // indirect 35 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect 36 | google.golang.org/protobuf v1.27.1 // indirect 37 | gopkg.in/yaml.v3 v3.0.1 // indirect 38 | ) 39 | 40 | // Breaking changes were released with the wrong tag (use v0.6.6 or later). 
41 | retract v0.6.5 42 | -------------------------------------------------------------------------------- /goleveldb.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/syndtr/goleveldb/leveldb" 8 | "github.com/syndtr/goleveldb/leveldb/errors" 9 | "github.com/syndtr/goleveldb/leveldb/opt" 10 | "github.com/syndtr/goleveldb/leveldb/util" 11 | ) 12 | 13 | func init() { 14 | dbCreator := func(name string, dir string) (DB, error) { 15 | return NewGoLevelDB(name, dir) 16 | } 17 | registerDBCreator(GoLevelDBBackend, dbCreator, false) 18 | } 19 | 20 | type GoLevelDB struct { 21 | db *leveldb.DB 22 | } 23 | 24 | var _ DB = (*GoLevelDB)(nil) 25 | 26 | func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { 27 | return NewGoLevelDBWithOpts(name, dir, nil) 28 | } 29 | 30 | func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) { 31 | dbPath := filepath.Join(dir, name+".db") 32 | db, err := leveldb.OpenFile(dbPath, o) 33 | if err != nil { 34 | return nil, err 35 | } 36 | database := &GoLevelDB{ 37 | db: db, 38 | } 39 | return database, nil 40 | } 41 | 42 | // Get implements DB. 43 | func (db *GoLevelDB) Get(key []byte) ([]byte, error) { 44 | if len(key) == 0 { 45 | return nil, errKeyEmpty 46 | } 47 | res, err := db.db.Get(key, nil) 48 | if err != nil { 49 | if err == errors.ErrNotFound { 50 | return nil, nil 51 | } 52 | return nil, err 53 | } 54 | return res, nil 55 | } 56 | 57 | // Has implements DB. 58 | func (db *GoLevelDB) Has(key []byte) (bool, error) { 59 | bytes, err := db.Get(key) 60 | if err != nil { 61 | return false, err 62 | } 63 | return bytes != nil, nil 64 | } 65 | 66 | // Set implements DB. 
67 | func (db *GoLevelDB) Set(key []byte, value []byte) error { 68 | if len(key) == 0 { 69 | return errKeyEmpty 70 | } 71 | if value == nil { 72 | return errValueNil 73 | } 74 | if err := db.db.Put(key, value, nil); err != nil { 75 | return err 76 | } 77 | return nil 78 | } 79 | 80 | // SetSync implements DB. 81 | func (db *GoLevelDB) SetSync(key []byte, value []byte) error { 82 | if len(key) == 0 { 83 | return errKeyEmpty 84 | } 85 | if value == nil { 86 | return errValueNil 87 | } 88 | if err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}); err != nil { 89 | return err 90 | } 91 | return nil 92 | } 93 | 94 | // Delete implements DB. 95 | func (db *GoLevelDB) Delete(key []byte) error { 96 | if len(key) == 0 { 97 | return errKeyEmpty 98 | } 99 | if err := db.db.Delete(key, nil); err != nil { 100 | return err 101 | } 102 | return nil 103 | } 104 | 105 | // DeleteSync implements DB. 106 | func (db *GoLevelDB) DeleteSync(key []byte) error { 107 | if len(key) == 0 { 108 | return errKeyEmpty 109 | } 110 | err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) 111 | if err != nil { 112 | return err 113 | } 114 | return nil 115 | } 116 | 117 | func (db *GoLevelDB) DB() *leveldb.DB { 118 | return db.db 119 | } 120 | 121 | // Close implements DB. 122 | func (db *GoLevelDB) Close() error { 123 | if err := db.db.Close(); err != nil { 124 | return err 125 | } 126 | return nil 127 | } 128 | 129 | // Print implements DB. 130 | func (db *GoLevelDB) Print() error { 131 | str, err := db.db.GetProperty("leveldb.stats") 132 | if err != nil { 133 | return err 134 | } 135 | fmt.Printf("%v\n", str) 136 | 137 | itr := db.db.NewIterator(nil, nil) 138 | for itr.Next() { 139 | key := itr.Key() 140 | value := itr.Value() 141 | fmt.Printf("[%X]:\t[%X]\n", key, value) 142 | } 143 | return nil 144 | } 145 | 146 | // Stats implements DB. 
147 | func (db *GoLevelDB) Stats() map[string]string { 148 | keys := []string{ 149 | "leveldb.num-files-at-level{n}", 150 | "leveldb.stats", 151 | "leveldb.sstables", 152 | "leveldb.blockpool", 153 | "leveldb.cachedblock", 154 | "leveldb.openedtables", 155 | "leveldb.alivesnaps", 156 | "leveldb.aliveiters", 157 | } 158 | 159 | stats := make(map[string]string) 160 | for _, key := range keys { 161 | str, err := db.db.GetProperty(key) 162 | if err == nil { 163 | stats[key] = str 164 | } 165 | } 166 | return stats 167 | } 168 | 169 | func (db *GoLevelDB) ForceCompact(start, limit []byte) error { 170 | return db.db.CompactRange(util.Range{Start: start, Limit: limit}) 171 | } 172 | 173 | // NewBatch implements DB. 174 | func (db *GoLevelDB) NewBatch() Batch { 175 | return newGoLevelDBBatch(db) 176 | } 177 | 178 | // Iterator implements DB. 179 | func (db *GoLevelDB) Iterator(start, end []byte) (Iterator, error) { 180 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 181 | return nil, errKeyEmpty 182 | } 183 | itr := db.db.NewIterator(&util.Range{Start: start, Limit: end}, nil) 184 | return newGoLevelDBIterator(itr, start, end, false), nil 185 | } 186 | 187 | // ReverseIterator implements DB. 
188 | func (db *GoLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { 189 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 190 | return nil, errKeyEmpty 191 | } 192 | itr := db.db.NewIterator(&util.Range{Start: start, Limit: end}, nil) 193 | return newGoLevelDBIterator(itr, start, end, true), nil 194 | } 195 | -------------------------------------------------------------------------------- /goleveldb_batch.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "github.com/syndtr/goleveldb/leveldb" 5 | "github.com/syndtr/goleveldb/leveldb/opt" 6 | ) 7 | 8 | type goLevelDBBatch struct { 9 | db *GoLevelDB 10 | batch *leveldb.Batch 11 | } 12 | 13 | var _ Batch = (*goLevelDBBatch)(nil) 14 | 15 | func newGoLevelDBBatch(db *GoLevelDB) *goLevelDBBatch { 16 | return &goLevelDBBatch{ 17 | db: db, 18 | batch: new(leveldb.Batch), 19 | } 20 | } 21 | 22 | // Set implements Batch. 23 | func (b *goLevelDBBatch) Set(key, value []byte) error { 24 | if len(key) == 0 { 25 | return errKeyEmpty 26 | } 27 | if value == nil { 28 | return errValueNil 29 | } 30 | if b.batch == nil { 31 | return errBatchClosed 32 | } 33 | b.batch.Put(key, value) 34 | return nil 35 | } 36 | 37 | // Delete implements Batch. 38 | func (b *goLevelDBBatch) Delete(key []byte) error { 39 | if len(key) == 0 { 40 | return errKeyEmpty 41 | } 42 | if b.batch == nil { 43 | return errBatchClosed 44 | } 45 | b.batch.Delete(key) 46 | return nil 47 | } 48 | 49 | // Write implements Batch. 50 | func (b *goLevelDBBatch) Write() error { 51 | return b.write(false) 52 | } 53 | 54 | // WriteSync implements Batch. 
55 | func (b *goLevelDBBatch) WriteSync() error { 56 | return b.write(true) 57 | } 58 | 59 | func (b *goLevelDBBatch) write(sync bool) error { 60 | if b.batch == nil { 61 | return errBatchClosed 62 | } 63 | err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: sync}) 64 | if err != nil { 65 | return err 66 | } 67 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 68 | return b.Close() 69 | } 70 | 71 | // Close implements Batch. 72 | func (b *goLevelDBBatch) Close() error { 73 | if b.batch != nil { 74 | b.batch.Reset() 75 | b.batch = nil 76 | } 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /goleveldb_iterator.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/syndtr/goleveldb/leveldb/iterator" 7 | ) 8 | 9 | type goLevelDBIterator struct { 10 | source iterator.Iterator 11 | start []byte 12 | end []byte 13 | isReverse bool 14 | isInvalid bool 15 | } 16 | 17 | var _ Iterator = (*goLevelDBIterator)(nil) 18 | 19 | func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { 20 | if isReverse { 21 | if end == nil { 22 | source.Last() 23 | } else { 24 | valid := source.Seek(end) 25 | if valid { 26 | eoakey := source.Key() // end or after key 27 | if bytes.Compare(end, eoakey) <= 0 { 28 | source.Prev() 29 | } 30 | } else { 31 | source.Last() 32 | } 33 | } 34 | } else { 35 | if start == nil { 36 | source.First() 37 | } else { 38 | source.Seek(start) 39 | } 40 | } 41 | return &goLevelDBIterator{ 42 | source: source, 43 | start: start, 44 | end: end, 45 | isReverse: isReverse, 46 | isInvalid: false, 47 | } 48 | } 49 | 50 | // Domain implements Iterator. 51 | func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { 52 | return itr.start, itr.end 53 | } 54 | 55 | // Valid implements Iterator. 
56 | func (itr *goLevelDBIterator) Valid() bool { 57 | // Once invalid, forever invalid. 58 | if itr.isInvalid { 59 | return false 60 | } 61 | 62 | // If source errors, invalid. 63 | if err := itr.Error(); err != nil { 64 | itr.isInvalid = true 65 | return false 66 | } 67 | 68 | // If source is invalid, invalid. 69 | if !itr.source.Valid() { 70 | itr.isInvalid = true 71 | return false 72 | } 73 | 74 | // If key is end or past it, invalid. 75 | start := itr.start 76 | end := itr.end 77 | key := itr.source.Key() 78 | 79 | if itr.isReverse { 80 | if start != nil && bytes.Compare(key, start) < 0 { 81 | itr.isInvalid = true 82 | return false 83 | } 84 | } else { 85 | if end != nil && bytes.Compare(end, key) <= 0 { 86 | itr.isInvalid = true 87 | return false 88 | } 89 | } 90 | 91 | // Valid 92 | return true 93 | } 94 | 95 | // Key implements Iterator. 96 | func (itr *goLevelDBIterator) Key() []byte { 97 | // Key returns a copy of the current key. 98 | // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 99 | itr.assertIsValid() 100 | return cp(itr.source.Key()) 101 | } 102 | 103 | // Value implements Iterator. 104 | func (itr *goLevelDBIterator) Value() []byte { 105 | // Value returns a copy of the current value. 106 | // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 107 | itr.assertIsValid() 108 | return cp(itr.source.Value()) 109 | } 110 | 111 | // Next implements Iterator. 112 | func (itr *goLevelDBIterator) Next() { 113 | itr.assertIsValid() 114 | if itr.isReverse { 115 | itr.source.Prev() 116 | } else { 117 | itr.source.Next() 118 | } 119 | } 120 | 121 | // Error implements Iterator. 122 | func (itr *goLevelDBIterator) Error() error { 123 | return itr.source.Error() 124 | } 125 | 126 | // Close implements Iterator. 
127 | func (itr *goLevelDBIterator) Close() error { 128 | itr.source.Release() 129 | return nil 130 | } 131 | 132 | func (itr goLevelDBIterator) assertIsValid() { 133 | if !itr.Valid() { 134 | panic("iterator is invalid") 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /goleveldb_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | "github.com/syndtr/goleveldb/leveldb/opt" 9 | ) 10 | 11 | func TestGoLevelDBNewGoLevelDB(t *testing.T) { 12 | name := fmt.Sprintf("test_%x", randStr(12)) 13 | defer cleanupDBDir("", name) 14 | 15 | // Test we can't open the db twice for writing 16 | wr1, err := NewGoLevelDB(name, "") 17 | require.Nil(t, err) 18 | _, err = NewGoLevelDB(name, "") 19 | require.NotNil(t, err) 20 | wr1.Close() // Close the db to release the lock 21 | 22 | // Test we can open the db twice for reading only 23 | ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) 24 | require.Nil(t, err) 25 | defer ro1.Close() 26 | ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) 27 | require.Nil(t, err) 28 | defer ro2.Close() 29 | } 30 | 31 | func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) { 32 | name := fmt.Sprintf("test_%x", randStr(12)) 33 | db, err := NewGoLevelDB(name, "") 34 | if err != nil { 35 | b.Fatal(err) 36 | } 37 | defer func() { 38 | db.Close() 39 | cleanupDBDir("", name) 40 | }() 41 | 42 | benchmarkRandomReadsWrites(b, db) 43 | } 44 | -------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | GOTOOLS = github.com/golangci/golangci-lint/cmd/golangci-lint 2 | PACKAGES=$(shell go list ./...) 
INCLUDE = -I=${GOPATH}/src/github.com/tendermint/tm-db -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf

export GO111MODULE = on

all: lint test

### go tests
## By default this will only test memdb & goleveldb
test:
	@echo "--> Running go test"
	@go test $(PACKAGES) -v

test-cleveldb:
	@echo "--> Running go test"
	@go test $(PACKAGES) -tags cleveldb -v

test-rocksdb:
	@echo "--> Running go test"
	@go test $(PACKAGES) -tags rocksdb -v

test-boltdb:
	@echo "--> Running go test"
	@go test $(PACKAGES) -tags boltdb -v

test-badgerdb:
	@echo "--> Running go test"
	@go test $(PACKAGES) -tags badgerdb -v

test-all:
	@echo "--> Running go test"
	@go test $(PACKAGES) -tags cleveldb,boltdb,rocksdb,badgerdb -v

# Declare the test targets phony so a file or directory named e.g. "test"
# cannot shadow them.
.PHONY: all test test-cleveldb test-rocksdb test-boltdb test-badgerdb test-all

lint:
	@echo "--> Running linter"
	@golangci-lint run
	@go mod verify
.PHONY: lint

format:
	find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s
	find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w
.PHONY: format

tools:
	go get -v $(GOTOOLS)
.PHONY: tools

# generates certificates for TLS testing in remotedb
gen_certs: clean_certs
	certstrap init --common-name "tendermint.com" --passphrase ""
	certstrap request-cert --common-name "remotedb" -ip "127.0.0.1" --passphrase ""
	certstrap sign "remotedb" --CA "tendermint.com" --passphrase ""
	mv out/remotedb.crt remotedb/test.crt
	mv out/remotedb.key remotedb/test.key
	rm -rf out

# Remove the generated test certificates. gen_certs writes them to remotedb/,
# so clean from the same location (the old db/remotedb/ paths were stale and
# never matched anything).
clean_certs:
	rm -f remotedb/test.crt
	rm -f remotedb/test.key
.PHONY: gen_certs clean_certs

%.pb.go: %.proto
	## If you get the following error,
	## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
	## See https://stackoverflow.com/a/25518702
	## Note the $< here is substituted for the %.proto
	## Note the $@ here is substituted for the %.pb.go
	protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:../../..


protoc_remotedb: remotedb/proto/defs.pb.go
.PHONY: protoc_remotedb

-------------------------------------------------------------------------------- /memdb.go: --------------------------------------------------------------------------------

package db

import (
	"bytes"
	"fmt"
	"sync"

	"github.com/google/btree"
)

const (
	// The approximate number of items and children per B-tree node. Tuned with benchmarks.
	bTreeDegree = 32
)

func init() {
	registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
		return NewMemDB(), nil
	}, false)
}

// item is a btree.Item with byte slices as keys and values
type item struct {
	key   []byte
	value []byte
}

// Less implements btree.Item.
29 | func (i item) Less(other btree.Item) bool { 30 | // this considers nil == []byte{}, but that's ok since we handle nil endpoints 31 | // in iterators specially anyway 32 | return bytes.Compare(i.key, other.(item).key) == -1 33 | } 34 | 35 | // newKey creates a new key item. 36 | func newKey(key []byte) item { 37 | return item{key: key} 38 | } 39 | 40 | // newPair creates a new pair item. 41 | func newPair(key, value []byte) item { 42 | return item{key: key, value: value} 43 | } 44 | 45 | // MemDB is an in-memory database backend using a B-tree for storage. 46 | // 47 | // For performance reasons, all given and returned keys and values are pointers to the in-memory 48 | // database, so modifying them will cause the stored values to be modified as well. All DB methods 49 | // already specify that keys and values should be considered read-only, but this is especially 50 | // important with MemDB. 51 | type MemDB struct { 52 | mtx sync.RWMutex 53 | btree *btree.BTree 54 | } 55 | 56 | var _ DB = (*MemDB)(nil) 57 | 58 | // NewMemDB creates a new in-memory database. 59 | func NewMemDB() *MemDB { 60 | database := &MemDB{ 61 | btree: btree.New(bTreeDegree), 62 | } 63 | return database 64 | } 65 | 66 | // Get implements DB. 67 | func (db *MemDB) Get(key []byte) ([]byte, error) { 68 | if len(key) == 0 { 69 | return nil, errKeyEmpty 70 | } 71 | db.mtx.RLock() 72 | defer db.mtx.RUnlock() 73 | 74 | i := db.btree.Get(newKey(key)) 75 | if i != nil { 76 | return i.(item).value, nil 77 | } 78 | return nil, nil 79 | } 80 | 81 | // Has implements DB. 82 | func (db *MemDB) Has(key []byte) (bool, error) { 83 | if len(key) == 0 { 84 | return false, errKeyEmpty 85 | } 86 | db.mtx.RLock() 87 | defer db.mtx.RUnlock() 88 | 89 | return db.btree.Has(newKey(key)), nil 90 | } 91 | 92 | // Set implements DB. 
93 | func (db *MemDB) Set(key []byte, value []byte) error { 94 | if len(key) == 0 { 95 | return errKeyEmpty 96 | } 97 | if value == nil { 98 | return errValueNil 99 | } 100 | db.mtx.Lock() 101 | defer db.mtx.Unlock() 102 | 103 | db.set(key, value) 104 | return nil 105 | } 106 | 107 | // set sets a value without locking the mutex. 108 | func (db *MemDB) set(key []byte, value []byte) { 109 | db.btree.ReplaceOrInsert(newPair(key, value)) 110 | } 111 | 112 | // SetSync implements DB. 113 | func (db *MemDB) SetSync(key []byte, value []byte) error { 114 | return db.Set(key, value) 115 | } 116 | 117 | // Delete implements DB. 118 | func (db *MemDB) Delete(key []byte) error { 119 | if len(key) == 0 { 120 | return errKeyEmpty 121 | } 122 | db.mtx.Lock() 123 | defer db.mtx.Unlock() 124 | 125 | db.delete(key) 126 | return nil 127 | } 128 | 129 | // delete deletes a key without locking the mutex. 130 | func (db *MemDB) delete(key []byte) { 131 | db.btree.Delete(newKey(key)) 132 | } 133 | 134 | // DeleteSync implements DB. 135 | func (db *MemDB) DeleteSync(key []byte) error { 136 | return db.Delete(key) 137 | } 138 | 139 | // Close implements DB. 140 | func (db *MemDB) Close() error { 141 | // Close is a noop since for an in-memory database, we don't have a destination to flush 142 | // contents to nor do we want any data loss on invoking Close(). 143 | // See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 144 | return nil 145 | } 146 | 147 | // Print implements DB. 148 | func (db *MemDB) Print() error { 149 | db.mtx.RLock() 150 | defer db.mtx.RUnlock() 151 | 152 | db.btree.Ascend(func(i btree.Item) bool { 153 | item := i.(item) 154 | fmt.Printf("[%X]:\t[%X]\n", item.key, item.value) 155 | return true 156 | }) 157 | return nil 158 | } 159 | 160 | // Stats implements DB. 
161 | func (db *MemDB) Stats() map[string]string { 162 | db.mtx.RLock() 163 | defer db.mtx.RUnlock() 164 | 165 | stats := make(map[string]string) 166 | stats["database.type"] = "memDB" 167 | stats["database.size"] = fmt.Sprintf("%d", db.btree.Len()) 168 | return stats 169 | } 170 | 171 | // NewBatch implements DB. 172 | func (db *MemDB) NewBatch() Batch { 173 | return newMemDBBatch(db) 174 | } 175 | 176 | // Iterator implements DB. 177 | // Takes out a read-lock on the database until the iterator is closed. 178 | func (db *MemDB) Iterator(start, end []byte) (Iterator, error) { 179 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 180 | return nil, errKeyEmpty 181 | } 182 | return newMemDBIterator(db, start, end, false), nil 183 | } 184 | 185 | // ReverseIterator implements DB. 186 | // Takes out a read-lock on the database until the iterator is closed. 187 | func (db *MemDB) ReverseIterator(start, end []byte) (Iterator, error) { 188 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 189 | return nil, errKeyEmpty 190 | } 191 | return newMemDBIterator(db, start, end, true), nil 192 | } 193 | 194 | // IteratorNoMtx makes an iterator with no mutex. 195 | func (db *MemDB) IteratorNoMtx(start, end []byte) (Iterator, error) { 196 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 197 | return nil, errKeyEmpty 198 | } 199 | return newMemDBIteratorMtxChoice(db, start, end, false, false), nil 200 | } 201 | 202 | // ReverseIteratorNoMtx makes an iterator with no mutex. 
203 | func (db *MemDB) ReverseIteratorNoMtx(start, end []byte) (Iterator, error) { 204 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 205 | return nil, errKeyEmpty 206 | } 207 | return newMemDBIteratorMtxChoice(db, start, end, true, false), nil 208 | } 209 | -------------------------------------------------------------------------------- /memdb_batch.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "fmt" 4 | 5 | // memDBBatch operations 6 | type opType int 7 | 8 | const ( 9 | opTypeSet opType = iota + 1 10 | opTypeDelete 11 | ) 12 | 13 | type operation struct { 14 | opType 15 | key []byte 16 | value []byte 17 | } 18 | 19 | // memDBBatch handles in-memory batching. 20 | type memDBBatch struct { 21 | db *MemDB 22 | ops []operation 23 | } 24 | 25 | var _ Batch = (*memDBBatch)(nil) 26 | 27 | // newMemDBBatch creates a new memDBBatch 28 | func newMemDBBatch(db *MemDB) *memDBBatch { 29 | return &memDBBatch{ 30 | db: db, 31 | ops: []operation{}, 32 | } 33 | } 34 | 35 | // Set implements Batch. 36 | func (b *memDBBatch) Set(key, value []byte) error { 37 | if len(key) == 0 { 38 | return errKeyEmpty 39 | } 40 | if value == nil { 41 | return errValueNil 42 | } 43 | if b.ops == nil { 44 | return errBatchClosed 45 | } 46 | b.ops = append(b.ops, operation{opTypeSet, key, value}) 47 | return nil 48 | } 49 | 50 | // Delete implements Batch. 51 | func (b *memDBBatch) Delete(key []byte) error { 52 | if len(key) == 0 { 53 | return errKeyEmpty 54 | } 55 | if b.ops == nil { 56 | return errBatchClosed 57 | } 58 | b.ops = append(b.ops, operation{opTypeDelete, key, nil}) 59 | return nil 60 | } 61 | 62 | // Write implements Batch. 
63 | func (b *memDBBatch) Write() error { 64 | if b.ops == nil { 65 | return errBatchClosed 66 | } 67 | b.db.mtx.Lock() 68 | defer b.db.mtx.Unlock() 69 | 70 | for _, op := range b.ops { 71 | switch op.opType { 72 | case opTypeSet: 73 | b.db.set(op.key, op.value) 74 | case opTypeDelete: 75 | b.db.delete(op.key) 76 | default: 77 | return fmt.Errorf("unknown operation type %v (%v)", op.opType, op) 78 | } 79 | } 80 | 81 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 82 | return b.Close() 83 | } 84 | 85 | // WriteSync implements Batch. 86 | func (b *memDBBatch) WriteSync() error { 87 | return b.Write() 88 | } 89 | 90 | // Close implements Batch. 91 | func (b *memDBBatch) Close() error { 92 | b.ops = nil 93 | return nil 94 | } 95 | -------------------------------------------------------------------------------- /memdb_iterator.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | 7 | "github.com/google/btree" 8 | ) 9 | 10 | const ( 11 | // Size of the channel buffer between traversal goroutine and iterator. Using an unbuffered 12 | // channel causes two context switches per item sent, while buffering allows more work per 13 | // context switch. Tuned with benchmarks. 14 | chBufferSize = 64 15 | ) 16 | 17 | // memDBIterator is a memDB iterator. 18 | type memDBIterator struct { 19 | ch <-chan *item 20 | cancel context.CancelFunc 21 | item *item 22 | start []byte 23 | end []byte 24 | useMtx bool 25 | } 26 | 27 | var _ Iterator = (*memDBIterator)(nil) 28 | 29 | // newMemDBIterator creates a new memDBIterator. 
30 | func newMemDBIterator(db *MemDB, start []byte, end []byte, reverse bool) *memDBIterator { 31 | return newMemDBIteratorMtxChoice(db, start, end, reverse, true) 32 | } 33 | 34 | func newMemDBIteratorMtxChoice(db *MemDB, start []byte, end []byte, reverse bool, useMtx bool) *memDBIterator { 35 | ctx, cancel := context.WithCancel(context.Background()) 36 | ch := make(chan *item, chBufferSize) 37 | iter := &memDBIterator{ 38 | ch: ch, 39 | cancel: cancel, 40 | start: start, 41 | end: end, 42 | useMtx: useMtx, 43 | } 44 | 45 | if useMtx { 46 | db.mtx.RLock() 47 | } 48 | go func() { 49 | if useMtx { 50 | defer db.mtx.RUnlock() 51 | } 52 | // Because we use [start, end) for reverse ranges, while btree uses (start, end], we need 53 | // the following variables to handle some reverse iteration conditions ourselves. 54 | var ( 55 | skipEqual []byte 56 | abortLessThan []byte 57 | ) 58 | visitor := func(i btree.Item) bool { 59 | item := i.(item) 60 | if skipEqual != nil && bytes.Equal(item.key, skipEqual) { 61 | skipEqual = nil 62 | return true 63 | } 64 | if abortLessThan != nil && bytes.Compare(item.key, abortLessThan) == -1 { 65 | return false 66 | } 67 | select { 68 | case <-ctx.Done(): 69 | return false 70 | case ch <- &item: 71 | return true 72 | } 73 | } 74 | switch { 75 | case start == nil && end == nil && !reverse: 76 | db.btree.Ascend(visitor) 77 | case start == nil && end == nil && reverse: 78 | db.btree.Descend(visitor) 79 | case end == nil && !reverse: 80 | // must handle this specially, since nil is considered less than anything else 81 | db.btree.AscendGreaterOrEqual(newKey(start), visitor) 82 | case !reverse: 83 | db.btree.AscendRange(newKey(start), newKey(end), visitor) 84 | case end == nil: 85 | // abort after start, since we use [start, end) while btree uses (start, end] 86 | abortLessThan = start 87 | db.btree.Descend(visitor) 88 | default: 89 | // skip end and abort after start, since we use [start, end) while btree uses (start, end] 90 | skipEqual = 
end 91 | abortLessThan = start 92 | db.btree.DescendLessOrEqual(newKey(end), visitor) 93 | } 94 | close(ch) 95 | }() 96 | 97 | // prime the iterator with the first value, if any 98 | if item, ok := <-ch; ok { 99 | iter.item = item 100 | } 101 | 102 | return iter 103 | } 104 | 105 | // Close implements Iterator. 106 | func (i *memDBIterator) Close() error { 107 | i.cancel() 108 | for range i.ch { // drain channel 109 | } 110 | i.item = nil 111 | return nil 112 | } 113 | 114 | // Domain implements Iterator. 115 | func (i *memDBIterator) Domain() ([]byte, []byte) { 116 | return i.start, i.end 117 | } 118 | 119 | // Valid implements Iterator. 120 | func (i *memDBIterator) Valid() bool { 121 | return i.item != nil 122 | } 123 | 124 | // Next implements Iterator. 125 | func (i *memDBIterator) Next() { 126 | i.assertIsValid() 127 | item, ok := <-i.ch 128 | switch { 129 | case ok: 130 | i.item = item 131 | default: 132 | i.item = nil 133 | } 134 | } 135 | 136 | // Error implements Iterator. 137 | func (i *memDBIterator) Error() error { 138 | return nil // famous last words 139 | } 140 | 141 | // Key implements Iterator. 142 | func (i *memDBIterator) Key() []byte { 143 | i.assertIsValid() 144 | return i.item.key 145 | } 146 | 147 | // Value implements Iterator. 
148 | func (i *memDBIterator) Value() []byte { 149 | i.assertIsValid() 150 | return i.item.value 151 | } 152 | 153 | func (i *memDBIterator) assertIsValid() { 154 | if !i.Valid() { 155 | panic("iterator is invalid") 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /memdb_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func BenchmarkMemDBRangeScans1M(b *testing.B) { 8 | db := NewMemDB() 9 | defer db.Close() 10 | 11 | benchmarkRangeScans(b, db, int64(1e6)) 12 | } 13 | 14 | func BenchmarkMemDBRangeScans10M(b *testing.B) { 15 | db := NewMemDB() 16 | defer db.Close() 17 | 18 | benchmarkRangeScans(b, db, int64(10e6)) 19 | } 20 | 21 | func BenchmarkMemDBRandomReadsWrites(b *testing.B) { 22 | db := NewMemDB() 23 | defer db.Close() 24 | 25 | benchmarkRandomReadsWrites(b, db) 26 | } 27 | -------------------------------------------------------------------------------- /prefixdb.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // PrefixDB wraps a namespace of another database as a logical database. 9 | type PrefixDB struct { 10 | mtx sync.Mutex 11 | prefix []byte 12 | db DB 13 | } 14 | 15 | var _ DB = (*PrefixDB)(nil) 16 | 17 | // NewPrefixDB lets you namespace multiple DBs within a single DB. 18 | func NewPrefixDB(db DB, prefix []byte) *PrefixDB { 19 | return &PrefixDB{ 20 | prefix: prefix, 21 | db: db, 22 | } 23 | } 24 | 25 | // Get implements DB. 26 | func (pdb *PrefixDB) Get(key []byte) ([]byte, error) { 27 | if len(key) == 0 { 28 | return nil, errKeyEmpty 29 | } 30 | 31 | pkey := pdb.prefixed(key) 32 | value, err := pdb.db.Get(pkey) 33 | if err != nil { 34 | return nil, err 35 | } 36 | return value, nil 37 | } 38 | 39 | // Has implements DB. 
40 | func (pdb *PrefixDB) Has(key []byte) (bool, error) { 41 | if len(key) == 0 { 42 | return false, errKeyEmpty 43 | } 44 | 45 | ok, err := pdb.db.Has(pdb.prefixed(key)) 46 | if err != nil { 47 | return ok, err 48 | } 49 | 50 | return ok, nil 51 | } 52 | 53 | // Set implements DB. 54 | func (pdb *PrefixDB) Set(key []byte, value []byte) error { 55 | if len(key) == 0 { 56 | return errKeyEmpty 57 | } 58 | if value == nil { 59 | return errValueNil 60 | } 61 | 62 | pkey := pdb.prefixed(key) 63 | if err := pdb.db.Set(pkey, value); err != nil { 64 | return err 65 | } 66 | return nil 67 | } 68 | 69 | // SetSync implements DB. 70 | func (pdb *PrefixDB) SetSync(key []byte, value []byte) error { 71 | if len(key) == 0 { 72 | return errKeyEmpty 73 | } 74 | if value == nil { 75 | return errValueNil 76 | } 77 | 78 | return pdb.db.SetSync(pdb.prefixed(key), value) 79 | } 80 | 81 | // Delete implements DB. 82 | func (pdb *PrefixDB) Delete(key []byte) error { 83 | if len(key) == 0 { 84 | return errKeyEmpty 85 | } 86 | 87 | return pdb.db.Delete(pdb.prefixed(key)) 88 | } 89 | 90 | // DeleteSync implements DB. 91 | func (pdb *PrefixDB) DeleteSync(key []byte) error { 92 | if len(key) == 0 { 93 | return errKeyEmpty 94 | } 95 | 96 | return pdb.db.DeleteSync(pdb.prefixed(key)) 97 | } 98 | 99 | // Iterator implements DB. 100 | func (pdb *PrefixDB) Iterator(start, end []byte) (Iterator, error) { 101 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 102 | return nil, errKeyEmpty 103 | } 104 | 105 | var pstart, pend []byte 106 | pstart = append(cp(pdb.prefix), start...) 107 | if end == nil { 108 | pend = cpIncr(pdb.prefix) 109 | } else { 110 | pend = append(cp(pdb.prefix), end...) 111 | } 112 | itr, err := pdb.db.Iterator(pstart, pend) 113 | if err != nil { 114 | return nil, err 115 | } 116 | 117 | return newPrefixIterator(pdb.prefix, start, end, itr) 118 | } 119 | 120 | // ReverseIterator implements DB. 
121 | func (pdb *PrefixDB) ReverseIterator(start, end []byte) (Iterator, error) { 122 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 123 | return nil, errKeyEmpty 124 | } 125 | 126 | var pstart, pend []byte 127 | pstart = append(cp(pdb.prefix), start...) 128 | if end == nil { 129 | pend = cpIncr(pdb.prefix) 130 | } else { 131 | pend = append(cp(pdb.prefix), end...) 132 | } 133 | ritr, err := pdb.db.ReverseIterator(pstart, pend) 134 | if err != nil { 135 | return nil, err 136 | } 137 | 138 | return newPrefixIterator(pdb.prefix, start, end, ritr) 139 | } 140 | 141 | // NewBatch implements DB. 142 | func (pdb *PrefixDB) NewBatch() Batch { 143 | return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) 144 | } 145 | 146 | // Close implements DB. 147 | func (pdb *PrefixDB) Close() error { 148 | pdb.mtx.Lock() 149 | defer pdb.mtx.Unlock() 150 | 151 | return pdb.db.Close() 152 | } 153 | 154 | // Print implements DB. 155 | func (pdb *PrefixDB) Print() error { 156 | fmt.Printf("prefix: %X\n", pdb.prefix) 157 | 158 | itr, err := pdb.Iterator(nil, nil) 159 | if err != nil { 160 | return err 161 | } 162 | defer itr.Close() 163 | for ; itr.Valid(); itr.Next() { 164 | key := itr.Key() 165 | value := itr.Value() 166 | fmt.Printf("[%X]:\t[%X]\n", key, value) 167 | } 168 | return nil 169 | } 170 | 171 | // Stats implements DB. 172 | func (pdb *PrefixDB) Stats() map[string]string { 173 | stats := make(map[string]string) 174 | stats["prefixdb.prefix.string"] = string(pdb.prefix) 175 | stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix) 176 | source := pdb.db.Stats() 177 | for key, value := range source { 178 | stats["prefixdb.source."+key] = value 179 | } 180 | return stats 181 | } 182 | 183 | func (pdb *PrefixDB) prefixed(key []byte) []byte { 184 | return append(cp(pdb.prefix), key...) 
185 | } 186 | -------------------------------------------------------------------------------- /prefixdb_batch.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | type prefixDBBatch struct { 4 | prefix []byte 5 | source Batch 6 | } 7 | 8 | var _ Batch = (*prefixDBBatch)(nil) 9 | 10 | func newPrefixBatch(prefix []byte, source Batch) prefixDBBatch { 11 | return prefixDBBatch{ 12 | prefix: prefix, 13 | source: source, 14 | } 15 | } 16 | 17 | // Set implements Batch. 18 | func (pb prefixDBBatch) Set(key, value []byte) error { 19 | if len(key) == 0 { 20 | return errKeyEmpty 21 | } 22 | if value == nil { 23 | return errValueNil 24 | } 25 | pkey := append(cp(pb.prefix), key...) 26 | return pb.source.Set(pkey, value) 27 | } 28 | 29 | // Delete implements Batch. 30 | func (pb prefixDBBatch) Delete(key []byte) error { 31 | if len(key) == 0 { 32 | return errKeyEmpty 33 | } 34 | pkey := append(cp(pb.prefix), key...) 35 | return pb.source.Delete(pkey) 36 | } 37 | 38 | // Write implements Batch. 39 | func (pb prefixDBBatch) Write() error { 40 | return pb.source.Write() 41 | } 42 | 43 | // WriteSync implements Batch. 44 | func (pb prefixDBBatch) WriteSync() error { 45 | return pb.source.WriteSync() 46 | } 47 | 48 | // Close implements Batch. 49 | func (pb prefixDBBatch) Close() error { 50 | return pb.source.Close() 51 | } 52 | -------------------------------------------------------------------------------- /prefixdb_iterator.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | ) 7 | 8 | // IteratePrefix is a convenience function for iterating over a key domain 9 | // restricted by prefix. 
10 | func IteratePrefix(db DB, prefix []byte) (Iterator, error) { 11 | var start, end []byte 12 | if len(prefix) == 0 { 13 | start = nil 14 | end = nil 15 | } else { 16 | start = cp(prefix) 17 | end = cpIncr(prefix) 18 | } 19 | itr, err := db.Iterator(start, end) 20 | if err != nil { 21 | return nil, err 22 | } 23 | return itr, nil 24 | } 25 | 26 | // Strips prefix while iterating from Iterator. 27 | type prefixDBIterator struct { 28 | prefix []byte 29 | start []byte 30 | end []byte 31 | source Iterator 32 | valid bool 33 | err error 34 | } 35 | 36 | var _ Iterator = (*prefixDBIterator)(nil) 37 | 38 | func newPrefixIterator(prefix, start, end []byte, source Iterator) (*prefixDBIterator, error) { 39 | pitrInvalid := &prefixDBIterator{ 40 | prefix: prefix, 41 | start: start, 42 | end: end, 43 | source: source, 44 | valid: false, 45 | } 46 | 47 | // Empty keys are not allowed, so if a key exists in the database that exactly matches the 48 | // prefix we need to skip it. 49 | if source.Valid() && bytes.Equal(source.Key(), prefix) { 50 | source.Next() 51 | } 52 | 53 | if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) { 54 | return pitrInvalid, nil 55 | } 56 | 57 | return &prefixDBIterator{ 58 | prefix: prefix, 59 | start: start, 60 | end: end, 61 | source: source, 62 | valid: true, 63 | }, nil 64 | } 65 | 66 | // Domain implements Iterator. 67 | func (itr *prefixDBIterator) Domain() (start []byte, end []byte) { 68 | return itr.start, itr.end 69 | } 70 | 71 | // Valid implements Iterator. 72 | func (itr *prefixDBIterator) Valid() bool { 73 | if !itr.valid || itr.err != nil || !itr.source.Valid() { 74 | return false 75 | } 76 | 77 | key := itr.source.Key() 78 | if len(key) < len(itr.prefix) || !bytes.Equal(key[:len(itr.prefix)], itr.prefix) { 79 | itr.err = fmt.Errorf("received invalid key from backend: %x (expected prefix %x)", 80 | key, itr.prefix) 81 | return false 82 | } 83 | 84 | return true 85 | } 86 | 87 | // Next implements Iterator. 
func (itr *prefixDBIterator) Next() {
	itr.assertIsValid()
	itr.source.Next()

	if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) {
		// Walked past the end of the prefix range: invalidate permanently.
		itr.valid = false
	} else if bytes.Equal(itr.source.Key(), itr.prefix) {
		// Empty keys are not allowed, so if a key exists in the database that exactly matches the
		// prefix we need to skip it.
		// (Recurses at most once: only a single stored key can equal the prefix.)
		itr.Next()
	}
}

// Key implements Iterator. It returns the key with the prefix stripped.
// Panics if the iterator is invalid.
func (itr *prefixDBIterator) Key() []byte {
	itr.assertIsValid()
	key := itr.source.Key()
	return key[len(itr.prefix):] // we have checked the key in Valid()
}

// Value implements Iterator. Panics if the iterator is invalid.
func (itr *prefixDBIterator) Value() []byte {
	itr.assertIsValid()
	return itr.source.Value()
}

// Error implements Iterator. A source error takes precedence over a
// prefix-mismatch error recorded by Valid.
func (itr *prefixDBIterator) Error() error {
	if err := itr.source.Error(); err != nil {
		return err
	}
	return itr.err
}

// Close implements Iterator.
123 | func (itr *prefixDBIterator) Close() error { 124 | return itr.source.Close() 125 | } 126 | 127 | func (itr *prefixDBIterator) assertIsValid() { 128 | if !itr.Valid() { 129 | panic("iterator is invalid") 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /prefixdb_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "math/rand" 8 | "path/filepath" 9 | "sync" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func mockDBWithStuff(t *testing.T) DB { 16 | db := NewMemDB() 17 | // Under "key" prefix 18 | require.NoError(t, db.Set(bz("key"), bz("value"))) 19 | require.NoError(t, db.Set(bz("key1"), bz("value1"))) 20 | require.NoError(t, db.Set(bz("key2"), bz("value2"))) 21 | require.NoError(t, db.Set(bz("key3"), bz("value3"))) 22 | require.NoError(t, db.Set(bz("something"), bz("else"))) 23 | require.NoError(t, db.Set(bz("k"), bz("val"))) 24 | require.NoError(t, db.Set(bz("ke"), bz("valu"))) 25 | require.NoError(t, db.Set(bz("kee"), bz("valuu"))) 26 | return db 27 | } 28 | 29 | func taskKey(i, k int) []byte { 30 | return []byte(fmt.Sprintf("task-%d-key-%d", i, k)) 31 | } 32 | 33 | func randomValue() []byte { 34 | b := make([]byte, 16) 35 | rand.Read(b) 36 | return b 37 | } 38 | 39 | func TestGolevelDB(t *testing.T) { 40 | path := filepath.Join(t.TempDir(), "goleveldb") 41 | 42 | db, err := NewGoLevelDB(path, "") 43 | require.NoError(t, err) 44 | 45 | Run(t, db) 46 | } 47 | 48 | /* We don't seem to test badger anywhere. 
49 | func TestWithBadgerDB(t *testing.T) { 50 | dir := t.TempDir() 51 | path := filepath.Join(dir, "badgerdb") 52 | 53 | db, err := NewBadgerDB(path, "") 54 | require.NoError(t, err) 55 | 56 | t.Run("BadgerDB", func(t *testing.T) { Run(t, db) }) 57 | } 58 | */ 59 | 60 | func TestWithMemDB(t *testing.T) { 61 | db := NewMemDB() 62 | 63 | t.Run("MemDB", func(t *testing.T) { Run(t, db) }) 64 | } 65 | 66 | // Run generates concurrent reads and writes to db so the race detector can 67 | // verify concurrent operations are properly synchronized. 68 | // The contents of db are garbage after Run returns. 69 | func Run(t *testing.T, db DB) { 70 | t.Helper() 71 | 72 | const numWorkers = 10 73 | const numKeys = 64 74 | 75 | var wg sync.WaitGroup 76 | for i := 0; i < numWorkers; i++ { 77 | wg.Add(1) 78 | i := i 79 | go func() { 80 | defer wg.Done() 81 | 82 | // Insert a bunch of keys with random data. 83 | for k := 1; k <= numKeys; k++ { 84 | key := taskKey(i, k) // say, "task--key-" 85 | value := randomValue() 86 | if err := db.Set(key, value); err != nil { 87 | t.Errorf("Task %d: db.Set(%q=%q) failed: %v", 88 | i, string(key), string(value), err) 89 | } 90 | } 91 | 92 | // Iterate over the database to make sure our keys are there. 93 | it, err := db.Iterator(nil, nil) 94 | if err != nil { 95 | t.Errorf("Iterator[%d]: %v", i, err) 96 | return 97 | } 98 | found := make(map[string][]byte) 99 | mine := []byte(fmt.Sprintf("task-%d-", i)) 100 | for { 101 | if key := it.Key(); bytes.HasPrefix(key, mine) { 102 | found[string(key)] = it.Value() 103 | } 104 | it.Next() 105 | if !it.Valid() { 106 | break 107 | } 108 | } 109 | if err := it.Error(); err != nil { 110 | t.Errorf("Iterator[%d] reported error: %v", i, err) 111 | } 112 | if err := it.Close(); err != nil { 113 | t.Errorf("Close iterator[%d]: %v", i, err) 114 | } 115 | if len(found) != numKeys { 116 | t.Errorf("Task %d: found %d keys, wanted %d", i, len(found), numKeys) 117 | } 118 | 119 | // Delete all the keys we inserted. 
120 | for key := range mine { 121 | bs := make([]byte, 4) 122 | binary.LittleEndian.PutUint32(bs, uint32(key)) 123 | if err := db.Delete(bs); err != nil { 124 | t.Errorf("Delete %q: %v", key, err) 125 | } 126 | } 127 | }() 128 | } 129 | wg.Wait() 130 | } 131 | 132 | func TestPrefixDBSimple(t *testing.T) { 133 | db := mockDBWithStuff(t) 134 | pdb := NewPrefixDB(db, bz("key")) 135 | 136 | checkValue(t, pdb, bz("key"), nil) 137 | checkValue(t, pdb, bz("key1"), nil) 138 | checkValue(t, pdb, bz("1"), bz("value1")) 139 | checkValue(t, pdb, bz("key2"), nil) 140 | checkValue(t, pdb, bz("2"), bz("value2")) 141 | checkValue(t, pdb, bz("key3"), nil) 142 | checkValue(t, pdb, bz("3"), bz("value3")) 143 | checkValue(t, pdb, bz("something"), nil) 144 | checkValue(t, pdb, bz("k"), nil) 145 | checkValue(t, pdb, bz("ke"), nil) 146 | checkValue(t, pdb, bz("kee"), nil) 147 | } 148 | 149 | func TestPrefixDBIterator1(t *testing.T) { 150 | db := mockDBWithStuff(t) 151 | pdb := NewPrefixDB(db, bz("key")) 152 | 153 | itr, err := pdb.Iterator(nil, nil) 154 | require.NoError(t, err) 155 | checkDomain(t, itr, nil, nil) 156 | checkItem(t, itr, bz("1"), bz("value1")) 157 | checkNext(t, itr, true) 158 | checkItem(t, itr, bz("2"), bz("value2")) 159 | checkNext(t, itr, true) 160 | checkItem(t, itr, bz("3"), bz("value3")) 161 | checkNext(t, itr, false) 162 | checkInvalid(t, itr) 163 | itr.Close() 164 | } 165 | 166 | func TestPrefixDBReverseIterator1(t *testing.T) { 167 | db := mockDBWithStuff(t) 168 | pdb := NewPrefixDB(db, bz("key")) 169 | 170 | itr, err := pdb.ReverseIterator(nil, nil) 171 | require.NoError(t, err) 172 | checkDomain(t, itr, nil, nil) 173 | checkItem(t, itr, bz("3"), bz("value3")) 174 | checkNext(t, itr, true) 175 | checkItem(t, itr, bz("2"), bz("value2")) 176 | checkNext(t, itr, true) 177 | checkItem(t, itr, bz("1"), bz("value1")) 178 | checkNext(t, itr, false) 179 | checkInvalid(t, itr) 180 | itr.Close() 181 | } 182 | 183 | func TestPrefixDBReverseIterator5(t *testing.T) { 184 | 
db := mockDBWithStuff(t) 185 | pdb := NewPrefixDB(db, bz("key")) 186 | 187 | itr, err := pdb.ReverseIterator(bz("1"), nil) 188 | require.NoError(t, err) 189 | checkDomain(t, itr, bz("1"), nil) 190 | checkItem(t, itr, bz("3"), bz("value3")) 191 | checkNext(t, itr, true) 192 | checkItem(t, itr, bz("2"), bz("value2")) 193 | checkNext(t, itr, true) 194 | checkItem(t, itr, bz("1"), bz("value1")) 195 | checkNext(t, itr, false) 196 | checkInvalid(t, itr) 197 | itr.Close() 198 | } 199 | 200 | func TestPrefixDBReverseIterator6(t *testing.T) { 201 | db := mockDBWithStuff(t) 202 | pdb := NewPrefixDB(db, bz("key")) 203 | 204 | itr, err := pdb.ReverseIterator(bz("2"), nil) 205 | require.NoError(t, err) 206 | checkDomain(t, itr, bz("2"), nil) 207 | checkItem(t, itr, bz("3"), bz("value3")) 208 | checkNext(t, itr, true) 209 | checkItem(t, itr, bz("2"), bz("value2")) 210 | checkNext(t, itr, false) 211 | checkInvalid(t, itr) 212 | itr.Close() 213 | } 214 | 215 | func TestPrefixDBReverseIterator7(t *testing.T) { 216 | db := mockDBWithStuff(t) 217 | pdb := NewPrefixDB(db, bz("key")) 218 | 219 | itr, err := pdb.ReverseIterator(nil, bz("2")) 220 | require.NoError(t, err) 221 | checkDomain(t, itr, nil, bz("2")) 222 | checkItem(t, itr, bz("1"), bz("value1")) 223 | checkNext(t, itr, false) 224 | checkInvalid(t, itr) 225 | itr.Close() 226 | } 227 | -------------------------------------------------------------------------------- /remotedb/batch.go: -------------------------------------------------------------------------------- 1 | package remotedb 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | db "github.com/tendermint/tm-db" 8 | protodb "github.com/tendermint/tm-db/remotedb/proto" 9 | ) 10 | 11 | var errBatchClosed = errors.New("batch has been written or closed") 12 | 13 | type batch struct { 14 | db *RemoteDB 15 | ops []*protodb.Operation 16 | } 17 | 18 | var _ db.Batch = (*batch)(nil) 19 | 20 | func newBatch(rdb *RemoteDB) *batch { 21 | return &batch{ 22 | db: rdb, 23 | ops: 
[]*protodb.Operation{}, 24 | } 25 | } 26 | 27 | // Set implements Batch. 28 | func (b *batch) Set(key, value []byte) error { 29 | if b.ops == nil { 30 | return errBatchClosed 31 | } 32 | op := &protodb.Operation{ 33 | Entity: &protodb.Entity{Key: key, Value: value}, 34 | Type: protodb.Operation_SET, 35 | } 36 | b.ops = append(b.ops, op) 37 | return nil 38 | } 39 | 40 | // Delete implements Batch. 41 | func (b *batch) Delete(key []byte) error { 42 | if b.ops == nil { 43 | return errBatchClosed 44 | } 45 | op := &protodb.Operation{ 46 | Entity: &protodb.Entity{Key: key}, 47 | Type: protodb.Operation_DELETE, 48 | } 49 | b.ops = append(b.ops, op) 50 | return nil 51 | } 52 | 53 | // Write implements Batch. 54 | func (b *batch) Write() error { 55 | if b.ops == nil { 56 | return errBatchClosed 57 | } 58 | _, err := b.db.dc.BatchWrite(b.db.ctx, &protodb.Batch{Ops: b.ops}) 59 | if err != nil { 60 | return fmt.Errorf("remoteDB.BatchWrite: %w", err) 61 | } 62 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 63 | b.Close() 64 | return nil 65 | } 66 | 67 | // WriteSync implements Batch. 68 | func (b *batch) WriteSync() error { 69 | if b.ops == nil { 70 | return errBatchClosed 71 | } 72 | _, err := b.db.dc.BatchWriteSync(b.db.ctx, &protodb.Batch{Ops: b.ops}) 73 | if err != nil { 74 | return fmt.Errorf("RemoteDB.BatchWriteSync: %w", err) 75 | } 76 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 77 | return b.Close() 78 | } 79 | 80 | // Close implements Batch. 81 | func (b *batch) Close() error { 82 | b.ops = nil 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /remotedb/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | remotedb is a package for connecting to distributed Tendermint db.DB 3 | instances. 
The purpose is to detach difficult deployments, such as
CLevelDB, which requires gcc, or databases that require custom
configurations such as extra disk space. It also eases the burden
and cost of deploying database dependencies for Tendermint
developers. Most importantly, it is built over the highly
performant gRPC transport.

remotedb's RemoteDB implements db.DB, so it can be used normally
like other databases. One just has to explicitly connect to the
remote database with a client setup such as:

	client, err := remotedb.NewRemoteDB(addr, cert)
	// Make sure to invoke InitRemote!
	if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
		log.Fatalf("Failed to initialize the remote db")
	}

	client.Set(key1, value)
	gv1 := client.SetSync(k2, v2)

	client.Delete(k1)
	gv2 := client.Get(k1)

	for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() {
		ik, iv := itr.Key(), itr.Value()
		ds, de := itr.Domain()
	}

	stats := client.Stats()

	if !client.Has(dk1) {
		client.SetSync(dk1, dv1)
	}
*/
package remotedb
-------------------------------------------------------------------------------- /remotedb/grpcdb/client.go: --------------------------------------------------------------------------------
package grpcdb

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"

	protodb "github.com/tendermint/tm-db/remotedb/proto"
)

// NewClient creates a gRPC client connected to the bound gRPC server at serverAddr.
// serverCert is the path of a TLS certificate file used to establish the
// client's transport credentials.
12 | func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) { 13 | creds, err := credentials.NewClientTLSFromFile(serverCert, "") 14 | if err != nil { 15 | return nil, err 16 | } 17 | cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) 18 | if err != nil { 19 | return nil, err 20 | } 21 | return protodb.NewDBClient(cc), nil 22 | } 23 | -------------------------------------------------------------------------------- /remotedb/grpcdb/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | grpcdb is the distribution of Tendermint's db.DB instances using 3 | the gRPC transport to decouple local db.DB usages from applications, 4 | to using them over a network in a highly performant manner. 5 | 6 | grpcdb allows users to initialize a database's server like 7 | they would locally and invoke the respective methods of db.DB. 8 | 9 | Most users shouldn't use this package, but should instead use 10 | remotedb. Only the lower level users and database server deployers 11 | should use it, for functionality such as: 12 | 13 | ln, err := net.Listen("tcp", "0.0.0.0:0") 14 | srv := grpcdb.NewServer() 15 | defer srv.Stop() 16 | go func() { 17 | if err := srv.Serve(ln); err != nil { 18 | t.Fatalf("BindServer: %v", err) 19 | } 20 | }() 21 | 22 | or 23 | addr := ":8998" 24 | cert := "server.crt" 25 | key := "server.key" 26 | go func() { 27 | if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { 28 | log.Fatalf("BindServer: %v", err) 29 | } 30 | }() 31 | */ 32 | package grpcdb 33 | -------------------------------------------------------------------------------- /remotedb/grpcdb/example_test.go: -------------------------------------------------------------------------------- 1 | package grpcdb_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "log" 7 | 8 | grpcdb "github.com/tendermint/tm-db/remotedb/grpcdb" 9 | protodb "github.com/tendermint/tm-db/remotedb/proto" 10 | ) 11 | 12 | func Example() { 
13 | addr := ":8998" 14 | cert := "server.crt" 15 | key := "server.key" 16 | go func() { 17 | if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { 18 | log.Fatalf("BindServer: %v", err) 19 | } 20 | }() 21 | 22 | client, err := grpcdb.NewClient(addr, cert) 23 | if err != nil { 24 | log.Fatalf("Failed to create grpcDB client: %v", err) 25 | } 26 | 27 | ctx := context.Background() 28 | // 1. Initialize the DB 29 | in := &protodb.Init{ 30 | Type: "leveldb", 31 | Name: "grpc-uno-test", 32 | Dir: ".", 33 | } 34 | if _, err := client.Init(ctx, in); err != nil { 35 | log.Fatalf("Init error: %v", err) 36 | } 37 | 38 | // 2. Now it can be used! 39 | query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")} 40 | if _, err := client.SetSync(ctx, query1); err != nil { 41 | log.Fatalf("SetSync err: %v", err) 42 | } 43 | 44 | query2 := &protodb.Entity{Key: []byte("Project")} 45 | read, err := client.Get(ctx, query2) 46 | if err != nil { 47 | log.Fatalf("Get err: %v", err) 48 | } 49 | if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) { 50 | log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /remotedb/grpcdb/server.go: -------------------------------------------------------------------------------- 1 | package grpcdb 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "sync" 7 | "time" 8 | 9 | "google.golang.org/grpc" 10 | "google.golang.org/grpc/credentials" 11 | 12 | db "github.com/tendermint/tm-db" 13 | protodb "github.com/tendermint/tm-db/remotedb/proto" 14 | ) 15 | 16 | // ListenAndServe is a blocking function that sets up a gRPC based 17 | // server at the address supplied, with the gRPC options passed in. 18 | // Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe. 
19 | func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error { 20 | ln, err := net.Listen("tcp", addr) 21 | if err != nil { 22 | return err 23 | } 24 | srv, err := NewServer(cert, key, opts...) 25 | if err != nil { 26 | return err 27 | } 28 | return srv.Serve(ln) 29 | } 30 | 31 | func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) { 32 | creds, err := credentials.NewServerTLSFromFile(cert, key) 33 | if err != nil { 34 | return nil, err 35 | } 36 | opts = append(opts, grpc.Creds(creds)) 37 | srv := grpc.NewServer(opts...) 38 | protodb.RegisterDBServer(srv, new(server)) 39 | return srv, nil 40 | } 41 | 42 | type server struct { 43 | mu sync.Mutex 44 | db db.DB 45 | } 46 | 47 | var _ protodb.DBServer = (*server)(nil) 48 | 49 | // Init initializes the server's database. Only one type of database 50 | // can be initialized per server. 51 | // 52 | // Dir is the directory on the file system in which the DB will be stored(if backed by disk) (TODO: remove) 53 | // 54 | // Name is representative filesystem entry's basepath 55 | // 56 | // Type can be either one of: 57 | // * cleveldb (if built with gcc enabled) 58 | // * fsdb 59 | // * memdB 60 | // * goleveldb 61 | // See https://godoc.org/github.com/tendermint/tendermint/libs/db#BackendType 62 | func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) { 63 | s.mu.Lock() 64 | defer s.mu.Unlock() 65 | 66 | var err error 67 | s.db, err = db.NewDB(in.Name, db.BackendType(in.Type), in.Dir) 68 | if err != nil { 69 | return nil, err 70 | } 71 | return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil 72 | } 73 | 74 | func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { 75 | err := s.db.Delete(in.Key) 76 | if err != nil { 77 | return nil, err 78 | } 79 | return nothing, nil 80 | } 81 | 82 | var nothing = new(protodb.Nothing) 83 | 84 | func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) 
(*protodb.Nothing, error) { 85 | err := s.db.DeleteSync(in.Key) 86 | if err != nil { 87 | return nil, err 88 | } 89 | return nothing, nil 90 | } 91 | 92 | func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { 93 | value, err := s.db.Get(in.Key) 94 | if err != nil { 95 | return nil, err 96 | } 97 | return &protodb.Entity{Value: value}, nil 98 | } 99 | 100 | func (s *server) GetStream(ds protodb.DB_GetStreamServer) error { 101 | // Receive routine 102 | responsesChan := make(chan *protodb.Entity) 103 | go func() { 104 | defer close(responsesChan) 105 | ctx := context.Background() 106 | for { 107 | in, err := ds.Recv() 108 | if err != nil { 109 | responsesChan <- &protodb.Entity{Err: err.Error()} 110 | return 111 | } 112 | out, err := s.Get(ctx, in) 113 | if err != nil { 114 | if out == nil { 115 | out = new(protodb.Entity) 116 | out.Key = in.Key 117 | } 118 | out.Err = err.Error() 119 | responsesChan <- out 120 | return 121 | } 122 | 123 | // Otherwise continue on 124 | responsesChan <- out 125 | } 126 | }() 127 | 128 | // Send routine, block until we return 129 | for out := range responsesChan { 130 | if err := ds.Send(out); err != nil { 131 | return err 132 | } 133 | } 134 | return nil 135 | } 136 | 137 | func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { 138 | exists, err := s.db.Has(in.Key) 139 | if err != nil { 140 | return nil, err 141 | } 142 | return &protodb.Entity{Exists: exists}, nil 143 | } 144 | 145 | func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { 146 | err := s.db.Set(in.Key, in.Value) 147 | if err != nil { 148 | return nil, err 149 | } 150 | return nothing, nil 151 | } 152 | 153 | func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { 154 | err := s.db.SetSync(in.Key, in.Value) 155 | if err != nil { 156 | return nil, err 157 | } 158 | return nothing, nil 159 | } 160 | 161 | func (s *server) 
Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error { 162 | it, err := s.db.Iterator(query.Start, query.End) 163 | if err != nil { 164 | return err 165 | } 166 | defer it.Close() 167 | return s.handleIterator(it, dis.Send) 168 | } 169 | 170 | func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error { 171 | for it.Valid() { 172 | start, end := it.Domain() 173 | key := it.Key() 174 | value := it.Value() 175 | 176 | out := &protodb.Iterator{ 177 | Domain: &protodb.Domain{Start: start, End: end}, 178 | Valid: it.Valid(), 179 | Key: key, 180 | Value: value, 181 | } 182 | if err := sendFunc(out); err != nil { 183 | return err 184 | } 185 | 186 | // Finally move the iterator forward, 187 | it.Next() 188 | 189 | } 190 | return nil 191 | } 192 | 193 | func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error { 194 | it, err := s.db.ReverseIterator(query.Start, query.End) 195 | if err != nil { 196 | return err 197 | } 198 | defer it.Close() 199 | return s.handleIterator(it, dis.Send) 200 | } 201 | 202 | func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) { 203 | stats := s.db.Stats() 204 | return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil 205 | } 206 | 207 | func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { 208 | return s.batchWrite(c, b, false) 209 | } 210 | 211 | func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { 212 | return s.batchWrite(c, b, true) 213 | } 214 | 215 | func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) { 216 | bat := s.db.NewBatch() 217 | defer bat.Close() 218 | for _, op := range b.Ops { 219 | switch op.Type { 220 | case protodb.Operation_SET: 221 | err := bat.Set(op.Entity.Key, op.Entity.Value) 222 | if err != nil { 223 | return nil, err 224 | } 225 | case 
protodb.Operation_DELETE: 226 | err := bat.Delete(op.Entity.Key) 227 | if err != nil { 228 | return nil, err 229 | } 230 | } 231 | } 232 | if sync { 233 | err := bat.WriteSync() 234 | if err != nil { 235 | return nil, err 236 | } 237 | } else { 238 | err := bat.Write() 239 | if err != nil { 240 | return nil, err 241 | } 242 | } 243 | return nothing, nil 244 | } 245 | -------------------------------------------------------------------------------- /remotedb/iterator.go: -------------------------------------------------------------------------------- 1 | package remotedb 2 | 3 | import ( 4 | db "github.com/tendermint/tm-db" 5 | protodb "github.com/tendermint/tm-db/remotedb/proto" 6 | ) 7 | 8 | func makeIterator(dic protodb.DB_IteratorClient) db.Iterator { 9 | itr := &iterator{dic: dic} 10 | itr.Next() // We need to call Next to prime the iterator 11 | return itr 12 | } 13 | 14 | func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator { 15 | rItr := &reverseIterator{dric: dric} 16 | rItr.Next() // We need to call Next to prime the iterator 17 | return rItr 18 | } 19 | 20 | type reverseIterator struct { 21 | dric protodb.DB_ReverseIteratorClient 22 | cur *protodb.Iterator 23 | err error 24 | } 25 | 26 | var _ db.Iterator = (*iterator)(nil) 27 | 28 | // Valid implements Iterator. 29 | func (rItr *reverseIterator) Valid() bool { 30 | return rItr.cur != nil && rItr.cur.Valid && rItr.err == nil 31 | } 32 | 33 | // Domain implements Iterator. 34 | func (rItr *reverseIterator) Domain() (start, end []byte) { 35 | if rItr.cur == nil || rItr.cur.Domain == nil { 36 | return nil, nil 37 | } 38 | return rItr.cur.Domain.Start, rItr.cur.Domain.End 39 | } 40 | 41 | // Next implements Iterator. 42 | func (rItr *reverseIterator) Next() { 43 | var err error 44 | rItr.cur, err = rItr.dric.Recv() 45 | if err != nil { 46 | rItr.err = err 47 | } 48 | } 49 | 50 | // Key implements Iterator. 
51 | func (rItr *reverseIterator) Key() []byte { 52 | rItr.assertIsValid() 53 | return rItr.cur.Key 54 | } 55 | 56 | // Value implements Iterator. 57 | func (rItr *reverseIterator) Value() []byte { 58 | rItr.assertIsValid() 59 | return rItr.cur.Value 60 | } 61 | 62 | // Error implements Iterator. 63 | func (rItr *reverseIterator) Error() error { 64 | return rItr.err 65 | } 66 | 67 | // Close implements Iterator. 68 | func (rItr *reverseIterator) Close() error { 69 | return nil 70 | } 71 | 72 | func (rItr *reverseIterator) assertIsValid() { 73 | if !rItr.Valid() { 74 | panic("iterator is invalid") 75 | } 76 | } 77 | 78 | // iterator implements the db.Iterator by retrieving 79 | // streamed iterators from the remote backend as 80 | // needed. It is NOT safe for concurrent usage, 81 | // matching the behavior of other iterators. 82 | type iterator struct { 83 | dic protodb.DB_IteratorClient 84 | cur *protodb.Iterator 85 | err error 86 | } 87 | 88 | var _ db.Iterator = (*iterator)(nil) 89 | 90 | // Valid implements Iterator. 91 | func (itr *iterator) Valid() bool { 92 | return itr.cur != nil && itr.cur.Valid && itr.err == nil 93 | } 94 | 95 | // Domain implements Iterator. 96 | func (itr *iterator) Domain() (start, end []byte) { 97 | if itr.cur == nil || itr.cur.Domain == nil { 98 | return nil, nil 99 | } 100 | return itr.cur.Domain.Start, itr.cur.Domain.End 101 | } 102 | 103 | // Next implements Iterator. 104 | func (itr *iterator) Next() { 105 | var err error 106 | itr.cur, err = itr.dic.Recv() 107 | if err != nil { 108 | itr.err = err 109 | } 110 | } 111 | 112 | // Key implements Iterator. 113 | func (itr *iterator) Key() []byte { 114 | itr.assertIsValid() 115 | return itr.cur.Key 116 | } 117 | 118 | // Value implements Iterator. 119 | func (itr *iterator) Value() []byte { 120 | itr.assertIsValid() 121 | return itr.cur.Value 122 | } 123 | 124 | // Error implements Iterator. 
125 | func (itr *iterator) Error() error { 126 | return itr.err 127 | } 128 | 129 | // Close implements Iterator. 130 | func (itr *iterator) Close() error { 131 | return itr.dic.CloseSend() 132 | } 133 | 134 | func (itr *iterator) assertIsValid() { 135 | if !itr.Valid() { 136 | panic("iterator is invalid") 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /remotedb/proto/defs.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package protodb; 4 | 5 | import "github.com/gogo/protobuf/gogoproto/gogo.proto"; 6 | 7 | // Generate tests 8 | option (gogoproto.populate_all) = true; 9 | option (gogoproto.equal_all) = true; 10 | option (gogoproto.testgen_all) = true; 11 | 12 | message Batch { 13 | repeated Operation ops = 1; 14 | } 15 | 16 | message Operation { 17 | Entity entity = 1; 18 | enum Type { 19 | SET = 0; 20 | DELETE = 1; 21 | } 22 | Type type = 2; 23 | } 24 | 25 | message Entity { 26 | int32 id = 1; 27 | bytes key = 2; 28 | bytes value = 3; 29 | bool exists = 4; 30 | bytes start = 5; 31 | bytes end = 6; 32 | string err = 7; 33 | int64 created_at = 8; 34 | } 35 | 36 | message Nothing { 37 | } 38 | 39 | message Domain { 40 | bytes start = 1; 41 | bytes end = 2; 42 | } 43 | 44 | message Iterator { 45 | Domain domain = 1; 46 | bool valid = 2; 47 | bytes key = 3; 48 | bytes value = 4; 49 | } 50 | 51 | message Stats { 52 | map data = 1; 53 | int64 time_at = 2; 54 | } 55 | 56 | message Init { 57 | string Type = 1; 58 | string Name = 2; 59 | string Dir = 3; 60 | } 61 | 62 | service DB { 63 | rpc init(Init) returns (Entity) {} 64 | rpc get(Entity) returns (Entity) {} 65 | rpc getStream(stream Entity) returns (stream Entity) {} 66 | 67 | rpc has(Entity) returns (Entity) {} 68 | rpc set(Entity) returns (Nothing) {} 69 | rpc setSync(Entity) returns (Nothing) {} 70 | rpc delete(Entity) returns (Nothing) {} 71 | rpc deleteSync(Entity) returns (Nothing) {} 72 
| rpc iterator(Entity) returns (stream Iterator) {} 73 | rpc reverseIterator(Entity) returns (stream Iterator) {} 74 | // rpc print(Nothing) returns (Entity) {} 75 | rpc stats(Nothing) returns (Stats) {} 76 | rpc batchWrite(Batch) returns (Nothing) {} 77 | rpc batchWriteSync(Batch) returns (Nothing) {} 78 | } 79 | -------------------------------------------------------------------------------- /remotedb/proto/defspb_test.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-gogo. DO NOT EDIT. 2 | // source: remotedb/proto/defs.proto 3 | 4 | package protodb 5 | 6 | import ( 7 | fmt "fmt" 8 | math "math" 9 | math_rand "math/rand" 10 | testing "testing" 11 | time "time" 12 | 13 | _ "github.com/gogo/protobuf/gogoproto" 14 | github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" 15 | github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" 16 | proto "github.com/gogo/protobuf/proto" 17 | ) 18 | 19 | // Reference imports to suppress errors if they are not otherwise used. 
20 | var _ = proto.Marshal 21 | var _ = fmt.Errorf 22 | var _ = math.Inf 23 | 24 | func TestBatchProto(t *testing.T) { 25 | seed := time.Now().UnixNano() 26 | popr := math_rand.New(math_rand.NewSource(seed)) 27 | p := NewPopulatedBatch(popr, false) 28 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 29 | if err != nil { 30 | t.Fatalf("seed = %d, err = %v", seed, err) 31 | } 32 | msg := &Batch{} 33 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 34 | t.Fatalf("seed = %d, err = %v", seed, err) 35 | } 36 | littlefuzz := make([]byte, len(dAtA)) 37 | copy(littlefuzz, dAtA) 38 | for i := range dAtA { 39 | dAtA[i] = byte(popr.Intn(256)) 40 | } 41 | if !p.Equal(msg) { 42 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 43 | } 44 | if len(littlefuzz) > 0 { 45 | fuzzamount := 100 46 | for i := 0; i < fuzzamount; i++ { 47 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 48 | littlefuzz = append(littlefuzz, byte(popr.Intn(256))) 49 | } 50 | // shouldn't panic 51 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 52 | } 53 | } 54 | 55 | func TestOperationProto(t *testing.T) { 56 | seed := time.Now().UnixNano() 57 | popr := math_rand.New(math_rand.NewSource(seed)) 58 | p := NewPopulatedOperation(popr, false) 59 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 60 | if err != nil { 61 | t.Fatalf("seed = %d, err = %v", seed, err) 62 | } 63 | msg := &Operation{} 64 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 65 | t.Fatalf("seed = %d, err = %v", seed, err) 66 | } 67 | littlefuzz := make([]byte, len(dAtA)) 68 | copy(littlefuzz, dAtA) 69 | for i := range dAtA { 70 | dAtA[i] = byte(popr.Intn(256)) 71 | } 72 | if !p.Equal(msg) { 73 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 74 | } 75 | if len(littlefuzz) > 0 { 76 | fuzzamount := 100 77 | for i := 0; i < fuzzamount; i++ { 78 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 79 | littlefuzz = 
append(littlefuzz, byte(popr.Intn(256))) 80 | } 81 | // shouldn't panic 82 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 83 | } 84 | } 85 | 86 | func TestEntityProto(t *testing.T) { 87 | seed := time.Now().UnixNano() 88 | popr := math_rand.New(math_rand.NewSource(seed)) 89 | p := NewPopulatedEntity(popr, false) 90 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 91 | if err != nil { 92 | t.Fatalf("seed = %d, err = %v", seed, err) 93 | } 94 | msg := &Entity{} 95 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 96 | t.Fatalf("seed = %d, err = %v", seed, err) 97 | } 98 | littlefuzz := make([]byte, len(dAtA)) 99 | copy(littlefuzz, dAtA) 100 | for i := range dAtA { 101 | dAtA[i] = byte(popr.Intn(256)) 102 | } 103 | if !p.Equal(msg) { 104 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 105 | } 106 | if len(littlefuzz) > 0 { 107 | fuzzamount := 100 108 | for i := 0; i < fuzzamount; i++ { 109 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 110 | littlefuzz = append(littlefuzz, byte(popr.Intn(256))) 111 | } 112 | // shouldn't panic 113 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 114 | } 115 | } 116 | 117 | func TestNothingProto(t *testing.T) { 118 | seed := time.Now().UnixNano() 119 | popr := math_rand.New(math_rand.NewSource(seed)) 120 | p := NewPopulatedNothing(popr, false) 121 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 122 | if err != nil { 123 | t.Fatalf("seed = %d, err = %v", seed, err) 124 | } 125 | msg := &Nothing{} 126 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 127 | t.Fatalf("seed = %d, err = %v", seed, err) 128 | } 129 | littlefuzz := make([]byte, len(dAtA)) 130 | copy(littlefuzz, dAtA) 131 | for i := range dAtA { 132 | dAtA[i] = byte(popr.Intn(256)) 133 | } 134 | if !p.Equal(msg) { 135 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 136 | } 137 | if len(littlefuzz) > 0 { 138 | fuzzamount := 100 139 | for i := 0; i 
< fuzzamount; i++ { 140 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 141 | littlefuzz = append(littlefuzz, byte(popr.Intn(256))) 142 | } 143 | // shouldn't panic 144 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 145 | } 146 | } 147 | 148 | func TestDomainProto(t *testing.T) { 149 | seed := time.Now().UnixNano() 150 | popr := math_rand.New(math_rand.NewSource(seed)) 151 | p := NewPopulatedDomain(popr, false) 152 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 153 | if err != nil { 154 | t.Fatalf("seed = %d, err = %v", seed, err) 155 | } 156 | msg := &Domain{} 157 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 158 | t.Fatalf("seed = %d, err = %v", seed, err) 159 | } 160 | littlefuzz := make([]byte, len(dAtA)) 161 | copy(littlefuzz, dAtA) 162 | for i := range dAtA { 163 | dAtA[i] = byte(popr.Intn(256)) 164 | } 165 | if !p.Equal(msg) { 166 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 167 | } 168 | if len(littlefuzz) > 0 { 169 | fuzzamount := 100 170 | for i := 0; i < fuzzamount; i++ { 171 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 172 | littlefuzz = append(littlefuzz, byte(popr.Intn(256))) 173 | } 174 | // shouldn't panic 175 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 176 | } 177 | } 178 | 179 | func TestIteratorProto(t *testing.T) { 180 | seed := time.Now().UnixNano() 181 | popr := math_rand.New(math_rand.NewSource(seed)) 182 | p := NewPopulatedIterator(popr, false) 183 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 184 | if err != nil { 185 | t.Fatalf("seed = %d, err = %v", seed, err) 186 | } 187 | msg := &Iterator{} 188 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 189 | t.Fatalf("seed = %d, err = %v", seed, err) 190 | } 191 | littlefuzz := make([]byte, len(dAtA)) 192 | copy(littlefuzz, dAtA) 193 | for i := range dAtA { 194 | dAtA[i] = byte(popr.Intn(256)) 195 | } 196 | if !p.Equal(msg) { 197 | 
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 198 | } 199 | if len(littlefuzz) > 0 { 200 | fuzzamount := 100 201 | for i := 0; i < fuzzamount; i++ { 202 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 203 | littlefuzz = append(littlefuzz, byte(popr.Intn(256))) 204 | } 205 | // shouldn't panic 206 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 207 | } 208 | } 209 | 210 | func TestStatsProto(t *testing.T) { 211 | seed := time.Now().UnixNano() 212 | popr := math_rand.New(math_rand.NewSource(seed)) 213 | p := NewPopulatedStats(popr, false) 214 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 215 | if err != nil { 216 | t.Fatalf("seed = %d, err = %v", seed, err) 217 | } 218 | msg := &Stats{} 219 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 220 | t.Fatalf("seed = %d, err = %v", seed, err) 221 | } 222 | littlefuzz := make([]byte, len(dAtA)) 223 | copy(littlefuzz, dAtA) 224 | for i := range dAtA { 225 | dAtA[i] = byte(popr.Intn(256)) 226 | } 227 | if !p.Equal(msg) { 228 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 229 | } 230 | if len(littlefuzz) > 0 { 231 | fuzzamount := 100 232 | for i := 0; i < fuzzamount; i++ { 233 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 234 | littlefuzz = append(littlefuzz, byte(popr.Intn(256))) 235 | } 236 | // shouldn't panic 237 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 238 | } 239 | } 240 | 241 | func TestInitProto(t *testing.T) { 242 | seed := time.Now().UnixNano() 243 | popr := math_rand.New(math_rand.NewSource(seed)) 244 | p := NewPopulatedInit(popr, false) 245 | dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) 246 | if err != nil { 247 | t.Fatalf("seed = %d, err = %v", seed, err) 248 | } 249 | msg := &Init{} 250 | if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { 251 | t.Fatalf("seed = %d, err = %v", seed, err) 252 | } 253 | littlefuzz := make([]byte, len(dAtA)) 254 | 
copy(littlefuzz, dAtA) 255 | for i := range dAtA { 256 | dAtA[i] = byte(popr.Intn(256)) 257 | } 258 | if !p.Equal(msg) { 259 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 260 | } 261 | if len(littlefuzz) > 0 { 262 | fuzzamount := 100 263 | for i := 0; i < fuzzamount; i++ { 264 | littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) 265 | littlefuzz = append(littlefuzz, byte(popr.Intn(256))) 266 | } 267 | // shouldn't panic 268 | _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) 269 | } 270 | } 271 | 272 | func TestBatchJSON(t *testing.T) { 273 | seed := time.Now().UnixNano() 274 | popr := math_rand.New(math_rand.NewSource(seed)) 275 | p := NewPopulatedBatch(popr, true) 276 | marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 277 | jsondata, err := marshaler.MarshalToString(p) 278 | if err != nil { 279 | t.Fatalf("seed = %d, err = %v", seed, err) 280 | } 281 | msg := &Batch{} 282 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 283 | if err != nil { 284 | t.Fatalf("seed = %d, err = %v", seed, err) 285 | } 286 | if !p.Equal(msg) { 287 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 288 | } 289 | } 290 | func TestOperationJSON(t *testing.T) { 291 | seed := time.Now().UnixNano() 292 | popr := math_rand.New(math_rand.NewSource(seed)) 293 | p := NewPopulatedOperation(popr, true) 294 | marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 295 | jsondata, err := marshaler.MarshalToString(p) 296 | if err != nil { 297 | t.Fatalf("seed = %d, err = %v", seed, err) 298 | } 299 | msg := &Operation{} 300 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 301 | if err != nil { 302 | t.Fatalf("seed = %d, err = %v", seed, err) 303 | } 304 | if !p.Equal(msg) { 305 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 306 | } 307 | } 308 | func TestEntityJSON(t *testing.T) { 309 | seed := time.Now().UnixNano() 310 | popr := math_rand.New(math_rand.NewSource(seed)) 311 | p := 
NewPopulatedEntity(popr, true) 312 | marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 313 | jsondata, err := marshaler.MarshalToString(p) 314 | if err != nil { 315 | t.Fatalf("seed = %d, err = %v", seed, err) 316 | } 317 | msg := &Entity{} 318 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 319 | if err != nil { 320 | t.Fatalf("seed = %d, err = %v", seed, err) 321 | } 322 | if !p.Equal(msg) { 323 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 324 | } 325 | } 326 | func TestNothingJSON(t *testing.T) { 327 | seed := time.Now().UnixNano() 328 | popr := math_rand.New(math_rand.NewSource(seed)) 329 | p := NewPopulatedNothing(popr, true) 330 | marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 331 | jsondata, err := marshaler.MarshalToString(p) 332 | if err != nil { 333 | t.Fatalf("seed = %d, err = %v", seed, err) 334 | } 335 | msg := &Nothing{} 336 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 337 | if err != nil { 338 | t.Fatalf("seed = %d, err = %v", seed, err) 339 | } 340 | if !p.Equal(msg) { 341 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 342 | } 343 | } 344 | func TestDomainJSON(t *testing.T) { 345 | seed := time.Now().UnixNano() 346 | popr := math_rand.New(math_rand.NewSource(seed)) 347 | p := NewPopulatedDomain(popr, true) 348 | marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 349 | jsondata, err := marshaler.MarshalToString(p) 350 | if err != nil { 351 | t.Fatalf("seed = %d, err = %v", seed, err) 352 | } 353 | msg := &Domain{} 354 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 355 | if err != nil { 356 | t.Fatalf("seed = %d, err = %v", seed, err) 357 | } 358 | if !p.Equal(msg) { 359 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 360 | } 361 | } 362 | func TestIteratorJSON(t *testing.T) { 363 | seed := time.Now().UnixNano() 364 | popr := math_rand.New(math_rand.NewSource(seed)) 365 | p := NewPopulatedIterator(popr, true) 366 | 
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 367 | jsondata, err := marshaler.MarshalToString(p) 368 | if err != nil { 369 | t.Fatalf("seed = %d, err = %v", seed, err) 370 | } 371 | msg := &Iterator{} 372 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 373 | if err != nil { 374 | t.Fatalf("seed = %d, err = %v", seed, err) 375 | } 376 | if !p.Equal(msg) { 377 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 378 | } 379 | } 380 | func TestStatsJSON(t *testing.T) { 381 | seed := time.Now().UnixNano() 382 | popr := math_rand.New(math_rand.NewSource(seed)) 383 | p := NewPopulatedStats(popr, true) 384 | marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 385 | jsondata, err := marshaler.MarshalToString(p) 386 | if err != nil { 387 | t.Fatalf("seed = %d, err = %v", seed, err) 388 | } 389 | msg := &Stats{} 390 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 391 | if err != nil { 392 | t.Fatalf("seed = %d, err = %v", seed, err) 393 | } 394 | if !p.Equal(msg) { 395 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 396 | } 397 | } 398 | func TestInitJSON(t *testing.T) { 399 | seed := time.Now().UnixNano() 400 | popr := math_rand.New(math_rand.NewSource(seed)) 401 | p := NewPopulatedInit(popr, true) 402 | marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} 403 | jsondata, err := marshaler.MarshalToString(p) 404 | if err != nil { 405 | t.Fatalf("seed = %d, err = %v", seed, err) 406 | } 407 | msg := &Init{} 408 | err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) 409 | if err != nil { 410 | t.Fatalf("seed = %d, err = %v", seed, err) 411 | } 412 | if !p.Equal(msg) { 413 | t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) 414 | } 415 | } 416 | func TestBatchProtoText(t *testing.T) { 417 | seed := time.Now().UnixNano() 418 | popr := math_rand.New(math_rand.NewSource(seed)) 419 | p := NewPopulatedBatch(popr, true) 420 | dAtA := 
github_com_gogo_protobuf_proto.MarshalTextString(p) 421 | msg := &Batch{} 422 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 423 | t.Fatalf("seed = %d, err = %v", seed, err) 424 | } 425 | if !p.Equal(msg) { 426 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 427 | } 428 | } 429 | 430 | func TestBatchProtoCompactText(t *testing.T) { 431 | seed := time.Now().UnixNano() 432 | popr := math_rand.New(math_rand.NewSource(seed)) 433 | p := NewPopulatedBatch(popr, true) 434 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 435 | msg := &Batch{} 436 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 437 | t.Fatalf("seed = %d, err = %v", seed, err) 438 | } 439 | if !p.Equal(msg) { 440 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 441 | } 442 | } 443 | 444 | func TestOperationProtoText(t *testing.T) { 445 | seed := time.Now().UnixNano() 446 | popr := math_rand.New(math_rand.NewSource(seed)) 447 | p := NewPopulatedOperation(popr, true) 448 | dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) 449 | msg := &Operation{} 450 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 451 | t.Fatalf("seed = %d, err = %v", seed, err) 452 | } 453 | if !p.Equal(msg) { 454 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 455 | } 456 | } 457 | 458 | func TestOperationProtoCompactText(t *testing.T) { 459 | seed := time.Now().UnixNano() 460 | popr := math_rand.New(math_rand.NewSource(seed)) 461 | p := NewPopulatedOperation(popr, true) 462 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 463 | msg := &Operation{} 464 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 465 | t.Fatalf("seed = %d, err = %v", seed, err) 466 | } 467 | if !p.Equal(msg) { 468 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 469 | } 470 | } 471 | 472 | func TestEntityProtoText(t *testing.T) { 473 | seed := time.Now().UnixNano() 474 | popr := 
math_rand.New(math_rand.NewSource(seed)) 475 | p := NewPopulatedEntity(popr, true) 476 | dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) 477 | msg := &Entity{} 478 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 479 | t.Fatalf("seed = %d, err = %v", seed, err) 480 | } 481 | if !p.Equal(msg) { 482 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 483 | } 484 | } 485 | 486 | func TestEntityProtoCompactText(t *testing.T) { 487 | seed := time.Now().UnixNano() 488 | popr := math_rand.New(math_rand.NewSource(seed)) 489 | p := NewPopulatedEntity(popr, true) 490 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 491 | msg := &Entity{} 492 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 493 | t.Fatalf("seed = %d, err = %v", seed, err) 494 | } 495 | if !p.Equal(msg) { 496 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 497 | } 498 | } 499 | 500 | func TestNothingProtoText(t *testing.T) { 501 | seed := time.Now().UnixNano() 502 | popr := math_rand.New(math_rand.NewSource(seed)) 503 | p := NewPopulatedNothing(popr, true) 504 | dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) 505 | msg := &Nothing{} 506 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 507 | t.Fatalf("seed = %d, err = %v", seed, err) 508 | } 509 | if !p.Equal(msg) { 510 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 511 | } 512 | } 513 | 514 | func TestNothingProtoCompactText(t *testing.T) { 515 | seed := time.Now().UnixNano() 516 | popr := math_rand.New(math_rand.NewSource(seed)) 517 | p := NewPopulatedNothing(popr, true) 518 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 519 | msg := &Nothing{} 520 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 521 | t.Fatalf("seed = %d, err = %v", seed, err) 522 | } 523 | if !p.Equal(msg) { 524 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 525 | } 526 | } 527 | 528 | 
func TestDomainProtoText(t *testing.T) { 529 | seed := time.Now().UnixNano() 530 | popr := math_rand.New(math_rand.NewSource(seed)) 531 | p := NewPopulatedDomain(popr, true) 532 | dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) 533 | msg := &Domain{} 534 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 535 | t.Fatalf("seed = %d, err = %v", seed, err) 536 | } 537 | if !p.Equal(msg) { 538 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 539 | } 540 | } 541 | 542 | func TestDomainProtoCompactText(t *testing.T) { 543 | seed := time.Now().UnixNano() 544 | popr := math_rand.New(math_rand.NewSource(seed)) 545 | p := NewPopulatedDomain(popr, true) 546 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 547 | msg := &Domain{} 548 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 549 | t.Fatalf("seed = %d, err = %v", seed, err) 550 | } 551 | if !p.Equal(msg) { 552 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 553 | } 554 | } 555 | 556 | func TestIteratorProtoText(t *testing.T) { 557 | seed := time.Now().UnixNano() 558 | popr := math_rand.New(math_rand.NewSource(seed)) 559 | p := NewPopulatedIterator(popr, true) 560 | dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) 561 | msg := &Iterator{} 562 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 563 | t.Fatalf("seed = %d, err = %v", seed, err) 564 | } 565 | if !p.Equal(msg) { 566 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 567 | } 568 | } 569 | 570 | func TestIteratorProtoCompactText(t *testing.T) { 571 | seed := time.Now().UnixNano() 572 | popr := math_rand.New(math_rand.NewSource(seed)) 573 | p := NewPopulatedIterator(popr, true) 574 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 575 | msg := &Iterator{} 576 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 577 | t.Fatalf("seed = %d, err = %v", seed, err) 578 | } 579 | if 
!p.Equal(msg) { 580 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 581 | } 582 | } 583 | 584 | func TestStatsProtoText(t *testing.T) { 585 | seed := time.Now().UnixNano() 586 | popr := math_rand.New(math_rand.NewSource(seed)) 587 | p := NewPopulatedStats(popr, true) 588 | dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) 589 | msg := &Stats{} 590 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 591 | t.Fatalf("seed = %d, err = %v", seed, err) 592 | } 593 | if !p.Equal(msg) { 594 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 595 | } 596 | } 597 | 598 | func TestStatsProtoCompactText(t *testing.T) { 599 | seed := time.Now().UnixNano() 600 | popr := math_rand.New(math_rand.NewSource(seed)) 601 | p := NewPopulatedStats(popr, true) 602 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 603 | msg := &Stats{} 604 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 605 | t.Fatalf("seed = %d, err = %v", seed, err) 606 | } 607 | if !p.Equal(msg) { 608 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 609 | } 610 | } 611 | 612 | func TestInitProtoText(t *testing.T) { 613 | seed := time.Now().UnixNano() 614 | popr := math_rand.New(math_rand.NewSource(seed)) 615 | p := NewPopulatedInit(popr, true) 616 | dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) 617 | msg := &Init{} 618 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 619 | t.Fatalf("seed = %d, err = %v", seed, err) 620 | } 621 | if !p.Equal(msg) { 622 | t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) 623 | } 624 | } 625 | 626 | func TestInitProtoCompactText(t *testing.T) { 627 | seed := time.Now().UnixNano() 628 | popr := math_rand.New(math_rand.NewSource(seed)) 629 | p := NewPopulatedInit(popr, true) 630 | dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) 631 | msg := &Init{} 632 | if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { 
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
}

//These tests are generated by github.com/gogo/protobuf/plugin/testgen
-------------------------------------------------------------------------------- /remotedb/remotedb.go: --------------------------------------------------------------------------------
package remotedb

import (
	"context"
	"errors"
	"fmt"

	db "github.com/tendermint/tm-db"
	"github.com/tendermint/tm-db/remotedb/grpcdb"
	protodb "github.com/tendermint/tm-db/remotedb/proto"
)

// RemoteDB is a db.DB implementation that forwards every operation over
// gRPC to a remote database server.
type RemoteDB struct {
	ctx context.Context  // context attached to every outgoing RPC
	dc  protodb.DBClient // generated gRPC client for the remote server
}

// NewRemoteDB dials the database server at serverAddr and returns a
// client wrapper. serverKey is passed through to grpcdb.NewClient
// (presumably the TLS credential used for the dial — see grpcdb).
func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) {
	return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey))
}

// newRemoteDB adapts the (client, error) pair returned by
// grpcdb.NewClient, propagating a dial error unchanged.
func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) {
	if err != nil {
		return nil, err
	}
	return &RemoteDB{dc: gdc, ctx: context.Background()}, nil
}

// Init describes the remote database to create or open: its directory,
// name, and backend type (e.g. "goleveldb").
type Init struct {
	Dir  string
	Name string
	Type string
}

// InitRemote asks the server to initialize the database described by in.
func (rd *RemoteDB) InitRemote(in *Init) error {
	_, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name})
	return err
}

var _ db.DB = (*RemoteDB)(nil)

// Close is a noop currently
func (rd *RemoteDB) Close() error {
	return nil
}

// Delete removes key via the server's Delete RPC.
func (rd *RemoteDB) Delete(key []byte) error {
	if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil {
		return fmt.Errorf("remoteDB.Delete: %w", err)
	}
	return nil
}

// DeleteSync removes key via the server's synchronous Delete RPC.
func (rd *RemoteDB) DeleteSync(key []byte) error {
	if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil {
		return fmt.Errorf("remoteDB.DeleteSync: %w", err)
	}
	return nil
}

// Set stores value under key via the server's Set RPC.
func (rd *RemoteDB) Set(key,
value []byte) error {
	if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
		return fmt.Errorf("remoteDB.Set: %w", err)
	}
	return nil
}

// SetSync stores value under key via the server's synchronous Set RPC.
func (rd *RemoteDB) SetSync(key, value []byte) error {
	if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
		return fmt.Errorf("remoteDB.SetSync: %w", err)
	}
	return nil
}

// Get fetches the value stored under key from the remote server.
func (rd *RemoteDB) Get(key []byte) ([]byte, error) {
	res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key})
	if err != nil {
		return nil, fmt.Errorf("remoteDB.Get error: %w", err)
	}
	return res.Value, nil
}

// Has reports whether key exists on the remote server.
func (rd *RemoteDB) Has(key []byte) (bool, error) {
	res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key})
	if err != nil {
		return false, err
	}
	return res.Exists, nil
}

// ReverseIterator opens a server-side reverse iterator over
// [start, end) and adapts the resulting gRPC stream to db.Iterator.
func (rd *RemoteDB) ReverseIterator(start, end []byte) (db.Iterator, error) {
	dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end})
	if err != nil {
		// Fix: this message previously said "RemoteDB.Iterator error",
		// copy-pasted from Iterator; name the method that actually failed.
		return nil, fmt.Errorf("RemoteDB.ReverseIterator error: %w", err)
	}
	return makeReverseIterator(dic), nil
}

// NewBatch returns a write batch targeting the remote database
// (see newBatch in prefixdb_batch.go's remote counterpart, batch.go).
func (rd *RemoteDB) NewBatch() db.Batch {
	return newBatch(rd)
}

// TODO: Implement Print when db.DB implements a method
// to print to a string and not db.Print to stdout.
// Print is unimplemented; see the TODO above.
func (rd *RemoteDB) Print() error {
	return errors.New("remoteDB.Print: unimplemented")
}

// Stats fetches server-side stats. Errors are deliberately swallowed
// (the db.DB Stats signature has no error return): nil is returned on
// any RPC failure or a nil response.
func (rd *RemoteDB) Stats() map[string]string {
	stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{})
	if err != nil || stats == nil {
		return nil
	}
	return stats.Data
}

// Iterator opens a server-side forward iterator over [start, end) and
// adapts the resulting gRPC stream to db.Iterator.
func (rd *RemoteDB) Iterator(start, end []byte) (db.Iterator, error) {
	dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end})
	if err != nil {
		return nil, fmt.Errorf("RemoteDB.Iterator error: %w", err)
	}
	return makeIterator(dic), nil
}
-------------------------------------------------------------------------------- /remotedb/remotedb_test.go: --------------------------------------------------------------------------------
package remotedb_test

import (
	"net"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tm-db/remotedb"
	"github.com/tendermint/tm-db/remotedb/grpcdb"
)

// TestRemoteDB starts an in-process gRPC database server on an
// ephemeral port and exercises the full client API against it:
// get/set (async and sync), iteration, deletion, and batches.
func TestRemoteDB(t *testing.T) {
	cert := "test.crt"
	key := "test.key"
	// Port 0 lets the OS pick a free port; the client dials ln.Addr().
	ln, err := net.Listen("tcp", "localhost:0")
	require.Nil(t, err, "expecting a port to have been assigned on which we can listen")
	srv, err := grpcdb.NewServer(cert, key)
	require.Nil(t, err)
	defer srv.Stop()
	go func() {
		if err := srv.Serve(ln); err != nil {
			panic(err)
		}
	}()

	client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert)
	require.Nil(t, err, "expecting a successful client creation")
	dbName := "test-remote-db"
	require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"}))
	defer os.RemoveAll(dbName + ".db")

	k1 := []byte("key-1")
	v1, err := client.Get(k1)
	require.NoError(t, err)
	require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1)
	vv1 := []byte("value-1")
	err = client.Set(k1, vv1)
	require.NoError(t, err)

	gv1, err := client.Get(k1)
	require.NoError(t, err)
	require.Equal(t, gv1, vv1)

	// Simple iteration
	itr, err := client.Iterator(nil, nil)
	require.NoError(t, err)
	assert.True(t, itr.Valid())

	key1 := itr.Key()
	value := itr.Value()

	require.Equal(t, key1, []byte("key-1"))
	require.Equal(t, value, []byte("value-1"))
	itr.Close()

	// Set some more keys
	k2 := []byte("key-2")
	v2 := []byte("value-2")
	err = client.SetSync(k2, v2)
	require.NoError(t, err)
	has, err := client.Has(k2)
	require.NoError(t, err)
	require.True(t, has)
	gv2, err := client.Get(k2)
	require.NoError(t, err)
	require.Equal(t, gv2, v2)

	// More iteration: keys come back in ascending order (key-1, key-2).
	itr, err = client.Iterator(nil, nil)
	require.NoError(t, err)

	key1 = itr.Key()
	value = itr.Value()

	require.Equal(t, key1, []byte("key-1"))
	require.Equal(t, value, []byte("value-1"))
	itr.Next()

	key1 = itr.Key()

	value = itr.Value()
	require.Equal(t, key1, []byte("key-2"))
	require.Equal(t, value, []byte("value-2"))
	itr.Close()

	// Deletion: a Get for a deleted key succeeds with an empty value.
	err = client.Delete(k1)
	require.NoError(t, err)
	err = client.DeleteSync(k2)
	require.NoError(t, err)
	gv1, err = client.Get(k1)
	require.NoError(t, err)
	gv2, err = client.Get(k2)
	require.NoError(t, err)
	require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore")
	require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore")

	// Batch tests - set
	k3 := []byte("key-3")
	k4 := []byte("key-4")
	k5 := []byte("key-5")
	v3 := []byte("value-3")
	v4 := []byte("value-4")
	v5 := []byte("value-5")
	bat := client.NewBatch()
	err = bat.Set(k3, v3)
	require.NoError(t, err)
	err = bat.Set(k4, v4)
	require.NoError(t, err)

	// Nothing is visible until the batch is actually written.
	rv3, err := client.Get(k3)
	require.NoError(t, err)
	require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored")

	rv4, err := client.Get(k4)
	require.NoError(t, err)
	require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored")
	err = bat.Write()
	require.NoError(t, err)

	rv3, err = client.Get(k3)
	require.NoError(t, err)
	require.Equal(t, rv3, v3, "expecting k3 to have been stored")

	rv4, err = client.Get(k4)
	require.NoError(t, err)
	require.Equal(t, rv4, v4, "expecting k4 to have been stored")

	// Batch tests - deletion
	bat = client.NewBatch()
	err = bat.Delete(k4)
	require.NoError(t, err)
	err = bat.Delete(k3)
	require.NoError(t, err)
	err = bat.WriteSync()
	require.NoError(t, err)

	rv3, err = client.Get(k3)
	require.NoError(t, err)
	require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted")

	rv4, err = client.Get(k4)
	require.NoError(t, err)
	require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")

	// Batch tests - set and delete
	bat = client.NewBatch()
	err = bat.Set(k4, v4)
	require.NoError(t, err)
	err = bat.Set(k5, v5)
	require.NoError(t, err)
	err = bat.Delete(k4)
	require.NoError(t, err)
	err = bat.WriteSync()
	require.NoError(t, err)

	// Within one batch, a Delete after a Set of the same key wins.
	rv4, err = client.Get(k4)
	require.NoError(t, err)
	require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")

	rv5, err := client.Get(k5)
	require.NoError(t, err)
	require.Equal(t, rv5, v5, "expecting k5 to have been stored")
}
-------------------------------------------------------------------------------- /remotedb/test.crt: --------------------------------------------------------------------------------
-----BEGIN CERTIFICATE-----
2 | MIIEOjCCAiKgAwIBAgIQELi28YSz7wnGMCFY/LkGTDANBgkqhkiG9w0BAQsFADAZ 3 | MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0yMjA3MjYxMTQ1NTVaFw0yNDAx 4 | MjYxMTQ1NTVaMBMxETAPBgNVBAMTCHJlbW90ZWRiMIIBIjANBgkqhkiG9w0BAQEF 5 | AAOCAQ8AMIIBCgKCAQEA93GhF6GfEyh7VRBsLGlKMsG7rbE5Z6HjoMzWvURXjV2C 6 | jsSfCmmVhv6UeaTQc8Wci7Sdabwm9eTiv1ikeY0ZYt+oZdcvhjXP4+so4yhPiBMk 7 | II/Ds4VcojZ+aGXdbvhcdemFy0ZpvQ1nqJGdKgMt7CSExe8/Q06Xgy3JzlhlVMlb 8 | KRC1OywwjxTxCjdA1MzmUG+P/4wYls0ejvco87UfSmaIm6GJwi3H9QlrtPAaI7JH 9 | sZS1puR4JkA1xusBY1A5LeWLDaCmiSYh2x2NC7CiF23Fj5K4YQegQ1TsZxgS7pG1 10 | OZiAsa65V/pNXI3MAGk7k0Yb6Ai+IhzaczHAvhf4VQIDAQABo4GDMIGAMA4GA1Ud 11 | DwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O 12 | BBYEFKaGp/Pd8Iy9l+V5oeFAbv0MylOTMB8GA1UdIwQYMBaAFNSp7CXUG885Al3n 13 | oJjlAZ0I+/yfMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAHfM 14 | odmek2HhUKzmqBSaLMHstyiGlxUDuUALfkJY8TNAtC02J9TIrAcT5dqT7zdKq3d5 15 | AldjBMcj3R9WlUPZv4prEvaLJSCpUilgGETVo4Q6EjTwplUiC558XspORF5WAZuB 16 | 73gxrz6rc8zUXDKcf0ey/kV/WOFS4ICjVhJMVa8hesd9JiQIqYnf0N1XrXk/YAqf 17 | 10lH+AWza5EsVH4sg45DVdwM45OxRIK1fQbSpBYczpT+UocGQWe1J2ehthFlh5Ab 18 | V9OQ4TdJEdIs/p9WSZ9tmRlXJVvo2A9wWD3NOOSWiAXhKBBr5QsqNqaMjqQpajbT 19 | STaQS3zeVAEYKdOUGy16ymfBm4nwcneDbwFOAGMahu3l/V4vlXA+Eb5tdSX1kwcf 20 | l3ImQglBwwXTXPf1yyLXOVHFxqXTZI9fVOFB1qd4l76aGN0fBvnleqbhmXNohcVe 21 | B/x/liaiGhL/udhDM7Y+dspw8LmNTGsqYGgYKmD3prFfWrkYcaRc32bPs9fk8hOz 22 | e7vCQwSAbTEfXZ9/q7MwNcBG35iz37GJa9tV5Mg5UbJeZzj85PpnfIcCu6bmHlA9 23 | ROlJ8XF0TDqtvQrxz3WGaSm22DgkUP/Z+anCy+7E78dX1Ef+d1lg045PiHGijFIC 24 | 5NMT9dYCR7J3c6S6fDkSl0/iR0v5URCFJ7t2xjIC 25 | -----END CERTIFICATE----- 26 | -------------------------------------------------------------------------------- /remotedb/test.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpQIBAAKCAQEA93GhF6GfEyh7VRBsLGlKMsG7rbE5Z6HjoMzWvURXjV2CjsSf 3 | CmmVhv6UeaTQc8Wci7Sdabwm9eTiv1ikeY0ZYt+oZdcvhjXP4+so4yhPiBMkII/D 4 | 
s4VcojZ+aGXdbvhcdemFy0ZpvQ1nqJGdKgMt7CSExe8/Q06Xgy3JzlhlVMlbKRC1 5 | OywwjxTxCjdA1MzmUG+P/4wYls0ejvco87UfSmaIm6GJwi3H9QlrtPAaI7JHsZS1 6 | puR4JkA1xusBY1A5LeWLDaCmiSYh2x2NC7CiF23Fj5K4YQegQ1TsZxgS7pG1OZiA 7 | sa65V/pNXI3MAGk7k0Yb6Ai+IhzaczHAvhf4VQIDAQABAoIBAQC2tuA2O/Djy6uu 8 | d275KFJSwn2cV1ZFMOSN01Pp9DIWP+ttEsFBhg+U3B206T/HjS74dkkaT4YRYo8Q 9 | rhrdapRJT0/gy5HbL5cv/HB3tEdt+nxd0uq2gA6T9VtIKZfmHlzf8K0MGhVwNRrK 10 | /aMo56ocSicEpZJ4V+tHQSNYTCue+5CzAsBzebh96/F0xx7Tb9AkjE3P0IPtgBgq 11 | z0xT8rZxlxwDXgS1kw0EGGCFAFInJTziOqgS7rI6A6mznxNYAgWitziBiQrljXQj 12 | 3zssbt+nQ5iZLlICRBMgZXzOmI5nH5EXqYY+ZPANHpzb52MCMrA/3VNfxQj0OHth 13 | ELH1BoptAoGBAPtRKv7hd+B6DXyuNZNpx33ELrS1dFC/1mtAN+6hBXDbTyYrNpcb 14 | 83BDP7l8W8RIZyh/6iG2lc8VQQxYHUPqHVfUSn4MLxFvF1vrxuROaCleSy2TXcwS 15 | FFtBaBF8Mz2Gpnojhpu7jZ5FcG0gJ+oScRTSzCwY/Ko/Q1VaiZfOWAVjAoGBAPwN 16 | /ELfkp2VNZY46NUQVhFMMbGI34XJ1W7oe63GwnrkRIbAqs7/Kj3tD3vy3PNgB1HK 17 | ZERAgq/a4rwaOl2YvcAhTKgkYEbHpdz0ktv9qu3ShqJOpD1wOAGKmE8XXXuu/unw 18 | /ScXqWwFH/bqs+/wktmzJHWWbmOOy1LX8DhwETTnAoGANFI1rVKrbmR6olZyePow 19 | uhI51w1f5d/KeBGqk1ealmBSHhQpDVSYXeriPW+Se07Hizr2N4aXscEvBa7iiN0Y 20 | tsxPpeZLdkm2h0CS670XGmWzKQ3hHTc2XblEPT+qO0jpJ8x1nb5yQV0bhtyG8shc 21 | GoW4VAXvtFHvZrmuo5gl4xkCgYEA+YwLJlZdvWC9xjYf5tqeq8+JH6FI1BfJFV5d 22 | HOa9I5iec9+K/RfKRbdP7kK8GMUJWiQMczp/aQZIFz3MbWBM9UzCrXIeU9VUVNdc 23 | Eywpr/4QR9+eYimZeYUzWJLkfhD61rk+mhamKYlFZVxnu/WuHpVrUnQWZME6cpHS 24 | hr4Fex8CgYEA1ma2f7DppXXZYEReqedsf+sN5/7KmnEH4H/f/7SAPLBLTJhV9zua 25 | Mv3ur8wZH1NOb36o0jzFGdwXqjF1ubbJihKcJHz658q6lVbnaREa1NenZiVrm7H6 26 | SOHxWjPSvyUoj91ci4/5xeVWWtX2YyGHSMfJs5h4PNBVQPfON+uIhwQ= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /rocksdb.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "path/filepath" 9 | "runtime" 10 | 11 | "github.com/cosmos/gorocksdb" 12 | ) 13 | 14 | func 
init() { 15 | dbCreator := func(name string, dir string) (DB, error) { 16 | return NewRocksDB(name, dir) 17 | } 18 | registerDBCreator(RocksDBBackend, dbCreator, false) 19 | } 20 | 21 | // RocksDB is a RocksDB backend. 22 | type RocksDB struct { 23 | db *gorocksdb.DB 24 | ro *gorocksdb.ReadOptions 25 | wo *gorocksdb.WriteOptions 26 | woSync *gorocksdb.WriteOptions 27 | } 28 | 29 | var _ DB = (*RocksDB)(nil) 30 | 31 | func NewRocksDB(name string, dir string) (*RocksDB, error) { 32 | // default rocksdb option, good enough for most cases, including heavy workloads. 33 | // 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads). 34 | // compression: snappy as default, need to -lsnappy to enable. 35 | bbto := gorocksdb.NewDefaultBlockBasedTableOptions() 36 | bbto.SetBlockCache(gorocksdb.NewLRUCache(1 << 30)) 37 | bbto.SetFilterPolicy(gorocksdb.NewBloomFilter(10)) 38 | 39 | opts := gorocksdb.NewDefaultOptions() 40 | opts.SetBlockBasedTableFactory(bbto) 41 | // SetMaxOpenFiles to 4096 seems to provide a reliable performance boost 42 | opts.SetMaxOpenFiles(4096) 43 | opts.SetCreateIfMissing(true) 44 | opts.IncreaseParallelism(runtime.NumCPU()) 45 | // 1.5GB maximum memory use for writebuffer. 46 | opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) 47 | return NewRocksDBWithOptions(name, dir, opts) 48 | } 49 | 50 | func NewRocksDBWithOptions(name string, dir string, opts *gorocksdb.Options) (*RocksDB, error) { 51 | dbPath := filepath.Join(dir, name+".db") 52 | db, err := gorocksdb.OpenDb(opts, dbPath) 53 | if err != nil { 54 | return nil, err 55 | } 56 | ro := gorocksdb.NewDefaultReadOptions() 57 | wo := gorocksdb.NewDefaultWriteOptions() 58 | woSync := gorocksdb.NewDefaultWriteOptions() 59 | woSync.SetSync(true) 60 | database := &RocksDB{ 61 | db: db, 62 | ro: ro, 63 | wo: wo, 64 | woSync: woSync, 65 | } 66 | return database, nil 67 | } 68 | 69 | // Get implements DB. 
70 | func (db *RocksDB) Get(key []byte) ([]byte, error) { 71 | if len(key) == 0 { 72 | return nil, errKeyEmpty 73 | } 74 | res, err := db.db.Get(db.ro, key) 75 | if err != nil { 76 | return nil, err 77 | } 78 | return moveSliceToBytes(res), nil 79 | } 80 | 81 | // Has implements DB. 82 | func (db *RocksDB) Has(key []byte) (bool, error) { 83 | bytes, err := db.Get(key) 84 | if err != nil { 85 | return false, err 86 | } 87 | return bytes != nil, nil 88 | } 89 | 90 | // Set implements DB. 91 | func (db *RocksDB) Set(key []byte, value []byte) error { 92 | if len(key) == 0 { 93 | return errKeyEmpty 94 | } 95 | if value == nil { 96 | return errValueNil 97 | } 98 | return db.db.Put(db.wo, key, value) 99 | } 100 | 101 | // SetSync implements DB. 102 | func (db *RocksDB) SetSync(key []byte, value []byte) error { 103 | if len(key) == 0 { 104 | return errKeyEmpty 105 | } 106 | if value == nil { 107 | return errValueNil 108 | } 109 | return db.db.Put(db.woSync, key, value) 110 | } 111 | 112 | // Delete implements DB. 113 | func (db *RocksDB) Delete(key []byte) error { 114 | if len(key) == 0 { 115 | return errKeyEmpty 116 | } 117 | return db.db.Delete(db.wo, key) 118 | } 119 | 120 | // DeleteSync implements DB. 121 | func (db *RocksDB) DeleteSync(key []byte) error { 122 | if len(key) == 0 { 123 | return errKeyEmpty 124 | } 125 | return db.db.Delete(db.woSync, key) 126 | } 127 | 128 | func (db *RocksDB) DB() *gorocksdb.DB { 129 | return db.db 130 | } 131 | 132 | // Close implements DB. 133 | func (db *RocksDB) Close() error { 134 | db.ro.Destroy() 135 | db.wo.Destroy() 136 | db.woSync.Destroy() 137 | db.db.Close() 138 | return nil 139 | } 140 | 141 | // Print implements DB. 
142 | func (db *RocksDB) Print() error { 143 | itr, err := db.Iterator(nil, nil) 144 | if err != nil { 145 | return err 146 | } 147 | defer itr.Close() 148 | for ; itr.Valid(); itr.Next() { 149 | key := itr.Key() 150 | value := itr.Value() 151 | fmt.Printf("[%X]:\t[%X]\n", key, value) 152 | } 153 | return nil 154 | } 155 | 156 | // Stats implements DB. 157 | func (db *RocksDB) Stats() map[string]string { 158 | keys := []string{"rocksdb.stats"} 159 | stats := make(map[string]string, len(keys)) 160 | for _, key := range keys { 161 | stats[key] = db.db.GetProperty(key) 162 | } 163 | return stats 164 | } 165 | 166 | // NewBatch implements DB. 167 | func (db *RocksDB) NewBatch() Batch { 168 | return newRocksDBBatch(db) 169 | } 170 | 171 | // Iterator implements DB. 172 | func (db *RocksDB) Iterator(start, end []byte) (Iterator, error) { 173 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 174 | return nil, errKeyEmpty 175 | } 176 | itr := db.db.NewIterator(db.ro) 177 | return newRocksDBIterator(itr, start, end, false), nil 178 | } 179 | 180 | // ReverseIterator implements DB. 
181 | func (db *RocksDB) ReverseIterator(start, end []byte) (Iterator, error) { 182 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 183 | return nil, errKeyEmpty 184 | } 185 | itr := db.db.NewIterator(db.ro) 186 | return newRocksDBIterator(itr, start, end, true), nil 187 | } 188 | -------------------------------------------------------------------------------- /rocksdb_batch.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import "github.com/cosmos/gorocksdb" 7 | 8 | // rocksDBBatch buffers writes in a gorocksdb.WriteBatch until Write/WriteSync. 9 | type rocksDBBatch struct { 10 | db *RocksDB 11 | batch *gorocksdb.WriteBatch 12 | } 13 | 14 | var _ Batch = (*rocksDBBatch)(nil) 15 | 16 | func newRocksDBBatch(db *RocksDB) *rocksDBBatch { 17 | return &rocksDBBatch{ 18 | db: db, 19 | batch: gorocksdb.NewWriteBatch(), 20 | } 21 | } 22 | 23 | // Set implements Batch. 24 | func (b *rocksDBBatch) Set(key, value []byte) error { 25 | if len(key) == 0 { 26 | return errKeyEmpty 27 | } 28 | if value == nil { 29 | return errValueNil 30 | } 31 | if b.batch == nil { 32 | return errBatchClosed 33 | } 34 | b.batch.Put(key, value) 35 | return nil 36 | } 37 | 38 | // Delete implements Batch. 39 | func (b *rocksDBBatch) Delete(key []byte) error { 40 | if len(key) == 0 { 41 | return errKeyEmpty 42 | } 43 | if b.batch == nil { 44 | return errBatchClosed 45 | } 46 | b.batch.Delete(key) 47 | return nil 48 | } 49 | 50 | // Write implements Batch. 51 | func (b *rocksDBBatch) Write() error { 52 | if b.batch == nil { 53 | return errBatchClosed 54 | } 55 | err := b.db.db.Write(b.db.wo, b.batch) 56 | if err != nil { 57 | return err 58 | } 59 | // Make sure batch cannot be used afterwards. Propagate Close's result so any 60 | // close error is surfaced, consistent with WriteSync. 61 | return b.Close() 62 | } 63 | 64 | // WriteSync implements Batch. 
64 | func (b *rocksDBBatch) WriteSync() error { 65 | if b.batch == nil { 66 | return errBatchClosed 67 | } 68 | err := b.db.db.Write(b.db.woSync, b.batch) 69 | if err != nil { 70 | return err 71 | } 72 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 73 | return b.Close() 74 | } 75 | 76 | // Close implements Batch. 77 | func (b *rocksDBBatch) Close() error { 78 | if b.batch != nil { 79 | b.batch.Destroy() 80 | b.batch = nil 81 | } 82 | return nil 83 | } 84 | -------------------------------------------------------------------------------- /rocksdb_iterator.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | 9 | "github.com/cosmos/gorocksdb" 10 | ) 11 | 12 | type rocksDBIterator struct { 13 | source *gorocksdb.Iterator 14 | start, end []byte 15 | isReverse bool 16 | isInvalid bool 17 | } 18 | 19 | var _ Iterator = (*rocksDBIterator)(nil) 20 | 21 | func newRocksDBIterator(source *gorocksdb.Iterator, start, end []byte, isReverse bool) *rocksDBIterator { 22 | if isReverse { 23 | if end == nil { 24 | source.SeekToLast() 25 | } else { 26 | source.Seek(end) 27 | if source.Valid() { 28 | eoakey := moveSliceToBytes(source.Key()) // end or after key 29 | if bytes.Compare(end, eoakey) <= 0 { 30 | source.Prev() 31 | } 32 | } else { 33 | source.SeekToLast() 34 | } 35 | } 36 | } else { 37 | if start == nil { 38 | source.SeekToFirst() 39 | } else { 40 | source.Seek(start) 41 | } 42 | } 43 | return &rocksDBIterator{ 44 | source: source, 45 | start: start, 46 | end: end, 47 | isReverse: isReverse, 48 | isInvalid: false, 49 | } 50 | } 51 | 52 | // Domain implements Iterator. 53 | func (itr *rocksDBIterator) Domain() ([]byte, []byte) { 54 | return itr.start, itr.end 55 | } 56 | 57 | // Valid implements Iterator. 58 | func (itr *rocksDBIterator) Valid() bool { 59 | // Once invalid, forever invalid. 
60 | if itr.isInvalid { 61 | return false 62 | } 63 | 64 | // If source has error, invalid. 65 | if err := itr.source.Err(); err != nil { 66 | itr.isInvalid = true 67 | return false 68 | } 69 | 70 | // If source is invalid, invalid. 71 | if !itr.source.Valid() { 72 | itr.isInvalid = true 73 | return false 74 | } 75 | 76 | // If key is end or past it, invalid. 77 | start := itr.start 78 | end := itr.end 79 | key := moveSliceToBytes(itr.source.Key()) 80 | if itr.isReverse { 81 | if start != nil && bytes.Compare(key, start) < 0 { 82 | itr.isInvalid = true 83 | return false 84 | } 85 | } else { 86 | if end != nil && bytes.Compare(end, key) <= 0 { 87 | itr.isInvalid = true 88 | return false 89 | } 90 | } 91 | 92 | // It's valid. 93 | return true 94 | } 95 | 96 | // Key implements Iterator. 97 | func (itr *rocksDBIterator) Key() []byte { 98 | itr.assertIsValid() 99 | return moveSliceToBytes(itr.source.Key()) 100 | } 101 | 102 | // Value implements Iterator. 103 | func (itr *rocksDBIterator) Value() []byte { 104 | itr.assertIsValid() 105 | return moveSliceToBytes(itr.source.Value()) 106 | } 107 | 108 | // Next implements Iterator. The pointer receiver matches the other methods and ensures the isInvalid caching done by Valid (via assertIsValid) mutates this iterator rather than a copy. 109 | func (itr *rocksDBIterator) Next() { 110 | itr.assertIsValid() 111 | if itr.isReverse { 112 | itr.source.Prev() 113 | } else { 114 | itr.source.Next() 115 | } 116 | } 117 | 118 | // Error implements Iterator. 119 | func (itr *rocksDBIterator) Error() error { 120 | return itr.source.Err() 121 | } 122 | 123 | // Close implements Iterator. 124 | func (itr *rocksDBIterator) Close() error { 125 | itr.source.Close() 126 | return nil 127 | } 128 | 129 | func (itr *rocksDBIterator) assertIsValid() { 130 | if !itr.Valid() { 131 | panic("iterator is invalid") 132 | } 133 | } 134 | 135 | // moveSliceToBytes will free the slice and copy out a go []byte 136 | // This function can be applied on *Slice returned from Key() and Value() 137 | // of an Iterator, because they are marked as freed. 
138 | func moveSliceToBytes(s *gorocksdb.Slice) []byte { 139 | defer s.Free() 140 | if !s.Exists() { 141 | return nil 142 | } 143 | v := make([]byte, len(s.Data())) 144 | copy(v, s.Data()) 145 | return v 146 | } 147 | -------------------------------------------------------------------------------- /rocksdb_test.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "path/filepath" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | func TestRocksDBBackend(t *testing.T) { 17 | name := fmt.Sprintf("test_%x", randStr(12)) 18 | dir := os.TempDir() 19 | db, err := NewDB(name, RocksDBBackend, dir) 20 | require.NoError(t, err) 21 | defer cleanupDBDir(dir, name) 22 | 23 | _, ok := db.(*RocksDB) 24 | assert.True(t, ok) 25 | } 26 | 27 | func TestWithRocksDB(t *testing.T) { 28 | dir := t.TempDir() 29 | path := filepath.Join(dir, "rocksdb") 30 | 31 | db, err := NewRocksDB(path, "") 32 | require.NoError(t, err) 33 | 34 | t.Run("RocksDB", func(t *testing.T) { Run(t, db) }) 35 | } 36 | 37 | func TestRocksDBStats(t *testing.T) { 38 | name := fmt.Sprintf("test_%x", randStr(12)) 39 | dir := os.TempDir() 40 | db, err := NewDB(name, RocksDBBackend, dir) 41 | require.NoError(t, err) 42 | defer cleanupDBDir(dir, name) 43 | 44 | assert.NotEmpty(t, db.Stats()) 45 | } 46 | 47 | // TODO: Add tests for rocksdb 48 | -------------------------------------------------------------------------------- /test_helpers.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "math/rand" 4 | 5 | const ( 6 | strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters 7 | ) 8 | 9 | // For testing convenience. 
10 | func bz(s string) []byte { 11 | return []byte(s) 12 | } 13 | 14 | // Str constructs a random alphanumeric string of given length. 15 | func randStr(length int) string { 16 | chars := []byte{} 17 | MAIN_LOOP: 18 | for { 19 | val := rand.Int63() // nolint:gosec // G404: Use of weak random number generator 20 | for i := 0; i < 10; i++ { 21 | v := int(val & 0x3f) // rightmost 6 bits 22 | if v >= 62 { // only 62 characters in strChars 23 | val >>= 6 24 | continue 25 | } else { 26 | chars = append(chars, strChars[v]) 27 | if len(chars) == length { 28 | break MAIN_LOOP 29 | } 30 | val >>= 6 31 | } 32 | } 33 | } 34 | 35 | return string(chars) 36 | } 37 | -------------------------------------------------------------------------------- /tools/Dockerfile: -------------------------------------------------------------------------------- 1 | # This file defines the container image used to build and test tm-db in CI. 2 | # The CI workflows use the latest tag of tendermintdev/docker-tm-db-testing 3 | # built from these settings. 4 | # 5 | # The jobs defined in the Build & Push workflow will build and update the image 6 | # when changes to this file are merged. If you have other changes that require 7 | # updates here, merge the changes here first and let the image get updated (or 8 | # push a new version manually) before PRs that depend on them. 
9 | 10 | FROM golang:1.17-bullseye AS build 11 | 12 | ENV LD_LIBRARY_PATH=/usr/local/lib 13 | 14 | RUN apt-get update && apt-get install -y --no-install-recommends \ 15 | libbz2-dev libgflags-dev libsnappy-dev libzstd-dev zlib1g-dev \ 16 | make tar wget 17 | 18 | FROM build AS install 19 | ARG LEVELDB=1.20 20 | ARG ROCKSDB=6.24.2 21 | 22 | # Install cleveldb 23 | RUN \ 24 | wget -q https://github.com/google/leveldb/archive/v${LEVELDB}.tar.gz \ 25 | && tar xvf v${LEVELDB}.tar.gz \ 26 | && cd leveldb-${LEVELDB} \ 27 | && make \ 28 | && cp -a out-static/lib* out-shared/lib* /usr/local/lib \ 29 | && cd include \ 30 | && cp -a leveldb /usr/local/include \ 31 | && ldconfig \ 32 | && cd ../.. \ 33 | && rm -rf v${LEVELDB}.tar.gz leveldb-${LEVELDB} 34 | 35 | # Install Rocksdb 36 | RUN \ 37 | wget -q https://github.com/facebook/rocksdb/archive/v${ROCKSDB}.tar.gz \ 38 | && tar -zxf v${ROCKSDB}.tar.gz \ 39 | && cd rocksdb-${ROCKSDB} \ 40 | && DEBUG_LEVEL=0 make -j4 shared_lib \ 41 | && make install-shared \ 42 | && ldconfig \ 43 | && cd .. \ 44 | && rm -rf v${ROCKSDB}.tar.gz rocksdb-${ROCKSDB} 45 | -------------------------------------------------------------------------------- /types.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "errors" 4 | 5 | var ( 6 | // errBatchClosed is returned when a closed or written batch is used. 7 | errBatchClosed = errors.New("batch has been written or closed") 8 | 9 | // errKeyEmpty is returned when attempting to use an empty or nil key. 10 | errKeyEmpty = errors.New("key cannot be empty") 11 | 12 | // errValueNil is returned when attempting to set a nil value. 13 | errValueNil = errors.New("value cannot be nil") 14 | ) 15 | 16 | // DB is the main interface for all database backends. DBs are concurrency-safe. Callers must call 17 | // Close on the database when done. 18 | // 19 | // Keys cannot be nil or empty, while values cannot be nil. 
Keys and values should be considered 20 | // read-only, both when returned and when given, and must be copied before they are modified. 21 | type DB interface { 22 | // Get fetches the value of the given key, or nil if it does not exist. 23 | // CONTRACT: key, value readonly []byte 24 | Get([]byte) ([]byte, error) 25 | 26 | // Has checks if a key exists. 27 | // CONTRACT: key, value readonly []byte 28 | Has(key []byte) (bool, error) 29 | 30 | // Set sets the value for the given key, replacing it if it already exists. 31 | // CONTRACT: key, value readonly []byte 32 | Set([]byte, []byte) error 33 | 34 | // SetSync sets the value for the given key, and flushes it to storage before returning. 35 | SetSync([]byte, []byte) error 36 | 37 | // Delete deletes the key, or does nothing if the key does not exist. 38 | // CONTRACT: key readonly []byte 39 | Delete([]byte) error 40 | 41 | // DeleteSync deletes the key, and flushes the delete to storage before returning. 42 | DeleteSync([]byte) error 43 | 44 | // Iterator returns an iterator over a domain of keys, in ascending order. The caller must call 45 | // Close when done. End is exclusive, and start must be less than end. A nil start iterates 46 | // from the first key, and a nil end iterates to the last key (inclusive). Empty keys are not 47 | // valid. 48 | // CONTRACT: No writes may happen within a domain while an iterator exists over it. 49 | // CONTRACT: start, end readonly []byte 50 | Iterator(start, end []byte) (Iterator, error) 51 | 52 | // ReverseIterator returns an iterator over a domain of keys, in descending order. The caller 53 | // must call Close when done. End is exclusive, and start must be less than end. A nil end 54 | // iterates from the last key (inclusive), and a nil start iterates to the first key (inclusive). 55 | // Empty keys are not valid. 56 | // CONTRACT: No writes may happen within a domain while an iterator exists over it. 
57 | // CONTRACT: start, end readonly []byte 58 | ReverseIterator(start, end []byte) (Iterator, error) 59 | 60 | // Close closes the database connection. 61 | Close() error 62 | 63 | // NewBatch creates a batch for atomic updates. The caller must call Batch.Close. 64 | NewBatch() Batch 65 | 66 | // Print is used for debugging. 67 | Print() error 68 | 69 | // Stats returns a map of property values for all keys and the size of the cache. 70 | Stats() map[string]string 71 | } 72 | 73 | // Batch represents a group of writes. They may or may not be written atomically depending on the 74 | // backend. Callers must call Close on the batch when done. 75 | // 76 | // As with DB, given keys and values should be considered read-only, and must not be modified after 77 | // passing them to the batch. 78 | type Batch interface { 79 | // Set sets a key/value pair. 80 | // CONTRACT: key, value readonly []byte 81 | Set(key, value []byte) error 82 | 83 | // Delete deletes a key/value pair. 84 | // CONTRACT: key readonly []byte 85 | Delete(key []byte) error 86 | 87 | // Write writes the batch, possibly without flushing to disk. Only Close() can be called after, 88 | // other methods will error. 89 | Write() error 90 | 91 | // WriteSync writes the batch and flushes it to disk. Only Close() can be called after, other 92 | // methods will error. 93 | WriteSync() error 94 | 95 | // Close closes the batch. It is idempotent, but calls to other methods afterwards will error. 96 | Close() error 97 | } 98 | 99 | // Iterator represents an iterator over a domain of keys. Callers must call Close when done. 100 | // No writes can happen to a domain while there exists an iterator over it, some backends may take 101 | // out database locks to ensure this will not happen. 102 | // 103 | // Callers must make sure the iterator is valid before calling any methods on it, otherwise 104 | // these methods will panic. This is in part caused by most backend databases using this convention. 
105 | // 106 | // As with DB, keys and values should be considered read-only, and must be copied before they are 107 | // modified. 108 | // 109 | // Typical usage: 110 | // 111 | // var itr Iterator = ... 112 | // defer itr.Close() 113 | // 114 | // for ; itr.Valid(); itr.Next() { 115 | // k, v := itr.Key(); itr.Value() 116 | // ... 117 | // } 118 | // if err := itr.Error(); err != nil { 119 | // ... 120 | // } 121 | type Iterator interface { 122 | // Domain returns the start (inclusive) and end (exclusive) limits of the iterator. 123 | // CONTRACT: start, end readonly []byte 124 | Domain() (start []byte, end []byte) 125 | 126 | // Valid returns whether the current iterator is valid. Once invalid, the Iterator remains 127 | // invalid forever. 128 | Valid() bool 129 | 130 | // Next moves the iterator to the next key in the database, as defined by order of iteration. 131 | // If Valid returns false, this method will panic. 132 | Next() 133 | 134 | // Key returns the key at the current position. Panics if the iterator is invalid. 135 | // CONTRACT: key readonly []byte 136 | Key() (key []byte) 137 | 138 | // Value returns the value at the current position. Panics if the iterator is invalid. 139 | // CONTRACT: value readonly []byte 140 | Value() (value []byte) 141 | 142 | // Error returns the last error encountered by the iterator, if any. 143 | Error() error 144 | 145 | // Close closes the iterator, releasing any allocated resources. 146 | Close() error 147 | } 148 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "os" 6 | ) 7 | 8 | func cp(bz []byte) (ret []byte) { 9 | ret = make([]byte, len(bz)) 10 | copy(ret, bz) 11 | return ret 12 | } 13 | 14 | // Returns a slice of the same length (big endian) 15 | // except incremented by one. 16 | // Returns nil on overflow (e.g. 
if bz bytes are all 0xFF) 17 | // CONTRACT: len(bz) > 0 18 | func cpIncr(bz []byte) (ret []byte) { 19 | if len(bz) == 0 { 20 | panic("cpIncr expects non-zero bz length") 21 | } 22 | ret = cp(bz) 23 | for i := len(bz) - 1; i >= 0; i-- { 24 | if ret[i] < byte(0xFF) { 25 | ret[i]++ 26 | return 27 | } 28 | ret[i] = byte(0x00) 29 | if i == 0 { 30 | // Overflow 31 | return nil 32 | } 33 | } 34 | return nil 35 | } 36 | 37 | // See DB interface documentation for more information. 38 | func IsKeyInDomain(key, start, end []byte) bool { 39 | if bytes.Compare(key, start) < 0 { 40 | return false 41 | } 42 | if end != nil && bytes.Compare(end, key) <= 0 { 43 | return false 44 | } 45 | return true 46 | } 47 | 48 | func FileExists(filePath string) bool { 49 | _, err := os.Stat(filePath) 50 | return !os.IsNotExist(err) 51 | } 52 | -------------------------------------------------------------------------------- /util_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | // Empty iterator for empty db. 12 | func TestPrefixIteratorNoMatchNil(t *testing.T) { 13 | for backend := range backends { 14 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 15 | db, dir := newTempDB(t, backend) 16 | defer os.RemoveAll(dir) 17 | itr, err := IteratePrefix(db, []byte("2")) 18 | require.NoError(t, err) 19 | 20 | checkInvalid(t, itr) 21 | }) 22 | } 23 | } 24 | 25 | // Empty iterator for db populated after iterator created. 
26 | func TestPrefixIteratorNoMatch1(t *testing.T) { 27 | for backend := range backends { 28 | if backend == BoltDBBackend { 29 | t.Log("bolt does not support concurrent writes while iterating") 30 | continue 31 | } 32 | 33 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 34 | db, dir := newTempDB(t, backend) 35 | defer os.RemoveAll(dir) 36 | itr, err := IteratePrefix(db, []byte("2")) 37 | require.NoError(t, err) 38 | err = db.SetSync(bz("1"), bz("value_1")) 39 | require.NoError(t, err) 40 | 41 | checkInvalid(t, itr) 42 | }) 43 | } 44 | } 45 | 46 | // Empty iterator for prefix starting after db entry. 47 | func TestPrefixIteratorNoMatch2(t *testing.T) { 48 | for backend := range backends { 49 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 50 | db, dir := newTempDB(t, backend) 51 | defer os.RemoveAll(dir) 52 | err := db.SetSync(bz("3"), bz("value_3")) 53 | require.NoError(t, err) 54 | itr, err := IteratePrefix(db, []byte("4")) 55 | require.NoError(t, err) 56 | 57 | checkInvalid(t, itr) 58 | }) 59 | } 60 | } 61 | 62 | // Iterator with single val for db with single val, starting from that val. 63 | func TestPrefixIteratorMatch1(t *testing.T) { 64 | for backend := range backends { 65 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 66 | db, dir := newTempDB(t, backend) 67 | defer os.RemoveAll(dir) 68 | err := db.SetSync(bz("2"), bz("value_2")) 69 | require.NoError(t, err) 70 | itr, err := IteratePrefix(db, bz("2")) 71 | require.NoError(t, err) 72 | 73 | checkValid(t, itr, true) 74 | checkItem(t, itr, bz("2"), bz("value_2")) 75 | checkNext(t, itr, false) 76 | 77 | // Once invalid... 78 | checkInvalid(t, itr) 79 | }) 80 | } 81 | } 82 | 83 | // Iterator with prefix iterates over everything with same prefix. 
84 | func TestPrefixIteratorMatches1N(t *testing.T) { 85 | for backend := range backends { 86 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 87 | db, dir := newTempDB(t, backend) 88 | defer os.RemoveAll(dir) 89 | 90 | // prefixed 91 | err := db.SetSync(bz("a/1"), bz("value_1")) 92 | require.NoError(t, err) 93 | err = db.SetSync(bz("a/3"), bz("value_3")) 94 | require.NoError(t, err) 95 | 96 | // not 97 | err = db.SetSync(bz("b/3"), bz("value_3")) 98 | require.NoError(t, err) 99 | err = db.SetSync(bz("a-3"), bz("value_3")) 100 | require.NoError(t, err) 101 | err = db.SetSync(bz("a.3"), bz("value_3")) 102 | require.NoError(t, err) 103 | err = db.SetSync(bz("abcdefg"), bz("value_3")) 104 | require.NoError(t, err) 105 | itr, err := IteratePrefix(db, bz("a/")) 106 | require.NoError(t, err) 107 | 108 | checkValid(t, itr, true) 109 | checkItem(t, itr, bz("a/1"), bz("value_1")) 110 | checkNext(t, itr, true) 111 | checkItem(t, itr, bz("a/3"), bz("value_3")) 112 | 113 | // Bad! 114 | checkNext(t, itr, false) 115 | 116 | // Once invalid... 117 | checkInvalid(t, itr) 118 | }) 119 | } 120 | } 121 | --------------------------------------------------------------------------------