├── .changelog ├── unreleased │ ├── .gitkeep │ └── breaking │ │ └── 254-go-mod-retract-v102-v103.md ├── v0.15.0 │ ├── summary.md │ ├── dependencies │ │ └── 189-bump-gorocksdb-version.md │ └── breaking-changes │ │ └── 4039-go-2024-3107.md ├── v1.0.2 │ └── summary.md ├── v1.0.4 │ └── summary.md ├── v0.12.0 │ ├── summary.md │ └── features │ │ └── 153-deprecate-boltdb-cleveldb.md ├── v0.10.0 │ ├── breaking-changes │ │ └── 118-remove-remotedb.md │ ├── features │ │ └── 112-pebbledb.md │ └── summary.md ├── v0.8.0 │ ├── compiler │ │ └── 40-update-to-go1.19.md │ ├── dependencies │ │ ├── 40-bump-golang-x-net.md │ │ └── 42-use-grocksdb.md │ └── summary.md ├── v0.9.0 │ ├── go-version │ │ └── 98-bump-go-v1.21.md │ ├── dependencies │ │ └── 97-update-rocksdb.md │ └── summary.md ├── v1.0.0 │ ├── breaking │ │ └── 578-add-goleveldb-build-flag.md │ └── summary.md ├── v0.11.0 │ ├── features │ │ └── 111-compaction-support.md │ ├── summary.md │ └── breaking-changes │ │ └── 111-compaction-support.md ├── v0.13.0 │ ├── breaking-changes │ │ └── 155-remove-bolt-cleveldb.md │ ├── features │ │ └── 168-iter-key.md │ └── summary.md ├── v1.0.3 │ ├── dependencies │ │ └── 236-bump-go-version.md │ ├── summary.md │ └── bug-fixes │ │ └── 234-fix-arm64-build.md ├── v0.7.0 │ ├── breaking-changes │ │ └── 7-rename-to-cometbft-db.md │ └── summary.md ├── v1.0.1 │ └── summary.md ├── v0.14.0 │ ├── dependencies │ │ └── 177-reinstate-boltdb-cleveldb.md │ └── summary.md ├── v0.9.1 │ └── summary.md ├── epilogue.md └── config.toml ├── .github ├── ISSUE_TEMPLATE.md ├── CODEOWNERS ├── linters │ ├── yaml-lint.yml │ └── markdownlint.yml ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml ├── workflows │ ├── stale.yml │ ├── lint.yml │ ├── markdown-linter.yml │ ├── test.yml │ ├── conventional-pr-title.yml │ ├── codeql.yml │ └── docker.yml └── mergify.yml ├── .gitignore ├── CONTRIBUTING.md ├── .mergify.yml ├── memdb_test.go ├── codecov.yml ├── tools └── Dockerfile ├── boltdb_test.go ├── rocksdb_test.go ├── test_helpers.go 
├── pebble_test.go ├── RELEASES.md ├── util.go ├── prefixdb_batch.go ├── goleveldb_test.go ├── SECURITY.md ├── goleveldb_batch.go ├── rocksdb_batch.go ├── cleveldb_batch.go ├── boltdb_batch.go ├── memdb_batch.go ├── db.go ├── go.mod ├── cleveldb_test.go ├── goleveldb_iterator.go ├── cleveldb_iterator.go ├── boltdb_iterator.go ├── prefixdb_iterator.go ├── Makefile ├── db_test.go ├── .golangci.yml ├── util_test.go ├── rocksdb_iterator.go ├── prefixdb_test.go ├── memdb_iterator.go ├── README.md ├── goleveldb.go ├── prefixdb.go ├── common_test.go ├── cleveldb.go ├── rocksdb.go ├── boltdb.go ├── memdb.go ├── types.go ├── CHANGELOG.md ├── badger_db.go ├── pebble.go ├── LICENSE ├── backend_test.go └── go.sum /.changelog/unreleased/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | --- 2 | labels: needs-triage 3 | --- 4 | 5 | -------------------------------------------------------------------------------- /.changelog/v0.15.0/summary.md: -------------------------------------------------------------------------------- 1 | *September 9, 2024* 2 | 3 | This release bumps the Go version to 1.23. 4 | -------------------------------------------------------------------------------- /.changelog/v1.0.2/summary.md: -------------------------------------------------------------------------------- 1 | *January 29, 2025* 2 | 3 | This release bumps the Go version to 1.23.5. 4 | -------------------------------------------------------------------------------- /.changelog/v1.0.4/summary.md: -------------------------------------------------------------------------------- 1 | *February 28, 2025* 2 | 3 | This release relaxes Go version constraints and fixes Docker builds. 
4 | -------------------------------------------------------------------------------- /.changelog/v0.12.0/summary.md: -------------------------------------------------------------------------------- 1 | *Apr 10, 2024* 2 | 3 | This release deprecates boltdb and cleveldb. Also, Go MSRV is bumped to 1.22. 4 | -------------------------------------------------------------------------------- /.changelog/v0.10.0/breaking-changes/118-remove-remotedb.md: -------------------------------------------------------------------------------- 1 | - Remove remotedb ([\#121](https://github.com/cometbft/cometbft-db/pull/121)) 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS: https://help.github.com/articles/about-codeowners/ 2 | 3 | * @CometBFT/engineering @cometbft/interchain-inc 4 | -------------------------------------------------------------------------------- /.changelog/v0.8.0/compiler/40-update-to-go1.19.md: -------------------------------------------------------------------------------- 1 | - Bump minimum Go version to 1.19 2 | ([\#40](https://github.com/cometbft/cometbft-db/pull/40)) 3 | -------------------------------------------------------------------------------- /.changelog/v0.9.0/go-version/98-bump-go-v1.21.md: -------------------------------------------------------------------------------- 1 | - Bump minimum Go version to v1.21 2 | ([\#98](https://github.com/cometbft/cometbft-db/pull/98)) 3 | -------------------------------------------------------------------------------- /.changelog/v0.9.0/dependencies/97-update-rocksdb.md: -------------------------------------------------------------------------------- 1 | - Use RocksDB v8, testing with v8.8.1 2 | ([\#97](https://github.com/cometbft/cometbft-db/pull/97)) 3 | -------------------------------------------------------------------------------- 
/.changelog/v1.0.0/breaking/578-add-goleveldb-build-flag.md: -------------------------------------------------------------------------------- 1 | - Add `goleveldb` build flag. 2 | ([\#202](https://github.com/cometbft/cometbft-db/pull/202)) 3 | -------------------------------------------------------------------------------- /.changelog/v0.10.0/features/112-pebbledb.md: -------------------------------------------------------------------------------- 1 | - Add [pebbledb](https://github.com/cockroachdb/pebble) ([\#112](https://github.com/cometbft/cometbft-db/pull/112)) 2 | -------------------------------------------------------------------------------- /.changelog/v0.11.0/features/111-compaction-support.md: -------------------------------------------------------------------------------- 1 | - Add compaction support to the databases ([\#111](https://github.com/cometbft/cometbft-db/pull/111)) 2 | -------------------------------------------------------------------------------- /.changelog/v0.13.0/breaking-changes/155-remove-bolt-cleveldb.md: -------------------------------------------------------------------------------- 1 | - removed deprecated boltdb and cleveldb ([\#155](https://github.com/cometbft/cometbft-db/pull/155)) -------------------------------------------------------------------------------- /.changelog/v0.15.0/dependencies/189-bump-gorocksdb-version.md: -------------------------------------------------------------------------------- 1 | - Use RocksDB 9, testing with v9.3.1 2 | ([\#189](https://github.com/cometbft/cometbft-db/pull/189)) -------------------------------------------------------------------------------- /.changelog/v1.0.3/dependencies/236-bump-go-version.md: -------------------------------------------------------------------------------- 1 | - `[deps]` Bump Go version to 1.23.6 2 | ([\#236](https://github.com/cometbft/cometbft-db/pull/236)) 3 | -------------------------------------------------------------------------------- 
/.changelog/v1.0.3/summary.md: -------------------------------------------------------------------------------- 1 | *February 7, 2025* 2 | 3 | This release bumps the Go version to 1.23.6 and brings back arm64 Docker build 4 | (without rocksdb). 5 | -------------------------------------------------------------------------------- /.changelog/v0.11.0/summary.md: -------------------------------------------------------------------------------- 1 | *Feb 7, 2024* 2 | 3 | This release adds support for explicit compaction. Please note that badger and 4 | bolt do not support this. 5 | -------------------------------------------------------------------------------- /.changelog/v0.8.0/dependencies/40-bump-golang-x-net.md: -------------------------------------------------------------------------------- 1 | - Update to the latest version of golang.org/x/net 2 | ([\#40](https://github.com/cometbft/cometbft-db/pull/40)) -------------------------------------------------------------------------------- /.changelog/v1.0.3/bug-fixes/234-fix-arm64-build.md: -------------------------------------------------------------------------------- 1 | - `[docker]` Bring back `arm64` build target 2 | ([\#234](https://github.com/cometbft/cometbft-db/issues/234)) 3 | -------------------------------------------------------------------------------- /.changelog/unreleased/breaking/254-go-mod-retract-v102-v103.md: -------------------------------------------------------------------------------- 1 | - `[go.mod]` Retract v1.0.2 and v1.0.3 2 | ([\#254](https://github.com/cometbft/cometbft-db/issues/254)) 3 | -------------------------------------------------------------------------------- /.changelog/v0.11.0/breaking-changes/111-compaction-support.md: -------------------------------------------------------------------------------- 1 | - Expanded db interface to support compaction ([\#111](https://github.com/cometbft/cometbft-db/pull/111)) 2 | 
-------------------------------------------------------------------------------- /.changelog/v0.7.0/breaking-changes/7-rename-to-cometbft-db.md: -------------------------------------------------------------------------------- 1 | - Fork tm-db and rename fork to cometbft-db 2 | ([\#7](https://github.com/cometbft/cometbft-db/issues/7)) 3 | -------------------------------------------------------------------------------- /.changelog/v1.0.1/summary.md: -------------------------------------------------------------------------------- 1 | *September 23, 2024* 2 | 3 | This release reverts the addition of the `goleveldb` flag, which was deemed as 4 | too disruptive to users. 5 | -------------------------------------------------------------------------------- /.changelog/v0.14.0/dependencies/177-reinstate-boltdb-cleveldb.md: -------------------------------------------------------------------------------- 1 | - reinstate BoltDB and ClevelDB as backend DBs 2 | ([\#177](https://github.com/cometbft/cometbft-db/pull/177)) -------------------------------------------------------------------------------- /.changelog/v0.15.0/breaking-changes/4039-go-2024-3107.md: -------------------------------------------------------------------------------- 1 | - `[go/runtime]` Bump minimum Go version to v1.23 2 | ([\#4039](https://github.com/cometbft/cometbft/issues/4039)) 3 | -------------------------------------------------------------------------------- /.changelog/v0.13.0/features/168-iter-key.md: -------------------------------------------------------------------------------- 1 | - Iterator Key and Value APIs now return an object that must be copied before 2 | use ([\#168](https://github.com/cometbft/cometbft-db/pull/168)) -------------------------------------------------------------------------------- /.changelog/v0.12.0/features/153-deprecate-boltdb-cleveldb.md: -------------------------------------------------------------------------------- 1 | - Deprecate boltdb and cleveldb. 
If you're using either of those, please reach 2 | out ([\#153](https://github.com/cometbft/cometbft-db/pull/153)) 3 | -------------------------------------------------------------------------------- /.changelog/v0.8.0/dependencies/42-use-grocksdb.md: -------------------------------------------------------------------------------- 1 | - Switch rocksdb binding from gorocksdb to grocksdb, bump librocksdb dependency 2 | to `v7.10.2` ([\#42](https://github.com/cometbft/cometbft-db/pull/42)) 3 | -------------------------------------------------------------------------------- /.changelog/v0.14.0/summary.md: -------------------------------------------------------------------------------- 1 | *Aug 9, 2024* 2 | 3 | This release reinstates boltdb and cleveldb as deprecated backend types. 4 | Please note that we discourage the use of them, as we plan to discontinue support in a future release. 5 | 6 | -------------------------------------------------------------------------------- /.changelog/v0.9.1/summary.md: -------------------------------------------------------------------------------- 1 | *December 4, 2023* 2 | 3 | This release is precisely the same code-wise as v0.9.0, except that it builds 4 | the `cometbft/cometbft-db-testing` Docker image for both `linux/amd64` and 5 | `linux/arm64` platforms. 6 | -------------------------------------------------------------------------------- /.changelog/v1.0.0/summary.md: -------------------------------------------------------------------------------- 1 | *September 20, 2024* 2 | 3 | This release swaps the "default" DB from goleveldb to pebbledb. There's now a 4 | `goleveldb` build flag that must be used when using goleveldb. If you're using 5 | `pebbledb`, you don't need a build flag anymore. 
6 | -------------------------------------------------------------------------------- /.github/linters/yaml-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Default rules for YAML linting from super-linter. 3 | # See: See https://yamllint.readthedocs.io/en/stable/rules.html 4 | extends: default 5 | rules: 6 | document-end: disable 7 | document-start: disable 8 | line-length: disable 9 | truthy: disable 10 | -------------------------------------------------------------------------------- /.changelog/v0.9.0/summary.md: -------------------------------------------------------------------------------- 1 | *December 1, 2023* 2 | 3 | This release primarily updates some key dependencies, including adding support 4 | for RocksDB v8. It also bumps the minimum Go version to v1.21 in order for 5 | CometBFT to be able to use it in the E2E testing framework for the latest major 6 | releases. 7 | -------------------------------------------------------------------------------- /.changelog/v0.13.0/summary.md: -------------------------------------------------------------------------------- 1 | *Aug 2, 2024* 2 | 3 | This release: 4 | - changes the contract of the Iterator Key() and Value() APIs. Namely, the caller is now responsible for creating a copy of their returned value if they want to modify it. 5 | - removes support for boltDB and clevelDB, which were marked as deprecated in release v0.12.0. 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | .idea 15 | vendor/* 16 | 17 | # Code coverage 18 | coverage.txt 19 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thank you for your interest in contributing to CometBFT DB! This repository 4 | follows the [contribution guidelines] of CometBFT - please take a look if you 5 | are not already familiar with those. 6 | 7 | [contribution guidelines]: https://github.com/cometbft/cometbft/blob/main/CONTRIBUTING.md 8 | -------------------------------------------------------------------------------- /.changelog/v0.10.0/summary.md: -------------------------------------------------------------------------------- 1 | *Jan 26, 2024* 2 | 3 | This release adds experimental support for 4 | [pebble](https://github.com/cockroachdb/pebble) and drops `remotedb`. If you 5 | experience any issues with pebble, please open an issue on Github. 6 | 7 | Special thanks to @faddat and @baabeetaa for their contributions to this 8 | release! 9 | -------------------------------------------------------------------------------- /.changelog/epilogue.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | CometBFT DB is a fork of [tm-db](https://github.com/tendermint/tm-db) 4 | effectively as of v0.6.6. 
5 | 6 | For changes prior to the creation of this fork, please refer to the upstream 7 | [CHANGELOG.md](https://github.com/tendermint/tm-db/blob/774cdfe7e6b0a249b1144998d81a4de7b8037941/CHANGELOG.md) 8 | for v0.6.6 and earlier. 9 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | queue_rules: 2 | - name: default 3 | conditions: 4 | - base=master 5 | - label=S:automerge 6 | 7 | pull_request_rules: 8 | - name: automerge to master with label S:automerge and branch protection passing 9 | conditions: 10 | - base=master 11 | - label=S:automerge 12 | actions: 13 | queue: 14 | method: squash 15 | name: default 16 | -------------------------------------------------------------------------------- /.changelog/config.toml: -------------------------------------------------------------------------------- 1 | project_url = 'https://github.com/cometbft/cometbft-db' 2 | 3 | sort_releases_by = [ 4 | "date", 5 | "version" 6 | ] 7 | release_date_formats = [ 8 | # "*December 1, 2023* 9 | "*%B %d, %Y*", 10 | # "*Dec 1, 2023* 11 | "*%b %d, %Y*", 12 | # "2023-12-01" (ISO format) 13 | "%F", 14 | ] 15 | 16 | [change_set_sections] 17 | sort_entries_by = "entry-text" 18 | -------------------------------------------------------------------------------- /.changelog/v0.7.0/summary.md: -------------------------------------------------------------------------------- 1 | *Jan 17, 2023* 2 | 3 | This is the first official release of CometBFT DB, which is a fork of 4 | [tm-db](https://github.com/tendermint/tm-db). 5 | 6 | This fork is intended to be used by 7 | [CometBFT](https://github.com/cometbft/cometbft) until such time that 8 | [cometbft/cometbft\#48](https://github.com/cometbft/cometbft/issues/48) is 9 | resolved, after which time this fork will be retired and archived. Do not use 10 | this as a dependency in any new projects. 
11 | -------------------------------------------------------------------------------- /.github/linters/markdownlint.yml: -------------------------------------------------------------------------------- 1 | # markdownlint configuration for Super-Linter 2 | # - https://github.com/DavidAnson/markdownlint 3 | # - https://github.com/github/super-linter 4 | 5 | # Default state for all rules 6 | default: true 7 | 8 | # See https://github.com/DavidAnson/markdownlint#rules--aliases for rules 9 | MD007: {"indent": 4} 10 | MD013: false 11 | MD024: {siblings_only: true} 12 | MD025: false 13 | MD033: {no-inline-html: false} 14 | no-hard-tabs: false 15 | whitespace: false 16 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 8 | 9 | --- 10 | 11 | #### PR checklist 12 | 13 | - [ ] Tests written/updated 14 | - [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) 15 | -------------------------------------------------------------------------------- /memdb_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func BenchmarkMemDBRangeScans1M(b *testing.B) { 8 | db := NewMemDB() 9 | defer db.Close() 10 | 11 | benchmarkRangeScans(b, db, int64(1e6)) 12 | } 13 | 14 | func BenchmarkMemDBRangeScans10M(b *testing.B) { 15 | db := NewMemDB() 16 | defer db.Close() 17 | 18 | benchmarkRangeScans(b, db, int64(10e6)) 19 | } 20 | 21 | func BenchmarkMemDBRandomReadsWrites(b *testing.B) { 22 | db := NewMemDB() 23 | defer db.Close() 24 | 25 | benchmarkRandomReadsWrites(b, db) 26 | } 27 | -------------------------------------------------------------------------------- /.changelog/v0.8.0/summary.md: -------------------------------------------------------------------------------- 1 | *Apr 
26, 2023* 2 | 3 | This release bumps the supported version of RocksDB, which requires cometbft-db 4 | RocksDB users to update their builds (and hence requires a "major" release, but 5 | does not introduce any other breaking changes). Special thanks to @yihuang for 6 | this update! 7 | 8 | While the minimum supported version of the Go compiler was bumped to 1.19, no 9 | 1.19-specific code changes were introduced and this should, therefore, still be 10 | able to be compiled with earlier versions of Go. It is, however, recommended to 11 | upgrade to the latest version(s) of Go ASAP. 12 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | target-branch: "main" 8 | open-pull-requests-limit: 10 9 | labels: 10 | - dependencies 11 | - automerge 12 | 13 | ################################### 14 | ## 15 | ## Update All Go Dependencies 16 | 17 | - package-ecosystem: gomod 18 | directory: "/" 19 | schedule: 20 | interval: weekly 21 | target-branch: "main" 22 | open-pull-requests-limit: 10 23 | labels: 24 | - dependencies 25 | - automerge 26 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | # 2 | # This codecov.yml is the default configuration for 3 | # all repositories on Codecov. You may adjust the settings 4 | # below in your own codecov.yml in your repository. 
5 | # 6 | codecov: 7 | require_ci_to_pass: yes 8 | 9 | coverage: 10 | precision: 2 11 | round: down 12 | range: 70...100 13 | 14 | status: 15 | # Learn more at https://docs.codecov.io/docs/commit-status 16 | project: 17 | default: 18 | threshold: 1% # allow this much decrease on project 19 | 20 | comment: 21 | layout: "reach, diff, files, tree" 22 | behavior: default # update if exists else create new 23 | require_changes: true 24 | -------------------------------------------------------------------------------- /tools/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.23 2 | 3 | RUN apt update \ 4 | && apt install -y \ 5 | libbz2-dev libgflags-dev libsnappy-dev libzstd-dev zlib1g-dev liblz4-dev \ 6 | make tar wget build-essential g++ cmake \ 7 | libleveldb-dev libleveldb1d 8 | 9 | ARG ROCKSDB=9.8.4 10 | 11 | # Install RocksDB 12 | RUN \ 13 | wget -q https://github.com/facebook/rocksdb/archive/refs/tags/v${ROCKSDB}.tar.gz \ 14 | && tar -zxf v${ROCKSDB}.tar.gz \ 15 | && cd rocksdb-${ROCKSDB} \ 16 | && DEBUG_LEVEL=0 make -j4 shared_lib \ 17 | && make install-shared \ 18 | && ldconfig \ 19 | && cd .. \ 20 | && rm -rf v${ROCKSDB}.tar.gz rocksdb-${ROCKSDB} 21 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: "Close stale pull requests" 2 | on: 3 | schedule: 4 | - cron: "0 0 * * *" 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v9 11 | with: 12 | repo-token: ${{ secrets.GITHUB_TOKEN }} 13 | stale-pr-message: "This pull request has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions." 
16 | days-before-stale: -1 17 | days-before-close: -1 18 | days-before-pr-stale: 10 19 | days-before-pr-close: 4 20 | exempt-pr-labels: "wip" 21 | -------------------------------------------------------------------------------- /boltdb_test.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestBoltDBNewBoltDB(t *testing.T) { 15 | name := fmt.Sprintf("test_%x", randStr(12)) 16 | dir := os.TempDir() 17 | defer cleanupDBDir(dir, name) 18 | 19 | db, err := NewBoltDB(name, dir) 20 | require.NoError(t, err) 21 | db.Close() 22 | } 23 | 24 | func BenchmarkBoltDBRandomReadsWrites(b *testing.B) { 25 | name := fmt.Sprintf("test_%x", randStr(12)) 26 | db, err := NewBoltDB(name, "") 27 | if err != nil { 28 | b.Fatal(err) 29 | } 30 | defer func() { 31 | db.Close() 32 | cleanupDBDir("", name) 33 | }() 34 | 35 | benchmarkRandomReadsWrites(b, db) 36 | } 37 | -------------------------------------------------------------------------------- /.github/mergify.yml: -------------------------------------------------------------------------------- 1 | pull_request_rules: 2 | - name: automatic approval for Dependabot pull requests 3 | conditions: 4 | - author=dependabot[bot] 5 | actions: 6 | review: 7 | type: APPROVE 8 | message: Automatically approving dependabot 9 | 10 | - name: automatically merge PR with automerge label 11 | conditions: 12 | - '-label=manual-backport' 13 | - label=automerge 14 | actions: 15 | merge: 16 | method: squash 17 | 18 | - name: Make sure PR are up to date before merging 19 | description: >- 20 | This automatically updates PRs when they are out-of-date with the base 21 | branch to avoid semantic conflicts (next step is using a merge queue). 
22 | conditions: 23 | - '-draft' 24 | actions: 25 | update: 26 | -------------------------------------------------------------------------------- /rocksdb_test.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestRocksDBBackend(t *testing.T) { 16 | name := fmt.Sprintf("test_%x", randStr(12)) 17 | dir := os.TempDir() 18 | db, err := NewDB(name, RocksDBBackend, dir) 19 | require.NoError(t, err) 20 | defer cleanupDBDir(dir, name) 21 | 22 | _, ok := db.(*RocksDB) 23 | assert.True(t, ok) 24 | } 25 | 26 | func TestRocksDBStats(t *testing.T) { 27 | name := fmt.Sprintf("test_%x", randStr(12)) 28 | dir := os.TempDir() 29 | db, err := NewDB(name, RocksDBBackend, dir) 30 | require.NoError(t, err) 31 | defer cleanupDBDir(dir, name) 32 | 33 | assert.NotEmpty(t, db.Stats()) 34 | } 35 | 36 | // TODO: Add tests for rocksdb 37 | -------------------------------------------------------------------------------- /test_helpers.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "math/rand" 4 | 5 | const ( 6 | strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters 7 | ) 8 | 9 | // For testing convenience. 10 | func bz(s string) []byte { 11 | return []byte(s) 12 | } 13 | 14 | // Str constructs a random alphanumeric string of given length. 
15 | func randStr(length int) string { //nolint:unparam 16 | chars := []byte{} 17 | MAIN_LOOP: 18 | for { 19 | val := rand.Int63() //nolint:gosec // G404: Use of weak random number generator 20 | for i := 0; i < 10; i++ { 21 | v := int(val & 0x3f) // rightmost 6 bits 22 | if v >= 62 { // only 62 characters in strChars 23 | val >>= 6 24 | continue 25 | } 26 | chars = append(chars, strChars[v]) 27 | if len(chars) == length { 28 | break MAIN_LOOP 29 | } 30 | val >>= 6 31 | } 32 | } 33 | 34 | return string(chars) 35 | } 36 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: 3 | pull_request: 4 | merge_group: 5 | push: 6 | branches: 7 | - main 8 | 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.ref }} 11 | cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} 12 | 13 | env: 14 | ORG: cometbft 15 | IMAGE_NAME: cometbft-db-testing 16 | 17 | jobs: 18 | golangci: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - name: Set up Docker Buildx 24 | uses: docker/setup-buildx-action@v3 25 | 26 | - name: Build and load 27 | uses: docker/build-push-action@v6 28 | with: 29 | platforms: linux/amd64 30 | file: ./tools/Dockerfile 31 | tags: "${{ env.ORG }}/${{ env.IMAGE_NAME }}:latest" 32 | load: true 33 | 34 | - name: lint 35 | run: | 36 | NON_INTERACTIVE=1 make docker-lint 37 | -------------------------------------------------------------------------------- /pebble_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestPebbleDBBackend(t *testing.T) { 13 | name := fmt.Sprintf("test_%x", randStr(12)) 14 | dir := os.TempDir() 15 | db, err := NewDB(name, 
PebbleDBBackend, dir) 16 | require.NoError(t, err) 17 | defer cleanupDBDir(dir, name) 18 | 19 | _, ok := db.(*PebbleDB) 20 | assert.True(t, ok) 21 | } 22 | 23 | func BenchmarkPebbleDBRandomReadsWrites(b *testing.B) { 24 | name := fmt.Sprintf("test_%x", randStr(12)) 25 | dir := os.TempDir() 26 | db, err := NewDB(name, PebbleDBBackend, dir) 27 | if err != nil { 28 | b.Fatal(err) 29 | } 30 | defer func() { 31 | err = db.Close() 32 | require.NoError(b, err) 33 | 34 | cleanupDBDir("", name) 35 | }() 36 | 37 | benchmarkRandomReadsWrites(b, db) 38 | } 39 | 40 | // TODO: Add tests for pebble 41 | -------------------------------------------------------------------------------- /RELEASES.md: -------------------------------------------------------------------------------- 1 | # Releases 2 | 3 | This document provides a step-by-step guide for creating a release of CometBFT 4 | DB. 5 | 6 | 1. Create a local branch `release/vX.X.X`, where `vX.X.X` corresponds to the 7 | version of the release you want to cut. 8 | 2. Update and build the changelog on your local release branch. 9 | 3. Submit a pull request from your release branch, targeting the `main` branch. 10 | 4. Once approved and merged, tag the commit associated with the merged release 11 | branch. 12 | 5. Create a [GitHub release] from the new tag, and include a link from the 13 | description to the heading associated with the new version in the changelog. 14 | 6. Build and push the Docker image associated with the new release by running 15 | `Build Docker Image` GH action with the newly created tag `vX.X.X`. 
16 | 17 | [GitHub release]: https://docs.github.com/en/github/administering-a-repository/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release 18 | -------------------------------------------------------------------------------- /.github/workflows/markdown-linter.yml: -------------------------------------------------------------------------------- 1 | name: Markdown Linter 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths: 7 | - "**.md" 8 | - "**.yml" 9 | - "**.yaml" 10 | pull_request: 11 | branches: [main] 12 | paths: 13 | - "**.md" 14 | - "**.yml" 15 | 16 | concurrency: 17 | group: ${{ github.workflow }}-${{ github.ref }} 18 | cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} 19 | 20 | jobs: 21 | build: 22 | name: Super linter 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Checkout Code 26 | uses: actions/checkout@v4 27 | - name: Lint Code Base 28 | uses: docker://github/super-linter:v4 29 | env: 30 | VALIDATE_ALL_CODEBASE: true 31 | DEFAULT_BRANCH: main 32 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 33 | VALIDATE_MD: true 34 | VALIDATE_OPENAPI: true 35 | VALIDATE_YAML: true 36 | YAML_CONFIG_FILE: yaml-lint.yml 37 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "os" 6 | ) 7 | 8 | func cp(bz []byte) (ret []byte) { 9 | ret = make([]byte, len(bz)) 10 | copy(ret, bz) 11 | return ret 12 | } 13 | 14 | // Returns a slice of the same length (big endian) 15 | // except incremented by one. 16 | // Returns nil on overflow (e.g. if bz bytes are all 0xFF) 17 | // CONTRACT: len(bz) > 0. 
18 | func cpIncr(bz []byte) (ret []byte) { 19 | if len(bz) == 0 { 20 | panic("cpIncr expects non-zero bz length") 21 | } 22 | ret = cp(bz) 23 | for i := len(bz) - 1; i >= 0; i-- { 24 | if ret[i] < byte(0xFF) { 25 | ret[i]++ 26 | return ret 27 | } 28 | ret[i] = byte(0x00) 29 | if i == 0 { 30 | // Overflow 31 | return nil 32 | } 33 | } 34 | return nil 35 | } 36 | 37 | // See DB interface documentation for more information. 38 | func IsKeyInDomain(key, start, end []byte) bool { 39 | if bytes.Compare(key, start) < 0 { 40 | return false 41 | } 42 | if end != nil && bytes.Compare(end, key) <= 0 { 43 | return false 44 | } 45 | return true 46 | } 47 | 48 | func FileExists(filePath string) bool { 49 | _, err := os.Stat(filePath) 50 | return !os.IsNotExist(err) 51 | } 52 | -------------------------------------------------------------------------------- /prefixdb_batch.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | type prefixDBBatch struct { 4 | prefix []byte 5 | source Batch 6 | } 7 | 8 | var _ Batch = (*prefixDBBatch)(nil) 9 | 10 | func newPrefixBatch(prefix []byte, source Batch) prefixDBBatch { 11 | return prefixDBBatch{ 12 | prefix: prefix, 13 | source: source, 14 | } 15 | } 16 | 17 | // Set implements Batch. 18 | func (pb prefixDBBatch) Set(key, value []byte) error { 19 | if len(key) == 0 { 20 | return errKeyEmpty 21 | } 22 | if value == nil { 23 | return errValueNil 24 | } 25 | pkey := append(cp(pb.prefix), key...) 26 | return pb.source.Set(pkey, value) 27 | } 28 | 29 | // Delete implements Batch. 30 | func (pb prefixDBBatch) Delete(key []byte) error { 31 | if len(key) == 0 { 32 | return errKeyEmpty 33 | } 34 | pkey := append(cp(pb.prefix), key...) 35 | return pb.source.Delete(pkey) 36 | } 37 | 38 | // Write implements Batch. 39 | func (pb prefixDBBatch) Write() error { 40 | return pb.source.Write() 41 | } 42 | 43 | // WriteSync implements Batch. 
44 | func (pb prefixDBBatch) WriteSync() error { 45 | return pb.source.WriteSync() 46 | } 47 | 48 | // Close implements Batch. 49 | func (pb prefixDBBatch) Close() error { 50 | return pb.source.Close() 51 | } 52 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | pull_request: 4 | merge_group: 5 | push: 6 | branches: 7 | - main 8 | 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.ref }} 11 | cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} 12 | 13 | env: 14 | ORG: cometbft 15 | IMAGE_NAME: cometbft-db-testing 16 | 17 | jobs: 18 | test: 19 | strategy: 20 | fail-fast: true 21 | matrix: 22 | include: 23 | - os: ubuntu-24.04 24 | platform: linux/amd64 25 | - os: ubuntu-24.04-arm 26 | platform: linux/arm64 27 | runs-on: ${{ matrix.os }} 28 | steps: 29 | - uses: actions/checkout@v4 30 | 31 | - name: Set up Docker Buildx 32 | uses: docker/setup-buildx-action@v3 33 | 34 | - name: Build and load 35 | uses: docker/build-push-action@v6 36 | with: 37 | platforms: ${{ matrix.platform }} 38 | file: ./tools/Dockerfile 39 | tags: "${{ env.ORG }}/${{ env.IMAGE_NAME }}:latest" 40 | load: true 41 | 42 | - name: test & coverage report creation 43 | run: | 44 | NON_INTERACTIVE=1 make docker-test 45 | 46 | - uses: codecov/codecov-action@v4 47 | with: 48 | file: ./coverage.txt 49 | -------------------------------------------------------------------------------- /goleveldb_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | "github.com/syndtr/goleveldb/leveldb/opt" 10 | ) 11 | 12 | func TestGoLevelDBNewGoLevelDB(t *testing.T) { 13 | name := fmt.Sprintf("test_%x", randStr(12)) 14 | defer cleanupDBDir("", name) 15 | 16 | 
// Test we can't open the db twice for writing 17 | wr1, err := NewGoLevelDB(name, "") 18 | require.Nil(t, err) 19 | _, err = NewGoLevelDB(name, "") 20 | require.NotNil(t, err) 21 | err = wr1.Close() // Close the db to release the lock 22 | require.Nil(t, err) 23 | 24 | // Test we can open the db twice for reading only 25 | ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) 26 | require.Nil(t, err) 27 | defer ro1.Close() 28 | ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) 29 | require.Nil(t, err) 30 | defer ro2.Close() 31 | } 32 | 33 | func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) { 34 | name := fmt.Sprintf("test_%x", randStr(12)) 35 | db, err := NewGoLevelDB(name, "") 36 | if err != nil { 37 | b.Fatal(err) 38 | } 39 | defer func() { 40 | err = db.Close() 41 | require.NoError(b, err) 42 | cleanupDBDir("", name) 43 | }() 44 | 45 | benchmarkRandomReadsWrites(b, db) 46 | } 47 | 48 | func TestGoLevelDBBackend(t *testing.T) { 49 | name := fmt.Sprintf("test_%x", randStr(12)) 50 | db, err := NewDB(name, GoLevelDBBackend, "") 51 | require.NoError(t, err) 52 | defer cleanupDBDir("", name) 53 | 54 | _, ok := db.(*GoLevelDB) 55 | assert.True(t, ok) 56 | } 57 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # How to Report a Security Bug 2 | 3 | If you believe you have found a security vulnerability in the Interchain Stack, 4 | you can report it to our primary vulnerability disclosure channel, the [Cosmos 5 | HackerOne Bug Bounty program][h1]. 6 | 7 | If you prefer to report an issue via email, you may send a bug report to 8 | with the issue details, reproduction, impact, and other 9 | information. Please submit only one unique email thread per vulnerability. Any 10 | issues reported via email are ineligible for bounty rewards. 
11 | 12 | Artifacts from an email report are saved at the time the email is triaged. 13 | Please note: our team is not able to monitor dynamic content (e.g. a Google Docs 14 | link that is edited after receipt) throughout the lifecycle of a report. If you 15 | would like to share additional information or modify previous information, 16 | please include it in an additional reply as an additional attachment. 17 | 18 | Please **DO NOT** file a public issue in this repository to report a security 19 | vulnerability. 20 | 21 | ## Coordinated Vulnerability Disclosure Policy and Safe Harbor 22 | 23 | For the most up-to-date version of the policies that govern vulnerability 24 | disclosure, please consult the [HackerOne program page][h1-policy]. 25 | 26 | The policy hosted on HackerOne is the official Coordinated Vulnerability 27 | Disclosure policy and Safe Harbor for the Interchain Stack, and the teams and 28 | infrastructure it supports, and it supersedes previous security policies that 29 | have been used in the past by individual teams and projects with targets in 30 | scope of the program. 31 | 32 | [h1]: https://hackerone.com/cosmos?type=team 33 | [h1-policy]: https://hackerone.com/cosmos?type=team&view_policy=true 34 | -------------------------------------------------------------------------------- /goleveldb_batch.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "github.com/syndtr/goleveldb/leveldb" 5 | "github.com/syndtr/goleveldb/leveldb/opt" 6 | ) 7 | 8 | type goLevelDBBatch struct { 9 | db *GoLevelDB 10 | batch *leveldb.Batch 11 | } 12 | 13 | var _ Batch = (*goLevelDBBatch)(nil) 14 | 15 | func newGoLevelDBBatch(db *GoLevelDB) *goLevelDBBatch { 16 | return &goLevelDBBatch{ 17 | db: db, 18 | batch: new(leveldb.Batch), 19 | } 20 | } 21 | 22 | // Set implements Batch. 
23 | func (b *goLevelDBBatch) Set(key, value []byte) error { 24 | if len(key) == 0 { 25 | return errKeyEmpty 26 | } 27 | if value == nil { 28 | return errValueNil 29 | } 30 | if b.batch == nil { 31 | return errBatchClosed 32 | } 33 | b.batch.Put(key, value) 34 | return nil 35 | } 36 | 37 | // Delete implements Batch. 38 | func (b *goLevelDBBatch) Delete(key []byte) error { 39 | if len(key) == 0 { 40 | return errKeyEmpty 41 | } 42 | if b.batch == nil { 43 | return errBatchClosed 44 | } 45 | b.batch.Delete(key) 46 | return nil 47 | } 48 | 49 | // Write implements Batch. 50 | func (b *goLevelDBBatch) Write() error { 51 | return b.write(false) 52 | } 53 | 54 | // WriteSync implements Batch. 55 | func (b *goLevelDBBatch) WriteSync() error { 56 | return b.write(true) 57 | } 58 | 59 | func (b *goLevelDBBatch) write(sync bool) error { 60 | if b.batch == nil { 61 | return errBatchClosed 62 | } 63 | 64 | err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: sync}) 65 | if err != nil { 66 | return err 67 | } 68 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 69 | return b.Close() 70 | } 71 | 72 | // Close implements Batch. 73 | func (b *goLevelDBBatch) Close() error { 74 | if b.batch != nil { 75 | b.batch.Reset() 76 | b.batch = nil 77 | } 78 | return nil 79 | } 80 | -------------------------------------------------------------------------------- /rocksdb_batch.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import "github.com/linxGnu/grocksdb" 7 | 8 | type rocksDBBatch struct { 9 | db *RocksDB 10 | batch *grocksdb.WriteBatch 11 | } 12 | 13 | var _ Batch = (*rocksDBBatch)(nil) 14 | 15 | func newRocksDBBatch(db *RocksDB) *rocksDBBatch { 16 | return &rocksDBBatch{ 17 | db: db, 18 | batch: grocksdb.NewWriteBatch(), 19 | } 20 | } 21 | 22 | // Set implements Batch. 
23 | func (b *rocksDBBatch) Set(key, value []byte) error { 24 | if len(key) == 0 { 25 | return errKeyEmpty 26 | } 27 | if value == nil { 28 | return errValueNil 29 | } 30 | if b.batch == nil { 31 | return errBatchClosed 32 | } 33 | b.batch.Put(key, value) 34 | return nil 35 | } 36 | 37 | // Delete implements Batch. 38 | func (b *rocksDBBatch) Delete(key []byte) error { 39 | if len(key) == 0 { 40 | return errKeyEmpty 41 | } 42 | if b.batch == nil { 43 | return errBatchClosed 44 | } 45 | b.batch.Delete(key) 46 | return nil 47 | } 48 | 49 | // Write implements Batch. 50 | func (b *rocksDBBatch) Write() error { 51 | if b.batch == nil { 52 | return errBatchClosed 53 | } 54 | err := b.db.db.Write(b.db.wo, b.batch) 55 | if err != nil { 56 | return err 57 | } 58 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 59 | b.Close() 60 | return nil 61 | } 62 | 63 | // WriteSync implements Batch. 64 | func (b *rocksDBBatch) WriteSync() error { 65 | if b.batch == nil { 66 | return errBatchClosed 67 | } 68 | err := b.db.db.Write(b.db.woSync, b.batch) 69 | if err != nil { 70 | return err 71 | } 72 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 73 | return b.Close() 74 | } 75 | 76 | // Close implements Batch. 77 | func (b *rocksDBBatch) Close() error { 78 | if b.batch != nil { 79 | b.batch.Destroy() 80 | b.batch = nil 81 | } 82 | return nil 83 | } 84 | -------------------------------------------------------------------------------- /cleveldb_batch.go: -------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import "github.com/jmhodges/levigo" 7 | 8 | // cLevelDBBatch is a LevelDB batch. 
9 | type cLevelDBBatch struct { 10 | db *CLevelDB 11 | batch *levigo.WriteBatch 12 | } 13 | 14 | func newCLevelDBBatch(db *CLevelDB) *cLevelDBBatch { 15 | return &cLevelDBBatch{ 16 | db: db, 17 | batch: levigo.NewWriteBatch(), 18 | } 19 | } 20 | 21 | // Set implements Batch. 22 | func (b *cLevelDBBatch) Set(key, value []byte) error { 23 | if len(key) == 0 { 24 | return errKeyEmpty 25 | } 26 | if value == nil { 27 | return errValueNil 28 | } 29 | if b.batch == nil { 30 | return errBatchClosed 31 | } 32 | b.batch.Put(key, value) 33 | return nil 34 | } 35 | 36 | // Delete implements Batch. 37 | func (b *cLevelDBBatch) Delete(key []byte) error { 38 | if len(key) == 0 { 39 | return errKeyEmpty 40 | } 41 | if b.batch == nil { 42 | return errBatchClosed 43 | } 44 | b.batch.Delete(key) 45 | return nil 46 | } 47 | 48 | // Write implements Batch. 49 | func (b *cLevelDBBatch) Write() error { 50 | if b.batch == nil { 51 | return errBatchClosed 52 | } 53 | err := b.db.db.Write(b.db.wo, b.batch) 54 | if err != nil { 55 | return err 56 | } 57 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 58 | return b.Close() 59 | } 60 | 61 | // WriteSync implements Batch. 62 | func (b *cLevelDBBatch) WriteSync() error { 63 | if b.batch == nil { 64 | return errBatchClosed 65 | } 66 | err := b.db.db.Write(b.db.woSync, b.batch) 67 | if err != nil { 68 | return err 69 | } 70 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 71 | b.Close() 72 | return nil 73 | } 74 | 75 | // Close implements Batch. 
76 | func (b *cLevelDBBatch) Close() error { 77 | if b.batch != nil { 78 | b.batch.Close() 79 | b.batch = nil 80 | } 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /boltdb_batch.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import "go.etcd.io/bbolt" 7 | 8 | // boltDBBatch stores operations internally and dumps them to BoltDB on Write(). 9 | type boltDBBatch struct { 10 | db *BoltDB 11 | ops []operation 12 | } 13 | 14 | var _ Batch = (*boltDBBatch)(nil) 15 | 16 | func newBoltDBBatch(db *BoltDB) *boltDBBatch { 17 | return &boltDBBatch{ 18 | db: db, 19 | ops: []operation{}, 20 | } 21 | } 22 | 23 | // Set implements Batch. 24 | func (b *boltDBBatch) Set(key, value []byte) error { 25 | if len(key) == 0 { 26 | return errKeyEmpty 27 | } 28 | if value == nil { 29 | return errValueNil 30 | } 31 | if b.ops == nil { 32 | return errBatchClosed 33 | } 34 | b.ops = append(b.ops, operation{opTypeSet, key, value}) 35 | return nil 36 | } 37 | 38 | // Delete implements Batch. 39 | func (b *boltDBBatch) Delete(key []byte) error { 40 | if len(key) == 0 { 41 | return errKeyEmpty 42 | } 43 | if b.ops == nil { 44 | return errBatchClosed 45 | } 46 | b.ops = append(b.ops, operation{opTypeDelete, key, nil}) 47 | return nil 48 | } 49 | 50 | // Write implements Batch. 51 | func (b *boltDBBatch) Write() error { 52 | if b.ops == nil { 53 | return errBatchClosed 54 | } 55 | err := b.db.db.Batch(func(tx *bbolt.Tx) error { 56 | bkt := tx.Bucket(bucket) 57 | for _, op := range b.ops { 58 | switch op.opType { 59 | case opTypeSet: 60 | if err := bkt.Put(op.key, op.value); err != nil { 61 | return err 62 | } 63 | case opTypeDelete: 64 | if err := bkt.Delete(op.key); err != nil { 65 | return err 66 | } 67 | } 68 | } 69 | return nil 70 | }) 71 | if err != nil { 72 | return err 73 | } 74 | // Make sure batch cannot be used afterwards. 
Callers should still call Close(), for errors. 75 | return b.Close() 76 | } 77 | 78 | // WriteSync implements Batch. 79 | func (b *boltDBBatch) WriteSync() error { 80 | return b.Write() 81 | } 82 | 83 | // Close implements Batch. 84 | func (b *boltDBBatch) Close() error { 85 | b.ops = nil 86 | return nil 87 | } 88 | -------------------------------------------------------------------------------- /memdb_batch.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "fmt" 4 | 5 | // memDBBatch operations. 6 | type opType int 7 | 8 | const ( 9 | opTypeSet opType = iota + 1 10 | opTypeDelete 11 | ) 12 | 13 | type operation struct { 14 | opType 15 | key []byte 16 | value []byte 17 | } 18 | 19 | // memDBBatch handles in-memory batching. 20 | type memDBBatch struct { 21 | db *MemDB 22 | ops []operation 23 | } 24 | 25 | var _ Batch = (*memDBBatch)(nil) 26 | 27 | // newMemDBBatch creates a new memDBBatch. 28 | func newMemDBBatch(db *MemDB) *memDBBatch { 29 | return &memDBBatch{ 30 | db: db, 31 | ops: []operation{}, 32 | } 33 | } 34 | 35 | // Set implements Batch. 36 | func (b *memDBBatch) Set(key, value []byte) error { 37 | if len(key) == 0 { 38 | return errKeyEmpty 39 | } 40 | if value == nil { 41 | return errValueNil 42 | } 43 | if b.ops == nil { 44 | return errBatchClosed 45 | } 46 | b.ops = append(b.ops, operation{opTypeSet, key, value}) 47 | return nil 48 | } 49 | 50 | // Delete implements Batch. 51 | func (b *memDBBatch) Delete(key []byte) error { 52 | if len(key) == 0 { 53 | return errKeyEmpty 54 | } 55 | if b.ops == nil { 56 | return errBatchClosed 57 | } 58 | b.ops = append(b.ops, operation{opTypeDelete, key, nil}) 59 | return nil 60 | } 61 | 62 | // Write implements Batch. 
63 | func (b *memDBBatch) Write() error { 64 | if b.ops == nil { 65 | return errBatchClosed 66 | } 67 | b.db.mtx.Lock() 68 | defer b.db.mtx.Unlock() 69 | 70 | for _, op := range b.ops { 71 | switch op.opType { 72 | case opTypeSet: 73 | b.db.set(op.key, op.value) 74 | case opTypeDelete: 75 | b.db.delete(op.key) 76 | default: 77 | return fmt.Errorf("unknown operation type %v (%v)", op.opType, op) 78 | } 79 | } 80 | 81 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 82 | return b.Close() 83 | } 84 | 85 | // WriteSync implements Batch. 86 | func (b *memDBBatch) WriteSync() error { 87 | return b.Write() 88 | } 89 | 90 | // Close implements Batch. 91 | func (b *memDBBatch) Close() error { 92 | b.ops = nil 93 | return nil 94 | } 95 | -------------------------------------------------------------------------------- /.github/workflows/conventional-pr-title.yml: -------------------------------------------------------------------------------- 1 | name: "Conventional PR Title" 2 | on: 3 | pull_request_target: 4 | types: 5 | - opened 6 | - edited 7 | - synchronize 8 | 9 | permissions: 10 | pull-requests: write 11 | 12 | jobs: 13 | main: 14 | name: Validate PR title 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: amannn/action-semantic-pull-request@v5 18 | id: lint_pr_title 19 | env: 20 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 21 | with: 22 | types: | 23 | feat 24 | fix 25 | build 26 | chore 27 | ci 28 | docs 29 | refactor 30 | perf 31 | test 32 | revert 33 | spec 34 | merge 35 | 36 | - uses: marocchino/sticky-pull-request-comment@v2 37 | # When the previous steps fails, the workflow would stop. By adding this 38 | # condition you can continue the execution with the populated error message. 39 | if: always() && (steps.lint_pr_title.outputs.error_message != null) 40 | with: 41 | header: pr-title-lint-error 42 | message: | 43 | Hey there and thank you for opening this pull request! 
👋🏼 44 | 45 | We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted. 46 | 47 | Details: 48 | 49 | ``` 50 | ${{ steps.lint_pr_title.outputs.error_message }} 51 | ``` 52 | 53 | General format: `type(scope): msg` 54 | Breaking change: `type(scope)!: msg` 55 | Multi-scope change: `type: msg` 56 | Types: `feat`, `fix`, `build`, `chore`, `ci`, `docs`, `refactor`, `perf`, `test`, `revert`, `spec`, `merge`. 57 | Example: `fix(cmd/cometbft/commands/debug): execute p.Signal only when p is not nil` 58 | 59 | # Delete a previous comment when the issue has been resolved 60 | - if: ${{ steps.lint_pr_title.outputs.error_message == null }} 61 | uses: marocchino/sticky-pull-request-comment@v2 62 | with: 63 | header: pr-title-lint-error 64 | delete: true 65 | -------------------------------------------------------------------------------- /db.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | type BackendType string 9 | 10 | // These are valid backend types. 11 | const ( 12 | // GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most 13 | // popular implementation) 14 | // - UNMAINTANED 15 | // - pure go 16 | // - stable 17 | GoLevelDBBackend BackendType = "goleveldb" 18 | // CLevelDBBackend represents cleveldb (uses levigo wrapper) 19 | // - DEPRECATED 20 | // - fast 21 | // - requires gcc 22 | // - use cleveldb build tag (go build -tags cleveldb) 23 | CLevelDBBackend BackendType = "cleveldb" 24 | // MemDBBackend represents in-memory key value store, which is mostly used 25 | // for testing. 
	MemDBBackend BackendType = "memdb"
	// BoltDBBackend represents bolt (uses etcd's fork of bolt -
	// github.com/etcd-io/bbolt)
	// - DEPRECATED
	// - pure go
	// - use boltdb build tag (go build -tags boltdb)
	BoltDBBackend BackendType = "boltdb"
	// RocksDBBackend represents rocksdb (uses https://github.com/linxGnu/grocksdb)
	// - requires gcc
	// - use rocksdb build tag (go build -tags rocksdb)
	RocksDBBackend BackendType = "rocksdb"
	// BadgerDBBackend represents badger (uses github.com/dgraph-io/badger)
	// - pure go
	// - use badgerdb build tag (go build -tags badgerdb)
	BadgerDBBackend BackendType = "badgerdb"
	// PebbleDBBackend represents pebble (uses github.com/cockroachdb/pebble)
	// - pure go
	PebbleDBBackend BackendType = "pebbledb"
)

// dbCreator constructs a DB implementation named name, rooted in directory dir.
type dbCreator func(name string, dir string) (DB, error)

// backends holds the creator registered for each backend type; NewDB looks
// backends up here.
var backends = map[BackendType]dbCreator{}

// registerDBCreator registers creator under backend. The first registration
// for a given backend wins; subsequent calls for the same backend are no-ops.
func registerDBCreator(backend BackendType, creator dbCreator) {
	_, ok := backends[backend]
	if ok {
		return
	}
	backends[backend] = creator
}

// NewDB creates a new database of type backend with the given name.
59 | func NewDB(name string, backend BackendType, dir string) (DB, error) { 60 | dbCreator, ok := backends[backend] 61 | if !ok { 62 | keys := make([]string, 0, len(backends)) 63 | for k := range backends { 64 | keys = append(keys, string(k)) 65 | } 66 | return nil, fmt.Errorf("unknown db_backend %s, expected one of %v", 67 | backend, strings.Join(keys, ",")) 68 | } 69 | 70 | db, err := dbCreator(name, dir) 71 | if err != nil { 72 | return nil, fmt.Errorf("failed to initialize database: %w", err) 73 | } 74 | return db, nil 75 | } 76 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/cometbft/cometbft-db 2 | 3 | go 1.23 4 | 5 | require ( 6 | github.com/cockroachdb/pebble v1.1.4 7 | github.com/dgraph-io/badger/v4 v4.5.1 8 | github.com/google/btree v1.1.3 9 | github.com/jmhodges/levigo v1.0.0 10 | github.com/linxGnu/grocksdb v1.9.8 11 | github.com/stretchr/testify v1.10.0 12 | github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca 13 | go.etcd.io/bbolt v1.4.0 14 | ) 15 | 16 | require ( 17 | github.com/DataDog/zstd v1.5.6 // indirect 18 | github.com/beorn7/perks v1.0.1 // indirect 19 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 20 | github.com/cockroachdb/errors v1.11.3 // indirect 21 | github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect 22 | github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect 23 | github.com/cockroachdb/redact v1.1.5 // indirect 24 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect 25 | github.com/davecgh/go-spew v1.1.1 // indirect 26 | github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect 27 | github.com/dustin/go-humanize v1.0.1 // indirect 28 | github.com/getsentry/sentry-go v0.31.1 // indirect 29 | github.com/gogo/protobuf v1.3.2 // indirect 30 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // 
indirect 31 | github.com/golang/snappy v0.0.4 // indirect 32 | github.com/google/flatbuffers v25.1.24+incompatible // indirect 33 | github.com/klauspost/compress v1.17.11 // indirect 34 | github.com/kr/pretty v0.3.1 // indirect 35 | github.com/kr/text v0.2.0 // indirect 36 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 37 | github.com/pkg/errors v0.9.1 // indirect 38 | github.com/pmezard/go-difflib v1.0.0 // indirect 39 | github.com/prometheus/client_golang v1.20.5 // indirect 40 | github.com/prometheus/client_model v0.6.1 // indirect 41 | github.com/prometheus/common v0.62.0 // indirect 42 | github.com/prometheus/procfs v0.15.1 // indirect 43 | github.com/rogpeppe/go-internal v1.13.1 // indirect 44 | go.opencensus.io v0.24.0 // indirect 45 | golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect 46 | golang.org/x/net v0.34.0 // indirect 47 | golang.org/x/sys v0.29.0 // indirect 48 | golang.org/x/text v0.21.0 // indirect 49 | google.golang.org/protobuf v1.36.4 // indirect 50 | gopkg.in/yaml.v3 v3.0.1 // indirect 51 | ) 52 | 53 | 54 | retract ( 55 | v0.6.5 // Breaking changes were released with the wrong tag (use v0.6.6 or later). 56 | [v1.0.2, v1.0.3] // These do not have Docker images due to the broken pipeline. 
57 | ) 58 | -------------------------------------------------------------------------------- /cleveldb_test.go: -------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | "fmt" 9 | "math/rand" 10 | "os" 11 | "testing" 12 | 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | func BenchmarkRandomReadsWrites2(b *testing.B) { 18 | b.StopTimer() 19 | 20 | numItems := int64(1000000) 21 | internal := map[int64]int64{} 22 | for i := 0; i < int(numItems); i++ { 23 | internal[int64(i)] = int64(0) 24 | } 25 | db, err := NewCLevelDB(fmt.Sprintf("test_%x", randStr(12)), "") 26 | if err != nil { 27 | b.Fatal(err.Error()) 28 | return 29 | } 30 | 31 | b.StartTimer() 32 | 33 | for i := 0; i < b.N; i++ { 34 | // Write something 35 | { 36 | idx := (int64(rand.Int()) % numItems) 37 | internal[idx]++ 38 | val := internal[idx] 39 | idxBytes := int642Bytes(int64(idx)) 40 | valBytes := int642Bytes(int64(val)) 41 | db.Set( 42 | idxBytes, 43 | valBytes, 44 | ) 45 | } 46 | // Read something 47 | { 48 | idx := (int64(rand.Int()) % numItems) 49 | val := internal[idx] 50 | idxBytes := int642Bytes(int64(idx)) 51 | valBytes, err := db.Get(idxBytes) 52 | if err != nil { 53 | b.Error(err) 54 | } 55 | if val == 0 { 56 | if !bytes.Equal(valBytes, nil) { 57 | b.Errorf("Expected %v for %v, got %X", 58 | nil, idx, valBytes) 59 | break 60 | } 61 | } else { 62 | if len(valBytes) != 8 { 63 | b.Errorf("Expected length 8 for %v, got %X", 64 | idx, valBytes) 65 | break 66 | } 67 | valGot := bytes2Int64(valBytes) 68 | if val != valGot { 69 | b.Errorf("Expected %v for %v, got %v", 70 | val, idx, valGot) 71 | break 72 | } 73 | } 74 | } 75 | } 76 | 77 | db.Close() 78 | } 79 | 80 | func TestCLevelDBBackend(t *testing.T) { 81 | name := fmt.Sprintf("test_%x", randStr(12)) 82 | // Can't use "" (current directory) or "./" here because levigo.Open 
returns: 83 | // "Error initializing DB: IO error: test_XXX.db: Invalid argument" 84 | dir := os.TempDir() 85 | db, err := NewDB(name, CLevelDBBackend, dir) 86 | require.NoError(t, err) 87 | defer cleanupDBDir(dir, name) 88 | 89 | _, ok := db.(*CLevelDB) 90 | assert.True(t, ok) 91 | } 92 | 93 | func TestCLevelDBStats(t *testing.T) { 94 | name := fmt.Sprintf("test_%x", randStr(12)) 95 | dir := os.TempDir() 96 | db, err := NewDB(name, CLevelDBBackend, dir) 97 | require.NoError(t, err) 98 | defer cleanupDBDir(dir, name) 99 | 100 | assert.NotEmpty(t, db.Stats()) 101 | } 102 | -------------------------------------------------------------------------------- /goleveldb_iterator.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/syndtr/goleveldb/leveldb/iterator" 7 | ) 8 | 9 | type goLevelDBIterator struct { 10 | source iterator.Iterator 11 | start []byte 12 | end []byte 13 | isReverse bool 14 | isInvalid bool 15 | } 16 | 17 | var _ Iterator = (*goLevelDBIterator)(nil) 18 | 19 | func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { 20 | if isReverse { 21 | if end == nil { 22 | source.Last() 23 | } else { 24 | valid := source.Seek(end) 25 | if valid { 26 | eoakey := source.Key() // end or after key 27 | if bytes.Compare(end, eoakey) <= 0 { 28 | source.Prev() 29 | } 30 | } else { 31 | source.Last() 32 | } 33 | } 34 | } else { 35 | if start == nil { 36 | source.First() 37 | } else { 38 | source.Seek(start) 39 | } 40 | } 41 | return &goLevelDBIterator{ 42 | source: source, 43 | start: start, 44 | end: end, 45 | isReverse: isReverse, 46 | isInvalid: false, 47 | } 48 | } 49 | 50 | // Domain implements Iterator. 51 | func (itr *goLevelDBIterator) Domain() (start []byte, end []byte) { 52 | return itr.start, itr.end 53 | } 54 | 55 | // Valid implements Iterator. 
56 | func (itr *goLevelDBIterator) Valid() bool { 57 | // Once invalid, forever invalid. 58 | if itr.isInvalid { 59 | return false 60 | } 61 | 62 | // If source errors, invalid. 63 | if err := itr.Error(); err != nil { 64 | itr.isInvalid = true 65 | return false 66 | } 67 | 68 | // If source is invalid, invalid. 69 | if !itr.source.Valid() { 70 | itr.isInvalid = true 71 | return false 72 | } 73 | 74 | // If key is end or past it, invalid. 75 | start := itr.start 76 | end := itr.end 77 | key := itr.source.Key() 78 | 79 | if itr.isReverse { 80 | if start != nil && bytes.Compare(key, start) < 0 { 81 | itr.isInvalid = true 82 | return false 83 | } 84 | } else { 85 | if end != nil && bytes.Compare(end, key) <= 0 { 86 | itr.isInvalid = true 87 | return false 88 | } 89 | } 90 | 91 | // Valid 92 | return true 93 | } 94 | 95 | // Key implements Iterator. 96 | // The caller should not modify the contents of the returned slice. 97 | // Instead, the caller should make a copy and work on the copy. 98 | func (itr *goLevelDBIterator) Key() []byte { 99 | itr.assertIsValid() 100 | return itr.source.Key() 101 | } 102 | 103 | // Value implements Iterator. 104 | // The caller should not modify the contents of the returned slice. 105 | // Instead, the caller should make a copy and work on the copy. 106 | func (itr *goLevelDBIterator) Value() []byte { 107 | itr.assertIsValid() 108 | return itr.source.Value() 109 | } 110 | 111 | // Next implements Iterator. 112 | func (itr *goLevelDBIterator) Next() { 113 | itr.assertIsValid() 114 | if itr.isReverse { 115 | itr.source.Prev() 116 | } else { 117 | itr.source.Next() 118 | } 119 | } 120 | 121 | // Error implements Iterator. 122 | func (itr *goLevelDBIterator) Error() error { 123 | return itr.source.Error() 124 | } 125 | 126 | // Close implements Iterator. 
127 | func (itr *goLevelDBIterator) Close() error { 128 | itr.source.Release() 129 | return nil 130 | } 131 | 132 | func (itr goLevelDBIterator) assertIsValid() { 133 | if !itr.Valid() { 134 | panic("iterator is invalid") 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | on: 14 | workflow_dispatch: # allow running workflow manually 15 | push: 16 | branches: ["main"] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: ["main"] 20 | 21 | concurrency: 22 | group: ${{ github.workflow }}-${{ github.ref }} 23 | cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} 24 | 25 | jobs: 26 | analyze: 27 | name: Analyze 28 | runs-on: ubuntu-latest 29 | permissions: 30 | actions: read 31 | contents: read 32 | security-events: write 33 | 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | language: ['go'] 38 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 39 | # Use only 'java' to analyze code written in Java, Kotlin or both 40 | # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both 41 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 42 | 43 | steps: 44 | - name: Checkout repository 45 | uses: actions/checkout@v4 46 | 47 | # Initializes the 
CodeQL tools for scanning. 48 | - name: Initialize CodeQL 49 | uses: github/codeql-action/init@v3 50 | with: 51 | languages: ${{ matrix.language }} 52 | # If you wish to specify custom queries, you can do so here or in a config file. 53 | # By default, queries listed here will override any specified in a config file. 54 | # Prefix the list here with "+" to use these queries and those in the config file. 55 | 56 | # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 57 | # queries: security-extended,security-and-quality 58 | 59 | 60 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). 61 | # If this step fails, then you should remove it and run the build manually (see below) 62 | - name: Autobuild 63 | uses: github/codeql-action/autobuild@v3 64 | 65 | # ℹ️ Command-line programs to run using the OS shell. 66 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 67 | 68 | # If the Autobuild fails above, remove it and uncomment the following three lines. 69 | # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 70 | 71 | # - run: | 72 | # echo "Run, Build Application using script" 73 | # ./location_of_script_within_repo/buildscript.sh 74 | 75 | - name: Perform CodeQL Analysis 76 | uses: github/codeql-action/analyze@v3 77 | with: 78 | category: "/language:${{matrix.language}}" 79 | -------------------------------------------------------------------------------- /cleveldb_iterator.go: -------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | 9 | "github.com/jmhodges/levigo" 10 | ) 11 | 12 | // cLevelDBIterator is a cLevelDB iterator. 
13 | type cLevelDBIterator struct { 14 | source *levigo.Iterator 15 | start, end []byte 16 | isReverse bool 17 | isInvalid bool 18 | } 19 | 20 | var _ Iterator = (*cLevelDBIterator)(nil) 21 | 22 | func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { 23 | if isReverse { 24 | if end == nil || len(end) == 0 { 25 | source.SeekToLast() 26 | } else { 27 | source.Seek(end) 28 | if source.Valid() { 29 | eoakey := source.Key() // end or after key 30 | if bytes.Compare(end, eoakey) <= 0 { 31 | source.Prev() 32 | } 33 | } else { 34 | source.SeekToLast() 35 | } 36 | } 37 | } else { 38 | if start == nil || len(start) == 0 { 39 | source.SeekToFirst() 40 | } else { 41 | source.Seek(start) 42 | } 43 | } 44 | return &cLevelDBIterator{ 45 | source: source, 46 | start: start, 47 | end: end, 48 | isReverse: isReverse, 49 | isInvalid: false, 50 | } 51 | } 52 | 53 | // Domain implements Iterator. 54 | func (itr cLevelDBIterator) Domain() ([]byte, []byte) { 55 | return itr.start, itr.end 56 | } 57 | 58 | // Valid implements Iterator. 59 | func (itr cLevelDBIterator) Valid() bool { 60 | // Once invalid, forever invalid. 61 | if itr.isInvalid { 62 | return false 63 | } 64 | 65 | // If source errors, invalid. 66 | if itr.source.GetError() != nil { 67 | itr.isInvalid = true 68 | return false 69 | } 70 | 71 | // If source is invalid, invalid. 72 | if !itr.source.Valid() { 73 | itr.isInvalid = true 74 | return false 75 | } 76 | 77 | // If key is end or past it, invalid. 78 | start := itr.start 79 | end := itr.end 80 | key := itr.source.Key() 81 | if itr.isReverse { 82 | if start != nil && bytes.Compare(key, start) < 0 { 83 | itr.isInvalid = true 84 | return false 85 | } 86 | } else { 87 | if end != nil && bytes.Compare(end, key) <= 0 { 88 | itr.isInvalid = true 89 | return false 90 | } 91 | } 92 | 93 | // It's valid. 94 | return true 95 | } 96 | 97 | // Key implements Iterator. 
98 | // The caller should not modify the contents of the returned slice. 99 | // Instead, the caller should make a copy and work on the copy. 100 | func (itr cLevelDBIterator) Key() []byte { 101 | itr.assertIsValid() 102 | return itr.source.Key() 103 | } 104 | 105 | // Value implements Iterator. 106 | // The caller should not modify the contents of the returned slice. 107 | // Instead, the caller should make a copy and work on the copy. 108 | func (itr cLevelDBIterator) Value() []byte { 109 | itr.assertIsValid() 110 | return itr.source.Value() 111 | } 112 | 113 | // Next implements Iterator. 114 | func (itr cLevelDBIterator) Next() { 115 | itr.assertIsValid() 116 | if itr.isReverse { 117 | itr.source.Prev() 118 | } else { 119 | itr.source.Next() 120 | } 121 | } 122 | 123 | // Error implements Iterator. 124 | func (itr cLevelDBIterator) Error() error { 125 | return itr.source.GetError() 126 | } 127 | 128 | // Close implements Iterator. 129 | func (itr cLevelDBIterator) Close() error { 130 | itr.source.Close() 131 | return nil 132 | } 133 | 134 | func (itr cLevelDBIterator) assertIsValid() { 135 | if !itr.Valid() { 136 | panic("iterator is invalid") 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /boltdb_iterator.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | 9 | "go.etcd.io/bbolt" 10 | ) 11 | 12 | // boltDBIterator allows you to iterate on range of keys/values given some 13 | // start / end keys (nil & nil will result in doing full scan). 14 | type boltDBIterator struct { 15 | tx *bbolt.Tx 16 | 17 | itr *bbolt.Cursor 18 | start []byte 19 | end []byte 20 | 21 | currentKey []byte 22 | currentValue []byte 23 | 24 | isInvalid bool 25 | isReverse bool 26 | } 27 | 28 | var _ Iterator = (*boltDBIterator)(nil) 29 | 30 | // newBoltDBIterator creates a new boltDBIterator. 
31 | func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator { 32 | itr := tx.Bucket(bucket).Cursor() 33 | 34 | var ck, cv []byte 35 | if isReverse { 36 | switch { 37 | case end == nil: 38 | ck, cv = itr.Last() 39 | default: 40 | _, _ = itr.Seek(end) // after key 41 | ck, cv = itr.Prev() // return to end key 42 | } 43 | } else { 44 | switch { 45 | case start == nil: 46 | ck, cv = itr.First() 47 | default: 48 | ck, cv = itr.Seek(start) 49 | } 50 | } 51 | 52 | return &boltDBIterator{ 53 | tx: tx, 54 | itr: itr, 55 | start: start, 56 | end: end, 57 | currentKey: ck, 58 | currentValue: cv, 59 | isReverse: isReverse, 60 | isInvalid: false, 61 | } 62 | } 63 | 64 | // Domain implements Iterator. 65 | func (itr *boltDBIterator) Domain() ([]byte, []byte) { 66 | return itr.start, itr.end 67 | } 68 | 69 | // Valid implements Iterator. 70 | func (itr *boltDBIterator) Valid() bool { 71 | if itr.isInvalid { 72 | return false 73 | } 74 | 75 | if itr.Error() != nil { 76 | itr.isInvalid = true 77 | return false 78 | } 79 | 80 | // iterated to the end of the cursor 81 | if itr.currentKey == nil { 82 | itr.isInvalid = true 83 | return false 84 | } 85 | 86 | if itr.isReverse { 87 | if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 { 88 | itr.isInvalid = true 89 | return false 90 | } 91 | } else { 92 | if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 { 93 | itr.isInvalid = true 94 | return false 95 | } 96 | } 97 | 98 | // Valid 99 | return true 100 | } 101 | 102 | // Next implements Iterator. 103 | func (itr *boltDBIterator) Next() { 104 | itr.assertIsValid() 105 | if itr.isReverse { 106 | itr.currentKey, itr.currentValue = itr.itr.Prev() 107 | } else { 108 | itr.currentKey, itr.currentValue = itr.itr.Next() 109 | } 110 | } 111 | 112 | // Key implements Iterator. 113 | // The caller should not modify the contents of the returned slice. 114 | // Instead, the caller should make a copy and work on the copy. 
115 | func (itr *boltDBIterator) Key() []byte { 116 | itr.assertIsValid() 117 | return itr.currentKey 118 | } 119 | 120 | // Value implements Iterator. 121 | // The caller should not modify the contents of the returned slice. 122 | // Instead, the caller should make a copy and work on the copy. 123 | func (itr *boltDBIterator) Value() []byte { 124 | itr.assertIsValid() 125 | return itr.currentValue 126 | } 127 | 128 | // Error implements Iterator. 129 | func (itr *boltDBIterator) Error() error { 130 | return nil 131 | } 132 | 133 | // Close implements Iterator. 134 | func (itr *boltDBIterator) Close() error { 135 | return itr.tx.Rollback() 136 | } 137 | 138 | func (itr *boltDBIterator) assertIsValid() { 139 | if !itr.Valid() { 140 | panic("iterator is invalid") 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /prefixdb_iterator.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | ) 7 | 8 | // IteratePrefix is a convenience function for iterating over a key domain 9 | // restricted by prefix. 10 | func IteratePrefix(db DB, prefix []byte) (Iterator, error) { 11 | var start, end []byte 12 | if len(prefix) == 0 { 13 | start = nil 14 | end = nil 15 | } else { 16 | start = cp(prefix) 17 | end = cpIncr(prefix) 18 | } 19 | itr, err := db.Iterator(start, end) 20 | if err != nil { 21 | return nil, err 22 | } 23 | return itr, nil 24 | } 25 | 26 | // Strips prefix while iterating from Iterator. 
type prefixDBIterator struct {
	prefix []byte   // key prefix stripped from every key this iterator yields
	start  []byte   // domain start as seen by the caller (prefix already stripped)
	end    []byte   // domain end as seen by the caller (prefix already stripped)
	source Iterator // underlying iterator over the prefixed key space
	valid  bool     // false once iteration has moved past the prefix range
	err    error    // set when the backend yields a key lacking the prefix
}

var _ Iterator = (*prefixDBIterator)(nil)

// newPrefixIterator wraps source, which should already be positioned within
// the prefixed key range, and returns an iterator that strips prefix from
// the keys it yields. It never returns a non-nil error; the error return is
// kept for signature symmetry (see the nolint:unparam annotation).
func newPrefixIterator(prefix, start, end []byte, source Iterator) (*prefixDBIterator, error) { //nolint:unparam
	// Pre-built result for the case where no usable entry exists.
	pitrInvalid := &prefixDBIterator{
		prefix: prefix,
		start:  start,
		end:    end,
		source: source,
		valid:  false,
	}

	// Empty keys are not allowed, so if a key exists in the database that exactly matches the
	// prefix we need to skip it.
	if source.Valid() && bytes.Equal(source.Key(), prefix) {
		source.Next()
	}

	// If the source is exhausted, or its current key is already outside the
	// prefix range, hand back an iterator that is invalid from the start.
	if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) {
		return pitrInvalid, nil
	}

	return &prefixDBIterator{
		prefix: prefix,
		start:  start,
		end:    end,
		source: source,
		valid:  true,
	}, nil
}

// Domain implements Iterator.
func (itr *prefixDBIterator) Domain() (start []byte, end []byte) {
	return itr.start, itr.end
}

// Valid implements Iterator.
func (itr *prefixDBIterator) Valid() bool {
	if !itr.valid || itr.err != nil || !itr.source.Valid() {
		return false
	}

	// Defensive check: the backend must only hand us keys carrying the
	// prefix; anything else is recorded as an error and ends iteration.
	key := itr.source.Key()
	if len(key) < len(itr.prefix) || !bytes.Equal(key[:len(itr.prefix)], itr.prefix) {
		itr.err = fmt.Errorf("received invalid key from backend: %x (expected prefix %x)",
			key, itr.prefix)
		return false
	}

	return true
}

// Next implements Iterator. It advances the source and invalidates this
// iterator once the source leaves the prefix range.
func (itr *prefixDBIterator) Next() {
	itr.assertIsValid()
	itr.source.Next()

	if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) {
		itr.valid = false
	} else if bytes.Equal(itr.source.Key(), itr.prefix) {
		// Empty keys are not allowed, so if a key exists in the database that exactly matches the
		// prefix we need to skip it.
97 | itr.Next() 98 | } 99 | } 100 | 101 | // Next implements Iterator. 102 | func (itr *prefixDBIterator) Key() []byte { 103 | itr.assertIsValid() 104 | key := itr.source.Key() 105 | return key[len(itr.prefix):] // we have checked the key in Valid() 106 | } 107 | 108 | // Value implements Iterator. 109 | func (itr *prefixDBIterator) Value() []byte { 110 | itr.assertIsValid() 111 | return itr.source.Value() 112 | } 113 | 114 | // Error implements Iterator. 115 | func (itr *prefixDBIterator) Error() error { 116 | if err := itr.source.Error(); err != nil { 117 | return err 118 | } 119 | return itr.err 120 | } 121 | 122 | // Close implements Iterator. 123 | func (itr *prefixDBIterator) Close() error { 124 | return itr.source.Close() 125 | } 126 | 127 | func (itr *prefixDBIterator) assertIsValid() { 128 | if !itr.Valid() { 129 | panic("iterator is invalid") 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GOTOOLS = github.com/golangci/golangci-lint/cmd/golangci-lint 2 | PACKAGES=$(shell go list ./...) 3 | INCLUDE = -I=${GOPATH}/src/github.com/cometbft/cometbft-db -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf 4 | DOCKER_TEST_IMAGE ?= cometbft/cometbft-db-testing 5 | DOCKER_TEST_IMAGE_VERSION ?= latest 6 | NON_INTERACTIVE ?= 0 7 | DOCKER_TEST_INTERACTIVE_FLAGS ?= -it 8 | 9 | ifeq (1,$(NON_INTERACTIVE)) 10 | DOCKER_TEST_INTERACTIVE_FLAGS := 11 | endif 12 | 13 | all: lint test 14 | 15 | #? test: Run pure Go tests only 16 | test: 17 | @echo "--> Running go test" 18 | @go test $(PACKAGES) -tags boltdb,badgerdb 19 | .PHONY: test 20 | 21 | #? test-cleveldb: Run cleveldb tests 22 | test-cleveldb: 23 | @echo "--> Running go test" 24 | @go test $(PACKAGES) -tags cleveldb -v 25 | .PHONY: test-cleveldb 26 | 27 | #? 
test-rocksdb: Run rocksdb tests 28 | test-rocksdb: 29 | @echo "--> Running go test" 30 | @go test $(PACKAGES) -tags rocksdb -v 31 | .PHONY: test-rocksdb 32 | 33 | #? test-boltdb: Run boltdb tests 34 | test-boltdb: 35 | @echo "--> Running go test" 36 | @go test $(PACKAGES) -tags boltdb -v 37 | .PHONY: test-boltdb 38 | 39 | #? test-badgerdb: Run badgerdb tests 40 | test-badgerdb: 41 | @echo "--> Running go test" 42 | @go test $(PACKAGES) -tags badgerdb -v 43 | .PHONY: test-badgerdb 44 | 45 | #? test-pebbledb: Run pebbledb tests 46 | test-pebbledb: 47 | @echo "--> Running go test" 48 | @go test $(PACKAGES) -v 49 | 50 | #? test-all-with-coverage: Run all tests with coverage 51 | test-all-with-coverage: 52 | @echo "--> Running go test for all databases, with coverage" 53 | @CGO_ENABLED=1 go test ./... \ 54 | -mod=readonly \ 55 | -timeout 8m \ 56 | -race \ 57 | -coverprofile=coverage.txt \ 58 | -covermode=atomic \ 59 | -tags=cleveldb,boltdb,rocksdb,badgerdb\ 60 | -v 61 | .PHONY: test-all-with-coverage 62 | 63 | #? lint: Run linter 64 | lint: 65 | @echo "--> Running linter" 66 | @go run github.com/golangci/golangci-lint/cmd/golangci-lint@latest run 67 | @go mod verify 68 | .PHONY: lint 69 | 70 | #? format: Format the code 71 | format: 72 | find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s 73 | find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w 74 | .PHONY: format 75 | 76 | #? docker-test-image: Build the Docker test image 77 | docker-test-image: 78 | @echo "--> Building Docker test image" 79 | @cd tools && \ 80 | docker build -t $(DOCKER_TEST_IMAGE):$(DOCKER_TEST_IMAGE_VERSION) . 81 | .PHONY: docker-test-image 82 | 83 | #? docker-test: Run the same test as is executed in CI, but locally. 
84 | docker-test: 85 | @echo "--> Running all tests with all databases with Docker (interactive flags: \"$(DOCKER_TEST_INTERACTIVE_FLAGS)\")" 86 | @docker run $(DOCKER_TEST_INTERACTIVE_FLAGS) --rm --name cometbft-db-test \ 87 | -v `pwd`:/cometbft \ 88 | -w /cometbft \ 89 | --entrypoint "" \ 90 | $(DOCKER_TEST_IMAGE):$(DOCKER_TEST_IMAGE_VERSION) \ 91 | make test-all-with-coverage 92 | .PHONY: docker-test 93 | 94 | docker-lint: 95 | @docker run $(DOCKER_TEST_INTERACTIVE_FLAGS) --rm --name cometbft-db-test \ 96 | -v `pwd`:/cometbft \ 97 | -w /cometbft \ 98 | --entrypoint "" \ 99 | $(DOCKER_TEST_IMAGE):$(DOCKER_TEST_IMAGE_VERSION) \ 100 | make lint 101 | .PHONY: docker-lint 102 | 103 | #? tools: Install tools 104 | tools: 105 | go get -v $(GOTOOLS) 106 | .PHONY: tools 107 | 108 | #? vulncheck: Run go vuln check 109 | vulncheck: 110 | @go run golang.org/x/vuln/cmd/govulncheck@latest ./... 111 | .PHONY: vulncheck 112 | -------------------------------------------------------------------------------- /db_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestDBIteratorSingleKey(t *testing.T) { 12 | for backend := range backends { 13 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 14 | db, dir := newTempDB(t, backend) 15 | defer os.RemoveAll(dir) 16 | 17 | err := db.SetSync(bz("1"), bz("value_1")) 18 | assert.NoError(t, err) 19 | itr, err := db.Iterator(nil, nil) 20 | assert.NoError(t, err) 21 | 22 | checkValid(t, itr, true) 23 | checkNext(t, itr, false) 24 | checkValid(t, itr, false) 25 | checkNextPanics(t, itr) 26 | 27 | // Once invalid... 
28 | checkInvalid(t, itr) 29 | }) 30 | } 31 | } 32 | 33 | func TestDBIteratorTwoKeys(t *testing.T) { 34 | for backend := range backends { 35 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 36 | db, dir := newTempDB(t, backend) 37 | defer os.RemoveAll(dir) 38 | 39 | err := db.SetSync(bz("1"), bz("value_1")) 40 | assert.NoError(t, err) 41 | 42 | err = db.SetSync(bz("2"), bz("value_1")) 43 | assert.NoError(t, err) 44 | 45 | { // Fail by calling Next too much 46 | itr, err := db.Iterator(nil, nil) 47 | assert.NoError(t, err) 48 | checkValid(t, itr, true) 49 | 50 | checkNext(t, itr, true) 51 | checkValid(t, itr, true) 52 | 53 | checkNext(t, itr, false) 54 | checkValid(t, itr, false) 55 | 56 | checkNextPanics(t, itr) 57 | 58 | // Once invalid... 59 | checkInvalid(t, itr) 60 | } 61 | }) 62 | } 63 | } 64 | 65 | func TestDBIteratorMany(t *testing.T) { 66 | for backend := range backends { 67 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 68 | db, dir := newTempDB(t, backend) 69 | defer os.RemoveAll(dir) 70 | 71 | keys := make([][]byte, 100) 72 | for i := 0; i < 100; i++ { 73 | keys[i] = []byte{byte(i)} 74 | } 75 | 76 | value := []byte{5} 77 | for _, k := range keys { 78 | err := db.Set(k, value) 79 | assert.NoError(t, err) 80 | } 81 | 82 | itr, err := db.Iterator(nil, nil) 83 | assert.NoError(t, err) 84 | 85 | defer itr.Close() 86 | for ; itr.Valid(); itr.Next() { 87 | key := itr.Key() 88 | value = itr.Value() 89 | value1, err := db.Get(key) 90 | assert.NoError(t, err) 91 | assert.Equal(t, value1, value) 92 | } 93 | }) 94 | } 95 | } 96 | 97 | func TestDBIteratorEmpty(t *testing.T) { 98 | for backend := range backends { 99 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 100 | db, dir := newTempDB(t, backend) 101 | defer os.RemoveAll(dir) 102 | 103 | itr, err := db.Iterator(nil, nil) 104 | assert.NoError(t, err) 105 | 106 | checkInvalid(t, itr) 107 | }) 108 | } 109 | } 110 | 111 | func TestDBIteratorEmptyBeginAfter(t 
*testing.T) { 112 | for backend := range backends { 113 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 114 | db, dir := newTempDB(t, backend) 115 | defer os.RemoveAll(dir) 116 | 117 | itr, err := db.Iterator(bz("1"), nil) 118 | assert.NoError(t, err) 119 | 120 | checkInvalid(t, itr) 121 | }) 122 | } 123 | } 124 | 125 | func TestDBIteratorNonemptyBeginAfter(t *testing.T) { 126 | for backend := range backends { 127 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 128 | db, dir := newTempDB(t, backend) 129 | defer os.RemoveAll(dir) 130 | 131 | err := db.SetSync(bz("1"), bz("value_1")) 132 | assert.NoError(t, err) 133 | itr, err := db.Iterator(bz("2"), nil) 134 | assert.NoError(t, err) 135 | 136 | checkInvalid(t, itr) 137 | }) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | tests: true 3 | timeout: 10m 4 | 5 | linters: 6 | enable-all: true 7 | disable: 8 | - containedctx 9 | - contextcheck 10 | - cyclop 11 | - dupword 12 | - err113 13 | - errname 14 | - errorlint 15 | - exhaustive 16 | - exhaustruct 17 | - forbidigo 18 | - forcetypeassert 19 | - funlen 20 | - gochecknoglobals 21 | - gochecknoinits 22 | - gocognit 23 | - gocyclo 24 | - godox 25 | - interfacebloat 26 | - intrange 27 | - ireturn 28 | - lll 29 | - maintidx 30 | - mnd 31 | - nestif 32 | - nilnil 33 | - nlreturn 34 | - nonamedreturns 35 | - predeclared 36 | - recvcheck 37 | - tagliatelle 38 | - testifylint 39 | - usetesting 40 | - varnamelen 41 | - wrapcheck 42 | - wsl 43 | 44 | issues: 45 | exclude-rules: 46 | - path: _test\.go 47 | linters: 48 | - gocritic 49 | - gofmt 50 | - goimport 51 | - gosec 52 | - noctx 53 | - paralleltest 54 | - testpackage 55 | - tparallel 56 | max-issues-per-linter: 10000 57 | max-same-issues: 10000 58 | 59 | linters-settings: 60 | dogsled: 61 | max-blank-identifiers: 3 62 | goconst: 
63 | ignore-tests: true 64 | misspell: 65 | locale: US 66 | gci: 67 | sections: 68 | - standard # Standard section: captures all standard packages. 69 | - default # Default section: contains all imports that could not be matched to another section type. 70 | - blank # blank imports 71 | - dot # dot imports 72 | - prefix(github.com/cometbft/cometbft-db) 73 | custom-order: true 74 | depguard: 75 | rules: 76 | main: 77 | files: 78 | - $all 79 | - "!$test" 80 | allow: 81 | - $gostd 82 | - github.com/cockroachdb/pebble 83 | - github.com/google/btree 84 | - github.com/syndtr/goleveldb/leveldb 85 | test: 86 | files: 87 | - "$test" 88 | allow: 89 | - $gostd 90 | - github.com/stretchr/testify 91 | - github.com/syndtr/goleveldb/leveldb/opt 92 | 93 | revive: 94 | enable-all-rules: true 95 | rules: 96 | - name: comment-spacings # temporarily disabled 97 | disabled: true 98 | - name: max-public-structs 99 | disabled: true 100 | - name: cognitive-complexity 101 | disabled: true 102 | - name: argument-limit 103 | disabled: true 104 | - name: cyclomatic 105 | disabled: true 106 | - name: deep-exit 107 | disabled: true 108 | - name: file-header 109 | disabled: true 110 | - name: function-length 111 | disabled: true 112 | - name: function-result-limit 113 | disabled: true 114 | - name: line-length-limit 115 | disabled: true 116 | - name: flag-parameter 117 | disabled: true 118 | - name: add-constant 119 | disabled: true 120 | - name: empty-lines 121 | disabled: true 122 | - name: import-shadowing 123 | disabled: true 124 | - name: modifies-value-receiver 125 | disabled: true 126 | - name: confusing-naming 127 | disabled: true 128 | - name: defer 129 | disabled: true 130 | - name: unchecked-type-assertion 131 | disabled: true 132 | - name: unhandled-error 133 | disabled: true 134 | arguments: 135 | - "fmt.Printf" 136 | - "fmt.Print" 137 | - "fmt.Println" 138 | gosec: 139 | excludes: 140 | - G115 141 | -------------------------------------------------------------------------------- 
/util_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | // Empty iterator for empty db. 12 | func TestPrefixIteratorNoMatchNil(t *testing.T) { 13 | for backend := range backends { 14 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 15 | db, dir := newTempDB(t, backend) 16 | defer os.RemoveAll(dir) 17 | itr, err := IteratePrefix(db, []byte("2")) 18 | require.NoError(t, err) 19 | 20 | checkInvalid(t, itr) 21 | }) 22 | } 23 | } 24 | 25 | // Empty iterator for db populated after iterator created. 26 | func TestPrefixIteratorNoMatch1(t *testing.T) { 27 | for backend := range backends { 28 | if backend == BoltDBBackend { 29 | t.Log("bolt does not support concurrent writes while iterating") 30 | continue 31 | } 32 | 33 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 34 | db, dir := newTempDB(t, backend) 35 | defer os.RemoveAll(dir) 36 | itr, err := IteratePrefix(db, []byte("2")) 37 | require.NoError(t, err) 38 | err = db.SetSync(bz("1"), bz("value_1")) 39 | require.NoError(t, err) 40 | 41 | checkInvalid(t, itr) 42 | }) 43 | } 44 | } 45 | 46 | // Empty iterator for prefix starting after db entry. 47 | func TestPrefixIteratorNoMatch2(t *testing.T) { 48 | for backend := range backends { 49 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 50 | db, dir := newTempDB(t, backend) 51 | defer os.RemoveAll(dir) 52 | err := db.SetSync(bz("3"), bz("value_3")) 53 | require.NoError(t, err) 54 | itr, err := IteratePrefix(db, []byte("4")) 55 | require.NoError(t, err) 56 | 57 | checkInvalid(t, itr) 58 | }) 59 | } 60 | } 61 | 62 | // Iterator with single val for db with single val, starting from that val. 
63 | func TestPrefixIteratorMatch1(t *testing.T) { 64 | for backend := range backends { 65 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 66 | db, dir := newTempDB(t, backend) 67 | defer os.RemoveAll(dir) 68 | err := db.SetSync(bz("2"), bz("value_2")) 69 | require.NoError(t, err) 70 | itr, err := IteratePrefix(db, bz("2")) 71 | require.NoError(t, err) 72 | 73 | checkValid(t, itr, true) 74 | checkItem(t, itr, bz("2"), bz("value_2")) 75 | checkNext(t, itr, false) 76 | 77 | // Once invalid... 78 | checkInvalid(t, itr) 79 | }) 80 | } 81 | } 82 | 83 | // Iterator with prefix iterates over everything with same prefix. 84 | func TestPrefixIteratorMatches1N(t *testing.T) { 85 | for backend := range backends { 86 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 87 | db, dir := newTempDB(t, backend) 88 | defer os.RemoveAll(dir) 89 | 90 | // prefixed 91 | err := db.SetSync(bz("a/1"), bz("value_1")) 92 | require.NoError(t, err) 93 | err = db.SetSync(bz("a/3"), bz("value_3")) 94 | require.NoError(t, err) 95 | 96 | // not 97 | err = db.SetSync(bz("b/3"), bz("value_3")) 98 | require.NoError(t, err) 99 | err = db.SetSync(bz("a-3"), bz("value_3")) 100 | require.NoError(t, err) 101 | err = db.SetSync(bz("a.3"), bz("value_3")) 102 | require.NoError(t, err) 103 | err = db.SetSync(bz("abcdefg"), bz("value_3")) 104 | require.NoError(t, err) 105 | itr, err := IteratePrefix(db, bz("a/")) 106 | require.NoError(t, err) 107 | 108 | checkValid(t, itr, true) 109 | checkItem(t, itr, bz("a/1"), bz("value_1")) 110 | checkNext(t, itr, true) 111 | checkItem(t, itr, bz("a/3"), bz("value_3")) 112 | 113 | // Bad! 114 | checkNext(t, itr, false) 115 | 116 | // Once invalid... 
117 | checkInvalid(t, itr) 118 | }) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /rocksdb_iterator.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | 9 | "github.com/linxGnu/grocksdb" 10 | ) 11 | 12 | type rocksDBIterator struct { 13 | source *grocksdb.Iterator 14 | start, end []byte 15 | isReverse bool 16 | isInvalid bool 17 | } 18 | 19 | var _ Iterator = (*rocksDBIterator)(nil) 20 | 21 | func newRocksDBIterator(source *grocksdb.Iterator, start, end []byte, isReverse bool) *rocksDBIterator { 22 | if isReverse { 23 | if end == nil { 24 | source.SeekToLast() 25 | } else { 26 | source.Seek(end) 27 | if source.Valid() { 28 | eoakey := moveSliceToBytes(source.Key()) // end or after key 29 | if bytes.Compare(end, eoakey) <= 0 { 30 | source.Prev() 31 | } 32 | } else { 33 | source.SeekToLast() 34 | } 35 | } 36 | } else { 37 | if start == nil { 38 | source.SeekToFirst() 39 | } else { 40 | source.Seek(start) 41 | } 42 | } 43 | return &rocksDBIterator{ 44 | source: source, 45 | start: start, 46 | end: end, 47 | isReverse: isReverse, 48 | isInvalid: false, 49 | } 50 | } 51 | 52 | // Domain implements Iterator. 53 | func (itr *rocksDBIterator) Domain() ([]byte, []byte) { 54 | return itr.start, itr.end 55 | } 56 | 57 | // Valid implements Iterator. 58 | func (itr *rocksDBIterator) Valid() bool { 59 | // Once invalid, forever invalid. 60 | if itr.isInvalid { 61 | return false 62 | } 63 | 64 | // If source has error, invalid. 65 | if err := itr.source.Err(); err != nil { 66 | itr.isInvalid = true 67 | return false 68 | } 69 | 70 | // If source is invalid, invalid. 71 | if !itr.source.Valid() { 72 | itr.isInvalid = true 73 | return false 74 | } 75 | 76 | // If key is end or past it, invalid. 
	start := itr.start
	end := itr.end
	key := moveSliceToBytes(itr.source.Key())
	if itr.isReverse {
		// Reverse iteration: invalid once the key drops below start
		// (start is the inclusive lower bound).
		if start != nil && bytes.Compare(key, start) < 0 {
			itr.isInvalid = true
			return false
		}
	} else {
		// Forward iteration: invalid once the key reaches end
		// (end is the exclusive upper bound).
		if end != nil && bytes.Compare(end, key) <= 0 {
			itr.isInvalid = true
			return false
		}
	}

	// It's valid.
	return true
}

// Key implements Iterator.
// The returned slice is a copy of the original data, therefore it is safe to modify.
func (itr *rocksDBIterator) Key() []byte {
	itr.assertIsValid()
	return moveSliceToBytes(itr.source.Key())
}

// Value implements Iterator.
// The returned slice is a copy of the original data, therefore it is safe to modify.
func (itr *rocksDBIterator) Value() []byte {
	itr.assertIsValid()
	return moveSliceToBytes(itr.source.Value())
}

// Next implements Iterator.
// NOTE(review): this method has a value receiver while every other
// rocksDBIterator method uses a pointer receiver. Advancing still works
// because source is a pointer, but any isInvalid update made by Valid()
// during assertIsValid() mutates a copy and is discarded — consider
// switching to a pointer receiver for consistency.
func (itr rocksDBIterator) Next() {
	itr.assertIsValid()
	if itr.isReverse {
		itr.source.Prev()
	} else {
		itr.source.Next()
	}
}

// Error implements Iterator. It surfaces any error reported by the
// underlying grocksdb iterator.
func (itr *rocksDBIterator) Error() error {
	return itr.source.Err()
}

// Close implements Iterator. It releases the underlying grocksdb iterator
// and never returns an error.
func (itr *rocksDBIterator) Close() error {
	itr.source.Close()
	return nil
}

// assertIsValid panics if the iterator is not in a valid state; it enforces
// the Iterator contract that Key/Value/Next may only be called while
// Valid() is true.
func (itr *rocksDBIterator) assertIsValid() {
	if !itr.Valid() {
		panic("iterator is invalid")
	}
}

// moveSliceToBytes will free the slice and copy out a go []byte
// This function can be applied on *Slice returned from Key() and Value()
// of an Iterator, because they are marked as freed.
140 | func moveSliceToBytes(s *grocksdb.Slice) []byte { 141 | defer s.Free() 142 | if !s.Exists() { 143 | return nil 144 | } 145 | v := make([]byte, len(s.Data())) 146 | copy(v, s.Data()) 147 | return v 148 | } 149 | -------------------------------------------------------------------------------- /prefixdb_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func mockDBWithStuff(t *testing.T) DB { 10 | t.Helper() 11 | db := NewMemDB() 12 | // Under "key" prefix 13 | require.NoError(t, db.Set(bz("key"), bz("value"))) 14 | require.NoError(t, db.Set(bz("key1"), bz("value1"))) 15 | require.NoError(t, db.Set(bz("key2"), bz("value2"))) 16 | require.NoError(t, db.Set(bz("key3"), bz("value3"))) 17 | require.NoError(t, db.Set(bz("something"), bz("else"))) 18 | require.NoError(t, db.Set(bz("k"), bz("val"))) 19 | require.NoError(t, db.Set(bz("ke"), bz("valu"))) 20 | require.NoError(t, db.Set(bz("kee"), bz("valuu"))) 21 | return db 22 | } 23 | 24 | func TestPrefixDBSimple(t *testing.T) { 25 | db := mockDBWithStuff(t) 26 | pdb := NewPrefixDB(db, bz("key")) 27 | 28 | checkValue(t, pdb, bz("key"), nil) 29 | checkValue(t, pdb, bz("key1"), nil) 30 | checkValue(t, pdb, bz("1"), bz("value1")) 31 | checkValue(t, pdb, bz("key2"), nil) 32 | checkValue(t, pdb, bz("2"), bz("value2")) 33 | checkValue(t, pdb, bz("key3"), nil) 34 | checkValue(t, pdb, bz("3"), bz("value3")) 35 | checkValue(t, pdb, bz("something"), nil) 36 | checkValue(t, pdb, bz("k"), nil) 37 | checkValue(t, pdb, bz("ke"), nil) 38 | checkValue(t, pdb, bz("kee"), nil) 39 | } 40 | 41 | func TestPrefixDBIterator1(t *testing.T) { 42 | db := mockDBWithStuff(t) 43 | pdb := NewPrefixDB(db, bz("key")) 44 | 45 | itr, err := pdb.Iterator(nil, nil) 46 | require.NoError(t, err) 47 | checkDomain(t, itr, nil, nil) 48 | checkItem(t, itr, bz("1"), bz("value1")) 49 | checkNext(t, itr, true) 50 | 
checkItem(t, itr, bz("2"), bz("value2")) 51 | checkNext(t, itr, true) 52 | checkItem(t, itr, bz("3"), bz("value3")) 53 | checkNext(t, itr, false) 54 | checkInvalid(t, itr) 55 | err = itr.Close() 56 | require.NoError(t, err) 57 | } 58 | 59 | func TestPrefixDBReverseIterator1(t *testing.T) { 60 | db := mockDBWithStuff(t) 61 | pdb := NewPrefixDB(db, bz("key")) 62 | 63 | itr, err := pdb.ReverseIterator(nil, nil) 64 | require.NoError(t, err) 65 | checkDomain(t, itr, nil, nil) 66 | checkItem(t, itr, bz("3"), bz("value3")) 67 | checkNext(t, itr, true) 68 | checkItem(t, itr, bz("2"), bz("value2")) 69 | checkNext(t, itr, true) 70 | checkItem(t, itr, bz("1"), bz("value1")) 71 | checkNext(t, itr, false) 72 | checkInvalid(t, itr) 73 | err = itr.Close() 74 | require.NoError(t, err) 75 | } 76 | 77 | func TestPrefixDBReverseIterator5(t *testing.T) { 78 | db := mockDBWithStuff(t) 79 | pdb := NewPrefixDB(db, bz("key")) 80 | 81 | itr, err := pdb.ReverseIterator(bz("1"), nil) 82 | require.NoError(t, err) 83 | checkDomain(t, itr, bz("1"), nil) 84 | checkItem(t, itr, bz("3"), bz("value3")) 85 | checkNext(t, itr, true) 86 | checkItem(t, itr, bz("2"), bz("value2")) 87 | checkNext(t, itr, true) 88 | checkItem(t, itr, bz("1"), bz("value1")) 89 | checkNext(t, itr, false) 90 | checkInvalid(t, itr) 91 | err = itr.Close() 92 | require.NoError(t, err) 93 | } 94 | 95 | func TestPrefixDBReverseIterator6(t *testing.T) { 96 | db := mockDBWithStuff(t) 97 | pdb := NewPrefixDB(db, bz("key")) 98 | 99 | itr, err := pdb.ReverseIterator(bz("2"), nil) 100 | require.NoError(t, err) 101 | checkDomain(t, itr, bz("2"), nil) 102 | checkItem(t, itr, bz("3"), bz("value3")) 103 | checkNext(t, itr, true) 104 | checkItem(t, itr, bz("2"), bz("value2")) 105 | checkNext(t, itr, false) 106 | checkInvalid(t, itr) 107 | err = itr.Close() 108 | require.NoError(t, err) 109 | } 110 | 111 | func TestPrefixDBReverseIterator7(t *testing.T) { 112 | db := mockDBWithStuff(t) 113 | pdb := NewPrefixDB(db, bz("key")) 114 | 115 | itr, 
err := pdb.ReverseIterator(nil, bz("2")) 116 | require.NoError(t, err) 117 | checkDomain(t, itr, nil, bz("2")) 118 | checkItem(t, itr, bz("1"), bz("value1")) 119 | checkNext(t, itr, false) 120 | checkInvalid(t, itr) 121 | err = itr.Close() 122 | require.NoError(t, err) 123 | } 124 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker Image 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | tag: 6 | description: "The tag of the image to build" 7 | required: true 8 | type: string 9 | is_latest: 10 | description: "Push as latest?" 11 | required: false 12 | default: false 13 | type: boolean 14 | 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.ref }} 17 | cancel-in-progress: true 18 | 19 | env: 20 | ORG: cometbft 21 | IMAGE_NAME: cometbft-db-testing 22 | GIT_TAG: "${{ inputs.tag }}" 23 | 24 | jobs: 25 | build-image-at-tag: 26 | strategy: 27 | fail-fast: false 28 | matrix: 29 | include: 30 | - os: ubuntu-24.04 31 | platform: linux/amd64 32 | arch: amd64 33 | - os: ubuntu-24.04-arm 34 | platform: linux/arm64 35 | arch: arm64 36 | runs-on: ${{ matrix.os }} 37 | outputs: 38 | digest_amd64: ${{ steps.capture-digest.outputs.digest_amd64 }} 39 | digest_arm64: ${{ steps.capture-digest.outputs.digest_arm64 }} 40 | steps: 41 | - uses: actions/checkout@v4 42 | with: 43 | ref: "${{ env.GIT_TAG }}" 44 | fetch-depth: 0 45 | 46 | - name: Set up Docker Buildx 47 | uses: docker/setup-buildx-action@v3 48 | 49 | - name: Login to DockerHub 50 | if: ${{ github.event_name != 'pull_request' }} 51 | uses: docker/login-action@v3 52 | with: 53 | username: ${{ secrets.DOCKERHUB_USERNAME }} 54 | password: ${{ secrets.DOCKERHUB_TOKEN }} 55 | 56 | - name: Build and push image 57 | id: build 58 | uses: docker/build-push-action@v6 59 | with: 60 | platforms: ${{ matrix.platform }} 61 | file: ./tools/Dockerfile 62 | tags: | 63 | 
${{ env.ORG }}/${{ env.IMAGE_NAME }}:${{ env.GIT_TAG }} 64 | push: true 65 | outputs: type=image,name=${{ env.ORG }}/${{ env.IMAGE_NAME }},digest=true 66 | 67 | - name: Capture Image Digest 68 | id: capture-digest 69 | run: | 70 | echo "digest_${{ matrix.arch }}=${{ steps.build.outputs.digest }}" >> $GITHUB_ENV 71 | echo "::set-output name=digest_${{ matrix.arch }}::${{ steps.build.outputs.digest }}" 72 | 73 | merge: 74 | runs-on: ubuntu-latest 75 | needs: build-image-at-tag 76 | steps: 77 | - name: Get sanitized Docker tag 78 | run: echo "DOCKER_TAG=$(echo $GIT_TAG | sed 's/[^a-zA-Z0-9\.]/-/g')" >> $GITHUB_ENV 79 | 80 | - name: Debug Output Digests 81 | run: | 82 | echo "AMD64 Digest: ${{ needs.build-image-at-tag.outputs.digest_amd64 }}" 83 | echo "ARM64 Digest: ${{ needs.build-image-at-tag.outputs.digest_arm64 }}" 84 | 85 | - name: Login to DockerHub 86 | if: ${{ github.event_name != 'pull_request' }} 87 | uses: docker/login-action@v3 88 | with: 89 | username: ${{ secrets.DOCKERHUB_USERNAME }} 90 | password: ${{ secrets.DOCKERHUB_TOKEN }} 91 | 92 | - name: Create Multi-Arch Manifest 93 | run: | 94 | docker buildx imagetools create \ 95 | --tag ${{ env.ORG }}/${{ env.IMAGE_NAME }}:${{ env.DOCKER_TAG }} \ 96 | ${{ env.ORG }}/${{ env.IMAGE_NAME }}@${{ needs.build-image-at-tag.outputs.digest_amd64 }} \ 97 | ${{ env.ORG }}/${{ env.IMAGE_NAME }}@${{ needs.build-image-at-tag.outputs.digest_arm64 }} 98 | 99 | - name: Tag and Push Latest (if applicable) 100 | if: ${{ inputs.is_latest == true }} 101 | run: | 102 | docker buildx imagetools create \ 103 | --tag ${{ env.ORG }}/${{ env.IMAGE_NAME }}:latest \ 104 | ${{ env.ORG }}/${{ env.IMAGE_NAME }}@${{ needs.build-image-at-tag.outputs.digest_amd64 }} \ 105 | ${{ env.ORG }}/${{ env.IMAGE_NAME }}@${{ needs.build-image-at-tag.outputs.digest_arm64 }} 106 | -------------------------------------------------------------------------------- /memdb_iterator.go: 
-------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | 7 | "github.com/google/btree" 8 | ) 9 | 10 | const ( 11 | // Size of the channel buffer between traversal goroutine and iterator. Using an unbuffered 12 | // channel causes two context switches per item sent, while buffering allows more work per 13 | // context switch. Tuned with benchmarks. 14 | chBufferSize = 64 15 | ) 16 | 17 | // memDBIterator is a memDB iterator. 18 | type memDBIterator struct { 19 | ch <-chan *item 20 | cancel context.CancelFunc 21 | item *item 22 | start []byte 23 | end []byte 24 | useMtx bool 25 | } 26 | 27 | var _ Iterator = (*memDBIterator)(nil) 28 | 29 | // newMemDBIterator creates a new memDBIterator. 30 | func newMemDBIterator(db *MemDB, start []byte, end []byte, reverse bool) *memDBIterator { 31 | return newMemDBIteratorMtxChoice(db, start, end, reverse, true) 32 | } 33 | 34 | func newMemDBIteratorMtxChoice(db *MemDB, start []byte, end []byte, reverse bool, useMtx bool) *memDBIterator { 35 | ctx, cancel := context.WithCancel(context.Background()) 36 | ch := make(chan *item, chBufferSize) 37 | iter := &memDBIterator{ 38 | ch: ch, 39 | cancel: cancel, 40 | start: start, 41 | end: end, 42 | useMtx: useMtx, 43 | } 44 | 45 | if useMtx { 46 | db.mtx.RLock() 47 | } 48 | go func() { 49 | if useMtx { 50 | defer db.mtx.RUnlock() 51 | } 52 | // Because we use [start, end) for reverse ranges, while btree uses (start, end], we need 53 | // the following variables to handle some reverse iteration conditions ourselves. 
54 | var ( 55 | skipEqual []byte 56 | abortLessThan []byte 57 | ) 58 | visitor := func(i btree.Item) bool { 59 | item, ok := i.(*item) 60 | if !ok { 61 | return false // or handle the error as appropriate 62 | } 63 | if skipEqual != nil && bytes.Equal(item.key, skipEqual) { 64 | skipEqual = nil 65 | return true 66 | } 67 | if abortLessThan != nil && bytes.Compare(item.key, abortLessThan) == -1 { 68 | return false 69 | } 70 | select { 71 | case <-ctx.Done(): 72 | return false 73 | case ch <- item: 74 | return true 75 | } 76 | } 77 | switch { 78 | case start == nil && end == nil && !reverse: 79 | db.btree.Ascend(visitor) 80 | case start == nil && end == nil && reverse: 81 | db.btree.Descend(visitor) 82 | case end == nil && !reverse: 83 | // must handle this specially, since nil is considered less than anything else 84 | db.btree.AscendGreaterOrEqual(newKey(start), visitor) 85 | case !reverse: 86 | db.btree.AscendRange(newKey(start), newKey(end), visitor) 87 | case end == nil: 88 | // abort after start, since we use [start, end) while btree uses (start, end] 89 | abortLessThan = start 90 | db.btree.Descend(visitor) 91 | default: 92 | // skip end and abort after start, since we use [start, end) while btree uses (start, end] 93 | skipEqual = end 94 | abortLessThan = start 95 | db.btree.DescendLessOrEqual(newKey(end), visitor) 96 | } 97 | close(ch) 98 | }() 99 | 100 | // prime the iterator with the first value, if any 101 | if item, ok := <-ch; ok { 102 | iter.item = item 103 | } 104 | 105 | return iter 106 | } 107 | 108 | // Close implements Iterator. 109 | func (i *memDBIterator) Close() error { 110 | i.cancel() 111 | for range i.ch { //nolint:revive // drain channel 112 | } 113 | i.item = nil 114 | return nil 115 | } 116 | 117 | // Domain implements Iterator. 118 | func (i *memDBIterator) Domain() (start []byte, end []byte) { 119 | return i.start, i.end 120 | } 121 | 122 | // Valid implements Iterator. 
123 | func (i *memDBIterator) Valid() bool { 124 | return i.item != nil 125 | } 126 | 127 | // Next implements Iterator. 128 | func (i *memDBIterator) Next() { 129 | i.assertIsValid() 130 | item, ok := <-i.ch 131 | switch { 132 | case ok: 133 | i.item = item 134 | default: 135 | i.item = nil 136 | } 137 | } 138 | 139 | // Error implements Iterator. 140 | func (*memDBIterator) Error() error { 141 | return nil // famous last words 142 | } 143 | 144 | // Key implements Iterator. 145 | func (i *memDBIterator) Key() []byte { 146 | i.assertIsValid() 147 | return i.item.key 148 | } 149 | 150 | // Value implements Iterator. 151 | func (i *memDBIterator) Value() []byte { 152 | i.assertIsValid() 153 | return i.item.value 154 | } 155 | 156 | func (i *memDBIterator) assertIsValid() { 157 | if !i.Valid() { 158 | panic("iterator is invalid") 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CometBFT DB 2 | 3 | [![version](https://img.shields.io/github/tag/cometbft/cometbft-db.svg)](https://github.com/cometbft/cometbft-db/releases/latest) 4 | [![license](https://img.shields.io/github/license/cometbft/cometbft-db.svg)](https://github.com/cometbft/cometbft-db/blob/main/LICENSE) 5 | [![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/cometbft/cometbft-db) 6 | [![codecov](https://codecov.io/gh/cometbft/cometbft-db/branch/main/graph/badge.svg)](https://codecov.io/gh/cometbft/cometbft-db) 7 | ![Lint](https://github.com/cometbft/cometbft-db/workflows/Lint/badge.svg?branch=main) 8 | ![Test](https://github.com/cometbft/cometbft-db/workflows/Test/badge.svg?branch=main) 9 | 10 | A fork of [tm-db]. 11 | 12 | Common database interface for various database backends. 
Primarily meant for 13 | applications built on [CometBFT], such as the [Cosmos SDK]. 14 | 15 | **NB:** As per [cometbft/cometbft\#48], the CometBFT team plans on eventually 16 | totally deprecating and removing this library from CometBFT. As such, we do not 17 | recommend depending on this library for new projects. 18 | 19 | ## Minimum Go Version 20 | 21 | Go 1.23+ 22 | 23 | ## Supported Database Backends 24 | 25 | - **[GoLevelDB](https://github.com/syndtr/goleveldb) [stable]**: A pure Go 26 | implementation of [LevelDB](https://github.com/google/leveldb) (see below). 27 | Currently the default on-disk database used in the Cosmos SDK. 28 | 29 | - **MemDB [stable]:** An in-memory database using [Google's B-tree 30 | package](https://github.com/google/btree). Has very high performance both for 31 | reads, writes, and range scans, but is not durable and will lose all data on 32 | process exit. Does not support transactions. Suitable for e.g. caches, working 33 | sets, and tests. Used for [IAVL](https://github.com/tendermint/iavl) working 34 | sets when the pruning strategy allows it. 35 | 36 | - **[LevelDB](https://github.com/google/leveldb) [DEPRECATED]:** A [Go 37 | wrapper](https://github.com/jmhodges/levigo) around 38 | [LevelDB](https://github.com/google/leveldb). Uses LSM-trees for on-disk 39 | storage, which have good performance for write-heavy workloads, particularly 40 | on spinning disks, but requires periodic compaction to maintain decent read 41 | performance and reclaim disk space. Does not support transactions. 42 | 43 | - **[BoltDB](https://github.com/etcd-io/bbolt) [DEPRECATED]:** A 44 | [fork](https://github.com/etcd-io/bbolt) of 45 | [BoltDB](https://github.com/boltdb/bolt). Uses B+trees for on-disk storage, 46 | which have good performance for read-heavy workloads and range scans. Supports 47 | serializable ACID transactions. 
48 | 49 | - **[RocksDB](https://github.com/linxGnu/grocksdb) [experimental]:** A [Go 50 | wrapper](https://github.com/linxGnu/grocksdb) around 51 | [RocksDB](https://rocksdb.org). Similarly to LevelDB (above) it uses LSM-trees 52 | for on-disk storage, but is optimized for fast storage media such as SSDs and 53 | memory. Supports atomic transactions, but not full ACID transactions. 54 | 55 | - **[BadgerDB](https://github.com/dgraph-io/badger) [experimental]:** A 56 | key-value database written as a pure-Go alternative to e.g. LevelDB and 57 | RocksDB, with LSM-tree storage. Makes use of multiple goroutines for 58 | performance, and includes advanced features such as serializable ACID 59 | transactions, write batches, compression, and more. 60 | 61 | - **[PebbleDB](https://github.com/cockroachdb/pebble) [experimental]:** Pebble 62 | is a LevelDB/RocksDB inspired key-value store focused on performance and 63 | internal usage by CockroachDB. Pebble inherits the RocksDB file formats and a 64 | few extensions such as range deletion tombstones, table-level bloom filters, 65 | and updates to the MANIFEST format. 66 | 67 | CAVEAT: there are reports of broken upgrade process when using [Cosmos 68 | SDK](https://github.com/cosmos/cosmos-sdk). 69 | 70 | ## Meta-databases 71 | 72 | - **PrefixDB [stable]:** A database which wraps another database and uses a 73 | static prefix for all keys. This allows multiple logical databases to be 74 | stored in a common underlying databases by using different namespaces. Used by 75 | the Cosmos SDK to give different modules their own namespaced database in a 76 | single application database. 77 | 78 | ## Tests 79 | 80 | To test common databases, run `make test`. If all databases are available on the 81 | local machine, use `make test-all` to test them all. 

To test all databases within a Docker container, run:

```bash
make docker-test-image
make docker-test
```

[tm-db]: https://github.com/tendermint/tm-db
[CometBFT]: https://github.com/cometbft/cometbft
[Cosmos SDK]: https://github.com/cosmos/cosmos-sdk
[cometbft/cometbft\#48]: https://github.com/cometbft/cometbft/issues/48
--------------------------------------------------------------------------------
/goleveldb.go:
--------------------------------------------------------------------------------
package db

import (
	"fmt"
	"path/filepath"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func init() {
	dbCreator := func(name string, dir string) (DB, error) {
		return NewGoLevelDB(name, dir)
	}
	registerDBCreator(GoLevelDBBackend, dbCreator)
}

type GoLevelDB struct {
	db *leveldb.DB
}

var _ DB = (*GoLevelDB)(nil)

func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
	return NewGoLevelDBWithOpts(name, dir, nil)
}

func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) {
	dbPath := filepath.Join(dir, name+".db")
	db, err := leveldb.OpenFile(dbPath, o)
	if err != nil {
		return nil, err
	}

	database := &GoLevelDB{
		db: db,
	}
	return database, nil
}

// Get implements DB.
func (db *GoLevelDB) Get(key []byte) ([]byte, error) {
	if len(key) == 0 {
		return nil, errKeyEmpty
	}
	res, err := db.db.Get(key, nil)
	if err != nil {
		if err == errors.ErrNotFound {
			return nil, nil
		}
		return nil, err
	}
	return res, nil
}

// Has implements DB.
59 | func (db *GoLevelDB) Has(key []byte) (bool, error) { 60 | bytes, err := db.Get(key) 61 | if err != nil { 62 | return false, err 63 | } 64 | return bytes != nil, nil 65 | } 66 | 67 | // Set implements DB. 68 | func (db *GoLevelDB) Set(key []byte, value []byte) error { 69 | if len(key) == 0 { 70 | return errKeyEmpty 71 | } 72 | if value == nil { 73 | return errValueNil 74 | } 75 | err := db.db.Put(key, value, nil) 76 | if err != nil { 77 | return err 78 | } 79 | return nil 80 | } 81 | 82 | // SetSync implements DB. 83 | func (db *GoLevelDB) SetSync(key []byte, value []byte) error { 84 | if len(key) == 0 { 85 | return errKeyEmpty 86 | } 87 | if value == nil { 88 | return errValueNil 89 | } 90 | 91 | err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) 92 | if err != nil { 93 | return err 94 | } 95 | return nil 96 | } 97 | 98 | // Delete implements DB. 99 | func (db *GoLevelDB) Delete(key []byte) error { 100 | if len(key) == 0 { 101 | return errKeyEmpty 102 | } 103 | 104 | err := db.db.Delete(key, nil) 105 | if err != nil { 106 | return err 107 | } 108 | return nil 109 | } 110 | 111 | // DeleteSync implements DB. 112 | func (db *GoLevelDB) DeleteSync(key []byte) error { 113 | if len(key) == 0 { 114 | return errKeyEmpty 115 | } 116 | err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) 117 | if err != nil { 118 | return err 119 | } 120 | return nil 121 | } 122 | 123 | func (db *GoLevelDB) DB() *leveldb.DB { 124 | return db.db 125 | } 126 | 127 | // Close implements DB. 128 | func (db *GoLevelDB) Close() error { 129 | return db.db.Close() 130 | } 131 | 132 | // Print implements DB. 
133 | func (db *GoLevelDB) Print() error { 134 | str, err := db.db.GetProperty("leveldb.stats") 135 | if err != nil { 136 | return err 137 | } 138 | fmt.Printf("%v\n", str) 139 | 140 | itr := db.db.NewIterator(nil, nil) 141 | for itr.Next() { 142 | key := itr.Key() 143 | value := itr.Value() 144 | fmt.Printf("[%X]:\t[%X]\n", key, value) 145 | } 146 | return nil 147 | } 148 | 149 | // Stats implements DB. 150 | func (db *GoLevelDB) Stats() map[string]string { 151 | keys := []string{ 152 | "leveldb.num-files-at-level{n}", 153 | "leveldb.stats", 154 | "leveldb.sstables", 155 | "leveldb.blockpool", 156 | "leveldb.cachedblock", 157 | "leveldb.openedtables", 158 | "leveldb.alivesnaps", 159 | "leveldb.aliveiters", 160 | } 161 | 162 | stats := make(map[string]string) 163 | for _, key := range keys { 164 | str, err := db.db.GetProperty(key) 165 | if err == nil { 166 | stats[key] = str 167 | } 168 | } 169 | return stats 170 | } 171 | 172 | // NewBatch implements DB. 173 | func (db *GoLevelDB) NewBatch() Batch { 174 | return newGoLevelDBBatch(db) 175 | } 176 | 177 | // Iterator implements DB. 178 | func (db *GoLevelDB) Iterator(start, end []byte) (Iterator, error) { 179 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 180 | return nil, errKeyEmpty 181 | } 182 | itr := db.db.NewIterator(&util.Range{Start: start, Limit: end}, nil) 183 | return newGoLevelDBIterator(itr, start, end, false), nil 184 | } 185 | 186 | // ReverseIterator implements DB. 187 | func (db *GoLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { 188 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 189 | return nil, errKeyEmpty 190 | } 191 | itr := db.db.NewIterator(&util.Range{Start: start, Limit: end}, nil) 192 | return newGoLevelDBIterator(itr, start, end, true), nil 193 | } 194 | 195 | // Compact range. 
196 | func (db *GoLevelDB) Compact(start, end []byte) error { 197 | return db.db.CompactRange(util.Range{Start: start, Limit: end}) 198 | } 199 | -------------------------------------------------------------------------------- /prefixdb.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // PrefixDB wraps a namespace of another database as a logical database. 9 | type PrefixDB struct { 10 | mtx sync.Mutex 11 | prefix []byte 12 | db DB 13 | } 14 | 15 | var _ DB = (*PrefixDB)(nil) 16 | 17 | // NewPrefixDB lets you namespace multiple DBs within a single DB. 18 | func NewPrefixDB(db DB, prefix []byte) *PrefixDB { 19 | return &PrefixDB{ 20 | prefix: prefix, 21 | db: db, 22 | } 23 | } 24 | 25 | // Get implements DB. 26 | func (pdb *PrefixDB) Get(key []byte) ([]byte, error) { 27 | if len(key) == 0 { 28 | return nil, errKeyEmpty 29 | } 30 | pdb.mtx.Lock() 31 | defer pdb.mtx.Unlock() 32 | 33 | pkey := pdb.prefixed(key) 34 | value, err := pdb.db.Get(pkey) 35 | if err != nil { 36 | return nil, err 37 | } 38 | return value, nil 39 | } 40 | 41 | // Has implements DB. 42 | func (pdb *PrefixDB) Has(key []byte) (bool, error) { 43 | if len(key) == 0 { 44 | return false, errKeyEmpty 45 | } 46 | pdb.mtx.Lock() 47 | defer pdb.mtx.Unlock() 48 | 49 | ok, err := pdb.db.Has(pdb.prefixed(key)) 50 | if err != nil { 51 | return ok, err 52 | } 53 | 54 | return ok, nil 55 | } 56 | 57 | // Set implements DB. 58 | func (pdb *PrefixDB) Set(key []byte, value []byte) error { 59 | if len(key) == 0 { 60 | return errKeyEmpty 61 | } 62 | if value == nil { 63 | return errValueNil 64 | } 65 | pdb.mtx.Lock() 66 | defer pdb.mtx.Unlock() 67 | 68 | pkey := pdb.prefixed(key) 69 | return pdb.db.Set(pkey, value) 70 | } 71 | 72 | // SetSync implements DB. 
73 | func (pdb *PrefixDB) SetSync(key []byte, value []byte) error { 74 | if len(key) == 0 { 75 | return errKeyEmpty 76 | } 77 | if value == nil { 78 | return errValueNil 79 | } 80 | pdb.mtx.Lock() 81 | defer pdb.mtx.Unlock() 82 | 83 | return pdb.db.SetSync(pdb.prefixed(key), value) 84 | } 85 | 86 | // Delete implements DB. 87 | func (pdb *PrefixDB) Delete(key []byte) error { 88 | if len(key) == 0 { 89 | return errKeyEmpty 90 | } 91 | pdb.mtx.Lock() 92 | defer pdb.mtx.Unlock() 93 | 94 | return pdb.db.Delete(pdb.prefixed(key)) 95 | } 96 | 97 | // DeleteSync implements DB. 98 | func (pdb *PrefixDB) DeleteSync(key []byte) error { 99 | if len(key) == 0 { 100 | return errKeyEmpty 101 | } 102 | pdb.mtx.Lock() 103 | defer pdb.mtx.Unlock() 104 | 105 | return pdb.db.DeleteSync(pdb.prefixed(key)) 106 | } 107 | 108 | // Iterator implements DB. 109 | func (pdb *PrefixDB) Iterator(start, end []byte) (Iterator, error) { 110 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 111 | return nil, errKeyEmpty 112 | } 113 | pdb.mtx.Lock() 114 | defer pdb.mtx.Unlock() 115 | 116 | var pstart, pend []byte 117 | pstart = append(cp(pdb.prefix), start...) 118 | if end == nil { 119 | pend = cpIncr(pdb.prefix) 120 | } else { 121 | pend = append(cp(pdb.prefix), end...) 122 | } 123 | itr, err := pdb.db.Iterator(pstart, pend) 124 | if err != nil { 125 | return nil, err 126 | } 127 | 128 | return newPrefixIterator(pdb.prefix, start, end, itr) 129 | } 130 | 131 | // ReverseIterator implements DB. 132 | func (pdb *PrefixDB) ReverseIterator(start, end []byte) (Iterator, error) { 133 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 134 | return nil, errKeyEmpty 135 | } 136 | pdb.mtx.Lock() 137 | defer pdb.mtx.Unlock() 138 | 139 | var pstart, pend []byte 140 | pstart = append(cp(pdb.prefix), start...) 141 | if end == nil { 142 | pend = cpIncr(pdb.prefix) 143 | } else { 144 | pend = append(cp(pdb.prefix), end...) 
145 | } 146 | ritr, err := pdb.db.ReverseIterator(pstart, pend) 147 | if err != nil { 148 | return nil, err 149 | } 150 | 151 | return newPrefixIterator(pdb.prefix, start, end, ritr) 152 | } 153 | 154 | // NewBatch implements DB. 155 | func (pdb *PrefixDB) NewBatch() Batch { 156 | pdb.mtx.Lock() 157 | defer pdb.mtx.Unlock() 158 | 159 | return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) 160 | } 161 | 162 | // Close implements DB. 163 | func (pdb *PrefixDB) Close() error { 164 | pdb.mtx.Lock() 165 | defer pdb.mtx.Unlock() 166 | 167 | return pdb.db.Close() 168 | } 169 | 170 | // Print implements DB. 171 | func (pdb *PrefixDB) Print() error { 172 | fmt.Printf("prefix: %X\n", pdb.prefix) 173 | 174 | itr, err := pdb.Iterator(nil, nil) 175 | if err != nil { 176 | return err 177 | } 178 | defer itr.Close() 179 | for ; itr.Valid(); itr.Next() { 180 | key := itr.Key() 181 | value := itr.Value() 182 | fmt.Printf("[%X]:\t[%X]\n", key, value) 183 | } 184 | return nil 185 | } 186 | 187 | // Stats implements DB. 188 | func (pdb *PrefixDB) Stats() map[string]string { 189 | stats := make(map[string]string) 190 | stats["prefixdb.prefix.string"] = string(pdb.prefix) 191 | stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix) 192 | source := pdb.db.Stats() 193 | for key, value := range source { 194 | stats["prefixdb.source."+key] = value 195 | } 196 | return stats 197 | } 198 | 199 | func (pdb *PrefixDB) prefixed(key []byte) []byte { 200 | return append(cp(pdb.prefix), key...) 
201 | } 202 | 203 | func (pdb *PrefixDB) Compact(start, end []byte) error { 204 | return pdb.db.Compact(start, end) 205 | } 206 | -------------------------------------------------------------------------------- /common_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "math/rand" 7 | "os" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | // ---------------------------------------- 15 | // Helper functions. 16 | 17 | func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) { 18 | t.Helper() 19 | valueGot, err := db.Get(key) 20 | assert.NoError(t, err) 21 | assert.Equal(t, valueWanted, valueGot) 22 | } 23 | 24 | func checkValid(t *testing.T, itr Iterator, expected bool) { 25 | t.Helper() 26 | valid := itr.Valid() 27 | require.Equal(t, expected, valid) 28 | } 29 | 30 | func checkNext(t *testing.T, itr Iterator, expected bool) { 31 | t.Helper() 32 | itr.Next() 33 | // assert.NoError(t, err) TODO: look at fixing this 34 | valid := itr.Valid() 35 | require.Equal(t, expected, valid) 36 | } 37 | 38 | func checkNextPanics(t *testing.T, itr Iterator) { 39 | t.Helper() 40 | assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected an error but didn't") 41 | } 42 | 43 | func checkDomain(t *testing.T, itr Iterator, start, end []byte) { 44 | t.Helper() 45 | ds, de := itr.Domain() 46 | assert.Equal(t, start, ds, "checkDomain domain start incorrect") 47 | assert.Equal(t, end, de, "checkDomain domain end incorrect") 48 | } 49 | 50 | func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { 51 | t.Helper() 52 | v := itr.Value() 53 | 54 | k := itr.Key() 55 | 56 | assert.Exactly(t, key, k) 57 | assert.Exactly(t, value, v) 58 | } 59 | 60 | func checkInvalid(t *testing.T, itr Iterator) { 61 | t.Helper() 62 | checkValid(t, itr, false) 63 | checkKeyPanics(t, itr) 64 | 
checkValuePanics(t, itr) 65 | checkNextPanics(t, itr) 66 | } 67 | 68 | func checkKeyPanics(t *testing.T, itr Iterator) { 69 | t.Helper() 70 | assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't") 71 | } 72 | 73 | func checkValuePanics(t *testing.T, itr Iterator) { 74 | t.Helper() 75 | 76 | msg := "checkValuePanics expected panic but didn't" 77 | assert.Panics(t, func() { itr.Value() }, msg) 78 | } 79 | 80 | func newTempDB(t *testing.T, backend BackendType) (db DB, dbDir string) { 81 | t.Helper() 82 | dirname, err := os.MkdirTemp("", "db_common_test") 83 | require.NoError(t, err) 84 | db, err = NewDB("testdb", backend, dirname) 85 | require.NoError(t, err) 86 | return db, dirname 87 | } 88 | 89 | func benchmarkRangeScans(b *testing.B, db DB, dbSize int64) { 90 | b.Helper() 91 | b.StopTimer() 92 | 93 | rangeSize := int64(10000) 94 | if dbSize < rangeSize { 95 | b.Errorf("db size %v cannot be less than range size %v", dbSize, rangeSize) 96 | } 97 | 98 | for i := int64(0); i < dbSize; i++ { 99 | int64bytes := int642Bytes(i) 100 | err := db.Set(int64bytes, int64bytes) 101 | if err != nil { 102 | // require.NoError() is very expensive (according to profiler), so check manually 103 | b.Fatal(b, err) 104 | } 105 | } 106 | b.StartTimer() 107 | 108 | for i := 0; i < b.N; i++ { 109 | start := rand.Int63n(dbSize - rangeSize) 110 | end := start + rangeSize 111 | iter, err := db.Iterator(int642Bytes(start), int642Bytes(end)) 112 | require.NoError(b, err) 113 | count := 0 114 | for ; iter.Valid(); iter.Next() { 115 | count++ 116 | } 117 | err = iter.Close() 118 | require.NoError(b, err) 119 | require.EqualValues(b, rangeSize, count) 120 | } 121 | } 122 | 123 | func benchmarkRandomReadsWrites(b *testing.B, db DB) { 124 | b.Helper() 125 | b.StopTimer() 126 | 127 | // create dummy data 128 | const numItems = int64(1000000) 129 | internal := map[int64]int64{} 130 | for i := 0; i < int(numItems); i++ { 131 | internal[int64(i)] = int64(0) 132 | } 133 | 134 
| b.StartTimer() 135 | 136 | for i := 0; i < b.N; i++ { 137 | // Write something 138 | { 139 | idx := rand.Int63n(numItems) 140 | internal[idx]++ 141 | val := internal[idx] 142 | idxBytes := int642Bytes(idx) 143 | valBytes := int642Bytes(val) 144 | err := db.Set(idxBytes, valBytes) 145 | if err != nil { 146 | // require.NoError() is very expensive (according to profiler), so check manually 147 | b.Fatal(b, err) 148 | } 149 | } 150 | 151 | // Read something 152 | { 153 | idx := rand.Int63n(numItems) 154 | valExp := internal[idx] 155 | idxBytes := int642Bytes(idx) 156 | valBytes, err := db.Get(idxBytes) 157 | if err != nil { 158 | // require.NoError() is very expensive (according to profiler), so check manually 159 | b.Fatal(b, err) 160 | } 161 | if valExp == 0 { 162 | if !bytes.Equal(valBytes, nil) { 163 | b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes) 164 | break 165 | } 166 | } else { 167 | if len(valBytes) != 8 { 168 | b.Errorf("Expected length 8 for %v, got %X", idx, valBytes) 169 | break 170 | } 171 | valGot := bytes2Int64(valBytes) 172 | if valExp != valGot { 173 | b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot) 174 | break 175 | } 176 | } 177 | } 178 | } 179 | } 180 | 181 | func int642Bytes(i int64) []byte { 182 | buf := make([]byte, 8) 183 | binary.BigEndian.PutUint64(buf, uint64(i)) 184 | return buf 185 | } 186 | 187 | func bytes2Int64(buf []byte) int64 { 188 | return int64(binary.BigEndian.Uint64(buf)) 189 | } 190 | -------------------------------------------------------------------------------- /cleveldb.go: -------------------------------------------------------------------------------- 1 | //go:build cleveldb 2 | // +build cleveldb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "path/filepath" 9 | 10 | "github.com/jmhodges/levigo" 11 | ) 12 | 13 | func init() { 14 | dbCreator := func(name string, dir string) (DB, error) { 15 | return NewCLevelDB(name, dir) 16 | } 17 | registerDBCreator(CLevelDBBackend, dbCreator) 18 | } 19 | 
20 | // CLevelDB uses the C LevelDB database via a Go wrapper. 21 | type CLevelDB struct { 22 | db *levigo.DB 23 | ro *levigo.ReadOptions 24 | wo *levigo.WriteOptions 25 | woSync *levigo.WriteOptions 26 | } 27 | 28 | var _ DB = (*CLevelDB)(nil) 29 | 30 | // NewCLevelDB creates a new CLevelDB. 31 | // 32 | // Deprecated: cleveldb is deprecated and will be removed in the future. 33 | func NewCLevelDB(name string, dir string) (*CLevelDB, error) { 34 | dbPath := filepath.Join(dir, name+".db") 35 | 36 | opts := levigo.NewOptions() 37 | opts.SetCache(levigo.NewLRUCache(1 << 30)) 38 | opts.SetCreateIfMissing(true) 39 | db, err := levigo.Open(dbPath, opts) 40 | if err != nil { 41 | return nil, err 42 | } 43 | ro := levigo.NewReadOptions() 44 | wo := levigo.NewWriteOptions() 45 | woSync := levigo.NewWriteOptions() 46 | woSync.SetSync(true) 47 | database := &CLevelDB{ 48 | db: db, 49 | ro: ro, 50 | wo: wo, 51 | woSync: woSync, 52 | } 53 | return database, nil 54 | } 55 | 56 | // Get implements DB. 57 | func (db *CLevelDB) Get(key []byte) ([]byte, error) { 58 | if len(key) == 0 { 59 | return nil, errKeyEmpty 60 | } 61 | res, err := db.db.Get(db.ro, key) 62 | if err != nil { 63 | return nil, err 64 | } 65 | return res, nil 66 | } 67 | 68 | // Has implements DB. 69 | func (db *CLevelDB) Has(key []byte) (bool, error) { 70 | bytes, err := db.Get(key) 71 | if err != nil { 72 | return false, err 73 | } 74 | return bytes != nil, nil 75 | } 76 | 77 | // Set implements DB. 78 | func (db *CLevelDB) Set(key []byte, value []byte) error { 79 | if len(key) == 0 { 80 | return errKeyEmpty 81 | } 82 | if value == nil { 83 | return errValueNil 84 | } 85 | if err := db.db.Put(db.wo, key, value); err != nil { 86 | return err 87 | } 88 | return nil 89 | } 90 | 91 | // SetSync implements DB. 
92 | func (db *CLevelDB) SetSync(key []byte, value []byte) error { 93 | if len(key) == 0 { 94 | return errKeyEmpty 95 | } 96 | if value == nil { 97 | return errValueNil 98 | } 99 | if err := db.db.Put(db.woSync, key, value); err != nil { 100 | return err 101 | } 102 | return nil 103 | } 104 | 105 | // Delete implements DB. 106 | func (db *CLevelDB) Delete(key []byte) error { 107 | if len(key) == 0 { 108 | return errKeyEmpty 109 | } 110 | if err := db.db.Delete(db.wo, key); err != nil { 111 | return err 112 | } 113 | return nil 114 | } 115 | 116 | // DeleteSync implements DB. 117 | func (db *CLevelDB) DeleteSync(key []byte) error { 118 | if len(key) == 0 { 119 | return errKeyEmpty 120 | } 121 | if err := db.db.Delete(db.woSync, key); err != nil { 122 | return err 123 | } 124 | return nil 125 | } 126 | 127 | // Compact implements DB and compacts the given range of the DB 128 | func (db *CLevelDB) Compact(start, end []byte) error { 129 | // CompactRange of clevelDB does not return anything 130 | db.db.CompactRange(levigo.Range{Start: start, Limit: end}) 131 | return nil 132 | } 133 | 134 | // FIXME This should not be exposed 135 | func (db *CLevelDB) DB() *levigo.DB { 136 | return db.db 137 | } 138 | 139 | // Close implements DB. 140 | func (db *CLevelDB) Close() error { 141 | db.db.Close() 142 | db.ro.Close() 143 | db.wo.Close() 144 | db.woSync.Close() 145 | return nil 146 | } 147 | 148 | // Print implements DB. 149 | func (db *CLevelDB) Print() error { 150 | itr, err := db.Iterator(nil, nil) 151 | if err != nil { 152 | return err 153 | } 154 | defer itr.Close() 155 | for ; itr.Valid(); itr.Next() { 156 | key := itr.Key() 157 | value := itr.Value() 158 | fmt.Printf("[%X]:\t[%X]\n", key, value) 159 | } 160 | return nil 161 | } 162 | 163 | // Stats implements DB. 
164 | func (db *CLevelDB) Stats() map[string]string { 165 | keys := []string{ 166 | "leveldb.aliveiters", 167 | "leveldb.alivesnaps", 168 | "leveldb.blockpool", 169 | "leveldb.cachedblock", 170 | "leveldb.num-files-at-level{n}", 171 | "leveldb.openedtables", 172 | "leveldb.sstables", 173 | "leveldb.stats", 174 | } 175 | 176 | stats := make(map[string]string, len(keys)) 177 | for _, key := range keys { 178 | str := db.db.PropertyValue(key) 179 | stats[key] = str 180 | } 181 | return stats 182 | } 183 | 184 | // NewBatch implements DB. 185 | func (db *CLevelDB) NewBatch() Batch { 186 | return newCLevelDBBatch(db) 187 | } 188 | 189 | // Iterator implements DB. 190 | func (db *CLevelDB) Iterator(start, end []byte) (Iterator, error) { 191 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 192 | return nil, errKeyEmpty 193 | } 194 | itr := db.db.NewIterator(db.ro) 195 | return newCLevelDBIterator(itr, start, end, false), nil 196 | } 197 | 198 | // ReverseIterator implements DB. 199 | func (db *CLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { 200 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 201 | return nil, errKeyEmpty 202 | } 203 | itr := db.db.NewIterator(db.ro) 204 | return newCLevelDBIterator(itr, start, end, true), nil 205 | } 206 | -------------------------------------------------------------------------------- /rocksdb.go: -------------------------------------------------------------------------------- 1 | //go:build rocksdb 2 | // +build rocksdb 3 | 4 | package db 5 | 6 | import ( 7 | "fmt" 8 | "path/filepath" 9 | "runtime" 10 | 11 | "github.com/linxGnu/grocksdb" 12 | ) 13 | 14 | func init() { 15 | dbCreator := func(name string, dir string) (DB, error) { 16 | return NewRocksDB(name, dir) 17 | } 18 | registerDBCreator(RocksDBBackend, dbCreator) 19 | } 20 | 21 | // RocksDB is a RocksDB backend. 
22 | type RocksDB struct { 23 | db *grocksdb.DB 24 | ro *grocksdb.ReadOptions 25 | wo *grocksdb.WriteOptions 26 | woSync *grocksdb.WriteOptions 27 | } 28 | 29 | var _ DB = (*RocksDB)(nil) 30 | 31 | func NewRocksDB(name string, dir string) (*RocksDB, error) { 32 | // default rocksdb option, good enough for most cases, including heavy workloads. 33 | // 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads). 34 | // compression: snappy as default, need to -lsnappy to enable. 35 | bbto := grocksdb.NewDefaultBlockBasedTableOptions() 36 | bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30)) 37 | bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10)) 38 | 39 | opts := grocksdb.NewDefaultOptions() 40 | opts.SetBlockBasedTableFactory(bbto) 41 | opts.SetCreateIfMissing(true) 42 | opts.IncreaseParallelism(runtime.NumCPU()) 43 | // 1.5GB maximum memory use for writebuffer. 44 | opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) 45 | return NewRocksDBWithOptions(name, dir, opts) 46 | } 47 | 48 | func NewRocksDBWithOptions(name string, dir string, opts *grocksdb.Options) (*RocksDB, error) { 49 | dbPath := filepath.Join(dir, name+".db") 50 | db, err := grocksdb.OpenDb(opts, dbPath) 51 | if err != nil { 52 | return nil, err 53 | } 54 | ro := grocksdb.NewDefaultReadOptions() 55 | wo := grocksdb.NewDefaultWriteOptions() 56 | woSync := grocksdb.NewDefaultWriteOptions() 57 | woSync.SetSync(true) 58 | return NewRocksDBWithRawDB(db, ro, wo, woSync), nil 59 | } 60 | 61 | func NewRocksDBWithRawDB(db *grocksdb.DB, ro *grocksdb.ReadOptions, wo *grocksdb.WriteOptions, woSync *grocksdb.WriteOptions) *RocksDB { 62 | return &RocksDB{ 63 | db: db, 64 | ro: ro, 65 | wo: wo, 66 | woSync: woSync, 67 | } 68 | } 69 | 70 | // Get implements DB. 
71 | func (db *RocksDB) Get(key []byte) ([]byte, error) { 72 | if len(key) == 0 { 73 | return nil, errKeyEmpty 74 | } 75 | res, err := db.db.Get(db.ro, key) 76 | if err != nil { 77 | return nil, err 78 | } 79 | return moveSliceToBytes(res), nil 80 | } 81 | 82 | // Has implements DB. 83 | func (db *RocksDB) Has(key []byte) (bool, error) { 84 | bytes, err := db.Get(key) 85 | if err != nil { 86 | return false, err 87 | } 88 | return bytes != nil, nil 89 | } 90 | 91 | // Set implements DB. 92 | func (db *RocksDB) Set(key []byte, value []byte) error { 93 | if len(key) == 0 { 94 | return errKeyEmpty 95 | } 96 | if value == nil { 97 | return errValueNil 98 | } 99 | err := db.db.Put(db.wo, key, value) 100 | if err != nil { 101 | return err 102 | } 103 | return nil 104 | } 105 | 106 | // SetSync implements DB. 107 | func (db *RocksDB) SetSync(key []byte, value []byte) error { 108 | if len(key) == 0 { 109 | return errKeyEmpty 110 | } 111 | if value == nil { 112 | return errValueNil 113 | } 114 | err := db.db.Put(db.woSync, key, value) 115 | if err != nil { 116 | return err 117 | } 118 | return nil 119 | } 120 | 121 | // Delete implements DB. 122 | func (db *RocksDB) Delete(key []byte) error { 123 | if len(key) == 0 { 124 | return errKeyEmpty 125 | } 126 | err := db.db.Delete(db.wo, key) 127 | if err != nil { 128 | return err 129 | } 130 | return nil 131 | } 132 | 133 | // DeleteSync implements DB. 134 | func (db *RocksDB) DeleteSync(key []byte) error { 135 | if len(key) == 0 { 136 | return errKeyEmpty 137 | } 138 | err := db.db.Delete(db.woSync, key) 139 | if err != nil { 140 | return nil 141 | } 142 | return nil 143 | } 144 | 145 | func (db *RocksDB) DB() *grocksdb.DB { 146 | return db.db 147 | } 148 | 149 | // Close implements DB. 150 | func (db *RocksDB) Close() error { 151 | db.ro.Destroy() 152 | db.wo.Destroy() 153 | db.woSync.Destroy() 154 | db.db.Close() 155 | return nil 156 | } 157 | 158 | // Print implements DB. 
159 | func (db *RocksDB) Print() error { 160 | itr, err := db.Iterator(nil, nil) 161 | if err != nil { 162 | return err 163 | } 164 | defer itr.Close() 165 | for ; itr.Valid(); itr.Next() { 166 | key := itr.Key() 167 | value := itr.Value() 168 | fmt.Printf("[%X]:\t[%X]\n", key, value) 169 | } 170 | return nil 171 | } 172 | 173 | // Stats implements DB. 174 | func (db *RocksDB) Stats() map[string]string { 175 | keys := []string{"rocksdb.stats"} 176 | stats := make(map[string]string, len(keys)) 177 | for _, key := range keys { 178 | stats[key] = db.db.GetProperty(key) 179 | } 180 | return stats 181 | } 182 | 183 | // NewBatch implements DB. 184 | func (db *RocksDB) NewBatch() Batch { 185 | return newRocksDBBatch(db) 186 | } 187 | 188 | // Iterator implements DB. 189 | func (db *RocksDB) Iterator(start, end []byte) (Iterator, error) { 190 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 191 | return nil, errKeyEmpty 192 | } 193 | itr := db.db.NewIterator(db.ro) 194 | return newRocksDBIterator(itr, start, end, false), nil 195 | } 196 | 197 | // ReverseIterator implements DB. 
198 | func (db *RocksDB) ReverseIterator(start, end []byte) (Iterator, error) { 199 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 200 | return nil, errKeyEmpty 201 | } 202 | itr := db.db.NewIterator(db.ro) 203 | return newRocksDBIterator(itr, start, end, true), nil 204 | } 205 | 206 | func (db *RocksDB) Compact(start, end []byte) error { 207 | db.db.CompactRange(grocksdb.Range{Start: start, Limit: end}) 208 | return nil 209 | } 210 | -------------------------------------------------------------------------------- /boltdb.go: -------------------------------------------------------------------------------- 1 | //go:build boltdb 2 | // +build boltdb 3 | 4 | package db 5 | 6 | import ( 7 | "errors" 8 | "fmt" 9 | "os" 10 | "path/filepath" 11 | 12 | "go.etcd.io/bbolt" 13 | ) 14 | 15 | var bucket = []byte("tm") 16 | 17 | func init() { 18 | registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) { 19 | return NewBoltDB(name, dir) 20 | }) 21 | } 22 | 23 | // BoltDB is a wrapper around etcd's fork of bolt (https://github.com/etcd-io/bbolt). 24 | // 25 | // NOTE: All operations (including Set, Delete) are synchronous by default. One 26 | // can globally turn it off by using NoSync config option (not recommended). 27 | // 28 | // A single bucket ([]byte("tm")) is used per a database instance. This could 29 | // lead to performance issues when/if there will be lots of keys. 30 | type BoltDB struct { 31 | db *bbolt.DB 32 | } 33 | 34 | var _ DB = (*BoltDB)(nil) 35 | 36 | // NewBoltDB returns a BoltDB with default options. 37 | // 38 | // Deprecated: boltdb is deprecated and will be removed in the future. 39 | func NewBoltDB(name, dir string) (DB, error) { 40 | return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions) 41 | } 42 | 43 | // NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not 44 | // supported because NewBoltDBWithOpts creates a global bucket. 
45 | func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) { 46 | if opts.ReadOnly { 47 | return nil, errors.New("ReadOnly: true is not supported") 48 | } 49 | 50 | dbPath := filepath.Join(dir, name+".db") 51 | db, err := bbolt.Open(dbPath, os.ModePerm, opts) 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | // create a global bucket 57 | err = db.Update(func(tx *bbolt.Tx) error { 58 | _, err := tx.CreateBucketIfNotExists(bucket) 59 | return err 60 | }) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | return &BoltDB{db: db}, nil 66 | } 67 | 68 | // Get implements DB. 69 | func (bdb *BoltDB) Get(key []byte) (value []byte, err error) { 70 | if len(key) == 0 { 71 | return nil, errKeyEmpty 72 | } 73 | err = bdb.db.View(func(tx *bbolt.Tx) error { 74 | b := tx.Bucket(bucket) 75 | if v := b.Get(key); v != nil { 76 | value = append([]byte{}, v...) 77 | } 78 | return nil 79 | }) 80 | if err != nil { 81 | return nil, err 82 | } 83 | return 84 | } 85 | 86 | // Has implements DB. 87 | func (bdb *BoltDB) Has(key []byte) (bool, error) { 88 | bytes, err := bdb.Get(key) 89 | if err != nil { 90 | return false, err 91 | } 92 | return bytes != nil, nil 93 | } 94 | 95 | // Set implements DB. 96 | func (bdb *BoltDB) Set(key, value []byte) error { 97 | if len(key) == 0 { 98 | return errKeyEmpty 99 | } 100 | if value == nil { 101 | return errValueNil 102 | } 103 | err := bdb.db.Update(func(tx *bbolt.Tx) error { 104 | b := tx.Bucket(bucket) 105 | return b.Put(key, value) 106 | }) 107 | if err != nil { 108 | return err 109 | } 110 | return nil 111 | } 112 | 113 | // SetSync implements DB. 114 | func (bdb *BoltDB) SetSync(key, value []byte) error { 115 | return bdb.Set(key, value) 116 | } 117 | 118 | // Delete implements DB. 
119 | func (bdb *BoltDB) Delete(key []byte) error { 120 | if len(key) == 0 { 121 | return errKeyEmpty 122 | } 123 | err := bdb.db.Update(func(tx *bbolt.Tx) error { 124 | return tx.Bucket(bucket).Delete(key) 125 | }) 126 | if err != nil { 127 | return err 128 | } 129 | return nil 130 | } 131 | 132 | // DeleteSync implements DB. 133 | func (bdb *BoltDB) DeleteSync(key []byte) error { 134 | return bdb.Delete(key) 135 | } 136 | 137 | // Close implements DB. 138 | func (bdb *BoltDB) Close() error { 139 | return bdb.db.Close() 140 | } 141 | 142 | // Print implements DB. 143 | func (bdb *BoltDB) Print() error { 144 | stats := bdb.db.Stats() 145 | fmt.Printf("%v\n", stats) 146 | 147 | err := bdb.db.View(func(tx *bbolt.Tx) error { 148 | tx.Bucket(bucket).ForEach(func(k, v []byte) error { 149 | fmt.Printf("[%X]:\t[%X]\n", k, v) 150 | return nil 151 | }) 152 | return nil 153 | }) 154 | if err != nil { 155 | return err 156 | } 157 | return nil 158 | } 159 | 160 | // Stats implements DB. 161 | func (bdb *BoltDB) Stats() map[string]string { 162 | stats := bdb.db.Stats() 163 | m := make(map[string]string) 164 | 165 | // Freelist stats 166 | m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN) 167 | m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN) 168 | m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc) 169 | m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse) 170 | 171 | // Transaction stats 172 | m["TxN"] = fmt.Sprintf("%v", stats.TxN) 173 | m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN) 174 | 175 | return m 176 | } 177 | 178 | // NewBatch implements DB. 179 | func (bdb *BoltDB) NewBatch() Batch { 180 | return newBoltDBBatch(bdb) 181 | } 182 | 183 | // WARNING: Any concurrent writes or reads will block until the iterator is 184 | // closed. 
185 | func (bdb *BoltDB) Iterator(start, end []byte) (Iterator, error) { 186 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 187 | return nil, errKeyEmpty 188 | } 189 | tx, err := bdb.db.Begin(false) 190 | if err != nil { 191 | return nil, err 192 | } 193 | return newBoltDBIterator(tx, start, end, false), nil 194 | } 195 | 196 | // WARNING: Any concurrent writes or reads will block until the iterator is 197 | // closed. 198 | func (bdb *BoltDB) ReverseIterator(start, end []byte) (Iterator, error) { 199 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 200 | return nil, errKeyEmpty 201 | } 202 | tx, err := bdb.db.Begin(false) 203 | if err != nil { 204 | return nil, err 205 | } 206 | return newBoltDBIterator(tx, start, end, true), nil 207 | } 208 | 209 | func (bdb *BoltDB) Compact(start, end []byte) error { 210 | // There is no explicit CompactRange support in BoltDB, only a function that copies the 211 | // entire DB from one place to another while doing deletions. Hence we do not support it. 212 | return nil 213 | } 214 | -------------------------------------------------------------------------------- /memdb.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "strconv" 7 | "sync" 8 | 9 | "github.com/google/btree" 10 | ) 11 | 12 | const ( 13 | // The approximate number of items and children per B-tree node. Tuned with benchmarks. 14 | bTreeDegree = 32 15 | ) 16 | 17 | func init() { 18 | registerDBCreator(MemDBBackend, func(_, _ string) (DB, error) { 19 | return NewMemDB(), nil 20 | }) 21 | } 22 | 23 | // item is a btree.Item with byte slices as keys and values. 24 | type item struct { 25 | key []byte 26 | value []byte 27 | } 28 | 29 | // Less implements btree.Item. 
30 | func (i *item) Less(other btree.Item) bool { 31 | // this considers nil == []byte{}, but that's ok since we handle nil endpoints 32 | // in iterators specially anyway 33 | return bytes.Compare(i.key, other.(*item).key) == -1 34 | } 35 | 36 | // newKey creates a new key item. 37 | func newKey(key []byte) *item { 38 | return &item{key: key} 39 | } 40 | 41 | // newPair creates a new pair item. 42 | func newPair(key, value []byte) *item { 43 | return &item{key: key, value: value} 44 | } 45 | 46 | // MemDB is an in-memory database backend using a B-tree for storage. 47 | // 48 | // For performance reasons, all given and returned keys and values are pointers to the in-memory 49 | // database, so modifying them will cause the stored values to be modified as well. All DB methods 50 | // already specify that keys and values should be considered read-only, but this is especially 51 | // important with MemDB. 52 | type MemDB struct { 53 | mtx sync.RWMutex 54 | btree *btree.BTree 55 | } 56 | 57 | var _ DB = (*MemDB)(nil) 58 | 59 | // NewMemDB creates a new in-memory database. 60 | func NewMemDB() *MemDB { 61 | database := &MemDB{ 62 | btree: btree.New(bTreeDegree), 63 | } 64 | return database 65 | } 66 | 67 | // Get implements DB. 68 | func (db *MemDB) Get(key []byte) ([]byte, error) { 69 | if len(key) == 0 { 70 | return nil, errKeyEmpty 71 | } 72 | db.mtx.RLock() 73 | defer db.mtx.RUnlock() 74 | 75 | i := db.btree.Get(newKey(key)) 76 | if i != nil { 77 | return i.(*item).value, nil 78 | } 79 | return nil, nil 80 | } 81 | 82 | // Has implements DB. 83 | func (db *MemDB) Has(key []byte) (bool, error) { 84 | if len(key) == 0 { 85 | return false, errKeyEmpty 86 | } 87 | db.mtx.RLock() 88 | defer db.mtx.RUnlock() 89 | 90 | return db.btree.Has(newKey(key)), nil 91 | } 92 | 93 | // Set implements DB. 
94 | func (db *MemDB) Set(key []byte, value []byte) error { 95 | if len(key) == 0 { 96 | return errKeyEmpty 97 | } 98 | if value == nil { 99 | return errValueNil 100 | } 101 | db.mtx.Lock() 102 | defer db.mtx.Unlock() 103 | 104 | db.set(key, value) 105 | return nil 106 | } 107 | 108 | // set sets a value without locking the mutex. 109 | func (db *MemDB) set(key []byte, value []byte) { 110 | db.btree.ReplaceOrInsert(newPair(key, value)) 111 | } 112 | 113 | // SetSync implements DB. 114 | func (db *MemDB) SetSync(key []byte, value []byte) error { 115 | return db.Set(key, value) 116 | } 117 | 118 | // Delete implements DB. 119 | func (db *MemDB) Delete(key []byte) error { 120 | if len(key) == 0 { 121 | return errKeyEmpty 122 | } 123 | db.mtx.Lock() 124 | defer db.mtx.Unlock() 125 | 126 | db.delete(key) 127 | return nil 128 | } 129 | 130 | // delete deletes a key without locking the mutex. 131 | func (db *MemDB) delete(key []byte) { 132 | db.btree.Delete(newKey(key)) 133 | } 134 | 135 | // DeleteSync implements DB. 136 | func (db *MemDB) DeleteSync(key []byte) error { 137 | return db.Delete(key) 138 | } 139 | 140 | // Close implements DB. 141 | func (*MemDB) Close() error { 142 | // Close is a noop since for an in-memory database, we don't have a destination to flush 143 | // contents to nor do we want any data loss on invoking Close(). 144 | return nil 145 | } 146 | 147 | // Print implements DB. 148 | func (db *MemDB) Print() error { 149 | db.mtx.RLock() 150 | defer db.mtx.RUnlock() 151 | 152 | db.btree.Ascend(func(i btree.Item) bool { 153 | item, ok := i.(*item) 154 | if !ok { 155 | return false // or handle the error as appropriate 156 | } 157 | fmt.Printf("[%X]:\t[%X]\n", item.key, item.value) 158 | return true 159 | }) 160 | return nil 161 | } 162 | 163 | // Stats implements DB. 
164 | func (db *MemDB) Stats() map[string]string { 165 | db.mtx.RLock() 166 | defer db.mtx.RUnlock() 167 | 168 | stats := make(map[string]string) 169 | stats["database.type"] = "memDB" 170 | stats["database.size"] = strconv.Itoa(db.btree.Len()) 171 | return stats 172 | } 173 | 174 | // NewBatch implements DB. 175 | func (db *MemDB) NewBatch() Batch { 176 | return newMemDBBatch(db) 177 | } 178 | 179 | // Iterator implements DB. 180 | // Takes out a read-lock on the database until the iterator is closed. 181 | func (db *MemDB) Iterator(start, end []byte) (Iterator, error) { 182 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 183 | return nil, errKeyEmpty 184 | } 185 | return newMemDBIterator(db, start, end, false), nil 186 | } 187 | 188 | // ReverseIterator implements DB. 189 | // Takes out a read-lock on the database until the iterator is closed. 190 | func (db *MemDB) ReverseIterator(start, end []byte) (Iterator, error) { 191 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 192 | return nil, errKeyEmpty 193 | } 194 | return newMemDBIterator(db, start, end, true), nil 195 | } 196 | 197 | // IteratorNoMtx makes an iterator with no mutex. 198 | func (db *MemDB) IteratorNoMtx(start, end []byte) (Iterator, error) { 199 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 200 | return nil, errKeyEmpty 201 | } 202 | return newMemDBIteratorMtxChoice(db, start, end, false, false), nil 203 | } 204 | 205 | // ReverseIteratorNoMtx makes an iterator with no mutex. 
206 | func (db *MemDB) ReverseIteratorNoMtx(start, end []byte) (Iterator, error) { 207 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 208 | return nil, errKeyEmpty 209 | } 210 | return newMemDBIteratorMtxChoice(db, start, end, true, false), nil 211 | } 212 | 213 | func (*MemDB) Compact(_, _ []byte) error { 214 | // No Compaction is supported for memDB and there is no point in supporting compaction for a memory DB 215 | return nil 216 | } 217 | -------------------------------------------------------------------------------- /types.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "errors" 4 | 5 | var ( 6 | // errBatchClosed is returned when a closed or written batch is used. 7 | errBatchClosed = errors.New("batch has been written or closed") 8 | 9 | // errKeyEmpty is returned when attempting to use an empty or nil key. 10 | errKeyEmpty = errors.New("key cannot be empty") 11 | 12 | // errValueNil is returned when attempting to set a nil value. 13 | errValueNil = errors.New("value cannot be nil") 14 | ) 15 | 16 | // DB is the main interface for all database backends. DBs are concurrency-safe. Callers must call 17 | // Close on the database when done. 18 | // 19 | // Keys cannot be nil or empty, while values cannot be nil. Keys and values should be considered 20 | // read-only, both when returned and when given, and must be copied before they are modified. 21 | type DB interface { 22 | // Get fetches the value of the given key, or nil if it does not exist. 23 | // CONTRACT: key, value readonly []byte 24 | Get(key []byte) ([]byte, error) 25 | 26 | // Has checks if a key exists. 27 | // CONTRACT: key, value readonly []byte 28 | Has(key []byte) (bool, error) 29 | 30 | // Set sets the value for the given key, replacing it if it already exists. 
31 | // CONTRACT: key, value readonly []byte 32 | Set(key []byte, value []byte) error 33 | 34 | // SetSync sets the value for the given key, and flushes it to storage before returning. 35 | SetSync(key []byte, value []byte) error 36 | 37 | // Delete deletes the key, or does nothing if the key does not exist. 38 | // CONTRACT: key readonly []byte 39 | Delete(key []byte) error 40 | 41 | // DeleteSync deletes the key, and flushes the delete to storage before returning. 42 | DeleteSync(key []byte) error 43 | 44 | // Iterator returns an iterator over a domain of keys, in ascending order. The caller must call 45 | // Close when done. End is exclusive, and start must be less than end. A nil start iterates 46 | // from the first key, and a nil end iterates to the last key (inclusive). Empty keys are not 47 | // valid. 48 | // CONTRACT: No writes may happen within a domain while an iterator exists over it. 49 | // CONTRACT: start, end readonly []byte 50 | Iterator(start, end []byte) (Iterator, error) 51 | 52 | // ReverseIterator returns an iterator over a domain of keys, in descending order. The caller 53 | // must call Close when done. End is exclusive, and start must be less than end. A nil end 54 | // iterates from the last key (inclusive), and a nil start iterates to the first key (inclusive). 55 | // Empty keys are not valid. 56 | // CONTRACT: No writes may happen within a domain while an iterator exists over it. 57 | // CONTRACT: start, end readonly []byte 58 | ReverseIterator(start, end []byte) (Iterator, error) 59 | 60 | // Close closes the database connection. 61 | Close() error 62 | 63 | // NewBatch creates a batch for atomic updates. The caller must call Batch.Close. 64 | NewBatch() Batch 65 | 66 | // Print is used for debugging. 67 | Print() error 68 | 69 | // Stats returns a map of property values for all keys and the size of the cache. 
	Stats() map[string]string

	// Compact explicitly compacts the key range [start, end). Backends without
	// native compaction support (e.g. memdb, boltdb) treat this as a no-op.
	Compact(start, end []byte) error
}

// Batch represents a group of writes. They may or may not be written atomically depending on the
// backend. Callers must call Close on the batch when done.
//
// As with DB, given keys and values should be considered read-only, and must not be modified after
// passing them to the batch.
type Batch interface {
	// Set sets a key/value pair.
	// CONTRACT: key, value readonly []byte
	Set(key, value []byte) error

	// Delete deletes a key/value pair.
	// CONTRACT: key readonly []byte
	Delete(key []byte) error

	// Write writes the batch, possibly without flushing to disk. Only Close() can be called after,
	// other methods will error.
	Write() error

	// WriteSync writes the batch and flushes it to disk. Only Close() can be called after, other
	// methods will error.
	WriteSync() error

	// Close closes the batch. It is idempotent, but calls to other methods afterwards will error.
	Close() error
}

// Iterator represents an iterator over a domain of keys. Callers must call Close when done.
// No writes can happen to a domain while there exists an iterator over it, some backends may take
// out database locks to ensure this will not happen.
//
// Callers must make sure the iterator is valid before calling any methods on it, otherwise
// these methods will panic. This is in part caused by most backend databases using this convention.
//
// As with DB, keys and values should be considered read-only, and must be copied before they are
// modified.
//
// Typical usage:
//
//	var itr Iterator = ...
//	defer itr.Close()
//
//	for ; itr.Valid(); itr.Next() {
//		k, v := itr.Key(); itr.Value()
//		...
//	}
//
//	if err := itr.Error(); err != nil {
//		...
//	}
type Iterator interface {
	// Domain returns the start (inclusive) and end (exclusive) limits of the iterator.
	// CONTRACT: start, end readonly []byte
	Domain() (start []byte, end []byte)

	// Valid returns whether the current iterator is valid. Once invalid, the Iterator remains
	// invalid forever.
	Valid() bool

	// Next moves the iterator to the next key in the database, as defined by order of iteration.
	// If Valid returns false, this method will panic.
	Next()

	// Key returns the key at the current position. Panics if the iterator is invalid.
	// Key returns the key of the current key/value pair, or nil if done.
	// The caller should not modify the contents of the returned slice, and
	// its contents may change on the next call to any 'seeks method'.
	// Instead, the caller should make a copy and work on the copy.
	Key() (key []byte)

	// Value returns the value at the current position. Panics if the iterator is invalid.
	// Value returns the value of the current key/value pair, or nil if done.
	// The caller should not modify the contents of the returned slice, and
	// its contents may change on the next call to any 'seeks method'.
	// Instead, the caller should make a copy and work on the copy.
	Value() (value []byte)

	// Error returns the last error encountered by the iterator, if any.
	Error() error

	// Close closes the iterator, releasing any allocated resources.
	Close() error
}
-------------------------------------------------------------------------------- /CHANGELOG.md: --------------------------------------------------------------------------------
# CHANGELOG

## v1.0.4

*February 28, 2025*

This release relaxes Go version constraints and fixes Docker builds.
8 | 9 | ## v1.0.3 10 | 11 | *February 7, 2025* 12 | 13 | This release bumps the Go version to 1.23.6 and brings back arm64 Docker build 14 | (without rocksdb). 15 | 16 | ### BUG FIXES 17 | 18 | - `[docker]` Bring back `arm64` build target 19 | ([\#234](https://github.com/cometbft/cometbft-db/issues/234)) 20 | 21 | ### DEPENDENCIES 22 | 23 | - `[deps]` Bump Go version to 1.23.6 24 | ([\#236](https://github.com/cometbft/cometbft-db/pull/236)) 25 | 26 | ## v1.0.2 27 | 28 | *January 29, 2025* 29 | 30 | This release bumps the Go version to 1.23.5. 31 | 32 | ## v1.0.1 33 | 34 | *September 23, 2024* 35 | 36 | This release reverts the addition of the `goleveldb` flag, which was deemed as 37 | too disruptive to users. 38 | 39 | ## v1.0.0 40 | 41 | *September 20, 2024* 42 | 43 | This release swaps the "default" DB from goleveldb to pebbledb. There's now a 44 | `goleveldb` build flag that must be used when using goleveldb. If you're using 45 | `pebbledb`, you don't need a build flag anymore. 46 | 47 | ### BREAKING 48 | 49 | - Add `goleveldb` build flag. 50 | ([\#202](https://github.com/cometbft/cometbft-db/pull/202)) 51 | 52 | ## v0.15.0 53 | 54 | *September 9, 2024* 55 | 56 | This release bumps the Go version to 1.23. 57 | 58 | ### BREAKING CHANGES 59 | 60 | - `[go/runtime]` Bump minimum Go version to v1.23 61 | ([\#4039](https://github.com/cometbft/cometbft/issues/4039)) 62 | 63 | ### DEPENDENCIES 64 | 65 | - Use RocksDB 9, testing with v9.3.1 66 | ([\#189](https://github.com/cometbft/cometbft-db/pull/189)) 67 | 68 | ## v0.14.0 69 | 70 | *Aug 9, 2024* 71 | 72 | This release reinstates boltdb and cleveldb as deprecated backend types. 73 | Please note that we discourage the use of them, as we plan to discontinue support in a future release. 
74 | 75 | ### DEPENDENCIES 76 | 77 | - reinstate BoltDB and ClevelDB as backend DBs 78 | ([\#177](https://github.com/cometbft/cometbft-db/pull/177)) 79 | 80 | ## v0.13.0 81 | 82 | *Aug 2, 2024* 83 | 84 | This release: 85 | - changes the contract of the Iterator Key() and Value() APIs. Namely, the caller is now responsible for creating a copy of their returned value if they want to modify it. 86 | - removes support for boltDB and clevelDB, which were marked as deprecated in release v0.12.0. 87 | 88 | ### BREAKING CHANGES 89 | 90 | - removed deprecated boltdb and cleveldb ([\#155](https://github.com/cometbft/cometbft-db/pull/155)) 91 | 92 | ### FEATURES 93 | 94 | - Iterator Key and Value APIs now return an object that must be copied before 95 | use ([\#168](https://github.com/cometbft/cometbft-db/pull/168)) 96 | 97 | ## v0.12.0 98 | 99 | *Apr 10, 2024* 100 | 101 | This release deprecates boltdb and cleveldb. Also, Go MSRV is bumped to 1.22. 102 | 103 | ### FEATURES 104 | 105 | - Deprecate boltdb and cleveldb. If you're using either of those, please reach 106 | out ([\#153](https://github.com/cometbft/cometbft-db/pull/153)) 107 | 108 | ## v0.11.0 109 | 110 | *Feb 7, 2024* 111 | 112 | This release adds support for explicit compaction. Please note that badger and 113 | bolt do not support this. 114 | 115 | ### BREAKING CHANGES 116 | 117 | - Expanded db interface to support compaction ([\#111](https://github.com/cometbft/cometbft-db/pull/111)) 118 | 119 | ### FEATURES 120 | 121 | - Add compaction support to the databases ([\#111](https://github.com/cometbft/cometbft-db/pull/111)) 122 | 123 | ## v0.10.0 124 | 125 | *Jan 26, 2024* 126 | 127 | This release adds experimental support for 128 | [pebble](https://github.com/cockroachdb/pebble) and drops `remotedb`. If you 129 | experience any issues with pebble, please open an issue on Github. 130 | 131 | Special thanks to @faddat and @baabeetaa for their contributions to this 132 | release! 
133 | 134 | ### BREAKING CHANGES 135 | 136 | - Remove remotedb ([\#121](https://github.com/cometbft/cometbft-db/pull/121)) 137 | 138 | ### FEATURES 139 | 140 | - Add [pebbledb](https://github.com/cockroachdb/pebble) ([\#112](https://github.com/cometbft/cometbft-db/pull/112)) 141 | 142 | ## v0.9.1 143 | 144 | *December 4, 2023* 145 | 146 | This release is precisely the same code-wise as v0.9.0, except that it builds 147 | the `cometbft/cometbft-db-testing` Docker image for both `linux/amd64` and 148 | `linux/arm64` platforms. 149 | 150 | ## v0.9.0 151 | 152 | *December 1, 2023* 153 | 154 | This release primarily updates some key dependencies, including adding support 155 | for RocksDB v8. It also bumps the minimum Go version to v1.21 in order for 156 | CometBFT to be able to use it in the E2E testing framework for the latest major 157 | releases. 158 | 159 | ### DEPENDENCIES 160 | 161 | - Use RocksDB v8, testing with v8.8.1 162 | ([\#97](https://github.com/cometbft/cometbft-db/pull/97)) 163 | 164 | ### GO VERSION 165 | 166 | - Bump minimum Go version to v1.21 167 | ([\#98](https://github.com/cometbft/cometbft-db/pull/98)) 168 | 169 | ## v0.8.0 170 | 171 | *Apr 26, 2023* 172 | 173 | This release bumps the supported version of RocksDB, which requires cometbft-db 174 | RocksDB users to update their builds (and hence requires a "major" release, but 175 | does not introduce any other breaking changes). Special thanks to @yihuang for 176 | this update! 177 | 178 | While the minimum supported version of the Go compiler was bumped to 1.19, no 179 | 1.19-specific code changes were introduced and this should, therefore, still be 180 | able to be compiled with earlier versions of Go. It is, however, recommended to 181 | upgrade to the latest version(s) of Go ASAP. 
182 | 183 | ### COMPILER 184 | 185 | - Bump minimum Go version to 1.19 186 | ([\#40](https://github.com/cometbft/cometbft-db/pull/40)) 187 | 188 | ### DEPENDENCIES 189 | 190 | - Switch rocksdb binding from gorocksdb to grocksdb, bump librocksdb dependency 191 | to `v7.10.2` ([\#42](https://github.com/cometbft/cometbft-db/pull/42)) 192 | - Update to the latest version of golang.org/x/net 193 | ([\#40](https://github.com/cometbft/cometbft-db/pull/40)) 194 | 195 | ## v0.7.0 196 | 197 | *Jan 17, 2023* 198 | 199 | This is the first official release of CometBFT DB, which is a fork of 200 | [tm-db](https://github.com/tendermint/tm-db). 201 | 202 | This fork is intended to be used by 203 | [CometBFT](https://github.com/cometbft/cometbft) until such time that 204 | [cometbft/cometbft\#48](https://github.com/cometbft/cometbft/issues/48) is 205 | resolved, after which time this fork will be retired and archived. Do not use 206 | this as a dependency in any new projects. 207 | 208 | ### BREAKING CHANGES 209 | 210 | - Fork tm-db and rename fork to cometbft-db 211 | ([\#7](https://github.com/cometbft/cometbft-db/issues/7)) 212 | 213 | --- 214 | 215 | CometBFT DB is a fork of [tm-db](https://github.com/tendermint/tm-db) 216 | effectively as of v0.6.6. 217 | 218 | For changes prior to the creation of this fork, please refer to the upstream 219 | [CHANGELOG.md](https://github.com/tendermint/tm-db/blob/774cdfe7e6b0a249b1144998d81a4de7b8037941/CHANGELOG.md) 220 | for v0.6.6 and earlier. 
221 | 222 | -------------------------------------------------------------------------------- /badger_db.go: -------------------------------------------------------------------------------- 1 | //go:build badgerdb 2 | // +build badgerdb 3 | 4 | package db 5 | 6 | import ( 7 | "bytes" 8 | "fmt" 9 | "os" 10 | "path/filepath" 11 | 12 | "github.com/dgraph-io/badger/v4" 13 | ) 14 | 15 | func init() { registerDBCreator(BadgerDBBackend, badgerDBCreator) } 16 | 17 | func badgerDBCreator(dbName, dir string) (DB, error) { 18 | return NewBadgerDB(dbName, dir) 19 | } 20 | 21 | // NewBadgerDB creates a Badger key-value store backed to the 22 | // directory dir supplied. If dir does not exist, it will be created. 23 | func NewBadgerDB(dbName, dir string) (*BadgerDB, error) { 24 | // Since Badger doesn't support database names, we join both to obtain 25 | // the final directory to use for the database. 26 | path := filepath.Join(dir, dbName) 27 | 28 | if err := os.MkdirAll(path, 0o755); err != nil { 29 | return nil, err 30 | } 31 | opts := badger.DefaultOptions(path) 32 | opts.SyncWrites = false // note that we have Sync methods 33 | opts.Logger = nil // badger is too chatty by default 34 | return NewBadgerDBWithOptions(opts) 35 | } 36 | 37 | // NewBadgerDBWithOptions creates a BadgerDB key value store 38 | // gives the flexibility of initializing a database with the 39 | // respective options. 
func NewBadgerDBWithOptions(opts badger.Options) (*BadgerDB, error) {
	db, err := badger.Open(opts)
	if err != nil {
		return nil, err
	}
	return &BadgerDB{db: db}, nil
}

// BadgerDB wraps a badger key-value store behind the DB interface.
type BadgerDB struct {
	db *badger.DB
}

var _ DB = (*BadgerDB)(nil)

// Get implements DB. It returns (nil, nil) for a missing key and an empty
// (non-nil) slice for a key stored with an empty value.
func (b *BadgerDB) Get(key []byte) ([]byte, error) {
	if len(key) == 0 {
		return nil, errKeyEmpty
	}
	var val []byte
	err := b.db.View(func(txn *badger.Txn) error {
		item, err := txn.Get(key)
		if err == badger.ErrKeyNotFound {
			// Missing key is not an error; val stays nil.
			return nil
		} else if err != nil {
			return err
		}
		val, err = item.ValueCopy(nil)
		if err == nil && val == nil {
			// Normalize a stored empty value to a non-nil empty slice so
			// callers can distinguish "present but empty" from "absent".
			val = []byte{}
		}
		return err
	})
	return val, err
}

// Has implements DB, reporting whether key exists without copying its value.
func (b *BadgerDB) Has(key []byte) (bool, error) {
	if len(key) == 0 {
		return false, errKeyEmpty
	}
	var found bool
	err := b.db.View(func(txn *badger.Txn) error {
		_, err := txn.Get(key)
		if err != nil && err != badger.ErrKeyNotFound {
			return err
		}
		found = (err != badger.ErrKeyNotFound)
		return nil
	})
	return found, err
}

// Set implements DB. The write is unsynced (opts.SyncWrites is disabled in
// NewBadgerDB); use SetSync for durability.
func (b *BadgerDB) Set(key, value []byte) error {
	if len(key) == 0 {
		return errKeyEmpty
	}
	if value == nil {
		return errValueNil
	}
	return b.db.Update(func(txn *badger.Txn) error {
		return txn.Set(key, value)
	})
}

// withSync forwards err if non-nil, otherwise syncs the badger store to disk.
// It is used to build the *Sync variants from their unsynced counterparts.
func withSync(db *badger.DB, err error) error {
	if err != nil {
		return err
	}
	return db.Sync()
}

// SetSync implements DB: Set followed by a disk sync.
func (b *BadgerDB) SetSync(key, value []byte) error {
	return withSync(b.db, b.Set(key, value))
}

// Delete implements DB. Deleting a nonexistent key is not an error.
func (b *BadgerDB) Delete(key []byte) error {
	if len(key) == 0 {
		return errKeyEmpty
	}
	return b.db.Update(func(txn *badger.Txn) error {
		return txn.Delete(key)
	})
}

// DeleteSync implements DB: Delete followed by a disk sync.
func (b *BadgerDB) DeleteSync(key []byte) error {
	return withSync(b.db, b.Delete(key))
}

// Close implements DB, releasing the underlying badger store.
func (b *BadgerDB) Close() error {
	return b.db.Close()
}

// Print implements DB. Not supported for badger; it is a no-op.
func (b *BadgerDB) Print() error {
	return nil
}

// iteratorOpts creates an iterator over a read-only transaction. The
// transaction is owned by the returned iterator and discarded on its Close.
func (b *BadgerDB) iteratorOpts(start, end []byte, opts badger.IteratorOptions) (*badgerDBIterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, errKeyEmpty
	}
	txn := b.db.NewTransaction(false)
	iter := txn.NewIterator(opts)
	iter.Rewind()
	iter.Seek(start)
	if opts.Reverse && iter.Valid() && bytes.Equal(iter.Item().Key(), start) {
		// If we're going in reverse, our starting point was "end",
		// which is exclusive.
		iter.Next()
	}
	return &badgerDBIterator{
		reverse: opts.Reverse,
		start:   start,
		end:     end,

		txn:  txn,
		iter: iter,
	}, nil
}

// Iterator implements DB: forward iteration over [start, end).
func (b *BadgerDB) Iterator(start, end []byte) (Iterator, error) {
	opts := badger.DefaultIteratorOptions
	return b.iteratorOpts(start, end, opts)
}

// ReverseIterator implements DB. Note the swapped arguments: in reverse
// mode the (exclusive) end key is the seek target.
func (b *BadgerDB) ReverseIterator(start, end []byte) (Iterator, error) {
	opts := badger.DefaultIteratorOptions
	opts.Reverse = true
	return b.iteratorOpts(end, start, opts)
}

// Stats implements DB. Not supported; always returns nil.
func (b *BadgerDB) Stats() map[string]string {
	return nil
}

// Compact implements DB.
func (b *BadgerDB) Compact(start, end []byte) error {
	// Explicit compaction is not currently supported in badger
	return nil
}

// NewBatch implements DB. The one-buffered firstFlush channel acts as a
// "may flush exactly once" token consumed by Write/Close.
func (b *BadgerDB) NewBatch() Batch {
	wb := &badgerDBBatch{
		db:         b.db,
		wb:         b.db.NewWriteBatch(),
		firstFlush: make(chan struct{}, 1),
	}
	wb.firstFlush <- struct{}{}
	return wb
}

var _ Batch = (*badgerDBBatch)(nil)

type badgerDBBatch struct {
	db *badger.DB
	wb *badger.WriteBatch

	// Calling db.Flush twice panics, so we must keep track of whether we've
	// flushed already on our own. If Write can receive from the firstFlush
	// channel, then it's the first and only Flush call we should do.
	//
	// Upstream bug report:
	// https://github.com/dgraph-io/badger/issues/1394
	firstFlush chan struct{}
}

// Set implements Batch, staging the write until Write/WriteSync.
func (b *badgerDBBatch) Set(key, value []byte) error {
	if len(key) == 0 {
		return errKeyEmpty
	}
	if value == nil {
		return errValueNil
	}
	return b.wb.Set(key, value)
}

// Delete implements Batch, staging the deletion until Write/WriteSync.
func (b *badgerDBBatch) Delete(key []byte) error {
	if len(key) == 0 {
		return errKeyEmpty
	}
	return b.wb.Delete(key)
}

// Write implements Batch. Only the first call flushes; subsequent calls
// error out (see the firstFlush comment on the struct).
func (b *badgerDBBatch) Write() error {
	select {
	case <-b.firstFlush:
		return b.wb.Flush()
	default:
		return fmt.Errorf("batch already flushed")
	}
}

// WriteSync implements Batch: Write followed by a disk sync.
func (b *badgerDBBatch) WriteSync() error {
	return withSync(b.db, b.Write())
}

// Close implements Batch. Consuming the flush token first makes a later
// Write fail instead of panicking inside badger.
func (b *badgerDBBatch) Close() error {
	select {
	case <-b.firstFlush: // a Flush after Cancel panics too
	default:
	}
	b.wb.Cancel()
	return nil
}

// badgerDBIterator adapts a badger iterator (and its owning read
// transaction) to the Iterator interface, enforcing the end bound itself.
type badgerDBIterator struct {
	reverse    bool
	start, end []byte

	txn  *badger.Txn
	iter *badger.Iterator

	// lastErr records a ValueCopy failure for later retrieval via Error.
	lastErr error
}

// Close implements Iterator, releasing the iterator and its transaction.
func (i *badgerDBIterator) Close() error {
	i.iter.Close()
	i.txn.Discard()
	return nil
}

func (i *badgerDBIterator) Domain() (start, end []byte) { return i.start, i.end }
func (i *badgerDBIterator) Error() error                { return i.lastErr }

// Next implements Iterator. Panics if the iterator is invalid.
func (i *badgerDBIterator) Next() {
	if !i.Valid() {
		panic("iterator is invalid")
	}
	i.iter.Next()
}

// Valid implements Iterator. Badger only enforces the prefix, so the
// (exclusive) end bound is checked here in both directions.
func (i *badgerDBIterator) Valid() bool {
	if !i.iter.Valid() {
		return false
	}
	if len(i.end) > 0 {
		key := i.iter.Item().Key()
		if c := bytes.Compare(key, i.end); (!i.reverse && c >= 0) ||
			(i.reverse && c < 0) {
			// We're at the end key, or past the end.
			return false
		}
	}
	return true
}

// Key implements Iterator.
// The caller should not modify the contents of the returned slice.
// Instead, the caller should make a copy and work on the copy.
func (i *badgerDBIterator) Key() []byte {
	if !i.Valid() {
		panic("iterator is invalid")
	}
	return i.iter.Item().Key()
}

// Value implements Iterator.
// The returned slice is a copy of the original data, therefore it is safe to modify.
func (i *badgerDBIterator) Value() []byte {
	if !i.Valid() {
		panic("iterator is invalid")
	}

	val, err := i.iter.Item().ValueCopy(nil)
	if err != nil {
		// Surface the failure through Error(); Value still returns val (nil).
		i.lastErr = err
	}
	return val
}
-------------------------------------------------------------------------------- /pebble.go: --------------------------------------------------------------------------------
package db

import (
	"bytes"
	"fmt"
	"path/filepath"

	"github.com/cockroachdb/pebble"
)

func init() {
	dbCreator := func(name string, dir string) (DB, error) {
		return NewPebbleDB(name, dir)
	}
	// Register pebble under its backend name so NewDB can construct it.
	registerDBCreator(PebbleDBBackend, dbCreator)
}

// PebbleDB is a PebbleDB backend.
19 | type PebbleDB struct { 20 | db *pebble.DB 21 | } 22 | 23 | var _ DB = (*PebbleDB)(nil) 24 | 25 | func NewPebbleDB(name string, dir string) (*PebbleDB, error) { 26 | opts := &pebble.Options{} 27 | opts.EnsureDefaults() 28 | return NewPebbleDBWithOpts(name, dir, opts) 29 | } 30 | 31 | func NewPebbleDBWithOpts(name string, dir string, opts *pebble.Options) (*PebbleDB, error) { 32 | dbPath := filepath.Join(dir, name+".db") 33 | opts.EnsureDefaults() 34 | p, err := pebble.Open(dbPath, opts) 35 | if err != nil { 36 | return nil, err 37 | } 38 | return &PebbleDB{ 39 | db: p, 40 | }, err 41 | } 42 | 43 | // Get implements DB. 44 | func (db *PebbleDB) Get(key []byte) ([]byte, error) { 45 | if len(key) == 0 { 46 | return nil, errKeyEmpty 47 | } 48 | 49 | res, closer, err := db.db.Get(key) 50 | if err != nil { 51 | if err == pebble.ErrNotFound { 52 | return nil, nil 53 | } 54 | return nil, err 55 | } 56 | defer closer.Close() 57 | 58 | return cp(res), nil 59 | } 60 | 61 | // Has implements DB. 62 | func (db *PebbleDB) Has(key []byte) (bool, error) { 63 | if len(key) == 0 { 64 | return false, errKeyEmpty 65 | } 66 | 67 | bytesPeb, err := db.Get(key) 68 | if err != nil { 69 | return false, err 70 | } 71 | return bytesPeb != nil, nil 72 | } 73 | 74 | // Set implements DB. 75 | func (db *PebbleDB) Set(key []byte, value []byte) error { 76 | if len(key) == 0 { 77 | return errKeyEmpty 78 | } 79 | if value == nil { 80 | return errValueNil 81 | } 82 | 83 | wopts := pebble.NoSync 84 | err := db.db.Set(key, value, wopts) 85 | if err != nil { 86 | return err 87 | } 88 | return nil 89 | } 90 | 91 | // SetSync implements DB. 92 | func (db *PebbleDB) SetSync(key []byte, value []byte) error { 93 | if len(key) == 0 { 94 | return errKeyEmpty 95 | } 96 | if value == nil { 97 | return errValueNil 98 | } 99 | err := db.db.Set(key, value, pebble.Sync) 100 | if err != nil { 101 | return err 102 | } 103 | return nil 104 | } 105 | 106 | // Delete implements DB. 
107 | func (db *PebbleDB) Delete(key []byte) error { 108 | if len(key) == 0 { 109 | return errKeyEmpty 110 | } 111 | 112 | wopts := pebble.NoSync 113 | return db.db.Delete(key, wopts) 114 | } 115 | 116 | // DeleteSync implements DB. 117 | func (db PebbleDB) DeleteSync(key []byte) error { 118 | if len(key) == 0 { 119 | return errKeyEmpty 120 | } 121 | return db.db.Delete(key, pebble.Sync) 122 | } 123 | 124 | func (db *PebbleDB) DB() *pebble.DB { 125 | return db.db 126 | } 127 | 128 | func (db *PebbleDB) Compact(start, end []byte) (err error) { 129 | // Currently nil,nil is an invalid range in Pebble. 130 | // This was taken from https://github.com/cockroachdb/pebble/issues/1474 131 | // In case the start and end keys are the same 132 | // pebbleDB will throw an error that it cannot compact. 133 | if start != nil && end != nil { 134 | return db.db.Compact(start, end, true) 135 | } 136 | iter, err := db.db.NewIter(nil) 137 | if err != nil { 138 | return err 139 | } 140 | defer func() { 141 | err2 := iter.Close() 142 | if err2 != nil { 143 | err = err2 144 | } 145 | }() 146 | if start == nil && iter.First() { 147 | start = append(start, iter.Key()...) 148 | } 149 | if end == nil && iter.Last() { 150 | end = append(end, iter.Key()...) 151 | } 152 | return db.db.Compact(start, end, true) 153 | } 154 | 155 | // Close implements DB. 156 | func (db PebbleDB) Close() error { 157 | db.db.Close() 158 | return nil 159 | } 160 | 161 | // Print implements DB. 162 | func (db *PebbleDB) Print() error { 163 | itr, err := db.Iterator(nil, nil) 164 | if err != nil { 165 | return err 166 | } 167 | defer itr.Close() 168 | for ; itr.Valid(); itr.Next() { 169 | key := itr.Key() 170 | value := itr.Value() 171 | fmt.Printf("[%X]:\t[%X]\n", key, value) 172 | } 173 | return nil 174 | } 175 | 176 | // Stats implements DB. 177 | func (*PebbleDB) Stats() map[string]string { 178 | return nil 179 | } 180 | 181 | // NewBatch implements DB. 
182 | func (db *PebbleDB) NewBatch() Batch { 183 | return newPebbleDBBatch(db) 184 | } 185 | 186 | // Iterator implements DB. 187 | func (db *PebbleDB) Iterator(start, end []byte) (Iterator, error) { 188 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 189 | return nil, errKeyEmpty 190 | } 191 | o := pebble.IterOptions{ 192 | LowerBound: start, 193 | UpperBound: end, 194 | } 195 | itr, err := db.db.NewIter(&o) 196 | if err != nil { 197 | return nil, err 198 | } 199 | itr.First() 200 | 201 | return newPebbleDBIterator(itr, start, end, false), nil 202 | } 203 | 204 | // ReverseIterator implements DB. 205 | func (db *PebbleDB) ReverseIterator(start, end []byte) (Iterator, error) { 206 | if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 207 | return nil, errKeyEmpty 208 | } 209 | o := pebble.IterOptions{ 210 | LowerBound: start, 211 | UpperBound: end, 212 | } 213 | itr, err := db.db.NewIter(&o) 214 | if err != nil { 215 | return nil, err 216 | } 217 | itr.Last() 218 | return newPebbleDBIterator(itr, start, end, true), nil 219 | } 220 | 221 | var _ Batch = (*pebbleDBBatch)(nil) 222 | 223 | type pebbleDBBatch struct { 224 | db *PebbleDB 225 | batch *pebble.Batch 226 | } 227 | 228 | var _ Batch = (*pebbleDBBatch)(nil) 229 | 230 | func newPebbleDBBatch(db *PebbleDB) *pebbleDBBatch { 231 | return &pebbleDBBatch{ 232 | // For regular batch operations batch.db is going to be set to db 233 | // and it is not needed to initialize the DB here. 234 | // This is set to enable general DB operations like compaction 235 | // (e.x. a call do pebbleDBBatch.db.Compact() would throw a nil pointer exception) 236 | db: db, 237 | batch: db.db.NewBatch(), 238 | } 239 | } 240 | 241 | // Set implements Batch. 
242 | func (b *pebbleDBBatch) Set(key, value []byte) error { 243 | if len(key) == 0 { 244 | return errKeyEmpty 245 | } 246 | if value == nil { 247 | return errValueNil 248 | } 249 | if b.batch == nil { 250 | return errBatchClosed 251 | } 252 | 253 | return b.batch.Set(key, value, nil) 254 | } 255 | 256 | // Delete implements Batch. 257 | func (b *pebbleDBBatch) Delete(key []byte) error { 258 | if len(key) == 0 { 259 | return errKeyEmpty 260 | } 261 | if b.batch == nil { 262 | return errBatchClosed 263 | } 264 | 265 | return b.batch.Delete(key, nil) 266 | } 267 | 268 | // Write implements Batch. 269 | func (b *pebbleDBBatch) Write() error { 270 | if b.batch == nil { 271 | return errBatchClosed 272 | } 273 | 274 | wopts := pebble.NoSync 275 | err := b.batch.Commit(wopts) 276 | if err != nil { 277 | return err 278 | } 279 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 280 | 281 | return b.Close() 282 | } 283 | 284 | // WriteSync implements Batch. 285 | func (b *pebbleDBBatch) WriteSync() error { 286 | if b.batch == nil { 287 | return errBatchClosed 288 | } 289 | err := b.batch.Commit(pebble.Sync) 290 | if err != nil { 291 | return err 292 | } 293 | // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 294 | return b.Close() 295 | } 296 | 297 | // Close implements Batch. 
func (b *pebbleDBBatch) Close() error {
	// Idempotent: a second Close (or a Close after Write) is a no-op.
	if b.batch != nil {
		err := b.batch.Close()
		if err != nil {
			return err
		}
		b.batch = nil
	}

	return nil
}

// pebbleDBIterator adapts *pebble.Iterator to the Iterator interface,
// caching invalidity so the iterator stays invalid once it leaves the range.
type pebbleDBIterator struct {
	source     *pebble.Iterator
	start, end []byte
	isReverse  bool
	isInvalid  bool
}

var _ Iterator = (*pebbleDBIterator)(nil)

// newPebbleDBIterator wraps source. With an open bound it (re)positions the
// iterator at the corresponding extreme; otherwise the caller is expected to
// have positioned it already (see Iterator/ReverseIterator).
func newPebbleDBIterator(source *pebble.Iterator, start, end []byte, isReverse bool) *pebbleDBIterator {
	if isReverse {
		if end == nil {
			source.Last()
		}
	} else {
		if start == nil {
			source.First()
		}
	}
	return &pebbleDBIterator{
		source:    source,
		start:     start,
		end:       end,
		isReverse: isReverse,
		isInvalid: false,
	}
}

// Domain implements Iterator.
func (itr *pebbleDBIterator) Domain() (start []byte, end []byte) {
	return itr.start, itr.end
}

// Valid implements Iterator. The checks are order-sensitive: error and
// source-validity are tested before the key is read, and any failure latches
// isInvalid permanently.
func (itr *pebbleDBIterator) Valid() bool {
	// Once invalid, forever invalid.
	if itr.isInvalid {
		return false
	}

	// If source has error, invalid.
	if err := itr.source.Error(); err != nil {
		itr.isInvalid = true

		return false
	}

	// If source is invalid, invalid.
	if !itr.source.Valid() {
		itr.isInvalid = true

		return false
	}

	// If key is end or past it, invalid.
	start := itr.start
	end := itr.end
	key := itr.source.Key()
	if itr.isReverse {
		// Reverse iteration: start is the inclusive lower bound.
		if start != nil && bytes.Compare(key, start) < 0 {
			itr.isInvalid = true

			return false
		}
	} else {
		// Forward iteration: end is the exclusive upper bound.
		if end != nil && bytes.Compare(end, key) <= 0 {
			itr.isInvalid = true

			return false
		}
	}

	// It's valid.
	return true
}

// Key implements Iterator.
387 | // The caller should not modify the contents of the returned slice. 388 | // Instead, the caller should make a copy and work on the copy. 389 | func (itr *pebbleDBIterator) Key() []byte { 390 | itr.assertIsValid() 391 | return itr.source.Key() 392 | } 393 | 394 | // Value implements Iterator. 395 | // The caller should not modify the contents of the returned slice. 396 | // Instead, the caller should make a copy and work on the copy. 397 | func (itr *pebbleDBIterator) Value() []byte { 398 | itr.assertIsValid() 399 | return itr.source.Value() 400 | } 401 | 402 | // Next implements Iterator. 403 | func (itr pebbleDBIterator) Next() { 404 | itr.assertIsValid() 405 | if itr.isReverse { 406 | itr.source.Prev() 407 | } else { 408 | itr.source.Next() 409 | } 410 | } 411 | 412 | // Error implements Iterator. 413 | func (itr *pebbleDBIterator) Error() error { 414 | return itr.source.Error() 415 | } 416 | 417 | // Close implements Iterator. 418 | func (itr *pebbleDBIterator) Close() error { 419 | err := itr.source.Close() 420 | if err != nil { 421 | return err 422 | } 423 | return nil 424 | } 425 | 426 | func (itr *pebbleDBIterator) assertIsValid() { 427 | if !itr.Valid() { 428 | panic("iterator is invalid") 429 | } 430 | } 431 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /backend_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | // Register a test backend for PrefixDB as well, with some unrelated junk data. 16 | func init() { 17 | //nolint: errcheck, revive // probably should check errors? 
18 | registerDBCreator("prefixdb", func(name, dir string) (DB, error) { 19 | mdb := NewMemDB() 20 | mdb.Set([]byte("a"), []byte{1}) 21 | mdb.Set([]byte("b"), []byte{2}) 22 | mdb.Set([]byte("t"), []byte{20}) 23 | mdb.Set([]byte("test"), []byte{0}) 24 | mdb.Set([]byte("u"), []byte{21}) 25 | mdb.Set([]byte("z"), []byte{26}) 26 | return NewPrefixDB(mdb, []byte("test/")), nil 27 | }) 28 | } 29 | 30 | func cleanupDBDir(dir, name string) { 31 | err := os.RemoveAll(filepath.Join(dir, name) + ".db") 32 | if err != nil { 33 | panic(err) 34 | } 35 | } 36 | 37 | func testBackendGetSetDelete(t *testing.T, backend BackendType) { 38 | t.Helper() 39 | // Default 40 | dirname, err := os.MkdirTemp("", fmt.Sprintf("test_backend_%s_", backend)) 41 | require.Nil(t, err) 42 | name := fmt.Sprintf("testdb_%x", randStr(12)) 43 | db, err := NewDB(name, backend, dirname) 44 | require.NoError(t, err) 45 | defer cleanupDBDir(dirname, name) 46 | 47 | // A nonexistent key should return nil. 48 | value, err := db.Get([]byte("a")) 49 | require.NoError(t, err) 50 | require.Nil(t, value) 51 | 52 | ok, err := db.Has([]byte("a")) 53 | require.NoError(t, err) 54 | require.False(t, ok) 55 | 56 | // Set and get a value. 57 | err = db.Set([]byte("a"), []byte{0x01}) 58 | require.NoError(t, err) 59 | 60 | ok, err = db.Has([]byte("a")) 61 | require.NoError(t, err) 62 | require.True(t, ok) 63 | 64 | value, err = db.Get([]byte("a")) 65 | require.NoError(t, err) 66 | require.Equal(t, []byte{0x01}, value) 67 | 68 | err = db.SetSync([]byte("b"), []byte{0x02}) 69 | require.NoError(t, err) 70 | 71 | value, err = db.Get([]byte("b")) 72 | require.NoError(t, err) 73 | require.Equal(t, []byte{0x02}, value) 74 | 75 | // Deleting a non-existent value is fine. 76 | err = db.Delete([]byte("x")) 77 | require.NoError(t, err) 78 | 79 | err = db.DeleteSync([]byte("x")) 80 | require.NoError(t, err) 81 | 82 | // Delete a value. 
83 | err = db.Delete([]byte("a")) 84 | require.NoError(t, err) 85 | 86 | value, err = db.Get([]byte("a")) 87 | require.NoError(t, err) 88 | require.Nil(t, value) 89 | 90 | err = db.DeleteSync([]byte("b")) 91 | require.NoError(t, err) 92 | 93 | value, err = db.Get([]byte("b")) 94 | require.NoError(t, err) 95 | require.Nil(t, value) 96 | 97 | // Setting, getting, and deleting an empty key should error. 98 | _, err = db.Get([]byte{}) 99 | require.Equal(t, errKeyEmpty, err) 100 | _, err = db.Get(nil) 101 | require.Equal(t, errKeyEmpty, err) 102 | 103 | _, err = db.Has([]byte{}) 104 | require.Equal(t, errKeyEmpty, err) 105 | _, err = db.Has(nil) 106 | require.Equal(t, errKeyEmpty, err) 107 | 108 | err = db.Set([]byte{}, []byte{0x01}) 109 | require.Equal(t, errKeyEmpty, err) 110 | err = db.Set(nil, []byte{0x01}) 111 | require.Equal(t, errKeyEmpty, err) 112 | err = db.SetSync([]byte{}, []byte{0x01}) 113 | require.Equal(t, errKeyEmpty, err) 114 | err = db.SetSync(nil, []byte{0x01}) 115 | require.Equal(t, errKeyEmpty, err) 116 | 117 | err = db.Delete([]byte{}) 118 | require.Equal(t, errKeyEmpty, err) 119 | err = db.Delete(nil) 120 | require.Equal(t, errKeyEmpty, err) 121 | err = db.DeleteSync([]byte{}) 122 | require.Equal(t, errKeyEmpty, err) 123 | err = db.DeleteSync(nil) 124 | require.Equal(t, errKeyEmpty, err) 125 | 126 | // Setting a nil value should error, but an empty value is fine. 
127 | err = db.Set([]byte("x"), nil) 128 | require.Equal(t, errValueNil, err) 129 | err = db.SetSync([]byte("x"), nil) 130 | require.Equal(t, errValueNil, err) 131 | 132 | err = db.Set([]byte("x"), []byte{}) 133 | require.NoError(t, err) 134 | err = db.SetSync([]byte("x"), []byte{}) 135 | require.NoError(t, err) 136 | value, err = db.Get([]byte("x")) 137 | require.NoError(t, err) 138 | require.Equal(t, []byte{}, value) 139 | 140 | err = db.Compact(nil, nil) 141 | if strings.Contains(string(backend), "pebbledb") { 142 | // In pebble the start and end will be the same so 143 | // we expect an error 144 | require.Error(t, err) 145 | } 146 | 147 | err = db.Set([]byte("y"), []byte{}) 148 | require.NoError(t, err) 149 | 150 | err = db.Compact(nil, nil) 151 | require.NoError(t, err) 152 | 153 | if strings.Contains(string(backend), "pebbledb") { 154 | // When running the test the folder can't be cleaned up and there 155 | // is a panic on removing the tmp testing directories. 156 | // The compaction process is slow to release the lock on the folder. 157 | time.Sleep(time.Second * 5) 158 | } 159 | } 160 | 161 | func TestBackendsGetSetDelete(t *testing.T) { 162 | for dbType := range backends { 163 | t.Run(string(dbType), func(t *testing.T) { 164 | testBackendGetSetDelete(t, dbType) 165 | }) 166 | } 167 | } 168 | 169 | func TestDBIterator(t *testing.T) { 170 | for dbType := range backends { 171 | t.Run(string(dbType), func(t *testing.T) { 172 | testDBIterator(t, dbType) 173 | }) 174 | } 175 | } 176 | 177 | func testDBIterator(t *testing.T, backend BackendType) { 178 | t.Helper() 179 | 180 | name := fmt.Sprintf("test_%x", randStr(12)) 181 | dir := os.TempDir() 182 | db, err := NewDB(name, backend, dir) 183 | require.NoError(t, err) 184 | defer cleanupDBDir(dir, name) 185 | 186 | for i := 0; i < 10; i++ { 187 | if i != 6 { // but skip 6. 
188 | err := db.Set(int642Bytes(int64(i)), []byte{}) 189 | require.NoError(t, err) 190 | } 191 | } 192 | 193 | // Blank iterator keys should error 194 | _, err = db.Iterator([]byte{}, nil) 195 | require.Equal(t, errKeyEmpty, err) 196 | _, err = db.Iterator(nil, []byte{}) 197 | require.Equal(t, errKeyEmpty, err) 198 | _, err = db.ReverseIterator([]byte{}, nil) 199 | require.Equal(t, errKeyEmpty, err) 200 | _, err = db.ReverseIterator(nil, []byte{}) 201 | require.Equal(t, errKeyEmpty, err) 202 | 203 | itr, err := db.Iterator(nil, nil) 204 | require.NoError(t, err) 205 | verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") 206 | 207 | ritr, err := db.ReverseIterator(nil, nil) 208 | require.NoError(t, err) 209 | verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") 210 | 211 | itr, err = db.Iterator(nil, int642Bytes(0)) 212 | require.NoError(t, err) 213 | verifyIterator(t, itr, []int64(nil), "forward iterator to 0") 214 | 215 | ritr, err = db.ReverseIterator(int642Bytes(10), nil) 216 | require.NoError(t, err) 217 | verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)") 218 | 219 | itr, err = db.Iterator(int642Bytes(0), nil) 220 | require.NoError(t, err) 221 | verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") 222 | 223 | itr, err = db.Iterator(int642Bytes(1), nil) 224 | require.NoError(t, err) 225 | verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") 226 | 227 | ritr, err = db.ReverseIterator(nil, int642Bytes(10)) 228 | require.NoError(t, err) 229 | verifyIterator(t, ritr, 230 | []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") 231 | 232 | ritr, err = db.ReverseIterator(nil, int642Bytes(9)) 233 | require.NoError(t, err) 234 | verifyIterator(t, ritr, 235 | []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") 236 | 237 | ritr, err = db.ReverseIterator(nil, int642Bytes(8)) 238 | require.NoError(t, err) 239 | 
verifyIterator(t, ritr, 240 | []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") 241 | 242 | itr, err = db.Iterator(int642Bytes(5), int642Bytes(6)) 243 | require.NoError(t, err) 244 | verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6") 245 | 246 | itr, err = db.Iterator(int642Bytes(5), int642Bytes(7)) 247 | require.NoError(t, err) 248 | verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7") 249 | 250 | itr, err = db.Iterator(int642Bytes(5), int642Bytes(8)) 251 | require.NoError(t, err) 252 | verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8") 253 | 254 | itr, err = db.Iterator(int642Bytes(6), int642Bytes(7)) 255 | require.NoError(t, err) 256 | verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7") 257 | 258 | itr, err = db.Iterator(int642Bytes(6), int642Bytes(8)) 259 | require.NoError(t, err) 260 | verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8") 261 | 262 | itr, err = db.Iterator(int642Bytes(7), int642Bytes(8)) 263 | require.NoError(t, err) 264 | verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8") 265 | 266 | ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5)) 267 | require.NoError(t, err) 268 | verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4") 269 | 270 | ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6)) 271 | require.NoError(t, err) 272 | verifyIterator(t, ritr, 273 | []int64{5, 4}, "reverse iterator from 6 (ex) to 4") 274 | 275 | ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7)) 276 | require.NoError(t, err) 277 | verifyIterator(t, ritr, 278 | []int64{5, 4}, "reverse iterator from 7 (ex) to 4") 279 | 280 | ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6)) 281 | require.NoError(t, err) 282 | verifyIterator(t, ritr, []int64{5}, "reverse iterator from 6 (ex) to 5") 283 | 284 | ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7)) 285 | require.NoError(t, err) 286 | verifyIterator(t, 
ritr, []int64{5}, "reverse iterator from 7 (ex) to 5") 287 | 288 | ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7)) 289 | require.NoError(t, err) 290 | verifyIterator(t, ritr, 291 | []int64(nil), "reverse iterator from 7 (ex) to 6") 292 | 293 | ritr, err = db.ReverseIterator(int642Bytes(10), nil) 294 | require.NoError(t, err) 295 | verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10") 296 | 297 | ritr, err = db.ReverseIterator(int642Bytes(6), nil) 298 | require.NoError(t, err) 299 | verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6") 300 | 301 | ritr, err = db.ReverseIterator(int642Bytes(5), nil) 302 | require.NoError(t, err) 303 | verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5") 304 | 305 | ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9)) 306 | require.NoError(t, err) 307 | verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8") 308 | 309 | ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4)) 310 | require.NoError(t, err) 311 | verifyIterator(t, ritr, 312 | []int64{3, 2}, "reverse iterator from 4 (ex) to 2") 313 | 314 | ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2)) 315 | require.NoError(t, err) 316 | verifyIterator(t, ritr, 317 | []int64(nil), "reverse iterator from 2 (ex) to 4") 318 | 319 | // Ensure that the iterators don't panic with an empty database. 
320 | dir2, err := os.MkdirTemp("", "tm-db-test") 321 | require.NoError(t, err) 322 | name = fmt.Sprintf("test_%x", randStr(12)) 323 | db2, err := NewDB(name, backend, dir2) 324 | require.NoError(t, err) 325 | defer cleanupDBDir(dir2, name) 326 | 327 | itr, err = db2.Iterator(nil, nil) 328 | require.NoError(t, err) 329 | verifyIterator(t, itr, nil, "forward iterator with empty db") 330 | 331 | ritr, err = db2.ReverseIterator(nil, nil) 332 | require.NoError(t, err) 333 | verifyIterator(t, ritr, nil, "reverse iterator with empty db") 334 | } 335 | 336 | func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) { 337 | t.Helper() 338 | 339 | var list []int64 340 | for itr.Valid() { 341 | key := make([]byte, len(itr.Key())) 342 | copy(key, itr.Key()) 343 | list = append(list, bytes2Int64(key)) 344 | itr.Next() 345 | } 346 | assert.Equal(t, expected, list, msg) 347 | } 348 | 349 | func TestDBBatch(t *testing.T) { 350 | for dbType := range backends { 351 | t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { 352 | testDBBatch(t, dbType) 353 | }) 354 | } 355 | } 356 | 357 | func testDBBatch(t *testing.T, backend BackendType) { 358 | t.Helper() 359 | 360 | name := fmt.Sprintf("test_%x", randStr(12)) 361 | dir := os.TempDir() 362 | db, err := NewDB(name, backend, dir) 363 | require.NoError(t, err) 364 | defer cleanupDBDir(dir, name) 365 | 366 | // create a new batch, and some items - they should not be visible until we write 367 | batch := db.NewBatch() 368 | require.NoError(t, batch.Set([]byte("a"), []byte{1})) 369 | require.NoError(t, batch.Set([]byte("b"), []byte{2})) 370 | require.NoError(t, batch.Set([]byte("c"), []byte{3})) 371 | assertKeyValues(t, db, map[string][]byte{}) 372 | 373 | err = batch.Write() 374 | require.NoError(t, err) 375 | assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) 376 | 377 | // trying to modify or rewrite a written batch should error, but closing it should work 378 | require.Error(t, 
batch.Set([]byte("a"), []byte{9})) 379 | require.Error(t, batch.Delete([]byte("a"))) 380 | require.Error(t, batch.Write()) 381 | require.Error(t, batch.WriteSync()) 382 | require.NoError(t, batch.Close()) 383 | 384 | // batches should write changes in order 385 | batch = db.NewBatch() 386 | require.NoError(t, batch.Delete([]byte("a"))) 387 | require.NoError(t, batch.Set([]byte("a"), []byte{1})) 388 | require.NoError(t, batch.Set([]byte("b"), []byte{1})) 389 | require.NoError(t, batch.Set([]byte("b"), []byte{2})) 390 | require.NoError(t, batch.Set([]byte("c"), []byte{3})) 391 | require.NoError(t, batch.Delete([]byte("c"))) 392 | require.NoError(t, batch.Write()) 393 | require.NoError(t, batch.Close()) 394 | assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) 395 | 396 | // empty and nil keys, as well as nil values, should be disallowed 397 | batch = db.NewBatch() 398 | err = batch.Set([]byte{}, []byte{0x01}) 399 | require.Equal(t, errKeyEmpty, err) 400 | err = batch.Set(nil, []byte{0x01}) 401 | require.Equal(t, errKeyEmpty, err) 402 | err = batch.Set([]byte("a"), nil) 403 | require.Equal(t, errValueNil, err) 404 | 405 | err = batch.Delete([]byte{}) 406 | require.Equal(t, errKeyEmpty, err) 407 | err = batch.Delete(nil) 408 | require.Equal(t, errKeyEmpty, err) 409 | 410 | err = batch.Close() 411 | require.NoError(t, err) 412 | 413 | // it should be possible to write an empty batch 414 | batch = db.NewBatch() 415 | err = batch.Write() 416 | require.NoError(t, err) 417 | assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) 418 | 419 | // it should be possible to close an empty batch, and to re-close a closed batch 420 | batch = db.NewBatch() 421 | if err := batch.Close(); err != nil { 422 | require.NoError(t, err) 423 | } 424 | if err := batch.Close(); err != nil { 425 | require.NoError(t, err) 426 | } 427 | 428 | // all other operations on a closed batch should error 429 | require.Error(t, batch.Set([]byte("a"), []byte{9})) 430 | require.Error(t, 
batch.Delete([]byte("a"))) 431 | require.Error(t, batch.Write()) 432 | require.Error(t, batch.WriteSync()) 433 | } 434 | 435 | func assertKeyValues(t *testing.T, db DB, expect map[string][]byte) { 436 | t.Helper() 437 | iter, err := db.Iterator(nil, nil) 438 | require.NoError(t, err) 439 | defer iter.Close() 440 | 441 | actual := make(map[string][]byte) 442 | for ; iter.Valid(); iter.Next() { 443 | require.NoError(t, iter.Error()) 444 | 445 | value := make([]byte, len(iter.Value())) 446 | copy(value, iter.Value()) 447 | actual[string(iter.Key())] = value 448 | } 449 | 450 | assert.Equal(t, expect, actual) 451 | } 452 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 3 | github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= 4 | github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= 5 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 6 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 7 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 8 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 9 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 10 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 11 | github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 12 | github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= 13 | 
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= 14 | github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= 15 | github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= 16 | github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= 17 | github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= 18 | github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= 19 | github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= 20 | github.com/cockroachdb/pebble v1.1.4 h1:5II1uEP4MyHLDnsrbv/EZ36arcb9Mxg3n+owhZ3GrG8= 21 | github.com/cockroachdb/pebble v1.1.4/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= 22 | github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= 23 | github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= 24 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= 25 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= 26 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 27 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 28 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 29 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 30 | github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps= 31 | github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA= 32 | 
github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= 33 | github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= 34 | github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= 35 | github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 36 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 37 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 38 | github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 39 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 40 | github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 41 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 42 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 43 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 44 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 45 | github.com/getsentry/sentry-go v0.31.1 h1:ELVc0h7gwyhnXHDouXkhqTFSO5oslsRDk0++eyE0KJ4= 46 | github.com/getsentry/sentry-go v0.31.1/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= 47 | github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= 48 | github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= 49 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 50 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 51 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 52 | github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 53 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= 54 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= 55 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 56 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 57 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 58 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 59 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 60 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 61 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 62 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 63 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 64 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 65 | github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 66 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 67 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 68 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 69 | github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= 70 | github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= 71 | 
github.com/google/flatbuffers v25.1.24+incompatible h1:4wPqL3K7GzBd1CwyhSd3usxLKOaJN/AC6puCca6Jm7o= 72 | github.com/google/flatbuffers v25.1.24+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= 73 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 74 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 75 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 76 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 77 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 78 | github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 79 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 80 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 81 | github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 82 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 83 | github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= 84 | github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= 85 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 86 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 87 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 88 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 89 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 90 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 91 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 92 | github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 93 | github.com/linxGnu/grocksdb v1.9.8 h1:vOIKv9/+HKiqJAElJIEYv3ZLcihRxyP7Suu/Mu8Dxjs= 94 | github.com/linxGnu/grocksdb v1.9.8/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= 95 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 96 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 97 | github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= 98 | github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 99 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 100 | github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 101 | github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= 102 | github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= 103 | github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= 104 | github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= 105 | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 106 | github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= 107 | github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= 108 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 109 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 110 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 111 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 112 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 113 | github.com/prometheus/client_golang v1.20.5 
h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= 114 | github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= 115 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 116 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 117 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 118 | github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= 119 | github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= 120 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 121 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 122 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 123 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 124 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 125 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 126 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 127 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 128 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 129 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 130 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 131 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 132 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 133 | github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca 
h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= 134 | github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= 135 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 136 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 137 | go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= 138 | go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= 139 | go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= 140 | go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= 141 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 142 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 143 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 144 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 145 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 146 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 147 | golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= 148 | golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= 149 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 150 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 151 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 152 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 153 | golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 154 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 155 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 156 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 157 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 158 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 159 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 160 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 161 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 162 | golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 163 | golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 164 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 165 | golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 166 | golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= 167 | golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= 168 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 169 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 170 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 171 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 172 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 173 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 174 | golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= 175 | golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 176 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 177 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 178 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 179 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 180 | golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 181 | golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 182 | golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 183 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 184 | golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 185 | golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 186 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 187 | golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= 188 | golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 189 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 190 | golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 191 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 192 | golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= 193 | golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= 194 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 195 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 196 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 197 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 198 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 199 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 200 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 201 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 202 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 203 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 204 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 205 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 206 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 207 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 208 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 209 | 
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 210 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 211 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 212 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 213 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 214 | google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 215 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 216 | google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= 217 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 218 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 219 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 220 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 221 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 222 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 223 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 224 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 225 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 226 | google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= 227 | google.golang.org/protobuf v1.36.4/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 228 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 229 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 230 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 231 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 232 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 233 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 234 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 235 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 236 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 237 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 238 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 239 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 240 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 241 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 242 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 243 | --------------------------------------------------------------------------------