├── .github └── workflows │ ├── audit.yml │ └── rust.yml ├── .gitignore ├── .gitmodules ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── MAINTAINERSHIP.md ├── README.md ├── code-of-conduct.md ├── librocksdb-sys ├── Cargo.toml ├── Makefile ├── README.md ├── build.rs ├── build_version.cc ├── gen_statistics.bash ├── rocksdb_lib_sources.txt ├── snappy-stubs-public.h ├── src │ ├── lib.rs │ └── test.rs └── tests │ └── ffi.rs ├── rust-toolchain.toml ├── src ├── backup.rs ├── checkpoint.rs ├── column_family.rs ├── compaction_filter.rs ├── compaction_filter_factory.rs ├── comparator.rs ├── db.rs ├── db_iterator.rs ├── db_options.rs ├── db_pinnable_slice.rs ├── env.rs ├── ffi_util.rs ├── iter_range.rs ├── lib.rs ├── merge_operator.rs ├── perf.rs ├── prop_name.rs ├── properties.rs ├── slice_transform.rs ├── snapshot.rs ├── sst_file_writer.rs ├── statistics.rs ├── statistics_enum_histogram.rs ├── statistics_enum_ticker.rs ├── transactions │ ├── mod.rs │ ├── optimistic_transaction_db.rs │ ├── options.rs │ ├── transaction.rs │ └── transaction_db.rs └── write_batch.rs └── tests ├── fail ├── checkpoint_outlive_db.rs ├── checkpoint_outlive_db.stderr ├── iterator_outlive_db.rs ├── iterator_outlive_db.stderr ├── open_with_multiple_refs_as_single_threaded.rs ├── open_with_multiple_refs_as_single_threaded.stderr ├── snapshot_outlive_db.rs ├── snapshot_outlive_db.stderr ├── snapshot_outlive_transaction.rs ├── snapshot_outlive_transaction.stderr ├── snapshot_outlive_transaction_db.rs ├── snapshot_outlive_transaction_db.stderr ├── transaction_outlive_transaction_db.rs └── transaction_outlive_transaction_db.stderr ├── test_backup.rs ├── test_checkpoint.rs ├── test_column_family.rs ├── test_compactionfilter.rs ├── test_comparator.rs ├── test_db.rs ├── test_iterator.rs ├── test_merge_operator.rs ├── test_multithreaded.rs ├── test_optimistic_transaction_db.rs ├── test_optimistic_transaction_db_memory_usage.rs ├── test_pinnable_slice.rs ├── test_property.rs ├── 
test_raw_iterator.rs ├── test_rocksdb_options.rs ├── test_slice_transform.rs ├── test_sst_file_writer.rs ├── test_transaction_db.rs ├── test_transaction_db_memory_usage.rs ├── test_transaction_db_property.rs ├── test_write_batch.rs └── util └── mod.rs /.github/workflows/audit.yml: -------------------------------------------------------------------------------- 1 | name: Security Audit 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | paths: 8 | - "**/Cargo.toml" 9 | schedule: 10 | - cron: "0 2 * * *" # run at 2 AM UTC 11 | 12 | permissions: 13 | contents: read 14 | 15 | jobs: 16 | security-audit: 17 | permissions: 18 | checks: write # for rustsec/audit-check to create check 19 | contents: read # for actions/checkout to fetch code 20 | issues: write # for rustsec/audit-check to create issues 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v4 24 | 25 | - name: Install rust 26 | uses: dtolnay/rust-toolchain@stable 27 | 28 | - name: use stable rust 29 | run: rustup override set stable 30 | 31 | - name: Generate Cargo.lock 32 | run: cargo generate-lockfile 33 | 34 | - name: Audit Check 35 | # https://github.com/rustsec/audit-check/issues/2 36 | uses: rustsec/audit-check@master 37 | with: 38 | token: ${{ secrets.GITHUB_TOKEN }} 39 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: RocksDB CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | style: 7 | name: Rustfmt 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout sources 11 | uses: actions/checkout@v4 12 | 13 | - name: Install rust 14 | uses: dtolnay/rust-toolchain@stable 15 | with: 16 | components: rustfmt 17 | 18 | - name: use stable rust 19 | run: rustup override set stable 20 | 21 | - name: generate Cargo.lock 22 | run: cargo generate-lockfile 23 | 24 | - name: Run rustfmt 25 | run: cargo fmt --all -- --check 26 | 27 | doc-check: 
28 | name: Rustdoc-check 29 | runs-on: ubuntu-latest 30 | steps: 31 | - name: Checkout sources 32 | uses: actions/checkout@v4 33 | 34 | - name: Install rust 35 | uses: dtolnay/rust-toolchain@stable 36 | with: 37 | components: rust-docs 38 | 39 | - uses: Swatinem/rust-cache@v2 40 | with: 41 | cache-on-failure: true 42 | 43 | - name: Run cargo rustdoc 44 | run: cargo rustdoc -- -D warnings 45 | 46 | doctest: # doctest are no supported in cargo nextest yet. https://github.com/nextest-rs/nextest/issues/16 47 | name: Doctests 48 | runs-on: ubuntu-latest 49 | steps: 50 | - name: Checkout sources 51 | uses: actions/checkout@v4 52 | 53 | - name: Install rust 54 | uses: dtolnay/rust-toolchain@stable 55 | 56 | - uses: Swatinem/rust-cache@v2 57 | with: 58 | cache-on-failure: true 59 | 60 | - name: Run doctests 61 | run: cargo test --doc 62 | 63 | clippy: 64 | name: Clippy 65 | runs-on: ubuntu-latest 66 | steps: 67 | - name: Checkout sources 68 | uses: actions/checkout@v4 69 | 70 | - name: Install rust 71 | uses: dtolnay/rust-toolchain@stable 72 | with: 73 | components: clippy 74 | 75 | - name: use stable rust 76 | run: rustup override set stable 77 | 78 | - name: generate Cargo.lock 79 | run: cargo generate-lockfile 80 | 81 | - uses: Swatinem/rust-cache@v2 82 | with: 83 | cache-on-failure: true 84 | - name: Install dependencies 85 | run: sudo apt-get update && sudo apt-get install -y liburing-dev pkg-config 86 | - name: Set PKG_CONFIG_PATH 87 | run: echo "PKG_CONFIG_PATH=/usr/lib/x86_64-linux-gnu/pkgconfig" >> $GITHUB_ENV 88 | 89 | - name: Run clippy 90 | run: | 91 | cargo clippy --all-targets --features \ 92 | "jemalloc \ 93 | io-uring \ 94 | valgrind \ 95 | mt_static \ 96 | rtti \ 97 | multi-threaded-cf \ 98 | malloc-usable-size \ 99 | zstd-static-linking-only \ 100 | serde1" \ 101 | -- -D warnings 102 | 103 | audit: 104 | name: Security audit 105 | runs-on: ubuntu-latest 106 | steps: 107 | - uses: actions/checkout@v4 108 | 109 | - name: Install rust 110 | uses: 
dtolnay/rust-toolchain@stable 111 | 112 | - name: use stable rust 113 | run: rustup override set stable 114 | 115 | - name: generate Cargo.lock 116 | run: cargo generate-lockfile 117 | 118 | # https://github.com/rustsec/audit-check/issues/2 119 | - uses: rustsec/audit-check@master 120 | with: 121 | token: ${{ secrets.GITHUB_TOKEN }} 122 | 123 | test: 124 | name: ${{ matrix.build }} 125 | runs-on: ${{ matrix.os }} 126 | strategy: 127 | matrix: 128 | build: [Linux, Linux-ARM, macOS, Windows] 129 | include: 130 | - build: Linux 131 | os: ubuntu-latest 132 | - build: Linux-ARM 133 | os: ubuntu-24.04-arm 134 | - build: macOS 135 | os: macos-latest 136 | - build: Windows 137 | os: windows-latest 138 | steps: 139 | - name: Checkout sources 140 | uses: actions/checkout@v4 141 | 142 | - name: Install rust 143 | uses: dtolnay/rust-toolchain@stable 144 | 145 | - uses: Swatinem/rust-cache@v2 146 | with: 147 | cache-on-failure: true 148 | - uses: taiki-e/install-action@nextest 149 | 150 | - name: Remove msys64 # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll 151 | if: runner.os == 'Windows' 152 | run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse 153 | 154 | - name: Install dependencies 155 | if: runner.os == 'Windows' 156 | run: choco install llvm -y 157 | 158 | - name: Mark working directory as read-only 159 | if: runner.os == 'Linux' 160 | run: | 161 | mkdir -p target 162 | touch Cargo.lock 163 | git submodule update --init --recursive 164 | chmod -R a-w . 165 | chmod -R a+w target Cargo.lock 166 | 167 | - name: Run rocksdb tests 168 | run: | 169 | cargo nextest run --all 170 | cargo nextest run --all --features multi-threaded-cf 171 | 172 | - name: Mark working directory as writable 173 | if: runner.os == 'Linux' 174 | run: chmod -R a+w . 
175 | 176 | - name: Free disk space 177 | run: cargo clean 178 | 179 | - name: Mark working directory as read-only 180 | if: runner.os == 'Linux' 181 | run: | 182 | mkdir -p target 183 | touch Cargo.lock 184 | chmod -R a-w . 185 | chmod -R a+w target Cargo.lock 186 | 187 | - name: Run rocksdb tests (jemalloc) 188 | if: runner.os != 'Windows' 189 | run: cargo nextest run --all --features jemalloc 190 | 191 | - name: Mark working directory as writable 192 | if: runner.os == 'Linux' 193 | run: chmod -R a+w . 194 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swo 2 | target 3 | Cargo.lock 4 | *.orig 5 | *.bk 6 | *rlib 7 | tags 8 | path 9 | .DS_Store 10 | .idea 11 | .vscode 12 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rocksdb_sys/snappy"] 2 | path = librocksdb-sys/snappy 3 | url = https://github.com/google/snappy.git 4 | [submodule "rocksdb_sys/rocksdb"] 5 | path = librocksdb-sys/rocksdb 6 | url = https://github.com/facebook/rocksdb.git 7 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to rust-rocksdb 2 | Thank you for taking an interest in the project, and contributing to it - it's appreciated! 
There are several ways you can contribute: 3 | - [Bug Reports](#bug-reports) 4 | - [Feature Requests](#feature-requests) 5 | - [Documentation](#documentation) 6 | - [Discussion](#discussion) 7 | - [Pull Requests](#pull-requests) 8 | 9 | **Please note all contributors must adhere to the [code of conduct](code-of-conduct.md).** 10 | 11 | ## Bug Reports 12 | [bug-reports]: #bug-reports 13 | - **Ensure the bug has not already been reported** - this can be done with a quick search of the [existing open issues](https://github.com/rust-rocksdb/rust-rocksdb/issues?q=is%3Aissue+is%3Aopen+). 14 | - **Ensure the bug applies to the Rust wrapper, and not the underlying library** - bugs in the RocksDB library should be [reported upstream](https://github.com/facebook/rocksdb/issues). 15 | - When [creating an issue](https://github.com/rust-rocksdb/rust-rocksdb/issues/new) please try to: 16 | - **Use a clear and descriptive title** to identify the issue 17 | - **Provide enough context** to accurately summarize the issue. Not every issue will need detailed steps to recreate, example code, stack traces, etc. - use your own judgment on what information would be helpful to anyone working on the issue. It's easier for someone to skim over too much context, than stop and wait for a response when context is missing. 18 | 19 | ## Feature Requests 20 | [feature-requests]: #feature-requests 21 | Feature requests will primarily come in the form of ergonomics involving the Rust language, or in bringing the wrapper into parity with the library's API. Please create an issue with any relevant information. 22 | 23 | ## Documentation 24 | [documentation]: #documentation 25 | Much of the documentation should mirror or reference the library's [documentation](https://github.com/facebook/rocksdb/wiki). If the wrapper or its exposed functions are missing documentation or contain inaccurate information please submit a pull request. 
26 | 27 | ## Discussion 28 | [discussion]: #discussion 29 | Discussion around the design and development of the wrapper primarily occurs within issues and pull requests. Don't be afraid to participate if you have questions, concerns, insight, or advice. 30 | 31 | ## Pull Requests 32 | [pull-requests]: #pull-requests 33 | Pull requests are welcome, and when contributing code, the author agrees to do so under the project's [licensing](https://github.com/rust-rocksdb/rust-rocksdb/blob/master/LICENSE) - Apache 2.0 as of the time of this writing. The maintainers greatly appreciate PRs that follow open-source contribution best practices: 34 | 1. Fork this repository to your personal GitHub account. 35 | 1. Create a branch that includes your changes, **keep changes isolated and granular**. 36 | 1. Include any relevant documentation and/or tests. Write [documentation tests](https://doc.rust-lang.org/rustdoc/documentation-tests.html) when relevant. 37 | 1. Apply `cargo fmt` to ensure consistent formatting. 38 | 1. [Create a pull request](https://help.github.com/en/articles/about-pull-requests) against this repository. 39 | 40 | For pull requests that would benefit from discussion and review earlier in the development process, use a [Draft Pull Request](https://help.github.com/en/articles/about-pull-requests#draft-pull-requests). 
41 | 42 | ## Additional Resources 43 | Some useful information for working with RocksDB in Rust: 44 | - [RocksDB library primary site](https://rocksdb.org) 45 | - [RocksDB library GitHub repository](https://github.com/facebook/rocksdb) 46 | - [RocksDB library documentation](https://github.com/facebook/rocksdb/wiki) 47 | - [Rust's Foreign Function Interface (ffi)](https://doc.rust-lang.org/nomicon/ffi.html) 48 | 49 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-rocksdb" 3 | description = "Rust wrapper for Facebook's RocksDB embeddable database" 4 | version = "0.41.0" 5 | edition = "2021" 6 | rust-version = "1.81.0" 7 | authors = [ 8 | "Tyler Neely ", 9 | "David Greenberg ", 10 | ] 11 | repository = "https://github.com/zaidoon1/rust-rocksdb" 12 | license = "Apache-2.0" 13 | categories = ["database"] 14 | keywords = ["database", "embedded", "LSM-tree", "persistence"] 15 | homepage = "https://github.com/zaidoon1/rust-rocksdb" 16 | exclude = [".gitignore", ".travis.yml", "deploy.sh", "test/**/*"] 17 | 18 | [workspace] 19 | members = ["librocksdb-sys"] 20 | 21 | [features] 22 | default = ["snappy", "lz4", "zstd", "zlib", "bzip2", "bindgen-runtime"] 23 | jemalloc = ["rust-librocksdb-sys/jemalloc"] 24 | io-uring = ["rust-librocksdb-sys/io-uring"] 25 | valgrind = [] 26 | snappy = ["rust-librocksdb-sys/snappy"] 27 | lz4 = ["rust-librocksdb-sys/lz4"] 28 | zstd = ["rust-librocksdb-sys/zstd"] 29 | zlib = ["rust-librocksdb-sys/zlib"] 30 | bzip2 = ["rust-librocksdb-sys/bzip2"] 31 | rtti = ["rust-librocksdb-sys/rtti"] 32 | mt_static = ["rust-librocksdb-sys/mt_static"] 33 | multi-threaded-cf = [] 34 | serde1 = ["serde"] 35 | malloc-usable-size = ["rust-librocksdb-sys/malloc-usable-size"] 36 | zstd-static-linking-only = ["rust-librocksdb-sys/zstd-static-linking-only"] 37 | bindgen-runtime = 
["rust-librocksdb-sys/bindgen-runtime"] 38 | bindgen-static = ["rust-librocksdb-sys/bindgen-static"] 39 | lto = ["rust-librocksdb-sys/lto"] 40 | 41 | [dependencies] 42 | libc = "0.2" 43 | rust-librocksdb-sys = { path = "librocksdb-sys", version = "0.37.0", default-features = false, features = [ 44 | "static", 45 | ] } 46 | serde = { version = "1", features = ["derive"], optional = true } 47 | 48 | [dev-dependencies] 49 | trybuild = "1" 50 | tempfile = "3" 51 | pretty_assertions = "1" 52 | bincode = "1" 53 | serde = { version = "1", features = ["derive"] } 54 | -------------------------------------------------------------------------------- /MAINTAINERSHIP.md: -------------------------------------------------------------------------------- 1 | Maintainers agree to operate under this set of guidelines: 2 | 3 | #### Authority 4 | 5 | Maintainers are trusted to close issues, merge pull requests, and publish crates to cargo. 6 | 7 | #### Categories of Work 8 | 9 | 0. Minor 10 | * updating the changelog 11 | * requires no approval 12 | 1. Normal 13 | * librocksdb-sys updates 14 | * API tracking code in the rocksdb crate that does not change control flow 15 | * breaking changes due to removed functionality in rocksdb 16 | * require 1 approval from another maintainer. if no maintainer is able to be reached for 2 weeks, then progress may be made anyway 17 | * patch (and post-1.0, minor) releases to crates.io that contain only the above work 18 | * on each update of submodule rocksdb, run `make -C librocksdb-sys gen_statistics` 19 | 2. 
Major 20 | * breaking API changes that are not direct consequences of underlying rocksdb changes 21 | * refactoring, which should generally only be done for clearly functional reasons like to aid in the completion of a specific task 22 | * require consensus among all maintainers unless 2 weeks have gone by without full participation 23 | * if 2 weeks have gone by after seeking feedback, and at least one other maintainer has participated, and all participating maintainers are in agreement, then progress may be made anyway 24 | * if action is absolutely urgent, an organization owner may act as a tiebreaker if specifically requested to do so and they agree that making a controversial decision is worth the risk. This should hopefully never occur. 25 | 26 | If any maintainer thinks an issue is major, it is major. 27 | 28 | #### Changelog Maintenance 29 | 30 | * If you are the one who merges a PR that includes an externally-visible change, please describe the change in the changelog and merge it in. 31 | 32 | #### Releasing, Publishing 33 | 34 | * Releases adhere to [semver](https://semver.org/) 35 | * Releases should have an associated tag pushed to this repo. 36 | * The changelog serves as a sort of logical staging area for releases 37 | * If a breaking API change happens, and the changelog has not advanced to a new major version, we roll the changelog to a new major version and open an issue to release the previous patch (and post-1.0, minor) version. 38 | * Before rolling to a new major version, it would be nice to release a non-breaking point release to let current users silently take advantage of any improvements 39 | 40 | #### Becoming a Maintainer 41 | 42 | * If you have a history of participation in this repo, agree to these rules, and wish to take on maintainership responsibilities, you may open an issue. If an owner agrees, they will add you to the maintainer group and the crate publishers team. 
43 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | rust-rocksdb 2 | ============ 3 | 4 | [![RocksDB build](https://github.com/zaidoon1/rust-rocksdb/actions/workflows/rust.yml/badge.svg?branch=master)](https://github.com/zaidoon1/rust-rocksdb/actions/workflows/rust.yml) 5 | [![crates.io](https://img.shields.io/crates/v/rust-rocksdb.svg)](https://crates.io/crates/rust-rocksdb) 6 | [![documentation](https://docs.rs/rust-rocksdb/badge.svg)](https://docs.rs/rust-rocksdb) 7 | [![license](https://img.shields.io/crates/l/rust-rocksdb.svg)](https://github.com/zaidoon1/rust-rocksdb/blob/master/LICENSE) 8 | ![rust 1.81.0 required](https://img.shields.io/badge/rust-1.81.0-blue.svg?label=MSRV) 9 | 10 | ![GitHub commits (since latest release)](https://img.shields.io/github/commits-since/zaidoon1/rust-rocksdb/latest.svg) 11 | 12 | ## Why The Fork 13 | 14 | The original [rust-rocksdb repo](https://github.com/rust-rocksdb/rust-rocksdb) is amazing and I appreciate all the work that has 15 | been done, however, for my use case, I need to stay up to date with the latest 16 | rocksdb releases as well as the latest rust releases so in order to to keep 17 | everything up to date, I decided to fork the original repo so I can have total 18 | control and be able to create regular releases. 19 | 20 | ## Requirements 21 | 22 | - Clang and LLVM 23 | 24 | ## Rust version 25 | 26 | rust-rocksdb keeps a rolling MSRV (minimum supported Rust version) policy of 6 months. This means we will accept PRs that upgrade the MSRV as long as the new Rust version used is at least 6 months old. 27 | 28 | Our current MSRV is 1.81.0 29 | 30 | ## Contributing 31 | 32 | Feedback and pull requests are welcome! If a particular feature of RocksDB is 33 | important to you, please let me know by opening an issue, and I'll 34 | prioritize it. 
35 | 36 | ## Usage 37 | 38 | This binding is statically linked with a specific version of RocksDB. If you 39 | want to build it yourself, make sure you've also cloned the RocksDB and 40 | compression submodules: 41 | 42 | ```shell 43 | git submodule update --init --recursive 44 | ``` 45 | 46 | ## Features 47 | 48 | ### Compression Support 49 | 50 | By default, support for [Snappy](https://github.com/google/snappy), 51 | [LZ4](https://github.com/lz4/lz4), [Zstd](https://github.com/facebook/zstd), 52 | [Zlib](https://zlib.net), and [Bzip2](http://www.bzip.org) compression 53 | is enabled through crate features. If support for all of these compression 54 | algorithms is not needed, default features can be disabled and specific 55 | compression algorithms can be enabled. For example, to enable only LZ4 56 | compression support, make these changes to your Cargo.toml: 57 | 58 | ```toml 59 | [dependencies.rocksdb] 60 | default-features = false 61 | features = ["lz4"] 62 | ``` 63 | 64 | ### Multithreaded ColumnFamily alternation 65 | 66 | RocksDB allows column families to be created and dropped 67 | from multiple threads concurrently, but this crate doesn't allow it by default 68 | for compatibility. If you need to modify column families concurrently, enable 69 | the crate feature `multi-threaded-cf`, which makes this binding's 70 | data structures use `RwLock` by default. Alternatively, you can directly create 71 | `DBWithThreadMode` without enabling the crate feature. 72 | 73 | ### Switch between /MT or /MD run time library (Only for Windows) 74 | 75 | The feature `mt_static` will request the library to be built with [/MT](https://learn.microsoft.com/en-us/cpp/build/reference/md-mt-ld-use-run-time-library?view=msvc-170) 76 | flag, which results in library using the static version of the run-time library. 
77 | *This can be useful in case there's a conflict in the dependency tree between different 78 | run-time versions.* 79 | 80 | ### Jemalloc 81 | 82 | The feature `jemalloc` will enable the 83 | `unprefixed_malloc_on_supported_platforms` feature of `tikv-jemalloc-sys`, 84 | hooking the actual malloc and free, so jemalloc is used to allocate memory. On 85 | Supported platforms such as Linux, Rocksdb will also be properly informed that 86 | Jemalloc is enabled so that it can apply internal optimizations gated behind 87 | Jemalloc being enabled. On [unsupported 88 | platforms](https://github.com/zaidoon1/rust-rocksdb/blob/master/librocksdb-sys/build.rs#L4-L7), 89 | Rocksdb won't be properly 90 | informed that Jemalloc is being used so some internal optimizations are skipped 91 | BUT you will still get the benefits of Jemalloc memory allocation. Note that by 92 | default, Rust uses libc malloc on Linux which is known to have more memory 93 | fragmentation than Jemalloc especially with Rocksdb. See [github 94 | issue](https://github.com/facebook/rocksdb/issues/12364) for more information. 95 | In general, I highly suggest enabling Jemalloc unless there is a specific reason 96 | not to (your system doesn't support it, etc.) 97 | 98 | ### Malloc Usable Size 99 | 100 | The feature `malloc-usable-size` will inform Rocksdb that malloc_usable_size is 101 | supported by the platform and is necessary if you want to use the 102 | `optimize_filters_for_memory` rocksdb feature as this feature is gated behind 103 | malloc_usable_size being available. See 104 | [rocksdb](https://github.com/facebook/rocksdb/blob/v9.0.0/include/rocksdb/table.h#L401-L434) 105 | for more information on the feature. 106 | 107 | ### ZSTD Static Linking Only 108 | 109 | The feature `zstd-static-linking-only` in combination with enabling zstd 110 | compression will cause Rocksdb to hold digested dictionaries in block cache to 111 | save repetitive deserialization overhead. 
This saves a lot of CPU for read-heavy 112 | workloads. This feature is gated behind a flag in Rocksdb because one of the 113 | digested dictionary APIs used is marked as experimental. However, this feature 114 | is still used at facebook in production per the [Preset Dictionary Compression 115 | Blog Post](https://rocksdb.org/blog/2021/05/31/dictionary-compression.html). 116 | 117 | ### Switch between static and dynamic linking for bindgen (features `bindgen-static` and `bindgen-runtime`) 118 | 119 | The feature `bindgen-runtime` will enable the `runtime` feature of bindgen, which dynamically 120 | links to libclang. This is suitable for most platforms, and is enabled by default. 121 | 122 | The feature `bindgen-static` will enable the `static` feature of bindgen, which statically 123 | links to libclang. This is suitable for musllinux platforms, such as Alpine linux. 124 | To build on Alpine linux for example, make these changes to your Cargo.toml: 125 | 126 | ```toml 127 | [dependencies.rocksdb] 128 | default-features = false 129 | features = ["bindgen-static", "snappy", "lz4", "zstd", "zlib", "bzip2"] 130 | ``` 131 | 132 | Notice that `runtime` and `static` features are mutually exclusive, and won't compile if both are enabled. 133 | 134 | ### LTO 135 | 136 | Enable the `lto` feature to enable link-time optimization. It will compile rocksdb with `-flto` flag. This feature is disabled by default. 137 | 138 | > [!IMPORTANT] 139 | > You must use clang as `CC`. Eg. `CC=/usr/bin/clang CXX=/usr/bin/clang++`. Clang llvm version must be the same as the one used by rust compiler. 140 | > On the rust side you should use `RUSTFLAGS="-Clinker-plugin-lto -Clinker=clang -Clink-arg=-fuse-ld=lld"`. 141 | 142 | Check the [Rust documentation](https://doc.rust-lang.org/rustc/linker-plugin-lto.html) for more information. 
143 | -------------------------------------------------------------------------------- /code-of-conduct.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at t@jujit.su. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /librocksdb-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-librocksdb-sys" 3 | version = "0.37.0+10.2.1" 4 | edition = "2021" 5 | rust-version = "1.81.0" 6 | authors = [ 7 | "Karl Hobley ", 8 | "Arkadiy Paronyan ", 9 | ] 10 | license = "MIT/Apache-2.0/BSD-3-Clause" 11 | description = "Native bindings to librocksdb" 12 | readme = "README.md" 13 | repository = "https://github.com/zaidoon1/rust-rocksdb" 14 | keywords = ["bindings", "ffi", "rocksdb"] 15 | categories = ["api-bindings", "database", "external-ffi-bindings"] 16 | links = "rocksdb" 17 | 18 | exclude = [ 19 | ".gitignore", 20 | "*.yml", 21 | "snappy/testdata", 22 | "*/doc", 23 | "*/docs", 24 | "*/examples", 25 | "*/tests", 26 | "tests", 27 | "*.md", 28 | ] 29 | 30 | [features] 31 | default = ["static", "bindgen/runtime"] 32 | jemalloc = ["tikv-jemalloc-sys"] 33 | static = ["libz-sys?/static", "bzip2-sys?/static"] 34 | bindgen-runtime = ["bindgen/runtime"] 35 | bindgen-static = ["bindgen/static"] 36 | mt_static = [] 37 | io-uring = ["pkg-config"] 38 | snappy = [] 39 | lz4 = ["lz4-sys"] 40 | zstd = ["zstd-sys"] 41 | zlib = ["libz-sys"] 42 | bzip2 = ["bzip2-sys"] 43 | rtti = [] 44 | malloc-usable-size = [] 45 | zstd-static-linking-only = [] 46 | lto = [] 47 | 48 | [dependencies] 49 | libc = "0.2" 50 | tikv-jemalloc-sys = { version = "0.6", features = [ 51 | "unprefixed_malloc_on_supported_platforms", 52 | ], optional = true } 53 | lz4-sys = { version = "1.11", 
optional = true } 54 | zstd-sys = { version = "2.0", features = [ 55 | "zdict_builder", 56 | "experimental", 57 | ], optional = true } 58 | libz-sys = { version = "1.1", default-features = false, optional = true } 59 | bzip2-sys = { version = "0.1", default-features = false, optional = true } 60 | 61 | [dev-dependencies] 62 | const-str = "0.6" 63 | uuid = { version = "1", features = ["v4"] } 64 | 65 | [build-dependencies] 66 | cc = { version = "1.2", features = ["parallel"] } 67 | bindgen = { version = "0.69", default-features = false } 68 | glob = "0.3" 69 | pkg-config = { version = "0.3", optional = true } 70 | libc = "0.2" 71 | -------------------------------------------------------------------------------- /librocksdb-sys/Makefile: -------------------------------------------------------------------------------- 1 | include rocksdb/src.mk 2 | 3 | rocksdb_lib_sources.txt: rocksdb/src.mk 4 | @echo -n "${LIB_SOURCES}" | tr ' ' '\n' > rocksdb_lib_sources.txt 5 | 6 | gen_lib_sources: rocksdb_lib_sources.txt 7 | 8 | gen_statistics: rocksdb/monitoring/statistics.cc 9 | bash gen_statistics.bash $< Ticker > ../src/statistics_enum_ticker.rs 10 | bash gen_statistics.bash $< Histogram > ../src/statistics_enum_histogram.rs 11 | -------------------------------------------------------------------------------- /librocksdb-sys/README.md: -------------------------------------------------------------------------------- 1 | # RocksDB bindings 2 | 3 | Low-level bindings to [RocksDB's](https://github.com/facebook/rocksdb) C API. 4 | 5 | Based on the original work by Tyler Neely 6 | https://github.com/rust-rocksdb/rust-rocksdb 7 | and Jeremy Fitzhardinge 8 | https://github.com/jsgf/rocksdb-sys 9 | 10 | ### Version 11 | 12 | The librocksdb-sys version number is in the format `X.Y.Z+RX.RY.RZ`, where 13 | `X.Y.Z` is the version of this crate and follows SemVer conventions, while 14 | `RX.RY.RZ` is the version of the bundled rocksdb. 
15 | -------------------------------------------------------------------------------- /librocksdb-sys/build_version.cc: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | 3 | #include 4 | 5 | #include "rocksdb/version.h" 6 | #include "rocksdb/utilities/object_registry.h" 7 | #include "util/string_util.h" 8 | 9 | // The build script may replace these values with real values based 10 | // on whether or not GIT is available and the platform settings 11 | static const std::string rocksdb_build_git_sha = "4b2122578e475cb88aef4dcf152cccd5dbf51060"; 12 | static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v10.2.1"; 13 | #define HAS_GIT_CHANGES 0 14 | #if HAS_GIT_CHANGES == 0 15 | // If HAS_GIT_CHANGES is 0, the GIT date is used. 16 | // Use the time the branch/tag was last modified 17 | static const std::string rocksdb_build_date = "rocksdb_build_date:2025-04-24 22:12:38"; 18 | #else 19 | // If HAS_GIT_CHANGES is > 0, the branch/tag has modifications. 20 | // Use the time the build was created. 21 | static const std::string rocksdb_build_date = "rocksdb_build_date:2025-04-24 22:12:38"; 22 | #endif 23 | 24 | std::unordered_map ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {}; 25 | 26 | extern "C" bool RocksDbIOUringEnable() 27 | { 28 | return true; 29 | } 30 | 31 | namespace ROCKSDB_NAMESPACE 32 | { 33 | static void AddProperty(std::unordered_map *props, const std::string &name) 34 | { 35 | size_t colon = name.find(":"); 36 | if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) 37 | { 38 | // If we found a "@:", then this property was a build-time substitution that failed. 
Skip it 39 | size_t at = name.find("@", colon); 40 | if (at != colon + 1) 41 | { 42 | // Everything before the colon is the name, after is the value 43 | (*props)[name.substr(0, colon)] = name.substr(colon + 1); 44 | } 45 | } 46 | } 47 | 48 | static std::unordered_map *LoadPropertiesSet() 49 | { 50 | auto *properties = new std::unordered_map(); 51 | AddProperty(properties, rocksdb_build_git_sha); 52 | AddProperty(properties, rocksdb_build_git_tag); 53 | AddProperty(properties, rocksdb_build_date); 54 | return properties; 55 | } 56 | 57 | const std::unordered_map &GetRocksBuildProperties() 58 | { 59 | static std::unique_ptr> props(LoadPropertiesSet()); 60 | return *props; 61 | } 62 | 63 | std::string GetRocksVersionAsString(bool with_patch) 64 | { 65 | std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR); 66 | if (with_patch) 67 | { 68 | return version + "." + std::to_string(ROCKSDB_PATCH); 69 | } 70 | else 71 | { 72 | return version; 73 | } 74 | } 75 | 76 | std::string GetRocksBuildInfoAsString(const std::string &program, bool verbose) 77 | { 78 | std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true); 79 | if (verbose) 80 | { 81 | for (const auto &it : GetRocksBuildProperties()) 82 | { 83 | info.append("\n "); 84 | info.append(it.first); 85 | info.append(": "); 86 | info.append(it.second); 87 | } 88 | } 89 | return info; 90 | } 91 | } // namespace ROCKSDB_NAMESPACE 92 | -------------------------------------------------------------------------------- /librocksdb-sys/gen_statistics.bash: -------------------------------------------------------------------------------- 1 | File=$1 2 | EnumType=$2 3 | echo "// **** DO NOT modify this file! ****" 4 | echo "// This file is generated by cmd:" 5 | echo "// gen_statistics.bash $@" 6 | echo "iterable_named_enum! 
{" 7 | echo " #[derive(Debug, Copy, Clone, PartialEq, Eq)]" 8 | echo " #[repr(u32)]" 9 | echo " pub enum $EnumType {" 10 | perl -n0e '/const std::vector> '$EnumType'sNameMap.*?\};/sm && print $&' $File | 11 | perl -n0e ' 12 | while (/\{\s*([\w_]+)\s*,.*?"(.*?)"/smg) { 13 | $val = $2; 14 | $name = lc($1); 15 | $name =~ s/(\b|_)(\w)/\U$2/g; 16 | print " $name(\"$val\"),\n" 17 | }' 18 | echo " }" 19 | echo "}" 20 | -------------------------------------------------------------------------------- /librocksdb-sys/snappy-stubs-public.h: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Google Inc. All Rights Reserved. 2 | // Author: sesse@google.com (Steinar H. Gunderson) 3 | // 4 | // Redistribution and use in source and binary forms, with or without 5 | // modification, are permitted provided that the following conditions are 6 | // met: 7 | // 8 | // * Redistributions of source code must retain the above copyright 9 | // notice, this list of conditions and the following disclaimer. 10 | // * Redistributions in binary form must reproduce the above 11 | // copyright notice, this list of conditions and the following disclaimer 12 | // in the documentation and/or other materials provided with the 13 | // distribution. 14 | // * Neither the name of Google Inc. nor the names of its 15 | // contributors may be used to endorse or promote products derived from 16 | // this software without specific prior written permission. 17 | // 18 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 22 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | // 30 | // Various type stubs for the open-source version of Snappy. 31 | // 32 | // This file cannot include config.h, as it is included from snappy.h, 33 | // which is a public header. Instead, snappy-stubs-public.h is generated by 34 | // from snappy-stubs-public.h.in at configure time. 35 | 36 | #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ 37 | #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ 38 | 39 | #define HAVE_STDINT_H 1 40 | 41 | 42 | #include 43 | 44 | #include 45 | 46 | //#include 47 | 48 | #if defined(_MSC_VER) 49 | #define ssize_t intptr_t 50 | #endif 51 | 52 | #define SNAPPY_MAJOR 1 53 | #define SNAPPY_MINOR 1 54 | #define SNAPPY_PATCHLEVEL 3 55 | #define SNAPPY_VERSION \ 56 | ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL) 57 | 58 | #include 59 | 60 | namespace snappy { 61 | 62 | #if HAVE_STDINT_H 63 | typedef int8_t int8; 64 | typedef uint8_t uint8; 65 | typedef int16_t int16; 66 | typedef uint16_t uint16; 67 | typedef int32_t int32; 68 | typedef uint32_t uint32; 69 | typedef int64_t int64; 70 | typedef uint64_t uint64; 71 | #else 72 | typedef signed char int8; 73 | typedef unsigned char uint8; 74 | typedef short int16; 75 | typedef unsigned short uint16; 76 | typedef int int32; 77 | typedef unsigned int uint32; 78 | typedef long long int64; 79 | typedef unsigned long long uint64; 80 | #endif 81 | 82 | typedef std::string string; 83 | 84 | 
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ 85 | TypeName(const TypeName&); \ 86 | void operator=(const TypeName&) 87 | 88 | // Windows does not have an iovec type, yet the concept is universally useful. 89 | // It is simple to define it ourselves, so we put it inside our own namespace. 90 | struct iovec { 91 | void* iov_base; 92 | size_t iov_len; 93 | }; 94 | 95 | } // namespace snappy 96 | 97 | #endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ 98 | -------------------------------------------------------------------------------- /librocksdb-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely, Alex Regueiro 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #![allow(clippy::all)] 16 | #![allow(non_camel_case_types)] 17 | #![allow(non_snake_case)] 18 | #![allow(non_upper_case_globals)] 19 | 20 | // Ensure the libraries are linked in, despite it not being used directly 21 | #[cfg(feature = "bzip2")] 22 | extern crate bzip2_sys; 23 | #[cfg(feature = "zlib")] 24 | extern crate libz_sys; 25 | #[cfg(feature = "lz4")] 26 | extern crate lz4_sys; 27 | #[cfg(feature = "zstd")] 28 | extern crate zstd_sys; 29 | 30 | include!(concat!(env!("OUT_DIR"), "/bindings.rs")); 31 | -------------------------------------------------------------------------------- /librocksdb-sys/src/test.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | // 15 | 16 | use libc::*; 17 | use std::ffi::{CStr, CString}; 18 | use std::ptr; 19 | use std::str; 20 | 21 | use super::*; 22 | 23 | pub fn error_message(ptr: *const i8) -> String { 24 | let c_str = unsafe { CStr::from_ptr(ptr as *const _) }; 25 | let s = str::from_utf8(c_str.to_bytes()).unwrap().to_owned(); 26 | unsafe { 27 | free(ptr as *mut c_void); 28 | } 29 | s 30 | } 31 | 32 | #[test] 33 | fn internal() { 34 | unsafe { 35 | let opts = rocksdb_options_create(); 36 | assert!(!opts.is_null()); 37 | 38 | rocksdb_options_increase_parallelism(opts, 0); 39 | rocksdb_options_optimize_level_style_compaction(opts, 0); 40 | rocksdb_options_set_create_if_missing(opts, u8::from(true)); 41 | 42 | let rustpath = "_rust_rocksdb_internaltest"; 43 | let cpath = CString::new(rustpath).unwrap(); 44 | 45 | let mut err: *mut c_char = ptr::null_mut(); 46 | let err_ptr: *mut *mut c_char = &mut err; 47 | let db = rocksdb_open(opts, cpath.as_ptr() as *const _, err_ptr); 48 | if !err.is_null() { 49 | println!("failed to open rocksdb: {}", error_message(err)); 50 | } 51 | assert!(err.is_null()); 52 | 53 | let writeopts = rocksdb_writeoptions_create(); 54 | assert!(!writeopts.is_null()); 55 | 56 | let key = b"name\x00"; 57 | let val = b"spacejam\x00"; 58 | rocksdb_put( 59 | db, 60 | writeopts.clone(), 61 | key.as_ptr() as *const c_char, 62 | 4, 63 | val.as_ptr() as *const c_char, 64 | 8, 65 | err_ptr, 66 | ); 67 | rocksdb_writeoptions_destroy(writeopts); 68 | assert!(err.is_null()); 69 | 70 | let readopts = rocksdb_readoptions_create(); 71 | assert!(!readopts.is_null()); 72 | 73 | let mut val_len: size_t = 0; 74 | let val_len_ptr = &mut val_len as *mut size_t; 75 | rocksdb_get( 76 | db, 77 | readopts.clone(), 78 | key.as_ptr() as *const c_char, 79 | 4, 80 | val_len_ptr, 81 | err_ptr, 82 | ); 83 | rocksdb_readoptions_destroy(readopts); 84 | assert!(err.is_null()); 85 | rocksdb_close(db); 86 | rocksdb_destroy_db(opts, cpath.as_ptr() as *const _, err_ptr); 87 | assert!(err.is_null()); 
88 | } 89 | } 90 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.81.0" 3 | -------------------------------------------------------------------------------- /src/checkpoint.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Eugene P. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | //! Implementation of bindings to RocksDB Checkpoint[1] API 17 | //! 18 | //! [1]: https://github.com/facebook/rocksdb/wiki/Checkpoints 19 | 20 | use crate::{db::DBInner, ffi, ffi_util::to_cpath, DBCommon, Error, ThreadMode}; 21 | use std::{marker::PhantomData, path::Path}; 22 | 23 | /// Undocumented parameter for `ffi::rocksdb_checkpoint_create` function. Zero by default. 24 | const LOG_SIZE_FOR_FLUSH: u64 = 0_u64; 25 | 26 | /// Database's checkpoint object. 27 | /// Used to create checkpoints of the specified DB from time to time. 28 | pub struct Checkpoint<'db> { 29 | inner: *mut ffi::rocksdb_checkpoint_t, 30 | _db: PhantomData<&'db ()>, 31 | } 32 | 33 | impl<'db> Checkpoint<'db> { 34 | /// Creates new checkpoint object for specific DB. 35 | /// 36 | /// Does not actually produce checkpoints, call `.create_checkpoint()` method to produce 37 | /// a DB checkpoint. 
    pub fn new<T: ThreadMode, D: DBInner>(db: &'db DBCommon<T, D>) -> Result<Self, Error> {
        let checkpoint: *mut ffi::rocksdb_checkpoint_t;

        // SAFETY: `db.inner.inner()` yields the live rocksdb handle owned by
        // `db`; the checkpoint object created here is tied to that handle,
        // which is why the 'db lifetime is captured via PhantomData below.
        unsafe {
            checkpoint = ffi_try!(ffi::rocksdb_checkpoint_object_create(db.inner.inner()));
        }

        // Defensive check: ffi_try! surfaces errors reported through the C
        // API's char** out-parameter, but guard against a null handle being
        // returned without an accompanying error message.
        if checkpoint.is_null() {
            return Err(Error::new("Could not create checkpoint object.".to_owned()));
        }

        Ok(Self {
            inner: checkpoint,
            // Bind this checkpoint's lifetime to the database reference.
            _db: PhantomData,
        })
    }

    /// Creates new physical DB checkpoint in directory specified by `path`.
    pub fn create_checkpoint<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> {
        // Convert the Rust path into a NUL-terminated C string; fails on
        // paths that are not representable (e.g. interior NUL bytes).
        let cpath = to_cpath(path)?;
        // SAFETY: `self.inner` is the valid checkpoint object created in
        // `new()`; it is only destroyed in Drop, so it is live here.
        unsafe {
            ffi_try!(ffi::rocksdb_checkpoint_create(
                self.inner,
                cpath.as_ptr(),
                LOG_SIZE_FOR_FLUSH,
            ));
        }
        Ok(())
    }
}

impl Drop for Checkpoint<'_> {
    fn drop(&mut self) {
        // SAFETY: `self.inner` is owned exclusively by this Checkpoint, so
        // the destroy call runs exactly once per handle.
        unsafe {
            ffi::rocksdb_checkpoint_object_destroy(self.inner);
        }
    }
}
21 | /// 22 | /// The column family with this name is created implicitly whenever column 23 | /// families are used. 24 | pub const DEFAULT_COLUMN_FAMILY_NAME: &str = "default"; 25 | 26 | /// A descriptor for a RocksDB column family. 27 | /// 28 | /// A description of the column family, containing the name and `Options`. 29 | pub struct ColumnFamilyDescriptor { 30 | pub(crate) name: String, 31 | pub(crate) options: Options, 32 | pub(crate) ttl: ColumnFamilyTtl, 33 | } 34 | 35 | impl ColumnFamilyDescriptor { 36 | /// Create a new column family descriptor with the specified name and options. 37 | /// *WARNING*: 38 | /// Will use [`ColumnFamilyTtl::SameAsDb`] as ttl. 39 | pub fn new(name: S, options: Options) -> Self 40 | where 41 | S: Into, 42 | { 43 | Self { 44 | name: name.into(), 45 | options, 46 | ttl: ColumnFamilyTtl::SameAsDb, 47 | } 48 | } 49 | 50 | /// Create a new column family descriptor with the specified name, options, and ttl. 51 | /// *WARNING*: 52 | /// The ttl is applied only when DB is opened with [`crate::db::DB::open_with_ttl()`]. 53 | pub fn new_with_ttl(name: S, options: Options, ttl: ColumnFamilyTtl) -> Self 54 | where 55 | S: Into, 56 | { 57 | Self { 58 | name: name.into(), 59 | options, 60 | ttl, 61 | } 62 | } 63 | 64 | /// Sets ttl for the column family. It's applied only when DB is opened with 65 | /// [`crate::db::DB::open_with_ttl()`]. Changing ttl after DB is opened has no effect. 66 | pub fn set_ttl(&mut self, ttl: ColumnFamilyTtl) { 67 | self.ttl = ttl; 68 | } 69 | 70 | /// Get the name of the ColumnFamilyDescriptor. 71 | pub fn name(&self) -> &str { 72 | &self.name 73 | } 74 | 75 | pub fn ttl(&self) -> ColumnFamilyTtl { 76 | self.ttl 77 | } 78 | } 79 | 80 | #[derive(Debug, Clone, Copy, Default)] 81 | /// Specifies the TTL behavior for a column family. 
82 | /// 83 | pub enum ColumnFamilyTtl { 84 | /// Will internally set TTL to -1 (disabled) 85 | #[default] 86 | Disabled, 87 | /// Will set ttl to the specified duration 88 | Duration(Duration), 89 | /// Will use ttl specified at db open time 90 | SameAsDb, 91 | } 92 | 93 | /// An opaque type used to represent a column family. Returned from some functions, and used 94 | /// in others 95 | pub struct ColumnFamily { 96 | pub(crate) inner: *mut ffi::rocksdb_column_family_handle_t, 97 | } 98 | 99 | /// A specialized opaque type used to represent a column family by the [`MultiThreaded`] 100 | /// mode. Clone (and Copy) is derived to behave like `&ColumnFamily` (this is used for 101 | /// single-threaded mode). `Clone`/`Copy` is safe because this lifetime is bound to DB like 102 | /// iterators/snapshots. On top of it, this is as cheap and small as `&ColumnFamily` because 103 | /// this only has a single pointer-wide field. 104 | pub struct BoundColumnFamily<'a> { 105 | pub(crate) inner: *mut ffi::rocksdb_column_family_handle_t, 106 | pub(crate) multi_threaded_cfs: std::marker::PhantomData<&'a MultiThreaded>, 107 | } 108 | 109 | // internal struct which isn't exposed to public api. 110 | // but its memory will be exposed after transmute()-ing to BoundColumnFamily. 111 | // ColumnFamily's lifetime should be bound to DB. But, db holds cfs and cfs can't easily 112 | // self-reference DB as its lifetime due to rust's type system 113 | pub(crate) struct UnboundColumnFamily { 114 | pub(crate) inner: *mut ffi::rocksdb_column_family_handle_t, 115 | } 116 | 117 | impl UnboundColumnFamily { 118 | pub(crate) fn bound_column_family<'a>(self: Arc) -> Arc> { 119 | // SAFETY: the new BoundColumnFamily here just adding lifetime, 120 | // so that column family handle won't outlive db. 
        unsafe { Arc::from_raw(Arc::into_raw(self).cast()) }
    }
}

/// Releases a column family handle back to RocksDB.
///
/// Shared by all three owner types below so the destruction logic lives in
/// exactly one place.
fn destroy_handle(handle: *mut ffi::rocksdb_column_family_handle_t) {
    // SAFETY: This should be called only from various Drop::drop(), strictly keeping a 1-to-1
    // ownership to avoid double invocation to the rocksdb function with same handle.
    unsafe {
        ffi::rocksdb_column_family_handle_destroy(handle);
    }
}

impl Drop for ColumnFamily {
    fn drop(&mut self) {
        destroy_handle(self.inner);
    }
}

// these behaviors must be identical between BoundColumnFamily and UnboundColumnFamily
// due to the unsafe transmute() in bound_column_family()!
impl Drop for BoundColumnFamily<'_> {
    fn drop(&mut self) {
        destroy_handle(self.inner);
    }
}

impl Drop for UnboundColumnFamily {
    fn drop(&mut self) {
        destroy_handle(self.inner);
    }
}

/// Handy type alias to hide actual type difference to reference [`ColumnFamily`]
/// depending on the `multi-threaded-cf` crate feature.
155 | #[cfg(not(feature = "multi-threaded-cf"))] 156 | pub type ColumnFamilyRef<'a> = &'a ColumnFamily; 157 | 158 | #[cfg(feature = "multi-threaded-cf")] 159 | pub type ColumnFamilyRef<'a> = Arc>; 160 | 161 | /// Utility trait to accept both supported references to `ColumnFamily` 162 | /// (`&ColumnFamily` and `BoundColumnFamily`) 163 | pub trait AsColumnFamilyRef { 164 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t; 165 | } 166 | 167 | impl AsColumnFamilyRef for ColumnFamily { 168 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t { 169 | self.inner 170 | } 171 | } 172 | 173 | impl AsColumnFamilyRef for &'_ ColumnFamily { 174 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t { 175 | self.inner 176 | } 177 | } 178 | 179 | // Only implement for Arc-ed BoundColumnFamily as this tightly coupled and 180 | // implementation detail, considering use of std::mem::transmute. BoundColumnFamily 181 | // isn't expected to be used as naked. 182 | // Also, ColumnFamilyRef might not be Arc> depending crate 183 | // feature flags so, we can't use the type alias here. 184 | impl AsColumnFamilyRef for Arc> { 185 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t { 186 | self.inner 187 | } 188 | } 189 | 190 | unsafe impl Send for ColumnFamily {} 191 | unsafe impl Sync for ColumnFamily {} 192 | unsafe impl Send for UnboundColumnFamily {} 193 | unsafe impl Sync for UnboundColumnFamily {} 194 | unsafe impl Send for BoundColumnFamily<'_> {} 195 | unsafe impl Sync for BoundColumnFamily<'_> {} 196 | -------------------------------------------------------------------------------- /src/compaction_filter.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | use libc::{c_char, c_int, c_uchar, c_void, size_t}; 17 | use std::ffi::{CStr, CString}; 18 | use std::slice; 19 | 20 | /// Decision about how to handle compacting an object 21 | /// 22 | /// This is returned by a compaction filter callback. Depending 23 | /// on the value, the object may be kept, removed, or changed 24 | /// in the database during a compaction. 25 | pub enum Decision { 26 | /// Keep the old value 27 | Keep, 28 | /// Remove the object from the database 29 | Remove, 30 | /// Change the value for the key 31 | Change(&'static [u8]), 32 | } 33 | 34 | /// CompactionFilter allows an application to modify/delete a key-value at 35 | /// the time of compaction. 36 | pub trait CompactionFilter { 37 | /// The compaction process invokes this 38 | /// method for kv that is being compacted. The application can inspect 39 | /// the existing value of the key and make decision based on it. 40 | /// 41 | /// Key-Values that are results of merge operation during compaction are not 42 | /// passed into this function. Currently, when you have a mix of Put()s and 43 | /// Merge()s on a same key, we only guarantee to process the merge operands 44 | /// through the compaction filters. Put()s might be processed, or might not. 45 | /// 46 | /// When the value is to be preserved, the application has the option 47 | /// to modify the existing_value and pass it back through new_value. 48 | /// value_changed needs to be set to true in this case. 49 | /// 50 | /// Note that RocksDB snapshots (i.e. 
call GetSnapshot() API on a 51 | /// DB* object) will not guarantee to preserve the state of the DB with 52 | /// CompactionFilter. Data seen from a snapshot might disappear after a 53 | /// compaction finishes. If you use snapshots, think twice about whether you 54 | /// want to use compaction filter and whether you are using it in a safe way. 55 | /// 56 | /// If the CompactionFilter was created by a factory, then it will only ever 57 | /// be used by a single thread that is doing the compaction run, and this 58 | /// call does not need to be thread-safe. However, multiple filters may be 59 | /// in existence and operating concurrently. 60 | fn filter(&mut self, level: u32, key: &[u8], value: &[u8]) -> Decision; 61 | 62 | /// Returns a name that identifies this compaction filter. 63 | /// The name will be printed to LOG file on start up for diagnosis. 64 | fn name(&self) -> &CStr; 65 | } 66 | 67 | /// Function to filter compaction with. 68 | /// 69 | /// This function takes the level of compaction, the key, and the existing value 70 | /// and returns the decision about how to handle the Key-Value pair. 
71 | /// 72 | /// See [Options::set_compaction_filter][set_compaction_filter] for more details 73 | /// 74 | /// [set_compaction_filter]: ../struct.Options.html#method.set_compaction_filter 75 | pub trait CompactionFilterFn: FnMut(u32, &[u8], &[u8]) -> Decision {} 76 | impl CompactionFilterFn for F where F: FnMut(u32, &[u8], &[u8]) -> Decision + Send + 'static {} 77 | 78 | pub struct CompactionFilterCallback 79 | where 80 | F: CompactionFilterFn, 81 | { 82 | pub name: CString, 83 | pub filter_fn: F, 84 | } 85 | 86 | impl CompactionFilter for CompactionFilterCallback 87 | where 88 | F: CompactionFilterFn, 89 | { 90 | fn name(&self) -> &CStr { 91 | self.name.as_c_str() 92 | } 93 | 94 | fn filter(&mut self, level: u32, key: &[u8], value: &[u8]) -> Decision { 95 | (self.filter_fn)(level, key, value) 96 | } 97 | } 98 | 99 | pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) 100 | where 101 | F: CompactionFilter, 102 | { 103 | drop(Box::from_raw(raw_cb as *mut F)); 104 | } 105 | 106 | pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char 107 | where 108 | F: CompactionFilter, 109 | { 110 | let cb = &*(raw_cb as *mut F); 111 | cb.name().as_ptr() 112 | } 113 | 114 | pub unsafe extern "C" fn filter_callback( 115 | raw_cb: *mut c_void, 116 | level: c_int, 117 | raw_key: *const c_char, 118 | key_length: size_t, 119 | existing_value: *const c_char, 120 | value_length: size_t, 121 | new_value: *mut *mut c_char, 122 | new_value_length: *mut size_t, 123 | value_changed: *mut c_uchar, 124 | ) -> c_uchar 125 | where 126 | F: CompactionFilter, 127 | { 128 | use self::Decision::{Change, Keep, Remove}; 129 | 130 | let cb = &mut *(raw_cb as *mut F); 131 | let key = slice::from_raw_parts(raw_key as *const u8, key_length); 132 | let oldval = slice::from_raw_parts(existing_value as *const u8, value_length); 133 | let result = cb.filter(level as u32, key, oldval); 134 | match result { 135 | Keep => 0, 136 | Remove => 1, 137 | Change(newval) => { 138 | 
*new_value = newval.as_ptr() as *mut c_char; 139 | *new_value_length = newval.len() as size_t; 140 | *value_changed = 1_u8; 141 | 0 142 | } 143 | } 144 | } 145 | 146 | #[cfg(test)] 147 | #[allow(unused_variables)] 148 | fn test_filter(level: u32, key: &[u8], value: &[u8]) -> Decision { 149 | use self::Decision::{Change, Keep, Remove}; 150 | match key.first() { 151 | Some(&b'_') => Remove, 152 | Some(&b'%') => Change(b"secret"), 153 | _ => Keep, 154 | } 155 | } 156 | 157 | #[test] 158 | fn compaction_filter_test() { 159 | use crate::{Options, DB}; 160 | 161 | let tempdir = tempfile::Builder::new() 162 | .prefix("_rust_rocksdb_filter_test") 163 | .tempdir() 164 | .expect("Failed to create temporary path for the _rust_rocksdb_filter_test"); 165 | let path = tempdir.path(); 166 | let mut opts = Options::default(); 167 | opts.create_if_missing(true); 168 | opts.set_compaction_filter("test", test_filter); 169 | { 170 | let db = DB::open(&opts, path).unwrap(); 171 | let _r = db.put(b"k1", b"a"); 172 | let _r = db.put(b"_k", b"b"); 173 | let _r = db.put(b"%k", b"c"); 174 | db.compact_range(None::<&[u8]>, None::<&[u8]>); 175 | assert_eq!(&*db.get(b"k1").unwrap().unwrap(), b"a"); 176 | assert!(db.get(b"_k").unwrap().is_none()); 177 | assert_eq!(&*db.get(b"%k").unwrap().unwrap(), b"secret"); 178 | } 179 | let result = DB::destroy(&opts, path); 180 | assert!(result.is_ok()); 181 | } 182 | -------------------------------------------------------------------------------- /src/compaction_filter_factory.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::CStr; 2 | 3 | use libc::{self, c_char, c_void}; 4 | 5 | use crate::{ 6 | compaction_filter::{self, CompactionFilter}, 7 | ffi, 8 | }; 9 | 10 | /// Each compaction will create a new CompactionFilter allowing the 11 | /// application to know about different compactions. 
12 | /// 13 | /// See [compaction_filter::CompactionFilter][CompactionFilter] and 14 | /// [Options::set_compaction_filter_factory][set_compaction_filter_factory] 15 | /// for more details 16 | /// 17 | /// [CompactionFilter]: ../compaction_filter/trait.CompactionFilter.html 18 | /// [set_compaction_filter_factory]: ../struct.Options.html#method.set_compaction_filter_factory 19 | pub trait CompactionFilterFactory { 20 | type Filter: CompactionFilter; 21 | 22 | /// Returns a CompactionFilter for the compaction process 23 | fn create(&mut self, context: CompactionFilterContext) -> Self::Filter; 24 | 25 | /// Returns a name that identifies this compaction filter factory. 26 | fn name(&self) -> &CStr; 27 | } 28 | 29 | pub unsafe extern "C" fn destructor_callback(raw_self: *mut c_void) 30 | where 31 | F: CompactionFilterFactory, 32 | { 33 | drop(Box::from_raw(raw_self as *mut F)); 34 | } 35 | 36 | pub unsafe extern "C" fn name_callback(raw_self: *mut c_void) -> *const c_char 37 | where 38 | F: CompactionFilterFactory, 39 | { 40 | let self_ = &*(raw_self.cast_const() as *const F); 41 | self_.name().as_ptr() 42 | } 43 | 44 | /// Context information of a compaction run 45 | pub struct CompactionFilterContext { 46 | /// Does this compaction run include all data files 47 | pub is_full_compaction: bool, 48 | /// Is this compaction requested by the client (true), 49 | /// or is it occurring as an automatic compaction process 50 | pub is_manual_compaction: bool, 51 | } 52 | 53 | impl CompactionFilterContext { 54 | unsafe fn from_raw(ptr: *mut ffi::rocksdb_compactionfiltercontext_t) -> Self { 55 | let is_full_compaction = ffi::rocksdb_compactionfiltercontext_is_full_compaction(ptr) != 0; 56 | let is_manual_compaction = 57 | ffi::rocksdb_compactionfiltercontext_is_manual_compaction(ptr) != 0; 58 | 59 | Self { 60 | is_full_compaction, 61 | is_manual_compaction, 62 | } 63 | } 64 | } 65 | 66 | pub unsafe extern "C" fn create_compaction_filter_callback( 67 | raw_self: *mut c_void, 68 
| context: *mut ffi::rocksdb_compactionfiltercontext_t, 69 | ) -> *mut ffi::rocksdb_compactionfilter_t 70 | where 71 | F: CompactionFilterFactory, 72 | { 73 | let self_ = &mut *(raw_self as *mut F); 74 | let context = CompactionFilterContext::from_raw(context); 75 | let filter = Box::new(self_.create(context)); 76 | 77 | let filter_ptr = Box::into_raw(filter); 78 | 79 | ffi::rocksdb_compactionfilter_create( 80 | filter_ptr as *mut c_void, 81 | Some(compaction_filter::destructor_callback::), 82 | Some(compaction_filter::filter_callback::), 83 | Some(compaction_filter::name_callback::), 84 | ) 85 | } 86 | 87 | #[cfg(test)] 88 | mod tests { 89 | use super::*; 90 | use crate::compaction_filter::Decision; 91 | use crate::{Options, DB}; 92 | use std::ffi::CString; 93 | 94 | struct CountFilter(u16, CString); 95 | impl CompactionFilter for CountFilter { 96 | fn filter(&mut self, _level: u32, _key: &[u8], _value: &[u8]) -> crate::CompactionDecision { 97 | self.0 += 1; 98 | if self.0 > 2 { 99 | Decision::Remove 100 | } else { 101 | Decision::Keep 102 | } 103 | } 104 | 105 | fn name(&self) -> &CStr { 106 | &self.1 107 | } 108 | } 109 | 110 | struct TestFactory(CString); 111 | impl CompactionFilterFactory for TestFactory { 112 | type Filter = CountFilter; 113 | 114 | fn create(&mut self, _context: CompactionFilterContext) -> Self::Filter { 115 | CountFilter(0, CString::new("CountFilter").unwrap()) 116 | } 117 | 118 | fn name(&self) -> &CStr { 119 | &self.0 120 | } 121 | } 122 | 123 | #[test] 124 | fn compaction_filter_factory_test() { 125 | let tempdir = tempfile::Builder::new() 126 | .prefix("_rust_rocksdb_filter_factory_test") 127 | .tempdir() 128 | .expect("Failed to create temporary path for the _rust_rocksdb_filter_factory_test."); 129 | let path = tempdir.path(); 130 | let mut opts = Options::default(); 131 | opts.create_if_missing(true); 132 | opts.set_compaction_filter_factory(TestFactory(CString::new("TestFactory").unwrap())); 133 | { 134 | let db = DB::open(&opts, 
path).unwrap(); 135 | let _r = db.put(b"k1", b"a"); 136 | let _r = db.put(b"_rk", b"b"); 137 | let _r = db.put(b"%k", b"c"); 138 | db.compact_range(None::<&[u8]>, None::<&[u8]>); 139 | assert_eq!(db.get(b"%k1").unwrap(), None); 140 | } 141 | let result = DB::destroy(&opts, path); 142 | assert!(result.is_ok()); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/comparator.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | // 15 | 16 | use libc::{c_char, c_int, c_uchar, c_void, size_t}; 17 | use std::cmp::Ordering; 18 | use std::ffi::CString; 19 | use std::slice; 20 | 21 | pub type CompareFn = dyn Fn(&[u8], &[u8]) -> Ordering; 22 | 23 | pub type CompareTsFn = dyn Fn(&[u8], &[u8]) -> Ordering; 24 | 25 | pub type CompareWithoutTsFn = dyn Fn(&[u8], bool, &[u8], bool) -> Ordering; 26 | 27 | pub struct ComparatorCallback { 28 | pub name: CString, 29 | pub compare_fn: Box, 30 | } 31 | 32 | impl ComparatorCallback { 33 | pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) { 34 | drop(Box::from_raw(raw_cb as *mut Self)); 35 | } 36 | 37 | pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char { 38 | let cb: &mut Self = &mut *(raw_cb as *mut Self); 39 | let ptr = cb.name.as_ptr(); 40 | ptr as *const c_char 41 | } 42 | 43 | pub unsafe extern "C" fn compare_callback( 44 | raw_cb: *mut c_void, 45 | a_raw: *const c_char, 46 | a_len: size_t, 47 | b_raw: *const c_char, 48 | b_len: size_t, 49 | ) -> c_int { 50 | let cb: &mut Self = &mut *(raw_cb as *mut Self); 51 | let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len); 52 | let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len); 53 | (cb.compare_fn)(a, b) as c_int 54 | } 55 | } 56 | 57 | pub struct ComparatorWithTsCallback { 58 | pub name: CString, 59 | pub compare_fn: Box, 60 | pub compare_ts_fn: Box, 61 | pub compare_without_ts_fn: Box, 62 | } 63 | 64 | impl ComparatorWithTsCallback { 65 | pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) { 66 | drop(Box::from_raw(raw_cb as *mut Self)); 67 | } 68 | 69 | pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char { 70 | let cb: &mut Self = &mut *(raw_cb as *mut Self); 71 | let ptr = cb.name.as_ptr(); 72 | ptr as *const c_char 73 | } 74 | 75 | pub unsafe extern "C" fn compare_callback( 76 | raw_cb: *mut c_void, 77 | a_raw: *const c_char, 78 | a_len: size_t, 79 | b_raw: *const c_char, 80 | b_len: size_t, 81 | 
) -> c_int { 82 | let cb: &mut Self = &mut *(raw_cb as *mut Self); 83 | let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len); 84 | let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len); 85 | (cb.compare_fn)(a, b) as c_int 86 | } 87 | 88 | pub unsafe extern "C" fn compare_ts_callback( 89 | raw_cb: *mut c_void, 90 | a_ts_raw: *const c_char, 91 | a_ts_len: size_t, 92 | b_ts_raw: *const c_char, 93 | b_ts_len: size_t, 94 | ) -> c_int { 95 | let cb: &mut Self = &mut *(raw_cb as *mut Self); 96 | let a_ts: &[u8] = slice::from_raw_parts(a_ts_raw as *const u8, a_ts_len); 97 | let b_ts: &[u8] = slice::from_raw_parts(b_ts_raw as *const u8, b_ts_len); 98 | (cb.compare_ts_fn)(a_ts, b_ts) as c_int 99 | } 100 | 101 | pub unsafe extern "C" fn compare_without_ts_callback( 102 | raw_cb: *mut c_void, 103 | a_raw: *const c_char, 104 | a_len: size_t, 105 | a_has_ts_raw: c_uchar, 106 | b_raw: *const c_char, 107 | b_len: size_t, 108 | b_has_ts_raw: c_uchar, 109 | ) -> c_int { 110 | let cb: &mut Self = &mut *(raw_cb as *mut Self); 111 | let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len); 112 | let a_has_ts = a_has_ts_raw != 0; 113 | let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len); 114 | let b_has_ts = b_has_ts_raw != 0; 115 | (cb.compare_without_ts_fn)(a, a_has_ts, b, b_has_ts) as c_int 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/db_pinnable_slice.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use crate::{ffi, DB}; 16 | use core::ops::Deref; 17 | use libc::size_t; 18 | use std::marker::PhantomData; 19 | use std::slice; 20 | 21 | /// Wrapper around RocksDB PinnableSlice struct. 22 | /// 23 | /// With a pinnable slice, we can directly leverage in-memory data within 24 | /// RocksDB to avoid unnecessary memory copies. The struct here wraps the 25 | /// returned raw pointer and ensures proper finalization work. 26 | pub struct DBPinnableSlice<'a> { 27 | ptr: *mut ffi::rocksdb_pinnableslice_t, 28 | db: PhantomData<&'a DB>, 29 | } 30 | 31 | unsafe impl Send for DBPinnableSlice<'_> {} 32 | unsafe impl Sync for DBPinnableSlice<'_> {} 33 | 34 | impl AsRef<[u8]> for DBPinnableSlice<'_> { 35 | fn as_ref(&self) -> &[u8] { 36 | // Implement this via Deref so as not to repeat ourselves 37 | self 38 | } 39 | } 40 | 41 | impl Deref for DBPinnableSlice<'_> { 42 | type Target = [u8]; 43 | 44 | fn deref(&self) -> &[u8] { 45 | unsafe { 46 | let mut val_len: size_t = 0; 47 | let val = ffi::rocksdb_pinnableslice_value(self.ptr, &mut val_len) as *mut u8; 48 | slice::from_raw_parts(val, val_len) 49 | } 50 | } 51 | } 52 | 53 | impl Drop for DBPinnableSlice<'_> { 54 | fn drop(&mut self) { 55 | unsafe { 56 | ffi::rocksdb_pinnableslice_destroy(self.ptr); 57 | } 58 | } 59 | } 60 | 61 | impl DBPinnableSlice<'_> { 62 | /// Used to wrap a PinnableSlice from rocksdb to avoid unnecessary memcpy 63 | /// 64 | /// # Unsafe 65 | /// Requires that the pointer must be generated by rocksdb_get_pinned 66 | pub(crate) unsafe fn 
from_c(ptr: *mut ffi::rocksdb_pinnableslice_t) -> Self { 67 | Self { 68 | ptr, 69 | db: PhantomData, 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/env.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use libc::{self, c_int}; 4 | 5 | use crate::{ffi, Error}; 6 | 7 | /// An Env is an interface used by the rocksdb implementation to access 8 | /// operating system functionality like the filesystem etc. Callers 9 | /// may wish to provide a custom Env object when opening a database to 10 | /// get fine gain control; e.g., to rate limit file system operations. 11 | /// 12 | /// All Env implementations are safe for concurrent access from 13 | /// multiple threads without any external synchronization. 14 | /// 15 | /// Note: currently, C API behinds C++ API for various settings. 16 | /// See also: `rocksdb/include/env.h` 17 | #[derive(Clone)] 18 | pub struct Env(pub(crate) Arc); 19 | 20 | pub(crate) struct EnvWrapper { 21 | pub(crate) inner: *mut ffi::rocksdb_env_t, 22 | } 23 | 24 | impl Drop for EnvWrapper { 25 | fn drop(&mut self) { 26 | unsafe { 27 | ffi::rocksdb_env_destroy(self.inner); 28 | } 29 | } 30 | } 31 | 32 | impl Env { 33 | /// Returns default env 34 | pub fn new() -> Result { 35 | let env = unsafe { ffi::rocksdb_create_default_env() }; 36 | if env.is_null() { 37 | Err(Error::new("Could not create mem env".to_owned())) 38 | } else { 39 | Ok(Self(Arc::new(EnvWrapper { inner: env }))) 40 | } 41 | } 42 | 43 | /// Returns a new environment that stores its data in memory and delegates 44 | /// all non-file-storage tasks to base_env. 
45 | pub fn mem_env() -> Result { 46 | let env = unsafe { ffi::rocksdb_create_mem_env() }; 47 | if env.is_null() { 48 | Err(Error::new("Could not create mem env".to_owned())) 49 | } else { 50 | Ok(Self(Arc::new(EnvWrapper { inner: env }))) 51 | } 52 | } 53 | 54 | /// Returns a new environment which wraps and takes ownership of the provided 55 | /// raw environment. 56 | /// 57 | /// # Safety 58 | /// 59 | /// Ownership of `env` is transferred to the returned Env, which becomes 60 | /// responsible for freeing it. The caller should forget the raw pointer 61 | /// after this call. 62 | /// 63 | /// # When would I use this? 64 | /// 65 | /// RocksDB's C++ [Env](https://github.com/facebook/rocksdb/blob/main/include/rocksdb/env.h) 66 | /// class provides many extension points for low-level database subsystems, such as file IO. 67 | /// These subsystems aren't covered within the scope of the C interface or this crate, 68 | /// but from_raw() may be used to hand a pre-instrumented Env to this crate for further use. 69 | /// 70 | pub unsafe fn from_raw(env: *mut ffi::rocksdb_env_t) -> Self { 71 | Self(Arc::new(EnvWrapper { inner: env })) 72 | } 73 | 74 | /// Sets the number of background worker threads of a specific thread pool for this environment. 75 | /// `LOW` is the default pool. 76 | /// 77 | /// Default: 1 78 | pub fn set_background_threads(&mut self, num_threads: c_int) { 79 | unsafe { 80 | ffi::rocksdb_env_set_background_threads(self.0.inner, num_threads); 81 | } 82 | } 83 | 84 | /// Sets the size of the high priority thread pool that can be used to 85 | /// prevent compactions from stalling memtable flushes. 86 | pub fn set_high_priority_background_threads(&mut self, n: c_int) { 87 | unsafe { 88 | ffi::rocksdb_env_set_high_priority_background_threads(self.0.inner, n); 89 | } 90 | } 91 | 92 | /// Sets the size of the low priority thread pool that can be used to 93 | /// prevent compactions from stalling memtable flushes. 
94 | pub fn set_low_priority_background_threads(&mut self, n: c_int) { 95 | unsafe { 96 | ffi::rocksdb_env_set_low_priority_background_threads(self.0.inner, n); 97 | } 98 | } 99 | 100 | /// Sets the size of the bottom priority thread pool that can be used to 101 | /// prevent compactions from stalling memtable flushes. 102 | pub fn set_bottom_priority_background_threads(&mut self, n: c_int) { 103 | unsafe { 104 | ffi::rocksdb_env_set_bottom_priority_background_threads(self.0.inner, n); 105 | } 106 | } 107 | 108 | /// Wait for all threads started by StartThread to terminate. 109 | pub fn join_all_threads(&mut self) { 110 | unsafe { 111 | ffi::rocksdb_env_join_all_threads(self.0.inner); 112 | } 113 | } 114 | 115 | /// Lowering IO priority for threads from the specified pool. 116 | pub fn lower_thread_pool_io_priority(&mut self) { 117 | unsafe { 118 | ffi::rocksdb_env_lower_thread_pool_io_priority(self.0.inner); 119 | } 120 | } 121 | 122 | /// Lowering IO priority for high priority thread pool. 123 | pub fn lower_high_priority_thread_pool_io_priority(&mut self) { 124 | unsafe { 125 | ffi::rocksdb_env_lower_high_priority_thread_pool_io_priority(self.0.inner); 126 | } 127 | } 128 | 129 | /// Lowering CPU priority for threads from the specified pool. 130 | pub fn lower_thread_pool_cpu_priority(&mut self) { 131 | unsafe { 132 | ffi::rocksdb_env_lower_thread_pool_cpu_priority(self.0.inner); 133 | } 134 | } 135 | 136 | /// Lowering CPU priority for high priority thread pool. 
137 | pub fn lower_high_priority_thread_pool_cpu_priority(&mut self) { 138 | unsafe { 139 | ffi::rocksdb_env_lower_high_priority_thread_pool_cpu_priority(self.0.inner); 140 | } 141 | } 142 | } 143 | 144 | unsafe impl Send for EnvWrapper {} 145 | unsafe impl Sync for EnvWrapper {} 146 | -------------------------------------------------------------------------------- /src/ffi_util.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Alex Regueiro 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | // 15 | 16 | use crate::{ffi, Error}; 17 | use libc::{self, c_char, c_void, size_t}; 18 | use std::ffi::{CStr, CString}; 19 | use std::path::Path; 20 | use std::ptr; 21 | 22 | pub(crate) unsafe fn from_cstr(ptr: *const c_char) -> String { 23 | let cstr = CStr::from_ptr(ptr as *const _); 24 | String::from_utf8_lossy(cstr.to_bytes()).into_owned() 25 | } 26 | 27 | pub(crate) unsafe fn raw_data(ptr: *const c_char, size: usize) -> Option> { 28 | if ptr.is_null() { 29 | None 30 | } else { 31 | let mut dst = vec![0; size]; 32 | ptr::copy_nonoverlapping(ptr as *const u8, dst.as_mut_ptr(), size); 33 | 34 | Some(dst) 35 | } 36 | } 37 | 38 | pub fn error_message(ptr: *const c_char) -> String { 39 | unsafe { 40 | let s = from_cstr(ptr); 41 | ffi::rocksdb_free(ptr as *mut c_void); 42 | s 43 | } 44 | } 45 | 46 | pub fn opt_bytes_to_ptr>(opt: Option) -> *const c_char { 47 | match opt { 48 | Some(v) => v.as_ref().as_ptr() as *const c_char, 49 | None => ptr::null(), 50 | } 51 | } 52 | 53 | pub(crate) fn to_cpath>(path: P) -> Result { 54 | match CString::new(path.as_ref().to_string_lossy().as_bytes()) { 55 | Ok(c) => Ok(c), 56 | Err(e) => Err(Error::new(format!( 57 | "Failed to convert path to CString: {e}" 58 | ))), 59 | } 60 | } 61 | 62 | macro_rules! ffi_try { 63 | ( $($function:ident)::*() ) => { 64 | ffi_try_impl!($($function)::*()) 65 | }; 66 | 67 | ( $($function:ident)::*( $arg1:expr $(, $arg:expr)* $(,)? ) ) => { 68 | ffi_try_impl!($($function)::*($arg1 $(, $arg)* ,)) 69 | }; 70 | } 71 | 72 | macro_rules! ffi_try_impl { 73 | ( $($function:ident)::*( $($arg:expr,)*) ) => {{ 74 | let mut err: *mut ::libc::c_char = ::std::ptr::null_mut(); 75 | let result = $($function)::*($($arg,)* &mut err); 76 | if !err.is_null() { 77 | return Err(Error::new($crate::ffi_util::error_message(err))); 78 | } 79 | result 80 | }}; 81 | } 82 | 83 | /// Value which can be converted into a C string. 
84 | /// 85 | /// The trait is used as argument to functions which wish to accept either 86 | /// [`&str`] or [`&CStr`](CStr) arguments while internally need to interact with 87 | /// C APIs. Accepting [`&str`] may be more convenient for users but requires 88 | /// conversion into [`CString`] internally which requires allocation. With this 89 | /// trait, latency-conscious users may choose to prepare [`CStr`] in advance and 90 | /// then pass it directly without having to incur the conversion cost. 91 | /// 92 | /// To use the trait, function should accept `impl CStrLike` and after baking 93 | /// the argument (with [`CStrLike::bake`] method) it can use it as a [`&CStr`](CStr) 94 | /// (since the baked result dereferences into [`CStr`]). 95 | /// 96 | /// # Example 97 | /// 98 | /// ``` 99 | /// use std::ffi::{CStr, CString}; 100 | /// use rust_rocksdb::CStrLike; 101 | /// 102 | /// fn strlen(arg: impl CStrLike) -> std::result::Result { 103 | /// let baked = arg.bake().map_err(|err| err.to_string())?; 104 | /// Ok(unsafe { libc::strlen(baked.as_ptr()) }) 105 | /// } 106 | /// 107 | /// const FOO: &str = "foo"; 108 | /// const BAR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"bar\0") }; 109 | /// 110 | /// assert_eq!(Ok(3), strlen(FOO)); 111 | /// assert_eq!(Ok(3), strlen(BAR)); 112 | /// ``` 113 | pub trait CStrLike { 114 | type Baked: std::ops::Deref; 115 | type Error: std::fmt::Debug + std::fmt::Display; 116 | 117 | /// Bakes self into value which can be freely converted into [`&CStr`](CStr). 118 | /// 119 | /// This may require allocation and may fail if `self` has invalid value. 120 | fn bake(self) -> Result; 121 | 122 | /// Consumers and converts value into an owned [`CString`]. 123 | /// 124 | /// If `Self` is already a `CString` simply returns it; if it’s a reference 125 | /// to a `CString` then the value is cloned. In other cases this may 126 | /// require allocation and may fail if `self` has invalid value. 
127 | fn into_c_string(self) -> Result; 128 | } 129 | 130 | impl CStrLike for &str { 131 | type Baked = CString; 132 | type Error = std::ffi::NulError; 133 | 134 | fn bake(self) -> Result { 135 | CString::new(self) 136 | } 137 | fn into_c_string(self) -> Result { 138 | CString::new(self) 139 | } 140 | } 141 | 142 | // This is redundant for the most part and exists so that `foo(&string)` (where 143 | // `string: String` works just as if `foo` took `arg: &str` argument. 144 | impl CStrLike for &String { 145 | type Baked = CString; 146 | type Error = std::ffi::NulError; 147 | 148 | fn bake(self) -> Result { 149 | CString::new(self.as_bytes()) 150 | } 151 | fn into_c_string(self) -> Result { 152 | CString::new(self.as_bytes()) 153 | } 154 | } 155 | 156 | impl CStrLike for &CStr { 157 | type Baked = Self; 158 | type Error = std::convert::Infallible; 159 | 160 | fn bake(self) -> Result { 161 | Ok(self) 162 | } 163 | fn into_c_string(self) -> Result { 164 | Ok(self.to_owned()) 165 | } 166 | } 167 | 168 | // This exists so that if caller constructs a `CString` they can pass it into 169 | // the function accepting `CStrLike` argument. Some of such functions may take 170 | // the argument whereas otherwise they would need to allocated a new owned 171 | // object. 172 | impl CStrLike for CString { 173 | type Baked = CString; 174 | type Error = std::convert::Infallible; 175 | 176 | fn bake(self) -> Result { 177 | Ok(self) 178 | } 179 | fn into_c_string(self) -> Result { 180 | Ok(self) 181 | } 182 | } 183 | 184 | // This is redundant for the most part and exists so that `foo(&cstring)` (where 185 | // `string: CString` works just as if `foo` took `arg: &CStr` argument. 186 | impl<'a> CStrLike for &'a CString { 187 | type Baked = &'a CStr; 188 | type Error = std::convert::Infallible; 189 | 190 | fn bake(self) -> Result { 191 | Ok(self) 192 | } 193 | fn into_c_string(self) -> Result { 194 | Ok(self.clone()) 195 | } 196 | } 197 | 198 | /// Owned malloc-allocated memory slice. 
199 | /// Do not derive `Clone` for this because it will cause double-free. 200 | pub struct CSlice { 201 | data: *const c_char, 202 | len: size_t, 203 | } 204 | 205 | impl CSlice { 206 | /// Constructing such a slice may be unsafe. 207 | /// 208 | /// # Safety 209 | /// The caller must ensure that the pointer and length are valid. 210 | /// Moreover, `CSlice` takes the ownership of the memory and will free it 211 | /// using `rocksdb_free`. The caller must ensure that the memory is 212 | /// allocated by `malloc` in RocksDB and will not be freed by any other 213 | /// means. 214 | pub(crate) unsafe fn from_raw_parts(data: *const c_char, len: size_t) -> Self { 215 | Self { data, len } 216 | } 217 | } 218 | 219 | impl AsRef<[u8]> for CSlice { 220 | fn as_ref(&self) -> &[u8] { 221 | unsafe { std::slice::from_raw_parts(self.data as *const u8, self.len) } 222 | } 223 | } 224 | 225 | impl Drop for CSlice { 226 | fn drop(&mut self) { 227 | unsafe { 228 | ffi::rocksdb_free(self.data as *mut c_void); 229 | } 230 | } 231 | } 232 | 233 | #[test] 234 | fn test_c_str_like_bake() { 235 | fn test(value: S) -> Result { 236 | value 237 | .bake() 238 | .map(|value| unsafe { libc::strlen(value.as_ptr()) }) 239 | } 240 | 241 | assert_eq!(Ok(3), test("foo")); // &str 242 | assert_eq!(Ok(3), test(&String::from("foo"))); // String 243 | assert_eq!(Ok(3), test(CString::new("foo").unwrap().as_ref())); // &CStr 244 | assert_eq!(Ok(3), test(&CString::new("foo").unwrap())); // &CString 245 | assert_eq!(Ok(3), test(CString::new("foo").unwrap())); // CString 246 | 247 | assert_eq!(3, test("foo\0bar").err().unwrap().nul_position()); 248 | } 249 | 250 | #[test] 251 | fn test_c_str_like_into() { 252 | fn test(value: S) -> Result { 253 | value.into_c_string() 254 | } 255 | 256 | let want = CString::new("foo").unwrap(); 257 | 258 | assert_eq!(Ok(want.clone()), test("foo")); // &str 259 | assert_eq!(Ok(want.clone()), test(&String::from("foo"))); // &String 260 | assert_eq!( 261 | Ok(want.clone()), 
262 | test(CString::new("foo").unwrap().as_ref()) 263 | ); // &CStr 264 | assert_eq!(Ok(want.clone()), test(&CString::new("foo").unwrap())); // &CString 265 | assert_eq!(Ok(want), test(CString::new("foo").unwrap())); // CString 266 | 267 | assert_eq!(3, test("foo\0bar").err().unwrap().nul_position()); 268 | } 269 | -------------------------------------------------------------------------------- /src/iter_range.rs: -------------------------------------------------------------------------------- 1 | /// A range which can be set as iterate bounds on [`crate::ReadOptions`]. 2 | /// 3 | /// See [`crate::ReadOptions::set_iterate_range`] for documentation and 4 | /// examples. 5 | pub trait IterateBounds { 6 | /// Converts object into lower and upper bounds pair. 7 | /// 8 | /// If this object represents range with one of the bounds unset, 9 | /// corresponding element is returned as `None`. For example, `..upper` 10 | /// range would be converted into `(None, Some(upper))` pair. 11 | fn into_bounds(self) -> (Option>, Option>); 12 | } 13 | 14 | impl IterateBounds for std::ops::RangeFull { 15 | fn into_bounds(self) -> (Option>, Option>) { 16 | (None, None) 17 | } 18 | } 19 | 20 | impl>> IterateBounds for std::ops::Range { 21 | fn into_bounds(self) -> (Option>, Option>) { 22 | (Some(self.start.into()), Some(self.end.into())) 23 | } 24 | } 25 | 26 | impl>> IterateBounds for std::ops::RangeFrom { 27 | fn into_bounds(self) -> (Option>, Option>) { 28 | (Some(self.start.into()), None) 29 | } 30 | } 31 | 32 | impl>> IterateBounds for std::ops::RangeTo { 33 | fn into_bounds(self) -> (Option>, Option>) { 34 | (None, Some(self.end.into())) 35 | } 36 | } 37 | 38 | /// Representation of a range of keys starting with given prefix. 39 | /// 40 | /// Can be used as argument of [`crate::ReadOptions::set_iterate_range`] method 41 | /// to set iterate bounds. 
42 | #[derive(Clone, Copy)] 43 | pub struct PrefixRange(pub K); 44 | 45 | impl>> IterateBounds for PrefixRange { 46 | /// Converts the prefix range representation into pair of bounds. 47 | /// 48 | /// The conversion assumes lexicographical sorting on `u8` values. For 49 | /// example, `PrefixRange("a")` is equivalent to `"a".."b"` range. Note 50 | /// that for some prefixes, either of the bounds may be `None`. For 51 | /// example, an empty prefix is equivalent to a full range (i.e. both bounds 52 | /// being `None`). 53 | fn into_bounds(self) -> (Option>, Option>) { 54 | let start = self.0.into(); 55 | if start.is_empty() { 56 | (None, None) 57 | } else { 58 | let end = next_prefix(&start); 59 | (Some(start), end) 60 | } 61 | } 62 | } 63 | 64 | /// Returns lowest value following largest value with given prefix. 65 | /// 66 | /// In other words, computes upper bound for a prefix scan over list of keys 67 | /// sorted in lexicographical order. This means that a prefix scan can be 68 | /// expressed as range scan over a right-open `[prefix, next_prefix(prefix))` 69 | /// range. 70 | /// 71 | /// For example, for prefix `foo` the function returns `fop`. 72 | /// 73 | /// Returns `None` if there is no value which can follow value with given 74 | /// prefix. This happens when prefix consists entirely of `'\xff'` bytes (or is 75 | /// empty). 76 | fn next_prefix(prefix: &[u8]) -> Option> { 77 | let ffs = prefix 78 | .iter() 79 | .rev() 80 | .take_while(|&&byte| byte == u8::MAX) 81 | .count(); 82 | let next = &prefix[..(prefix.len() - ffs)]; 83 | if next.is_empty() { 84 | // Prefix consisted of \xff bytes. There is no prefix that 85 | // follows it. 
86 | None 87 | } else { 88 | let mut next = next.to_vec(); 89 | *next.last_mut().unwrap() += 1; 90 | Some(next) 91 | } 92 | } 93 | 94 | #[test] 95 | fn test_prefix_range() { 96 | fn test(start: &[u8], end: Option<&[u8]>) { 97 | let got = PrefixRange(start).into_bounds(); 98 | assert_eq!((Some(start), end), (got.0.as_deref(), got.1.as_deref())); 99 | } 100 | 101 | let empty: &[u8] = &[]; 102 | assert_eq!((None, None), PrefixRange(empty).into_bounds()); 103 | test(b"\xff", None); 104 | test(b"\xff\xff\xff\xff", None); 105 | test(b"a", Some(b"b")); 106 | test(b"a\xff\xff\xff", Some(b"b")); 107 | } 108 | -------------------------------------------------------------------------------- /src/merge_operator.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | //! rustic merge operator 17 | //! 18 | //! ``` 19 | //! use rust_rocksdb::{Options, DB, MergeOperands}; 20 | //! 21 | //! fn concat_merge(new_key: &[u8], 22 | //! existing_val: Option<&[u8]>, 23 | //! operands: &MergeOperands) 24 | //! -> Option> { 25 | //! 26 | //! let mut result: Vec = Vec::with_capacity(operands.len()); 27 | //! existing_val.map(|v| { 28 | //! for e in v { 29 | //! result.push(*e) 30 | //! } 31 | //! }); 32 | //! for op in operands { 33 | //! for e in op { 34 | //! result.push(*e) 35 | //! } 36 | //! } 37 | //! 
Some(result) 38 | //! } 39 | //! 40 | //!let tempdir = tempfile::Builder::new() 41 | //! .prefix("_rust_path_to_rocksdb") 42 | //! .tempdir() 43 | //! .expect("Failed to create temporary path for the _rust_path_to_rocksdb"); 44 | //!let path = tempdir.path(); 45 | //!let mut opts = Options::default(); 46 | //! 47 | //!opts.create_if_missing(true); 48 | //!opts.set_merge_operator_associative("test operator", concat_merge); 49 | //!{ 50 | //! let db = DB::open(&opts, path).unwrap(); 51 | //! let p = db.put(b"k1", b"a"); 52 | //! db.merge(b"k1", b"b"); 53 | //! db.merge(b"k1", b"c"); 54 | //! db.merge(b"k1", b"d"); 55 | //! db.merge(b"k1", b"efg"); 56 | //! let r = db.get(b"k1"); 57 | //! assert_eq!(r.unwrap().unwrap(), b"abcdefg"); 58 | //!} 59 | //!let _ = DB::destroy(&opts, path); 60 | //! ``` 61 | 62 | use libc::{self, c_char, c_int, c_void, size_t}; 63 | use std::ffi::CString; 64 | use std::mem; 65 | use std::ptr; 66 | use std::slice; 67 | 68 | pub trait MergeFn: 69 | Fn(&[u8], Option<&[u8]>, &MergeOperands) -> Option> + Send + Sync + 'static 70 | { 71 | } 72 | impl MergeFn for F where 73 | F: Fn(&[u8], Option<&[u8]>, &MergeOperands) -> Option> + Send + Sync + 'static 74 | { 75 | } 76 | 77 | pub struct MergeOperatorCallback { 78 | pub name: CString, 79 | pub full_merge_fn: F, 80 | pub partial_merge_fn: PF, 81 | } 82 | 83 | pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) { 84 | drop(Box::from_raw(raw_cb as *mut MergeOperatorCallback)); 85 | } 86 | 87 | pub unsafe extern "C" fn delete_callback( 88 | _raw_cb: *mut c_void, 89 | value: *const c_char, 90 | value_length: size_t, 91 | ) { 92 | if !value.is_null() { 93 | drop(Box::from_raw(slice::from_raw_parts_mut( 94 | value as *mut u8, 95 | value_length, 96 | ))); 97 | } 98 | } 99 | 100 | pub unsafe extern "C" fn name_callback( 101 | raw_cb: *mut c_void, 102 | ) -> *const c_char { 103 | let cb = &mut *(raw_cb as *mut MergeOperatorCallback); 104 | cb.name.as_ptr() 105 | } 106 | 107 | pub unsafe extern 
"C" fn full_merge_callback( 108 | raw_cb: *mut c_void, 109 | raw_key: *const c_char, 110 | key_len: size_t, 111 | existing_value: *const c_char, 112 | existing_value_len: size_t, 113 | operands_list: *const *const c_char, 114 | operands_list_len: *const size_t, 115 | num_operands: c_int, 116 | success: *mut u8, 117 | new_value_length: *mut size_t, 118 | ) -> *mut c_char { 119 | let cb = &mut *(raw_cb as *mut MergeOperatorCallback); 120 | let operands = &MergeOperands::new(operands_list, operands_list_len, num_operands); 121 | let key = slice::from_raw_parts(raw_key as *const u8, key_len); 122 | let oldval = if existing_value.is_null() { 123 | None 124 | } else { 125 | Some(slice::from_raw_parts( 126 | existing_value as *const u8, 127 | existing_value_len, 128 | )) 129 | }; 130 | (cb.full_merge_fn)(key, oldval, operands).map_or_else( 131 | || { 132 | *new_value_length = 0; 133 | *success = 0_u8; 134 | ptr::null_mut() as *mut c_char 135 | }, 136 | |result| { 137 | *new_value_length = result.len() as size_t; 138 | *success = 1_u8; 139 | Box::into_raw(result.into_boxed_slice()) as *mut c_char 140 | }, 141 | ) 142 | } 143 | 144 | pub unsafe extern "C" fn partial_merge_callback( 145 | raw_cb: *mut c_void, 146 | raw_key: *const c_char, 147 | key_len: size_t, 148 | operands_list: *const *const c_char, 149 | operands_list_len: *const size_t, 150 | num_operands: c_int, 151 | success: *mut u8, 152 | new_value_length: *mut size_t, 153 | ) -> *mut c_char { 154 | let cb = &mut *(raw_cb as *mut MergeOperatorCallback); 155 | let operands = &MergeOperands::new(operands_list, operands_list_len, num_operands); 156 | let key = slice::from_raw_parts(raw_key as *const u8, key_len); 157 | (cb.partial_merge_fn)(key, None, operands).map_or_else( 158 | || { 159 | *new_value_length = 0; 160 | *success = 0_u8; 161 | ptr::null_mut::() 162 | }, 163 | |result| { 164 | *new_value_length = result.len() as size_t; 165 | *success = 1_u8; 166 | Box::into_raw(result.into_boxed_slice()) as *mut c_char 
167 | }, 168 | ) 169 | } 170 | 171 | pub struct MergeOperands { 172 | operands_list: *const *const c_char, 173 | operands_list_len: *const size_t, 174 | num_operands: usize, 175 | } 176 | 177 | impl MergeOperands { 178 | fn new( 179 | operands_list: *const *const c_char, 180 | operands_list_len: *const size_t, 181 | num_operands: c_int, 182 | ) -> MergeOperands { 183 | assert!(num_operands >= 0); 184 | MergeOperands { 185 | operands_list, 186 | operands_list_len, 187 | num_operands: num_operands as usize, 188 | } 189 | } 190 | 191 | pub fn len(&self) -> usize { 192 | self.num_operands 193 | } 194 | 195 | pub fn is_empty(&self) -> bool { 196 | self.num_operands == 0 197 | } 198 | 199 | pub fn iter(&self) -> MergeOperandsIter { 200 | MergeOperandsIter { 201 | operands: self, 202 | cursor: 0, 203 | } 204 | } 205 | 206 | fn get_operand(&self, index: usize) -> Option<&[u8]> { 207 | if index >= self.num_operands { 208 | None 209 | } else { 210 | unsafe { 211 | let base = self.operands_list as usize; 212 | let base_len = self.operands_list_len as usize; 213 | let spacing = mem::size_of::<*const *const u8>(); 214 | let spacing_len = mem::size_of::<*const size_t>(); 215 | let len_ptr = (base_len + (spacing_len * index)) as *const size_t; 216 | let len = *len_ptr; 217 | let ptr = base + (spacing * index); 218 | Some(slice::from_raw_parts(*(ptr as *const *const u8), len)) 219 | } 220 | } 221 | } 222 | } 223 | 224 | pub struct MergeOperandsIter<'a> { 225 | operands: &'a MergeOperands, 226 | cursor: usize, 227 | } 228 | 229 | impl<'a> Iterator for MergeOperandsIter<'a> { 230 | type Item = &'a [u8]; 231 | 232 | fn next(&mut self) -> Option { 233 | let operand = self.operands.get_operand(self.cursor)?; 234 | self.cursor += 1; 235 | Some(operand) 236 | } 237 | 238 | fn size_hint(&self) -> (usize, Option) { 239 | let remaining = self.operands.num_operands - self.cursor; 240 | (remaining, Some(remaining)) 241 | } 242 | } 243 | 244 | impl<'a> IntoIterator for &'a MergeOperands { 245 
| type Item = &'a [u8]; 246 | type IntoIter = MergeOperandsIter<'a>; 247 | 248 | fn into_iter(self) -> Self::IntoIter { 249 | Self::IntoIter { 250 | operands: self, 251 | cursor: 0, 252 | } 253 | } 254 | } 255 | -------------------------------------------------------------------------------- /src/perf.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tran Tuan Linh 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use libc::{c_int, c_uchar, c_void}; 16 | 17 | use crate::{db::DBInner, ffi, ffi_util::from_cstr, Cache, Error}; 18 | use crate::{DBCommon, ThreadMode, TransactionDB, DB}; 19 | 20 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 21 | #[repr(i32)] 22 | pub enum PerfStatsLevel { 23 | /// Unknown settings 24 | Uninitialized = 0, 25 | /// Disable perf stats 26 | Disable, 27 | /// Enables only count stats 28 | EnableCount, 29 | /// Count stats and enable time stats except for mutexes 30 | EnableTimeExceptForMutex, 31 | /// Other than time, also measure CPU time counters. Still don't measure 32 | /// time (neither wall time nor CPU time) for mutexes 33 | EnableTimeAndCPUTimeExceptForMutex, 34 | /// Enables count and time stats 35 | EnableTime, 36 | /// N.B must always be the last value! 
37 | OutOfBound, 38 | } 39 | 40 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] 41 | #[non_exhaustive] 42 | #[repr(i32)] 43 | pub enum PerfMetric { 44 | UserKeyComparisonCount = 0, 45 | BlockCacheHitCount = 1, 46 | BlockReadCount = 2, 47 | BlockReadByte = 3, 48 | BlockReadTime = 4, 49 | BlockChecksumTime = 5, 50 | BlockDecompressTime = 6, 51 | GetReadBytes = 7, 52 | MultigetReadBytes = 8, 53 | IterReadBytes = 9, 54 | InternalKeySkippedCount = 10, 55 | InternalDeleteSkippedCount = 11, 56 | InternalRecentSkippedCount = 12, 57 | InternalMergeCount = 13, 58 | GetSnapshotTime = 14, 59 | GetFromMemtableTime = 15, 60 | GetFromMemtableCount = 16, 61 | GetPostProcessTime = 17, 62 | GetFromOutputFilesTime = 18, 63 | SeekOnMemtableTime = 19, 64 | SeekOnMemtableCount = 20, 65 | NextOnMemtableCount = 21, 66 | PrevOnMemtableCount = 22, 67 | SeekChildSeekTime = 23, 68 | SeekChildSeekCount = 24, 69 | SeekMinHeapTime = 25, 70 | SeekMaxHeapTime = 26, 71 | SeekInternalSeekTime = 27, 72 | FindNextUserEntryTime = 28, 73 | WriteWalTime = 29, 74 | WriteMemtableTime = 30, 75 | WriteDelayTime = 31, 76 | WritePreAndPostProcessTime = 32, 77 | DbMutexLockNanos = 33, 78 | DbConditionWaitNanos = 34, 79 | MergeOperatorTimeNanos = 35, 80 | ReadIndexBlockNanos = 36, 81 | ReadFilterBlockNanos = 37, 82 | NewTableBlockIterNanos = 38, 83 | NewTableIteratorNanos = 39, 84 | BlockSeekNanos = 40, 85 | FindTableNanos = 41, 86 | BloomMemtableHitCount = 42, 87 | BloomMemtableMissCount = 43, 88 | BloomSstHitCount = 44, 89 | BloomSstMissCount = 45, 90 | KeyLockWaitTime = 46, 91 | KeyLockWaitCount = 47, 92 | EnvNewSequentialFileNanos = 48, 93 | EnvNewRandomAccessFileNanos = 49, 94 | EnvNewWritableFileNanos = 50, 95 | EnvReuseWritableFileNanos = 51, 96 | EnvNewRandomRwFileNanos = 52, 97 | EnvNewDirectoryNanos = 53, 98 | EnvFileExistsNanos = 54, 99 | EnvGetChildrenNanos = 55, 100 | EnvGetChildrenFileAttributesNanos = 56, 101 | EnvDeleteFileNanos = 57, 102 | EnvCreateDirNanos = 58, 103 | 
EnvCreateDirIfMissingNanos = 59, 104 | EnvDeleteDirNanos = 60, 105 | EnvGetFileSizeNanos = 61, 106 | EnvGetFileModificationTimeNanos = 62, 107 | EnvRenameFileNanos = 63, 108 | EnvLinkFileNanos = 64, 109 | EnvLockFileNanos = 65, 110 | EnvUnlockFileNanos = 66, 111 | EnvNewLoggerNanos = 67, 112 | TotalMetricCount = 68, 113 | } 114 | 115 | /// Sets the perf stats level for current thread. 116 | pub fn set_perf_stats(lvl: PerfStatsLevel) { 117 | unsafe { 118 | ffi::rocksdb_set_perf_level(lvl as c_int); 119 | } 120 | } 121 | 122 | /// Thread local context for gathering performance counter efficiently 123 | /// and transparently. 124 | pub struct PerfContext { 125 | pub(crate) inner: *mut ffi::rocksdb_perfcontext_t, 126 | } 127 | 128 | impl Default for PerfContext { 129 | fn default() -> Self { 130 | let ctx = unsafe { ffi::rocksdb_perfcontext_create() }; 131 | assert!(!ctx.is_null(), "Could not create Perf Context"); 132 | 133 | Self { inner: ctx } 134 | } 135 | } 136 | 137 | impl Drop for PerfContext { 138 | fn drop(&mut self) { 139 | unsafe { 140 | ffi::rocksdb_perfcontext_destroy(self.inner); 141 | } 142 | } 143 | } 144 | 145 | impl PerfContext { 146 | /// Reset context 147 | pub fn reset(&mut self) { 148 | unsafe { 149 | ffi::rocksdb_perfcontext_reset(self.inner); 150 | } 151 | } 152 | 153 | /// Get the report on perf 154 | pub fn report(&self, exclude_zero_counters: bool) -> String { 155 | unsafe { 156 | let ptr = 157 | ffi::rocksdb_perfcontext_report(self.inner, c_uchar::from(exclude_zero_counters)); 158 | let report = from_cstr(ptr); 159 | ffi::rocksdb_free(ptr as *mut c_void); 160 | report 161 | } 162 | } 163 | 164 | /// Returns value of a metric 165 | pub fn metric(&self, id: PerfMetric) -> u64 { 166 | unsafe { ffi::rocksdb_perfcontext_metric(self.inner, id as c_int) } 167 | } 168 | } 169 | 170 | /// Memory usage stats 171 | pub struct MemoryUsageStats { 172 | /// Approximate memory usage of all the mem-tables 173 | pub mem_table_total: u64, 174 | /// 
Approximate memory usage of un-flushed mem-tables 175 | pub mem_table_unflushed: u64, 176 | /// Approximate memory usage of all the table readers 177 | pub mem_table_readers_total: u64, 178 | /// Approximate memory usage by cache 179 | pub cache_total: u64, 180 | } 181 | 182 | /// Wrap over memory_usage_t. Hold current memory usage of the specified DB instances and caches 183 | pub struct MemoryUsage { 184 | inner: *mut ffi::rocksdb_memory_usage_t, 185 | } 186 | 187 | impl Drop for MemoryUsage { 188 | fn drop(&mut self) { 189 | unsafe { 190 | ffi::rocksdb_approximate_memory_usage_destroy(self.inner); 191 | } 192 | } 193 | } 194 | 195 | impl MemoryUsage { 196 | /// Approximate memory usage of all the mem-tables 197 | pub fn approximate_mem_table_total(&self) -> u64 { 198 | unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_total(self.inner) } 199 | } 200 | 201 | /// Approximate memory usage of un-flushed mem-tables 202 | pub fn approximate_mem_table_unflushed(&self) -> u64 { 203 | unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_unflushed(self.inner) } 204 | } 205 | 206 | /// Approximate memory usage of all the table readers 207 | pub fn approximate_mem_table_readers_total(&self) -> u64 { 208 | unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_readers_total(self.inner) } 209 | } 210 | 211 | /// Approximate memory usage by cache 212 | pub fn approximate_cache_total(&self) -> u64 { 213 | unsafe { ffi::rocksdb_approximate_memory_usage_get_cache_total(self.inner) } 214 | } 215 | } 216 | 217 | /// Builder for MemoryUsage 218 | pub struct MemoryUsageBuilder { 219 | inner: *mut ffi::rocksdb_memory_consumers_t, 220 | } 221 | 222 | impl Drop for MemoryUsageBuilder { 223 | fn drop(&mut self) { 224 | unsafe { 225 | ffi::rocksdb_memory_consumers_destroy(self.inner); 226 | } 227 | } 228 | } 229 | 230 | impl MemoryUsageBuilder { 231 | /// Create new instance 232 | pub fn new() -> Result<Self, Error> { 233 | let mc = unsafe {
ffi::rocksdb_memory_consumers_create() }; 234 | if mc.is_null() { 235 | Err(Error::new( 236 | "Could not create MemoryUsage builder".to_owned(), 237 | )) 238 | } else { 239 | Ok(Self { inner: mc }) 240 | } 241 | } 242 | 243 | /// Add a DB instance to collect memory usage from it and add up in total stats 244 | pub fn add_tx_db<T: ThreadMode>(&mut self, db: &TransactionDB<T>) { 245 | unsafe { 246 | let base = ffi::rocksdb_transactiondb_get_base_db(db.inner); 247 | ffi::rocksdb_memory_consumers_add_db(self.inner, base); 248 | } 249 | } 250 | 251 | /// Add a DB instance to collect memory usage from it and add up in total stats 252 | pub fn add_db<T: ThreadMode, D: DBInner>(&mut self, db: &DBCommon<T, D>) { 253 | unsafe { 254 | ffi::rocksdb_memory_consumers_add_db(self.inner, db.inner.inner()); 255 | } 256 | } 257 | 258 | /// Add a cache to collect memory usage from it and add up in total stats 259 | pub fn add_cache(&mut self, cache: &Cache) { 260 | unsafe { 261 | ffi::rocksdb_memory_consumers_add_cache(self.inner, cache.0.inner.as_ptr()); 262 | } 263 | } 264 | 265 | /// Build up MemoryUsage 266 | pub fn build(&self) -> Result<MemoryUsage, Error> { 267 | unsafe { 268 | let mu = ffi_try!(ffi::rocksdb_approximate_memory_usage_create(self.inner)); 269 | Ok(MemoryUsage { inner: mu }) 270 | } 271 | } 272 | } 273 | 274 | /// Get memory usage stats from DB instances and Cache instances 275 | pub fn get_memory_usage_stats( 276 | dbs: Option<&[&DB]>, 277 | caches: Option<&[&Cache]>, 278 | ) -> Result<MemoryUsageStats, Error> { 279 | let mut builder = MemoryUsageBuilder::new()?; 280 | if let Some(dbs_) = dbs { 281 | dbs_.iter().for_each(|db| builder.add_db(db)); 282 | } 283 | if let Some(caches_) = caches { 284 | caches_.iter().for_each(|cache| builder.add_cache(cache)); 285 | } 286 | 287 | let mu = builder.build()?; 288 | Ok(MemoryUsageStats { 289 | mem_table_total: mu.approximate_mem_table_total(), 290 | mem_table_unflushed: mu.approximate_mem_table_unflushed(), 291 | mem_table_readers_total: mu.approximate_mem_table_readers_total(), 292 | cache_total:
mu.approximate_cache_total(), 293 | }) 294 | } 295 | -------------------------------------------------------------------------------- /src/prop_name.rs: -------------------------------------------------------------------------------- 1 | use crate::ffi_util::CStrLike; 2 | 3 | use std::ffi::{CStr, CString}; 4 | 5 | /// A borrowed name of a RocksDB property. 6 | /// 7 | /// The value is guaranteed to be a nul-terminated UTF-8 string. This means it 8 | /// can be converted to [`CStr`] and [`str`] at zero cost. 9 | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash)] 10 | #[repr(transparent)] 11 | pub struct PropName(CStr); 12 | 13 | impl PropName { 14 | /// Creates a new object from a nul-terminated string with no internal nul 15 | /// bytes. 16 | /// 17 | /// Panics if the `value` isn’t terminated by a nul byte or contains 18 | /// interior nul bytes. 19 | pub(crate) const fn new_unwrap(value: &str) -> &Self { 20 | if let Some((&0, bytes)) = value.as_bytes().split_last() { 21 | let mut idx = 0; 22 | while idx < bytes.len() { 23 | assert!(bytes[idx] != 0, "input contained interior nul byte"); 24 | idx += 1; 25 | } 26 | 27 | // SAFETY: 1. We’ve just verified `value` is a nul-terminated with no 28 | // interior nul bytes and since its `str` it’s also valid UTF-8. 29 | // 2. Self and CStr have the same representation so casting is sound. 30 | unsafe { 31 | let value = CStr::from_bytes_with_nul_unchecked(value.as_bytes()); 32 | &*(std::ptr::from_ref::<CStr>(value) as *const Self) 33 | } 34 | } else { 35 | panic!("input was not nul-terminated"); 36 | } 37 | } 38 | 39 | /// Converts the value into a C string slice. 40 | #[inline] 41 | pub fn as_c_str(&self) -> &CStr { 42 | &self.0 43 | } 44 | 45 | /// Converts the value into a string slice. 46 | /// 47 | /// Nul byte terminating the underlying C string is not included in the 48 | /// returned slice. 49 | #[inline] 50 | pub fn as_str(&self) -> &str { 51 | // SAFETY: self.0 is guaranteed to be valid ASCII string.
52 | unsafe { std::str::from_utf8_unchecked(self.0.to_bytes()) } 53 | } 54 | } 55 | 56 | impl core::ops::Deref for PropName { 57 | type Target = CStr; 58 | 59 | #[inline] 60 | fn deref(&self) -> &Self::Target { 61 | self.as_c_str() 62 | } 63 | } 64 | 65 | impl core::convert::AsRef<CStr> for PropName { 66 | #[inline] 67 | fn as_ref(&self) -> &CStr { 68 | self.as_c_str() 69 | } 70 | } 71 | 72 | impl core::convert::AsRef<str> for PropName { 73 | #[inline] 74 | fn as_ref(&self) -> &str { 75 | self.as_str() 76 | } 77 | } 78 | 79 | impl std::borrow::ToOwned for PropName { 80 | type Owned = PropertyName; 81 | 82 | #[inline] 83 | fn to_owned(&self) -> Self::Owned { 84 | PropertyName(self.0.to_owned()) 85 | } 86 | 87 | #[inline] 88 | fn clone_into(&self, target: &mut Self::Owned) { 89 | self.0.clone_into(&mut target.0); 90 | } 91 | } 92 | 93 | impl core::fmt::Display for PropName { 94 | #[inline] 95 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 96 | self.as_str().fmt(fmtr) 97 | } 98 | } 99 | 100 | impl core::fmt::Debug for PropName { 101 | #[inline] 102 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 103 | self.as_str().fmt(fmtr) 104 | } 105 | } 106 | 107 | impl core::cmp::PartialEq<CStr> for PropName { 108 | #[inline] 109 | fn eq(&self, other: &CStr) -> bool { 110 | self.as_c_str().eq(other) 111 | } 112 | } 113 | 114 | impl core::cmp::PartialEq<str> for PropName { 115 | #[inline] 116 | fn eq(&self, other: &str) -> bool { 117 | self.as_str().eq(other) 118 | } 119 | } 120 | 121 | impl core::cmp::PartialEq<PropName> for CStr { 122 | #[inline] 123 | fn eq(&self, other: &PropName) -> bool { 124 | self.eq(other.as_c_str()) 125 | } 126 | } 127 | 128 | impl core::cmp::PartialEq<PropName> for str { 129 | #[inline] 130 | fn eq(&self, other: &PropName) -> bool { 131 | self.eq(other.as_str()) 132 | } 133 | } 134 | 135 | impl<'a> CStrLike for &'a PropName { 136 | type Baked = &'a CStr; 137 | type Error = std::convert::Infallible; 138 | 139 | #[inline] 140 | fn
bake(self) -> Result<Self::Baked, Self::Error> { 141 | Ok(&self.0) 142 | } 143 | 144 | #[inline] 145 | fn into_c_string(self) -> Result<CString, Self::Error> { 146 | Ok(self.0.to_owned()) 147 | } 148 | } 149 | 150 | /// An owned name of a RocksDB property. 151 | /// 152 | /// The value is guaranteed to be a nul-terminated UTF-8 string. This means it 153 | /// can be converted to [`CString`] and [`String`] at zero cost. 154 | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash)] 155 | #[repr(transparent)] 156 | pub struct PropertyName(CString); 157 | 158 | impl PropertyName { 159 | /// Creates a new object from valid nul-terminated UTF-8 string. The string 160 | /// must not contain interior nul bytes. 161 | #[inline] 162 | unsafe fn from_vec_with_nul_unchecked(inner: Vec<u8>) -> Self { 163 | // SAFETY: Caller promises inner is nul-terminated and valid UTF-8. 164 | Self(CString::from_vec_with_nul_unchecked(inner)) 165 | } 166 | 167 | /// Converts the value into a C string. 168 | #[inline] 169 | pub fn into_c_string(self) -> CString { 170 | self.0 171 | } 172 | 173 | /// Converts the property name into a string. 174 | /// 175 | /// Nul byte terminating the underlying C string is not included in the 176 | /// returned value. 177 | #[inline] 178 | pub fn into_string(self) -> String { 179 | // SAFETY: self.0 is guaranteed to be valid UTF-8. 180 | unsafe { String::from_utf8_unchecked(self.0.into_bytes()) } 181 | } 182 | } 183 | 184 | impl std::ops::Deref for PropertyName { 185 | type Target = PropName; 186 | 187 | #[inline] 188 | fn deref(&self) -> &Self::Target { 189 | // SAFETY: 1. PropName and CStr have the same representation so casting 190 | // is safe. 2. self.0 is guaranteed to be valid nul-terminated UTF-8 191 | // string.
192 | unsafe { &*(std::ptr::from_ref::<CStr>(self.0.as_c_str()) as *const PropName) } 193 | } 194 | } 195 | 196 | impl core::convert::AsRef<CStr> for PropertyName { 197 | #[inline] 198 | fn as_ref(&self) -> &CStr { 199 | self.as_c_str() 200 | } 201 | } 202 | 203 | impl core::convert::AsRef<str> for PropertyName { 204 | #[inline] 205 | fn as_ref(&self) -> &str { 206 | self.as_str() 207 | } 208 | } 209 | 210 | impl std::borrow::Borrow<PropName> for PropertyName { 211 | #[inline] 212 | fn borrow(&self) -> &PropName { 213 | self 214 | } 215 | } 216 | 217 | impl core::fmt::Display for PropertyName { 218 | #[inline] 219 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 220 | self.as_str().fmt(fmtr) 221 | } 222 | } 223 | 224 | impl core::fmt::Debug for PropertyName { 225 | #[inline] 226 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 227 | self.as_str().fmt(fmtr) 228 | } 229 | } 230 | 231 | impl core::cmp::PartialEq<CString> for PropertyName { 232 | #[inline] 233 | fn eq(&self, other: &CString) -> bool { 234 | self.as_c_str().eq(other.as_c_str()) 235 | } 236 | } 237 | 238 | impl core::cmp::PartialEq<String> for PropertyName { 239 | #[inline] 240 | fn eq(&self, other: &String) -> bool { 241 | self.as_str().eq(other.as_str()) 242 | } 243 | } 244 | 245 | impl core::cmp::PartialEq<PropertyName> for CString { 246 | #[inline] 247 | fn eq(&self, other: &PropertyName) -> bool { 248 | self.as_c_str().eq(other.as_c_str()) 249 | } 250 | } 251 | 252 | impl core::cmp::PartialEq<PropertyName> for String { 253 | #[inline] 254 | fn eq(&self, other: &PropertyName) -> bool { 255 | self.as_str().eq(other.as_str()) 256 | } 257 | } 258 | 259 | impl CStrLike for PropertyName { 260 | type Baked = CString; 261 | type Error = std::convert::Infallible; 262 | 263 | #[inline] 264 | fn bake(self) -> Result<Self::Baked, Self::Error> { 265 | Ok(self.0) 266 | } 267 | 268 | #[inline] 269 | fn into_c_string(self) -> Result<CString, Self::Error> { 270 | Ok(self.0) 271 | } 272 | } 273 | 274 | impl<'a> CStrLike for &'a PropertyName { 275 | type Baked = &'a CStr; 276 | type
Error = std::convert::Infallible; 277 | 278 | #[inline] 279 | fn bake(self) -> Result<Self::Baked, Self::Error> { 280 | Ok(self.as_c_str()) 281 | } 282 | 283 | #[inline] 284 | fn into_c_string(self) -> Result<CString, Self::Error> { 285 | Ok(self.0.clone()) 286 | } 287 | } 288 | 289 | /// Constructs a property name for an ‘at level’ property. 290 | /// 291 | /// `name` is the infix of the property name (e.g. `"num-files-at-level"`) and 292 | /// `level` is level to get statistics of. The property name is constructed as 293 | /// `"rocksdb.<name><level>"`. 294 | /// 295 | /// Expects `name` not to contain any interior nul bytes. 296 | pub(crate) unsafe fn level_property(name: &str, level: usize) -> PropertyName { 297 | let bytes = format!("rocksdb.{name}{level}\0").into_bytes(); 298 | // SAFETY: We're appending terminating nul and caller promises `name` has no 299 | // interior nul bytes. 300 | PropertyName::from_vec_with_nul_unchecked(bytes) 301 | } 302 | 303 | #[test] 304 | fn sanity_checks() { 305 | let want = "rocksdb.cfstats-no-file-histogram"; 306 | assert_eq!(want, crate::properties::CFSTATS_NO_FILE_HISTOGRAM); 307 | 308 | let want = "rocksdb.num-files-at-level5"; 309 | assert_eq!(want, &*crate::properties::num_files_at_level(5)); 310 | } 311 | 312 | #[test] 313 | #[should_panic(expected = "input contained interior nul byte")] 314 | fn test_interior_nul() { 315 | PropName::new_unwrap("interior nul\0\0"); 316 | } 317 | 318 | #[test] 319 | #[should_panic(expected = "input was not nul-terminated")] 320 | fn test_non_nul_terminated() { 321 | PropName::new_unwrap("no nul terminator"); 322 | } 323 | -------------------------------------------------------------------------------- /src/slice_transform.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::ffi::CString; 16 | use std::slice; 17 | 18 | use libc::{c_char, c_uchar, c_void, size_t}; 19 | 20 | use crate::{ffi, ffi_util::CStrLike}; 21 | 22 | /// A `SliceTransform` is a generic pluggable way of transforming one string 23 | /// to another. Its primary use-case is in configuring rocksdb 24 | /// to store prefix blooms by setting prefix_extractor in 25 | /// ColumnFamilyOptions. 26 | pub struct SliceTransform { 27 | pub inner: *mut ffi::rocksdb_slicetransform_t, 28 | } 29 | 30 | // NB we intentionally don't implement a Drop that passes 31 | // through to rocksdb_slicetransform_destroy because 32 | // this is currently only used (to my knowledge) 33 | // by people passing it as a prefix extractor when 34 | // opening a DB. 
35 | 36 | impl SliceTransform { 37 | pub fn create( 38 | name: impl CStrLike, 39 | transform_fn: TransformFn, 40 | in_domain_fn: Option<InDomainFn>, 41 | ) -> SliceTransform { 42 | let cb = Box::into_raw(Box::new(TransformCallback { 43 | name: name.into_c_string().unwrap(), 44 | transform_fn, 45 | in_domain_fn, 46 | })); 47 | 48 | let st = unsafe { 49 | ffi::rocksdb_slicetransform_create( 50 | cb as *mut c_void, 51 | Some(slice_transform_destructor_callback), 52 | Some(transform_callback), 53 | Some(in_domain_callback), 54 | // this None points to the deprecated InRange callback 55 | None, 56 | Some(slice_transform_name_callback), 57 | ) 58 | }; 59 | 60 | SliceTransform { inner: st } 61 | } 62 | 63 | pub fn create_fixed_prefix(len: size_t) -> SliceTransform { 64 | SliceTransform { 65 | inner: unsafe { ffi::rocksdb_slicetransform_create_fixed_prefix(len) }, 66 | } 67 | } 68 | 69 | pub fn create_noop() -> SliceTransform { 70 | SliceTransform { 71 | inner: unsafe { ffi::rocksdb_slicetransform_create_noop() }, 72 | } 73 | } 74 | } 75 | 76 | pub type TransformFn<'a> = fn(&'a [u8]) -> &'a [u8]; 77 | pub type InDomainFn = fn(&[u8]) -> bool; 78 | 79 | pub struct TransformCallback<'a> { 80 | pub name: CString, 81 | pub transform_fn: TransformFn<'a>, 82 | pub in_domain_fn: Option<InDomainFn>, 83 | } 84 | 85 | pub unsafe extern "C" fn slice_transform_destructor_callback(raw_cb: *mut c_void) { 86 | drop(Box::from_raw(raw_cb as *mut TransformCallback)); 87 | } 88 | 89 | pub unsafe extern "C" fn slice_transform_name_callback(raw_cb: *mut c_void) -> *const c_char { 90 | let cb = &mut *(raw_cb as *mut TransformCallback); 91 | cb.name.as_ptr() 92 | } 93 | 94 | pub unsafe extern "C" fn transform_callback( 95 | raw_cb: *mut c_void, 96 | raw_key: *const c_char, 97 | key_len: size_t, 98 | dst_length: *mut size_t, 99 | ) -> *mut c_char { 100 | let cb = &mut *(raw_cb as *mut TransformCallback); 101 | let key = slice::from_raw_parts(raw_key as *const u8, key_len); 102 | let prefix = (cb.transform_fn)(key); 103 |
*dst_length = prefix.len() as size_t; 104 | prefix.as_ptr() as *mut c_char 105 | } 106 | 107 | pub unsafe extern "C" fn in_domain_callback( 108 | raw_cb: *mut c_void, 109 | raw_key: *const c_char, 110 | key_len: size_t, 111 | ) -> c_uchar { 112 | let cb = &mut *(raw_cb as *mut TransformCallback); 113 | let key = slice::from_raw_parts(raw_key as *const u8, key_len); 114 | c_uchar::from(cb.in_domain_fn.map_or(true, |in_domain| in_domain(key))) 115 | } 116 | -------------------------------------------------------------------------------- /src/snapshot.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use crate::{ 16 | db::DBAccess, ffi, AsColumnFamilyRef, DBIteratorWithThreadMode, DBPinnableSlice, 17 | DBRawIteratorWithThreadMode, Error, IteratorMode, ReadOptions, DB, 18 | }; 19 | 20 | /// A type alias to keep compatibility. See [`SnapshotWithThreadMode`] for details 21 | pub type Snapshot<'a> = SnapshotWithThreadMode<'a, DB>; 22 | 23 | /// A consistent view of the database at the point of creation. 
24 | /// 25 | /// # Examples 26 | /// 27 | /// ``` 28 | /// use rust_rocksdb::{DB, IteratorMode, Options}; 29 | /// 30 | /// let tempdir = tempfile::Builder::new() 31 | ///     .prefix("_path_for_rocksdb_storage3") 32 | ///     .tempdir() 33 | ///     .expect("Failed to create temporary path for the _path_for_rocksdb_storage3"); 34 | /// let path = tempdir.path(); 35 | /// { 36 | ///     let db = DB::open_default(path).unwrap(); 37 | ///     let snapshot = db.snapshot(); // Creates a longer-term snapshot of the DB, but closed when goes out of scope 38 | ///     let mut iter = snapshot.iterator(IteratorMode::Start); // Make as many iterators as you'd like from one snapshot 39 | /// } 40 | /// let _ = DB::destroy(&Options::default(), path); 41 | /// ``` 42 | /// 43 | pub struct SnapshotWithThreadMode<'a, D: DBAccess> { 44 | db: &'a D, 45 | pub(crate) inner: *const ffi::rocksdb_snapshot_t, 46 | } 47 | 48 | impl<'a, D: DBAccess> SnapshotWithThreadMode<'a, D> { 49 | /// Creates a new `SnapshotWithThreadMode` of the database `db`. 50 | pub fn new(db: &'a D) -> Self { 51 | let snapshot = unsafe { db.create_snapshot() }; 52 | Self { 53 | db, 54 | inner: snapshot, 55 | } 56 | } 57 | 58 | /// Creates an iterator over the data in this snapshot, using the default read options. 59 | pub fn iterator(&self, mode: IteratorMode) -> DBIteratorWithThreadMode<'a, D> { 60 | let readopts = ReadOptions::default(); 61 | self.iterator_opt(mode, readopts) 62 | } 63 | 64 | /// Creates an iterator over the data in this snapshot under the given column family, using 65 | /// the default read options. 66 | pub fn iterator_cf( 67 | &self, 68 | cf_handle: &impl AsColumnFamilyRef, 69 | mode: IteratorMode, 70 | ) -> DBIteratorWithThreadMode<D> { 71 | let readopts = ReadOptions::default(); 72 | self.iterator_cf_opt(cf_handle, readopts, mode) 73 | } 74 | 75 | /// Creates an iterator over the data in this snapshot, using the given read options.
76 | pub fn iterator_opt( 77 | &self, 78 | mode: IteratorMode, 79 | mut readopts: ReadOptions, 80 | ) -> DBIteratorWithThreadMode<'a, D> { 81 | readopts.set_snapshot(self); 82 | DBIteratorWithThreadMode::<D>::new(self.db, readopts, mode) 83 | } 84 | 85 | /// Creates an iterator over the data in this snapshot under the given column family, using 86 | /// the given read options. 87 | pub fn iterator_cf_opt( 88 | &self, 89 | cf_handle: &impl AsColumnFamilyRef, 90 | mut readopts: ReadOptions, 91 | mode: IteratorMode, 92 | ) -> DBIteratorWithThreadMode<D> { 93 | readopts.set_snapshot(self); 94 | DBIteratorWithThreadMode::new_cf(self.db, cf_handle.inner(), readopts, mode) 95 | } 96 | 97 | /// Creates a raw iterator over the data in this snapshot, using the default read options. 98 | pub fn raw_iterator(&self) -> DBRawIteratorWithThreadMode<D> { 99 | let readopts = ReadOptions::default(); 100 | self.raw_iterator_opt(readopts) 101 | } 102 | 103 | /// Creates a raw iterator over the data in this snapshot under the given column family, using 104 | /// the default read options. 105 | pub fn raw_iterator_cf( 106 | &self, 107 | cf_handle: &impl AsColumnFamilyRef, 108 | ) -> DBRawIteratorWithThreadMode<D> { 109 | let readopts = ReadOptions::default(); 110 | self.raw_iterator_cf_opt(cf_handle, readopts) 111 | } 112 | 113 | /// Creates a raw iterator over the data in this snapshot, using the given read options. 114 | pub fn raw_iterator_opt(&self, mut readopts: ReadOptions) -> DBRawIteratorWithThreadMode<D> { 115 | readopts.set_snapshot(self); 116 | DBRawIteratorWithThreadMode::new(self.db, readopts) 117 | } 118 | 119 | /// Creates a raw iterator over the data in this snapshot under the given column family, using 120 | /// the given read options.
121 | pub fn raw_iterator_cf_opt( 122 | &self, 123 | cf_handle: &impl AsColumnFamilyRef, 124 | mut readopts: ReadOptions, 125 | ) -> DBRawIteratorWithThreadMode<D> { 126 | readopts.set_snapshot(self); 127 | DBRawIteratorWithThreadMode::new_cf(self.db, cf_handle.inner(), readopts) 128 | } 129 | 130 | /// Returns the bytes associated with a key value with default read options. 131 | pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>, Error> { 132 | let readopts = ReadOptions::default(); 133 | self.get_opt(key, readopts) 134 | } 135 | 136 | /// Returns the bytes associated with a key value and given column family with default read 137 | /// options. 138 | pub fn get_cf<K: AsRef<[u8]>>( 139 | &self, 140 | cf: &impl AsColumnFamilyRef, 141 | key: K, 142 | ) -> Result<Option<Vec<u8>>, Error> { 143 | let readopts = ReadOptions::default(); 144 | self.get_cf_opt(cf, key.as_ref(), readopts) 145 | } 146 | 147 | /// Returns the bytes associated with a key value and given read options. 148 | pub fn get_opt<K: AsRef<[u8]>>( 149 | &self, 150 | key: K, 151 | mut readopts: ReadOptions, 152 | ) -> Result<Option<Vec<u8>>, Error> { 153 | readopts.set_snapshot(self); 154 | self.db.get_opt(key.as_ref(), &readopts) 155 | } 156 | 157 | /// Returns the bytes associated with a key value, given column family and read options. 158 | pub fn get_cf_opt<K: AsRef<[u8]>>( 159 | &self, 160 | cf: &impl AsColumnFamilyRef, 161 | key: K, 162 | mut readopts: ReadOptions, 163 | ) -> Result<Option<Vec<u8>>, Error> { 164 | readopts.set_snapshot(self); 165 | self.db.get_cf_opt(cf, key.as_ref(), &readopts) 166 | } 167 | 168 | /// Return the value associated with a key using RocksDB's PinnableSlice 169 | /// so as to avoid unnecessary memory copy. Similar to get_pinned_opt but 170 | /// leverages default options. 171 | pub fn get_pinned<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<DBPinnableSlice>, Error> { 172 | let readopts = ReadOptions::default(); 173 | self.get_pinned_opt(key, readopts) 174 | } 175 | 176 | /// Return the value associated with a key using RocksDB's PinnableSlice 177 | /// so as to avoid unnecessary memory copy.
Similar to get_pinned_cf_opt but 178 | /// leverages default options. 179 | pub fn get_pinned_cf<K: AsRef<[u8]>>( 180 | &self, 181 | cf: &impl AsColumnFamilyRef, 182 | key: K, 183 | ) -> Result<Option<DBPinnableSlice>, Error> { 184 | let readopts = ReadOptions::default(); 185 | self.get_pinned_cf_opt(cf, key.as_ref(), readopts) 186 | } 187 | 188 | /// Return the value associated with a key using RocksDB's PinnableSlice 189 | /// so as to avoid unnecessary memory copy. 190 | pub fn get_pinned_opt<K: AsRef<[u8]>>( 191 | &self, 192 | key: K, 193 | mut readopts: ReadOptions, 194 | ) -> Result<Option<DBPinnableSlice>, Error> { 195 | readopts.set_snapshot(self); 196 | self.db.get_pinned_opt(key.as_ref(), &readopts) 197 | } 198 | 199 | /// Return the value associated with a key using RocksDB's PinnableSlice 200 | /// so as to avoid unnecessary memory copy. Similar to get_pinned_opt but 201 | /// allows specifying ColumnFamily. 202 | pub fn get_pinned_cf_opt<K: AsRef<[u8]>>( 203 | &self, 204 | cf: &impl AsColumnFamilyRef, 205 | key: K, 206 | mut readopts: ReadOptions, 207 | ) -> Result<Option<DBPinnableSlice>, Error> { 208 | readopts.set_snapshot(self); 209 | self.db.get_pinned_cf_opt(cf, key.as_ref(), &readopts) 210 | } 211 | 212 | /// Returns the bytes associated with the given key values and default read options. 213 | pub fn multi_get<K: AsRef<[u8]>, I>(&self, keys: I) -> Vec<Result<Option<Vec<u8>>, Error>> 214 | where 215 | I: IntoIterator<Item = K>, 216 | { 217 | let readopts = ReadOptions::default(); 218 | self.multi_get_opt(keys, readopts) 219 | } 220 | 221 | /// Returns the bytes associated with the given key values and default read options. 222 | pub fn multi_get_cf<'b, K, I, W>(&self, keys_cf: I) -> Vec<Result<Option<Vec<u8>>, Error>> 223 | where 224 | K: AsRef<[u8]>, 225 | I: IntoIterator<Item = (&'b W, K)>, 226 | W: AsColumnFamilyRef + 'b, 227 | { 228 | let readopts = ReadOptions::default(); 229 | self.multi_get_cf_opt(keys_cf, readopts) 230 | } 231 | 232 | /// Returns the bytes associated with the given key values and given read options.
233 | pub fn multi_get_opt<K, I>( 234 | &self, 235 | keys: I, 236 | mut readopts: ReadOptions, 237 | ) -> Vec<Result<Option<Vec<u8>>, Error>> 238 | where 239 | K: AsRef<[u8]>, 240 | I: IntoIterator<Item = K>, 241 | { 242 | readopts.set_snapshot(self); 243 | self.db.multi_get_opt(keys, &readopts) 244 | } 245 | 246 | /// Returns the bytes associated with the given key values, given column family and read options. 247 | pub fn multi_get_cf_opt<'b, K, I, W>( 248 | &self, 249 | keys_cf: I, 250 | mut readopts: ReadOptions, 251 | ) -> Vec<Result<Option<Vec<u8>>, Error>> 252 | where 253 | K: AsRef<[u8]>, 254 | I: IntoIterator<Item = (&'b W, K)>, 255 | W: AsColumnFamilyRef + 'b, 256 | { 257 | readopts.set_snapshot(self); 258 | self.db.multi_get_cf_opt(keys_cf, &readopts) 259 | } 260 | } 261 | 262 | impl<D: DBAccess> Drop for SnapshotWithThreadMode<'_, D> { 263 | fn drop(&mut self) { 264 | unsafe { 265 | self.db.release_snapshot(self.inner); 266 | } 267 | } 268 | } 269 | 270 | /// `Send` and `Sync` implementations for `SnapshotWithThreadMode` are safe, because `SnapshotWithThreadMode` is 271 | /// immutable and can be safely shared between threads. 272 | unsafe impl<D: DBAccess> Send for SnapshotWithThreadMode<'_, D> {} 273 | unsafe impl<D: DBAccess> Sync for SnapshotWithThreadMode<'_, D> {} 274 | -------------------------------------------------------------------------------- /src/sst_file_writer.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Lucjan Suski 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | //` 15 | 16 | use crate::{ffi, ffi_util::to_cpath, Error, Options}; 17 | 18 | use libc::{self, c_char, size_t}; 19 | use std::{ffi::CString, marker::PhantomData, path::Path}; 20 | 21 | /// SstFileWriter is used to create sst files that can be added to database later 22 | /// All keys in files generated by SstFileWriter will have sequence number = 0. 23 | pub struct SstFileWriter<'a> { 24 | pub(crate) inner: *mut ffi::rocksdb_sstfilewriter_t, 25 | // Options are needed to be alive when calling open(), 26 | // so let's make sure it doesn't get, dropped for the lifetime of SstFileWriter 27 | phantom: PhantomData<&'a Options>, 28 | } 29 | 30 | unsafe impl Send for SstFileWriter<'_> {} 31 | unsafe impl Sync for SstFileWriter<'_> {} 32 | 33 | struct EnvOptions { 34 | inner: *mut ffi::rocksdb_envoptions_t, 35 | } 36 | 37 | impl Drop for EnvOptions { 38 | fn drop(&mut self) { 39 | unsafe { 40 | ffi::rocksdb_envoptions_destroy(self.inner); 41 | } 42 | } 43 | } 44 | 45 | impl Default for EnvOptions { 46 | fn default() -> Self { 47 | let opts = unsafe { ffi::rocksdb_envoptions_create() }; 48 | Self { inner: opts } 49 | } 50 | } 51 | 52 | impl<'a> SstFileWriter<'a> { 53 | /// Initializes SstFileWriter with given DB options. 54 | pub fn create(opts: &'a Options) -> Self { 55 | let env_options = EnvOptions::default(); 56 | 57 | let writer = Self::create_raw(opts, &env_options); 58 | 59 | Self { 60 | inner: writer, 61 | phantom: PhantomData, 62 | } 63 | } 64 | 65 | fn create_raw(opts: &Options, env_opts: &EnvOptions) -> *mut ffi::rocksdb_sstfilewriter_t { 66 | unsafe { ffi::rocksdb_sstfilewriter_create(env_opts.inner, opts.inner) } 67 | } 68 | 69 | /// Prepare SstFileWriter to write into file located at "file_path". 
70 | pub fn open>(&'a self, path: P) -> Result<(), Error> { 71 | let cpath = to_cpath(&path)?; 72 | self.open_raw(&cpath) 73 | } 74 | 75 | fn open_raw(&'a self, cpath: &CString) -> Result<(), Error> { 76 | unsafe { 77 | ffi_try!(ffi::rocksdb_sstfilewriter_open( 78 | self.inner, 79 | cpath.as_ptr() as *const _ 80 | )); 81 | 82 | Ok(()) 83 | } 84 | } 85 | 86 | /// Finalize writing to sst file and close file. 87 | pub fn finish(&mut self) -> Result<(), Error> { 88 | unsafe { 89 | ffi_try!(ffi::rocksdb_sstfilewriter_finish(self.inner,)); 90 | Ok(()) 91 | } 92 | } 93 | 94 | /// returns the current file size 95 | pub fn file_size(&self) -> u64 { 96 | let mut file_size: u64 = 0; 97 | unsafe { 98 | ffi::rocksdb_sstfilewriter_file_size(self.inner, &mut file_size); 99 | } 100 | file_size 101 | } 102 | 103 | /// Adds a Put key with value to currently opened file 104 | /// REQUIRES: key is after any previously added key according to comparator. 105 | pub fn put(&mut self, key: K, value: V) -> Result<(), Error> 106 | where 107 | K: AsRef<[u8]>, 108 | V: AsRef<[u8]>, 109 | { 110 | let key = key.as_ref(); 111 | let value = value.as_ref(); 112 | unsafe { 113 | ffi_try!(ffi::rocksdb_sstfilewriter_put( 114 | self.inner, 115 | key.as_ptr() as *const c_char, 116 | key.len() as size_t, 117 | value.as_ptr() as *const c_char, 118 | value.len() as size_t, 119 | )); 120 | Ok(()) 121 | } 122 | } 123 | 124 | /// Adds a Put key with value to currently opened file 125 | /// REQUIRES: key is after any previously added key according to comparator. 
126 | pub fn put_with_ts(&mut self, key: K, ts: S, value: V) -> Result<(), Error> 127 | where 128 | K: AsRef<[u8]>, 129 | V: AsRef<[u8]>, 130 | S: AsRef<[u8]>, 131 | { 132 | let key = key.as_ref(); 133 | let value = value.as_ref(); 134 | let ts = ts.as_ref(); 135 | unsafe { 136 | ffi_try!(ffi::rocksdb_sstfilewriter_put_with_ts( 137 | self.inner, 138 | key.as_ptr() as *const c_char, 139 | key.len() as size_t, 140 | ts.as_ptr() as *const c_char, 141 | ts.len() as size_t, 142 | value.as_ptr() as *const c_char, 143 | value.len() as size_t, 144 | )); 145 | Ok(()) 146 | } 147 | } 148 | 149 | /// Adds a Merge key with value to currently opened file 150 | /// REQUIRES: key is after any previously added key according to comparator. 151 | pub fn merge(&mut self, key: K, value: V) -> Result<(), Error> 152 | where 153 | K: AsRef<[u8]>, 154 | V: AsRef<[u8]>, 155 | { 156 | let key = key.as_ref(); 157 | let value = value.as_ref(); 158 | 159 | unsafe { 160 | ffi_try!(ffi::rocksdb_sstfilewriter_merge( 161 | self.inner, 162 | key.as_ptr() as *const c_char, 163 | key.len() as size_t, 164 | value.as_ptr() as *const c_char, 165 | value.len() as size_t, 166 | )); 167 | Ok(()) 168 | } 169 | } 170 | 171 | /// Adds a deletion key to currently opened file 172 | /// REQUIRES: key is after any previously added key according to comparator. 173 | pub fn delete>(&mut self, key: K) -> Result<(), Error> { 174 | let key = key.as_ref(); 175 | 176 | unsafe { 177 | ffi_try!(ffi::rocksdb_sstfilewriter_delete( 178 | self.inner, 179 | key.as_ptr() as *const c_char, 180 | key.len() as size_t, 181 | )); 182 | Ok(()) 183 | } 184 | } 185 | 186 | /// Adds a deletion key to currently opened file 187 | /// REQUIRES: key is after any previously added key according to comparator. 
188 | pub fn delete_with_ts, S: AsRef<[u8]>>( 189 | &mut self, 190 | key: K, 191 | ts: S, 192 | ) -> Result<(), Error> { 193 | let key = key.as_ref(); 194 | let ts = ts.as_ref(); 195 | unsafe { 196 | ffi_try!(ffi::rocksdb_sstfilewriter_delete_with_ts( 197 | self.inner, 198 | key.as_ptr() as *const c_char, 199 | key.len() as size_t, 200 | ts.as_ptr() as *const c_char, 201 | ts.len() as size_t, 202 | )); 203 | Ok(()) 204 | } 205 | } 206 | } 207 | 208 | impl Drop for SstFileWriter<'_> { 209 | fn drop(&mut self) { 210 | unsafe { 211 | ffi::rocksdb_sstfilewriter_destroy(self.inner); 212 | } 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /src/statistics.rs: -------------------------------------------------------------------------------- 1 | use crate::ffi; 2 | 3 | #[derive(Debug, Clone)] 4 | pub struct NameParseError; 5 | impl core::fmt::Display for NameParseError { 6 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 7 | write!(f, "unrecognized name") 8 | } 9 | } 10 | 11 | impl std::error::Error for NameParseError {} 12 | 13 | // Helper macro to generate iterable nums that translate into static strings mapped from the cpp 14 | // land. 15 | macro_rules! 
iterable_named_enum {
    (
        $(#[$m:meta])*
        $type_vis:vis enum $typename:ident {
            $(
                $(#[$variant_meta:meta])*
                $variant:ident($variant_str:literal) $(= $value:expr)?,
            )+
        }
    ) => {
        // Main Type
        #[allow(clippy::all)]
        $(#[$m])*
        $type_vis enum $typename {
            $(
                $(#[$variant_meta])*
                $variant$( = $value)?,
            )+
        }

        #[automatically_derived]
        impl $typename {
            #[doc = "The corresponding rocksdb string identifier for this variant"]
            pub const fn name(&self) -> &'static str {
                match self {
                    $(
                        $typename::$variant => $variant_str,
                    )+
                }
            }
            pub fn iter() -> ::core::slice::Iter<'static, $typename> {
                static VARIANTS: &'static [$typename] = &[
                    $(
                        $typename::$variant,
                    )+
                ];
                VARIANTS.iter()
            }
        }


        #[automatically_derived]
        impl ::core::str::FromStr for $typename {
            type Err = NameParseError;
            fn from_str(s: &str) -> Result<Self, Self::Err> {
                match s {
                    $(
                        $variant_str => Ok($typename::$variant),
                    )+
                    _ => Err(NameParseError),
                }
            }
        }

        #[automatically_derived]
        impl ::core::fmt::Display for $typename {
            fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
                self.name().fmt(f)
            }
        }
    };
}

/// StatsLevel can be used to reduce statistics overhead by skipping certain
/// types of stats in the stats collection process.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u8)]
pub enum StatsLevel {
    /// Disable all metrics
    DisableAll = 0,
    /// Disable timer stats, and skip histogram stats
    ExceptHistogramOrTimers = 2,
    /// Skip timer stats
    ExceptTimers,
    /// Collect all stats except time inside mutex lock AND time spent on
    /// compression.
91 | ExceptDetailedTimers, 92 | /// Collect all stats except the counters requiring to get time inside the 93 | /// mutex lock. 94 | ExceptTimeForMutex, 95 | /// Collect all stats, including measuring duration of mutex operations. 96 | /// If getting time is expensive on the platform to run, it can 97 | /// reduce scalability to more threads, especially for writes. 98 | All, 99 | } 100 | 101 | include!("statistics_enum_ticker.rs"); 102 | include!("statistics_enum_histogram.rs"); 103 | 104 | pub struct HistogramData { 105 | pub(crate) inner: *mut ffi::rocksdb_statistics_histogram_data_t, 106 | } 107 | 108 | impl HistogramData { 109 | pub fn new() -> HistogramData { 110 | HistogramData::default() 111 | } 112 | pub fn median(&self) -> f64 { 113 | unsafe { ffi::rocksdb_statistics_histogram_data_get_median(self.inner) } 114 | } 115 | pub fn average(&self) -> f64 { 116 | unsafe { ffi::rocksdb_statistics_histogram_data_get_average(self.inner) } 117 | } 118 | pub fn p95(&self) -> f64 { 119 | unsafe { ffi::rocksdb_statistics_histogram_data_get_p95(self.inner) } 120 | } 121 | pub fn p99(&self) -> f64 { 122 | unsafe { ffi::rocksdb_statistics_histogram_data_get_p99(self.inner) } 123 | } 124 | pub fn max(&self) -> f64 { 125 | unsafe { ffi::rocksdb_statistics_histogram_data_get_max(self.inner) } 126 | } 127 | pub fn min(&self) -> f64 { 128 | unsafe { ffi::rocksdb_statistics_histogram_data_get_min(self.inner) } 129 | } 130 | pub fn sum(&self) -> u64 { 131 | unsafe { ffi::rocksdb_statistics_histogram_data_get_sum(self.inner) } 132 | } 133 | pub fn count(&self) -> u64 { 134 | unsafe { ffi::rocksdb_statistics_histogram_data_get_count(self.inner) } 135 | } 136 | pub fn std_dev(&self) -> f64 { 137 | unsafe { ffi::rocksdb_statistics_histogram_data_get_std_dev(self.inner) } 138 | } 139 | } 140 | 141 | impl Default for HistogramData { 142 | fn default() -> Self { 143 | let histogram_data_inner = unsafe { ffi::rocksdb_statistics_histogram_data_create() }; 144 | assert!( 145 | 
!histogram_data_inner.is_null(), 146 | "Could not create RocksDB histogram data" 147 | ); 148 | 149 | Self { 150 | inner: histogram_data_inner, 151 | } 152 | } 153 | } 154 | 155 | impl Drop for HistogramData { 156 | fn drop(&mut self) { 157 | unsafe { 158 | ffi::rocksdb_statistics_histogram_data_destroy(self.inner); 159 | } 160 | } 161 | } 162 | 163 | #[test] 164 | fn sanity_checks() { 165 | let want = "rocksdb.async.read.bytes"; 166 | assert_eq!(want, Histogram::AsyncReadBytes.name()); 167 | 168 | let want = "rocksdb.block.cache.index.miss"; 169 | assert_eq!(want, Ticker::BlockCacheIndexMiss.to_string()); 170 | 171 | // assert enum lengths 172 | assert_eq!(Ticker::iter().count(), 214 /* TICKER_ENUM_MAX */); 173 | assert_eq!(Histogram::iter().count(), 63 /* HISTOGRAM_ENUM_MAX */); 174 | } 175 | -------------------------------------------------------------------------------- /src/statistics_enum_histogram.rs: -------------------------------------------------------------------------------- 1 | // **** DO NOT modify this file! **** 2 | // This file is generated by cmd: 3 | // gen_statistics.bash rocksdb/monitoring/statistics.cc Histogram 4 | iterable_named_enum! 
{
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[repr(u32)]
    pub enum Histogram {
        DbGet("rocksdb.db.get.micros"),
        DbWrite("rocksdb.db.write.micros"),
        CompactionTime("rocksdb.compaction.times.micros"),
        CompactionCpuTime("rocksdb.compaction.times.cpu_micros"),
        SubcompactionSetupTime("rocksdb.subcompaction.setup.times.micros"),
        TableSyncMicros("rocksdb.table.sync.micros"),
        CompactionOutfileSyncMicros("rocksdb.compaction.outfile.sync.micros"),
        WalFileSyncMicros("rocksdb.wal.file.sync.micros"),
        ManifestFileSyncMicros("rocksdb.manifest.file.sync.micros"),
        TableOpenIoMicros("rocksdb.table.open.io.micros"),
        DbMultiget("rocksdb.db.multiget.micros"),
        ReadBlockCompactionMicros("rocksdb.read.block.compaction.micros"),
        ReadBlockGetMicros("rocksdb.read.block.get.micros"),
        WriteRawBlockMicros("rocksdb.write.raw.block.micros"),
        NumFilesInSingleCompaction("rocksdb.numfiles.in.singlecompaction"),
        DbSeek("rocksdb.db.seek.micros"),
        WriteStall("rocksdb.db.write.stall"),
        SstReadMicros("rocksdb.sst.read.micros"),
        FileReadFlushMicros("rocksdb.file.read.flush.micros"),
        FileReadCompactionMicros("rocksdb.file.read.compaction.micros"),
        FileReadDbOpenMicros("rocksdb.file.read.db.open.micros"),
        FileReadGetMicros("rocksdb.file.read.get.micros"),
        FileReadMultigetMicros("rocksdb.file.read.multiget.micros"),
        FileReadDbIteratorMicros("rocksdb.file.read.db.iterator.micros"),
        FileReadVerifyDbChecksumMicros("rocksdb.file.read.verify.db.checksum.micros"),
        FileReadVerifyFileChecksumsMicros("rocksdb.file.read.verify.file.checksums.micros"),
        SstWriteMicros("rocksdb.sst.write.micros"),
        FileWriteFlushMicros("rocksdb.file.write.flush.micros"),
        FileWriteCompactionMicros("rocksdb.file.write.compaction.micros"),
        FileWriteDbOpenMicros("rocksdb.file.write.db.open.micros"),
        NumSubcompactionsScheduled("rocksdb.num.subcompactions.scheduled"),
        BytesPerRead("rocksdb.bytes.per.read"),
        BytesPerWrite("rocksdb.bytes.per.write"),
        BytesPerMultiget("rocksdb.bytes.per.multiget"),
        CompressionTimesNanos("rocksdb.compression.times.nanos"),
        DecompressionTimesNanos("rocksdb.decompression.times.nanos"),
        ReadNumMergeOperands("rocksdb.read.num.merge_operands"),
        BlobDbKeySize("rocksdb.blobdb.key.size"),
        BlobDbValueSize("rocksdb.blobdb.value.size"),
        BlobDbWriteMicros("rocksdb.blobdb.write.micros"),
        BlobDbGetMicros("rocksdb.blobdb.get.micros"),
        BlobDbMultigetMicros("rocksdb.blobdb.multiget.micros"),
        BlobDbSeekMicros("rocksdb.blobdb.seek.micros"),
        BlobDbNextMicros("rocksdb.blobdb.next.micros"),
        BlobDbPrevMicros("rocksdb.blobdb.prev.micros"),
        BlobDbBlobFileWriteMicros("rocksdb.blobdb.blob.file.write.micros"),
        BlobDbBlobFileReadMicros("rocksdb.blobdb.blob.file.read.micros"),
        BlobDbBlobFileSyncMicros("rocksdb.blobdb.blob.file.sync.micros"),
        BlobDbCompressionMicros("rocksdb.blobdb.compression.micros"),
        BlobDbDecompressionMicros("rocksdb.blobdb.decompression.micros"),
        FlushTime("rocksdb.db.flush.micros"),
        SstBatchSize("rocksdb.sst.batch.size"),
        MultigetIoBatchSize("rocksdb.multiget.io.batch.size"),
        NumIndexAndFilterBlocksReadPerLevel("rocksdb.num.index.and.filter.blocks.read.per.level"),
        NumSstReadPerLevel("rocksdb.num.sst.read.per.level"),
        NumLevelReadPerMultiget("rocksdb.num.level.read.per.multiget"),
        ErrorHandlerAutoresumeRetryCount("rocksdb.error.handler.autoresume.retry.count"),
        AsyncReadBytes("rocksdb.async.read.bytes"),
        PollWaitMicros("rocksdb.poll.wait.micros"),
        CompactionPrefetchBytes("rocksdb.compaction.prefetch.bytes"),
        PrefetchedBytesDiscarded("rocksdb.prefetched.bytes.discarded"),
        AsyncPrefetchAbortMicros("rocksdb.async.prefetch.abort.micros"),
        TableOpenPrefetchTailReadBytes("rocksdb.table.open.prefetch.tail.read.bytes"),
    }
}
-------------------------------------------------------------------------------- /src/transactions/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Yiyuan Liu 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | mod optimistic_transaction_db; 17 | mod options; 18 | mod transaction; 19 | mod transaction_db; 20 | 21 | pub use optimistic_transaction_db::OptimisticTransactionDB; 22 | pub use options::{OptimisticTransactionOptions, TransactionDBOptions, TransactionOptions}; 23 | pub use transaction::Transaction; 24 | pub use transaction_db::TransactionDB; 25 | -------------------------------------------------------------------------------- /tests/fail/checkpoint_outlive_db.rs: -------------------------------------------------------------------------------- 1 | use rust_rocksdb::{checkpoint::Checkpoint, DB}; 2 | 3 | fn main() { 4 | let _checkpoint = { 5 | let db = DB::open_default("foo").unwrap(); 6 | Checkpoint::new(&db) 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/checkpoint_outlive_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/checkpoint_outlive_db.rs:6:25 3 | | 4 | 4 | let _checkpoint = { 5 | | ----------- borrow later stored here 6 | 5 | let db = 
DB::open_default("foo").unwrap(); 7 | | -- binding `db` declared here 8 | 6 | Checkpoint::new(&db) 9 | | ^^^ borrowed value does not live long enough 10 | 7 | }; 11 | | - `db` dropped here while still borrowed 12 | -------------------------------------------------------------------------------- /tests/fail/iterator_outlive_db.rs: -------------------------------------------------------------------------------- 1 | use rust_rocksdb::{IteratorMode, DB}; 2 | 3 | fn main() { 4 | let _iter = { 5 | let db = DB::open_default("foo").unwrap(); 6 | db.iterator(IteratorMode::Start) 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/iterator_outlive_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/iterator_outlive_db.rs:6:9 3 | | 4 | 4 | let _iter = { 5 | | ----- borrow later stored here 6 | 5 | let db = DB::open_default("foo").unwrap(); 7 | | -- binding `db` declared here 8 | 6 | db.iterator(IteratorMode::Start) 9 | | ^^ borrowed value does not live long enough 10 | 7 | }; 11 | | - `db` dropped here while still borrowed 12 | -------------------------------------------------------------------------------- /tests/fail/open_with_multiple_refs_as_single_threaded.rs: -------------------------------------------------------------------------------- 1 | use rust_rocksdb::{DBWithThreadMode, Options, SingleThreaded}; 2 | 3 | fn main() { 4 | let db = DBWithThreadMode::::open_default("/path/to/dummy").unwrap(); 5 | let db_ref1 = &db; 6 | let db_ref2 = &db; 7 | let opts = Options::default(); 8 | db_ref1.create_cf("cf1", &opts).unwrap(); 9 | db_ref2.create_cf("cf2", &opts).unwrap(); 10 | } 11 | -------------------------------------------------------------------------------- /tests/fail/open_with_multiple_refs_as_single_threaded.stderr: -------------------------------------------------------------------------------- 1 | 
error[E0596]: cannot borrow `*db_ref1` as mutable, as it is behind a `&` reference 2 | --> tests/fail/open_with_multiple_refs_as_single_threaded.rs:8:5 3 | | 4 | 8 | db_ref1.create_cf("cf1", &opts).unwrap(); 5 | | ^^^^^^^ `db_ref1` is a `&` reference, so the data it refers to cannot be borrowed as mutable 6 | | 7 | help: consider changing this to be a mutable reference 8 | | 9 | 5 | let db_ref1 = &mut db; 10 | | +++ 11 | 12 | error[E0596]: cannot borrow `*db_ref2` as mutable, as it is behind a `&` reference 13 | --> tests/fail/open_with_multiple_refs_as_single_threaded.rs:9:5 14 | | 15 | 9 | db_ref2.create_cf("cf2", &opts).unwrap(); 16 | | ^^^^^^^ `db_ref2` is a `&` reference, so the data it refers to cannot be borrowed as mutable 17 | | 18 | help: consider changing this to be a mutable reference 19 | | 20 | 6 | let db_ref2 = &mut db; 21 | | +++ 22 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_db.rs: -------------------------------------------------------------------------------- 1 | use rust_rocksdb::DB; 2 | 3 | fn main() { 4 | let _snapshot = { 5 | let db = DB::open_default("foo").unwrap(); 6 | db.snapshot() 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/snapshot_outlive_db.rs:6:9 3 | | 4 | 4 | let _snapshot = { 5 | | --------- borrow later stored here 6 | 5 | let db = DB::open_default("foo").unwrap(); 7 | | -- binding `db` declared here 8 | 6 | db.snapshot() 9 | | ^^ borrowed value does not live long enough 10 | 7 | }; 11 | | - `db` dropped here while still borrowed 12 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction.rs: 
-------------------------------------------------------------------------------- 1 | use rust_rocksdb::{SingleThreaded, TransactionDB}; 2 | 3 | fn main() { 4 | let db = TransactionDB::::open_default("foo").unwrap(); 5 | let _snapshot = { 6 | let txn = db.transaction(); 7 | txn.snapshot() 8 | }; 9 | } 10 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `txn` does not live long enough 2 | --> tests/fail/snapshot_outlive_transaction.rs:7:9 3 | | 4 | 5 | let _snapshot = { 5 | | --------- borrow later stored here 6 | 6 | let txn = db.transaction(); 7 | | --- binding `txn` declared here 8 | 7 | txn.snapshot() 9 | | ^^^ borrowed value does not live long enough 10 | 8 | }; 11 | | - `txn` dropped here while still borrowed 12 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction_db.rs: -------------------------------------------------------------------------------- 1 | use rust_rocksdb::{SingleThreaded, TransactionDB}; 2 | 3 | fn main() { 4 | let _snapshot = { 5 | let db = TransactionDB::::open_default("foo").unwrap(); 6 | db.snapshot() 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/snapshot_outlive_transaction_db.rs:6:9 3 | | 4 | 4 | let _snapshot = { 5 | | --------- borrow later stored here 6 | 5 | let db = TransactionDB::::open_default("foo").unwrap(); 7 | | -- binding `db` declared here 8 | 6 | db.snapshot() 9 | | ^^ borrowed value does not live long enough 10 | 7 | }; 11 | | - `db` dropped here while still borrowed 12 | 
-------------------------------------------------------------------------------- /tests/fail/transaction_outlive_transaction_db.rs: -------------------------------------------------------------------------------- 1 | use rust_rocksdb::{SingleThreaded, TransactionDB}; 2 | 3 | fn main() { 4 | let _txn = { 5 | let db = TransactionDB::::open_default("foo").unwrap(); 6 | db.transaction() 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/transaction_outlive_transaction_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/transaction_outlive_transaction_db.rs:6:9 3 | | 4 | 4 | let _txn = { 5 | | ---- borrow later stored here 6 | 5 | let db = TransactionDB::::open_default("foo").unwrap(); 7 | | -- binding `db` declared here 8 | 6 | db.transaction() 9 | | ^^ borrowed value does not live long enough 10 | 7 | }; 11 | | - `db` dropped here while still borrowed 12 | -------------------------------------------------------------------------------- /tests/test_backup.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use rust_rocksdb::{ 20 | backup::{BackupEngine, BackupEngineOptions, RestoreOptions}, 21 | Env, DB, 22 | }; 23 | use util::DBPath; 24 | 25 | #[test] 26 | fn restore_from_latest() { 27 | // create backup 28 | let path = DBPath::new("restore_from_latest_test"); 29 | let restore_path = DBPath::new("restore_from_latest_path"); 30 | { 31 | let db = DB::open_default(&path).unwrap(); 32 | assert!(db.put(b"k1", b"v1111").is_ok()); 33 | let value = db.get(b"k1"); 34 | assert_eq!(value.unwrap().unwrap(), b"v1111"); 35 | { 36 | let backup_path = DBPath::new("restore_from_latest_test_backup"); 37 | let env = Env::new().unwrap(); 38 | let backup_opts = BackupEngineOptions::new(&backup_path).unwrap(); 39 | 40 | let mut backup_engine = BackupEngine::open(&backup_opts, &env).unwrap(); 41 | assert!(backup_engine.create_new_backup(&db).is_ok()); 42 | 43 | // check backup info 44 | let info = backup_engine.get_backup_info(); 45 | assert!(!info.is_empty()); 46 | info.iter().for_each(|i| { 47 | assert!(backup_engine.verify_backup(i.backup_id).is_ok()); 48 | assert!(i.size > 0); 49 | }); 50 | 51 | let mut restore_option = RestoreOptions::default(); 52 | restore_option.set_keep_log_files(false); // true to keep log files 53 | let restore_status = backup_engine.restore_from_latest_backup( 54 | &restore_path, 55 | &restore_path, 56 | &restore_option, 57 | ); 58 | assert!(restore_status.is_ok()); 59 | 60 | let db_restore = DB::open_default(&restore_path).unwrap(); 61 | let value = db_restore.get(b"k1"); 62 | assert_eq!(value.unwrap().unwrap(), b"v1111"); 63 | } 64 | } 65 | } 66 | 67 | #[test] 68 | fn restore_from_backup() { 69 | // create backup 70 | let path = DBPath::new("restore_from_backup_test"); 71 | let restore_path = DBPath::new("restore_from_backup_path"); 72 | { 73 | let db = DB::open_default(&path).unwrap(); 74 | assert!(db.put(b"k1", b"v1111").is_ok()); 75 | let value = db.get(b"k1"); 76 | 
assert_eq!(value.unwrap().unwrap(), b"v1111"); 77 | { 78 | let backup_path = DBPath::new("restore_from_latest_test_backup"); 79 | let env = Env::new().unwrap(); 80 | let backup_opts = BackupEngineOptions::new(&backup_path).unwrap(); 81 | 82 | let mut backup_engine = BackupEngine::open(&backup_opts, &env).unwrap(); 83 | assert!(backup_engine.create_new_backup(&db).is_ok()); 84 | 85 | // check backup info 86 | let info = backup_engine.get_backup_info(); 87 | assert!(!info.is_empty()); 88 | info.iter().for_each(|i| { 89 | assert!(backup_engine.verify_backup(i.backup_id).is_ok()); 90 | assert!(i.size > 0); 91 | }); 92 | 93 | let backup_id = info.first().unwrap().backup_id; 94 | let mut restore_option = RestoreOptions::default(); 95 | restore_option.set_keep_log_files(false); // true to keep log files 96 | let restore_status = backup_engine.restore_from_backup( 97 | &restore_path, 98 | &restore_path, 99 | &restore_option, 100 | backup_id, 101 | ); 102 | assert!(restore_status.is_ok()); 103 | 104 | let db_restore = DB::open_default(&restore_path).unwrap(); 105 | let value = db_restore.get(b"k1"); 106 | assert_eq!(value.unwrap().unwrap(), b"v1111"); 107 | } 108 | } 109 | } 110 | 111 | fn assert_send_generic() {} 112 | 113 | #[test] 114 | fn assert_send() { 115 | assert_send_generic::(); 116 | } 117 | -------------------------------------------------------------------------------- /tests/test_checkpoint.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use rust_rocksdb::{checkpoint::Checkpoint, Options, DB}; 20 | use util::DBPath; 21 | 22 | #[test] 23 | pub fn test_single_checkpoint() { 24 | const PATH_PREFIX: &str = "_rust_rocksdb_cp_single_"; 25 | 26 | // Create DB with some data 27 | let db_path = DBPath::new(&format!("{PATH_PREFIX}db1")); 28 | 29 | let mut opts = Options::default(); 30 | opts.create_if_missing(true); 31 | let db = DB::open(&opts, &db_path).unwrap(); 32 | 33 | db.put(b"k1", b"v1").unwrap(); 34 | db.put(b"k2", b"v2").unwrap(); 35 | db.put(b"k3", b"v3").unwrap(); 36 | db.put(b"k4", b"v4").unwrap(); 37 | 38 | // Create checkpoint 39 | let cp1 = Checkpoint::new(&db).unwrap(); 40 | let cp1_path = DBPath::new(&format!("{PATH_PREFIX}cp1")); 41 | cp1.create_checkpoint(&cp1_path).unwrap(); 42 | 43 | // Verify checkpoint 44 | let cp = DB::open_default(&cp1_path).unwrap(); 45 | 46 | assert_eq!(cp.get(b"k1").unwrap().unwrap(), b"v1"); 47 | assert_eq!(cp.get(b"k2").unwrap().unwrap(), b"v2"); 48 | assert_eq!(cp.get(b"k3").unwrap().unwrap(), b"v3"); 49 | assert_eq!(cp.get(b"k4").unwrap().unwrap(), b"v4"); 50 | } 51 | 52 | #[test] 53 | pub fn test_multi_checkpoints() { 54 | const PATH_PREFIX: &str = "_rust_rocksdb_cp_multi_"; 55 | 56 | // Create DB with some data 57 | let db_path = DBPath::new(&format!("{PATH_PREFIX}db1")); 58 | 59 | let mut opts = Options::default(); 60 | opts.create_if_missing(true); 61 | let db = DB::open(&opts, &db_path).unwrap(); 62 | 63 | db.put(b"k1", 
b"v1").unwrap(); 64 | db.put(b"k2", b"v2").unwrap(); 65 | db.put(b"k3", b"v3").unwrap(); 66 | db.put(b"k4", b"v4").unwrap(); 67 | 68 | // Create first checkpoint 69 | let cp1 = Checkpoint::new(&db).unwrap(); 70 | let cp1_path = DBPath::new(&format!("{PATH_PREFIX}cp1")); 71 | cp1.create_checkpoint(&cp1_path).unwrap(); 72 | 73 | // Verify checkpoint 74 | let cp = DB::open_default(&cp1_path).unwrap(); 75 | 76 | assert_eq!(cp.get(b"k1").unwrap().unwrap(), b"v1"); 77 | assert_eq!(cp.get(b"k2").unwrap().unwrap(), b"v2"); 78 | assert_eq!(cp.get(b"k3").unwrap().unwrap(), b"v3"); 79 | assert_eq!(cp.get(b"k4").unwrap().unwrap(), b"v4"); 80 | 81 | // Change some existing keys 82 | db.put(b"k1", b"modified").unwrap(); 83 | db.put(b"k2", b"changed").unwrap(); 84 | 85 | // Add some new keys 86 | db.put(b"k5", b"v5").unwrap(); 87 | db.put(b"k6", b"v6").unwrap(); 88 | 89 | // Create another checkpoint 90 | let cp2 = Checkpoint::new(&db).unwrap(); 91 | let cp2_path = DBPath::new(&format!("{PATH_PREFIX}cp2")); 92 | cp2.create_checkpoint(&cp2_path).unwrap(); 93 | 94 | // Verify second checkpoint 95 | let cp = DB::open_default(&cp2_path).unwrap(); 96 | 97 | assert_eq!(cp.get(b"k1").unwrap().unwrap(), b"modified"); 98 | assert_eq!(cp.get(b"k2").unwrap().unwrap(), b"changed"); 99 | assert_eq!(cp.get(b"k5").unwrap().unwrap(), b"v5"); 100 | assert_eq!(cp.get(b"k6").unwrap().unwrap(), b"v6"); 101 | } 102 | 103 | #[test] 104 | fn test_checkpoint_outlive_db() { 105 | let t = trybuild::TestCases::new(); 106 | t.compile_fail("tests/fail/checkpoint_outlive_db.rs"); 107 | } 108 | -------------------------------------------------------------------------------- /tests/test_compactionfilter.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use rust_rocksdb::{CompactionDecision, Options, DB}; 20 | use util::DBPath; 21 | 22 | #[cfg(test)] 23 | #[allow(unused_variables)] 24 | fn test_filter(level: u32, key: &[u8], value: &[u8]) -> CompactionDecision { 25 | use self::CompactionDecision::*; 26 | match key.first() { 27 | Some(&b'_') => Remove, 28 | Some(&b'%') => Change(b"secret"), 29 | _ => Keep, 30 | } 31 | } 32 | 33 | #[test] 34 | fn compaction_filter_test() { 35 | let path = DBPath::new("_rust_rocksdb_filter_test"); 36 | let mut opts = Options::default(); 37 | opts.create_if_missing(true); 38 | opts.set_compaction_filter("test", test_filter); 39 | { 40 | let db = DB::open(&opts, &path).unwrap(); 41 | let _ = db.put(b"k1", b"a"); 42 | let _ = db.put(b"_k", b"b"); 43 | let _ = db.put(b"%k", b"c"); 44 | db.compact_range(None::<&[u8]>, None::<&[u8]>); 45 | assert_eq!(&*db.get(b"k1").unwrap().unwrap(), b"a"); 46 | assert!(db.get(b"_k").unwrap().is_none()); 47 | assert_eq!(&*db.get(b"%k").unwrap().unwrap(), b"secret"); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /tests/test_comparator.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | use rust_rocksdb::{CompactOptions, Options, ReadOptions, DB}; 4 | use std::cmp::Ordering; 5 | use std::iter::FromIterator; 6 | use util::{U64Comparator, U64Timestamp}; 7 | 8 | /// This function is for ensuring test of backwards compatibility 9 | 
pub fn rocks_old_compare(one: &[u8], two: &[u8]) -> Ordering { 10 | one.cmp(two) 11 | } 12 | 13 | type CompareFn = dyn Fn(&[u8], &[u8]) -> Ordering; 14 | 15 | /// create database add some values, and iterate over these 16 | pub fn write_to_db_with_comparator(compare_fn: Box) -> Vec { 17 | let mut result_vec = Vec::new(); 18 | 19 | let tempdir = tempfile::Builder::new() 20 | .prefix("_path_for_rocksdb_storage") 21 | .tempdir() 22 | .expect("Failed to create temporary path for the _path_for_rocksdb_storage"); 23 | let path = tempdir.path(); 24 | { 25 | let mut db_opts = Options::default(); 26 | 27 | db_opts.create_missing_column_families(true); 28 | db_opts.create_if_missing(true); 29 | db_opts.set_comparator("cname", compare_fn); 30 | let db = DB::open(&db_opts, path).unwrap(); 31 | db.put(b"a-key", b"a-value").unwrap(); 32 | db.put(b"b-key", b"b-value").unwrap(); 33 | let mut iter = db.raw_iterator(); 34 | iter.seek_to_first(); 35 | while iter.valid() { 36 | let key = iter.key().unwrap(); 37 | // maybe not best way to copy? 
38 | let key_str = key.iter().map(|b| *b as char).collect::>(); 39 | result_vec.push(String::from_iter(key_str)); 40 | iter.next(); 41 | } 42 | } 43 | let _ = DB::destroy(&Options::default(), path); 44 | result_vec 45 | } 46 | 47 | #[test] 48 | /// First verify that using a function as a comparator works as expected 49 | /// This should verify backwards compatibility 50 | /// Then run a test with a clojure where an x-variable is passed 51 | /// Keep in mind that this variable must be moved to the clojure 52 | /// Then run a test with a reverse sorting clojure and make sure the order is reverted 53 | fn test_comparator() { 54 | let local_compare = move |one: &[u8], two: &[u8]| one.cmp(two); 55 | let x = 0; 56 | let local_compare_reverse = move |one: &[u8], two: &[u8]| { 57 | println!( 58 | "Use the x value from the closure scope to do something smart: {:?}", 59 | x 60 | ); 61 | match one.cmp(two) { 62 | Ordering::Less => Ordering::Greater, 63 | Ordering::Equal => Ordering::Equal, 64 | Ordering::Greater => Ordering::Less, 65 | } 66 | }; 67 | 68 | let old_res = write_to_db_with_comparator(Box::new(rocks_old_compare)); 69 | println!("Keys in normal sort order, no closure: {:?}", old_res); 70 | assert_eq!(vec!["a-key", "b-key"], old_res); 71 | let res_closure = write_to_db_with_comparator(Box::new(local_compare)); 72 | println!("Keys in normal sort order, closure: {:?}", res_closure); 73 | assert_eq!(res_closure, old_res); 74 | let res_closure_reverse = write_to_db_with_comparator(Box::new(local_compare_reverse)); 75 | println!( 76 | "Keys in reverse sort order, closure: {:?}", 77 | res_closure_reverse 78 | ); 79 | assert_eq!(vec!["b-key", "a-key"], res_closure_reverse); 80 | } 81 | 82 | #[test] 83 | fn test_comparator_with_ts() { 84 | let tempdir = tempfile::Builder::new() 85 | .prefix("_path_for_rocksdb_storage_with_ts") 86 | .tempdir() 87 | .expect("Failed to create temporary path for the _path_for_rocksdb_storage_with_ts."); 88 | let path = tempdir.path(); 89 | let 
_ = DB::destroy(&Options::default(), path); 90 | 91 | { 92 | let mut db_opts = Options::default(); 93 | db_opts.create_missing_column_families(true); 94 | db_opts.create_if_missing(true); 95 | db_opts.set_comparator_with_ts( 96 | U64Comparator::NAME, 97 | U64Timestamp::SIZE, 98 | Box::new(U64Comparator::compare), 99 | Box::new(U64Comparator::compare_ts), 100 | Box::new(U64Comparator::compare_without_ts), 101 | ); 102 | let db = DB::open(&db_opts, path).unwrap(); 103 | 104 | let key = b"hello"; 105 | let val1 = b"world0"; 106 | let val2 = b"world1"; 107 | 108 | let ts = U64Timestamp::new(1); 109 | let ts2 = U64Timestamp::new(2); 110 | let ts3 = U64Timestamp::new(3); 111 | 112 | let mut opts = ReadOptions::default(); 113 | opts.set_timestamp(ts); 114 | 115 | // basic put and get 116 | db.put_with_ts(key, ts, val1).unwrap(); 117 | let value = db.get_opt(key, &opts).unwrap(); 118 | assert_eq!(value.unwrap().as_slice(), val1); 119 | 120 | // update 121 | db.put_with_ts(key, ts2, val2).unwrap(); 122 | opts.set_timestamp(ts2); 123 | let value = db.get_opt(key, &opts).unwrap(); 124 | assert_eq!(value.unwrap().as_slice(), val2); 125 | 126 | // delete 127 | db.delete_with_ts(key, ts3).unwrap(); 128 | opts.set_timestamp(ts3); 129 | let value = db.get_opt(key, &opts).unwrap(); 130 | assert!(value.is_none()); 131 | 132 | // ts2 should read deleted data 133 | opts.set_timestamp(ts2); 134 | let value = db.get_opt(key, &opts).unwrap(); 135 | assert_eq!(value.unwrap().as_slice(), val2); 136 | 137 | // ts1 should read old data 138 | opts.set_timestamp(ts); 139 | let value = db.get_opt(key, &opts).unwrap(); 140 | assert_eq!(value.unwrap().as_slice(), val1); 141 | 142 | // test iterator with ts 143 | opts.set_timestamp(ts2); 144 | let mut iter = db.raw_iterator_opt(opts); 145 | iter.seek_to_first(); 146 | let mut result_vec = Vec::new(); 147 | while iter.valid() { 148 | let key = iter.key().unwrap(); 149 | // maybe not best way to copy? 
150 | let key_str = key.iter().map(|b| *b as char).collect::>(); 151 | result_vec.push(String::from_iter(key_str)); 152 | iter.next(); 153 | } 154 | assert_eq!(result_vec, ["hello"]); 155 | 156 | // test full_history_ts_low works 157 | let mut compact_opts = CompactOptions::default(); 158 | compact_opts.set_full_history_ts_low(ts2); 159 | db.compact_range_opt(None::<&[u8]>, None::<&[u8]>, &compact_opts); 160 | db.flush().unwrap(); 161 | 162 | let mut opts = ReadOptions::default(); 163 | opts.set_timestamp(ts3); 164 | let value = db.get_opt(key, &opts).unwrap(); 165 | assert_eq!(value, None); 166 | // cannot read with timestamp older than full_history_ts_low 167 | opts.set_timestamp(ts); 168 | assert!(db.get_opt(key, &opts).is_err()); 169 | } 170 | 171 | let _ = DB::destroy(&Options::default(), path); 172 | } 173 | 174 | #[test] 175 | fn test_comparator_with_column_family_with_ts() { 176 | let tempdir = tempfile::Builder::new() 177 | .prefix("_path_for_rocksdb_storage_with_column_family_with_ts") 178 | .tempdir() 179 | .expect("Failed to create temporary path for the _path_for_rocksdb_storage_with_column_family_with_ts."); 180 | let path = tempdir.path(); 181 | let _ = DB::destroy(&Options::default(), path); 182 | 183 | { 184 | let mut db_opts = Options::default(); 185 | db_opts.create_missing_column_families(true); 186 | db_opts.create_if_missing(true); 187 | 188 | let mut cf_opts = Options::default(); 189 | cf_opts.set_comparator_with_ts( 190 | U64Comparator::NAME, 191 | U64Timestamp::SIZE, 192 | Box::new(U64Comparator::compare), 193 | Box::new(U64Comparator::compare_ts), 194 | Box::new(U64Comparator::compare_without_ts), 195 | ); 196 | 197 | let cfs = vec![("cf", cf_opts)]; 198 | 199 | let db = DB::open_cf_with_opts(&db_opts, path, cfs).unwrap(); 200 | let cf = db.cf_handle("cf").unwrap(); 201 | 202 | let key = b"hello"; 203 | let val1 = b"world0"; 204 | let val2 = b"world1"; 205 | 206 | let ts = U64Timestamp::new(1); 207 | let ts2 = U64Timestamp::new(2); 208 | 
let ts3 = U64Timestamp::new(3); 209 | 210 | let mut opts = ReadOptions::default(); 211 | opts.set_timestamp(ts); 212 | 213 | // basic put and get 214 | db.put_cf_with_ts(&cf, key, ts, val1).unwrap(); 215 | let value = db.get_cf_opt(&cf, key, &opts).unwrap(); 216 | assert_eq!(value.unwrap().as_slice(), val1); 217 | 218 | // update 219 | db.put_cf_with_ts(&cf, key, ts2, val2).unwrap(); 220 | opts.set_timestamp(ts2); 221 | let value = db.get_cf_opt(&cf, key, &opts).unwrap(); 222 | assert_eq!(value.unwrap().as_slice(), val2); 223 | 224 | // delete 225 | db.delete_cf_with_ts(&cf, key, ts3).unwrap(); 226 | opts.set_timestamp(ts3); 227 | let value = db.get_cf_opt(&cf, key, &opts).unwrap(); 228 | assert!(value.is_none()); 229 | 230 | // ts2 should read deleted data 231 | opts.set_timestamp(ts2); 232 | let value = db.get_cf_opt(&cf, key, &opts).unwrap(); 233 | assert_eq!(value.unwrap().as_slice(), val2); 234 | 235 | // ts1 should read old data 236 | opts.set_timestamp(ts); 237 | let value = db.get_cf_opt(&cf, key, &opts).unwrap(); 238 | assert_eq!(value.unwrap().as_slice(), val1); 239 | 240 | // test iterator with ts 241 | opts.set_timestamp(ts2); 242 | let mut iter = db.raw_iterator_cf_opt(&cf, opts); 243 | iter.seek_to_first(); 244 | let mut result_vec = Vec::new(); 245 | while iter.valid() { 246 | let key = iter.key().unwrap(); 247 | // maybe not best way to copy? 248 | let key_str = key.iter().map(|b| *b as char).collect::>(); 249 | result_vec.push(String::from_iter(key_str)); 250 | iter.next(); 251 | } 252 | assert_eq!(result_vec, ["hello"]); 253 | 254 | // test full_history_ts_low works 255 | let mut compact_opts = CompactOptions::default(); 256 | compact_opts.set_full_history_ts_low(ts2); 257 | db.compact_range_cf_opt(&cf, None::<&[u8]>, None::<&[u8]>, &compact_opts); 258 | db.flush().unwrap(); 259 | 260 | // Attempt to read `full_history_ts_low`. 261 | // It should match the value we set earlier (`ts2`). 
262 | let full_history_ts_low = db.get_full_history_ts_low(&cf).unwrap(); 263 | assert_eq!(U64Timestamp::from(full_history_ts_low.as_slice()), ts2); 264 | 265 | let mut opts = ReadOptions::default(); 266 | opts.set_timestamp(ts3); 267 | let value = db.get_cf_opt(&cf, key, &opts).unwrap(); 268 | assert_eq!(value, None); 269 | // cannot read with timestamp older than full_history_ts_low 270 | opts.set_timestamp(ts); 271 | assert!(db.get_cf_opt(&cf, key, &opts).is_err()); 272 | } 273 | 274 | let _ = DB::destroy(&Options::default(), path); 275 | } 276 | -------------------------------------------------------------------------------- /tests/test_multithreaded.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | mod util; 16 | 17 | use std::{sync::Arc, thread}; 18 | 19 | use rust_rocksdb::DB; 20 | use util::DBPath; 21 | 22 | const N: usize = 100_000; 23 | 24 | #[test] 25 | pub fn test_multithreaded() { 26 | let n = DBPath::new("_rust_rocksdb_multithreadtest"); 27 | { 28 | let db = DB::open_default(&n).unwrap(); 29 | let db = Arc::new(db); 30 | 31 | db.put(b"key", b"value1").unwrap(); 32 | 33 | let db1 = db.clone(); 34 | let j1 = thread::spawn(move || { 35 | for _ in 1..N { 36 | db1.put(b"key", b"value1").unwrap(); 37 | } 38 | }); 39 | 40 | let db2 = db.clone(); 41 | let j2 = thread::spawn(move || { 42 | for _ in 1..N { 43 | db2.put(b"key", b"value2").unwrap(); 44 | } 45 | }); 46 | 47 | let j3 = thread::spawn(move || { 48 | for _ in 1..N { 49 | let result = match db.get(b"key") { 50 | Ok(Some(v)) => !(&v[..] != b"value1" && &v[..] != b"value2"), 51 | _ => false, 52 | }; 53 | assert!(result); 54 | } 55 | }); 56 | j1.join().unwrap(); 57 | j2.join().unwrap(); 58 | j3.join().unwrap(); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /tests/test_optimistic_transaction_db_memory_usage.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Yiyuan Liu 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
#[test]
fn test_optimistic_transaction_db_memory_usage() {
    let path = DBPath::new("_rust_rocksdb_optimistic_transaction_db memory_usage_test");
    {
        let mut options = Options::default();
        options.create_if_missing(true);
        options.enable_statistics();

        // Attach a 1 MB LRU block cache so cache usage shows up in the report.
        let cache = rust_rocksdb::Cache::new_lru_cache(1 << 20);
        let mut block_based_options = rust_rocksdb::BlockBasedOptions::default();
        block_based_options.set_block_cache(&cache);
        options.set_block_based_table_factory(&block_based_options);

        let db: OptimisticTransactionDB<SingleThreaded> =
            OptimisticTransactionDB::open(&options, &path).unwrap();
        let mut builder = rust_rocksdb::perf::MemoryUsageBuilder::new().unwrap();
        builder.add_db(&db);
        builder.add_cache(&cache);
        let memory_usage = builder.build().unwrap();

        // Write then read back 1000 key/value pairs to populate the memtable.
        for i in 1..=1000 {
            db.put(format!("key{}", i), format!("value{}", i)).unwrap();
        }
        for i in 1..=1000 {
            let fetched = db.get(format!("key{}", i)).unwrap().unwrap();
            assert_eq!(String::from_utf8(fetched).unwrap(), format!("value{}", i));
        }

        assert_ne!(memory_usage.approximate_mem_table_total(), 0);
        // Nothing has been flushed, so there are no table readers yet.
        assert_eq!(memory_usage.approximate_mem_table_readers_total(), 0);
        assert_ne!(memory_usage.approximate_cache_total(), 0);
        assert_ne!(memory_usage.approximate_mem_table_unflushed(), 0);
    }
}

#[test]
fn test_pinnable_slice() {
    let path = DBPath::new("_rust_rocksdb_pinnable_slice_test");

    let mut opts = Options::default();
    opts.create_if_missing(true);
    let db = DB::open(&opts, &path).unwrap();

    db.put(b"k1", b"value12345").unwrap();

    // get_pinned returns a zero-copy view into RocksDB-owned memory.
    let result = db.get_pinned(b"k1");
    assert!(result.is_ok());

    let value = result.unwrap();
    assert!(value.is_some());

    let pinnable_slice = value.unwrap();

    // The slice can be indexed like any byte slice.
    assert_eq!(b"12345", &pinnable_slice[5..10]);
}

#[test]
fn test_snapshot_pinnable_slice() {
    let path = DBPath::new("_rust_rocksdb_snapshot_pinnable_slice_test");

    let mut opts = Options::default();
    opts.create_if_missing(true);
    let db = DB::open(&opts, &path).unwrap();

    db.put(b"k1", b"value12345").unwrap();

    // A snapshot taken before the overwrite must keep seeing the old value.
    let snap = db.snapshot();
    assert!(db.put(b"k1", b"value23456").is_ok());

    let result = snap.get_pinned(b"k1");
    assert!(result.is_ok());

    let value = result.unwrap();
    assert!(value.is_some());

    let pinnable_slice = value.unwrap();

    assert_eq!(b"12345", &pinnable_slice[5..10]);
}
#[test]
fn property_test() {
    let n = DBPath::new("_rust_rocksdb_property_test");

    // `property_value` accepts several name representations; try each one.
    {
        // As a &CStr constant.
        let db = DB::open_default(&n).unwrap();
        let prop_name: &std::ffi::CStr = properties::STATS;
        let value = db.property_value(prop_name).unwrap().unwrap();
        assert!(value.contains("Stats"));
    }

    {
        // As an owned PropertyName.
        let db = DB::open_default(&n).unwrap();
        let prop_name: properties::PropertyName = properties::STATS.to_owned();
        let value = db.property_value(&prop_name).unwrap().unwrap();
        assert!(value.contains("Stats"));
    }

    {
        // As a plain String.
        let db = DB::open_default(&n).unwrap();
        let prop_name: String = properties::STATS.to_owned().into_string();
        let value = db.property_value(&prop_name).unwrap().unwrap();
        assert!(value.contains("Stats"));
    }
}

#[test]
fn property_cf_test() {
    let n = DBPath::new("_rust_rocksdb_property_cf_test");
    {
        let opts = Options::default();
        // `create_cf` needs `&mut self` only in single-threaded CF mode.
        #[cfg(feature = "multi-threaded-cf")]
        let db = DB::open_default(&n).unwrap();
        #[cfg(not(feature = "multi-threaded-cf"))]
        let mut db = DB::open_default(&n).unwrap();
        db.create_cf("cf1", &opts).unwrap();
        let cf = db.cf_handle("cf1").unwrap();

        let value = db
            .property_value_cf(&cf, properties::STATS)
            .unwrap()
            .unwrap();
        assert!(value.contains("Stats"));
    }
}

#[test]
fn property_int_test() {
    let n = DBPath::new("_rust_rocksdb_property_int_test");
    {
        let db = DB::open_default(&n).unwrap();
        // An empty database reports zero live data.
        let value = db
            .property_int_value(properties::ESTIMATE_LIVE_DATA_SIZE)
            .unwrap();
        assert_eq!(value, Some(0));
    }
}

#[test]
fn property_int_cf_test() {
    let n = DBPath::new("_rust_rocksdb_property_int_cf_test");
    {
        let opts = Options::default();
        // `create_cf` needs `&mut self` only in single-threaded CF mode.
        #[cfg(feature = "multi-threaded-cf")]
        let db = DB::open_default(&n).unwrap();
        #[cfg(not(feature = "multi-threaded-cf"))]
        let mut db = DB::open_default(&n).unwrap();
        db.create_cf("cf1", &opts).unwrap();
        let cf = db.cf_handle("cf1").unwrap();

        // A freshly created column family contains no keys.
        let total_keys = db
            .property_int_value_cf(&cf, properties::ESTIMATE_NUM_KEYS)
            .unwrap();
        assert_eq!(total_keys, Some(0));
    }
}
14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use rust_rocksdb::{DBAccess, DBRawIteratorWithThreadMode, DB}; 20 | use util::DBPath; 21 | 22 | fn assert_item(iter: &DBRawIteratorWithThreadMode<'_, D>, key: &[u8], value: &[u8]) { 23 | assert!(iter.valid()); 24 | assert_eq!(iter.key(), Some(key)); 25 | assert_eq!(iter.value(), Some(value)); 26 | assert_eq!(iter.item(), Some((key, value))); 27 | } 28 | 29 | fn assert_no_item(iter: &DBRawIteratorWithThreadMode<'_, D>) { 30 | assert!(!iter.valid()); 31 | assert_eq!(iter.key(), None); 32 | assert_eq!(iter.value(), None); 33 | assert_eq!(iter.item(), None); 34 | } 35 | 36 | #[test] 37 | pub fn test_forwards_iteration() { 38 | let n = DBPath::new("forwards_iteration"); 39 | { 40 | let db = DB::open_default(&n).unwrap(); 41 | db.put(b"k1", b"v1").unwrap(); 42 | db.put(b"k2", b"v2").unwrap(); 43 | db.put(b"k3", b"v3").unwrap(); 44 | db.put(b"k4", b"v4").unwrap(); 45 | 46 | let mut iter = db.raw_iterator(); 47 | iter.seek_to_first(); 48 | assert_item(&iter, b"k1", b"v1"); 49 | 50 | iter.next(); 51 | assert_item(&iter, b"k2", b"v2"); 52 | 53 | iter.next(); // k3 54 | iter.next(); // k4 55 | 56 | iter.next(); // invalid! 57 | assert_no_item(&iter); 58 | } 59 | } 60 | 61 | #[test] 62 | pub fn test_seek_last() { 63 | let n = DBPath::new("backwards_iteration"); 64 | { 65 | let db = DB::open_default(&n).unwrap(); 66 | db.put(b"k1", b"v1").unwrap(); 67 | db.put(b"k2", b"v2").unwrap(); 68 | db.put(b"k3", b"v3").unwrap(); 69 | db.put(b"k4", b"v4").unwrap(); 70 | 71 | let mut iter = db.raw_iterator(); 72 | iter.seek_to_last(); 73 | assert_item(&iter, b"k4", b"v4"); 74 | 75 | iter.prev(); 76 | assert_item(&iter, b"k3", b"v3"); 77 | 78 | iter.prev(); // k2 79 | iter.prev(); // k1 80 | 81 | iter.prev(); // invalid! 
82 | assert_no_item(&iter); 83 | } 84 | } 85 | 86 | #[test] 87 | pub fn test_seek() { 88 | let n = DBPath::new("seek"); 89 | { 90 | let db = DB::open_default(&n).unwrap(); 91 | db.put(b"k1", b"v1").unwrap(); 92 | db.put(b"k2", b"v2").unwrap(); 93 | db.put(b"k4", b"v4").unwrap(); 94 | 95 | let mut iter = db.raw_iterator(); 96 | iter.seek(b"k2"); 97 | 98 | assert_item(&iter, b"k2", b"v2"); 99 | 100 | // Check it gets the next key when the key doesn't exist 101 | iter.seek(b"k3"); 102 | assert_item(&iter, b"k4", b"v4"); 103 | } 104 | } 105 | 106 | #[test] 107 | pub fn test_seek_to_nonexistent() { 108 | let n = DBPath::new("seek_to_nonexistent"); 109 | { 110 | let db = DB::open_default(&n).unwrap(); 111 | db.put(b"k1", b"v1").unwrap(); 112 | db.put(b"k3", b"v3").unwrap(); 113 | db.put(b"k4", b"v4").unwrap(); 114 | 115 | let mut iter = db.raw_iterator(); 116 | iter.seek(b"k2"); 117 | assert_item(&iter, b"k3", b"v3"); 118 | } 119 | } 120 | 121 | #[test] 122 | pub fn test_seek_for_prev() { 123 | let n = DBPath::new("seek_for_prev"); 124 | { 125 | let db = DB::open_default(&n).unwrap(); 126 | db.put(b"k1", b"v1").unwrap(); 127 | db.put(b"k2", b"v2").unwrap(); 128 | db.put(b"k4", b"v4").unwrap(); 129 | 130 | let mut iter = db.raw_iterator(); 131 | iter.seek(b"k2"); 132 | assert_item(&iter, b"k2", b"v2"); 133 | 134 | // Check it gets the previous key when the key doesn't exist 135 | iter.seek_for_prev(b"k3"); 136 | assert_item(&iter, b"k2", b"v2"); 137 | } 138 | } 139 | 140 | #[test] 141 | pub fn test_next_without_seek() { 142 | let n = DBPath::new("test_forgot_seek"); 143 | { 144 | let db = DB::open_default(&n).unwrap(); 145 | db.put(b"k1", b"v1").unwrap(); 146 | db.put(b"k2", b"v2").unwrap(); 147 | db.put(b"k4", b"v4").unwrap(); 148 | 149 | let mut iter = db.raw_iterator(); 150 | iter.next(); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /tests/test_slice_transform.rs: 
#[test]
pub fn test_slice_transform() {
    let db_path = DBPath::new("_rust_rocksdb_slice_transform_test");
    {
        const A1: &[u8] = b"aaa1";
        const A2: &[u8] = b"aaa2";
        const B1: &[u8] = b"bbb1";
        const B2: &[u8] = b"bbb2";

        // Prefix extractor that keys on the first three bytes.
        fn first_three(k: &[u8]) -> &[u8] {
            &k[..3]
        }

        let prefix_extractor = SliceTransform::create("first_three", first_three, None);

        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_prefix_extractor(prefix_extractor);

        let db = DB::open(&opts, &db_path).unwrap();

        for kv in [A1, A2, B1, B2] {
            assert!(db.put(kv, kv).is_ok());
        }

        // Each prefix should yield exactly its own pair of entries.
        assert_iter(db.prefix_iterator(b"aaa"), &[pair(A1, A1), pair(A2, A2)]);
        assert_iter(db.prefix_iterator(b"bbb"), &[pair(B1, B1), pair(B2, B2)]);
    }
}

#[test]
fn test_no_in_domain() {
    // Suffix "prefix" extractor: the last four bytes, or the whole slice
    // when it is shorter than that.
    fn extract_suffix(slice: &[u8]) -> &[u8] {
        if slice.len() > 4 {
            &slice[slice.len() - 4..]
        } else {
            slice
        }
    }

    let db_path = DBPath::new("_rust_rocksdb_prefix_test");
    {
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_prefix_extractor(SliceTransform::create(
            "test slice transform",
            extract_suffix,
            None,
        ));
        opts.set_memtable_prefix_bloom_ratio(0.1);

        let db = DB::open(&opts, &db_path).unwrap();
        db.put(b"key_sfx1", b"a").unwrap();
        db.put(b"key_sfx2", b"b").unwrap();

        // Point lookups still work with the memtable prefix bloom enabled.
        assert_eq!(db.get(b"key_sfx1").unwrap().unwrap(), b"a");
    }
}
#[test]
fn sst_file_writer_works() {
    let db_path = DBPath::new("_rust_rocksdb_sstfilewritertest");
    let dir = tempfile::Builder::new()
        .prefix("_rust_rocksdb_sstfilewritertest")
        .tempdir()
        .expect("Failed to create temporary path for file writer.");
    let writer_path = dir.path().join("filewriter");
    {
        // Write an SST with two puts and one delete tombstone, then close it.
        let opts = Options::default();
        let mut writer = SstFileWriter::create(&opts);
        writer.open(&writer_path).unwrap();
        writer.put(b"k1", b"v1").unwrap();
        writer.put(b"k2", b"v2").unwrap();
        writer.delete(b"k3").unwrap();
        writer.finish().unwrap();
        assert!(writer.file_size() > 0);
    }
    {
        // Ingesting the file makes the puts visible, and the tombstone
        // removes the pre-existing "k3".
        let db = DB::open_default(&db_path).unwrap();
        db.put(b"k3", b"v3").unwrap();
        db.ingest_external_file(vec![&writer_path]).unwrap();
        let r: Result<Option<Vec<u8>>, Error> = db.get(b"k1");
        assert_eq!(r.unwrap().unwrap(), b"v1");
        let r: Result<Option<Vec<u8>>, Error> = db.get(b"k2");
        assert_eq!(r.unwrap().unwrap(), b"v2");
        assert!(db.get(b"k3").unwrap().is_none());
    }
}

#[test]
fn sst_file_writer_with_ts_works() {
    let db_path = DBPath::new("_rust_rocksdb_sstfilewritertest_with_ts");
    let dir = tempfile::Builder::new()
        .prefix("_rust_rocksdb_sstfilewritertest_with_ts")
        .tempdir()
        .expect("Failed to create temporary path for file writer.");
    let writer_path = dir.path().join("filewriter");

    let ts = U64Timestamp::new(1);
    let ts2 = U64Timestamp::new(2);
    let ts3 = U64Timestamp::new(3);
    {
        // The writer must use the same timestamp-aware comparator as the DB
        // that will ingest the file.
        let mut opts = Options::default();
        opts.set_comparator_with_ts(
            U64Comparator::NAME,
            U64Timestamp::SIZE,
            Box::new(U64Comparator::compare),
            Box::new(U64Comparator::compare_ts),
            Box::new(U64Comparator::compare_without_ts),
        );

        let mut writer = SstFileWriter::create(&opts);
        writer.open(&writer_path).unwrap();
        writer.put_with_ts(b"k1", ts, b"v1").unwrap();
        writer.put_with_ts(b"k2", ts2, b"v2").unwrap();
        writer.put_with_ts(b"k3", ts2, b"v3").unwrap();
        writer.finish().unwrap();

        assert!(writer.file_size() > 0);
    }

    {
        let _ = DB::destroy(&Options::default(), &db_path);

        let mut db_opts = Options::default();
        db_opts.create_missing_column_families(true);
        db_opts.create_if_missing(true);
        db_opts.set_comparator_with_ts(
            U64Comparator::NAME,
            U64Timestamp::SIZE,
            Box::new(U64Comparator::compare),
            Box::new(U64Comparator::compare_ts),
            Box::new(U64Comparator::compare_without_ts),
        );

        let db = DB::open(&db_opts, &db_path).unwrap();
        db.ingest_external_file(vec![&writer_path]).unwrap();
        db.delete_with_ts(b"k3", ts3).unwrap();

        let mut opts = ReadOptions::default();
        opts.set_timestamp(ts);

        let r: Result<Option<Vec<u8>>, Error> = db.get_opt(b"k1", &opts);
        assert_eq!(r.unwrap().unwrap(), b"v1");

        // At ts1, k2 (written at ts2) should be invisible.
        assert!(db.get_opt(b"k2", &opts).unwrap().is_none());

        // At ts2, both k2 and k3 should be visible.
        opts.set_timestamp(ts2);
        let r: Result<Option<Vec<u8>>, Error> = db.get_opt(b"k2", &opts);
        assert_eq!(r.unwrap().unwrap(), b"v2");
        let r = db.get_opt(b"k3", &opts);
        assert_eq!(r.unwrap().unwrap(), b"v3");

        // At ts3, k3 should appear deleted.
        opts.set_timestamp(ts3);
        assert!(db.get_opt(b"k3", &opts).unwrap().is_none());
    }
}
License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use rust_rocksdb::{perf, Options, TransactionDB, TransactionDBOptions}; 20 | use util::DBPath; 21 | 22 | #[cfg(not(feature = "multi-threaded-cf"))] 23 | type DefaultThreadMode = rust_rocksdb::SingleThreaded; 24 | #[cfg(feature = "multi-threaded-cf")] 25 | type DefaultThreadMode = rust_rocksdb::MultiThreaded; 26 | 27 | #[test] 28 | fn test_transaction_db_memory_usage() { 29 | let path = DBPath::new("_rust_rocksdb_transaction_db_memory_usage_test"); 30 | { 31 | let mut options = Options::default(); 32 | options.create_if_missing(true); 33 | options.enable_statistics(); 34 | 35 | // setup cache: 36 | let cache = rust_rocksdb::Cache::new_lru_cache(1 << 20); // 1 MB cache 37 | let mut block_based_options = rust_rocksdb::BlockBasedOptions::default(); 38 | block_based_options.set_block_cache(&cache); 39 | options.set_block_based_table_factory(&block_based_options); 40 | 41 | let tx_db_options = TransactionDBOptions::default(); 42 | let db: TransactionDB = 43 | TransactionDB::open(&options, &tx_db_options, &path).unwrap(); 44 | 45 | let mut builder = perf::MemoryUsageBuilder::new().unwrap(); 46 | builder.add_tx_db(&db); 47 | builder.add_cache(&cache); 48 | let memory_usage = builder.build().unwrap(); 49 | 50 | for i in 1..=1000 { 51 | let key = format!("key{}", i); 52 | let value = format!("value{}", i); 53 | db.put(&key, &value).unwrap(); 54 | 
} 55 | 56 | for i in 1..=1000 { 57 | let key = format!("key{}", i); 58 | let result = db.get(&key).unwrap().unwrap(); 59 | let result_str = String::from_utf8(result).unwrap(); 60 | assert_eq!(result_str, format!("value{}", i)); 61 | } 62 | 63 | assert_ne!(memory_usage.approximate_mem_table_total(), 0); 64 | assert_eq!(memory_usage.approximate_mem_table_readers_total(), 0); // Equals zero! 65 | assert_ne!(memory_usage.approximate_cache_total(), 0); 66 | assert_ne!(memory_usage.approximate_mem_table_unflushed(), 0); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /tests/test_transaction_db_property.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use rust_rocksdb::{properties, Options, TransactionDB, TransactionDBOptions}; 20 | use util::DBPath; 21 | 22 | #[test] 23 | fn transaction_db_property_test() { 24 | let path = DBPath::new("_rust_rocksdb_transaction_db_property_test"); 25 | { 26 | let mut options = Options::default(); 27 | options.create_if_missing(true); 28 | options.enable_statistics(); 29 | let tx_db_options = TransactionDBOptions::default(); 30 | let db = TransactionDB::open(&options, &tx_db_options, &path).unwrap(); 31 | 32 | db.put("key1", "value1").unwrap(); 33 | db.put("key2", "value2").unwrap(); 34 | db.put("key3", "value3").unwrap(); 35 | 36 | let prop_name: &std::ffi::CStr = properties::STATS; 37 | let value = db.property_value(prop_name).unwrap().unwrap(); 38 | 39 | assert!(value.contains("Compaction Stats")); 40 | assert!(value.contains("Cumulative writes: 3 writes")); 41 | } 42 | } 43 | 44 | #[test] 45 | fn transaction_db_int_property_test() { 46 | let path = DBPath::new("_rust_rocksdb_transaction_db_int_property_test"); 47 | { 48 | let mut options = Options::default(); 49 | options.create_if_missing(true); 50 | options.enable_statistics(); 51 | let tx_db_options = TransactionDBOptions::default(); 52 | let db = TransactionDB::open(&options, &tx_db_options, &path).unwrap(); 53 | 54 | db.put("key1", "value1").unwrap(); 55 | db.put("key2", "value2").unwrap(); 56 | 57 | let prop_name: properties::PropertyName = properties::ESTIMATE_NUM_KEYS.to_owned(); 58 | let value = db.property_int_value(&prop_name).unwrap().unwrap(); 59 | 60 | assert_eq!(value, 2); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /tests/test_write_batch.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in 
compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | mod util; 15 | 16 | use std::collections::HashMap; 17 | 18 | use pretty_assertions::assert_eq; 19 | 20 | use rust_rocksdb::{Error, WriteBatch, WriteBatchIterator, DB}; 21 | use util::DBPath; 22 | 23 | #[test] 24 | fn test_write_batch_clear() { 25 | let mut batch = WriteBatch::default(); 26 | batch.put(b"1", b"2"); 27 | assert_eq!(batch.len(), 1); 28 | batch.clear(); 29 | assert_eq!(batch.len(), 0); 30 | assert!(batch.is_empty()); 31 | } 32 | 33 | #[test] 34 | fn test_write_batch_with_serialized_data() { 35 | struct Iterator { 36 | data: HashMap, Vec>, 37 | } 38 | 39 | impl WriteBatchIterator for Iterator { 40 | fn put(&mut self, key: Box<[u8]>, value: Box<[u8]>) { 41 | match self.data.remove(key.as_ref()) { 42 | Some(expect) => { 43 | assert_eq!(value.as_ref(), expect.as_slice()); 44 | } 45 | None => { 46 | panic!("key not exists"); 47 | } 48 | } 49 | } 50 | 51 | fn delete(&mut self, _: Box<[u8]>) { 52 | panic!("invalid delete operation"); 53 | } 54 | } 55 | 56 | let mut kvs: HashMap, Vec> = HashMap::default(); 57 | kvs.insert(vec![1], vec![2]); 58 | kvs.insert(vec![2], vec![3]); 59 | kvs.insert(vec![1, 2, 3, 4, 5], vec![4]); 60 | 61 | let mut b1 = WriteBatch::default(); 62 | for (k, v) in &kvs { 63 | b1.put(k, v); 64 | } 65 | let data = b1.data(); 66 | 67 | let b2 = WriteBatch::from_data(data); 68 | let mut it = Iterator { data: kvs }; 69 | b2.iterate(&mut it); 70 | } 71 | 72 | #[test] 73 | fn test_write_batch_put_log_data() { 74 | let path = DBPath::new("writebatch_put_log_data"); 
75 | let db = DB::open_default(&path).unwrap(); 76 | 77 | let mut batch = WriteBatch::default(); 78 | batch.put(b"k1", b"v11111111"); 79 | batch.put_log_data(b"log_data_value"); 80 | 81 | let p = db.write(batch); 82 | assert!(p.is_ok()); 83 | 84 | let r: Result>, Error> = db.get(b"k1"); 85 | assert_eq!(r.unwrap().unwrap(), b"v11111111"); 86 | 87 | let mut called = false; 88 | 89 | let mut wal_iter = db.get_updates_since(0).unwrap(); 90 | if let Ok((seq, write_batch)) = wal_iter.next().unwrap() { 91 | called = true; 92 | 93 | // Putting LOG data does not increase sequence number, only the put() call does 94 | assert_eq!(seq, 1); 95 | 96 | // there is only the put write in the WriteBatch 97 | assert_eq!(write_batch.len(), 1); 98 | 99 | // The WriteBatch data has the written "log_data" 100 | assert!(String::from_utf8(write_batch.data().to_vec()) 101 | .unwrap() 102 | .contains("log_data_value")); 103 | } 104 | 105 | assert!(called); 106 | } 107 | -------------------------------------------------------------------------------- /tests/util/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::{ 4 | cmp::Ordering, 5 | convert::TryInto, 6 | path::{Path, PathBuf}, 7 | }; 8 | 9 | use rust_rocksdb::{Error, Options, DB}; 10 | 11 | /// Temporary database path which calls DB::Destroy when DBPath is dropped. 12 | pub struct DBPath { 13 | dir: tempfile::TempDir, // kept for cleaning up during drop 14 | path: PathBuf, 15 | } 16 | 17 | impl DBPath { 18 | /// Produces a fresh (non-existent) temporary path which will be DB::destroy'ed automatically. 
19 | pub fn new(prefix: &str) -> DBPath { 20 | let dir = tempfile::Builder::new() 21 | .prefix(prefix) 22 | .tempdir() 23 | .expect("Failed to create temporary path for db."); 24 | let path = dir.path().join("db"); 25 | 26 | DBPath { dir, path } 27 | } 28 | } 29 | 30 | impl Drop for DBPath { 31 | fn drop(&mut self) { 32 | let opts = Options::default(); 33 | DB::destroy(&opts, &self.path).expect("Failed to destroy temporary DB"); 34 | } 35 | } 36 | 37 | /// Convert a DBPath ref to a Path ref. 38 | /// We don't implement this for DBPath values because we want them to 39 | /// exist until the end of their scope, not get passed into functions and 40 | /// dropped early. 41 | impl AsRef for &DBPath { 42 | fn as_ref(&self) -> &Path { 43 | &self.path 44 | } 45 | } 46 | 47 | type Pair = (Box<[u8]>, Box<[u8]>); 48 | 49 | pub fn pair(left: &[u8], right: &[u8]) -> Pair { 50 | (Box::from(left), Box::from(right)) 51 | } 52 | 53 | #[track_caller] 54 | pub fn assert_iter(iter: impl Iterator>, want: &[Pair]) { 55 | let got = iter.collect::, _>>().unwrap(); 56 | assert_eq!(got.as_slice(), want); 57 | } 58 | 59 | #[track_caller] 60 | pub fn assert_iter_reversed(iter: impl Iterator>, want: &[Pair]) { 61 | let mut got = iter.collect::, _>>().unwrap(); 62 | got.reverse(); 63 | assert_eq!(got.as_slice(), want); 64 | } 65 | 66 | /// A timestamp type we use in testing [user-defined timestamp](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp). 67 | /// This is a `u64` in little endian encoding. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct U64Timestamp([u8; Self::SIZE]);

impl U64Timestamp {
    /// Width of the encoded timestamp in bytes.
    pub const SIZE: usize = 8;

    /// Encodes `ts` as little-endian bytes.
    pub fn new(ts: u64) -> Self {
        Self(ts.to_le_bytes())
    }
}

impl From<&[u8]> for U64Timestamp {
    /// Builds a timestamp from its byte encoding.
    ///
    /// Panics unless `slice` is exactly `SIZE` bytes long.
    fn from(slice: &[u8]) -> Self {
        assert_eq!(
            slice.len(),
            Self::SIZE,
            "incorrect timestamp length: {}, should be {}",
            slice.len(),
            Self::SIZE
        );
        Self(slice.try_into().unwrap())
    }
}

impl From<U64Timestamp> for Vec<u8> {
    fn from(ts: U64Timestamp) -> Self {
        ts.0.to_vec()
    }
}

impl AsRef<[u8]> for U64Timestamp {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl PartialOrd for U64Timestamp {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for U64Timestamp {
    /// Timestamps order by their decoded numeric (`u64`) value.
    fn cmp(&self, other: &Self) -> Ordering {
        u64::from_le_bytes(self.0).cmp(&u64::from_le_bytes(other.0))
    }
}

/// A comparator for use in column families with [user-defined timestamp](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp)
/// enabled. This comparator assumes `u64` timestamp in little endian encoding.
/// This is the same behavior as RocksDB's built-in comparator.
///
/// Adapted from C++ and Golang implementations from:
/// - [rocksdb](https://github.com/facebook/rocksdb/blob/v9.4.0/test_util/testutil.cc#L112)
/// - [gorocksdb](https://github.com/linxGnu/grocksdb/blob/v1.9.2/db_ts_test.go#L167)
/// - [SeiDB](https://github.com/sei-protocol/sei-db/blob/v0.0.41/ss/rocksdb/comparator.go)
pub struct U64Comparator;

impl U64Comparator {
    pub const NAME: &'static str = "rust-rocksdb.U64Comparator";

    /// Full key ordering: user keys ascending; for identical user keys the
    /// larger (i.e. newer) timestamp sorts first.
    pub fn compare(a: &[u8], b: &[u8]) -> Ordering {
        match Self::compare_without_ts(a, true, b, true) {
            // Same user key: fall back to the timestamps, newest first —
            // hence the `reverse`.
            Ordering::Equal => Self::compare_ts(
                extract_timestamp_from_user_key(a),
                extract_timestamp_from_user_key(b),
            )
            .reverse(),
            // Different user keys: timestamps never matter.
            ord => ord,
        }
    }

    /// Compares two encoded timestamps by their numeric value.
    pub fn compare_ts(bz1: &[u8], bz2: &[u8]) -> Ordering {
        U64Timestamp::from(bz1).cmp(&U64Timestamp::from(bz2))
    }

    /// Compares only the user-key portions, stripping the trailing
    /// timestamp from whichever side carries one.
    pub fn compare_without_ts(a: &[u8], a_has_ts: bool, b: &[u8], b_has_ts: bool) -> Ordering {
        let lhs = if a_has_ts {
            strip_timestamp_from_user_key(a)
        } else {
            a
        };
        let rhs = if b_has_ts {
            strip_timestamp_from_user_key(b)
        } else {
            b
        };
        lhs.cmp(rhs)
    }
}

/// Returns the trailing timestamp bytes of a timestamped user key.
fn extract_timestamp_from_user_key(key: &[u8]) -> &[u8] {
    key.split_at(key.len() - U64Timestamp::SIZE).1
}

/// Returns the user key with its trailing timestamp removed.
fn strip_timestamp_from_user_key(key: &[u8]) -> &[u8] {
    key.split_at(key.len() - U64Timestamp::SIZE).0
}