├── .github └── workflows │ └── rust.yml ├── .gitignore ├── .gitmodules ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── MAINTAINERSHIP.md ├── README.md ├── code-of-conduct.md ├── libspeedb-sys ├── Cargo.toml ├── Makefile ├── README.md ├── build.rs ├── build_version.cc ├── snappy-stubs-public.h ├── speedb_lib_sources.txt ├── src │ ├── lib.rs │ └── test.rs └── tests │ └── ffi.rs ├── src ├── backup.rs ├── checkpoint.rs ├── column_family.rs ├── compaction_filter.rs ├── compaction_filter_factory.rs ├── comparator.rs ├── db.rs ├── db_iterator.rs ├── db_options.rs ├── db_pinnable_slice.rs ├── env.rs ├── ffi_util.rs ├── iter_range.rs ├── lib.rs ├── merge_operator.rs ├── perf.rs ├── prop_name.rs ├── properties.rs ├── slice_transform.rs ├── snapshot.rs ├── sst_file_writer.rs ├── transactions │ ├── mod.rs │ ├── optimistic_transaction_db.rs │ ├── options.rs │ ├── transaction.rs │ └── transaction_db.rs └── write_batch.rs └── tests ├── fail ├── checkpoint_outlive_db.rs ├── checkpoint_outlive_db.stderr ├── iterator_outlive_db.rs ├── iterator_outlive_db.stderr ├── open_with_multiple_refs_as_single_threaded.rs ├── open_with_multiple_refs_as_single_threaded.stderr ├── snapshot_outlive_db.rs ├── snapshot_outlive_db.stderr ├── snapshot_outlive_transaction.rs ├── snapshot_outlive_transaction.stderr ├── snapshot_outlive_transaction_db.rs ├── snapshot_outlive_transaction_db.stderr ├── transaction_outlive_transaction_db.rs └── transaction_outlive_transaction_db.stderr ├── test_backup.rs ├── test_checkpoint.rs ├── test_column_family.rs ├── test_comparator.rs ├── test_compationfilter.rs ├── test_db.rs ├── test_iterator.rs ├── test_merge_operator.rs ├── test_multithreaded.rs ├── test_optimistic_transaction_db.rs ├── test_pinnable_slice.rs ├── test_property.rs ├── test_raw_iterator.rs ├── test_rocksdb_options.rs ├── test_slice_transform.rs ├── test_sst_file_writer.rs ├── test_transaction_db.rs ├── test_write_batch.rs └── util └── mod.rs /.github/workflows/rust.yml: 
-------------------------------------------------------------------------------- 1 | name: RocksDB CI 2 | 3 | on: [push, pull_request] 4 | env: 5 | RUST_VERSION: 1.63.0 6 | 7 | jobs: 8 | fmt: 9 | name: Rustfmt 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout sources 13 | uses: actions/checkout@v2 14 | - name: Install rust 15 | uses: actions-rs/toolchain@v1 16 | with: 17 | toolchain: ${{ env.RUST_VERSION }} 18 | components: rustfmt 19 | profile: minimal 20 | override: true 21 | - name: Run rustfmt 22 | uses: actions-rs/cargo@v1 23 | with: 24 | command: fmt 25 | args: --all -- --check 26 | 27 | doc-check: 28 | name: Rustdoc-check 29 | runs-on: ubuntu-latest 30 | steps: 31 | - name: Checkout sources 32 | uses: actions/checkout@v2 33 | - name: Install rust 34 | uses: actions-rs/toolchain@v1 35 | with: 36 | toolchain: ${{ env.RUST_VERSION }} 37 | components: rust-docs 38 | profile: minimal 39 | override: true 40 | - name: Run cargo rustdoc 41 | uses: actions-rs/cargo@v1 42 | with: 43 | command: rustdoc 44 | args: -- -D warnings 45 | 46 | clippy: 47 | name: Clippy 48 | runs-on: ubuntu-latest 49 | steps: 50 | - name: Checkout sources 51 | uses: actions/checkout@v2 52 | - name: Install rust 53 | uses: actions-rs/toolchain@v1 54 | with: 55 | toolchain: ${{ env.RUST_VERSION }} 56 | components: clippy 57 | profile: minimal 58 | override: true 59 | - name: Run clippy 60 | uses: actions-rs/clippy-check@v1 61 | with: 62 | token: ${{ secrets.GITHUB_TOKEN }} 63 | args: --all-targets -- -D warnings 64 | 65 | audit: 66 | name: Security audit 67 | runs-on: ubuntu-latest 68 | steps: 69 | - uses: actions/checkout@v2 70 | - uses: actions-rs/audit-check@v1 71 | with: 72 | token: ${{ secrets.GITHUB_TOKEN }} 73 | 74 | test: 75 | name: ${{ matrix.build }} 76 | runs-on: ${{ matrix.os }} 77 | strategy: 78 | matrix: 79 | build: [Linux, macOS, Windows] 80 | include: 81 | - build: Linux 82 | os: ubuntu-latest 83 | - build: macOS 84 | os: macos-latest 85 | - build: Windows 86 | os: 
windows-latest 87 | steps: 88 | - name: Checkout sources 89 | uses: actions/checkout@v2 90 | - name: Install rust 91 | uses: actions-rs/toolchain@v1 92 | with: 93 | toolchain: ${{ env.RUST_VERSION }} 94 | target: ${{ matrix.target }} 95 | profile: minimal 96 | override: true 97 | - name: Remove msys64 # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll 98 | if: runner.os == 'Windows' 99 | run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse 100 | - name: Install dependencies 101 | if: runner.os == 'Windows' 102 | run: choco install llvm -y 103 | - name: Run rocksdb tests 104 | run: | 105 | cargo test --all 106 | cargo test --all --features multi-threaded-cf 107 | - name: Free disk space 108 | run: cargo clean 109 | - name: Run rocksdb tests (jemalloc) 110 | if: runner.os != 'Windows' 111 | run: cargo test --all --features jemalloc 112 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swo 2 | target 3 | Cargo.lock 4 | *.orig 5 | *.bk 6 | *rlib 7 | tags 8 | path 9 | .DS_Store 10 | .idea 11 | .vscode 12 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "speedb_sys/snappy"] 2 | path = libspeedb-sys/snappy 3 | url = https://github.com/google/snappy.git 4 | branch = main 5 | [submodule "speedb_sys/speedb"] 6 | path = libspeedb-sys/speedb 7 | url = https://github.com/speedb-io/speedb.git 8 | branch = release/2.6 9 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to rust-rocksdb 2 | Thank you for taking an interest in the project, and contributing to it - it's appreciated! 
There are several ways you can contribute: 3 | - [Bug Reports](#bug-reports) 4 | - [Feature Requests](#feature-requests) 5 | - [Documentation](#documentation) 6 | - [Discussion](#discussion) 7 | - [Pull Requests](#pull-requests) 8 | 9 | **Please note all contributors must adhere to the [code of conduct](code-of-conduct.md).** 10 | 11 | ## Bug Reports 12 | [bug-reports]: #bug-reports 13 | - **Ensure the bug has not already been reported** - this can be done with a quick search of the [existing open issues](https://github.com/rust-rocksdb/rust-rocksdb/issues?q=is%3Aissue+is%3Aopen+). 14 | - **Ensure the bug applies to the Rust wrapper, and not the underlying library** - bugs in the RocksDB library should be [reported upstream](https://github.com/facebook/rocksdb/issues). 15 | - When [creating an issue](https://github.com/rust-rocksdb/rust-rocksdb/issues/new) please try to: 16 | - **Use a clear and descriptive title** to identify the issue 17 | - **Provide enough context** to accurately summarize the issue. Not every issue will need detailed steps to recreate, example code, stack traces, etc. - use your own judgment on what information would be helpful to anyone working on the issue. It's easier for someone to skim over too much context, than stop and wait for a response when context is missing. 18 | 19 | ## Feature Requests 20 | [feature-requests]: #feature-requests 21 | Feature requests will primarily come in the form of ergonomics involving the Rust language, or in bringing the wrapper into parity with the library's API. Please create an issue with any relevant information. 22 | 23 | ## Documentation 24 | [documentation]: #documentation 25 | Much of the documentation should mirror or reference the library's [documentation](https://github.com/facebook/rocksdb/wiki). If the wrapper or its exposed functions are missing documentation or contain inaccurate information please submit a pull request. 
26 | 27 | ## Discussion 28 | [discussion]: #discussion 29 | Discussion around design and development of the wrapper primarily occurs within issues and pull requests. Don't be afraid to participate if you have questions, concerns, insight, or advice. 30 | 31 | ## Pull Requests 32 | [pull-requests]: #pull-requests 33 | Pull requests are welcome, and when contributing code, the author agrees to do so under the project's [licensing](https://github.com/rust-rocksdb/rust-rocksdb/blob/master/LICENSE) - Apache 2.0 as of the time of this writing. The maintainers greatly appreciate PRs that follow open-source contribution best practices: 34 | 1. Fork this repository to your personal GitHub account. 35 | 1. Create a branch that includes your changes, **keep changes isolated and granular**. 36 | 1. Include any relevant documentation and/or tests. Write [documentation tests](https://doc.rust-lang.org/rustdoc/documentation-tests.html) when relevant. 37 | 1. Apply `cargo fmt` to ensure consistent formatting. 38 | 1. [Create a pull request](https://help.github.com/en/articles/about-pull-requests) against this repository. 39 | 40 | For pull requests that would benefit from discussion and review earlier in the development process, use a [Draft Pull Request](https://help.github.com/en/articles/about-pull-requests#draft-pull-requests). 
41 | 42 | ## Additional Resources 43 | Some useful information for working with RocksDB in Rust: 44 | - [RocksDB library primary site](https://rocksdb.org) 45 | - [RocksDB library GitHub repository](https://github.com/facebook/rocksdb) 46 | - [RocksDB library documentation](https://github.com/facebook/rocksdb/wiki) 47 | - [Rust's Foreign Function Interface (ffi)](https://doc.rust-lang.org/nomicon/ffi.html) 48 | 49 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "speedb" 3 | description = "Rust wrapper for Speedb" 4 | version = "0.0.3" 5 | edition = "2018" 6 | rust-version = "1.63" 7 | authors = ["Tyler Neely ", "David Greenberg "] 8 | repository = "https://github.com/speedb-io/rust-speedb" 9 | license = "Apache-2.0" 10 | categories = [ "database" ] 11 | keywords = ["database", "embedded", "LSM-tree", "persistence"] 12 | homepage = "https://github.com/speedb-io/rust-speedb" 13 | exclude = [ 14 | ".gitignore", 15 | ".travis.yml", 16 | "deploy.sh", 17 | "test/**/*", 18 | ] 19 | 20 | [workspace] 21 | members = ["libspeedb-sys"] 22 | 23 | [features] 24 | default = ["snappy", "lz4", "zstd", "zlib", "bzip2"] 25 | jemalloc = ["libspeedb-sys/jemalloc"] 26 | io-uring = ["libspeedb-sys/io-uring"] 27 | valgrind = [] 28 | snappy = ["libspeedb-sys/snappy"] 29 | lz4 = ["libspeedb-sys/lz4"] 30 | zstd = ["libspeedb-sys/zstd"] 31 | zlib = ["libspeedb-sys/zlib"] 32 | bzip2 = ["libspeedb-sys/bzip2"] 33 | rtti = ["libspeedb-sys/rtti"] 34 | multi-threaded-cf = [] 35 | serde1 = ["serde"] 36 | 37 | [dependencies] 38 | libc = "0.2" 39 | libspeedb-sys = { path = "libspeedb-sys", version = "0.0.3" } 40 | serde = { version = "1", features = [ "derive" ], optional = true } 41 | 42 | [dev-dependencies] 43 | trybuild = "1.0" 44 | tempfile = "3.1" 45 | pretty_assertions = "1.0" 46 | bincode = "1.3" 47 | serde = { version = "1", features = [ 
"derive" ] } 48 | -------------------------------------------------------------------------------- /MAINTAINERSHIP.md: -------------------------------------------------------------------------------- 1 | Maintainers agree to operate under this set of guidelines: 2 | 3 | #### Authority 4 | 5 | Maintainers are trusted to close issues, merge pull requests, and publish crates to cargo. 6 | 7 | #### Categories of Work 8 | 9 | 0. Minor 10 | * updating the changelog 11 | * requires no approval 12 | 1. Normal 13 | * librocksdb-sys updates 14 | * API tracking code in the rocksdb crate that does not change control flow 15 | * breaking changes due to removed functionality in rocksdb 16 | * require 1 approval from another maintainer. if no maintainer is able to be reached for 2 weeks, then progress may be made anyway 17 | * patch (and post-1.0, minor) releases to crates.io that contain only the above work 18 | 2. Major 19 | * breaking API changes that are not direct consequences of underlying rocksdb changes 20 | * refactoring, which should generally only be done for clearly functional reasons like to aid in the completion of a specific task 21 | * require consensus among all maintainers unless 2 weeks have gone by without full participation 22 | * if 2 weeks have gone by after seeking feedback, and at least one other maintainer has participated, and all participating maintainers are in agreement, then progress may be made anyway 23 | * if action is absolutely urgent, an organization owner may act as a tiebreaker if specifically requested to do so and they agree that making a controversial decision is worth the risk. This should hopefully never occur. 24 | 25 | If any maintainer thinks an issue is major, it is major. 26 | 27 | #### Changelog Maintenance 28 | 29 | * If you are the one who merges a PR that includes an externally-visible change, please describe the change in the changelog and merge it in. 
30 | 31 | #### Releasing, Publishing 32 | 33 | * Releases adhere to [semver](https://semver.org/) 34 | * To cut a release, an issue should be opened for it and reach the required approval based on the above `Categories of Work` section above 35 | * When progress is possible, the issue may be closed and the proposer may publish to crates.io. This is controlled by those in the [crate publishers organization-level team](https://github.com/orgs/rust-rocksdb/teams/crate-publishers). 36 | * Releases should have an associated tag pushed to this repo. I recommend doing this after the publish to crates.io succeeds to prevent any mishaps around pushing a tag for something that can't actually be published. 37 | * The changelog serves as a sort of logical staging area for releases 38 | * If a breaking API change happens, and the changelog has not advanced to a new major version, we roll the changelog to a new major version and open an issue to release the previous patch (and post-1.0, minor) version. 39 | * Before rolling to a new major version, it would be nice to release a non-breaking point release to let current users silently take advantage of any improvements 40 | 41 | #### Becoming a Maintainer 42 | 43 | * If you have a history of participation in this repo, agree to these rules, and wish to take on maintainership responsibilities, you may open an issue. If an owner agrees, they will add you to the maintainer group and the crate publishers team. 
44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | rust-Speedb 2 | ============ 3 | ![RocksDB build](https://github.com/rust-rocksdb/rust-rocksdb/workflows/RocksDB%20build/badge.svg?branch=master) 4 | [![crates.io](https://img.shields.io/crates/v/rocksdb.svg)](https://crates.io/crates/rocksdb) 5 | [![documentation](https://docs.rs/rocksdb/badge.svg)](https://docs.rs/rocksdb) 6 | [![license](https://img.shields.io/crates/l/rocksdb.svg)](https://github.com/rust-rocksdb/rust-rocksdb/blob/master/LICENSE) 7 | [![Gitter chat](https://badges.gitter.im/rust-rocksdb/gitter.png)](https://gitter.im/rust-rocksdb/lobby) 8 | ![rust 1.63.0 required](https://img.shields.io/badge/rust-1.63.0-blue.svg?label=MSRV) 9 | 10 | 11 | ![GitHub commits (since latest release)](https://img.shields.io/github/commits-since/rust-rocksdb/rust-rocksdb/latest.svg) 12 | 13 | ## Requirements 14 | 15 | - Clang and LLVM 16 | 17 | ## Contributing 18 | 19 | Feedback and pull requests welcome! If a particular feature of RocksDB is 20 | important to you, please let me know by opening an issue, and I'll 21 | prioritize it. 22 | 23 | ## Usage 24 | 25 | This binding is statically linked with a specific version of RocksDB. If you 26 | want to build it yourself, make sure you've also cloned the RocksDB and 27 | compression submodules: 28 | 29 | git submodule update --init --recursive 30 | 31 | ## Compression Support 32 | By default, support for the [Snappy](https://github.com/google/snappy), 33 | [LZ4](https://github.com/lz4/lz4), [Zstd](https://github.com/facebook/zstd), 34 | [Zlib](https://zlib.net), and [Bzip2](http://www.bzip.org) compression 35 | is enabled through crate features. If support for all of these compression 36 | algorithms is not needed, default features can be disabled and specific 37 | compression algorithms can be enabled. 
For example, to enable only LZ4 38 | compression support, make these changes to your Cargo.toml: 39 | 40 | ``` 41 | [dependencies.rocksdb] 42 | default-features = false 43 | features = ["lz4"] 44 | ``` 45 | 46 | ## Multithreaded ColumnFamily alternation 47 | 48 | The underlying RocksDB does allow column families to be created and dropped 49 | from multiple threads concurrently. But this crate doesn't allow it by default 50 | for compatibility. If you need to modify column families concurrently, enable 51 | crate feature called `multi-threaded-cf`, which makes this binding's 52 | data structures to use RwLock by default. Alternatively, you can directly create 53 | `DBWithThreadMode` without enabling the crate feature. 54 | -------------------------------------------------------------------------------- /code-of-conduct.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at t@jujit.su. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /libspeedb-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "libspeedb-sys" 3 | version = "0.0.3+2.6.0" 4 | edition = "2018" 5 | rust-version = "1.63" 6 | authors = ["Karl Hobley ", "Arkadiy Paronyan "] 7 | license = "MIT/Apache-2.0/BSD-3-Clause" 8 | description = "Native bindings to libspeedb" 9 | readme = "README.md" 10 | repository = "https://github.com/speedb-io/rust-speedb" 11 | keywords = [ "bindings", "ffi", "speedb" ] 12 | categories = [ "api-bindings", "database", "external-ffi-bindings" ] 13 | links = "speedb" 14 | exclude = [ 15 | "speedb/docs", 16 | "speedb/java", 17 | "speedb/fuzz", 18 | "speedb/plugin/speedb/java", 
19 | "speedb/.github" 20 | ] 21 | 22 | 23 | [features] 24 | default = [ "static" ] 25 | jemalloc = ["tikv-jemalloc-sys"] 26 | static = ["libz-sys?/static", "bzip2-sys?/static"] 27 | io-uring = ["pkg-config"] 28 | snappy = [] 29 | lz4 = ["lz4-sys"] 30 | zstd = ["zstd-sys"] 31 | zlib = ["libz-sys"] 32 | bzip2 = ["bzip2-sys"] 33 | rtti = [] 34 | 35 | [dependencies] 36 | libc = "0.2" 37 | tikv-jemalloc-sys = { version = "0.5", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } 38 | lz4-sys = { version = "1.9", optional = true } 39 | zstd-sys = { version = "2.0", features = ["zdict_builder"], optional = true } 40 | libz-sys = { version = "1.1", default-features = false, optional = true } 41 | bzip2-sys = { version = "0.1", default-features = false, optional = true } 42 | 43 | [dev-dependencies] 44 | const-cstr = "0.3" 45 | uuid = { version = "1.0", features = ["v4"] } 46 | 47 | [build-dependencies] 48 | cc = { version = "1.0", features = ["parallel"] } 49 | bindgen = { version = "0.65", default-features = false, features = ["runtime"] } 50 | glob = "0.3" 51 | pkg-config = { version = "0.3", optional = true } 52 | -------------------------------------------------------------------------------- /libspeedb-sys/Makefile: -------------------------------------------------------------------------------- 1 | include speedb/src.mk 2 | 3 | speedb_lib_sources.txt: speedb/src.mk 4 | @echo -n "${LIB_SOURCES}" | tr ' ' '\n' > speedb_lib_sources.txt 5 | 6 | gen_lib_sources: speedb_lib_sources.txt 7 | -------------------------------------------------------------------------------- /libspeedb-sys/README.md: -------------------------------------------------------------------------------- 1 | # RocksDB bindings 2 | 3 | Low-level bindings to [RocksDB's](https://github.com/facebook/rocksdb) C API. 
4 | 5 | Based on original work by Tyler Neely 6 | https://github.com/rust-rocksdb/rust-rocksdb 7 | and Jeremy Fitzhardinge 8 | https://github.com/jsgf/rocksdb-sys 9 | 10 | ### Version 11 | 12 | The librocksdb-sys version number is in the format `X.Y.Z+RX.RY.RZ`, where 13 | `X.Y.Z` is the version of this crate and follows SemVer conventions, while 14 | `RX.RY.RZ` is the version of the bundled rocksdb. 15 | -------------------------------------------------------------------------------- /libspeedb-sys/build_version.cc: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | 3 | #include 4 | 5 | #include "rocksdb/version.h" 6 | #include "speedb/version.h" 7 | #include "rocksdb/utilities/object_registry.h" 8 | #include "util/string_util.h" 9 | 10 | // The build script may replace these values with real values based 11 | // on whether or not GIT is available and the platform settings 12 | static const std::string speedb_build_git_sha = "speedb_build_git_sha:ea70d40f86b70b1ac71d0d15152185a54e623f76"; 13 | static const std::string speedb_build_git_tag = "speedb_build_git_tag:release/2.6"; 14 | #define HAS_GIT_CHANGES 0 15 | #if HAS_GIT_CHANGES == 0 16 | // If HAS_GIT_CHANGES is 0, the GIT date is used. 17 | // Use the time the branch/tag was last modified 18 | static const std::string speedb_build_date = "speedb_build_date: "; 19 | #else 20 | // If HAS_GIT_CHANGES is > 0, the branch/tag has modifications. 21 | // Use the time the build was created. 22 | static const std::string speedb_build_date = "speedb_build_date:2023-09-12 06:50:56"; 23 | #endif 24 | 25 | #define SPDB_BUILD_TAG "?" 
26 | static const std::string speedb_build_tag = "speedb_build_tag:" SPDB_BUILD_TAG; 27 | 28 | #define USE_RTTI "" 29 | static const std::string use_rtti = "use_rtti:" USE_RTTI; 30 | 31 | #define DEBUG_LEVEL "0" 32 | static const std::string debug_level = "debug_level:" DEBUG_LEVEL; 33 | 34 | #define PORTABLE "" 35 | static const std::string portable = "portable:" PORTABLE; 36 | 37 | extern "C" { 38 | int register_SpeedbPlugins(ROCKSDB_NAMESPACE::ObjectLibrary&, const std::string&); 39 | } // extern "C" 40 | 41 | std::unordered_map ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = { 42 | {"speedb", register_SpeedbPlugins}, 43 | }; 44 | 45 | namespace ROCKSDB_NAMESPACE { 46 | static void AddProperty(std::unordered_map *props, const std::string& name) { 47 | size_t colon = name.find(":"); 48 | if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) { 49 | // If we found a "@:", then this property was a build-time substitution that failed. Skip it 50 | size_t at = name.find("@", colon); 51 | if (at != colon + 1) { 52 | // Everything before the colon is the name, after is the value 53 | (*props)[name.substr(0, colon)] = name.substr(colon + 1); 54 | } 55 | } 56 | } 57 | 58 | static std::unordered_map* LoadPropertiesSet(std::string p) { 59 | if(p == "properties"){ 60 | auto * properties = new std::unordered_map(); 61 | AddProperty(properties, speedb_build_git_sha); 62 | AddProperty(properties, speedb_build_git_tag); 63 | AddProperty(properties, speedb_build_date); 64 | if (SPDB_BUILD_TAG[0] == '@') { 65 | AddProperty(properties, "?"); 66 | } else { 67 | AddProperty(properties, speedb_build_tag); 68 | } 69 | return properties; 70 | } else { 71 | auto * debug_properties = new std::unordered_map(); 72 | AddProperty(debug_properties, use_rtti); 73 | AddProperty(debug_properties, debug_level); 74 | AddProperty(debug_properties, portable); 75 | return debug_properties; 76 | } 77 | } 78 | 79 | const std::unordered_map& GetRocksBuildProperties() { 80 | static 
std::unique_ptr> props(LoadPropertiesSet("properties")); 81 | return *props; 82 | } 83 | const std::unordered_map& GetRocksDebugProperties() { 84 | static std::unique_ptr> props(LoadPropertiesSet("debug_properties")); 85 | return *props; 86 | } 87 | 88 | std::string GetRocksVersionAsString(bool with_patch) { 89 | std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR); 90 | if (with_patch) { 91 | return version + "." + std::to_string(ROCKSDB_PATCH); 92 | } else { 93 | return version; 94 | } 95 | } 96 | 97 | std::string GetSpeedbVersionAsString(bool with_patch) { 98 | std::string version = std::to_string(SPEEDB_MAJOR) + "." + std::to_string(SPEEDB_MINOR); 99 | if (with_patch) { 100 | version += "." + std::to_string(SPEEDB_PATCH); 101 | // Only add a build tag if it was specified (e.g. not a release build) 102 | if (SPDB_BUILD_TAG[0] != '\0') { 103 | if (SPDB_BUILD_TAG[0] == '@') { 104 | // In case build tag substitution at build time failed, add a question mark 105 | version += "-?"; 106 | } else { 107 | version += "-" + std::string(SPDB_BUILD_TAG); 108 | } 109 | } 110 | } 111 | return version; 112 | } 113 | 114 | std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) { 115 | std::string info = program + " (Speedb) " + GetSpeedbVersionAsString(true) + 116 | " (" + GetRocksVersionAsString(true) + ")"; 117 | if (verbose) { 118 | for (const auto& it : GetRocksBuildProperties()) { 119 | info.append("\n "); 120 | info.append(it.first); 121 | info.append(": "); 122 | info.append(it.second); 123 | } 124 | info.append("\n Build properties:"); 125 | info.append(GetRocksDebugPropertiesAsString()); 126 | } 127 | return info; 128 | } 129 | 130 | std::string GetRocksDebugPropertiesAsString() { 131 | std::string info; 132 | for (const auto& it : GetRocksDebugProperties()) { 133 | info.append(" "); 134 | info.append(it.first); 135 | info.append("="); 136 | info.append(it.second); 137 | } 138 | return info; 139 | } 140 | } 
// namespace ROCKSDB_NAMESPACE 141 | -------------------------------------------------------------------------------- /libspeedb-sys/snappy-stubs-public.h: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Google Inc. All Rights Reserved. 2 | // Author: sesse@google.com (Steinar H. Gunderson) 3 | // 4 | // Redistribution and use in source and binary forms, with or without 5 | // modification, are permitted provided that the following conditions are 6 | // met: 7 | // 8 | // * Redistributions of source code must retain the above copyright 9 | // notice, this list of conditions and the following disclaimer. 10 | // * Redistributions in binary form must reproduce the above 11 | // copyright notice, this list of conditions and the following disclaimer 12 | // in the documentation and/or other materials provided with the 13 | // distribution. 14 | // * Neither the name of Google Inc. nor the names of its 15 | // contributors may be used to endorse or promote products derived from 16 | // this software without specific prior written permission. 17 | // 18 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | // 30 | // Various type stubs for the open-source version of Snappy. 31 | // 32 | // This file cannot include config.h, as it is included from snappy.h, 33 | // which is a public header. Instead, snappy-stubs-public.h is generated by 34 | // from snappy-stubs-public.h.in at configure time. 35 | 36 | #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ 37 | #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ 38 | 39 | #define HAVE_STDINT_H 1 40 | 41 | 42 | #include 43 | 44 | #include 45 | 46 | //#include 47 | 48 | #if defined(_MSC_VER) 49 | #define ssize_t intptr_t 50 | #endif 51 | 52 | #define SNAPPY_MAJOR 1 53 | #define SNAPPY_MINOR 1 54 | #define SNAPPY_PATCHLEVEL 3 55 | #define SNAPPY_VERSION \ 56 | ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL) 57 | 58 | #include 59 | 60 | namespace snappy { 61 | 62 | #if HAVE_STDINT_H 63 | typedef int8_t int8; 64 | typedef uint8_t uint8; 65 | typedef int16_t int16; 66 | typedef uint16_t uint16; 67 | typedef int32_t int32; 68 | typedef uint32_t uint32; 69 | typedef int64_t int64; 70 | typedef uint64_t uint64; 71 | #else 72 | typedef signed char int8; 73 | typedef unsigned char uint8; 74 | typedef short int16; 75 | typedef unsigned short uint16; 76 | typedef int int32; 77 | typedef unsigned int uint32; 78 | typedef long long int64; 79 | typedef unsigned long long uint64; 80 | #endif 81 | 82 | typedef std::string string; 83 | 84 | #define DISALLOW_COPY_AND_ASSIGN(TypeName) \ 85 | TypeName(const TypeName&); \ 86 | void operator=(const TypeName&) 87 | 88 | // Windows does not have an iovec type, yet the concept is universally useful. 89 | // It is simple to define it ourselves, so we put it inside our own namespace. 
90 | struct iovec { 91 | void* iov_base; 92 | size_t iov_len; 93 | }; 94 | 95 | } // namespace snappy 96 | 97 | #endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ 98 | -------------------------------------------------------------------------------- /libspeedb-sys/speedb_lib_sources.txt: -------------------------------------------------------------------------------- 1 | cache/cache.cc 2 | cache/cache_entry_roles.cc 3 | cache/cache_key.cc 4 | cache/cache_helpers.cc 5 | cache/cache_reservation_manager.cc 6 | cache/charged_cache.cc 7 | cache/clock_cache.cc 8 | cache/lru_cache.cc 9 | cache/compressed_secondary_cache.cc 10 | cache/secondary_cache.cc 11 | cache/secondary_cache_adapter.cc 12 | cache/sharded_cache.cc 13 | db/arena_wrapped_db_iter.cc 14 | db/blob/blob_contents.cc 15 | db/blob/blob_fetcher.cc 16 | db/blob/blob_file_addition.cc 17 | db/blob/blob_file_builder.cc 18 | db/blob/blob_file_cache.cc 19 | db/blob/blob_file_garbage.cc 20 | db/blob/blob_file_meta.cc 21 | db/blob/blob_file_reader.cc 22 | db/blob/blob_garbage_meter.cc 23 | db/blob/blob_log_format.cc 24 | db/blob/blob_log_sequential_reader.cc 25 | db/blob/blob_log_writer.cc 26 | db/blob/blob_source.cc 27 | db/blob/prefetch_buffer_collection.cc 28 | db/builder.cc 29 | db/c.cc 30 | db/column_family.cc 31 | db/compaction/compaction.cc 32 | db/compaction/compaction_iterator.cc 33 | db/compaction/compaction_job.cc 34 | db/compaction/compaction_picker.cc 35 | db/compaction/compaction_picker_fifo.cc 36 | db/compaction/compaction_picker_level.cc 37 | db/compaction/compaction_picker_universal.cc 38 | db/compaction/compaction_service_job.cc 39 | db/compaction/compaction_state.cc 40 | db/compaction/compaction_outputs.cc 41 | db/compaction/sst_partitioner.cc 42 | db/compaction/subcompaction_state.cc 43 | db/convenience.cc 44 | db/db_filesnapshot.cc 45 | db/db_impl/compacted_db_impl.cc 46 | db/db_impl/db_impl.cc 47 | db/db_impl/db_impl_compaction_flush.cc 48 | db/db_impl/db_impl_debug.cc 49 | 
db/db_impl/db_impl_experimental.cc 50 | db/db_impl/db_impl_files.cc 51 | db/db_impl/db_impl_open.cc 52 | db/db_impl/db_impl_readonly.cc 53 | db/db_impl/db_impl_secondary.cc 54 | db/db_impl/db_impl_write.cc 55 | db/db_impl/db_spdb_impl_write.cc 56 | db/db_info_dumper.cc 57 | db/db_iter.cc 58 | db/dbformat.cc 59 | db/error_handler.cc 60 | db/event_helpers.cc 61 | db/experimental.cc 62 | db/external_sst_file_ingestion_job.cc 63 | db/file_indexer.cc 64 | db/flush_job.cc 65 | db/flush_scheduler.cc 66 | db/forward_iterator.cc 67 | db/import_column_family_job.cc 68 | db/internal_stats.cc 69 | db/logs_with_prep_tracker.cc 70 | db/log_reader.cc 71 | db/log_writer.cc 72 | db/malloc_stats.cc 73 | db/memtable.cc 74 | db/memtable_list.cc 75 | db/merge_helper.cc 76 | db/merge_operator.cc 77 | db/output_validator.cc 78 | db/periodic_task_scheduler.cc 79 | db/range_del_aggregator.cc 80 | db/range_tombstone_fragmenter.cc 81 | db/repair.cc 82 | db/seqno_to_time_mapping.cc 83 | db/snapshot_impl.cc 84 | db/table_cache.cc 85 | db/table_properties_collector.cc 86 | db/transaction_log_impl.cc 87 | db/trim_history_scheduler.cc 88 | db/version_builder.cc 89 | db/version_edit.cc 90 | db/version_edit_handler.cc 91 | db/version_set.cc 92 | db/wal_edit.cc 93 | db/wal_manager.cc 94 | db/wide/wide_column_serialization.cc 95 | db/wide/wide_columns.cc 96 | db/write_batch.cc 97 | db/write_batch_base.cc 98 | db/write_controller.cc 99 | db/write_stall_stats.cc 100 | db/write_thread.cc 101 | env/composite_env.cc 102 | env/env.cc 103 | env/env_chroot.cc 104 | env/env_encryption.cc 105 | env/env_posix.cc 106 | env/file_system.cc 107 | env/fs_posix.cc 108 | env/fs_remap.cc 109 | env/file_system_tracer.cc 110 | env/io_posix.cc 111 | env/mock_env.cc 112 | env/unique_id_gen.cc 113 | file/delete_scheduler.cc 114 | file/file_prefetch_buffer.cc 115 | file/file_util.cc 116 | file/filename.cc 117 | file/line_file_reader.cc 118 | file/random_access_file_reader.cc 119 | file/read_write_util.cc 120 | 
file/readahead_raf.cc 121 | file/sequence_file_reader.cc 122 | file/sst_file_manager_impl.cc 123 | file/writable_file_writer.cc 124 | logging/auto_roll_logger.cc 125 | logging/event_logger.cc 126 | logging/log_buffer.cc 127 | memory/arena.cc 128 | memory/concurrent_arena.cc 129 | memory/jemalloc_nodump_allocator.cc 130 | memory/memkind_kmem_allocator.cc 131 | memory/memory_allocator.cc 132 | memtable/alloc_tracker.cc 133 | memtable/hash_linklist_rep.cc 134 | memtable/hash_skiplist_rep.cc 135 | memtable/hash_spdb_rep.cc 136 | memtable/skiplistrep.cc 137 | memtable/vectorrep.cc 138 | memtable/write_buffer_manager.cc 139 | monitoring/histogram.cc 140 | monitoring/histogram_windowing.cc 141 | monitoring/in_memory_stats_history.cc 142 | monitoring/instrumented_mutex.cc 143 | monitoring/iostats_context.cc 144 | monitoring/perf_context.cc 145 | monitoring/perf_level.cc 146 | monitoring/persistent_stats_history.cc 147 | monitoring/statistics.cc 148 | monitoring/thread_status_impl.cc 149 | monitoring/thread_status_updater.cc 150 | monitoring/thread_status_updater_debug.cc 151 | monitoring/thread_status_util.cc 152 | monitoring/thread_status_util_debug.cc 153 | options/cf_options.cc 154 | options/configurable.cc 155 | options/customizable.cc 156 | options/db_options.cc 157 | options/options.cc 158 | options/options_helper.cc 159 | options/options_parser.cc 160 | port/mmap.cc 161 | port/port_posix.cc 162 | port/stack_trace.cc 163 | table/adaptive/adaptive_table_factory.cc 164 | table/block_based/binary_search_index_reader.cc 165 | table/block_based/block.cc 166 | table/block_based/block_based_table_builder.cc 167 | table/block_based/block_based_table_factory.cc 168 | table/block_based/block_based_table_iterator.cc 169 | table/block_based/block_based_table_reader.cc 170 | table/block_based/block_builder.cc 171 | table/block_based/block_cache.cc 172 | table/block_based/block_prefetcher.cc 173 | table/block_based/block_prefix_index.cc 174 | 
table/block_based/data_block_hash_index.cc 175 | table/block_based/data_block_footer.cc 176 | table/block_based/filter_block_reader_common.cc 177 | table/block_based/filter_policy.cc 178 | table/block_based/flush_block_policy.cc 179 | table/block_based/full_filter_block.cc 180 | table/block_based/hash_index_reader.cc 181 | table/block_based/index_builder.cc 182 | table/block_based/index_reader_common.cc 183 | table/block_based/parsed_full_filter_block.cc 184 | table/block_based/partitioned_filter_block.cc 185 | table/block_based/partitioned_index_iterator.cc 186 | table/block_based/partitioned_index_reader.cc 187 | table/block_based/reader_common.cc 188 | table/block_based/table_pinning_policy.cc 189 | table/block_based/uncompression_dict_reader.cc 190 | table/block_fetcher.cc 191 | table/cuckoo/cuckoo_table_builder.cc 192 | table/cuckoo/cuckoo_table_factory.cc 193 | table/cuckoo/cuckoo_table_reader.cc 194 | table/format.cc 195 | table/get_context.cc 196 | table/iterator.cc 197 | table/merging_iterator.cc 198 | table/compaction_merging_iterator.cc 199 | table/meta_blocks.cc 200 | table/persistent_cache_helper.cc 201 | table/plain/plain_table_bloom.cc 202 | table/plain/plain_table_builder.cc 203 | table/plain/plain_table_factory.cc 204 | table/plain/plain_table_index.cc 205 | table/plain/plain_table_key_coding.cc 206 | table/plain/plain_table_reader.cc 207 | table/sst_file_dumper.cc 208 | table/sst_file_reader.cc 209 | table/sst_file_writer.cc 210 | table/table_factory.cc 211 | table/table_properties.cc 212 | table/two_level_iterator.cc 213 | table/unique_id.cc 214 | test_util/sync_point.cc 215 | test_util/sync_point_impl.cc 216 | test_util/transaction_test_util.cc 217 | tools/dump/db_dump_tool.cc 218 | trace_replay/trace_record_handler.cc 219 | trace_replay/trace_record_result.cc 220 | trace_replay/trace_record.cc 221 | trace_replay/trace_replay.cc 222 | trace_replay/block_cache_tracer.cc 223 | trace_replay/io_tracer.cc 224 | util/async_file_reader.cc 225 | 
util/build_version.cc 226 | util/cleanable.cc 227 | util/coding.cc 228 | util/compaction_job_stats_impl.cc 229 | util/comparator.cc 230 | util/compression.cc 231 | util/compression_context_cache.cc 232 | util/concurrent_task_limiter_impl.cc 233 | util/crc32c.cc 234 | util/crc32c_arm64.cc 235 | util/data_structure.cc 236 | util/dynamic_bloom.cc 237 | util/hash.cc 238 | util/murmurhash.cc 239 | util/random.cc 240 | util/rate_limiter.cc 241 | util/ribbon_config.cc 242 | util/slice.cc 243 | util/file_checksum_helper.cc 244 | util/status.cc 245 | util/stderr_logger.cc 246 | util/string_util.cc 247 | util/thread_local.cc 248 | util/threadpool_imp.cc 249 | util/xxhash.cc 250 | utilities/agg_merge/agg_merge.cc 251 | utilities/backup/backup_engine.cc 252 | utilities/blob_db/blob_compaction_filter.cc 253 | utilities/blob_db/blob_db.cc 254 | utilities/blob_db/blob_db_impl.cc 255 | utilities/blob_db/blob_db_impl_filesnapshot.cc 256 | utilities/blob_db/blob_file.cc 257 | utilities/cache_dump_load.cc 258 | utilities/cache_dump_load_impl.cc 259 | utilities/cassandra/cassandra_compaction_filter.cc 260 | utilities/cassandra/format.cc 261 | utilities/cassandra/merge_operator.cc 262 | utilities/checkpoint/checkpoint_impl.cc 263 | utilities/compaction_filters.cc 264 | utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc 265 | utilities/convenience/info_log_finder.cc 266 | utilities/counted_fs.cc 267 | utilities/debug.cc 268 | utilities/env_mirror.cc 269 | utilities/env_timed.cc 270 | utilities/fault_injection_env.cc 271 | utilities/fault_injection_fs.cc 272 | utilities/fault_injection_secondary_cache.cc 273 | utilities/injection_fs.cc 274 | utilities/leveldb_options/leveldb_options.cc 275 | utilities/memory/memory_util.cc 276 | utilities/merge_operators.cc 277 | utilities/merge_operators/max.cc 278 | utilities/merge_operators/put.cc 279 | utilities/merge_operators/sortlist.cc 280 | utilities/merge_operators/string_append/stringappend.cc 281 | 
utilities/merge_operators/string_append/stringappend2.cc 282 | utilities/merge_operators/uint64add.cc 283 | utilities/merge_operators/bytesxor.cc 284 | utilities/object_registry.cc 285 | utilities/option_change_migration/option_change_migration.cc 286 | utilities/options/options_util.cc 287 | utilities/persistent_cache/block_cache_tier.cc 288 | utilities/persistent_cache/block_cache_tier_file.cc 289 | utilities/persistent_cache/block_cache_tier_metadata.cc 290 | utilities/persistent_cache/persistent_cache_tier.cc 291 | utilities/persistent_cache/volatile_tier_impl.cc 292 | utilities/simulator_cache/cache_simulator.cc 293 | utilities/simulator_cache/sim_cache.cc 294 | utilities/table_properties_collectors/compact_on_deletion_collector.cc 295 | utilities/trace/file_trace_reader_writer.cc 296 | utilities/trace/replayer_impl.cc 297 | utilities/transactions/lock/lock_manager.cc 298 | utilities/transactions/lock/point/point_lock_tracker.cc 299 | utilities/transactions/lock/point/point_lock_manager.cc 300 | utilities/transactions/optimistic_transaction.cc 301 | utilities/transactions/optimistic_transaction_db_impl.cc 302 | utilities/transactions/pessimistic_transaction.cc 303 | utilities/transactions/pessimistic_transaction_db.cc 304 | utilities/transactions/snapshot_checker.cc 305 | utilities/transactions/transaction_base.cc 306 | utilities/transactions/transaction_db_mutex_impl.cc 307 | utilities/transactions/transaction_util.cc 308 | utilities/transactions/write_prepared_txn.cc 309 | utilities/transactions/write_prepared_txn_db.cc 310 | utilities/transactions/write_unprepared_txn.cc 311 | utilities/transactions/write_unprepared_txn_db.cc 312 | utilities/ttl/db_ttl_impl.cc 313 | utilities/wal_filter.cc 314 | utilities/write_batch_with_index/write_batch_with_index.cc 315 | utilities/write_batch_with_index/write_batch_with_index_internal.cc 316 | plugin/speedb/pinning_policy/scoped_pinning_policy.cc 317 | plugin/speedb/speedb_registry.cc 318 | 
plugin/speedb/paired_filter/speedb_paired_bloom_internal.cc
plugin/speedb/paired_filter/speedb_paired_bloom.cc

--------------------------------------------------------------------------------
/libspeedb-sys/src/lib.rs:
--------------------------------------------------------------------------------
// Copyright 2020 Tyler Neely, Alex Regueiro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Lint allowances: the crate body is generated at build time (see the
// `include!` of `$OUT_DIR/bindings.rs` below), so the emitted identifiers do
// not follow Rust naming conventions and routinely trip clippy.
#![allow(clippy::all)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]

// Ensure the compression libraries are linked in, despite them not being
// referenced directly from Rust code (the native library calls into them).
#[cfg(feature = "bzip2")]
extern crate bzip2_sys;
#[cfg(feature = "zlib")]
extern crate libz_sys;
#[cfg(feature = "lz4")]
extern crate lz4_sys;
#[cfg(feature = "zstd")]
extern crate zstd_sys;

// Pull in the FFI declarations produced by the build script.
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
--------------------------------------------------------------------------------
/libspeedb-sys/src/test.rs:
--------------------------------------------------------------------------------
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

use libc::*;
use std::ffi::{CStr, CString};
use std::ptr;
use std::str;

use super::*;

/// Copies a NUL-terminated C error string into an owned Rust `String`, then
/// releases the original allocation with `free()`.
///
/// The pointer must not be used after this call; ownership of the allocation
/// is taken over by this function.
pub fn error_message(ptr: *const i8) -> String {
    let c_str = unsafe { CStr::from_ptr(ptr as *const _) };
    let s = str::from_utf8(c_str.to_bytes()).unwrap().to_owned();
    unsafe {
        // The C side allocated the message; we are responsible for freeing it.
        free(ptr as *mut c_void);
    }
    s
}

// Smoke test of the raw bindings: open a database, write and read one key,
// close it, and destroy the on-disk files.
#[test]
fn internal() {
    unsafe {
        let opts = rocksdb_options_create();
        assert!(!opts.is_null());

        rocksdb_options_increase_parallelism(opts, 0);
        rocksdb_options_optimize_level_style_compaction(opts, 0);
        rocksdb_options_set_create_if_missing(opts, u8::from(true));

        let rustpath = "_rust_rocksdb_internaltest";
        let cpath = CString::new(rustpath).unwrap();

        // `err` stays null on success; each call below reuses the same slot.
        let mut err: *mut c_char = ptr::null_mut();
        let err_ptr: *mut *mut c_char = &mut err;
        let db = rocksdb_open(opts, cpath.as_ptr() as *const _, err_ptr);
        if !err.is_null() {
            println!("failed to open rocksdb: {}", error_message(err));
        }
        assert!(err.is_null());

        let writeopts = rocksdb_writeoptions_create();
        assert!(!writeopts.is_null());

        // Key/value lengths are passed explicitly (4 and 8), so the trailing
        // NUL bytes in the literals are not stored.
        let key = b"name\x00";
        let val = b"spacejam\x00";
        rocksdb_put(
            db,
            writeopts.clone(),
            key.as_ptr() as *const c_char,
            4,
            val.as_ptr() as *const c_char,
            8,
            err_ptr,
        );
        rocksdb_writeoptions_destroy(writeopts);
        assert!(err.is_null());

        let readopts = rocksdb_readoptions_create();
        assert!(!readopts.is_null());

        let mut val_len: size_t = 0;
        let val_len_ptr = &mut val_len as *mut size_t;
        // NOTE(review): the value buffer returned by rocksdb_get is discarded
        // here (leaked if the key is found) — confirm against the generated
        // binding whether the returned pointer should be freed.
        rocksdb_get(
            db,
            readopts.clone(),
            key.as_ptr() as *const c_char,
            4,
            val_len_ptr,
            err_ptr,
        );
        rocksdb_readoptions_destroy(readopts);
        assert!(err.is_null());
        rocksdb_close(db);
        rocksdb_destroy_db(opts, cpath.as_ptr() as *const _, err_ptr);
        assert!(err.is_null());
    }
}
--------------------------------------------------------------------------------
/src/checkpoint.rs:
--------------------------------------------------------------------------------
// Copyright 2018 Eugene P.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//! Implementation of bindings to RocksDB Checkpoint[1] API
//!
//! [1]: https://github.com/facebook/rocksdb/wiki/Checkpoints

use crate::{db::DBInner, ffi, ffi_util::to_cpath, DBCommon, Error, ThreadMode};
use std::{marker::PhantomData, path::Path};

/// Undocumented parameter for `ffi::rocksdb_checkpoint_create` function. Zero by default.
const LOG_SIZE_FOR_FLUSH: u64 = 0_u64;

/// Database's checkpoint object.
/// Used to create checkpoints of the specified DB from time to time.
28 | pub struct Checkpoint<'db> { 29 | inner: *mut ffi::rocksdb_checkpoint_t, 30 | _db: PhantomData<&'db ()>, 31 | } 32 | 33 | impl<'db> Checkpoint<'db> { 34 | /// Creates new checkpoint object for specific DB. 35 | /// 36 | /// Does not actually produce checkpoints, call `.create_checkpoint()` method to produce 37 | /// a DB checkpoint. 38 | pub fn new(db: &'db DBCommon) -> Result { 39 | let checkpoint: *mut ffi::rocksdb_checkpoint_t; 40 | 41 | unsafe { 42 | checkpoint = ffi_try!(ffi::rocksdb_checkpoint_object_create(db.inner.inner())); 43 | } 44 | 45 | if checkpoint.is_null() { 46 | return Err(Error::new("Could not create checkpoint object.".to_owned())); 47 | } 48 | 49 | Ok(Self { 50 | inner: checkpoint, 51 | _db: PhantomData, 52 | }) 53 | } 54 | 55 | /// Creates new physical DB checkpoint in directory specified by `path`. 56 | pub fn create_checkpoint>(&self, path: P) -> Result<(), Error> { 57 | let cpath = to_cpath(path)?; 58 | unsafe { 59 | ffi_try!(ffi::rocksdb_checkpoint_create( 60 | self.inner, 61 | cpath.as_ptr(), 62 | LOG_SIZE_FOR_FLUSH, 63 | )); 64 | } 65 | Ok(()) 66 | } 67 | } 68 | 69 | impl<'db> Drop for Checkpoint<'db> { 70 | fn drop(&mut self) { 71 | unsafe { 72 | ffi::rocksdb_checkpoint_object_destroy(self.inner); 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/column_family.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use crate::{db::MultiThreaded, ffi, Options}; 16 | 17 | use std::sync::Arc; 18 | 19 | /// The name of the default column family. 20 | /// 21 | /// The column family with this name is created implicitly whenever column 22 | /// families are used. 23 | pub const DEFAULT_COLUMN_FAMILY_NAME: &str = "default"; 24 | 25 | /// A descriptor for a RocksDB column family. 26 | /// 27 | /// A description of the column family, containing the name and `Options`. 28 | pub struct ColumnFamilyDescriptor { 29 | pub(crate) name: String, 30 | pub(crate) options: Options, 31 | } 32 | 33 | impl ColumnFamilyDescriptor { 34 | // Create a new column family descriptor with the specified name and options. 35 | pub fn new(name: S, options: Options) -> Self 36 | where 37 | S: Into, 38 | { 39 | Self { 40 | name: name.into(), 41 | options, 42 | } 43 | } 44 | 45 | /// Get the name of the ColumnFamilyDescriptor. 46 | pub fn name(&self) -> &str { 47 | &self.name 48 | } 49 | } 50 | 51 | /// An opaque type used to represent a column family. Returned from some functions, and used 52 | /// in others 53 | pub struct ColumnFamily { 54 | pub(crate) inner: *mut ffi::rocksdb_column_family_handle_t, 55 | } 56 | 57 | /// A specialized opaque type used to represent a column family by the [`MultiThreaded`] 58 | /// mode. Clone (and Copy) is derived to behave like `&ColumnFamily` (this is used for 59 | /// single-threaded mode). `Clone`/`Copy` is safe because this lifetime is bound to DB like 60 | /// iterators/snapshots. On top of it, this is as cheap and small as `&ColumnFamily` because 61 | /// this only has a single pointer-wide field. 
62 | pub struct BoundColumnFamily<'a> { 63 | pub(crate) inner: *mut ffi::rocksdb_column_family_handle_t, 64 | pub(crate) multi_threaded_cfs: std::marker::PhantomData<&'a MultiThreaded>, 65 | } 66 | 67 | // internal struct which isn't exposed to public api. 68 | // but its memory will be exposed after transmute()-ing to BoundColumnFamily. 69 | // ColumnFamily's lifetime should be bound to DB. But, db holds cfs and cfs can't easily 70 | // self-reference DB as its lifetime due to rust's type system 71 | pub(crate) struct UnboundColumnFamily { 72 | pub(crate) inner: *mut ffi::rocksdb_column_family_handle_t, 73 | } 74 | 75 | impl UnboundColumnFamily { 76 | pub(crate) fn bound_column_family<'a>(self: Arc) -> Arc> { 77 | // SAFETY: the new BoundColumnFamily here just adding lifetime, 78 | // so that column family handle won't outlive db. 79 | unsafe { Arc::from_raw(Arc::into_raw(self).cast()) } 80 | } 81 | } 82 | 83 | fn destroy_handle(handle: *mut ffi::rocksdb_column_family_handle_t) { 84 | // SAFETY: This should be called only from various Drop::drop(), strictly keeping a 1-to-1 85 | // ownership to avoid double invocation to the rocksdb function with same handle. 86 | unsafe { 87 | ffi::rocksdb_column_family_handle_destroy(handle); 88 | } 89 | } 90 | 91 | impl Drop for ColumnFamily { 92 | fn drop(&mut self) { 93 | destroy_handle(self.inner); 94 | } 95 | } 96 | 97 | // these behaviors must be identical between BoundColumnFamily and UnboundColumnFamily 98 | // due to the unsafe transmute() in bound_column_family()! 99 | impl<'a> Drop for BoundColumnFamily<'a> { 100 | fn drop(&mut self) { 101 | destroy_handle(self.inner); 102 | } 103 | } 104 | 105 | impl Drop for UnboundColumnFamily { 106 | fn drop(&mut self) { 107 | destroy_handle(self.inner); 108 | } 109 | } 110 | 111 | /// Handy type alias to hide actual type difference to reference [`ColumnFamily`] 112 | /// depending on the `multi-threaded-cf` crate feature. 
113 | #[cfg(not(feature = "multi-threaded-cf"))] 114 | pub type ColumnFamilyRef<'a> = &'a ColumnFamily; 115 | 116 | #[cfg(feature = "multi-threaded-cf")] 117 | pub type ColumnFamilyRef<'a> = Arc>; 118 | 119 | /// Utility trait to accept both supported references to `ColumnFamily` 120 | /// (`&ColumnFamily` and `BoundColumnFamily`) 121 | pub trait AsColumnFamilyRef { 122 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t; 123 | } 124 | 125 | impl AsColumnFamilyRef for ColumnFamily { 126 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t { 127 | self.inner 128 | } 129 | } 130 | 131 | impl<'a> AsColumnFamilyRef for &'a ColumnFamily { 132 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t { 133 | self.inner 134 | } 135 | } 136 | 137 | // Only implement for Arc-ed BoundColumnFamily as this tightly coupled and 138 | // implementation detail, considering use of std::mem::transmute. BoundColumnFamily 139 | // isn't expected to be used as naked. 140 | // Also, ColumnFamilyRef might not be Arc> depending crate 141 | // feature flags so, we can't use the type alias here. 142 | impl<'a> AsColumnFamilyRef for Arc> { 143 | fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t { 144 | self.inner 145 | } 146 | } 147 | 148 | unsafe impl Send for ColumnFamily {} 149 | unsafe impl Send for UnboundColumnFamily {} 150 | unsafe impl Sync for UnboundColumnFamily {} 151 | unsafe impl<'a> Send for BoundColumnFamily<'a> {} 152 | -------------------------------------------------------------------------------- /src/compaction_filter.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

use libc::{c_char, c_int, c_uchar, c_void, size_t};
use std::ffi::{CStr, CString};
use std::slice;

/// Decision about how to handle compacting an object
///
/// This is returned by a compaction filter callback. Depending
/// on the value, the object may be kept, removed, or changed
/// in the database during a compaction.
pub enum Decision {
    /// Keep the old value
    Keep,
    /// Remove the object from the database
    Remove,
    /// Change the value for the key
    ///
    /// The replacement must be `'static` because the raw pointer to it is
    /// handed back to the C side after the callback returns.
    Change(&'static [u8]),
}

/// CompactionFilter allows an application to modify/delete a key-value at
/// the time of compaction.
pub trait CompactionFilter {
    /// The compaction process invokes this
    /// method for kv that is being compacted. The application can inspect
    /// the existing value of the key and make decision based on it.
    ///
    /// Key-Values that are results of merge operation during compaction are not
    /// passed into this function. Currently, when you have a mix of Put()s and
    /// Merge()s on a same key, we only guarantee to process the merge operands
    /// through the compaction filters. Put()s might be processed, or might not.
    ///
    /// When the value is to be preserved, the application has the option
    /// to modify the existing_value and pass it back through new_value.
    /// value_changed needs to be set to true in this case.
    ///
    /// Note that RocksDB snapshots (i.e. call GetSnapshot() API on a
    /// DB* object) will not guarantee to preserve the state of the DB with
    /// CompactionFilter. Data seen from a snapshot might disappear after a
    /// compaction finishes. If you use snapshots, think twice about whether you
    /// want to use compaction filter and whether you are using it in a safe way.
    ///
    /// If the CompactionFilter was created by a factory, then it will only ever
    /// be used by a single thread that is doing the compaction run, and this
    /// call does not need to be thread-safe. However, multiple filters may be
    /// in existence and operating concurrently.
    fn filter(&mut self, level: u32, key: &[u8], value: &[u8]) -> Decision;

    /// Returns a name that identifies this compaction filter.
    /// The name will be printed to LOG file on start up for diagnosis.
    fn name(&self) -> &CStr;
}

/// Function to filter compaction with.
///
/// This function takes the level of compaction, the key, and the existing value
/// and returns the decision about how to handle the Key-Value pair.
71 | /// 72 | /// See [Options::set_compaction_filter][set_compaction_filter] for more details 73 | /// 74 | /// [set_compaction_filter]: ../struct.Options.html#method.set_compaction_filter 75 | pub trait CompactionFilterFn: FnMut(u32, &[u8], &[u8]) -> Decision {} 76 | impl CompactionFilterFn for F where F: FnMut(u32, &[u8], &[u8]) -> Decision + Send + 'static {} 77 | 78 | pub struct CompactionFilterCallback 79 | where 80 | F: CompactionFilterFn, 81 | { 82 | pub name: CString, 83 | pub filter_fn: F, 84 | } 85 | 86 | impl CompactionFilter for CompactionFilterCallback 87 | where 88 | F: CompactionFilterFn, 89 | { 90 | fn name(&self) -> &CStr { 91 | self.name.as_c_str() 92 | } 93 | 94 | fn filter(&mut self, level: u32, key: &[u8], value: &[u8]) -> Decision { 95 | (self.filter_fn)(level, key, value) 96 | } 97 | } 98 | 99 | pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) 100 | where 101 | F: CompactionFilter, 102 | { 103 | drop(Box::from_raw(raw_cb as *mut F)); 104 | } 105 | 106 | pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char 107 | where 108 | F: CompactionFilter, 109 | { 110 | let cb = &*(raw_cb as *mut F); 111 | cb.name().as_ptr() 112 | } 113 | 114 | pub unsafe extern "C" fn filter_callback( 115 | raw_cb: *mut c_void, 116 | level: c_int, 117 | raw_key: *const c_char, 118 | key_length: size_t, 119 | existing_value: *const c_char, 120 | value_length: size_t, 121 | new_value: *mut *mut c_char, 122 | new_value_length: *mut size_t, 123 | value_changed: *mut c_uchar, 124 | ) -> c_uchar 125 | where 126 | F: CompactionFilter, 127 | { 128 | use self::Decision::{Change, Keep, Remove}; 129 | 130 | let cb = &mut *(raw_cb as *mut F); 131 | let key = slice::from_raw_parts(raw_key as *const u8, key_length); 132 | let oldval = slice::from_raw_parts(existing_value as *const u8, value_length); 133 | let result = cb.filter(level as u32, key, oldval); 134 | match result { 135 | Keep => 0, 136 | Remove => 1, 137 | Change(newval) => { 138 | 
*new_value = newval.as_ptr() as *mut c_char; 139 | *new_value_length = newval.len() as size_t; 140 | *value_changed = 1_u8; 141 | 0 142 | } 143 | } 144 | } 145 | 146 | #[cfg(test)] 147 | #[allow(unused_variables)] 148 | fn test_filter(level: u32, key: &[u8], value: &[u8]) -> Decision { 149 | use self::Decision::{Change, Keep, Remove}; 150 | match key.first() { 151 | Some(&b'_') => Remove, 152 | Some(&b'%') => Change(b"secret"), 153 | _ => Keep, 154 | } 155 | } 156 | 157 | #[test] 158 | fn compaction_filter_test() { 159 | use crate::{Options, DB}; 160 | 161 | let path = "_rust_rocksdb_filter_test"; 162 | let mut opts = Options::default(); 163 | opts.create_if_missing(true); 164 | opts.set_compaction_filter("test", test_filter); 165 | { 166 | let db = DB::open(&opts, path).unwrap(); 167 | let _r = db.put(b"k1", b"a"); 168 | let _r = db.put(b"_k", b"b"); 169 | let _r = db.put(b"%k", b"c"); 170 | db.compact_range(None::<&[u8]>, None::<&[u8]>); 171 | assert_eq!(&*db.get(b"k1").unwrap().unwrap(), b"a"); 172 | assert!(db.get(b"_k").unwrap().is_none()); 173 | assert_eq!(&*db.get(b"%k").unwrap().unwrap(), b"secret"); 174 | } 175 | let result = DB::destroy(&opts, path); 176 | assert!(result.is_ok()); 177 | } 178 | -------------------------------------------------------------------------------- /src/compaction_filter_factory.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::CStr; 2 | 3 | use libc::{self, c_char, c_void}; 4 | 5 | use crate::{ 6 | compaction_filter::{self, CompactionFilter}, 7 | ffi, 8 | }; 9 | 10 | /// Each compaction will create a new CompactionFilter allowing the 11 | /// application to know about different compactions. 
12 | /// 13 | /// See [compaction_filter::CompactionFilter][CompactionFilter] and 14 | /// [Options::set_compaction_filter_factory][set_compaction_filter_factory] 15 | /// for more details 16 | /// 17 | /// [CompactionFilter]: ../compaction_filter/trait.CompactionFilter.html 18 | /// [set_compaction_filter_factory]: ../struct.Options.html#method.set_compaction_filter_factory 19 | pub trait CompactionFilterFactory { 20 | type Filter: CompactionFilter; 21 | 22 | /// Returns a CompactionFilter for the compaction process 23 | fn create(&mut self, context: CompactionFilterContext) -> Self::Filter; 24 | 25 | /// Returns a name that identifies this compaction filter factory. 26 | fn name(&self) -> &CStr; 27 | } 28 | 29 | pub unsafe extern "C" fn destructor_callback(raw_self: *mut c_void) 30 | where 31 | F: CompactionFilterFactory, 32 | { 33 | drop(Box::from_raw(raw_self as *mut F)); 34 | } 35 | 36 | pub unsafe extern "C" fn name_callback(raw_self: *mut c_void) -> *const c_char 37 | where 38 | F: CompactionFilterFactory, 39 | { 40 | let self_ = &*(raw_self as *const c_void as *const F); 41 | self_.name().as_ptr() 42 | } 43 | 44 | /// Context information of a compaction run 45 | pub struct CompactionFilterContext { 46 | /// Does this compaction run include all data files 47 | pub is_full_compaction: bool, 48 | /// Is this compaction requested by the client (true), 49 | /// or is it occurring as an automatic compaction process 50 | pub is_manual_compaction: bool, 51 | } 52 | 53 | impl CompactionFilterContext { 54 | unsafe fn from_raw(ptr: *mut ffi::rocksdb_compactionfiltercontext_t) -> Self { 55 | let is_full_compaction = ffi::rocksdb_compactionfiltercontext_is_full_compaction(ptr) != 0; 56 | let is_manual_compaction = 57 | ffi::rocksdb_compactionfiltercontext_is_manual_compaction(ptr) != 0; 58 | 59 | Self { 60 | is_full_compaction, 61 | is_manual_compaction, 62 | } 63 | } 64 | } 65 | 66 | pub unsafe extern "C" fn create_compaction_filter_callback( 67 | raw_self: *mut 
c_void, 68 | context: *mut ffi::rocksdb_compactionfiltercontext_t, 69 | ) -> *mut ffi::rocksdb_compactionfilter_t 70 | where 71 | F: CompactionFilterFactory, 72 | { 73 | let self_ = &mut *(raw_self as *mut F); 74 | let context = CompactionFilterContext::from_raw(context); 75 | let filter = Box::new(self_.create(context)); 76 | 77 | let filter_ptr = Box::into_raw(filter); 78 | 79 | ffi::rocksdb_compactionfilter_create( 80 | filter_ptr as *mut c_void, 81 | Some(compaction_filter::destructor_callback::), 82 | Some(compaction_filter::filter_callback::), 83 | Some(compaction_filter::name_callback::), 84 | ) 85 | } 86 | 87 | #[cfg(test)] 88 | mod tests { 89 | use super::*; 90 | use crate::compaction_filter::Decision; 91 | use crate::{Options, DB}; 92 | use std::ffi::CString; 93 | 94 | struct CountFilter(u16, CString); 95 | impl CompactionFilter for CountFilter { 96 | fn filter(&mut self, _level: u32, _key: &[u8], _value: &[u8]) -> crate::CompactionDecision { 97 | self.0 += 1; 98 | if self.0 > 2 { 99 | Decision::Remove 100 | } else { 101 | Decision::Keep 102 | } 103 | } 104 | 105 | fn name(&self) -> &CStr { 106 | &self.1 107 | } 108 | } 109 | 110 | struct TestFactory(CString); 111 | impl CompactionFilterFactory for TestFactory { 112 | type Filter = CountFilter; 113 | 114 | fn create(&mut self, _context: CompactionFilterContext) -> Self::Filter { 115 | CountFilter(0, CString::new("CountFilter").unwrap()) 116 | } 117 | 118 | fn name(&self) -> &CStr { 119 | &self.0 120 | } 121 | } 122 | 123 | #[test] 124 | fn compaction_filter_factory_test() { 125 | let path = "_rust_rocksdb_filter_factory_test"; 126 | let mut opts = Options::default(); 127 | opts.create_if_missing(true); 128 | opts.set_compaction_filter_factory(TestFactory(CString::new("TestFactory").unwrap())); 129 | { 130 | let db = DB::open(&opts, path).unwrap(); 131 | let _r = db.put(b"k1", b"a"); 132 | let _r = db.put(b"_rk", b"b"); 133 | let _r = db.put(b"%k", b"c"); 134 | db.compact_range(None::<&[u8]>, 
None::<&[u8]>); 135 | assert_eq!(db.get(b"%k1").unwrap(), None); 136 | } 137 | let result = DB::destroy(&opts, path); 138 | assert!(result.is_ok()); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /src/comparator.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | use libc::{c_char, c_int, c_void, size_t}; 17 | use std::cmp::Ordering; 18 | use std::ffi::CString; 19 | use std::slice; 20 | 21 | pub type CompareFn = dyn Fn(&[u8], &[u8]) -> Ordering; 22 | 23 | pub struct ComparatorCallback { 24 | pub name: CString, 25 | pub f: Box, 26 | } 27 | 28 | pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) { 29 | drop(Box::from_raw(raw_cb as *mut ComparatorCallback)); 30 | } 31 | 32 | pub unsafe extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char { 33 | let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback); 34 | let ptr = cb.name.as_ptr(); 35 | ptr as *const c_char 36 | } 37 | 38 | pub unsafe extern "C" fn compare_callback( 39 | raw_cb: *mut c_void, 40 | a_raw: *const c_char, 41 | a_len: size_t, 42 | b_raw: *const c_char, 43 | b_len: size_t, 44 | ) -> c_int { 45 | let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback); 46 | let a: &[u8] = slice::from_raw_parts(a_raw as 
*const u8, a_len); 47 | let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len); 48 | match (cb.f)(a, b) { 49 | Ordering::Less => -1, 50 | Ordering::Equal => 0, 51 | Ordering::Greater => 1, 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/db_pinnable_slice.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use crate::{ffi, DB}; 16 | use core::ops::Deref; 17 | use libc::size_t; 18 | use std::marker::PhantomData; 19 | use std::slice; 20 | 21 | /// Wrapper around RocksDB PinnableSlice struct. 22 | /// 23 | /// With a pinnable slice, we can directly leverage in-memory data within 24 | /// RocksDB to avoid unnecessary memory copies. The struct here wraps the 25 | /// returned raw pointer and ensures proper finalization work. 
26 | pub struct DBPinnableSlice<'a> { 27 | ptr: *mut ffi::rocksdb_pinnableslice_t, 28 | db: PhantomData<&'a DB>, 29 | } 30 | 31 | unsafe impl<'a> Send for DBPinnableSlice<'a> {} 32 | unsafe impl<'a> Sync for DBPinnableSlice<'a> {} 33 | 34 | impl<'a> AsRef<[u8]> for DBPinnableSlice<'a> { 35 | fn as_ref(&self) -> &[u8] { 36 | // Implement this via Deref so as not to repeat ourselves 37 | self 38 | } 39 | } 40 | 41 | impl<'a> Deref for DBPinnableSlice<'a> { 42 | type Target = [u8]; 43 | 44 | fn deref(&self) -> &[u8] { 45 | unsafe { 46 | let mut val_len: size_t = 0; 47 | let val = ffi::rocksdb_pinnableslice_value(self.ptr, &mut val_len) as *mut u8; 48 | slice::from_raw_parts(val, val_len) 49 | } 50 | } 51 | } 52 | 53 | impl<'a> Drop for DBPinnableSlice<'a> { 54 | fn drop(&mut self) { 55 | unsafe { 56 | ffi::rocksdb_pinnableslice_destroy(self.ptr); 57 | } 58 | } 59 | } 60 | 61 | impl<'a> DBPinnableSlice<'a> { 62 | /// Used to wrap a PinnableSlice from rocksdb to avoid unnecessary memcpy 63 | /// 64 | /// # Unsafe 65 | /// Requires that the pointer must be generated by rocksdb_get_pinned 66 | pub(crate) unsafe fn from_c(ptr: *mut ffi::rocksdb_pinnableslice_t) -> Self { 67 | Self { 68 | ptr, 69 | db: PhantomData, 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/env.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use libc::{self, c_int}; 4 | 5 | use crate::{ffi, Error}; 6 | 7 | /// An Env is an interface used by the rocksdb implementation to access 8 | /// operating system functionality like the filesystem etc. Callers 9 | /// may wish to provide a custom Env object when opening a database to 10 | /// get fine gain control; e.g., to rate limit file system operations. 11 | /// 12 | /// All Env implementations are safe for concurrent access from 13 | /// multiple threads without any external synchronization. 
14 | /// 15 | /// Note: currently, C API behinds C++ API for various settings. 16 | /// See also: `rocksdb/include/env.h` 17 | #[derive(Clone)] 18 | pub struct Env(pub(crate) Arc); 19 | 20 | pub(crate) struct EnvWrapper { 21 | pub(crate) inner: *mut ffi::rocksdb_env_t, 22 | } 23 | 24 | impl Drop for EnvWrapper { 25 | fn drop(&mut self) { 26 | unsafe { 27 | ffi::rocksdb_env_destroy(self.inner); 28 | } 29 | } 30 | } 31 | 32 | impl Env { 33 | /// Returns default env 34 | pub fn new() -> Result { 35 | let env = unsafe { ffi::rocksdb_create_default_env() }; 36 | if env.is_null() { 37 | Err(Error::new("Could not create mem env".to_owned())) 38 | } else { 39 | Ok(Self(Arc::new(EnvWrapper { inner: env }))) 40 | } 41 | } 42 | 43 | /// Returns a new environment that stores its data in memory and delegates 44 | /// all non-file-storage tasks to base_env. 45 | pub fn mem_env() -> Result { 46 | let env = unsafe { ffi::rocksdb_create_mem_env() }; 47 | if env.is_null() { 48 | Err(Error::new("Could not create mem env".to_owned())) 49 | } else { 50 | Ok(Self(Arc::new(EnvWrapper { inner: env }))) 51 | } 52 | } 53 | 54 | /// Sets the number of background worker threads of a specific thread pool for this environment. 55 | /// `LOW` is the default pool. 56 | /// 57 | /// Default: 1 58 | pub fn set_background_threads(&mut self, num_threads: c_int) { 59 | unsafe { 60 | ffi::rocksdb_env_set_background_threads(self.0.inner, num_threads); 61 | } 62 | } 63 | 64 | /// Sets the size of the high priority thread pool that can be used to 65 | /// prevent compactions from stalling memtable flushes. 66 | pub fn set_high_priority_background_threads(&mut self, n: c_int) { 67 | unsafe { 68 | ffi::rocksdb_env_set_high_priority_background_threads(self.0.inner, n); 69 | } 70 | } 71 | 72 | /// Sets the size of the low priority thread pool that can be used to 73 | /// prevent compactions from stalling memtable flushes. 
74 | pub fn set_low_priority_background_threads(&mut self, n: c_int) { 75 | unsafe { 76 | ffi::rocksdb_env_set_low_priority_background_threads(self.0.inner, n); 77 | } 78 | } 79 | 80 | /// Sets the size of the bottom priority thread pool that can be used to 81 | /// prevent compactions from stalling memtable flushes. 82 | pub fn set_bottom_priority_background_threads(&mut self, n: c_int) { 83 | unsafe { 84 | ffi::rocksdb_env_set_bottom_priority_background_threads(self.0.inner, n); 85 | } 86 | } 87 | 88 | /// Wait for all threads started by StartThread to terminate. 89 | pub fn join_all_threads(&mut self) { 90 | unsafe { 91 | ffi::rocksdb_env_join_all_threads(self.0.inner); 92 | } 93 | } 94 | 95 | /// Lowering IO priority for threads from the specified pool. 96 | pub fn lower_thread_pool_io_priority(&mut self) { 97 | unsafe { 98 | ffi::rocksdb_env_lower_thread_pool_io_priority(self.0.inner); 99 | } 100 | } 101 | 102 | /// Lowering IO priority for high priority thread pool. 103 | pub fn lower_high_priority_thread_pool_io_priority(&mut self) { 104 | unsafe { 105 | ffi::rocksdb_env_lower_high_priority_thread_pool_io_priority(self.0.inner); 106 | } 107 | } 108 | 109 | /// Lowering CPU priority for threads from the specified pool. 110 | pub fn lower_thread_pool_cpu_priority(&mut self) { 111 | unsafe { 112 | ffi::rocksdb_env_lower_thread_pool_cpu_priority(self.0.inner); 113 | } 114 | } 115 | 116 | /// Lowering CPU priority for high priority thread pool. 
117 | pub fn lower_high_priority_thread_pool_cpu_priority(&mut self) { 118 | unsafe { 119 | ffi::rocksdb_env_lower_high_priority_thread_pool_cpu_priority(self.0.inner); 120 | } 121 | } 122 | } 123 | 124 | unsafe impl Send for EnvWrapper {} 125 | unsafe impl Sync for EnvWrapper {} 126 | -------------------------------------------------------------------------------- /src/ffi_util.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Alex Regueiro 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | // 15 | 16 | use crate::{ffi, Error}; 17 | use libc::{self, c_char, c_void, size_t}; 18 | use std::ffi::{CStr, CString}; 19 | use std::path::Path; 20 | use std::ptr; 21 | 22 | pub(crate) unsafe fn from_cstr(ptr: *const c_char) -> String { 23 | let cstr = CStr::from_ptr(ptr as *const _); 24 | String::from_utf8_lossy(cstr.to_bytes()).into_owned() 25 | } 26 | 27 | pub(crate) unsafe fn raw_data(ptr: *const c_char, size: usize) -> Option> { 28 | if ptr.is_null() { 29 | None 30 | } else { 31 | let mut dst = vec![0; size]; 32 | ptr::copy_nonoverlapping(ptr as *const u8, dst.as_mut_ptr(), size); 33 | 34 | Some(dst) 35 | } 36 | } 37 | 38 | pub fn error_message(ptr: *const c_char) -> String { 39 | unsafe { 40 | let s = from_cstr(ptr); 41 | ffi::rocksdb_free(ptr as *mut c_void); 42 | s 43 | } 44 | } 45 | 46 | pub fn opt_bytes_to_ptr>(opt: Option) -> *const c_char { 47 | match opt { 48 | Some(v) => v.as_ref().as_ptr() as *const c_char, 49 | None => ptr::null(), 50 | } 51 | } 52 | 53 | pub(crate) fn to_cpath>(path: P) -> Result { 54 | match CString::new(path.as_ref().to_string_lossy().as_bytes()) { 55 | Ok(c) => Ok(c), 56 | Err(e) => Err(Error::new(format!( 57 | "Failed to convert path to CString: {e}" 58 | ))), 59 | } 60 | } 61 | 62 | macro_rules! ffi_try { 63 | ( $($function:ident)::*() ) => { 64 | ffi_try_impl!($($function)::*()) 65 | }; 66 | 67 | ( $($function:ident)::*( $arg1:expr $(, $arg:expr)* $(,)? ) ) => { 68 | ffi_try_impl!($($function)::*($arg1 $(, $arg)* ,)) 69 | }; 70 | } 71 | 72 | macro_rules! ffi_try_impl { 73 | ( $($function:ident)::*( $($arg:expr,)*) ) => {{ 74 | let mut err: *mut ::libc::c_char = ::std::ptr::null_mut(); 75 | let result = $($function)::*($($arg,)* &mut err); 76 | if !err.is_null() { 77 | return Err(Error::new($crate::ffi_util::error_message(err))); 78 | } 79 | result 80 | }}; 81 | } 82 | 83 | /// Value which can be converted into a C string. 
84 | /// 85 | /// The trait is used as argument to functions which wish to accept either 86 | /// [`&str`] or [`&CStr`](CStr) arguments while internally need to interact with 87 | /// C APIs. Accepting [`&str`] may be more convenient for users but requires 88 | /// conversion into [`CString`] internally which requires allocation. With this 89 | /// trait, latency-conscious users may choose to prepare [`CStr`] in advance and 90 | /// then pass it directly without having to incur the conversion cost. 91 | /// 92 | /// To use the trait, function should accept `impl CStrLike` and after baking 93 | /// the argument (with [`CStrLike::bake`] method) it can use it as a [`&CStr`](CStr) 94 | /// (since the baked result dereferences into [`CStr`]). 95 | /// 96 | /// # Example 97 | /// 98 | /// ``` 99 | /// use std::ffi::{CStr, CString}; 100 | /// use speedb::CStrLike; 101 | /// 102 | /// fn strlen(arg: impl CStrLike) -> std::result::Result { 103 | /// let baked = arg.bake().map_err(|err| err.to_string())?; 104 | /// Ok(unsafe { libc::strlen(baked.as_ptr()) }) 105 | /// } 106 | /// 107 | /// const FOO: &str = "foo"; 108 | /// const BAR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"bar\0") }; 109 | /// 110 | /// assert_eq!(Ok(3), strlen(FOO)); 111 | /// assert_eq!(Ok(3), strlen(BAR)); 112 | /// ``` 113 | pub trait CStrLike { 114 | type Baked: std::ops::Deref; 115 | type Error: std::fmt::Debug + std::fmt::Display; 116 | 117 | /// Bakes self into value which can be freely converted into [`&CStr`](CStr). 118 | /// 119 | /// This may require allocation and may fail if `self` has invalid value. 120 | fn bake(self) -> Result; 121 | 122 | /// Consumers and converts value into an owned [`CString`]. 123 | /// 124 | /// If `Self` is already a `CString` simply returns it; if it’s a reference 125 | /// to a `CString` then the value is cloned. In other cases this may 126 | /// require allocation and may fail if `self` has invalid value. 
127 | fn into_c_string(self) -> Result; 128 | } 129 | 130 | impl CStrLike for &str { 131 | type Baked = CString; 132 | type Error = std::ffi::NulError; 133 | 134 | fn bake(self) -> Result { 135 | CString::new(self) 136 | } 137 | fn into_c_string(self) -> Result { 138 | CString::new(self) 139 | } 140 | } 141 | 142 | // This is redundant for the most part and exists so that `foo(&string)` (where 143 | // `string: String` works just as if `foo` took `arg: &str` argument. 144 | impl CStrLike for &String { 145 | type Baked = CString; 146 | type Error = std::ffi::NulError; 147 | 148 | fn bake(self) -> Result { 149 | CString::new(self.as_bytes()) 150 | } 151 | fn into_c_string(self) -> Result { 152 | CString::new(self.as_bytes()) 153 | } 154 | } 155 | 156 | impl CStrLike for &CStr { 157 | type Baked = Self; 158 | type Error = std::convert::Infallible; 159 | 160 | fn bake(self) -> Result { 161 | Ok(self) 162 | } 163 | fn into_c_string(self) -> Result { 164 | Ok(self.to_owned()) 165 | } 166 | } 167 | 168 | // This exists so that if caller constructs a `CString` they can pass it into 169 | // the function accepting `CStrLike` argument. Some of such functions may take 170 | // the argument whereas otherwise they would need to allocated a new owned 171 | // object. 172 | impl CStrLike for CString { 173 | type Baked = CString; 174 | type Error = std::convert::Infallible; 175 | 176 | fn bake(self) -> Result { 177 | Ok(self) 178 | } 179 | fn into_c_string(self) -> Result { 180 | Ok(self) 181 | } 182 | } 183 | 184 | // This is redundant for the most part and exists so that `foo(&cstring)` (where 185 | // `string: CString` works just as if `foo` took `arg: &CStr` argument. 186 | impl<'a> CStrLike for &'a CString { 187 | type Baked = &'a CStr; 188 | type Error = std::convert::Infallible; 189 | 190 | fn bake(self) -> Result { 191 | Ok(self) 192 | } 193 | fn into_c_string(self) -> Result { 194 | Ok(self.clone()) 195 | } 196 | } 197 | 198 | /// Owned malloc-allocated memory slice. 
199 | /// Do not derive `Clone` for this because it will cause double-free. 200 | pub struct CSlice { 201 | data: *const c_char, 202 | len: size_t, 203 | } 204 | 205 | impl CSlice { 206 | /// Constructing such a slice may be unsafe. 207 | /// 208 | /// # Safety 209 | /// The caller must ensure that the pointer and length are valid. 210 | /// Moreover, `CSlice` takes the ownership of the memory and will free it 211 | /// using `rocksdb_free`. The caller must ensure that the memory is 212 | /// allocated by `malloc` in RocksDB and will not be freed by any other 213 | /// means. 214 | pub(crate) unsafe fn from_raw_parts(data: *const c_char, len: size_t) -> Self { 215 | Self { data, len } 216 | } 217 | } 218 | 219 | impl AsRef<[u8]> for CSlice { 220 | fn as_ref(&self) -> &[u8] { 221 | unsafe { std::slice::from_raw_parts(self.data as *const u8, self.len) } 222 | } 223 | } 224 | 225 | impl Drop for CSlice { 226 | fn drop(&mut self) { 227 | unsafe { 228 | ffi::rocksdb_free(self.data as *mut c_void); 229 | } 230 | } 231 | } 232 | 233 | #[test] 234 | fn test_c_str_like_bake() { 235 | fn test(value: S) -> Result { 236 | value 237 | .bake() 238 | .map(|value| unsafe { libc::strlen(value.as_ptr()) }) 239 | } 240 | 241 | assert_eq!(Ok(3), test("foo")); // &str 242 | assert_eq!(Ok(3), test(&String::from("foo"))); // String 243 | assert_eq!(Ok(3), test(CString::new("foo").unwrap().as_ref())); // &CStr 244 | assert_eq!(Ok(3), test(&CString::new("foo").unwrap())); // &CString 245 | assert_eq!(Ok(3), test(CString::new("foo").unwrap())); // CString 246 | 247 | assert_eq!(3, test("foo\0bar").err().unwrap().nul_position()); 248 | } 249 | 250 | #[test] 251 | fn test_c_str_like_into() { 252 | fn test(value: S) -> Result { 253 | value.into_c_string() 254 | } 255 | 256 | let want = CString::new("foo").unwrap(); 257 | 258 | assert_eq!(Ok(want.clone()), test("foo")); // &str 259 | assert_eq!(Ok(want.clone()), test(&String::from("foo"))); // &String 260 | assert_eq!( 261 | Ok(want.clone()), 
262 | test(CString::new("foo").unwrap().as_ref()) 263 | ); // &CStr 264 | assert_eq!(Ok(want.clone()), test(&CString::new("foo").unwrap())); // &CString 265 | assert_eq!(Ok(want), test(CString::new("foo").unwrap())); // CString 266 | 267 | assert_eq!(3, test("foo\0bar").err().unwrap().nul_position()); 268 | } 269 | -------------------------------------------------------------------------------- /src/iter_range.rs: -------------------------------------------------------------------------------- 1 | /// A range which can be set as iterate bounds on [`crate::ReadOptions`]. 2 | /// 3 | /// See [`crate::ReadOptions::set_iterate_range`] for documentation and 4 | /// examples. 5 | pub trait IterateBounds { 6 | /// Converts object into lower and upper bounds pair. 7 | /// 8 | /// If this object represents range with one of the bounds unset, 9 | /// corresponding element is returned as `None`. For example, `..upper` 10 | /// range would be converted into `(None, Some(upper))` pair. 11 | fn into_bounds(self) -> (Option>, Option>); 12 | } 13 | 14 | impl IterateBounds for std::ops::RangeFull { 15 | fn into_bounds(self) -> (Option>, Option>) { 16 | (None, None) 17 | } 18 | } 19 | 20 | impl>> IterateBounds for std::ops::Range { 21 | fn into_bounds(self) -> (Option>, Option>) { 22 | (Some(self.start.into()), Some(self.end.into())) 23 | } 24 | } 25 | 26 | impl>> IterateBounds for std::ops::RangeFrom { 27 | fn into_bounds(self) -> (Option>, Option>) { 28 | (Some(self.start.into()), None) 29 | } 30 | } 31 | 32 | impl>> IterateBounds for std::ops::RangeTo { 33 | fn into_bounds(self) -> (Option>, Option>) { 34 | (None, Some(self.end.into())) 35 | } 36 | } 37 | 38 | /// Representation of a range of keys starting with given prefix. 39 | /// 40 | /// Can be used as argument of [`crate::ReadOptions::set_iterate_range`] method 41 | /// to set iterate bounds. 
42 | #[derive(Clone, Copy)] 43 | pub struct PrefixRange(pub K); 44 | 45 | impl>> IterateBounds for PrefixRange { 46 | /// Converts the prefix range representation into pair of bounds. 47 | /// 48 | /// The conversion assumes lexicographical sorting on `u8` values. For 49 | /// example, `PrefixRange("a")` is equivalent to `"a".."b"` range. Note 50 | /// that for some prefixes, either of the bounds may be `None`. For 51 | /// example, an empty prefix is equivalent to a full range (i.e. both bounds 52 | /// being `None`). 53 | fn into_bounds(self) -> (Option>, Option>) { 54 | let start = self.0.into(); 55 | if start.is_empty() { 56 | (None, None) 57 | } else { 58 | let end = next_prefix(&start); 59 | (Some(start), end) 60 | } 61 | } 62 | } 63 | 64 | /// Returns lowest value following largest value with given prefix. 65 | /// 66 | /// In other words, computes upper bound for a prefix scan over list of keys 67 | /// sorted in lexicographical order. This means that a prefix scan can be 68 | /// expressed as range scan over a right-open `[prefix, next_prefix(prefix))` 69 | /// range. 70 | /// 71 | /// For example, for prefix `foo` the function returns `fop`. 72 | /// 73 | /// Returns `None` if there is no value which can follow value with given 74 | /// prefix. This happens when prefix consists entirely of `'\xff'` bytes (or is 75 | /// empty). 76 | fn next_prefix(prefix: &[u8]) -> Option> { 77 | let ffs = prefix 78 | .iter() 79 | .rev() 80 | .take_while(|&&byte| byte == u8::MAX) 81 | .count(); 82 | let next = &prefix[..(prefix.len() - ffs)]; 83 | if next.is_empty() { 84 | // Prefix consisted of \xff bytes. There is no prefix that 85 | // follows it. 
86 | None 87 | } else { 88 | let mut next = next.to_vec(); 89 | *next.last_mut().unwrap() += 1; 90 | Some(next) 91 | } 92 | } 93 | 94 | #[test] 95 | fn test_prefix_range() { 96 | fn test(start: &[u8], end: Option<&[u8]>) { 97 | let got = PrefixRange(start).into_bounds(); 98 | assert_eq!((Some(start), end), (got.0.as_deref(), got.1.as_deref())); 99 | } 100 | 101 | let empty: &[u8] = &[]; 102 | assert_eq!((None, None), PrefixRange(empty).into_bounds()); 103 | test(b"\xff", None); 104 | test(b"\xff\xff\xff\xff", None); 105 | test(b"a", Some(b"b")); 106 | test(b"a\xff\xff\xff", Some(b"b")); 107 | } 108 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | //! Rust wrapper for RocksDB. 17 | //! 18 | //! # Examples 19 | //! 20 | //! ``` 21 | //! use speedb::{DB, Options}; 22 | //! // NB: db is automatically closed at end of lifetime 23 | //! let path = "_path_for_rocksdb_storage"; 24 | //! { 25 | //! let db = DB::open_default(path).unwrap(); 26 | //! db.put(b"my key", b"my value").unwrap(); 27 | //! match db.get(b"my key") { 28 | //! Ok(Some(value)) => println!("retrieved value {}", String::from_utf8(value).unwrap()), 29 | //! Ok(None) => println!("value not found"), 30 | //! 
Err(e) => println!("operational problem encountered: {}", e), 31 | //! } 32 | //! db.delete(b"my key").unwrap(); 33 | //! } 34 | //! let _ = DB::destroy(&Options::default(), path); 35 | //! ``` 36 | //! 37 | //! Opening a database and a single column family with custom options: 38 | //! 39 | //! ``` 40 | //! use speedb::{DB, ColumnFamilyDescriptor, Options}; 41 | //! 42 | //! let path = "_path_for_rocksdb_storage_with_cfs"; 43 | //! let mut cf_opts = Options::default(); 44 | //! cf_opts.set_max_write_buffer_number(16); 45 | //! let cf = ColumnFamilyDescriptor::new("cf1", cf_opts); 46 | //! 47 | //! let mut db_opts = Options::default(); 48 | //! db_opts.create_missing_column_families(true); 49 | //! db_opts.create_if_missing(true); 50 | //! { 51 | //! let db = DB::open_cf_descriptors(&db_opts, path, vec![cf]).unwrap(); 52 | //! } 53 | //! let _ = DB::destroy(&db_opts, path); 54 | //! ``` 55 | //! 56 | 57 | #![warn(clippy::pedantic)] 58 | #![allow( 59 | // Next `cast_*` lints don't give alternatives. 60 | clippy::cast_possible_wrap, clippy::cast_possible_truncation, clippy::cast_sign_loss, 61 | // Next lints produce too much noise/false positives. 62 | clippy::module_name_repetitions, clippy::similar_names, clippy::must_use_candidate, 63 | // '... may panic' lints. 64 | // Too much work to fix. 
65 | clippy::missing_errors_doc, 66 | // False positive: WebSocket 67 | clippy::doc_markdown, 68 | clippy::missing_safety_doc, 69 | clippy::needless_pass_by_value, 70 | clippy::ptr_as_ptr, 71 | clippy::missing_panics_doc, 72 | clippy::from_over_into, 73 | )] 74 | 75 | #[macro_use] 76 | mod ffi_util; 77 | 78 | pub mod backup; 79 | pub mod checkpoint; 80 | mod column_family; 81 | pub mod compaction_filter; 82 | pub mod compaction_filter_factory; 83 | mod comparator; 84 | mod db; 85 | mod db_iterator; 86 | mod db_options; 87 | mod db_pinnable_slice; 88 | mod env; 89 | mod iter_range; 90 | pub mod merge_operator; 91 | pub mod perf; 92 | mod prop_name; 93 | pub mod properties; 94 | mod slice_transform; 95 | mod snapshot; 96 | mod sst_file_writer; 97 | mod transactions; 98 | mod write_batch; 99 | 100 | pub use crate::{ 101 | column_family::{ 102 | AsColumnFamilyRef, BoundColumnFamily, ColumnFamily, ColumnFamilyDescriptor, 103 | ColumnFamilyRef, DEFAULT_COLUMN_FAMILY_NAME, 104 | }, 105 | compaction_filter::Decision as CompactionDecision, 106 | db::{ 107 | DBAccess, DBCommon, DBWithThreadMode, LiveFile, MultiThreaded, SingleThreaded, ThreadMode, 108 | DB, 109 | }, 110 | db_iterator::{ 111 | DBIterator, DBIteratorWithThreadMode, DBRawIterator, DBRawIteratorWithThreadMode, 112 | DBWALIterator, Direction, IteratorMode, 113 | }, 114 | db_options::{ 115 | BlockBasedIndexType, BlockBasedOptions, BottommostLevelCompaction, Cache, ChecksumType, 116 | CompactOptions, CuckooTableOptions, DBCompactionStyle, DBCompressionType, DBPath, 117 | DBRecoveryMode, DataBlockIndexType, FifoCompactOptions, FlushOptions, 118 | IngestExternalFileOptions, LogLevel, MemtableFactory, Options, PlainTableFactoryOptions, 119 | ReadOptions, UniversalCompactOptions, UniversalCompactionStopStyle, WriteOptions, 120 | }, 121 | db_pinnable_slice::DBPinnableSlice, 122 | env::Env, 123 | ffi_util::CStrLike, 124 | iter_range::{IterateBounds, PrefixRange}, 125 | merge_operator::MergeOperands, 126 | 
perf::{PerfContext, PerfMetric, PerfStatsLevel}, 127 | slice_transform::SliceTransform, 128 | snapshot::{Snapshot, SnapshotWithThreadMode}, 129 | sst_file_writer::SstFileWriter, 130 | transactions::{ 131 | OptimisticTransactionDB, OptimisticTransactionOptions, Transaction, TransactionDB, 132 | TransactionDBOptions, TransactionOptions, 133 | }, 134 | write_batch::{WriteBatch, WriteBatchIterator, WriteBatchWithTransaction}, 135 | }; 136 | 137 | use libspeedb_sys as ffi; 138 | 139 | use std::error; 140 | use std::fmt; 141 | 142 | /// RocksDB error kind. 143 | #[derive(Debug, Clone, PartialEq, Eq)] 144 | pub enum ErrorKind { 145 | NotFound, 146 | Corruption, 147 | NotSupported, 148 | InvalidArgument, 149 | IOError, 150 | MergeInProgress, 151 | Incomplete, 152 | ShutdownInProgress, 153 | TimedOut, 154 | Aborted, 155 | Busy, 156 | Expired, 157 | TryAgain, 158 | CompactionTooLarge, 159 | ColumnFamilyDropped, 160 | Unknown, 161 | } 162 | 163 | /// A simple wrapper round a string, used for errors reported from 164 | /// ffi calls. 165 | #[derive(Debug, Clone, PartialEq, Eq)] 166 | pub struct Error { 167 | message: String, 168 | } 169 | 170 | impl Error { 171 | fn new(message: String) -> Error { 172 | Error { message } 173 | } 174 | 175 | pub fn into_string(self) -> String { 176 | self.into() 177 | } 178 | 179 | /// Parse corresponding [`ErrorKind`] from error message. 
180 | pub fn kind(&self) -> ErrorKind { 181 | match self.message.split(':').next().unwrap_or("") { 182 | "NotFound" => ErrorKind::NotFound, 183 | "Corruption" => ErrorKind::Corruption, 184 | "Not implemented" => ErrorKind::NotSupported, 185 | "Invalid argument" => ErrorKind::InvalidArgument, 186 | "IO error" => ErrorKind::IOError, 187 | "Merge in progress" => ErrorKind::MergeInProgress, 188 | "Result incomplete" => ErrorKind::Incomplete, 189 | "Shutdown in progress" => ErrorKind::ShutdownInProgress, 190 | "Operation timed out" => ErrorKind::TimedOut, 191 | "Operation aborted" => ErrorKind::Aborted, 192 | "Resource busy" => ErrorKind::Busy, 193 | "Operation expired" => ErrorKind::Expired, 194 | "Operation failed. Try again." => ErrorKind::TryAgain, 195 | "Compaction too large" => ErrorKind::CompactionTooLarge, 196 | "Column family dropped" => ErrorKind::ColumnFamilyDropped, 197 | _ => ErrorKind::Unknown, 198 | } 199 | } 200 | } 201 | 202 | impl AsRef for Error { 203 | fn as_ref(&self) -> &str { 204 | &self.message 205 | } 206 | } 207 | 208 | impl From for String { 209 | fn from(e: Error) -> String { 210 | e.message 211 | } 212 | } 213 | 214 | impl error::Error for Error { 215 | fn description(&self) -> &str { 216 | &self.message 217 | } 218 | } 219 | 220 | impl fmt::Display for Error { 221 | fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { 222 | self.message.fmt(formatter) 223 | } 224 | } 225 | 226 | #[cfg(test)] 227 | mod test { 228 | use crate::{ 229 | OptimisticTransactionDB, OptimisticTransactionOptions, Transaction, TransactionDB, 230 | TransactionDBOptions, TransactionOptions, 231 | }; 232 | 233 | use super::{ 234 | column_family::UnboundColumnFamily, 235 | db_options::CacheWrapper, 236 | env::{Env, EnvWrapper}, 237 | BlockBasedOptions, BoundColumnFamily, Cache, ColumnFamily, ColumnFamilyDescriptor, 238 | DBIterator, DBRawIterator, IngestExternalFileOptions, Options, PlainTableFactoryOptions, 239 | ReadOptions, Snapshot, SstFileWriter, 
WriteBatch, WriteOptions, DB, 240 | }; 241 | 242 | #[test] 243 | fn is_send() { 244 | // test (at compile time) that certain types implement the auto-trait Send, either directly for 245 | // pointer-wrapping types or transitively for types with all Send fields 246 | 247 | fn is_send() { 248 | // dummy function just used for its parameterized type bound 249 | } 250 | 251 | is_send::(); 252 | is_send::>(); 253 | is_send::>(); 254 | is_send::(); 255 | is_send::(); 256 | is_send::(); 257 | is_send::(); 258 | is_send::(); 259 | is_send::(); 260 | is_send::(); 261 | is_send::(); 262 | is_send::(); 263 | is_send::>(); 264 | is_send::(); 265 | is_send::(); 266 | is_send::(); 267 | is_send::(); 268 | is_send::(); 269 | is_send::(); 270 | is_send::(); 271 | is_send::(); 272 | is_send::(); 273 | is_send::>(); 274 | is_send::(); 275 | is_send::(); 276 | is_send::(); 277 | } 278 | 279 | #[test] 280 | fn is_sync() { 281 | // test (at compile time) that certain types implement the auto-trait Sync 282 | 283 | fn is_sync() { 284 | // dummy function just used for its parameterized type bound 285 | } 286 | 287 | is_sync::(); 288 | is_sync::(); 289 | is_sync::(); 290 | is_sync::(); 291 | is_sync::(); 292 | is_sync::(); 293 | is_sync::(); 294 | is_sync::(); 295 | is_sync::(); 296 | is_sync::(); 297 | is_sync::(); 298 | is_sync::(); 299 | is_sync::(); 300 | is_sync::(); 301 | is_sync::(); 302 | is_sync::(); 303 | is_sync::(); 304 | is_sync::(); 305 | is_sync::(); 306 | is_sync::(); 307 | } 308 | } 309 | -------------------------------------------------------------------------------- /src/merge_operator.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | // 15 | 16 | //! rustic merge operator 17 | //! 18 | //! ``` 19 | //! use speedb::{Options, DB, MergeOperands}; 20 | //! 21 | //! fn concat_merge(new_key: &[u8], 22 | //! existing_val: Option<&[u8]>, 23 | //! operands: &MergeOperands) 24 | //! -> Option> { 25 | //! 26 | //! let mut result: Vec = Vec::with_capacity(operands.len()); 27 | //! existing_val.map(|v| { 28 | //! for e in v { 29 | //! result.push(*e) 30 | //! } 31 | //! }); 32 | //! for op in operands { 33 | //! for e in op { 34 | //! result.push(*e) 35 | //! } 36 | //! } 37 | //! Some(result) 38 | //! } 39 | //! 40 | //!let path = "_rust_path_to_rocksdb"; 41 | //!let mut opts = Options::default(); 42 | //! 43 | //!opts.create_if_missing(true); 44 | //!opts.set_merge_operator_associative("test operator", concat_merge); 45 | //!{ 46 | //! let db = DB::open(&opts, path).unwrap(); 47 | //! let p = db.put(b"k1", b"a"); 48 | //! db.merge(b"k1", b"b"); 49 | //! db.merge(b"k1", b"c"); 50 | //! db.merge(b"k1", b"d"); 51 | //! db.merge(b"k1", b"efg"); 52 | //! let r = db.get(b"k1"); 53 | //! assert_eq!(r.unwrap().unwrap(), b"abcdefg"); 54 | //!} 55 | //!let _ = DB::destroy(&opts, path); 56 | //! 
``` 57 | 58 | use libc::{self, c_char, c_int, c_void, size_t}; 59 | use std::ffi::CString; 60 | use std::mem; 61 | use std::ptr; 62 | use std::slice; 63 | 64 | pub trait MergeFn: 65 | Fn(&[u8], Option<&[u8]>, &MergeOperands) -> Option> + Send + Sync + 'static 66 | { 67 | } 68 | impl MergeFn for F where 69 | F: Fn(&[u8], Option<&[u8]>, &MergeOperands) -> Option> + Send + Sync + 'static 70 | { 71 | } 72 | 73 | pub struct MergeOperatorCallback { 74 | pub name: CString, 75 | pub full_merge_fn: F, 76 | pub partial_merge_fn: PF, 77 | } 78 | 79 | pub unsafe extern "C" fn destructor_callback(raw_cb: *mut c_void) { 80 | drop(Box::from_raw(raw_cb as *mut MergeOperatorCallback)); 81 | } 82 | 83 | pub unsafe extern "C" fn delete_callback( 84 | _raw_cb: *mut c_void, 85 | value: *const c_char, 86 | value_length: size_t, 87 | ) { 88 | if !value.is_null() { 89 | drop(Box::from_raw(slice::from_raw_parts_mut( 90 | value as *mut u8, 91 | value_length, 92 | ))); 93 | } 94 | } 95 | 96 | pub unsafe extern "C" fn name_callback( 97 | raw_cb: *mut c_void, 98 | ) -> *const c_char { 99 | let cb = &mut *(raw_cb as *mut MergeOperatorCallback); 100 | cb.name.as_ptr() 101 | } 102 | 103 | pub unsafe extern "C" fn full_merge_callback( 104 | raw_cb: *mut c_void, 105 | raw_key: *const c_char, 106 | key_len: size_t, 107 | existing_value: *const c_char, 108 | existing_value_len: size_t, 109 | operands_list: *const *const c_char, 110 | operands_list_len: *const size_t, 111 | num_operands: c_int, 112 | success: *mut u8, 113 | new_value_length: *mut size_t, 114 | ) -> *mut c_char { 115 | let cb = &mut *(raw_cb as *mut MergeOperatorCallback); 116 | let operands = &MergeOperands::new(operands_list, operands_list_len, num_operands); 117 | let key = slice::from_raw_parts(raw_key as *const u8, key_len); 118 | let oldval = if existing_value.is_null() { 119 | None 120 | } else { 121 | Some(slice::from_raw_parts( 122 | existing_value as *const u8, 123 | existing_value_len, 124 | )) 125 | }; 126 | 
(cb.full_merge_fn)(key, oldval, operands).map_or_else( 127 | || { 128 | *new_value_length = 0; 129 | *success = 0_u8; 130 | ptr::null_mut() as *mut c_char 131 | }, 132 | |result| { 133 | *new_value_length = result.len() as size_t; 134 | *success = 1_u8; 135 | Box::into_raw(result.into_boxed_slice()) as *mut c_char 136 | }, 137 | ) 138 | } 139 | 140 | pub unsafe extern "C" fn partial_merge_callback( 141 | raw_cb: *mut c_void, 142 | raw_key: *const c_char, 143 | key_len: size_t, 144 | operands_list: *const *const c_char, 145 | operands_list_len: *const size_t, 146 | num_operands: c_int, 147 | success: *mut u8, 148 | new_value_length: *mut size_t, 149 | ) -> *mut c_char { 150 | let cb = &mut *(raw_cb as *mut MergeOperatorCallback); 151 | let operands = &MergeOperands::new(operands_list, operands_list_len, num_operands); 152 | let key = slice::from_raw_parts(raw_key as *const u8, key_len); 153 | (cb.partial_merge_fn)(key, None, operands).map_or_else( 154 | || { 155 | *new_value_length = 0; 156 | *success = 0_u8; 157 | ptr::null_mut::() 158 | }, 159 | |result| { 160 | *new_value_length = result.len() as size_t; 161 | *success = 1_u8; 162 | Box::into_raw(result.into_boxed_slice()) as *mut c_char 163 | }, 164 | ) 165 | } 166 | 167 | pub struct MergeOperands { 168 | operands_list: *const *const c_char, 169 | operands_list_len: *const size_t, 170 | num_operands: usize, 171 | } 172 | 173 | impl MergeOperands { 174 | fn new( 175 | operands_list: *const *const c_char, 176 | operands_list_len: *const size_t, 177 | num_operands: c_int, 178 | ) -> MergeOperands { 179 | assert!(num_operands >= 0); 180 | MergeOperands { 181 | operands_list, 182 | operands_list_len, 183 | num_operands: num_operands as usize, 184 | } 185 | } 186 | 187 | pub fn len(&self) -> usize { 188 | self.num_operands 189 | } 190 | 191 | pub fn is_empty(&self) -> bool { 192 | self.num_operands == 0 193 | } 194 | 195 | pub fn iter(&self) -> MergeOperandsIter { 196 | MergeOperandsIter { 197 | operands: self, 198 | 
cursor: 0, 199 | } 200 | } 201 | 202 | fn get_operand(&self, index: usize) -> Option<&[u8]> { 203 | if index >= self.num_operands { 204 | None 205 | } else { 206 | unsafe { 207 | let base = self.operands_list as usize; 208 | let base_len = self.operands_list_len as usize; 209 | let spacing = mem::size_of::<*const *const u8>(); 210 | let spacing_len = mem::size_of::<*const size_t>(); 211 | let len_ptr = (base_len + (spacing_len * index)) as *const size_t; 212 | let len = *len_ptr; 213 | let ptr = base + (spacing * index); 214 | Some(slice::from_raw_parts( 215 | *(ptr as *const *const u8) as *const u8, 216 | len, 217 | )) 218 | } 219 | } 220 | } 221 | } 222 | 223 | pub struct MergeOperandsIter<'a> { 224 | operands: &'a MergeOperands, 225 | cursor: usize, 226 | } 227 | 228 | impl<'a> Iterator for MergeOperandsIter<'a> { 229 | type Item = &'a [u8]; 230 | 231 | fn next(&mut self) -> Option { 232 | let operand = self.operands.get_operand(self.cursor)?; 233 | self.cursor += 1; 234 | Some(operand) 235 | } 236 | 237 | fn size_hint(&self) -> (usize, Option) { 238 | let remaining = self.operands.num_operands - self.cursor; 239 | (remaining, Some(remaining)) 240 | } 241 | } 242 | 243 | impl<'a> IntoIterator for &'a MergeOperands { 244 | type Item = &'a [u8]; 245 | type IntoIter = MergeOperandsIter<'a>; 246 | 247 | fn into_iter(self) -> Self::IntoIter { 248 | Self::IntoIter { 249 | operands: self, 250 | cursor: 0, 251 | } 252 | } 253 | } 254 | -------------------------------------------------------------------------------- /src/perf.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tran Tuan Linh 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use libc::{c_int, c_uchar, c_void}; 16 | 17 | use crate::{db::DBInner, ffi, ffi_util::from_cstr, Cache, Error, DB}; 18 | 19 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 20 | #[repr(i32)] 21 | pub enum PerfStatsLevel { 22 | /// Unknown settings 23 | Uninitialized = 0, 24 | /// Disable perf stats 25 | Disable, 26 | /// Enables only count stats 27 | EnableCount, 28 | /// Count stats and enable time stats except for mutexes 29 | EnableTimeExceptForMutex, 30 | /// Other than time, also measure CPU time counters. Still don't measure 31 | /// time (neither wall time nor CPU time) for mutexes 32 | EnableTimeAndCPUTimeExceptForMutex, 33 | /// Enables count and time stats 34 | EnableTime, 35 | /// N.B must always be the last value! 
36 | OutOfBound, 37 | } 38 | 39 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] 40 | #[non_exhaustive] 41 | #[repr(i32)] 42 | pub enum PerfMetric { 43 | UserKeyComparisonCount = 0, 44 | BlockCacheHitCount = 1, 45 | BlockReadCount = 2, 46 | BlockReadByte = 3, 47 | BlockReadTime = 4, 48 | BlockChecksumTime = 5, 49 | BlockDecompressTime = 6, 50 | GetReadBytes = 7, 51 | MultigetReadBytes = 8, 52 | IterReadBytes = 9, 53 | InternalKeySkippedCount = 10, 54 | InternalDeleteSkippedCount = 11, 55 | InternalRecentSkippedCount = 12, 56 | InternalMergeCount = 13, 57 | GetSnapshotTime = 14, 58 | GetFromMemtableTime = 15, 59 | GetFromMemtableCount = 16, 60 | GetPostProcessTime = 17, 61 | GetFromOutputFilesTime = 18, 62 | SeekOnMemtableTime = 19, 63 | SeekOnMemtableCount = 20, 64 | NextOnMemtableCount = 21, 65 | PrevOnMemtableCount = 22, 66 | SeekChildSeekTime = 23, 67 | SeekChildSeekCount = 24, 68 | SeekMinHeapTime = 25, 69 | SeekMaxHeapTime = 26, 70 | SeekInternalSeekTime = 27, 71 | FindNextUserEntryTime = 28, 72 | WriteWalTime = 29, 73 | WriteMemtableTime = 30, 74 | WriteDelayTime = 31, 75 | WritePreAndPostProcessTime = 32, 76 | DbMutexLockNanos = 33, 77 | DbConditionWaitNanos = 34, 78 | MergeOperatorTimeNanos = 35, 79 | ReadIndexBlockNanos = 36, 80 | ReadFilterBlockNanos = 37, 81 | NewTableBlockIterNanos = 38, 82 | NewTableIteratorNanos = 39, 83 | BlockSeekNanos = 40, 84 | FindTableNanos = 41, 85 | BloomMemtableHitCount = 42, 86 | BloomMemtableMissCount = 43, 87 | BloomSstHitCount = 44, 88 | BloomSstMissCount = 45, 89 | KeyLockWaitTime = 46, 90 | KeyLockWaitCount = 47, 91 | EnvNewSequentialFileNanos = 48, 92 | EnvNewRandomAccessFileNanos = 49, 93 | EnvNewWritableFileNanos = 50, 94 | EnvReuseWritableFileNanos = 51, 95 | EnvNewRandomRwFileNanos = 52, 96 | EnvNewDirectoryNanos = 53, 97 | EnvFileExistsNanos = 54, 98 | EnvGetChildrenNanos = 55, 99 | EnvGetChildrenFileAttributesNanos = 56, 100 | EnvDeleteFileNanos = 57, 101 | EnvCreateDirNanos = 58, 102 | 
EnvCreateDirIfMissingNanos = 59, 103 | EnvDeleteDirNanos = 60, 104 | EnvGetFileSizeNanos = 61, 105 | EnvGetFileModificationTimeNanos = 62, 106 | EnvRenameFileNanos = 63, 107 | EnvLinkFileNanos = 64, 108 | EnvLockFileNanos = 65, 109 | EnvUnlockFileNanos = 66, 110 | EnvNewLoggerNanos = 67, 111 | TotalMetricCount = 68, 112 | } 113 | 114 | /// Sets the perf stats level for current thread. 115 | pub fn set_perf_stats(lvl: PerfStatsLevel) { 116 | unsafe { 117 | ffi::rocksdb_set_perf_level(lvl as c_int); 118 | } 119 | } 120 | 121 | /// Thread local context for gathering performance counter efficiently 122 | /// and transparently. 123 | pub struct PerfContext { 124 | pub(crate) inner: *mut ffi::rocksdb_perfcontext_t, 125 | } 126 | 127 | impl Default for PerfContext { 128 | fn default() -> Self { 129 | let ctx = unsafe { ffi::rocksdb_perfcontext_create() }; 130 | assert!(!ctx.is_null(), "Could not create Perf Context"); 131 | 132 | Self { inner: ctx } 133 | } 134 | } 135 | 136 | impl Drop for PerfContext { 137 | fn drop(&mut self) { 138 | unsafe { 139 | ffi::rocksdb_perfcontext_destroy(self.inner); 140 | } 141 | } 142 | } 143 | 144 | impl PerfContext { 145 | /// Reset context 146 | pub fn reset(&mut self) { 147 | unsafe { 148 | ffi::rocksdb_perfcontext_reset(self.inner); 149 | } 150 | } 151 | 152 | /// Get the report on perf 153 | pub fn report(&self, exclude_zero_counters: bool) -> String { 154 | unsafe { 155 | let ptr = 156 | ffi::rocksdb_perfcontext_report(self.inner, c_uchar::from(exclude_zero_counters)); 157 | let report = from_cstr(ptr); 158 | ffi::rocksdb_free(ptr as *mut c_void); 159 | report 160 | } 161 | } 162 | 163 | /// Returns value of a metric 164 | pub fn metric(&self, id: PerfMetric) -> u64 { 165 | unsafe { ffi::rocksdb_perfcontext_metric(self.inner, id as c_int) } 166 | } 167 | } 168 | 169 | /// Memory usage stats 170 | pub struct MemoryUsageStats { 171 | /// Approximate memory usage of all the mem-tables 172 | pub mem_table_total: u64, 173 | /// 
Approximate memory usage of un-flushed mem-tables 174 | pub mem_table_unflushed: u64, 175 | /// Approximate memory usage of all the table readers 176 | pub mem_table_readers_total: u64, 177 | /// Approximate memory usage by cache 178 | pub cache_total: u64, 179 | } 180 | 181 | /// Wrap over memory_usage_t. Hold current memory usage of the specified DB instances and caches 182 | struct MemoryUsage { 183 | inner: *mut ffi::rocksdb_memory_usage_t, 184 | } 185 | 186 | impl Drop for MemoryUsage { 187 | fn drop(&mut self) { 188 | unsafe { 189 | ffi::rocksdb_approximate_memory_usage_destroy(self.inner); 190 | } 191 | } 192 | } 193 | 194 | impl MemoryUsage { 195 | /// Approximate memory usage of all the mem-tables 196 | fn approximate_mem_table_total(&self) -> u64 { 197 | unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_total(self.inner) } 198 | } 199 | 200 | /// Approximate memory usage of un-flushed mem-tables 201 | fn approximate_mem_table_unflushed(&self) -> u64 { 202 | unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_unflushed(self.inner) } 203 | } 204 | 205 | /// Approximate memory usage of all the table readers 206 | fn approximate_mem_table_readers_total(&self) -> u64 { 207 | unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_readers_total(self.inner) } 208 | } 209 | 210 | /// Approximate memory usage by cache 211 | fn approximate_cache_total(&self) -> u64 { 212 | unsafe { ffi::rocksdb_approximate_memory_usage_get_cache_total(self.inner) } 213 | } 214 | } 215 | 216 | /// Builder for MemoryUsage 217 | struct MemoryUsageBuilder { 218 | inner: *mut ffi::rocksdb_memory_consumers_t, 219 | } 220 | 221 | impl Drop for MemoryUsageBuilder { 222 | fn drop(&mut self) { 223 | unsafe { 224 | ffi::rocksdb_memory_consumers_destroy(self.inner); 225 | } 226 | } 227 | } 228 | 229 | impl MemoryUsageBuilder { 230 | /// Create new instance 231 | fn new() -> Result { 232 | let mc = unsafe { ffi::rocksdb_memory_consumers_create() }; 233 | if mc.is_null() 
{ 234 | Err(Error::new( 235 | "Could not create MemoryUsage builder".to_owned(), 236 | )) 237 | } else { 238 | Ok(Self { inner: mc }) 239 | } 240 | } 241 | 242 | /// Add a DB instance to collect memory usage from it and add up in total stats 243 | fn add_db(&mut self, db: &DB) { 244 | unsafe { 245 | ffi::rocksdb_memory_consumers_add_db(self.inner, db.inner.inner()); 246 | } 247 | } 248 | 249 | /// Add a cache to collect memory usage from it and add up in total stats 250 | fn add_cache(&mut self, cache: &Cache) { 251 | unsafe { 252 | ffi::rocksdb_memory_consumers_add_cache(self.inner, cache.0.inner.as_ptr()); 253 | } 254 | } 255 | 256 | /// Build up MemoryUsage 257 | fn build(&self) -> Result { 258 | unsafe { 259 | let mu = ffi_try!(ffi::rocksdb_approximate_memory_usage_create(self.inner)); 260 | Ok(MemoryUsage { inner: mu }) 261 | } 262 | } 263 | } 264 | 265 | /// Get memory usage stats from DB instances and Cache instances 266 | pub fn get_memory_usage_stats( 267 | dbs: Option<&[&DB]>, 268 | caches: Option<&[&Cache]>, 269 | ) -> Result { 270 | let mut builder = MemoryUsageBuilder::new()?; 271 | if let Some(dbs_) = dbs { 272 | dbs_.iter().for_each(|db| builder.add_db(db)); 273 | } 274 | if let Some(caches_) = caches { 275 | caches_.iter().for_each(|cache| builder.add_cache(cache)); 276 | } 277 | 278 | let mu = builder.build()?; 279 | Ok(MemoryUsageStats { 280 | mem_table_total: mu.approximate_mem_table_total(), 281 | mem_table_unflushed: mu.approximate_mem_table_unflushed(), 282 | mem_table_readers_total: mu.approximate_mem_table_readers_total(), 283 | cache_total: mu.approximate_cache_total(), 284 | }) 285 | } 286 | -------------------------------------------------------------------------------- /src/prop_name.rs: -------------------------------------------------------------------------------- 1 | use crate::ffi_util::CStrLike; 2 | 3 | use std::ffi::{CStr, CString}; 4 | 5 | /// A borrowed name of a RocksDB property. 
6 | /// 7 | /// The value is guaranteed to be a nul-terminated UTF-8 string. This means it 8 | /// can be converted to [`CStr`] and [`str`] at zero cost. 9 | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash)] 10 | #[repr(transparent)] 11 | pub struct PropName(CStr); 12 | 13 | impl PropName { 14 | /// Creates a new object from a nul-terminated string with no internal nul 15 | /// bytes. 16 | /// 17 | /// Panics if the `value` isn’t terminated by a nul byte or contains 18 | /// interior nul bytes. 19 | pub(crate) const fn new_unwrap(value: &str) -> &Self { 20 | let bytes = if let Some((&0, bytes)) = value.as_bytes().split_last() { 21 | bytes 22 | } else { 23 | panic!("input was not nul-terminated"); 24 | }; 25 | 26 | let mut idx = 0; 27 | while idx < bytes.len() { 28 | assert!(bytes[idx] != 0, "input contained interior nul byte"); 29 | idx += 1; 30 | } 31 | 32 | // SAFETY: 1. We’ve just verified `value` is a nul-terminated with no 33 | // interior nul bytes and since its `str` it’s also valid UTF-8. 34 | // 2. Self and CStr have the same representation so casting is sound. 35 | unsafe { 36 | let value = CStr::from_bytes_with_nul_unchecked(value.as_bytes()); 37 | &*(value as *const CStr as *const Self) 38 | } 39 | } 40 | 41 | /// Converts the value into a C string slice. 42 | #[inline] 43 | pub fn as_c_str(&self) -> &CStr { 44 | &self.0 45 | } 46 | 47 | /// Converts the value into a string slice. 48 | /// 49 | /// Nul byte terminating the underlying C string is not included in the 50 | /// returned slice. 51 | #[inline] 52 | pub fn as_str(&self) -> &str { 53 | // SAFETY: self.0 is guaranteed to be valid ASCII string. 
54 | unsafe { std::str::from_utf8_unchecked(self.0.to_bytes()) } 55 | } 56 | } 57 | 58 | impl core::ops::Deref for PropName { 59 | type Target = CStr; 60 | 61 | #[inline] 62 | fn deref(&self) -> &Self::Target { 63 | self.as_c_str() 64 | } 65 | } 66 | 67 | impl core::convert::AsRef for PropName { 68 | #[inline] 69 | fn as_ref(&self) -> &CStr { 70 | self.as_c_str() 71 | } 72 | } 73 | 74 | impl core::convert::AsRef for PropName { 75 | #[inline] 76 | fn as_ref(&self) -> &str { 77 | self.as_str() 78 | } 79 | } 80 | 81 | impl std::borrow::ToOwned for PropName { 82 | type Owned = PropertyName; 83 | 84 | #[inline] 85 | fn to_owned(&self) -> Self::Owned { 86 | PropertyName(self.0.to_owned()) 87 | } 88 | 89 | #[inline] 90 | fn clone_into(&self, target: &mut Self::Owned) { 91 | self.0.clone_into(&mut target.0); 92 | } 93 | } 94 | 95 | impl core::fmt::Display for PropName { 96 | #[inline] 97 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 98 | self.as_str().fmt(fmtr) 99 | } 100 | } 101 | 102 | impl core::fmt::Debug for PropName { 103 | #[inline] 104 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 105 | self.as_str().fmt(fmtr) 106 | } 107 | } 108 | 109 | impl core::cmp::PartialEq for PropName { 110 | #[inline] 111 | fn eq(&self, other: &CStr) -> bool { 112 | self.as_c_str().eq(other) 113 | } 114 | } 115 | 116 | impl core::cmp::PartialEq for PropName { 117 | #[inline] 118 | fn eq(&self, other: &str) -> bool { 119 | self.as_str().eq(other) 120 | } 121 | } 122 | 123 | impl core::cmp::PartialEq for CStr { 124 | #[inline] 125 | fn eq(&self, other: &PropName) -> bool { 126 | self.eq(other.as_c_str()) 127 | } 128 | } 129 | 130 | impl core::cmp::PartialEq for str { 131 | #[inline] 132 | fn eq(&self, other: &PropName) -> bool { 133 | self.eq(other.as_str()) 134 | } 135 | } 136 | 137 | impl<'a> CStrLike for &'a PropName { 138 | type Baked = &'a CStr; 139 | type Error = std::convert::Infallible; 140 | 141 | #[inline] 142 | fn 
bake(self) -> Result { 143 | Ok(&self.0) 144 | } 145 | 146 | #[inline] 147 | fn into_c_string(self) -> Result { 148 | Ok(self.0.to_owned()) 149 | } 150 | } 151 | 152 | /// An owned name of a RocksDB property. 153 | /// 154 | /// The value is guaranteed to be a nul-terminated UTF-8 string. This means it 155 | /// can be converted to [`CString`] and [`String`] at zero cost. 156 | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash)] 157 | #[repr(transparent)] 158 | pub struct PropertyName(CString); 159 | 160 | impl PropertyName { 161 | /// Creates a new object from valid nul-terminated UTF-8 string. The string 162 | /// must not contain interior nul bytes. 163 | #[inline] 164 | unsafe fn from_vec_with_nul_unchecked(inner: Vec) -> Self { 165 | // SAFETY: Caller promises inner is nul-terminated and valid UTF-8. 166 | Self(CString::from_vec_with_nul_unchecked(inner)) 167 | } 168 | 169 | /// Converts the value into a C string. 170 | #[inline] 171 | pub fn into_c_string(self) -> CString { 172 | self.0 173 | } 174 | 175 | /// Converts the property name into a string. 176 | /// 177 | /// Nul byte terminating the underlying C string is not included in the 178 | /// returned value. 179 | #[inline] 180 | pub fn into_string(self) -> String { 181 | // SAFETY: self.0 is guaranteed to be valid UTF-8. 182 | unsafe { String::from_utf8_unchecked(self.0.into_bytes()) } 183 | } 184 | } 185 | 186 | impl std::ops::Deref for PropertyName { 187 | type Target = PropName; 188 | 189 | #[inline] 190 | fn deref(&self) -> &Self::Target { 191 | // SAFETY: 1. PropName and CStr have the same representation so casting 192 | // is safe. 2. self.0 is guaranteed to be valid nul-terminated UTF-8 193 | // string. 
194 | unsafe { &*(self.0.as_c_str() as *const CStr as *const PropName) } 195 | } 196 | } 197 | 198 | impl core::convert::AsRef for PropertyName { 199 | #[inline] 200 | fn as_ref(&self) -> &CStr { 201 | self.as_c_str() 202 | } 203 | } 204 | 205 | impl core::convert::AsRef for PropertyName { 206 | #[inline] 207 | fn as_ref(&self) -> &str { 208 | self.as_str() 209 | } 210 | } 211 | 212 | impl std::borrow::Borrow for PropertyName { 213 | #[inline] 214 | fn borrow(&self) -> &PropName { 215 | self 216 | } 217 | } 218 | 219 | impl core::fmt::Display for PropertyName { 220 | #[inline] 221 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 222 | self.as_str().fmt(fmtr) 223 | } 224 | } 225 | 226 | impl core::fmt::Debug for PropertyName { 227 | #[inline] 228 | fn fmt(&self, fmtr: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 229 | self.as_str().fmt(fmtr) 230 | } 231 | } 232 | 233 | impl core::cmp::PartialEq for PropertyName { 234 | #[inline] 235 | fn eq(&self, other: &CString) -> bool { 236 | self.as_c_str().eq(other.as_c_str()) 237 | } 238 | } 239 | 240 | impl core::cmp::PartialEq for PropertyName { 241 | #[inline] 242 | fn eq(&self, other: &String) -> bool { 243 | self.as_str().eq(other.as_str()) 244 | } 245 | } 246 | 247 | impl core::cmp::PartialEq for CString { 248 | #[inline] 249 | fn eq(&self, other: &PropertyName) -> bool { 250 | self.as_c_str().eq(other.as_c_str()) 251 | } 252 | } 253 | 254 | impl core::cmp::PartialEq for String { 255 | #[inline] 256 | fn eq(&self, other: &PropertyName) -> bool { 257 | self.as_str().eq(other.as_str()) 258 | } 259 | } 260 | 261 | impl CStrLike for PropertyName { 262 | type Baked = CString; 263 | type Error = std::convert::Infallible; 264 | 265 | #[inline] 266 | fn bake(self) -> Result { 267 | Ok(self.0) 268 | } 269 | 270 | #[inline] 271 | fn into_c_string(self) -> Result { 272 | Ok(self.0) 273 | } 274 | } 275 | 276 | impl<'a> CStrLike for &'a PropertyName { 277 | type Baked = &'a CStr; 278 | type Error 
= std::convert::Infallible; 279 | 280 | #[inline] 281 | fn bake(self) -> Result { 282 | Ok(self.as_c_str()) 283 | } 284 | 285 | #[inline] 286 | fn into_c_string(self) -> Result { 287 | Ok(self.0.clone()) 288 | } 289 | } 290 | 291 | /// Constructs a property name for an ‘at level’ property. 292 | /// 293 | /// `name` is the infix of the property name (e.g. `"num-files-at-level"`) and 294 | /// `level` is level to get statistics of. The property name is constructed as 295 | /// `"rocksdb."`. 296 | /// 297 | /// Expects `name` not to contain any interior nul bytes. 298 | pub(crate) unsafe fn level_property(name: &str, level: usize) -> PropertyName { 299 | let bytes = format!("rocksdb.{name}{level}\0").into_bytes(); 300 | // SAFETY: We’re appending terminating nul and caller promises `name` has no 301 | // interior nul bytes. 302 | PropertyName::from_vec_with_nul_unchecked(bytes) 303 | } 304 | 305 | #[test] 306 | fn sanity_checks() { 307 | let want = "rocksdb.cfstats-no-file-histogram"; 308 | assert_eq!(want, crate::properties::CFSTATS_NO_FILE_HISTOGRAM); 309 | 310 | let want = "rocksdb.num-files-at-level5"; 311 | assert_eq!(want, &*crate::properties::num_files_at_level(5)); 312 | } 313 | 314 | #[test] 315 | #[should_panic] 316 | fn test_interior_nul() { 317 | PropName::new_unwrap("interior nul\0\0"); 318 | } 319 | 320 | #[test] 321 | #[should_panic] 322 | fn test_non_nul_terminated() { 323 | PropName::new_unwrap("no nul terminator"); 324 | } 325 | -------------------------------------------------------------------------------- /src/slice_transform.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::ffi::CString; 16 | use std::slice; 17 | 18 | use libc::{c_char, c_uchar, c_void, size_t}; 19 | 20 | use crate::{ffi, ffi_util::CStrLike}; 21 | 22 | /// A `SliceTransform` is a generic pluggable way of transforming one string 23 | /// to another. Its primary use-case is in configuring rocksdb 24 | /// to store prefix blooms by setting prefix_extractor in 25 | /// ColumnFamilyOptions. 26 | pub struct SliceTransform { 27 | pub inner: *mut ffi::rocksdb_slicetransform_t, 28 | } 29 | 30 | // NB we intentionally don't implement a Drop that passes 31 | // through to rocksdb_slicetransform_destroy because 32 | // this is currently only used (to my knowledge) 33 | // by people passing it as a prefix extractor when 34 | // opening a DB. 
impl SliceTransform {
    /// Creates a custom `SliceTransform` from the given callbacks.
    ///
    /// `transform_fn` extracts the prefix of a key; `in_domain_fn`, when
    /// provided, reports whether a key is eligible for prefix extraction
    /// (every key is treated as in-domain when it is `None`).
    ///
    /// # Panics
    ///
    /// Panics if `name` contains an interior nul byte.
    pub fn create(
        name: impl CStrLike,
        transform_fn: TransformFn,
        in_domain_fn: Option<InDomainFn>,
    ) -> SliceTransform {
        // Leak the callback state into a raw pointer; it is reclaimed by
        // `slice_transform_destructor_callback` when RocksDB drops the transform.
        let cb = Box::into_raw(Box::new(TransformCallback {
            name: name.into_c_string().unwrap(),
            transform_fn,
            in_domain_fn,
        }));

        let st = unsafe {
            ffi::rocksdb_slicetransform_create(
                cb as *mut c_void,
                Some(slice_transform_destructor_callback),
                Some(transform_callback),
                Some(in_domain_callback),
                // this None points to the deprecated InRange callback
                None,
                Some(slice_transform_name_callback),
            )
        };

        SliceTransform { inner: st }
    }

    /// Creates the built-in transform that uses the first `len` bytes of a
    /// key as its prefix.
    pub fn create_fixed_prefix(len: size_t) -> SliceTransform {
        SliceTransform {
            inner: unsafe { ffi::rocksdb_slicetransform_create_fixed_prefix(len) },
        }
    }

    /// Creates the built-in no-op transform (the whole key is the prefix).
    pub fn create_noop() -> SliceTransform {
        SliceTransform {
            inner: unsafe { ffi::rocksdb_slicetransform_create_noop() },
        }
    }
}

/// Extracts the prefix slice out of a key slice.
pub type TransformFn<'a> = fn(&'a [u8]) -> &'a [u8];
/// Reports whether a key is in the domain of the prefix extractor.
pub type InDomainFn = fn(&[u8]) -> bool;

/// Heap-allocated state shared with the C callbacks below.
pub struct TransformCallback<'a> {
    pub name: CString,
    pub transform_fn: TransformFn<'a>,
    pub in_domain_fn: Option<InDomainFn>,
}

pub unsafe extern "C" fn slice_transform_destructor_callback(raw_cb: *mut c_void) {
    // SAFETY: `raw_cb` was produced by `Box::into_raw` in `SliceTransform::create`
    // and RocksDB invokes this destructor exactly once.
    drop(Box::from_raw(raw_cb as *mut TransformCallback));
}

pub unsafe extern "C" fn slice_transform_name_callback(raw_cb: *mut c_void) -> *const c_char {
    let cb = &mut *(raw_cb as *mut TransformCallback);
    // The returned pointer stays valid as long as the callback state lives.
    cb.name.as_ptr()
}

pub unsafe extern "C" fn transform_callback(
    raw_cb: *mut c_void,
    raw_key: *const c_char,
    key_len: size_t,
    dst_length: *mut size_t,
) -> *mut c_char {
    let cb = &mut *(raw_cb as *mut TransformCallback);
    // SAFETY: RocksDB guarantees `raw_key` points to `key_len` valid bytes.
    let key = slice::from_raw_parts(raw_key as *const u8, key_len);
    let prefix = (cb.transform_fn)(key);
    // Report the prefix length back through the out-parameter.
    *dst_length = prefix.len() as size_t;
    prefix.as_ptr() as *mut c_char
}

pub unsafe extern "C" fn in_domain_callback(
    raw_cb: *mut c_void,
    raw_key: *const c_char,
    key_len: size_t,
) -> c_uchar {
    let cb = &mut *(raw_cb as *mut TransformCallback);
    // SAFETY: RocksDB guarantees `raw_key` points to `key_len` valid bytes.
    let key = slice::from_raw_parts(raw_key as *const u8, key_len);
    // Keys are in-domain by default when no `in_domain_fn` was supplied.
    c_uchar::from(cb.in_domain_fn.map_or(true, |in_domain| in_domain(key)))
}

// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{
    db::DBAccess, ffi, AsColumnFamilyRef, DBIteratorWithThreadMode, DBPinnableSlice,
    DBRawIteratorWithThreadMode, Error, IteratorMode, ReadOptions, DB,
};

/// A type alias to keep compatibility. See [`SnapshotWithThreadMode`] for details
pub type Snapshot<'a> = SnapshotWithThreadMode<'a, DB>;

/// A consistent view of the database at the point of creation.
24 | /// 25 | /// # Examples 26 | /// 27 | /// ``` 28 | /// use speedb::{DB, IteratorMode, Options}; 29 | /// 30 | /// let path = "_path_for_rocksdb_storage3"; 31 | /// { 32 | /// let db = DB::open_default(path).unwrap(); 33 | /// let snapshot = db.snapshot(); // Creates a longer-term snapshot of the DB, but closed when goes out of scope 34 | /// let mut iter = snapshot.iterator(IteratorMode::Start); // Make as many iterators as you'd like from one snapshot 35 | /// } 36 | /// let _ = DB::destroy(&Options::default(), path); 37 | /// ``` 38 | /// 39 | pub struct SnapshotWithThreadMode<'a, D: DBAccess> { 40 | db: &'a D, 41 | pub(crate) inner: *const ffi::rocksdb_snapshot_t, 42 | } 43 | 44 | impl<'a, D: DBAccess> SnapshotWithThreadMode<'a, D> { 45 | /// Creates a new `SnapshotWithThreadMode` of the database `db`. 46 | pub fn new(db: &'a D) -> Self { 47 | let snapshot = unsafe { db.create_snapshot() }; 48 | Self { 49 | db, 50 | inner: snapshot, 51 | } 52 | } 53 | 54 | /// Creates an iterator over the data in this snapshot, using the default read options. 55 | pub fn iterator(&self, mode: IteratorMode) -> DBIteratorWithThreadMode<'a, D> { 56 | let readopts = ReadOptions::default(); 57 | self.iterator_opt(mode, readopts) 58 | } 59 | 60 | /// Creates an iterator over the data in this snapshot under the given column family, using 61 | /// the default read options. 62 | pub fn iterator_cf( 63 | &self, 64 | cf_handle: &impl AsColumnFamilyRef, 65 | mode: IteratorMode, 66 | ) -> DBIteratorWithThreadMode { 67 | let readopts = ReadOptions::default(); 68 | self.iterator_cf_opt(cf_handle, readopts, mode) 69 | } 70 | 71 | /// Creates an iterator over the data in this snapshot, using the given read options. 
72 | pub fn iterator_opt( 73 | &self, 74 | mode: IteratorMode, 75 | mut readopts: ReadOptions, 76 | ) -> DBIteratorWithThreadMode<'a, D> { 77 | readopts.set_snapshot(self); 78 | DBIteratorWithThreadMode::::new(self.db, readopts, mode) 79 | } 80 | 81 | /// Creates an iterator over the data in this snapshot under the given column family, using 82 | /// the given read options. 83 | pub fn iterator_cf_opt( 84 | &self, 85 | cf_handle: &impl AsColumnFamilyRef, 86 | mut readopts: ReadOptions, 87 | mode: IteratorMode, 88 | ) -> DBIteratorWithThreadMode { 89 | readopts.set_snapshot(self); 90 | DBIteratorWithThreadMode::new_cf(self.db, cf_handle.inner(), readopts, mode) 91 | } 92 | 93 | /// Creates a raw iterator over the data in this snapshot, using the default read options. 94 | pub fn raw_iterator(&self) -> DBRawIteratorWithThreadMode { 95 | let readopts = ReadOptions::default(); 96 | self.raw_iterator_opt(readopts) 97 | } 98 | 99 | /// Creates a raw iterator over the data in this snapshot under the given column family, using 100 | /// the default read options. 101 | pub fn raw_iterator_cf( 102 | &self, 103 | cf_handle: &impl AsColumnFamilyRef, 104 | ) -> DBRawIteratorWithThreadMode { 105 | let readopts = ReadOptions::default(); 106 | self.raw_iterator_cf_opt(cf_handle, readopts) 107 | } 108 | 109 | /// Creates a raw iterator over the data in this snapshot, using the given read options. 110 | pub fn raw_iterator_opt(&self, mut readopts: ReadOptions) -> DBRawIteratorWithThreadMode { 111 | readopts.set_snapshot(self); 112 | DBRawIteratorWithThreadMode::new(self.db, readopts) 113 | } 114 | 115 | /// Creates a raw iterator over the data in this snapshot under the given column family, using 116 | /// the given read options. 
117 | pub fn raw_iterator_cf_opt( 118 | &self, 119 | cf_handle: &impl AsColumnFamilyRef, 120 | mut readopts: ReadOptions, 121 | ) -> DBRawIteratorWithThreadMode { 122 | readopts.set_snapshot(self); 123 | DBRawIteratorWithThreadMode::new_cf(self.db, cf_handle.inner(), readopts) 124 | } 125 | 126 | /// Returns the bytes associated with a key value with default read options. 127 | pub fn get>(&self, key: K) -> Result>, Error> { 128 | let readopts = ReadOptions::default(); 129 | self.get_opt(key, readopts) 130 | } 131 | 132 | /// Returns the bytes associated with a key value and given column family with default read 133 | /// options. 134 | pub fn get_cf>( 135 | &self, 136 | cf: &impl AsColumnFamilyRef, 137 | key: K, 138 | ) -> Result>, Error> { 139 | let readopts = ReadOptions::default(); 140 | self.get_cf_opt(cf, key.as_ref(), readopts) 141 | } 142 | 143 | /// Returns the bytes associated with a key value and given read options. 144 | pub fn get_opt>( 145 | &self, 146 | key: K, 147 | mut readopts: ReadOptions, 148 | ) -> Result>, Error> { 149 | readopts.set_snapshot(self); 150 | self.db.get_opt(key.as_ref(), &readopts) 151 | } 152 | 153 | /// Returns the bytes associated with a key value, given column family and read options. 154 | pub fn get_cf_opt>( 155 | &self, 156 | cf: &impl AsColumnFamilyRef, 157 | key: K, 158 | mut readopts: ReadOptions, 159 | ) -> Result>, Error> { 160 | readopts.set_snapshot(self); 161 | self.db.get_cf_opt(cf, key.as_ref(), &readopts) 162 | } 163 | 164 | /// Return the value associated with a key using RocksDB's PinnableSlice 165 | /// so as to avoid unnecessary memory copy. Similar to get_pinned_opt but 166 | /// leverages default options. 167 | pub fn get_pinned>(&self, key: K) -> Result, Error> { 168 | let readopts = ReadOptions::default(); 169 | self.get_pinned_opt(key, readopts) 170 | } 171 | 172 | /// Return the value associated with a key using RocksDB's PinnableSlice 173 | /// so as to avoid unnecessary memory copy. 
Similar to get_pinned_cf_opt but 174 | /// leverages default options. 175 | pub fn get_pinned_cf>( 176 | &self, 177 | cf: &impl AsColumnFamilyRef, 178 | key: K, 179 | ) -> Result, Error> { 180 | let readopts = ReadOptions::default(); 181 | self.get_pinned_cf_opt(cf, key.as_ref(), readopts) 182 | } 183 | 184 | /// Return the value associated with a key using RocksDB's PinnableSlice 185 | /// so as to avoid unnecessary memory copy. 186 | pub fn get_pinned_opt>( 187 | &self, 188 | key: K, 189 | mut readopts: ReadOptions, 190 | ) -> Result, Error> { 191 | readopts.set_snapshot(self); 192 | self.db.get_pinned_opt(key.as_ref(), &readopts) 193 | } 194 | 195 | /// Return the value associated with a key using RocksDB's PinnableSlice 196 | /// so as to avoid unnecessary memory copy. Similar to get_pinned_opt but 197 | /// allows specifying ColumnFamily. 198 | pub fn get_pinned_cf_opt>( 199 | &self, 200 | cf: &impl AsColumnFamilyRef, 201 | key: K, 202 | mut readopts: ReadOptions, 203 | ) -> Result, Error> { 204 | readopts.set_snapshot(self); 205 | self.db.get_pinned_cf_opt(cf, key.as_ref(), &readopts) 206 | } 207 | 208 | /// Returns the bytes associated with the given key values and default read options. 209 | pub fn multi_get, I>(&self, keys: I) -> Vec>, Error>> 210 | where 211 | I: IntoIterator, 212 | { 213 | let readopts = ReadOptions::default(); 214 | self.multi_get_opt(keys, readopts) 215 | } 216 | 217 | /// Returns the bytes associated with the given key values and default read options. 218 | pub fn multi_get_cf<'b, K, I, W>(&self, keys_cf: I) -> Vec>, Error>> 219 | where 220 | K: AsRef<[u8]>, 221 | I: IntoIterator, 222 | W: AsColumnFamilyRef + 'b, 223 | { 224 | let readopts = ReadOptions::default(); 225 | self.multi_get_cf_opt(keys_cf, readopts) 226 | } 227 | 228 | /// Returns the bytes associated with the given key values and given read options. 
229 | pub fn multi_get_opt( 230 | &self, 231 | keys: I, 232 | mut readopts: ReadOptions, 233 | ) -> Vec>, Error>> 234 | where 235 | K: AsRef<[u8]>, 236 | I: IntoIterator, 237 | { 238 | readopts.set_snapshot(self); 239 | self.db.multi_get_opt(keys, &readopts) 240 | } 241 | 242 | /// Returns the bytes associated with the given key values, given column family and read options. 243 | pub fn multi_get_cf_opt<'b, K, I, W>( 244 | &self, 245 | keys_cf: I, 246 | mut readopts: ReadOptions, 247 | ) -> Vec>, Error>> 248 | where 249 | K: AsRef<[u8]>, 250 | I: IntoIterator, 251 | W: AsColumnFamilyRef + 'b, 252 | { 253 | readopts.set_snapshot(self); 254 | self.db.multi_get_cf_opt(keys_cf, &readopts) 255 | } 256 | } 257 | 258 | impl<'a, D: DBAccess> Drop for SnapshotWithThreadMode<'a, D> { 259 | fn drop(&mut self) { 260 | unsafe { 261 | self.db.release_snapshot(self.inner); 262 | } 263 | } 264 | } 265 | 266 | /// `Send` and `Sync` implementations for `SnapshotWithThreadMode` are safe, because `SnapshotWithThreadMode` is 267 | /// immutable and can be safely shared between threads. 268 | unsafe impl<'a, D: DBAccess> Send for SnapshotWithThreadMode<'a, D> {} 269 | unsafe impl<'a, D: DBAccess> Sync for SnapshotWithThreadMode<'a, D> {} 270 | -------------------------------------------------------------------------------- /src/sst_file_writer.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Lucjan Suski 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{ffi, ffi_util::to_cpath, Error, Options};

use libc::{self, c_char, size_t};
use std::{ffi::CString, marker::PhantomData, path::Path};

/// SstFileWriter is used to create sst files that can be added to database later
/// All keys in files generated by SstFileWriter will have sequence number = 0.
pub struct SstFileWriter<'a> {
    pub(crate) inner: *mut ffi::rocksdb_sstfilewriter_t,
    // Options are needed to be alive when calling open(),
    // so let's make sure it doesn't get dropped for the lifetime of SstFileWriter
    phantom: PhantomData<&'a Options>,
}

unsafe impl<'a> Send for SstFileWriter<'a> {}
unsafe impl<'a> Sync for SstFileWriter<'a> {}

// RAII wrapper over the C env-options handle used only during writer creation.
struct EnvOptions {
    inner: *mut ffi::rocksdb_envoptions_t,
}

impl Drop for EnvOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_envoptions_destroy(self.inner);
        }
    }
}

impl Default for EnvOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_envoptions_create() };
        Self { inner: opts }
    }
}

impl<'a> SstFileWriter<'a> {
    /// Initializes SstFileWriter with given DB options.
    pub fn create(opts: &'a Options) -> Self {
        // The env options are only needed for the create call itself and are
        // destroyed when this local goes out of scope.
        let env_options = EnvOptions::default();

        let writer = Self::create_raw(opts, &env_options);

        Self {
            inner: writer,
            phantom: PhantomData,
        }
    }

    // Thin wrapper over the C constructor.
    fn create_raw(opts: &Options, env_opts: &EnvOptions) -> *mut ffi::rocksdb_sstfilewriter_t {
        unsafe { ffi::rocksdb_sstfilewriter_create(env_opts.inner, opts.inner) }
    }

    /// Prepare SstFileWriter to write into file located at "file_path".
    pub fn open<P: AsRef<Path>>(&'a self, path: P) -> Result<(), Error> {
        // Convert the path to a nul-terminated C string (fails on interior nuls).
        let cpath = to_cpath(&path)?;
        self.open_raw(&cpath)
    }

    // Performs the actual FFI open call with an already-converted path.
    fn open_raw(&'a self, cpath: &CString) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_sstfilewriter_open(
                self.inner,
                cpath.as_ptr() as *const _
            ));

            Ok(())
        }
    }

    /// Finalize writing to sst file and close file.
    pub fn finish(&mut self) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_sstfilewriter_finish(self.inner,));
            Ok(())
        }
    }

    /// returns the current file size
    pub fn file_size(&self) -> u64 {
        let mut file_size: u64 = 0;
        unsafe {
            // Writes the size through the out-parameter.
            ffi::rocksdb_sstfilewriter_file_size(self.inner, &mut file_size);
        }
        file_size
    }

    /// Adds a Put key with value to currently opened file
    /// REQUIRES: key is after any previously added key according to comparator.
    pub fn put<K, V>(&mut self, key: K, value: V) -> Result<(), Error>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let key = key.as_ref();
        let value = value.as_ref();

        unsafe {
            ffi_try!(ffi::rocksdb_sstfilewriter_put(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                value.as_ptr() as *const c_char,
                value.len() as size_t,
            ));
            Ok(())
        }
    }

    /// Adds a Merge key with value to currently opened file
    /// REQUIRES: key is after any previously added key according to comparator.
    pub fn merge<K, V>(&mut self, key: K, value: V) -> Result<(), Error>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let key = key.as_ref();
        let value = value.as_ref();

        unsafe {
            ffi_try!(ffi::rocksdb_sstfilewriter_merge(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                value.as_ptr() as *const c_char,
                value.len() as size_t,
            ));
            Ok(())
        }
    }

    /// Adds a deletion key to currently opened file
    /// REQUIRES: key is after any previously added key according to comparator.
    pub fn delete<K: AsRef<[u8]>>(&mut self, key: K) -> Result<(), Error> {
        let key = key.as_ref();

        unsafe {
            ffi_try!(ffi::rocksdb_sstfilewriter_delete(
                self.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
            ));
            Ok(())
        }
    }
}

impl<'a> Drop for SstFileWriter<'a> {
    fn drop(&mut self) {
        // Destroys the underlying C writer handle.
        unsafe {
            ffi::rocksdb_sstfilewriter_destroy(self.inner);
        }
    }
}

// Copyright 2021 Yiyuan Liu
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//! Transaction support: pessimistic (`TransactionDB`) and optimistic
//! (`OptimisticTransactionDB`) transaction databases and their options.

mod optimistic_transaction_db;
mod options;
mod transaction;
mod transaction_db;

pub use optimistic_transaction_db::OptimisticTransactionDB;
pub use options::{OptimisticTransactionOptions, TransactionDBOptions, TransactionOptions};
pub use transaction::Transaction;
pub use transaction_db::TransactionDB;

// Copyright 2021 Yiyuan Liu
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

use std::{collections::BTreeMap, ffi::CString, fs, iter, marker::PhantomData, path::Path, ptr};

use libc::{c_char, c_int};

use crate::{
    db::DBCommon, db::DBInner, ffi, ffi_util::to_cpath, write_batch::WriteBatchWithTransaction,
    ColumnFamilyDescriptor, Error, OptimisticTransactionOptions, Options, ThreadMode, Transaction,
    WriteOptions, DEFAULT_COLUMN_FAMILY_NAME,
};

/// A type alias to RocksDB Optimistic Transaction DB.
///
/// Please read the official
/// [guide](https://github.com/facebook/rocksdb/wiki/Transactions#optimistictransactiondb)
/// to learn more about RocksDB OptimisticTransactionDB.
///
/// The default thread mode for [`OptimisticTransactionDB`] is [`SingleThreaded`]
/// if feature `multi-threaded-cf` is not enabled.
///
/// See [`DBCommon`] for full list of methods.
///
/// # Examples
///
/// ```
/// use speedb::{DB, Options, OptimisticTransactionDB, SingleThreaded};
/// let path = "_path_for_optimistic_transaction_db";
/// {
///     let db: OptimisticTransactionDB = OptimisticTransactionDB::open_default(path).unwrap();
///     db.put(b"my key", b"my value").unwrap();
///
///     // create transaction
///     let txn = db.transaction();
///     txn.put(b"key2", b"value2");
///     txn.put(b"key3", b"value3");
///     txn.commit().unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
///
/// [`SingleThreaded`]: crate::SingleThreaded
#[cfg(not(feature = "multi-threaded-cf"))]
pub type OptimisticTransactionDB<T = crate::SingleThreaded> =
    DBCommon<T, OptimisticTransactionDBInner>;
#[cfg(feature = "multi-threaded-cf")]
pub type OptimisticTransactionDB<T = crate::MultiThreaded> =
    DBCommon<T, OptimisticTransactionDBInner>;

// Holds both the optimistic transaction DB handle and the base DB handle
// obtained from it; the base handle is what ordinary DB operations go through.
pub struct OptimisticTransactionDBInner {
    base: *mut ffi::rocksdb_t,
    db: *mut ffi::rocksdb_optimistictransactiondb_t,
}

impl DBInner for OptimisticTransactionDBInner {
    fn inner(&self) -> *mut ffi::rocksdb_t {
        self.base
    }
}

impl Drop for OptimisticTransactionDBInner {
    fn drop(&mut self) {
        unsafe {
            // The base DB handle is derived from the transaction DB, so it
            // must be closed before the transaction DB itself.
            ffi::rocksdb_optimistictransactiondb_close_base_db(self.base);
            ffi::rocksdb_optimistictransactiondb_close(self.db);
        }
    }
}

/// Methods of `OptimisticTransactionDB`.
impl<T: ThreadMode> OptimisticTransactionDB<T> {
    /// Opens a database with default options.
    pub fn open_default<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
        let mut opts = Options::default();
        opts.create_if_missing(true);
        Self::open(&opts, path)
    }

    /// Opens the database with the specified options.
    pub fn open<P: AsRef<Path>>(opts: &Options, path: P) -> Result<Self, Error> {
        Self::open_cf(opts, path, None::<&str>)
    }

    /// Opens a database with the given database options and column family names.
    ///
    /// Column families opened using this function will be created with default `Options`.
    pub fn open_cf<P, I, N>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = N>,
        N: AsRef<str>,
    {
        let cfs = cfs
            .into_iter()
            .map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));

        Self::open_cf_descriptors_internal(opts, path, cfs)
    }

    /// Opens a database with the given database options and column family descriptors.
    pub fn open_cf_descriptors<P, I>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = ColumnFamilyDescriptor>,
    {
        Self::open_cf_descriptors_internal(opts, path, cfs)
    }

    /// Internal implementation for opening RocksDB.
    fn open_cf_descriptors_internal<P, I>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = ColumnFamilyDescriptor>,
    {
        let cfs: Vec<_> = cfs.into_iter().collect();
        // Keep the DB options and every per-CF option alive for as long as
        // the returned handle.
        let outlive = iter::once(opts.outlive.clone())
            .chain(cfs.iter().map(|cf| cf.options.outlive.clone()))
            .collect();

        let cpath = to_cpath(&path)?;

        if let Err(e) = fs::create_dir_all(&path) {
            return Err(Error::new(format!(
                "Failed to create RocksDB directory: `{e:?}`."
            )));
        }

        let db: *mut ffi::rocksdb_optimistictransactiondb_t;
        let mut cf_map = BTreeMap::new();

        if cfs.is_empty() {
            db = Self::open_raw(opts, &cpath)?;
        } else {
            let mut cfs_v = cfs;
            // Always open the default column family.
            if !cfs_v.iter().any(|cf| cf.name == DEFAULT_COLUMN_FAMILY_NAME) {
                cfs_v.push(ColumnFamilyDescriptor {
                    name: String::from(DEFAULT_COLUMN_FAMILY_NAME),
                    options: Options::default(),
                });
            }
            // We need to store our CStrings in an intermediate vector
            // so that their pointers remain valid.
            let c_cfs: Vec<CString> = cfs_v
                .iter()
                .map(|cf| CString::new(cf.name.as_bytes()).unwrap())
                .collect();

            let cfnames: Vec<_> = c_cfs.iter().map(|cf| cf.as_ptr()).collect();

            // These handles will be populated by DB.
            let mut cfhandles: Vec<_> = cfs_v.iter().map(|_| ptr::null_mut()).collect();

            let cfopts: Vec<_> = cfs_v
                .iter()
                .map(|cf| cf.options.inner as *const _)
                .collect();

            db = Self::open_cf_raw(opts, &cpath, &cfs_v, &cfnames, &cfopts, &mut cfhandles)?;

            for handle in &cfhandles {
                if handle.is_null() {
                    return Err(Error::new(
                        "Received null column family handle from DB.".to_owned(),
                    ));
                }
            }

            for (cf_desc, inner) in cfs_v.iter().zip(cfhandles) {
                cf_map.insert(cf_desc.name.clone(), inner);
            }
        }

        if db.is_null() {
            return Err(Error::new("Could not initialize database.".to_owned()));
        }

        let base = unsafe { ffi::rocksdb_optimistictransactiondb_get_base_db(db) };
        if base.is_null() {
            // Avoid leaking the transaction DB handle when the base DB
            // could not be obtained.
            unsafe {
                ffi::rocksdb_optimistictransactiondb_close(db);
            }
            return Err(Error::new("Could not initialize database.".to_owned()));
        }
        let inner = OptimisticTransactionDBInner { base, db };

        Ok(Self::new(
            inner,
            T::new_cf_map_internal(cf_map),
            path.as_ref().to_path_buf(),
            outlive,
        ))
    }

    // Opens the DB without column families.
    fn open_raw(
        opts: &Options,
        cpath: &CString,
    ) -> Result<*mut ffi::rocksdb_optimistictransactiondb_t, Error> {
        unsafe {
            let db = ffi_try!(ffi::rocksdb_optimistictransactiondb_open(
                opts.inner,
                cpath.as_ptr()
            ));
            Ok(db)
        }
    }

    // Opens the DB with explicit column families; `cfhandles` is filled in
    // by RocksDB, one handle per descriptor.
    fn open_cf_raw(
        opts: &Options,
        cpath: &CString,
        cfs_v: &[ColumnFamilyDescriptor],
        cfnames: &[*const c_char],
        cfopts: &[*const ffi::rocksdb_options_t],
        cfhandles: &mut [*mut ffi::rocksdb_column_family_handle_t],
    ) -> Result<*mut ffi::rocksdb_optimistictransactiondb_t, Error> {
        unsafe {
            let db = ffi_try!(ffi::rocksdb_optimistictransactiondb_open_column_families(
                opts.inner,
                cpath.as_ptr(),
                cfs_v.len() as c_int,
                cfnames.as_ptr(),
                cfopts.as_ptr(),
                cfhandles.as_mut_ptr(),
            ));
            Ok(db)
        }
    }

    /// Creates a transaction with default options.
    pub fn transaction(&self) -> Transaction<Self> {
        self.transaction_opt(
            &WriteOptions::default(),
            &OptimisticTransactionOptions::default(),
        )
    }

    /// Creates a transaction with the given write and optimistic transaction
    /// options.
    pub fn transaction_opt(
        &self,
        writeopts: &WriteOptions,
        otxn_opts: &OptimisticTransactionOptions,
    ) -> Transaction<Self> {
        Transaction {
            inner: unsafe {
                ffi::rocksdb_optimistictransaction_begin(
                    self.inner.db,
                    writeopts.inner,
                    otxn_opts.inner,
                    // No old transaction handle to reuse.
                    std::ptr::null_mut(),
                )
            },
            _marker: PhantomData::default(),
        }
    }

    /// Writes a batch with the given write options.
    pub fn write_opt(
        &self,
        batch: WriteBatchWithTransaction<true>,
        writeopts: &WriteOptions,
    ) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_optimistictransactiondb_write(
                self.inner.db,
                writeopts.inner,
                batch.inner
            ));
        }
        Ok(())
    }

    /// Writes a batch with default write options.
    pub fn write(&self, batch: WriteBatchWithTransaction<true>) -> Result<(), Error> {
        self.write_opt(batch, &WriteOptions::default())
    }

    /// Writes a batch without writing to the write-ahead log.
    pub fn write_without_wal(&self, batch: WriteBatchWithTransaction<true>) -> Result<(), Error> {
        let mut wo = WriteOptions::new();
        wo.disable_wal(true);
        self.write_opt(batch, &wo)
    }
}

// Copyright 2021 Yiyuan Liu
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
14 | // 15 | 16 | use crate::ffi; 17 | 18 | pub struct TransactionOptions { 19 | pub(crate) inner: *mut ffi::rocksdb_transaction_options_t, 20 | } 21 | 22 | unsafe impl Send for TransactionOptions {} 23 | unsafe impl Sync for TransactionOptions {} 24 | 25 | impl Default for TransactionOptions { 26 | fn default() -> Self { 27 | let txn_opts = unsafe { ffi::rocksdb_transaction_options_create() }; 28 | assert!( 29 | !txn_opts.is_null(), 30 | "Could not create RocksDB transaction options" 31 | ); 32 | Self { inner: txn_opts } 33 | } 34 | } 35 | 36 | impl TransactionOptions { 37 | pub fn new() -> TransactionOptions { 38 | TransactionOptions::default() 39 | } 40 | 41 | pub fn set_skip_prepare(&mut self, skip_prepare: bool) { 42 | unsafe { 43 | ffi::rocksdb_transaction_options_set_set_snapshot(self.inner, u8::from(skip_prepare)); 44 | } 45 | } 46 | 47 | /// Specifies use snapshot or not. 48 | /// 49 | /// Default: false. 50 | /// 51 | /// If a transaction has a snapshot set, the transaction will ensure that 52 | /// any keys successfully written(or fetched via `get_for_update`) have not 53 | /// been modified outside this transaction since the time the snapshot was 54 | /// set. 55 | /// If a snapshot has not been set, the transaction guarantees that keys have 56 | /// not been modified since the time each key was first written (or fetched via 57 | /// `get_for_update`). 58 | /// 59 | /// Using snapshot will provide stricter isolation guarantees at the 60 | /// expense of potentially more transaction failures due to conflicts with 61 | /// other writes. 62 | /// 63 | /// Calling `set_snapshot` will not affect the version of Data returned by `get` 64 | /// methods. 65 | pub fn set_snapshot(&mut self, snapshot: bool) { 66 | unsafe { 67 | ffi::rocksdb_transaction_options_set_set_snapshot(self.inner, u8::from(snapshot)); 68 | } 69 | } 70 | 71 | /// Specifies whether detect deadlock or not. 
    ///
    /// Setting to true means that before acquiring locks, this transaction will
    /// check if doing so will cause a deadlock. If so, it will return with
    /// Status::Busy. The user should retry their transaction.
    ///
    /// Default: false.
    pub fn set_deadlock_detect(&mut self, deadlock_detect: bool) {
        unsafe {
            ffi::rocksdb_transaction_options_set_deadlock_detect(
                self.inner,
                u8::from(deadlock_detect),
            );
        }
    }

    /// Specifies the wait timeout in milliseconds when a transaction attempts to lock a key.
    ///
    /// If 0, no waiting is done if a lock cannot instantly be acquired.
    /// If negative, transaction lock timeout in `TransactionDBOptions` will be used.
    ///
    /// Default: -1.
    pub fn set_lock_timeout(&mut self, lock_timeout: i64) {
        unsafe {
            ffi::rocksdb_transaction_options_set_lock_timeout(self.inner, lock_timeout);
        }
    }

    /// Specifies expiration duration in milliseconds.
    ///
    /// If non-negative, transactions that last longer than this many milliseconds will fail to commit.
    /// If not set, a forgotten transaction that is never committed, rolled back, or deleted
    /// will never relinquish any locks it holds. This could prevent keys from being
    /// written by other writers.
    ///
    /// Default: -1.
    pub fn set_expiration(&mut self, expiration: i64) {
        unsafe {
            ffi::rocksdb_transaction_options_set_expiration(self.inner, expiration);
        }
    }

    /// Specifies the number of traversals to make during deadlock detection.
    ///
    /// Default: 50.
    pub fn set_deadlock_detect_depth(&mut self, depth: i64) {
        unsafe {
            ffi::rocksdb_transaction_options_set_deadlock_detect_depth(self.inner, depth);
        }
    }

    /// Specifies the maximum number of bytes used for the write batch. 0 means no limit.
    ///
    /// Default: 0.
124 | pub fn set_max_write_batch_size(&mut self, size: usize) { 125 | unsafe { 126 | ffi::rocksdb_transaction_options_set_max_write_batch_size(self.inner, size); 127 | } 128 | } 129 | } 130 | 131 | impl Drop for TransactionOptions { 132 | fn drop(&mut self) { 133 | unsafe { 134 | ffi::rocksdb_transaction_options_destroy(self.inner); 135 | } 136 | } 137 | } 138 | 139 | pub struct TransactionDBOptions { 140 | pub(crate) inner: *mut ffi::rocksdb_transactiondb_options_t, 141 | } 142 | 143 | unsafe impl Send for TransactionDBOptions {} 144 | unsafe impl Sync for TransactionDBOptions {} 145 | 146 | impl Default for TransactionDBOptions { 147 | fn default() -> Self { 148 | let txn_db_opts = unsafe { ffi::rocksdb_transactiondb_options_create() }; 149 | assert!( 150 | !txn_db_opts.is_null(), 151 | "Could not create RocksDB transaction_db options" 152 | ); 153 | Self { inner: txn_db_opts } 154 | } 155 | } 156 | 157 | impl TransactionDBOptions { 158 | pub fn new() -> TransactionDBOptions { 159 | TransactionDBOptions::default() 160 | } 161 | 162 | /// Specifies the wait timeout in milliseconds when writing a key 163 | /// outside a transaction (i.e. by calling `TransactionDB::put` directly). 164 | /// 165 | /// If 0, no waiting is done if a lock cannot instantly be acquired. 166 | /// If negative, there is no timeout and will block indefinitely when acquiring 167 | /// a lock. 168 | /// 169 | /// Not using a timeout can lead to deadlocks. Currently, there 170 | /// is no deadlock-detection to recover from a deadlock. While DB writes 171 | /// cannot deadlock with other DB writes, they can deadlock with a transaction. 172 | /// A negative timeout should only be used if all transactions have a small 173 | /// expiration set. 174 | /// 175 | /// Default: 1000(1s). 
176 | pub fn set_default_lock_timeout(&mut self, default_lock_timeout: i64) { 177 | unsafe { 178 | ffi::rocksdb_transactiondb_options_set_default_lock_timeout( 179 | self.inner, 180 | default_lock_timeout, 181 | ); 182 | } 183 | } 184 | 185 | /// Specifies the default wait timeout in milliseconds when a transaction 186 | /// attempts to lock a key if not specified in `TransactionOptions`. 187 | /// 188 | /// If 0, no waiting is done if a lock cannot instantly be acquired. 189 | /// If negative, there is no timeout. Not using a timeout is not recommended 190 | /// as it can lead to deadlocks. Currently, there is no deadlock-detection to 191 | /// recover from a deadlock. 192 | /// 193 | /// Default: 1000(1s). 194 | pub fn set_txn_lock_timeout(&mut self, txn_lock_timeout: i64) { 195 | unsafe { 196 | ffi::rocksdb_transactiondb_options_set_transaction_lock_timeout( 197 | self.inner, 198 | txn_lock_timeout, 199 | ); 200 | } 201 | } 202 | 203 | /// Specifies the maximum number of keys that can be locked at the same time 204 | /// per column family. 205 | /// 206 | /// If the number of locked keys is greater than `max_num_locks`, transaction 207 | /// `writes` (or `get_for_update`) will return an error. 208 | /// If this value is not positive, no limit will be enforced. 209 | /// 210 | /// Default: -1. 211 | pub fn set_max_num_locks(&mut self, max_num_locks: i64) { 212 | unsafe { 213 | ffi::rocksdb_transactiondb_options_set_max_num_locks(self.inner, max_num_locks); 214 | } 215 | } 216 | 217 | /// Specifies lock table stripes count. 218 | /// 219 | /// Increasing this value will increase the concurrency by dividing the lock 220 | /// table (per column family) into more sub-tables, each with their own 221 | /// separate mutex. 222 | /// 223 | /// Default: 16. 
224 | pub fn set_num_stripes(&mut self, num_stripes: usize) { 225 | unsafe { 226 | ffi::rocksdb_transactiondb_options_set_num_stripes(self.inner, num_stripes); 227 | } 228 | } 229 | } 230 | 231 | impl Drop for TransactionDBOptions { 232 | fn drop(&mut self) { 233 | unsafe { 234 | ffi::rocksdb_transactiondb_options_destroy(self.inner); 235 | } 236 | } 237 | } 238 | 239 | pub struct OptimisticTransactionOptions { 240 | pub(crate) inner: *mut ffi::rocksdb_optimistictransaction_options_t, 241 | } 242 | 243 | unsafe impl Send for OptimisticTransactionOptions {} 244 | unsafe impl Sync for OptimisticTransactionOptions {} 245 | 246 | impl Default for OptimisticTransactionOptions { 247 | fn default() -> Self { 248 | let txn_opts = unsafe { ffi::rocksdb_optimistictransaction_options_create() }; 249 | assert!( 250 | !txn_opts.is_null(), 251 | "Could not create RocksDB optimistic transaction options" 252 | ); 253 | Self { inner: txn_opts } 254 | } 255 | } 256 | 257 | impl OptimisticTransactionOptions { 258 | pub fn new() -> OptimisticTransactionOptions { 259 | OptimisticTransactionOptions::default() 260 | } 261 | 262 | /// Specifies use snapshot or not. 263 | /// 264 | /// Default: false. 265 | /// 266 | /// If a transaction has a snapshot set, the transaction will ensure that 267 | /// any keys successfully written(or fetched via `get_for_update`) have not 268 | /// been modified outside the transaction since the time the snapshot was 269 | /// set. 270 | /// If a snapshot has not been set, the transaction guarantees that keys have 271 | /// not been modified since the time each key was first written (or fetched via 272 | /// `get_for_update`). 273 | /// 274 | /// Using snapshot will provide stricter isolation guarantees at the 275 | /// expense of potentially more transaction failures due to conflicts with 276 | /// other writes. 277 | /// 278 | /// Calling `set_snapshot` will not affect the version of Data returned by `get` 279 | /// methods. 
280 | pub fn set_snapshot(&mut self, snapshot: bool) { 281 | unsafe { 282 | ffi::rocksdb_optimistictransaction_options_set_set_snapshot( 283 | self.inner, 284 | u8::from(snapshot), 285 | ); 286 | } 287 | } 288 | } 289 | 290 | impl Drop for OptimisticTransactionOptions { 291 | fn drop(&mut self) { 292 | unsafe { 293 | ffi::rocksdb_optimistictransaction_options_destroy(self.inner); 294 | } 295 | } 296 | } 297 | -------------------------------------------------------------------------------- /tests/fail/checkpoint_outlive_db.rs: -------------------------------------------------------------------------------- 1 | use speedb::{DB, checkpoint::Checkpoint}; 2 | 3 | fn main() { 4 | let _checkpoint = { 5 | let db = DB::open_default("foo").unwrap(); 6 | Checkpoint::new(&db) 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/checkpoint_outlive_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/checkpoint_outlive_db.rs:6:25 3 | | 4 | 4 | let _checkpoint = { 5 | | ----------- borrow later stored here 6 | 5 | let db = DB::open_default("foo").unwrap(); 7 | 6 | Checkpoint::new(&db) 8 | | ^^^ borrowed value does not live long enough 9 | 7 | }; 10 | | - `db` dropped here while still borrowed 11 | -------------------------------------------------------------------------------- /tests/fail/iterator_outlive_db.rs: -------------------------------------------------------------------------------- 1 | use speedb::{IteratorMode, DB}; 2 | 3 | fn main() { 4 | let _iter = { 5 | let db = DB::open_default("foo").unwrap(); 6 | db.iterator(IteratorMode::Start) 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/iterator_outlive_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> 
tests/fail/iterator_outlive_db.rs:6:9 3 | | 4 | 4 | let _iter = { 5 | | ----- borrow later stored here 6 | 5 | let db = DB::open_default("foo").unwrap(); 7 | 6 | db.iterator(IteratorMode::Start) 8 | | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ borrowed value does not live long enough 9 | 7 | }; 10 | | - `db` dropped here while still borrowed 11 | -------------------------------------------------------------------------------- /tests/fail/open_with_multiple_refs_as_single_threaded.rs: -------------------------------------------------------------------------------- 1 | use speedb::{SingleThreaded, DBWithThreadMode, Options}; 2 | 3 | fn main() { 4 | let db = DBWithThreadMode::::open_default("/path/to/dummy").unwrap(); 5 | let db_ref1 = &db; 6 | let db_ref2 = &db; 7 | let opts = Options::default(); 8 | db_ref1.create_cf("cf1", &opts).unwrap(); 9 | db_ref2.create_cf("cf2", &opts).unwrap(); 10 | } 11 | -------------------------------------------------------------------------------- /tests/fail/open_with_multiple_refs_as_single_threaded.stderr: -------------------------------------------------------------------------------- 1 | error[E0596]: cannot borrow `*db_ref1` as mutable, as it is behind a `&` reference 2 | --> tests/fail/open_with_multiple_refs_as_single_threaded.rs:8:5 3 | | 4 | 5 | let db_ref1 = &db; 5 | | --- help: consider changing this to be a mutable reference: `&mut db` 6 | ... 7 | 8 | db_ref1.create_cf("cf1", &opts).unwrap(); 8 | | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `db_ref1` is a `&` reference, so the data it refers to cannot be borrowed as mutable 9 | 10 | error[E0596]: cannot borrow `*db_ref2` as mutable, as it is behind a `&` reference 11 | --> tests/fail/open_with_multiple_refs_as_single_threaded.rs:9:5 12 | | 13 | 6 | let db_ref2 = &db; 14 | | --- help: consider changing this to be a mutable reference: `&mut db` 15 | ... 
16 | 9 | db_ref2.create_cf("cf2", &opts).unwrap(); 17 | | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `db_ref2` is a `&` reference, so the data it refers to cannot be borrowed as mutable 18 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_db.rs: -------------------------------------------------------------------------------- 1 | use speedb::DB; 2 | 3 | fn main() { 4 | let _snapshot = { 5 | let db = DB::open_default("foo").unwrap(); 6 | db.snapshot() 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/snapshot_outlive_db.rs:6:9 3 | | 4 | 4 | let _snapshot = { 5 | | --------- borrow later stored here 6 | 5 | let db = DB::open_default("foo").unwrap(); 7 | 6 | db.snapshot() 8 | | ^^^^^^^^^^^^^ borrowed value does not live long enough 9 | 7 | }; 10 | | - `db` dropped here while still borrowed 11 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction.rs: -------------------------------------------------------------------------------- 1 | use speedb::{TransactionDB, SingleThreaded}; 2 | 3 | fn main() { 4 | let db = TransactionDB::::open_default("foo").unwrap(); 5 | let _snapshot = { 6 | let txn = db.transaction(); 7 | txn.snapshot() 8 | }; 9 | } -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `txn` does not live long enough 2 | --> tests/fail/snapshot_outlive_transaction.rs:7:9 3 | | 4 | 5 | let _snapshot = { 5 | | --------- borrow later stored here 6 | 6 | let txn = db.transaction(); 7 | 7 | txn.snapshot() 8 | | ^^^^^^^^^^^^^^ borrowed value 
does not live long enough 9 | 8 | }; 10 | | - `txn` dropped here while still borrowed 11 | -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction_db.rs: -------------------------------------------------------------------------------- 1 | use speedb::{TransactionDB, SingleThreaded}; 2 | 3 | fn main() { 4 | let _snapshot = { 5 | let db = TransactionDB::::open_default("foo").unwrap(); 6 | db.snapshot() 7 | }; 8 | } -------------------------------------------------------------------------------- /tests/fail/snapshot_outlive_transaction_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/snapshot_outlive_transaction_db.rs:6:9 3 | | 4 | 4 | let _snapshot = { 5 | | --------- borrow later stored here 6 | 5 | let db = TransactionDB::::open_default("foo").unwrap(); 7 | 6 | db.snapshot() 8 | | ^^^^^^^^^^^^^ borrowed value does not live long enough 9 | 7 | }; 10 | | - `db` dropped here while still borrowed 11 | -------------------------------------------------------------------------------- /tests/fail/transaction_outlive_transaction_db.rs: -------------------------------------------------------------------------------- 1 | use speedb::{TransactionDB, SingleThreaded}; 2 | 3 | fn main() { 4 | let _txn = { 5 | let db = TransactionDB::::open_default("foo").unwrap(); 6 | db.transaction() 7 | }; 8 | } -------------------------------------------------------------------------------- /tests/fail/transaction_outlive_transaction_db.stderr: -------------------------------------------------------------------------------- 1 | error[E0597]: `db` does not live long enough 2 | --> tests/fail/transaction_outlive_transaction_db.rs:6:9 3 | | 4 | 4 | let _txn = { 5 | | ---- borrow later stored here 6 | 5 | let db = TransactionDB::::open_default("foo").unwrap(); 7 | 6 | db.transaction() 8 | | ^^^^^^^^^^^^^^^^ borrowed value 
does not live long enough 9 | 7 | }; 10 | | - `db` dropped here while still borrowed 11 | -------------------------------------------------------------------------------- /tests/test_backup.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{ 20 | backup::{BackupEngine, BackupEngineOptions, RestoreOptions}, 21 | Env, DB, 22 | }; 23 | use util::DBPath; 24 | 25 | #[test] 26 | fn restore_from_latest() { 27 | // create backup 28 | let path = DBPath::new("restore_from_latest_test"); 29 | let restore_path = DBPath::new("restore_from_latest_path"); 30 | { 31 | let db = DB::open_default(&path).unwrap(); 32 | assert!(db.put(b"k1", b"v1111").is_ok()); 33 | let value = db.get(b"k1"); 34 | assert_eq!(value.unwrap().unwrap(), b"v1111"); 35 | { 36 | let backup_path = DBPath::new("restore_from_latest_test_backup"); 37 | let env = Env::new().unwrap(); 38 | let backup_opts = BackupEngineOptions::new(&backup_path).unwrap(); 39 | 40 | let mut backup_engine = BackupEngine::open(&backup_opts, &env).unwrap(); 41 | assert!(backup_engine.create_new_backup(&db).is_ok()); 42 | 43 | // check backup info 44 | let info = backup_engine.get_backup_info(); 45 | assert!(!info.is_empty()); 46 | info.iter().for_each(|i| { 47 | 
assert!(backup_engine.verify_backup(i.backup_id).is_ok()); 48 | assert!(i.size > 0); 49 | }); 50 | 51 | let mut restore_option = RestoreOptions::default(); 52 | restore_option.set_keep_log_files(false); // true to keep log files 53 | let restore_status = backup_engine.restore_from_latest_backup( 54 | &restore_path, 55 | &restore_path, 56 | &restore_option, 57 | ); 58 | assert!(restore_status.is_ok()); 59 | 60 | let db_restore = DB::open_default(&restore_path).unwrap(); 61 | let value = db_restore.get(b"k1"); 62 | assert_eq!(value.unwrap().unwrap(), b"v1111"); 63 | } 64 | } 65 | } 66 | 67 | #[test] 68 | fn restore_from_backup() { 69 | // create backup 70 | let path = DBPath::new("restore_from_backup_test"); 71 | let restore_path = DBPath::new("restore_from_backup_path"); 72 | { 73 | let db = DB::open_default(&path).unwrap(); 74 | assert!(db.put(b"k1", b"v1111").is_ok()); 75 | let value = db.get(b"k1"); 76 | assert_eq!(value.unwrap().unwrap(), b"v1111"); 77 | { 78 | let backup_path = DBPath::new("restore_from_latest_test_backup"); 79 | let env = Env::new().unwrap(); 80 | let backup_opts = BackupEngineOptions::new(&backup_path).unwrap(); 81 | 82 | let mut backup_engine = BackupEngine::open(&backup_opts, &env).unwrap(); 83 | assert!(backup_engine.create_new_backup(&db).is_ok()); 84 | 85 | // check backup info 86 | let info = backup_engine.get_backup_info(); 87 | assert!(!info.is_empty()); 88 | info.iter().for_each(|i| { 89 | assert!(backup_engine.verify_backup(i.backup_id).is_ok()); 90 | assert!(i.size > 0); 91 | }); 92 | 93 | let backup_id = info.get(0).unwrap().backup_id; 94 | let mut restore_option = RestoreOptions::default(); 95 | restore_option.set_keep_log_files(false); // true to keep log files 96 | let restore_status = backup_engine.restore_from_backup( 97 | &restore_path, 98 | &restore_path, 99 | &restore_option, 100 | backup_id, 101 | ); 102 | assert!(restore_status.is_ok()); 103 | 104 | let db_restore = DB::open_default(&restore_path).unwrap(); 105 | let 
value = db_restore.get(b"k1"); 106 | assert_eq!(value.unwrap().unwrap(), b"v1111"); 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /tests/test_checkpoint.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{checkpoint::Checkpoint, Options, DB}; 20 | use util::DBPath; 21 | 22 | #[test] 23 | pub fn test_single_checkpoint() { 24 | const PATH_PREFIX: &str = "_rust_rocksdb_cp_single_"; 25 | 26 | // Create DB with some data 27 | let db_path = DBPath::new(&format!("{PATH_PREFIX}db1")); 28 | 29 | let mut opts = Options::default(); 30 | opts.create_if_missing(true); 31 | let db = DB::open(&opts, &db_path).unwrap(); 32 | 33 | db.put(b"k1", b"v1").unwrap(); 34 | db.put(b"k2", b"v2").unwrap(); 35 | db.put(b"k3", b"v3").unwrap(); 36 | db.put(b"k4", b"v4").unwrap(); 37 | 38 | // Create checkpoint 39 | let cp1 = Checkpoint::new(&db).unwrap(); 40 | let cp1_path = DBPath::new(&format!("{PATH_PREFIX}cp1")); 41 | cp1.create_checkpoint(&cp1_path).unwrap(); 42 | 43 | // Verify checkpoint 44 | let cp = DB::open_default(&cp1_path).unwrap(); 45 | 46 | assert_eq!(cp.get(b"k1").unwrap().unwrap(), b"v1"); 47 | assert_eq!(cp.get(b"k2").unwrap().unwrap(), b"v2"); 48 | 
assert_eq!(cp.get(b"k3").unwrap().unwrap(), b"v3"); 49 | assert_eq!(cp.get(b"k4").unwrap().unwrap(), b"v4"); 50 | } 51 | 52 | #[test] 53 | pub fn test_multi_checkpoints() { 54 | const PATH_PREFIX: &str = "_rust_rocksdb_cp_multi_"; 55 | 56 | // Create DB with some data 57 | let db_path = DBPath::new(&format!("{PATH_PREFIX}db1")); 58 | 59 | let mut opts = Options::default(); 60 | opts.create_if_missing(true); 61 | let db = DB::open(&opts, &db_path).unwrap(); 62 | 63 | db.put(b"k1", b"v1").unwrap(); 64 | db.put(b"k2", b"v2").unwrap(); 65 | db.put(b"k3", b"v3").unwrap(); 66 | db.put(b"k4", b"v4").unwrap(); 67 | 68 | // Create first checkpoint 69 | let cp1 = Checkpoint::new(&db).unwrap(); 70 | let cp1_path = DBPath::new(&format!("{PATH_PREFIX}cp1")); 71 | cp1.create_checkpoint(&cp1_path).unwrap(); 72 | 73 | // Verify checkpoint 74 | let cp = DB::open_default(&cp1_path).unwrap(); 75 | 76 | assert_eq!(cp.get(b"k1").unwrap().unwrap(), b"v1"); 77 | assert_eq!(cp.get(b"k2").unwrap().unwrap(), b"v2"); 78 | assert_eq!(cp.get(b"k3").unwrap().unwrap(), b"v3"); 79 | assert_eq!(cp.get(b"k4").unwrap().unwrap(), b"v4"); 80 | 81 | // Change some existing keys 82 | db.put(b"k1", b"modified").unwrap(); 83 | db.put(b"k2", b"changed").unwrap(); 84 | 85 | // Add some new keys 86 | db.put(b"k5", b"v5").unwrap(); 87 | db.put(b"k6", b"v6").unwrap(); 88 | 89 | // Create another checkpoint 90 | let cp2 = Checkpoint::new(&db).unwrap(); 91 | let cp2_path = DBPath::new(&format!("{PATH_PREFIX}cp2")); 92 | cp2.create_checkpoint(&cp2_path).unwrap(); 93 | 94 | // Verify second checkpoint 95 | let cp = DB::open_default(&cp2_path).unwrap(); 96 | 97 | assert_eq!(cp.get(b"k1").unwrap().unwrap(), b"modified"); 98 | assert_eq!(cp.get(b"k2").unwrap().unwrap(), b"changed"); 99 | assert_eq!(cp.get(b"k5").unwrap().unwrap(), b"v5"); 100 | assert_eq!(cp.get(b"k6").unwrap().unwrap(), b"v6"); 101 | } 102 | 103 | #[test] 104 | fn test_checkpoint_outlive_db() { 105 | let t = trybuild::TestCases::new(); 106 | 
t.compile_fail("tests/fail/checkpoint_outlive_db.rs"); 107 | } 108 | -------------------------------------------------------------------------------- /tests/test_comparator.rs: -------------------------------------------------------------------------------- 1 | use speedb::{Options, DB}; 2 | use std::cmp::Ordering; 3 | use std::iter::FromIterator; 4 | 5 | /// This function is for ensuring test of backwards compatibility 6 | pub fn rocks_old_compare(one: &[u8], two: &[u8]) -> Ordering { 7 | one.cmp(two) 8 | } 9 | 10 | type CompareFn = dyn Fn(&[u8], &[u8]) -> Ordering; 11 | 12 | /// create database add some values, and iterate over these 13 | pub fn write_to_db_with_comparator(compare_fn: Box) -> Vec { 14 | let mut result_vec = Vec::new(); 15 | 16 | let path = "_path_for_rocksdb_storage"; 17 | { 18 | let mut db_opts = Options::default(); 19 | 20 | db_opts.create_missing_column_families(true); 21 | db_opts.create_if_missing(true); 22 | db_opts.set_comparator("cname", compare_fn); 23 | let db = DB::open(&db_opts, path).unwrap(); 24 | db.put(b"a-key", b"a-value").unwrap(); 25 | db.put(b"b-key", b"b-value").unwrap(); 26 | let mut iter = db.raw_iterator(); 27 | iter.seek_to_first(); 28 | while iter.valid() { 29 | let key = iter.key().unwrap(); 30 | // maybe not best way to copy? 
31 | let key_str = key.iter().map(|b| *b as char).collect::>(); 32 | result_vec.push(String::from_iter(key_str)); 33 | iter.next(); 34 | } 35 | } 36 | let _ = DB::destroy(&Options::default(), path); 37 | result_vec 38 | } 39 | 40 | #[test] 41 | /// First verify that using a function as a comparator works as expected 42 | /// This should verify backwards compatibility 43 | /// Then run a test with a closure where an x-variable is passed 44 | /// Keep in mind that this variable must be moved to the closure 45 | /// Then run a test with a reverse sorting closure and make sure the order is reverted 46 | fn test_comparator() { 47 | let local_compare = move |one: &[u8], two: &[u8]| one.cmp(two); 48 | 49 | let x = 0; 50 | let local_compare_reverse = move |one: &[u8], two: &[u8]| { 51 | println!( 52 | "Use the x value from the closure scope to do something smart: {:?}", 53 | x 54 | ); 55 | match one.cmp(two) { 56 | Ordering::Less => Ordering::Greater, 57 | Ordering::Equal => Ordering::Equal, 58 | Ordering::Greater => Ordering::Less, 59 | } 60 | }; 61 | 62 | let old_res = write_to_db_with_comparator(Box::new(rocks_old_compare)); 63 | println!("Keys in normal sort order, no closure: {:?}", old_res); 64 | assert_eq!(vec!["a-key", "b-key"], old_res); 65 | let res_closure = write_to_db_with_comparator(Box::new(local_compare)); 66 | println!("Keys in normal sort order, closure: {:?}", res_closure); 67 | assert_eq!(res_closure, old_res); 68 | let res_closure_reverse = write_to_db_with_comparator(Box::new(local_compare_reverse)); 69 | println!( 70 | "Keys in reverse sort order, closure: {:?}", 71 | res_closure_reverse 72 | ); 73 | assert_eq!(vec!["b-key", "a-key"], res_closure_reverse); 74 | } 75 | -------------------------------------------------------------------------------- /tests/test_compationfilter.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 
(the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{CompactionDecision, Options, DB}; 20 | use util::DBPath; 21 | 22 | #[cfg(test)] 23 | #[allow(unused_variables)] 24 | fn test_filter(level: u32, key: &[u8], value: &[u8]) -> CompactionDecision { 25 | use self::CompactionDecision::*; 26 | match key.first() { 27 | Some(&b'_') => Remove, 28 | Some(&b'%') => Change(b"secret"), 29 | _ => Keep, 30 | } 31 | } 32 | 33 | #[test] 34 | fn compaction_filter_test() { 35 | let path = DBPath::new("_rust_rocksdb_filter_test"); 36 | let mut opts = Options::default(); 37 | opts.create_if_missing(true); 38 | opts.set_compaction_filter("test", test_filter); 39 | { 40 | let db = DB::open(&opts, &path).unwrap(); 41 | let _ = db.put(b"k1", b"a"); 42 | let _ = db.put(b"_k", b"b"); 43 | let _ = db.put(b"%k", b"c"); 44 | db.compact_range(None::<&[u8]>, None::<&[u8]>); 45 | assert_eq!(&*db.get(b"k1").unwrap().unwrap(), b"a"); 46 | assert!(db.get(b"_k").unwrap().is_none()); 47 | assert_eq!(&*db.get(b"%k").unwrap().unwrap(), b"secret"); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /tests/test_multithreaded.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use std::{sync::Arc, thread}; 18 | 19 | use speedb::DB; 20 | use util::DBPath; 21 | 22 | const N: usize = 100_000; 23 | 24 | #[test] 25 | pub fn test_multithreaded() { 26 | let n = DBPath::new("_rust_rocksdb_multithreadtest"); 27 | { 28 | let db = DB::open_default(&n).unwrap(); 29 | let db = Arc::new(db); 30 | 31 | db.put(b"key", b"value1").unwrap(); 32 | 33 | let db1 = db.clone(); 34 | let j1 = thread::spawn(move || { 35 | for _ in 1..N { 36 | db1.put(b"key", b"value1").unwrap(); 37 | } 38 | }); 39 | 40 | let db2 = db.clone(); 41 | let j2 = thread::spawn(move || { 42 | for _ in 1..N { 43 | db2.put(b"key", b"value2").unwrap(); 44 | } 45 | }); 46 | 47 | let j3 = thread::spawn(move || { 48 | for _ in 1..N { 49 | let result = match db.get(b"key") { 50 | Ok(Some(v)) => !(&v[..] != b"value1" && &v[..] != b"value2"), 51 | _ => false, 52 | }; 53 | assert!(result); 54 | } 55 | }); 56 | j1.join().unwrap(); 57 | j2.join().unwrap(); 58 | j3.join().unwrap(); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /tests/test_pinnable_slice.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{Options, DB}; 20 | use util::DBPath; 21 | 22 | #[test] 23 | fn test_pinnable_slice() { 24 | let path = DBPath::new("_rust_rocksdb_pinnable_slice_test"); 25 | 26 | let mut opts = Options::default(); 27 | opts.create_if_missing(true); 28 | let db = DB::open(&opts, &path).unwrap(); 29 | 30 | db.put(b"k1", b"value12345").unwrap(); 31 | 32 | let result = db.get_pinned(b"k1"); 33 | assert!(result.is_ok()); 34 | 35 | let value = result.unwrap(); 36 | assert!(value.is_some()); 37 | 38 | let pinnable_slice = value.unwrap(); 39 | 40 | assert_eq!(b"12345", &pinnable_slice[5..10]); 41 | } 42 | 43 | #[test] 44 | fn test_snapshot_pinnable_slice() { 45 | let path = DBPath::new("_rust_rocksdb_snapshot_pinnable_slice_test"); 46 | 47 | let mut opts = Options::default(); 48 | opts.create_if_missing(true); 49 | let db = DB::open(&opts, &path).unwrap(); 50 | 51 | db.put(b"k1", b"value12345").unwrap(); 52 | let snap = db.snapshot(); 53 | assert!(db.put(b"k1", b"value23456").is_ok()); 54 | 55 | let result = snap.get_pinned(b"k1"); 56 | assert!(result.is_ok()); 57 | 58 | let value = result.unwrap(); 59 | assert!(value.is_some()); 60 | 61 | let pinnable_slice = value.unwrap(); 62 | 63 | assert_eq!(b"12345", &pinnable_slice[5..10]); 64 | } 65 | -------------------------------------------------------------------------------- /tests/test_property.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler 
Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{properties, Options, DB}; 20 | use util::DBPath; 21 | 22 | #[test] 23 | fn property_test() { 24 | let n = DBPath::new("_rust_rocksdb_property_test"); 25 | { 26 | let db = DB::open_default(&n).unwrap(); 27 | let prop_name: &std::ffi::CStr = properties::STATS; 28 | let value = db.property_value(prop_name).unwrap().unwrap(); 29 | assert!(value.contains("Stats")); 30 | } 31 | 32 | { 33 | let db = DB::open_default(&n).unwrap(); 34 | let prop_name: properties::PropertyName = properties::STATS.to_owned(); 35 | let value = db.property_value(&prop_name).unwrap().unwrap(); 36 | assert!(value.contains("Stats")); 37 | } 38 | 39 | { 40 | let db = DB::open_default(&n).unwrap(); 41 | let prop_name: String = properties::STATS.to_owned().into_string(); 42 | let value = db.property_value(&prop_name).unwrap().unwrap(); 43 | assert!(value.contains("Stats")); 44 | } 45 | } 46 | 47 | #[test] 48 | fn property_cf_test() { 49 | let n = DBPath::new("_rust_rocksdb_property_cf_test"); 50 | { 51 | let opts = Options::default(); 52 | #[cfg(feature = "multi-threaded-cf")] 53 | let db = DB::open_default(&n).unwrap(); 54 | #[cfg(not(feature = "multi-threaded-cf"))] 55 | let mut db = DB::open_default(&n).unwrap(); 56 | db.create_cf("cf1", &opts).unwrap(); 57 | let cf = db.cf_handle("cf1").unwrap(); 58 | let value 
= db 59 | .property_value_cf(&cf, properties::STATS) 60 | .unwrap() 61 | .unwrap(); 62 | 63 | assert!(value.contains("Stats")); 64 | } 65 | } 66 | 67 | #[test] 68 | fn property_int_test() { 69 | let n = DBPath::new("_rust_rocksdb_property_int_test"); 70 | { 71 | let db = DB::open_default(&n).unwrap(); 72 | let value = db 73 | .property_int_value(properties::ESTIMATE_LIVE_DATA_SIZE) 74 | .unwrap(); 75 | 76 | assert_eq!(value, Some(0)); 77 | } 78 | } 79 | 80 | #[test] 81 | fn property_int_cf_test() { 82 | let n = DBPath::new("_rust_rocksdb_property_int_cf_test"); 83 | { 84 | let opts = Options::default(); 85 | #[cfg(feature = "multi-threaded-cf")] 86 | let db = DB::open_default(&n).unwrap(); 87 | #[cfg(not(feature = "multi-threaded-cf"))] 88 | let mut db = DB::open_default(&n).unwrap(); 89 | db.create_cf("cf1", &opts).unwrap(); 90 | let cf = db.cf_handle("cf1").unwrap(); 91 | let total_keys = db 92 | .property_int_value_cf(&cf, properties::ESTIMATE_NUM_KEYS) 93 | .unwrap(); 94 | 95 | assert_eq!(total_keys, Some(0)); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /tests/test_raw_iterator.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{DBAccess, DBRawIteratorWithThreadMode, DB}; 20 | use util::DBPath; 21 | 22 | fn assert_item(iter: &DBRawIteratorWithThreadMode<'_, D>, key: &[u8], value: &[u8]) { 23 | assert!(iter.valid()); 24 | assert_eq!(iter.key(), Some(key)); 25 | assert_eq!(iter.value(), Some(value)); 26 | assert_eq!(iter.item(), Some((key, value))); 27 | } 28 | 29 | fn assert_no_item(iter: &DBRawIteratorWithThreadMode<'_, D>) { 30 | assert!(!iter.valid()); 31 | assert_eq!(iter.key(), None); 32 | assert_eq!(iter.value(), None); 33 | assert_eq!(iter.item(), None); 34 | } 35 | 36 | #[test] 37 | pub fn test_forwards_iteration() { 38 | let n = DBPath::new("forwards_iteration"); 39 | { 40 | let db = DB::open_default(&n).unwrap(); 41 | db.put(b"k1", b"v1").unwrap(); 42 | db.put(b"k2", b"v2").unwrap(); 43 | db.put(b"k3", b"v3").unwrap(); 44 | db.put(b"k4", b"v4").unwrap(); 45 | 46 | let mut iter = db.raw_iterator(); 47 | iter.seek_to_first(); 48 | assert_item(&iter, b"k1", b"v1"); 49 | 50 | iter.next(); 51 | assert_item(&iter, b"k2", b"v2"); 52 | 53 | iter.next(); // k3 54 | iter.next(); // k4 55 | 56 | iter.next(); // invalid! 57 | assert_no_item(&iter); 58 | } 59 | } 60 | 61 | #[test] 62 | pub fn test_seek_last() { 63 | let n = DBPath::new("backwards_iteration"); 64 | { 65 | let db = DB::open_default(&n).unwrap(); 66 | db.put(b"k1", b"v1").unwrap(); 67 | db.put(b"k2", b"v2").unwrap(); 68 | db.put(b"k3", b"v3").unwrap(); 69 | db.put(b"k4", b"v4").unwrap(); 70 | 71 | let mut iter = db.raw_iterator(); 72 | iter.seek_to_last(); 73 | assert_item(&iter, b"k4", b"v4"); 74 | 75 | iter.prev(); 76 | assert_item(&iter, b"k3", b"v3"); 77 | 78 | iter.prev(); // k2 79 | iter.prev(); // k1 80 | 81 | iter.prev(); // invalid! 
82 | assert_no_item(&iter); 83 | } 84 | } 85 | 86 | #[test] 87 | pub fn test_seek() { 88 | let n = DBPath::new("seek"); 89 | { 90 | let db = DB::open_default(&n).unwrap(); 91 | db.put(b"k1", b"v1").unwrap(); 92 | db.put(b"k2", b"v2").unwrap(); 93 | db.put(b"k4", b"v4").unwrap(); 94 | 95 | let mut iter = db.raw_iterator(); 96 | iter.seek(b"k2"); 97 | 98 | assert_item(&iter, b"k2", b"v2"); 99 | 100 | // Check it gets the next key when the key doesn't exist 101 | iter.seek(b"k3"); 102 | assert_item(&iter, b"k4", b"v4"); 103 | } 104 | } 105 | 106 | #[test] 107 | pub fn test_seek_to_nonexistant() { 108 | let n = DBPath::new("seek_to_nonexistant"); 109 | { 110 | let db = DB::open_default(&n).unwrap(); 111 | db.put(b"k1", b"v1").unwrap(); 112 | db.put(b"k3", b"v3").unwrap(); 113 | db.put(b"k4", b"v4").unwrap(); 114 | 115 | let mut iter = db.raw_iterator(); 116 | iter.seek(b"k2"); 117 | assert_item(&iter, b"k3", b"v3"); 118 | } 119 | } 120 | 121 | #[test] 122 | pub fn test_seek_for_prev() { 123 | let n = DBPath::new("seek_for_prev"); 124 | { 125 | let db = DB::open_default(&n).unwrap(); 126 | db.put(b"k1", b"v1").unwrap(); 127 | db.put(b"k2", b"v2").unwrap(); 128 | db.put(b"k4", b"v4").unwrap(); 129 | 130 | let mut iter = db.raw_iterator(); 131 | iter.seek(b"k2"); 132 | assert_item(&iter, b"k2", b"v2"); 133 | 134 | // Check it gets the previous key when the key doesn't exist 135 | iter.seek_for_prev(b"k3"); 136 | assert_item(&iter, b"k2", b"v2"); 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /tests/test_rocksdb_options.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use std::{fs, io::Read as _}; 18 | 19 | use speedb::{ 20 | BlockBasedOptions, Cache, DBCompressionType, DataBlockIndexType, Env, Options, ReadOptions, DB, 21 | }; 22 | use util::DBPath; 23 | 24 | #[test] 25 | fn test_load_latest() { 26 | let n = DBPath::new("_rust_rocksdb_test_load_latest"); 27 | { 28 | let mut opts = Options::default(); 29 | opts.create_if_missing(true); 30 | opts.create_missing_column_families(true); 31 | let _ = DB::open_cf(&opts, &n, vec!["cf0", "cf1"]).unwrap(); 32 | } 33 | let (_, cfs) = Options::load_latest( 34 | &n, 35 | Env::new().unwrap(), 36 | true, 37 | Cache::new_lru_cache(1024 * 8), 38 | ) 39 | .unwrap(); 40 | assert!(cfs.iter().any(|cf| cf.name() == "default")); 41 | assert!(cfs.iter().any(|cf| cf.name() == "cf0")); 42 | assert!(cfs.iter().any(|cf| cf.name() == "cf1")); 43 | } 44 | 45 | #[test] 46 | fn test_set_num_levels() { 47 | let n = DBPath::new("_rust_rocksdb_test_set_num_levels"); 48 | { 49 | let mut opts = Options::default(); 50 | opts.create_if_missing(true); 51 | opts.set_num_levels(2); 52 | let _db = DB::open(&opts, &n).unwrap(); 53 | } 54 | } 55 | 56 | #[test] 57 | fn test_increase_parallelism() { 58 | let n = DBPath::new("_rust_rocksdb_test_increase_parallelism"); 59 | { 60 | let mut opts = Options::default(); 61 | opts.create_if_missing(true); 62 | opts.increase_parallelism(4); 63 | let _db = DB::open(&opts, &n).unwrap(); 64 | } 65 | } 66 | 67 | #[test] 68 | fn test_set_level_compaction_dynamic_level_bytes() { 69 | let n = 
DBPath::new("_rust_rocksdb_test_set_level_compaction_dynamic_level_bytes"); 70 | { 71 | let mut opts = Options::default(); 72 | opts.create_if_missing(true); 73 | opts.set_level_compaction_dynamic_level_bytes(true); 74 | let _db = DB::open(&opts, &n).unwrap(); 75 | } 76 | } 77 | 78 | #[test] 79 | fn test_block_based_options() { 80 | let path = "_rust_rocksdb_test_block_based_options"; 81 | let n = DBPath::new(path); 82 | { 83 | let mut opts = Options::default(); 84 | opts.create_if_missing(true); 85 | 86 | let mut block_opts = BlockBasedOptions::default(); 87 | block_opts.set_cache_index_and_filter_blocks(true); 88 | block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); 89 | block_opts.set_format_version(4); 90 | block_opts.set_index_block_restart_interval(16); 91 | 92 | opts.set_block_based_table_factory(&block_opts); 93 | let _db = DB::open(&opts, &n).unwrap(); 94 | 95 | // read the setting from the LOG file 96 | let mut rocksdb_log = fs::File::open(format!("{}/LOG", (&n).as_ref().to_str().unwrap())) 97 | .expect("rocksdb creates a LOG file"); 98 | let mut settings = String::new(); 99 | rocksdb_log.read_to_string(&mut settings).unwrap(); 100 | 101 | // check the settings are set in the LOG file 102 | assert!(settings.contains("cache_index_and_filter_blocks: 1")); 103 | assert!(settings.contains("pin_l0_filter_and_index_blocks_in_cache: 1")); 104 | assert!(settings.contains("format_version: 4")); 105 | assert!(settings.contains("index_block_restart_interval: 16")); 106 | } 107 | } 108 | 109 | #[test] 110 | fn test_read_options() { 111 | let mut read_opts = ReadOptions::default(); 112 | read_opts.set_verify_checksums(false); 113 | } 114 | 115 | #[test] 116 | fn test_set_data_block_index_type() { 117 | let path = "_rust_rocksdb_test_set_data_block_index_type"; 118 | let n = DBPath::new(path); 119 | 120 | // Default is `BinarySearch` 121 | { 122 | let mut opts = Options::default(); 123 | opts.create_if_missing(true); 124 | 125 | let block_opts = 
BlockBasedOptions::default(); 126 | opts.set_block_based_table_factory(&block_opts); 127 | let _db = DB::open(&opts, &n).expect("open a db works"); 128 | 129 | let mut rocksdb_log = fs::File::open(format!("{}/LOG", (&n).as_ref().to_str().unwrap())) 130 | .expect("rocksdb creates a LOG file"); 131 | let mut settings = String::new(); 132 | rocksdb_log 133 | .read_to_string(&mut settings) 134 | .expect("can read the LOG file"); 135 | assert!(settings.contains("data_block_index_type: 0")); 136 | assert!(settings.contains("data_block_hash_table_util_ratio: 0.750000")); 137 | } 138 | 139 | // Setting the index type and hash table utilization ratio works 140 | { 141 | let mut opts = Options::default(); 142 | opts.create_if_missing(false); 143 | 144 | let mut block_opts = BlockBasedOptions::default(); 145 | block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash); 146 | block_opts.set_data_block_hash_ratio(0.35); 147 | opts.set_block_based_table_factory(&block_opts); 148 | let _db = DB::open(&opts, &n).expect("open a db works"); 149 | 150 | let mut rocksdb_log = fs::File::open(format!("{}/LOG", (&n).as_ref().to_str().unwrap())) 151 | .expect("rocksdb creates a LOG file"); 152 | let mut settings = String::new(); 153 | rocksdb_log 154 | .read_to_string(&mut settings) 155 | .expect("can read the LOG file"); 156 | assert!(settings.contains("data_block_index_type: 1")); 157 | assert!(settings.contains("data_block_hash_table_util_ratio: 0.350000")); 158 | } 159 | } 160 | 161 | #[test] 162 | #[cfg(feature = "zstd")] 163 | fn set_compression_options_zstd_max_train_bytes() { 164 | let path = DBPath::new("_rust_set_compression_options_zstd_max_train_bytes"); 165 | { 166 | let mut opts = Options::default(); 167 | opts.create_if_missing(true); 168 | opts.set_compression_options(4, 5, 6, 7); 169 | opts.set_zstd_max_train_bytes(100); 170 | let _db = DB::open(&opts, &path).unwrap(); 171 | } 172 | } 173 | 174 | fn test_compression_type(ty: DBCompressionType) { 175 | let 
path = DBPath::new("_test_compression_type"); 176 | 177 | let mut opts = Options::default(); 178 | opts.set_compression_type(ty); 179 | opts.create_if_missing(true); 180 | let db = DB::open(&opts, &path); 181 | 182 | let should_open = match ty { 183 | DBCompressionType::None => true, 184 | DBCompressionType::Snappy => cfg!(feature = "snappy"), 185 | DBCompressionType::Zlib => cfg!(feature = "zlib"), 186 | DBCompressionType::Bz2 => cfg!(feature = "bzip2"), 187 | DBCompressionType::Lz4 | DBCompressionType::Lz4hc => cfg!(feature = "lz4"), 188 | DBCompressionType::Zstd => cfg!(feature = "zstd"), 189 | }; 190 | 191 | if should_open { 192 | let _db = db.unwrap(); 193 | } else { 194 | let _err = db.unwrap_err(); 195 | } 196 | } 197 | 198 | #[test] 199 | fn test_none_compression() { 200 | test_compression_type(DBCompressionType::None); 201 | } 202 | 203 | #[test] 204 | fn test_snappy_compression() { 205 | test_compression_type(DBCompressionType::Snappy); 206 | } 207 | 208 | #[test] 209 | fn test_zlib_compression() { 210 | test_compression_type(DBCompressionType::Zlib); 211 | } 212 | 213 | #[test] 214 | fn test_bz2_compression() { 215 | test_compression_type(DBCompressionType::Bz2); 216 | } 217 | 218 | #[test] 219 | fn test_lz4_compression() { 220 | test_compression_type(DBCompressionType::Lz4); 221 | test_compression_type(DBCompressionType::Lz4hc); 222 | } 223 | 224 | #[test] 225 | fn test_zstd_compression() { 226 | test_compression_type(DBCompressionType::Zstd); 227 | } 228 | -------------------------------------------------------------------------------- /tests/test_slice_transform.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{Options, SliceTransform, DB}; 20 | use util::{assert_iter, pair, DBPath}; 21 | 22 | #[test] 23 | pub fn test_slice_transform() { 24 | let db_path = DBPath::new("_rust_rocksdb_slice_transform_test"); 25 | { 26 | const A1: &[u8] = b"aaa1"; 27 | const A2: &[u8] = b"aaa2"; 28 | const B1: &[u8] = b"bbb1"; 29 | const B2: &[u8] = b"bbb2"; 30 | 31 | fn first_three(k: &[u8]) -> &[u8] { 32 | &k[..3] 33 | } 34 | 35 | let prefix_extractor = SliceTransform::create("first_three", first_three, None); 36 | 37 | let mut opts = Options::default(); 38 | opts.create_if_missing(true); 39 | opts.set_prefix_extractor(prefix_extractor); 40 | 41 | let db = DB::open(&opts, &db_path).unwrap(); 42 | 43 | assert!(db.put(A1, A1).is_ok()); 44 | assert!(db.put(A2, A2).is_ok()); 45 | assert!(db.put(B1, B1).is_ok()); 46 | assert!(db.put(B2, B2).is_ok()); 47 | 48 | assert_iter(db.prefix_iterator(b"aaa"), &[pair(A1, A1), pair(A2, A2)]); 49 | assert_iter(db.prefix_iterator(b"bbb"), &[pair(B1, B1), pair(B2, B2)]); 50 | } 51 | } 52 | 53 | #[test] 54 | fn test_no_in_domain() { 55 | fn extract_suffix(slice: &[u8]) -> &[u8] { 56 | if slice.len() > 4 { 57 | &slice[slice.len() - 4..slice.len()] 58 | } else { 59 | slice 60 | } 61 | } 62 | 63 | let db_path = DBPath::new("_rust_rocksdb_prefix_test"); 64 | { 65 | let mut opts = Options::default(); 66 | opts.create_if_missing(true); 67 | opts.set_prefix_extractor(SliceTransform::create( 68 | "test slice transform", 69 | 
extract_suffix, 70 | None, 71 | )); 72 | opts.set_memtable_prefix_bloom_ratio(0.1); 73 | 74 | let db = DB::open(&opts, &db_path).unwrap(); 75 | db.put(b"key_sfx1", b"a").unwrap(); 76 | db.put(b"key_sfx2", b"b").unwrap(); 77 | 78 | assert_eq!(db.get(b"key_sfx1").unwrap().unwrap(), b"a"); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /tests/test_sst_file_writer.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Lucjan Suski 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | mod util; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{Error, Options, SstFileWriter, DB}; 20 | use util::DBPath; 21 | 22 | #[test] 23 | fn sst_file_writer_works() { 24 | let db_path = DBPath::new("_rust_rocksdb_sstfilewritertest"); 25 | let dir = tempfile::Builder::new() 26 | .prefix("_rust_rocksdb_sstfilewritertest") 27 | .tempdir() 28 | .expect("Failed to create temporary path for file writer."); 29 | let writer_path = dir.path().join("filewriter"); 30 | { 31 | let opts = Options::default(); 32 | let mut writer = SstFileWriter::create(&opts); 33 | writer.open(&writer_path).unwrap(); 34 | writer.put(b"k1", b"v1").unwrap(); 35 | 36 | writer.put(b"k2", b"v2").unwrap(); 37 | 38 | writer.delete(b"k3").unwrap(); 39 | writer.finish().unwrap(); 40 | assert!(writer.file_size() > 0); 41 | } 42 | { 43 | let db = DB::open_default(&db_path).unwrap(); 44 | db.put(b"k3", b"v3").unwrap(); 45 | db.ingest_external_file(vec![&writer_path]).unwrap(); 46 | let r: Result>, Error> = db.get(b"k1"); 47 | assert_eq!(r.unwrap().unwrap(), b"v1"); 48 | let r: Result>, Error> = db.get(b"k2"); 49 | assert_eq!(r.unwrap().unwrap(), b"v2"); 50 | assert!(db.get(b"k3").unwrap().is_none()); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /tests/test_write_batch.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Tyler Neely 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::collections::HashMap; 16 | 17 | use pretty_assertions::assert_eq; 18 | 19 | use speedb::{WriteBatch, WriteBatchIterator}; 20 | 21 | #[test] 22 | fn test_write_batch_clear() { 23 | let mut batch = WriteBatch::default(); 24 | batch.put(b"1", b"2"); 25 | assert_eq!(batch.len(), 1); 26 | batch.clear(); 27 | assert_eq!(batch.len(), 0); 28 | assert!(batch.is_empty()); 29 | } 30 | 31 | #[test] 32 | fn test_write_batch_with_serialized_data() { 33 | struct Iterator { 34 | data: HashMap, Vec>, 35 | } 36 | 37 | impl WriteBatchIterator for Iterator { 38 | fn put(&mut self, key: Box<[u8]>, value: Box<[u8]>) { 39 | match self.data.remove(key.as_ref()) { 40 | Some(expect) => { 41 | assert_eq!(value.as_ref(), expect.as_slice()); 42 | } 43 | None => { 44 | panic!("key not exists"); 45 | } 46 | } 47 | } 48 | 49 | fn delete(&mut self, _: Box<[u8]>) { 50 | panic!("invalid delete operation"); 51 | } 52 | } 53 | 54 | let mut kvs: HashMap, Vec> = HashMap::default(); 55 | kvs.insert(vec![1], vec![2]); 56 | kvs.insert(vec![2], vec![3]); 57 | kvs.insert(vec![1, 2, 3, 4, 5], vec![4]); 58 | 59 | let mut b1 = WriteBatch::default(); 60 | for (k, v) in &kvs { 61 | b1.put(k, v); 62 | } 63 | let data = b1.data(); 64 | 65 | let b2 = WriteBatch::from_data(data); 66 | let mut it = Iterator { data: kvs }; 67 | b2.iterate(&mut it); 68 | } 69 | -------------------------------------------------------------------------------- /tests/util/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::path::{Path, PathBuf}; 4 | 5 | use speedb::{Error, Options, DB}; 6 | 7 | /// Temporary database path which calls DB::Destroy when DBPath is dropped. 
8 | pub struct DBPath { 9 | dir: tempfile::TempDir, // kept for cleaning up during drop 10 | path: PathBuf, 11 | } 12 | 13 | impl DBPath { 14 | /// Produces a fresh (non-existent) temporary path which will be DB::destroy'ed automatically. 15 | pub fn new(prefix: &str) -> DBPath { 16 | let dir = tempfile::Builder::new() 17 | .prefix(prefix) 18 | .tempdir() 19 | .expect("Failed to create temporary path for db."); 20 | let path = dir.path().join("db"); 21 | 22 | DBPath { dir, path } 23 | } 24 | } 25 | 26 | impl Drop for DBPath { 27 | fn drop(&mut self) { 28 | let opts = Options::default(); 29 | DB::destroy(&opts, &self.path).expect("Failed to destroy temporary DB"); 30 | } 31 | } 32 | 33 | /// Convert a DBPath ref to a Path ref. 34 | /// We don't implement this for DBPath values because we want them to 35 | /// exist until the end of their scope, not get passed in to functions and 36 | /// dropped early. 37 | impl AsRef for &DBPath { 38 | fn as_ref(&self) -> &Path { 39 | &self.path 40 | } 41 | } 42 | 43 | type Pair = (Box<[u8]>, Box<[u8]>); 44 | 45 | pub fn pair(left: &[u8], right: &[u8]) -> Pair { 46 | (Box::from(left), Box::from(right)) 47 | } 48 | 49 | #[track_caller] 50 | pub fn assert_iter(iter: impl Iterator>, want: &[Pair]) { 51 | let got = iter.collect::, _>>().unwrap(); 52 | assert_eq!(got.as_slice(), want); 53 | } 54 | 55 | #[track_caller] 56 | pub fn assert_iter_reversed(iter: impl Iterator>, want: &[Pair]) { 57 | let mut got = iter.collect::, _>>().unwrap(); 58 | got.reverse(); 59 | assert_eq!(got.as_slice(), want); 60 | } 61 | --------------------------------------------------------------------------------