├── .github ├── check-license-headers.yaml ├── dependabot.yml ├── license-header.txt └── workflows │ ├── attach-static-libs.yaml │ ├── cache-cleanup.yaml │ ├── ci.yaml │ ├── conventional-commits.yaml │ ├── default-branch-cache.yaml │ ├── gh-pages.yaml │ ├── pr-title.yaml │ └── release.yaml ├── .gitignore ├── .markdownlint.json ├── .vscode └── extensions.json ├── CHANGELOG.md ├── CODEOWNERS ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE.md ├── README.docker.md ├── README.md ├── RELEASE.md ├── benchmark ├── Cargo.toml ├── Grafana-dashboard.json ├── README.md ├── cloud-config.txt ├── setup-scripts │ ├── README.md │ ├── build-environment.sh │ ├── build-firewood.sh │ ├── install-grafana.sh │ └── run-benchmarks.sh └── src │ ├── create.rs │ ├── main.rs │ ├── single.rs │ ├── tenkrandom.rs │ └── zipf.rs ├── cliff.toml ├── docs └── assets │ └── architecture.svg ├── ffi ├── .gitignore ├── .golangci.yaml ├── Cargo.toml ├── README.md ├── build.rs ├── cbindgen.toml ├── firewood.go ├── firewood.h ├── firewood_test.go ├── go.mod ├── go.sum ├── kvbackend.go ├── memory.go ├── proposal.go ├── revision.go ├── src │ ├── lib.rs │ └── metrics_setup.rs └── tests │ ├── eth │ ├── eth_compatibility_test.go │ ├── go.mod │ └── go.sum │ └── firewood │ ├── go.mod │ ├── go.sum │ └── merkle_compatibility_test.go ├── firewood ├── Cargo.toml ├── benches │ └── hashops.rs ├── examples │ └── insert.rs ├── src │ ├── db.rs │ ├── lib.rs │ ├── manager.rs │ ├── merkle.rs │ ├── proof.rs │ ├── range_proof.rs │ ├── stream.rs │ └── v2 │ │ ├── api.rs │ │ ├── emptydb.rs │ │ ├── mod.rs │ │ └── propose.rs └── tests │ ├── common │ └── mod.rs │ ├── db.rs │ └── v2api.rs ├── fwdctl ├── Cargo.toml ├── README.md ├── src │ ├── create.rs │ ├── delete.rs │ ├── dump.rs │ ├── get.rs │ ├── graph.rs │ ├── insert.rs │ ├── main.rs │ └── root.rs └── tests │ └── cli.rs ├── grpc-testtool ├── Cargo.toml ├── README.md ├── benches │ └── insert.rs ├── build.rs ├── proto │ ├── merkle │ │ └── merkle.proto │ ├── process-server │ │ └── process-server.proto │ ├── rpcdb │ │ └── rpcdb.proto │ └── sync │ │ └── sync.proto └── src │ ├── bin │ ├── client.rs │ └── process-server.rs │ ├── lib.rs │ ├── service.rs │ └── service │ ├── database.rs │ ├── db.rs │ └── process.rs ├── storage ├── Cargo.toml ├── benches │ └── serializer.rs └── src │ ├── hashednode.rs │ ├── hashers │ ├── ethhash.rs │ ├── merkledb.rs │ └── mod.rs │ ├── lib.rs │ ├── linear │ ├── filebacked.rs │ ├── memory.rs │ └── mod.rs │ ├── logger.rs │ ├── node │ ├── branch.rs │ ├── leaf.rs │ ├── mod.rs │ └── path.rs │ ├── nodestore.rs │ └── trie_hash.rs └── triehash ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── benches └── triehash.rs └── src └── lib.rs /.github/check-license-headers.yaml: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "include": [ 4 | "**/**/*.rs" 5 | ], 6 | "exclude": [ 7 | "target/**", 8 | "*/LICENSE*", 9 | "LICENSE.md", 10 | "RELEASE.md", 11 | "grpc-testtool/**", 12 | "README*", 13 | "**/README*", 14 | "Cargo.toml", 15 | "*/Cargo.toml", 16 | "docs/**", 17 | "CODEOWNERS", 18 | "CONTRIBUTING.md", 19 | "benchmark/**", 20 | "ffi/**", 21 | "triehash/**", 22 | "CHANGELOG.md", 23 | "cliff.toml", 24 | ], 25 | "license": "./.github/license-header.txt" 26 | }, 27 | { 28 | "include": [ 29 | "target/**", 30 | "*/LICENSE*", 31 | "LICENSE.md", 32 | "RELEASE.md", 33 | "grpc-testtool/**", 34 | "README*", 35 | "**/README*", 36 | "Cargo.toml", 37 | "*/Cargo.toml", 38 | "docs/**", 39 | "benchmark/**", 40 | "ffi/**", 41 | "CODEOWNERS", 42 | "CONTRIBUTING.md", 43 
| "triehash/**", 44 | "CHANGELOG.md", 45 | "cliff.toml", 46 | ], 47 | } 48 | ] 49 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" 9 | directory: "/" 10 | schedule: 11 | interval: "daily" 12 | time: "05:00" 13 | timezone: "America/Los_Angeles" 14 | open-pull-requests-limit: 10 15 | allow: 16 | - dependency-type: "all" 17 | -------------------------------------------------------------------------------- /.github/license-header.txt: -------------------------------------------------------------------------------- 1 | // Copyright (C) %year%, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | -------------------------------------------------------------------------------- /.github/workflows/cache-cleanup.yaml: -------------------------------------------------------------------------------- 1 | name: cleanup caches by a branch 2 | on: 3 | pull_request: 4 | types: 5 | - closed 6 | 7 | jobs: 8 | cleanup: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Check out code 12 | uses: actions/checkout@v4 13 | 14 | - name: Cleanup 15 | run: | 16 | gh extension install actions/gh-actions-cache 17 | 18 | REPO=${{ github.repository }} 19 | BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge" 20 | 21 | echo "Fetching list of cache key" 22 | cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 ) 23 | 24 | ## Setting this to not fail the workflow while deleting cache keys. 25 | set +e 26 | echo "Deleting caches..." 
27 | for cacheKey in $cacheKeysForPR 28 | do 29 | gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm 30 | done 31 | echo "Done" 32 | env: 33 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 34 | 35 | -------------------------------------------------------------------------------- /.github/workflows/conventional-commits.yaml: -------------------------------------------------------------------------------- 1 | name: Conventional Commits 2 | 3 | on: 4 | pull_request: 5 | branches: [ main ] 6 | 7 | jobs: 8 | build: 9 | name: Conventional Commits 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | - uses: webiny/action-conventional-commits@v1.3.0 15 | with: 16 | allowed-commit-types: "build,chore,ci,docs,feat,fix,perf,refactor,style,test" 17 | -------------------------------------------------------------------------------- /.github/workflows/default-branch-cache.yaml: -------------------------------------------------------------------------------- 1 | # because apparently caches are isolated across branches 2 | name: default-branch-cache 3 | 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - main 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | - uses: dtolnay/rust-toolchain@stable 20 | - uses: arduino/setup-protoc@v3 21 | with: 22 | repo-token: ${{ secrets.GITHUB_TOKEN }} 23 | - uses: Swatinem/rust-cache@v2 24 | with: 25 | save-if: "false" 26 | shared-key: "debug-no-features" 27 | - name: Check 28 | run: cargo check --workspace --tests --examples --benches 29 | - name: Build 30 | run: cargo build --workspace --tests --examples --benches 31 | -------------------------------------------------------------------------------- /.github/workflows/gh-pages.yaml: -------------------------------------------------------------------------------- 1 | name: gh-pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | - "rkuris/gh-pages" 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: dtolnay/rust-toolchain@stable 18 | - uses: arduino/setup-protoc@v3 19 | with: 20 | repo-token: ${{ secrets.GITHUB_TOKEN }} 21 | # caution: this is the same restore as in ci.yaml 22 | - uses: Swatinem/rust-cache@v2 23 | with: 24 | save-if: "false" 25 | - name: Build 26 | run: cargo doc --document-private-items --no-deps 27 | - name: Set up _site redirect to firewood 28 | run: | 29 | rm -fr _site 30 | mkdir _site 31 | echo "" > _site/index.html 32 | - name: Copy doc files to _site 33 | run: | 34 | cp -rv target/doc/* ./_site 35 | cp -rv docs/assets ./_site 36 | - uses: actions/upload-artifact@v4 37 | with: 38 | name: pages 39 | path: _site 40 | if-no-files-found: error 41 | overwrite: true 42 | include-hidden-files: true 43 | deploy: 44 | needs: build 45 | permissions: 46 | pages: write 47 | id-token: write 48 | environment: 49 | name: github-pages 50 | url: ${{ steps.deployment.outputs.page_url }} 51 | runs-on: ubuntu-latest 52 | steps: 53 | - name: Download pages artifact 54 | uses: actions/download-artifact@v4 55 | with: 56 | name: pages 57 | path: . 58 | - name: Setup Pages 59 | uses: actions/configure-pages@v3 60 | - name: Upload artifact 61 | uses: actions/upload-pages-artifact@v3 62 | with: 63 | path: . 
64 | - name: Deploy to GitHub pages 65 | id: deployment 66 | uses: actions/deploy-pages@v4 67 | -------------------------------------------------------------------------------- /.github/workflows/pr-title.yaml: -------------------------------------------------------------------------------- 1 | # Check that the PR title matches the conventional commit format 2 | name: pr-title 3 | 4 | permissions: 5 | pull-requests: write 6 | 7 | on: 8 | pull_request: 9 | types: 10 | - edited 11 | - opened 12 | - reopened 13 | 14 | jobs: 15 | check-pr-title: 16 | runs-on: ubuntu-latest 17 | permissions: 18 | pull-requests: read 19 | steps: 20 | - name: Check PR title follows conventional commits 21 | uses: amannn/action-semantic-pull-request@v5 22 | with: 23 | types: | 24 | build 25 | chore 26 | ci 27 | docs 28 | feat 29 | fix 30 | perf 31 | refactor 32 | style 33 | test 34 | # scope is not required ("feat: whatever" is okay) 35 | requireScope: false 36 | # if the PR only has one commit, we can validate the commit message 37 | # instead of the PR title 38 | validateSingleCommit: true 39 | subjectPattern: ^.{1,}$ 40 | subjectPatternError: | 41 | The subject "{subject}" found in the pull request title "{title}" 42 | didn't match the configured pattern. Please ensure that the subject 43 | matches the conventional commit format. 44 | env: 45 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 46 | 47 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v4 14 | - name: Release 15 | uses: softprops/action-gh-release@v1 16 | with: 17 | draft: true 18 | generate_release_notes: true 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore VSCode directory 2 | .vscode 3 | 4 | compose-dev.yaml 5 | 6 | # ignore test databases 7 | *_db 8 | 9 | #### Below sections are auto-generated #### 10 | 11 | # Created by https://www.toptal.com/developers/gitignore/api/rust,visualstudiocode,vim,macos 12 | # Edit at https://www.toptal.com/developers/gitignore?templates=rust,visualstudiocode,vim,macos 13 | 14 | ### macOS ### 15 | # General 16 | .DS_Store 17 | .AppleDouble 18 | .LSOverride 19 | 20 | # Icon must end with two \r 21 | Icon 22 | 23 | 24 | # Thumbnails 25 | ._* 26 | 27 | # Files that might appear in the root of a volume 28 | .DocumentRevisions-V100 29 | .fseventsd 30 | .Spotlight-V100 31 | .TemporaryItems 32 | .Trashes 33 | .VolumeIcon.icns 34 | .com.apple.timemachine.donotpresent 35 | 36 | # Directories potentially created on remote AFP share 37 | .AppleDB 38 | .AppleDesktop 39 | Network Trash Folder 40 | Temporary Items 41 | .apdisk 42 | 43 | ### macOS Patch ### 44 | # iCloud generated files 45 | *.icloud 46 | 47 | ### Rust ### 48 | # Generated by Cargo 49 | # will have compiled files and executables 50 | debug/ 51 | target/ 52 | 53 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 54 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 55 | Cargo.lock 56 | 57 | # These are backup files generated by rustfmt 58 | **/*.rs.bk 59 | 60 | # MSVC Windows builds of rustc generate these, which store debugging 
information 61 | *.pdb 62 | 63 | ### Vim ### 64 | # Swap 65 | [._]*.s[a-v][a-z] 66 | !*.svg # comment out if you don't need vector files 67 | [._]*.sw[a-p] 68 | [._]s[a-rt-v][a-z] 69 | [._]ss[a-gi-z] 70 | [._]sw[a-p] 71 | 72 | # Session 73 | Session.vim 74 | Sessionx.vim 75 | 76 | # Temporary 77 | .netrwhist 78 | *~ 79 | # Auto-generated tag files 80 | tags 81 | # Persistent undo 82 | [._]*.un~ 83 | 84 | ### VisualStudioCode ### 85 | .vscode/* 86 | !.vscode/settings.json 87 | !.vscode/tasks.json 88 | !.vscode/launch.json 89 | !.vscode/extensions.json 90 | !.vscode/*.code-snippets 91 | 92 | # Local History for Visual Studio Code 93 | .history/ 94 | 95 | # Built Visual Studio Code Extensions 96 | *.vsix 97 | 98 | ### VisualStudioCode Patch ### 99 | # Ignore all local history of files 100 | .history 101 | .ionide 102 | 103 | # End of https://www.toptal.com/developers/gitignore/api/rust,visualstudiocode,vim,macos 104 | -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "line-length": false, 3 | } 4 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "davidanson.vscode-markdownlint", 4 | "rust-lang.rust-analyzer", 5 | "vadimcn.vscode-lldb" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS 2 | * @rkuris @aaronbuchwald @demosdemon 3 | /ffi @alarso16 4 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Welcome contributors 2 | 3 | We are eager for contributions and happy you found yourself here. 4 | Please read through this document to familiarize yourself with our 5 | guidelines for contributing to firewood. 6 | 7 | ## Table of Contents 8 | 9 | * [Quick Links](#quick-links) 10 | * [Testing](#testing) 11 | * [How to submit changes](#how-to-submit-changes) 12 | * [Where can I ask for help?](#where-can-i-ask-for-help) 13 | 14 | ## [Quick Links] 15 | 16 | * [Setting up docker](README.docker.md) 17 | * [Auto-generated documentation](https://ava-labs.github.io/firewood/firewood/) 18 | * [Issue tracker](https://github.com/ava-labs/firewood/issues) 19 | 20 | ## [Testing] 21 | 22 | After submitting a PR, we'll run all the tests and verify your code meets our submission guidelines. To make it more likely to pass these checks, you should run the following commands locally: 23 | 24 | cargo fmt 25 | cargo test 26 | cargo clippy 27 | cargo doc --no-deps 28 | 29 | Resolve any warnings or errors before making your PR. 30 | 31 | ## [How to submit changes] 32 | 33 | To create a PR, fork firewood, and use GitHub to create the PR. We typically prioritize reviews in the middle of our next work day, 34 | so during the week you should expect a response within 24 hours. 35 | 36 | ## [How to report a bug] 37 | 38 | Please use the [issue tracker](https://github.com/ava-labs/firewood/issues) for reporting issues. 39 | 40 | ## [First time fixes for contributors] 41 | 42 | The [issue tracker](https://github.com/ava-labs/firewood/issues) typically has some issues tagged for first-time contributors.
If not, 43 | please reach out. We hope you work on an easy task before tackling a harder one. 44 | 45 | ## [How to request an enhancement] 46 | 47 | Just like bugs, please use the [issue tracker](https://github.com/ava-labs/firewood/issues) for requesting enhancements. Please tag the issue with the "enhancement" tag. 48 | 49 | ## [Style Guide / Coding Conventions] 50 | 51 | We generally follow the same rules that `cargo fmt` and `cargo clippy` will report as warnings, with a few notable exceptions as documented in the associated Cargo.toml file. 52 | 53 | By default, we prohibit bare `unwrap` calls and index dereferencing, as there are usually better ways to write this code. In the case where you can't, please use `expect` with a message explaining why it would be a bug, which we currently allow. For more information on our motivation, please read this great article on unwrap: [Using unwrap() in Rust is Okay](https://blog.burntsushi.net/unwrap) by [Andrew Gallant](https://blog.burntsushi.net). 54 | 55 | ## [Where can I ask for help]? 56 | 57 | Please reach out on X (formerly twitter) @rkuris for help or questions! 58 | 59 | ## Thank you 60 | 61 | We'd like to extend a pre-emptive "thank you" for reading through this and submitting your first contribution! 62 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "firewood", 4 | "fwdctl", 5 | "storage", 6 | "benchmark", 7 | "ffi", 8 | "triehash", 9 | ] 10 | resolver = "2" 11 | 12 | [profile.release] 13 | debug = true 14 | 15 | [profile.maxperf] 16 | panic = "abort" 17 | codegen-units = 1 18 | lto = "fat" 19 | debug = false 20 | inherits = "release" 21 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # Ecosystem License 2 | 3 | Version: 1.1 4 | 5 | Subject to the terms herein, Ava Labs, Inc. (**“Ava Labs”**) hereby grants you 6 | a limited, royalty-free, worldwide, non-sublicensable, non-transferable, 7 | non-exclusive license to use, copy, modify, create derivative works based on, 8 | and redistribute the Software, in source code, binary, or any other form, 9 | including any modifications or derivative works of the Software (collectively, 10 | **“Licensed Software”**), in each case subject to this Ecosystem License 11 | (**“License”**). 12 | 13 | This License applies to all copies, modifications, derivative works, and any 14 | other form or usage of the Licensed Software. You will include and display 15 | this License, without modification, with all uses of the Licensed Software, 16 | regardless of form. 
17 | 18 | You will use the Licensed Software solely (i) in connection with the Avalanche 19 | Public Blockchain platform, having a NetworkID of 1 (Mainnet) or 5 (Fuji), and 20 | associated blockchains, comprised exclusively of the Avalanche X-Chain, 21 | C-Chain, P-Chain and any subnets linked to the P-Chain (“Avalanche Authorized 22 | Platform”) or (ii) for non-production, testing or research purposes within the 23 | Avalanche ecosystem, in each case, without any commercial application 24 | (“Non-Commercial Use”); provided that this License does not permit use of the 25 | Licensed Software in connection with (a) any forks of the Avalanche Authorized 26 | Platform or (b) in any manner not operationally connected to the Avalanche 27 | Authorized Platform other than, for the avoidance of doubt, the limited 28 | exception for Non-Commercial Use. Ava Labs may publicly announce changes or 29 | additions to the Avalanche Authorized Platform, which may expand or modify 30 | usage of the Licensed Software. Upon such announcement, the Avalanche 31 | Authorized Platform will be deemed to be the then-current iteration of such 32 | platform. 33 | 34 | You hereby acknowledge and agree to the terms set forth at 35 | . 36 | 37 | If you use the Licensed Software in violation of this License, this License 38 | will automatically terminate and Ava Labs reserves all rights to seek any 39 | remedy for such violation. 40 | 41 | Except for uses explicitly permitted in this License, Ava Labs retains all 42 | rights in the Licensed Software, including without limitation the ability to 43 | modify it. 44 | 45 | Except as required or explicitly permitted by this License, you will not use 46 | any Ava Labs names, logos, or trademarks without Ava Labs’ prior written 47 | consent. 48 | 49 | You may use this License for software other than the “Licensed Software” 50 | specified above, as long as the only change to this License is the definition 51 | of the term “Licensed Software.” 52 | 53 | The Licensed Software may reference third party components. You acknowledge 54 | and agree that these third party components may be governed by a separate 55 | license or terms and that you will comply with them. 56 | 57 | **TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE LICENSED SOFTWARE IS PROVIDED 58 | ON AN “AS IS” BASIS, AND AVA LABS EXPRESSLY DISCLAIMS AND EXCLUDES ALL 59 | REPRESENTATIONS, WARRANTIES AND OTHER TERMS AND CONDITIONS, WHETHER EXPRESS OR 60 | IMPLIED, INCLUDING WITHOUT LIMITATION BY OPERATION OF LAW OR BY CUSTOM, 61 | STATUTE OR OTHERWISE, AND INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTY, 62 | TERM, OR CONDITION OF NON-INFRINGEMENT, MERCHANTABILITY, TITLE, OR FITNESS FOR 63 | PARTICULAR PURPOSE. YOU USE THE LICENSED SOFTWARE AT YOUR OWN RISK. AVA LABS 64 | EXPRESSLY DISCLAIMS ALL LIABILITY (INCLUDING FOR ALL DIRECT, CONSEQUENTIAL OR 65 | OTHER DAMAGES OR LOSSES) RELATED TO ANY USE OF THE LICENSED SOFTWARE.** 66 | -------------------------------------------------------------------------------- /README.docker.md: -------------------------------------------------------------------------------- 1 | # Docker on Mac Compatibility 2 | 3 | Note: 4 | Docker compatibility is a work in progress. Please PR any changes here if you find a better way to do this.
5 | 6 | ## Steps 7 | 8 | ### Step 1 9 | 10 | Install docker-desktop ([guide](https://docs.docker.com/desktop/install/mac-install/)) 11 | 12 | ### Step 2 13 | 14 | Set up a dev-environment ([guide](https://docs.docker.com/desktop/dev-environments/set-up/#set-up-a-dev-environment)) 15 | 16 | Here, you want to specifically pick a local directory (the repo's directory). 17 | 18 | ![image](https://github.com/ava-labs/firewood/assets/3286504/83d6b66d-19e3-4b59-bc73-f67cf68d7329) 19 | 20 | This is best because you can still do all your `git` stuff from the host. 21 | 22 | ### Step 3 23 | 24 | For this next step, you will need the `Dev Containers` VSCODE extension, authored by Microsoft. 25 | 26 | Open your dev-environment with VSCODE. Until you do this, the volume might not be properly mounted. If you (dear reader) know of a better way to do this, please open a PR. VSCODE is very useful for its step-by-step debugger, but other than that, you can run whatever IDE you would like in the host environment and just open a shell in the container to run the tests. 27 | 28 | ![image](https://github.com/ava-labs/firewood/assets/3286504/88c981cb-42b9-4b99-acec-fbca31cca652) 29 | 30 | ### Step 4 31 | 32 | Open a terminal in VSCODE, or exec into the container directly as follows: 33 | 34 | ```sh 35 | # you don't need to do this if you open the terminal from vscode 36 | # the container name here is "firewood-app-1"; you should be able to see this in docker-desktop 37 | docker exec -it --privileged -u root firewood-app-1 zsh 38 | ``` 39 | 40 | Once you're in the terminal, you'll want to install the Rust toolset. You can [find instructions here](https://rustup.rs/). 41 | 42 | **!!! IMPORTANT !!!** 43 | 44 | Make sure you read the output of any commands that you run. `rustup` will likely ask you to `source` a file to add some tools to your `PATH`. 45 | 46 | You'll also need to install all the regular Linux dependencies (if anything is missing from this list, please add it to this README): 47 | 48 | ```sh 49 | apt update 50 | apt install vim 51 | apt install build-essential 52 | apt install protobuf-compiler 53 | ``` 54 | 55 | ### Step 5 56 | 57 | **!!! IMPORTANT !!!** 58 | 59 | You need to create a separate `CARGO_TARGET_DIR` that isn't volume-mounted onto the host. `VirtioFS` (the default file-system) has some concurrency issues when dealing with sequential writes and reads to a volume that is mounted to the host. For example, you can use `/root/target`. 60 | 61 | For step-by-step debugging and development directly in the container, you will also **need to make sure that `rust-analyzer` is configured to point to the new target directory instead of the default**. 62 | 63 | There are a couple of places where this can be set up. If you're a `zsh` user, you should add `export CARGO_TARGET_DIR=/root/target` to either `/root/.zshrc` or `/root/.bashrc`. 64 | After adding the line, don't forget to `source` the file to make sure your current session is updated. 65 | 66 | ### Step 6 67 | 68 | Navigate to `/com.docker.devenvironments.code` and run `cargo test`. If it worked, you are most of the way there! If it did not work, there are a couple of common issues. If the code will not compile, it's possible that your target directory isn't set up properly. Check inside `/root/target` to see if there are any build artifacts. If not, you might need to call `source ~/.zshrc` again (substituting the rc file for your preferred shell).
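Before moving on to editor configuration, you can sanity-check the target-directory setup from inside the container. This is just a quick check, assuming you exported `CARGO_TARGET_DIR=/root/target` as described in Step 5:

```sh
# should print /root/target if your shell rc file has been sourced
echo "$CARGO_TARGET_DIR"
# after running `cargo test`, build artifacts should land here instead of the mounted volume
ls /root/target
```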
69 | 70 | Now, for VSCode, you need to configure your `rust-analyzer` in the "remote-environment" (the Docker container). There are a couple of places to do this. First, open `/root/.vscode-server/Machine/settings.json` and make sure that you have the following entry: 71 | 72 | ```json 73 | { 74 | "rust-analyzer.cargo.extraEnv": { 75 | "CARGO_TARGET_DIR": "/root/target" 76 | } 77 | } 78 | ``` 79 | 80 | Then, make sure that the terminal used by the VSCode instance (on the host system) is the same as your preferred terminal in the container, so that things work as expected. [Here are the docs](https://code.visualstudio.com/docs/terminal/profiles) to help you with setting up the proper profile. 81 | 82 | And that should be enough to get you started! Feel free to open an issue if you need any help debugging. 83 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Firewood: Compaction-Less Database Optimized for Efficiently Storing Recent Merkleized Blockchain State 2 | 3 | ![Github Actions](https://github.com/ava-labs/firewood/actions/workflows/ci.yaml/badge.svg?branch=main) 4 | [![Ecosystem license](https://img.shields.io/badge/License-Ecosystem-blue.svg)](./LICENSE.md) 5 | 6 | > :warning: Firewood is alpha-level software and is not ready for production 7 | > use. The Firewood API and on-disk state representation may change with 8 | > little to no warning. 9 | 10 | Firewood is an embedded key-value store, optimized to store recent Merkleized blockchain 11 | state with minimal overhead. Firewood is implemented from the ground up to directly 12 | store trie nodes on-disk. Unlike most state management approaches in the field, 13 | it is not built on top of a generic KV store such as LevelDB/RocksDB. Firewood, like a 14 | B+-tree based database, directly uses the trie structure as the index on-disk. Thus, 15 | there is no additional “emulation” of the logical trie to flatten out the data structure 16 | to feed into the underlying database that is unaware of the data being stored. The convenient 17 | byproduct of this approach is that iteration is still fast (for serving state sync queries) 18 | but compaction is not required to maintain the index. Firewood was first conceived to provide 19 | a very fast storage layer for the EVM but could be used on any blockchain that 20 | requires an authenticated state. 21 | 22 | Firewood only attempts to store recent revisions on-disk and will actively clean up 23 | unused data when revisions expire. Firewood keeps some configurable number of previous states in memory and on disk to power state sync (which may occur at a few roots behind the current state). To do this, a new root is always created for each revision that can reference either new nodes from this revision or nodes from a prior revision. When creating a revision, a list of nodes that are no longer needed is computed and saved to disk in a future-delete log (FDL) as well as kept in memory. When a revision expires, the nodes that were deleted when it was created are returned to the free space. 24 | 25 | Firewood guarantees recoverability by not referencing the new nodes in a new revision before they are flushed to disk, as well as carefully managing the free list during the creation and expiration of revisions.
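To make the revision lifecycle concrete, here is a minimal sketch of the propose-then-commit write path, adapted from the benchmark code in this repository. Opening the database handle is elided, and the key/value bytes are placeholders; see the examples directory for complete programs.

```rust
use firewood::db::{BatchOp, Db};
use firewood::v2::api::{Db as _, Proposal as _};

// Apply one batch of operations: build a proposal against the latest
// revision, then commit it to create a new revision.
async fn put_one(db: &Db) -> Result<(), Box<dyn std::error::Error>> {
    // a batch is an ordered set of Put/Delete operations; this one holds a
    // single (placeholder) Put
    let batch = vec![BatchOp::Put {
        key: b"some-key".to_vec(),
        value: b"some-value".to_vec(),
    }];
    // the proposal references the new nodes, but nothing is durable yet
    let proposal = db.propose(batch).await?;
    // committing makes the proposal the most recent revision
    proposal.commit().await?;
    Ok(())
}
```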
26 | 27 | ## Architecture Diagram 28 | 29 | ![architecture diagram](./docs/assets/architecture.svg) 30 | 31 | ## Terminology 32 | 33 | - `Revision` - A historical point-in-time state/version of the trie. This 34 | represents the entire trie, including all `Key`/`Value`s at that point 35 | in time, and all `Node`s. 36 | - `View` - This is the interface to read from a `Revision` or a `Proposal`. 37 | - `Node` - A node is a portion of a trie. A trie consists of nodes that are linked 38 | together. Nodes can point to other nodes and/or contain `Key`/`Value` pairs. 39 | - `Hash` - In this context, this refers to the merkle hash for a specific node. 40 | - `Root Hash` - The hash of the root node for a specific revision. 41 | - `Key` - Represents an individual byte array used to index into a trie. A `Key` 42 | usually has a specific `Value`. 43 | - `Value` - Represents a byte array for the value of a specific `Key`. Values can 44 | contain 0-N bytes. In particular, a zero-length `Value` is valid. 45 | - `Key Proof` - A proof that a `Key` exists within a specific revision of a trie. 46 | This includes the hash for the node containing the `Key` as well as all parents. 47 | - `Range Proof` - A proof that consists of two `Key Proof`s, one for the start of 48 | the range, and one for the end of the range, as well as a list of all `Key`/`Value` 49 | pairs in between the two. A `Range Proof` can be validated independently of an 50 | actual database by constructing a trie from the `Key`/`Value`s provided. 51 | - `Change Proof` - A proof that consists of a set of all changes between two 52 | revisions. 53 | - `Put` - An operation for a `Key`/`Value` pair. A put means "create if it doesn't 54 | exist, or update it if it does." A put operation is how you add a `Value` for a 55 | specific `Key`. 56 | - `Delete` - An operation indicating that a `Key` should be removed from the trie. 57 | - `Batch Operation` - An operation of either `Put` or `Delete`. 58 | - `Batch` - An ordered set of `Batch Operation`s. 59 | - `Proposal` - A proposal consists of a base `Root Hash` and a `Batch`, but is not 60 | yet committed to the trie. In Firewood's most recent API, a `Proposal` is required 61 | to `Commit`. 62 | - `Commit` - The operation of applying one or more `Proposal`s to the most recent 63 | `Revision`. 64 | 65 | ## Roadmap 66 | 67 | - [X] Complete the revision manager 68 | - [X] Complete the API implementation 69 | - [X] Implement a node cache 70 | - [ ] Complete the proof code 71 | - [ ] Hook up the RPC 72 | 73 | ## Build 74 | 75 | In order to build firewood, the following dependencies must be installed: 76 | 77 | - `protoc` See [installation instructions](https://grpc.io/docs/protoc-installation/). 78 | - `cargo` See [installation instructions](https://doc.rust-lang.org/cargo/getting-started/installation.html). 79 | - `make` See [download instructions](https://www.gnu.org/software/make/#download) or run `sudo apt install build-essential` on Linux. 80 | 81 | ## Run 82 | 83 | There are several examples in the examples directory that simulate real-world 84 | use cases. Try running them via the command line, e.g. `cargo run --release 85 | --example insert`. 86 | 87 | For maximum performance, use `cargo run --profile maxperf` instead, which enables maximum 88 | link-time compiler optimizations, but takes a lot longer to compile. 89 | 90 | ## Logging 91 | 92 | If you want logging, enable the `logging` feature flag, and then set RUST\_LOG accordingly.
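For example, to see debug output while running one of the examples (assuming the feature is enabled for your build):

```sh
# scope debug logging to the firewood crate; plain RUST_LOG=debug also works
RUST_LOG=firewood=debug cargo run --release --example insert
```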
93 | See the documentation for [env\_logger](https://docs.rs/env_logger/latest/env_logger/) for specifics. 94 | We currently have very few logging statements, but this is useful for print-style debugging. 95 | 96 | ## Release 97 | 98 | See the [release documentation](./RELEASE.md) for detailed information on how to release Firewood. 99 | 100 | ## CLI 101 | 102 | Firewood comes with a CLI tool called `fwdctl` that enables one to create and interact with a local instance of a Firewood database. For more information, see the [fwdctl README](fwdctl/README.md). 103 | 104 | ## Test 105 | 106 | ```sh 107 | cargo test --release 108 | ``` 109 | 110 | ## License 111 | 112 | Firewood is licensed by the Ecosystem License. For more information, see the 113 | [LICENSE file](./LICENSE.md). 114 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Releasing firewood 2 | 3 | Releasing firewood is straightforward and can be done entirely in CI. 4 | 5 | Firewood is made up of several sub-projects in a workspace. Each project is in 6 | its own crate and has an independent version. 7 | 8 | The first step in drafting a release is ensuring all crates within the firewood 9 | project are using the version of the new release. There is a utility to ensure 10 | all versions are updated simultaneously in `cargo-workspace-version`. To use it 11 | to update to 0.0.5, for example: 12 | 13 | ```sh 14 | cargo install cargo-workspace-version 15 | cargo workspace-version update v0.0.5 16 | ``` 17 | 18 | See the [source code](https://github.com/ava-labs/cargo-workspace-version) for 19 | more information on the tool. 20 | 21 | > ❗ Be sure to update the versions of all sub-projects before creating a new 22 | > release. Open a PR with the updated versions and merge it before continuing to 23 | > the next step. 24 | 25 | To trigger a release, simply push a semver-compatible tag to the main branch, 26 | for example `v0.0.5`. The CI will automatically publish a draft release which 27 | consists of release notes and changes. 28 | 29 | ## Changelog 30 | 31 | To build the changelog, see git-cliff.org. 
Short version: 32 | 33 | ```sh 34 | cargo install git-cliff 35 | git cliff --tag v0.0.5 | sed -e 's/_/\\_/g' > CHANGELOG.md 36 | ``` 37 | -------------------------------------------------------------------------------- /benchmark/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "benchmark" 3 | version = "0.0.5" 4 | edition = "2024" 5 | rust-version = "1.85.0" 6 | 7 | [dependencies] 8 | firewood = { path = "../firewood" } 9 | hex = "0.4.3" 10 | clap = { version = "4.5.0", features = ['derive', 'string'] } 11 | sha2 = "0.10.8" 12 | metrics = "0.24.1" 13 | metrics-util = "0.19.0" 14 | metrics-exporter-prometheus = "0.17.0" 15 | tokio = { version = "1.36.0", features = ["rt", "sync", "macros", "rt-multi-thread"] } 16 | rand = "0.9.0" 17 | rand_distr = "0.5.0" 18 | pretty-duration = "0.1.1" 19 | env_logger = "0.11.5" 20 | log = "0.4.20" 21 | fastrace = { version = "0.7.4", features = ["enable"] } 22 | fastrace-opentelemetry = { version = "0.12.0" } 23 | opentelemetry-otlp = { version = "0.30.0", features = ["grpc-tonic"] } 24 | opentelemetry = "0.30.0" 25 | opentelemetry_sdk = "0.30.0" 26 | strum = "0.27.0" 27 | 28 | [target.'cfg(unix)'.dependencies] 29 | tikv-jemallocator = "0.6.0" 30 | 31 | [features] 32 | logger = ["firewood/logger"] 33 | -------------------------------------------------------------------------------- /benchmark/cloud-config.txt: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | write_files: 3 | - path: /etc/systemd/system/grafana-server.service.d/override.conf 4 | owner: root:root 5 | permissions: '0644' 6 | content: | 7 | [Service] 8 | CapabilityBoundingSet=CAP_NET_BIND_SERVICE 9 | AmbientCapabilities=CAP_NET_BIND_SERVICE 10 | PrivateUsers=false 11 | - path: /run/firewood/build-firewood.sh 12 | permissions: '0755' 13 | content: | 14 | #!/bin/bash 15 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 16 | . 
"$HOME/.cargo/env" 17 | git clone https://github.com/ava-labs/firewood.git 18 | cd firewood 19 | git checkout rkuris/prometheus 20 | cargo build --release 21 | - path: /etc/prometheus/prometheus.yml.addon 22 | content: |2 23 | - job_name: firewood 24 | static_configs: 25 | - targets: ['localhost:3000'] 26 | apt: 27 | sources: 28 | grafana: 29 | source: deb https://apt.grafana.com stable main 30 | key: | 31 | -----BEGIN PGP PUBLIC KEY BLOCK----- 32 | 33 | mQGNBGTnhmkBDADUE+SzjRRyitIm1siGxiHlIlnn6KO4C4GfEuV+PNzqxvwYO+1r 34 | mcKlGDU0ugo8ohXruAOC77Kwc4keVGNU89BeHvrYbIftz/yxEneuPsCbGnbDMIyC 35 | k44UOetRtV9/59Gj5YjNqnsZCr+e5D/JfrHUJTTwKLv88A9eHKxskrlZr7Un7j3i 36 | Ef3NChlOh2Zk9Wfk8IhAqMMTferU4iTIhQk+5fanShtXIuzBaxU3lkzFSG7VuAH4 37 | CBLPWitKRMn5oqXUE0FZbRYL/6Qz0Gt6YCJsZbaQ3Am7FCwWCp9+ZHbR9yU+bkK0 38 | Dts4PNx4Wr9CktHIvbypT4Lk2oJEPWjcCJQHqpPQZXbnclXRlK5Ea0NVpaQdGK+v 39 | JS4HGxFFjSkvTKAZYgwOk93qlpFeDML3TuSgWxuw4NIDitvewudnaWzfl9tDIoVS 40 | Bb16nwJ8bMDzovC/RBE14rRKYtMLmBsRzGYHWd0NnX+FitAS9uURHuFxghv9GFPh 41 | eTaXvc4glM94HBUAEQEAAbQmR3JhZmFuYSBMYWJzIDxlbmdpbmVlcmluZ0BncmFm 42 | YW5hLmNvbT6JAdQEEwEKAD4WIQS1Oud7rbYwpoMEYAWWP6J3EEWFRQUCZOeGaQIb 43 | AwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCWP6J3EEWFRUiADACa 44 | i+xytv2keEFJWjXNnFAx6/obnHRcXOI3w6nH/zL8gNI7YN5jcdQT2NYvKVYTb3fW 45 | GuMsjHWgat5Gq3AtJrOKABpZ6qeYNPk0Axn/dKtOTwXjZ4pKX3bbUYvVfs0fCEZv 46 | B0HHIj2wI9kgMpoTrkj22LE8layZTPOoQ+3/FbLzS8hN3CYZj25mHN7bpZq8EbV3 47 | 8FW9EU0HM0tg6CvoxkRiVqAuAC0KnVIZAdhD4dlYKuncq64nMvT1A5wxSYbnE+uf 48 | mnWQQhhS6BOwRqN054yw1FrWNDFsvnOSHmr8dIiriv+aZYvx5JQFJ7oZP3LwdYyg 49 | ocQcAJA8HFTIk3P6uJiIF/zdDzocgdKs+IYDoId0hxX7sGCvqdrsveq8n3m7uQiN 50 | 7FvSiV0eXIdV4F7340kc8EKiYwpuYSaZX0UWKLenzlUvD+W4pZCWtoXzPsW7PKUt 51 | q1xdW0+NY+AGLCvSJCc5F4S5kFCObfBAYBbldjwwJFocdq/YOvvWYTPyV7kJeJS5 52 | AY0EZOeGaQEMALNIFUricEIwtZiX7vSDjwxobbqPKqzdek8x3ud0CyYlrbGHy0k+ 53 | FDEXstjJQQ1s9rjJSu3sv5wyg9GDAUH3nzO976n/ZZvKPti3p2XU2UFx5gYkaaFV 54 | D56yYxqGY0YU5ft6BG+RUz3iEPg3UBUzt0sCIYnG9+CsDqGOnRYIIa46fu2/H9Vu 55 | 8JvvSq9xbsK9CfoQDkIcoQOixPuI4P7eHtswCeYR/1LUTWEnYQWsBCf57cEpzR6t 56 | 7mlQnzQo9z4i/kp4S0ybDB77wnn+isMADOS+/VpXO+M7Zj5tpfJ6PkKch3SGXdUy 57 | 3zht8luFOYpJr2lVzp7n3NwB4zW08RptTzTgFAaW/NH2JjYI+rDvQm4jNs08Dtsp 58 | nm4OQvBA9Df/6qwMEOZ9i10ixqk+55UpQFJ3nf4uKlSUM7bKXXVcD/odq804Y/K4 59 | y3csE059YVIyaPexEvYSYlHE2odJWRg2Q1VehmrOSC8Qps3xpU7dTHXD74ZpaYbr 60 | haViRS5v/lCsiwARAQABiQG8BBgBCgAmFiEEtTrne622MKaDBGAFlj+idxBFhUUF 61 | AmTnhmkCGwwFCQPCZwAACgkQlj+idxBFhUUNbQv8DCcfi3GbWfvp9pfY0EJuoFJX 62 | LNgci7z7smXq7aqDp2huYQ+MulnPAydjRCVW2fkHItF2Ks6l+2/8t5Xz0eesGxST 63 | xTyR31ARENMXaq78Lq+itZ+usOSDNuwJcEmJM6CceNMLs4uFkX2GRYhchkry7P0C 64 | lkLxUTiB43ooi+CqILtlNxH7kM1O4Ncs6UGZMXf2IiG9s3JDCsYVPkC5QDMOPkTy 65 | 2ZriF56uPerlJveF0dC61RZ6RlM3iSJ9Fwvea0Oy4rwkCcs5SHuwoDTFyxiyz0QC 66 | 9iqi3fG3iSbLvY9UtJ6X+BtDqdXLAT9Pq527mukPP3LwpEqFVyNQKnGLdLOu2YXc 67 | TWWWseSQkHRzBmjD18KTD74mg4aXxEabyT4snrXpi5+UGLT4KXGV5syQO6Lc0OGw 68 | 9O/0qAIU+YW7ojbKv8fr+NB31TGhGYWASjYlN1NvPotRAK6339O0/Rqr9xGgy3AY 69 | SR+ic2Y610IM7xccKuTVAW9UofKQwJZChqae9VVZ 70 | =J9CI 71 | -----END PGP PUBLIC KEY BLOCK----- 72 | 73 | package_update: true 74 | package_upgrade: true 75 | packages: 76 | - git 77 | - protobuf-compiler 78 | - build-essential 79 | - apt-transport-https 80 | - grafana 81 | - prometheus 82 | - net-tools 83 | runcmd: 84 | - [ perl, -pi, -e, "s/^;?http_port = .*/http_port = 80/", /etc/grafana/grafana.ini ] 85 | - [ dd, if=/etc/prometheus/prometheus.yml.addon, of=/etc/prometheus/prometheus.yml, conv=notrunc, oflag=append ] 86 | - [ systemctl, daemon-reload ] 87 | - [ 
systemctl, enable, grafana-server ] 88 | - [ systemctl, start, grafana-server ] 89 | - [ sudo, -l, -u, ubuntu, /run/firewood/build-firewood.sh ] 90 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/README.md: -------------------------------------------------------------------------------- 1 | # Setup Scripts 2 | 3 | This directory contains the scripts needed to set up the firewood benchmarks, as follows: 4 | 5 | ```bash 6 | sudo bash build-environment.sh 7 | ``` 8 | 9 | This script sets up the build environment, including installing the firewood build dependencies. 10 | 11 | ```bash 12 | sudo bash install-grafana.sh 13 | ``` 14 | 15 | This script sets up grafana to listen on port 3000 for firewood. It also sets up listening 16 | for coreth as well, on port 6060, with the special metrics path coreth expects. 17 | 18 | ```bash 19 | bash build-firewood.sh 20 | ``` 21 | 22 | This script checks out and builds firewood. It assumes you have already set up the build environment earlier. 23 | 24 | The final script, `run-benchmarks.sh`, is a set of commands that can be copied/pasted to run individual 25 | benchmarks of different sizes. 26 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/build-environment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script sets up the build environment, including installing the firewood build dependencies. 3 | set -o errexit 4 | 5 | if [ "$EUID" -ne 0 ]; then 6 | echo "This script must be run as root" >&2 7 | exit 1 8 | fi 9 | 10 | apt upgrade -y 11 | 12 | # install the build dependency packages 13 | pkgs=(git protobuf-compiler build-essential apt-transport-https net-tools zfsutils-linux) 14 | install_pkgs=() 15 | for pkg in "${pkgs[@]}"; do 16 | if ! dpkg -s "$pkg" > /dev/null 2>&1; then 17 | install_pkgs+=("$pkg") 18 | fi 19 | done 20 | if [ "${#install_pkgs[@]}" -gt 0 ]; then 21 | apt-get install -y "${install_pkgs[@]}" 22 | fi 23 | 24 | # If there is an NVMe device, format it and mount it to /mnt/nvme/ubuntu/firewood 25 | # this happens on amazon ec2 instances 26 | NVME_DEV="$(realpath /dev/disk/by-id/nvme-Amazon_EC2_NVMe_Instance_Storage_* | uniq)" 27 | if [ -n "$NVME_DEV" ]; then 28 | mkfs.ext4 -E nodiscard -i 6291456 "$NVME_DEV" 29 | NVME_MOUNT=/mnt/nvme 30 | mkdir -p "$NVME_MOUNT" 31 | mount -o noatime "$NVME_DEV" "$NVME_MOUNT" 32 | echo "$NVME_DEV $NVME_MOUNT ext4 noatime 0 0" >> /etc/fstab 33 | mkdir -p "$NVME_MOUNT/ubuntu/firewood" 34 | chown ubuntu:ubuntu "$NVME_MOUNT/ubuntu" "$NVME_MOUNT/ubuntu/firewood" 35 | ln -s "$NVME_MOUNT/ubuntu/firewood" /home/ubuntu/firewood 36 | fi 37 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/build-firewood.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | 4 | if [ "$EUID" -eq 0 ]; then 5 | echo "This script should be run as a non-root user" 6 | exit 1 7 | fi 8 | 9 | # install rust 10 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 11 | . "$HOME/.cargo/env" 12 | 13 | # clone the firewood repository 14 | if [ ! -d "$HOME/firewood" ]; then 15 | mkdir -p "$HOME/firewood" 16 | fi 17 | pushd "$HOME/firewood" 18 | 19 | git clone https://github.com/ava-labs/firewood.git . 
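# note: the trailing "." clones into $HOME/firewood itself rather than creating a nested firewood/ subdirectory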
20 | 21 | # build the firewood binary 22 | cargo build --profile maxperf 23 | popd 24 | 25 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/install-grafana.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | 4 | # install the keyrings needed to validate the grafana apt repository 5 | if ! [ -d /etc/apt/keyrings ]; then 6 | mkdir -p /etc/apt/keyrings/ 7 | fi 8 | if ! [ -f /etc/apt/keyrings/grafana.gpg ]; then 9 | wget -q -O - https://apt.grafana.com/gpg.key | gpg --dearmor | sudo tee /etc/apt/keyrings/grafana.gpg > /dev/null 10 | echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list 11 | fi 12 | apt-get update 13 | 14 | # set up the systemd configuration to allow grafana to bind to port 80 15 | if ! [ -d /etc/systemd/system/grafana-server.service.d ]; then 16 | mkdir -p /etc/systemd/system/grafana-server.service.d 17 | fi 18 | 19 | if ! [ -f /etc/systemd/system/grafana-server.service.d/override.conf ]; then 20 | cat > /etc/systemd/system/grafana-server.service.d/override.conf < /dev/null 2>&1; then 37 | install_pkgs+=("$pkg") 38 | fi 39 | done 40 | if [ "${#install_pkgs[@]}" -gt 0 ]; then 41 | apt-get install -y "${install_pkgs[@]}" 42 | fi 43 | 44 | # configure grafana to listen on port 80 45 | if ! grep -q '^http_port = 80$' /etc/grafana/grafana.ini; then 46 | perl -pi -e 's/^;?http_port = .*/http_port = 80/' /etc/grafana/grafana.ini 47 | fi 48 | 49 | # configure prometheus to scrape firewood 50 | if ! grep -q '^ - job_name: firewood$' /etc/prometheus/prometheus.yml; then 51 | cat >> /etc/prometheus/prometheus.yml <> /etc/default/prometheus-node-exporter < Result<(), Box> { 22 | let keys = args.global_opts.batch_size; 23 | let start = Instant::now(); 24 | 25 | for key in 0..args.global_opts.number_of_batches { 26 | let root = Span::root(func_path!(), SpanContext::random()); 27 | let _guard = root.set_local_parent(); 28 | 29 | let batch = Self::generate_inserts(key * keys, args.global_opts.batch_size).collect(); 30 | 31 | let proposal = db.propose(batch).await.expect("proposal should succeed"); 32 | proposal.commit().await?; 33 | } 34 | let duration = start.elapsed(); 35 | info!( 36 | "Generated and inserted {} batches of size {keys} in {}", 37 | args.global_opts.number_of_batches, 38 | pretty_duration(&duration, None) 39 | ); 40 | 41 | Ok(()) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /benchmark/src/single.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
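//! The "single" benchmark workload: repeatedly overwrites the same
//! `batch_size` keys (the SHA-256 digests of their indices), committing one
//! proposal per batch until the configured duration has elapsed.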
3 | 4 | use crate::TestRunner; 5 | use firewood::db::{BatchOp, Db}; 6 | use firewood::v2::api::{Db as _, Proposal as _}; 7 | use log::debug; 8 | use pretty_duration::pretty_duration; 9 | use sha2::{Digest, Sha256}; 10 | use std::error::Error; 11 | use std::time::Instant; 12 | 13 | #[derive(Clone)] 14 | pub struct Single; 15 | 16 | impl TestRunner for Single { 17 | async fn run(&self, db: &Db, args: &crate::Args) -> Result<(), Box> { 18 | let start = Instant::now(); 19 | let inner_keys: Vec<_> = (0..args.global_opts.batch_size) 20 | .map(|i| Sha256::digest(i.to_ne_bytes())) 21 | .collect(); 22 | let mut batch_id = 0; 23 | 24 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes { 25 | let batch = inner_keys 26 | .iter() 27 | .map(|key| BatchOp::Put { 28 | key, 29 | value: vec![batch_id as u8], 30 | }) 31 | .collect(); 32 | let proposal = db.propose(batch).await.expect("proposal should succeed"); 33 | proposal.commit().await?; 34 | 35 | if log::log_enabled!(log::Level::Debug) && batch_id % 1000 == 999 { 36 | debug!( 37 | "completed {} batches in {}", 38 | 1 + batch_id, 39 | pretty_duration(&start.elapsed(), None) 40 | ); 41 | } 42 | batch_id += 1; 43 | } 44 | Ok(()) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /benchmark/src/tenkrandom.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::error::Error; 5 | use std::time::Instant; 6 | 7 | use firewood::db::{BatchOp, Db}; 8 | use firewood::logger::debug; 9 | use firewood::v2::api::{Db as _, Proposal as _}; 10 | 11 | use crate::{Args, TestRunner}; 12 | use sha2::{Digest, Sha256}; 13 | 14 | #[derive(Clone, Default)] 15 | pub struct TenKRandom; 16 | 17 | impl TestRunner for TenKRandom { 18 | async fn run(&self, db: &Db, args: &Args) -> Result<(), Box> { 19 | let mut low = 0; 20 | let mut high = args.global_opts.number_of_batches * args.global_opts.batch_size; 21 | let twenty_five_pct = args.global_opts.batch_size / 4; 22 | 23 | let start = Instant::now(); 24 | 25 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes { 26 | let batch: Vec> = Self::generate_inserts(high, twenty_five_pct) 27 | .chain(generate_deletes(low, twenty_five_pct)) 28 | .chain(generate_updates(low + high / 2, twenty_five_pct * 2, low)) 29 | .collect(); 30 | let proposal = db.propose(batch).await.expect("proposal should succeed"); 31 | proposal.commit().await?; 32 | low += twenty_five_pct; 33 | high += twenty_five_pct; 34 | } 35 | Ok(()) 36 | } 37 | } 38 | fn generate_updates( 39 | start: u64, 40 | count: u64, 41 | low: u64, 42 | ) -> impl Iterator, Box<[u8]>>> { 43 | let hash_of_low: Box<[u8]> = Sha256::digest(low.to_ne_bytes())[..].into(); 44 | (start..start + count) 45 | .map(|inner_key| { 46 | let digest = Sha256::digest(inner_key.to_ne_bytes())[..].into(); 47 | debug!( 48 | "updating {:?} with digest {} to {}", 49 | inner_key, 50 | hex::encode(&digest), 51 | hex::encode(&hash_of_low) 52 | ); 53 | (digest, hash_of_low.clone()) 54 | }) 55 | .map(|(key, value)| BatchOp::Put { key, value }) 56 | .collect::>() 57 | .into_iter() 58 | } 59 | fn generate_deletes(start: u64, count: u64) -> impl Iterator, Box<[u8]>>> { 60 | (start..start + count) 61 | .map(|key| { 62 | let digest = Sha256::digest(key.to_ne_bytes())[..].into(); 63 | debug!("deleting {:?} with digest {}", key, hex::encode(&digest)); 64 | 
#[allow(clippy::let_and_return)] 65 | digest 66 | }) 67 | .map(|key| BatchOp::Delete { key }) 68 | .collect::>() 69 | .into_iter() 70 | } 71 | -------------------------------------------------------------------------------- /benchmark/src/zipf.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use crate::TestRunner; 5 | use firewood::db::{BatchOp, Db}; 6 | use firewood::v2::api::{Db as _, Proposal as _}; 7 | use log::{debug, trace}; 8 | use pretty_duration::pretty_duration; 9 | use rand::prelude::*; 10 | use sha2::{Digest, Sha256}; 11 | use std::collections::HashSet; 12 | use std::error::Error; 13 | use std::time::Instant; 14 | 15 | #[derive(clap::Args, Debug, PartialEq)] 16 | pub struct Args { 17 | #[arg(short, long, help = "zipf exponent", default_value_t = 1.2)] 18 | exponent: f64, 19 | } 20 | 21 | #[derive(Clone)] 22 | pub struct Zipf; 23 | 24 | impl TestRunner for Zipf { 25 | async fn run(&self, db: &Db, args: &crate::Args) -> Result<(), Box> { 26 | let exponent = if let crate::TestName::Zipf(args) = &args.test_name { 27 | args.exponent 28 | } else { 29 | unreachable!() 30 | }; 31 | let rows = (args.global_opts.number_of_batches * args.global_opts.batch_size) as f64; 32 | let zipf = rand_distr::Zipf::new(rows, exponent).unwrap(); 33 | let start = Instant::now(); 34 | let mut batch_id = 0; 35 | 36 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes { 37 | let batch: Vec> = 38 | generate_updates(batch_id, args.global_opts.batch_size as usize, zipf).collect(); 39 | if log::log_enabled!(log::Level::Debug) { 40 | let mut distinct = HashSet::new(); 41 | for op in &batch { 42 | match op { 43 | BatchOp::Put { key, value: _ } => { 44 | distinct.insert(key); 45 | } 46 | _ => unreachable!(), 47 | } 48 | } 49 | debug!( 50 | "inserting batch {} with {} distinct data values", 51 | batch_id, 52 | distinct.len() 53 | ); 54 | } 55 | let proposal = db.propose(batch).await.expect("proposal should succeed"); 56 | proposal.commit().await?; 57 | 58 | if log::log_enabled!(log::Level::Debug) { 59 | debug!( 60 | "completed batch {} in {}", 61 | batch_id, 62 | pretty_duration(&start.elapsed(), None) 63 | ); 64 | } 65 | batch_id += 1; 66 | } 67 | Ok(()) 68 | } 69 | } 70 | fn generate_updates( 71 | batch_id: u32, 72 | batch_size: usize, 73 | zipf: rand_distr::Zipf, 74 | ) -> impl Iterator, Vec>> { 75 | let hash_of_batch_id = Sha256::digest(batch_id.to_ne_bytes()).to_vec(); 76 | let rng = rand::rng(); 77 | zipf.sample_iter(rng) 78 | .take(batch_size) 79 | .map(|inner_key| { 80 | let digest = Sha256::digest((inner_key as u64).to_ne_bytes()).to_vec(); 81 | trace!( 82 | "updating {:?} with digest {} to {}", 83 | inner_key, 84 | hex::encode(&digest), 85 | hex::encode(&hash_of_batch_id) 86 | ); 87 | (digest, hash_of_batch_id.clone()) 88 | }) 89 | .map(|(key, value)| BatchOp::Put { key, value }) 90 | .collect::>() 91 | .into_iter() 92 | } 93 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # git-cliff ~ configuration file 2 | # https://git-cliff.org/docs/configuration 3 | 4 | 5 | [changelog] 6 | # A Tera template to be rendered as the changelog's footer. 
7 | # See https://keats.github.io/tera/docs/#introduction 8 | header = """ 9 | # Changelog\n 10 | All notable changes to this project will be documented in this file.\n 11 | """ 12 | # A Tera template to be rendered for each release in the changelog. 13 | # See https://keats.github.io/tera/docs/#introduction 14 | body = """ 15 | {% if version %}\ 16 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 17 | {% else %}\ 18 | ## [unreleased] 19 | {% endif %}\ 20 | {% for group, commits in commits | group_by(attribute="group") %} 21 | ### {{ group | striptags | trim | upper_first }} 22 | {% for commit in commits %} 23 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\ 24 | {% if commit.breaking %}[**breaking**] {% endif %}\ 25 | {{ commit.message | upper_first }}\ 26 | {% endfor %} 27 | {% endfor %}\n 28 | """ 29 | # A Tera template to be rendered as the changelog's footer. 30 | # See https://keats.github.io/tera/docs/#introduction 31 | footer = """ 32 | 33 | """ 34 | # Remove leading and trailing whitespaces from the changelog's body. 35 | trim = true 36 | # Render body even when there are no releases to process. 37 | render_always = true 38 | # An array of regex based postprocessors to modify the changelog. 39 | postprocessors = [ 40 | # Replace the placeholder with a URL. 41 | #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, 42 | ] 43 | # render body even when there are no releases to process 44 | # render_always = true 45 | # output file path 46 | # output = "test.md" 47 | 48 | [git] 49 | # Parse commits according to the conventional commits specification. 50 | # See https://www.conventionalcommits.org 51 | conventional_commits = true 52 | # Exclude commits that do not match the conventional commits specification. 53 | filter_unconventional = true 54 | # Require all commits to be conventional. 55 | # Takes precedence over filter_unconventional. 56 | require_conventional = false 57 | # Split commits on newlines, treating each line as an individual commit. 58 | split_commits = false 59 | # An array of regex based parsers to modify commit messages prior to further processing. 60 | commit_preprocessors = [ 61 | # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. 62 | #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, 63 | # Check spelling of the commit message using https://github.com/crate-ci/typos. 64 | # If the spelling is incorrect, it will be fixed automatically. 65 | #{ pattern = '.*', replace_command = 'typos --write-changes -' }, 66 | ] 67 | # Prevent commits that are breaking from being excluded by commit parsers. 68 | protect_breaking_commits = false 69 | # An array of regex based parsers for extracting data from the commit message. 70 | # Assigns commits to groups. 71 | # Optionally sets the commit's scope and can decide to exclude commits from further processing. 
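# for example, a commit titled "feat: add node cache" is grouped under "🚀 Features" by the first parser below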
72 | commit_parsers = [ 73 | { message = "^feat", group = "🚀 Features" }, 74 | { message = "^fix", group = "🐛 Bug Fixes" }, 75 | { message = "^doc", group = "📚 Documentation" }, 76 | { message = "^perf", group = "⚡ Performance" }, 77 | { message = "^refactor", group = "🚜 Refactor" }, 78 | { message = "^style", group = "🎨 Styling" }, 79 | { message = "^test", group = "🧪 Testing" }, 80 | { message = "^chore\\(release\\): prepare for", skip = true }, 81 | { message = "^chore\\(deps.*\\)", skip = true }, 82 | { message = "^chore\\(pr\\)", skip = true }, 83 | { message = "^chore\\(pull\\)", skip = true }, 84 | { message = "^chore|^ci", group = "⚙️ Miscellaneous Tasks" }, 85 | { body = ".*security", group = "🛡️ Security" }, 86 | { message = "^revert", group = "◀️ Revert" }, 87 | { message = ".*", group = "💼 Other" }, 88 | ] 89 | # Exclude commits that are not matched by any commit parser. 90 | filter_commits = false 91 | # An array of link parsers for extracting external references, and turning them into URLs, using regex. 92 | link_parsers = [] 93 | # Include only the tags that belong to the current branch. 94 | use_branch_tags = true 95 | # Order releases topologically instead of chronologically. 96 | topo_order = false 97 | # Order releases topologically instead of chronologically. 98 | topo_order_commits = true 99 | # Order of commits in each group/release within the changelog. 100 | # Allowed values: newest, oldest 101 | sort_commits = "oldest" 102 | # Process submodules commits 103 | recurse_submodules = false 104 | # Only process tags in this pattern 105 | tag_pattern = "v[0-9].*" 106 | 107 | -------------------------------------------------------------------------------- /ffi/.gitignore: -------------------------------------------------------------------------------- 1 | dbtest 2 | _obj 3 | -------------------------------------------------------------------------------- /ffi/.golangci.yaml: -------------------------------------------------------------------------------- 1 | # https://golangci-lint.run/usage/configuration/ 2 | run: 3 | timeout: 10m 4 | 5 | # If set we pass it to "go list -mod={option}". From "go help modules": 6 | # If invoked with -mod=readonly, the go command is disallowed from the implicit 7 | # automatic updating of go.mod described above. Instead, it fails when any changes 8 | # to go.mod are needed. This setting is most useful to check that go.mod does 9 | # not need updates, such as in a continuous integration and testing system. 10 | # If invoked with -mod=vendor, the go command assumes that the vendor 11 | # directory holds the correct copies of dependencies and ignores 12 | # the dependency descriptions in go.mod. 13 | # 14 | # Allowed values: readonly|vendor|mod 15 | # By default, it isn't set. 16 | modules-download-mode: readonly 17 | 18 | issues: 19 | # Make issues output unique by line. 20 | # Default: true 21 | uniq-by-line: false 22 | 23 | # Maximum issues count per one linter. 24 | # Set to 0 to disable. 25 | # Default: 50 26 | max-issues-per-linter: 0 27 | 28 | # Maximum count of issues with the same text. 29 | # Set to 0 to disable. 
30 | # Default: 3 31 | max-same-issues: 0 32 | 33 | # Enables skipping of directories: 34 | # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ 35 | # Default: true 36 | exclude-dirs-use-default: false 37 | 38 | linters: 39 | disable-all: true 40 | enable: 41 | - asciicheck 42 | - bodyclose 43 | - copyloopvar 44 | - depguard 45 | - dupword 46 | - dupl 47 | - errcheck 48 | - errname 49 | - errorlint 50 | - forbidigo 51 | - gci 52 | - goconst 53 | - gocritic 54 | # - err113 - encourages wrapping static errors 55 | - gofmt 56 | - gofumpt 57 | # - mnd - unnecessary magic numbers 58 | - goprintffuncname 59 | - gosec 60 | - gosimple 61 | - govet 62 | - importas 63 | - ineffassign 64 | # - lll line length linter 65 | - misspell 66 | - nakedret 67 | - nilerr 68 | - noctx 69 | - nolintlint 70 | - perfsprint 71 | - prealloc 72 | - predeclared 73 | - revive 74 | - spancheck 75 | - staticcheck 76 | - stylecheck 77 | - tagalign 78 | - testifylint 79 | - typecheck 80 | - unconvert 81 | - unparam 82 | - unused 83 | - usestdlibvars 84 | - whitespace 85 | 86 | linters-settings: 87 | depguard: 88 | rules: 89 | packages: 90 | deny: 91 | - pkg: "github.com/golang/mock/gomock" 92 | desc: go.uber.org/mock/gomock should be used instead. 93 | - pkg: "github.com/stretchr/testify/assert" 94 | desc: github.com/stretchr/testify/require should be used instead. 95 | - pkg: "io/ioutil" 96 | desc: io/ioutil is deprecated. Use package io or os instead. 97 | errorlint: 98 | # Check for plain type assertions and type switches. 99 | asserts: false 100 | # Check for plain error comparisons. 101 | comparison: false 102 | forbidigo: 103 | # Forbid the following identifiers (list of regexp). 104 | forbid: 105 | - 'require\.Error$(# ErrorIs should be used instead)?' 106 | - 'require\.ErrorContains$(# ErrorIs should be used instead)?' 107 | - 'require\.EqualValues$(# Equal should be used instead)?' 108 | - 'require\.NotEqualValues$(# NotEqual should be used instead)?' 109 | - '^(t|b|tb|f)\.(Fatal|Fatalf|Error|Errorf)$(# the require library should be used instead)?' 
110 | revive: 111 | rules: 112 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr 113 | - name: bool-literal-in-expr 114 | disabled: false 115 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return 116 | - name: early-return 117 | disabled: false 118 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines 119 | - name: empty-lines 120 | disabled: false 121 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format 122 | - name: string-format 123 | disabled: false 124 | arguments: 125 | - ["b.Logf[0]", "/.*%.*/", "no format directive, use b.Log instead"] 126 | - ["fmt.Errorf[0]", "/.*%.*/", "no format directive, use errors.New instead"] 127 | - ["fmt.Fprintf[1]", "/.*%.*/", "no format directive, use fmt.Fprint instead"] 128 | - ["fmt.Printf[0]", "/.*%.*/", "no format directive, use fmt.Print instead"] 129 | - ["fmt.Sprintf[0]", "/.*%.*/", "no format directive, use fmt.Sprint instead"] 130 | - ["log.Fatalf[0]", "/.*%.*/", "no format directive, use log.Fatal instead"] 131 | - ["log.Printf[0]", "/.*%.*/", "no format directive, use log.Print instead"] 132 | - ["t.Logf[0]", "/.*%.*/", "no format directive, use t.Log instead"] 133 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag 134 | - name: struct-tag 135 | disabled: false 136 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-naming 137 | - name: unexported-naming 138 | disabled: false 139 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error 140 | - name: unhandled-error 141 | # prefer the errcheck linter since it can be disabled directly with nolint directive 142 | # but revive's disable directive (e.g. //revive:disable:unhandled-error) is not 143 | # supported when run under golangci_lint 144 | disabled: true 145 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter 146 | - name: unused-parameter 147 | disabled: false 148 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-receiver 149 | - name: unused-receiver 150 | disabled: false 151 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break 152 | - name: useless-break 153 | disabled: false 154 | tagalign: 155 | align: true 156 | sort: true 157 | strict: true 158 | order: 159 | - serialize 160 | testifylint: 161 | # Enable all checkers (https://github.com/Antonboom/testifylint#checkers). 162 | # Default: false 163 | enable-all: true 164 | # Disable checkers by name 165 | # (in addition to default 166 | # suite-thelper 167 | # ). 
168 | disable: 169 | - go-require 170 | - float-compare -------------------------------------------------------------------------------- /ffi/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood-ffi" 3 | version = "0.0.5" 4 | edition = "2024" 5 | rust-version = "1.85.0" 6 | 7 | [lib] 8 | crate-type = ["staticlib"] 9 | 10 | [dependencies] 11 | libc = "0.2.2" 12 | firewood = { path = "../firewood" } 13 | metrics = "0.24.1" 14 | metrics-util = "0.19.0" 15 | chrono = "0.4.39" 16 | oxhttp = "0.3.0" 17 | coarsetime = "0.1.35" 18 | env_logger = {version = "0.11.7", optional = true} 19 | 20 | [target.'cfg(unix)'.dependencies] 21 | tikv-jemallocator = "0.6.0" 22 | 23 | [features] 24 | logger = ["dep:env_logger", "firewood/logger"] 25 | ethhash = ["firewood/ethhash"] 26 | 27 | [build-dependencies] 28 | cbindgen = "0.29.0" 29 | 30 | [lints.clippy] 31 | unwrap_used = "warn" 32 | indexing_slicing = "warn" 33 | explicit_deref_methods = "warn" 34 | missing_const_for_fn = "warn" 35 | pedantic = "warn" 36 | -------------------------------------------------------------------------------- /ffi/README.md: -------------------------------------------------------------------------------- 1 | # Firewood Golang FFI 2 | 3 | The FFI package provides a Golang FFI layer for Firewood. 4 | 5 | ## Building Firewood Golang FFI 6 | 7 | The Golang FFI layer uses CGO directives to locate a C-API-compatible binary built from Firewood. Firewood supports both seamless local development and a single-step compilation process for Go projects that depend on Firewood directly or transitively. 8 | 9 | To do this, [firewood.go](./firewood.go) includes CGO directives that add multiple search paths for the Firewood binary: the local `target/` build directory and `ffi/libs`. For the latter, the [attach-static-libs](../.github/workflows/attach-static-libs.yaml) GitHub Action pushes an FFI package with static libraries attached for the following supported architectures: 10 | 11 | - x86_64-unknown-linux-gnu 12 | - aarch64-unknown-linux-gnu 13 | - aarch64-apple-darwin 14 | - x86_64-apple-darwin 15 | 16 | to a separate repo [firewood-go](https://github.com/ava-labs/firewood-go) (to avoid including binaries in the Firewood repo). 17 | 18 | ### Local Development 19 | 20 | [firewood.go](./firewood.go) includes CGO directives that pick up builds in the `target/` directory. 21 | 22 | Firewood prioritizes builds in the following order: 23 | 24 | 1. maxperf 25 | 2. release 26 | 3. debug 27 | 28 | To use and test the Firewood FFI locally, you can run: 29 | 30 | ```bash 31 | cargo build --profile maxperf 32 | cd ffi 33 | go test 34 | ``` 35 | 36 | To use a local build of Firewood for a project that depends on Firewood, you must redirect the `go.mod` to use the local version of Firewood FFI, for example: 37 | 38 | ```bash 39 | go mod edit -replace github.com/ava-labs/firewood-go/ffi=/path/to/firewood/ffi 40 | go mod tidy 41 | ``` 42 | 43 | ### Production Development Flow 44 | 45 | Firewood pushes the FFI source code and attached static libraries to [firewood-go](https://github.com/ava-labs/firewood-go) via [attach-static-libs](../.github/workflows/attach-static-libs.yaml). 46 | 47 | This enables consumers to utilize it directly without forcing them to compile Firewood locally. Go programs running on supported architectures can utilize `firewood-go/ffi` just like any other dependency; a minimal usage sketch follows.
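For illustration, here is a minimal consumer program. This sketch is not part of the repository: the `firewood.db` file name and the example key/value pair are placeholders, but the calls match the API in [firewood.go](./firewood.go):

```go
package main

import (
	"fmt"
	"log"

	firewood "github.com/ava-labs/firewood-go/ffi"
)

func main() {
	conf := firewood.DefaultConfig()
	conf.Create = true // create the database file if it does not already exist

	db, err := firewood.New("firewood.db", conf)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Apply a batch of key/value updates; the new root hash is returned.
	root, err := db.Update([][]byte{[]byte("key")}, [][]byte{[]byte("value")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("root: %x\n", root)

	// Read the value back from the latest revision.
	val, err := db.Get([]byte("key"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("value: %s\n", val)
}
```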
48 | 49 | To trigger this build, [attach-static-libs](../.github/workflows/attach-static-libs.yaml) supports both manual GitHub Actions triggers and tag triggers, so you can create a mirror branch/tag on [firewood-go](https://github.com/ava-labs/firewood-go) either by triggering a manual GitHub Action and selecting your branch or by pushing a tag to Firewood. 50 | 51 | ### Hash Mode 52 | 53 | Firewood implements its own optimized Merkle trie structure. To support Ethereum Merkle trie hash compatibility, it also provides the `ethhash` feature flag. 54 | 55 | This is an optional feature (disabled by default). To enable it for a local build, compile with: 56 | 57 | ```sh 58 | cargo build -p firewood-ffi --features ethhash 59 | ``` 60 | 61 | To support development in [Coreth](https://github.com/ava-labs/coreth), Firewood pushes static libraries to [firewood-go](https://github.com/ava-labs/firewood-go) with `ethhash` enabled by default. 62 | 63 | ## Development 64 | 65 | Iterative building is unintuitive for the FFI; some common sources of confusion are listed below. 66 | 67 | ### CGO Regeneration 68 | 69 | When you edit Rust code and save the file in VS Code, the `firewood.h` file is automatically updated with the edited function and struct definitions. However, the Go linter will not recognize these changes until you manually regenerate the cgo wrappers. To do this, you can run `go tool cgo firewood.go`. Alternatively, in VS Code, right above the `import "C"` definition, you can click the small "regenerate cgo definitions" link. This will allow the linter to use the updated definitions. 70 | 71 | Because the C header file is autogenerated from the Rust code, the naming matches exactly (due to the `no_mangle` macro). However, the C definitions imported in Go do not match exactly: struct names are prefixed with `struct_`. Function naming is the same as in the header file. These names are generated by the `go tool cgo` command above. 72 | 73 | ### Testing 74 | 75 | Although the VS Code testing feature does work, there are some quirks in ensuring a proper build. The Rust code must be compiled separately, and sometimes the `go test` command continues to use a cached result. Whenever you test after making changes to the Rust/C builds, clear the cache if results don't seem correct. Do not compile with `--features ethhash`, as some tests will fail. 76 | 77 | To check for memory leaks, the easiest way is to use your preferred CLI tool (e.g. `valgrind` for Linux, `leaks` for macOS) and compile the tests into a binary. Do not compile a release binary; a debug build ensures all memory can be tracked. An example flow is given below. 78 | 79 | ```sh 80 | cd ffi 81 | cargo build # use debug 82 | go test -a -c -o binary_file # ignore cache 83 | leaks --nostacks --atExit -- ./binary_file 84 | ``` 85 | -------------------------------------------------------------------------------- /ffi/build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | extern crate cbindgen; 4 | 5 | fn main() { 6 | let crate_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is not set"); 7 | 8 | let config = cbindgen::Config::from_file("cbindgen.toml").expect("cbindgen.toml is present"); 9 | 10 | cbindgen::Builder::new() 11 | .with_crate(crate_dir) 12 | // Add any additional configuration options here 13 | .with_config(config) 14 | .generate() 15 | .map_or_else( 16 | |error| match error { 17 | cbindgen::Error::ParseSyntaxError { ..
} => {} 18 | e => panic!("{e:?}"), 19 | }, 20 | |bindings| { 21 | bindings.write_to_file("firewood.h"); 22 | }, 23 | ); 24 | } 25 | -------------------------------------------------------------------------------- /ffi/cbindgen.toml: -------------------------------------------------------------------------------- 1 | # This is a template cbindgen.toml file with all of the default values. 2 | # Some values are commented out because their absence is the real default. 3 | # 4 | # See https://github.com/mozilla/cbindgen/blob/master/docs.md#cbindgentoml 5 | # for detailed documentation of every option here. 6 | 7 | 8 | 9 | language = "C" 10 | 11 | 12 | 13 | ############## Options for Wrapping the Contents of the Header ################# 14 | 15 | # header = "/* Text to put at the beginning of the generated file. Probably a license. */" 16 | # trailer = "/* Text to put at the end of the generated file */" 17 | # include_guard = "my_bindings_h" 18 | # pragma_once = true 19 | # autogen_warning = "/* Warning, this file is autogenerated by cbindgen. Don't modify this manually. */" 20 | include_version = false 21 | # namespace = "my_namespace" 22 | namespaces = [] 23 | using_namespaces = [] 24 | sys_includes = [] 25 | includes = [] 26 | no_includes = false 27 | # cpp_compat = true 28 | after_includes = "" 29 | 30 | 31 | ############################ Code Style Options ################################ 32 | 33 | braces = "SameLine" 34 | line_length = 100 35 | tab_width = 2 36 | documentation = true 37 | documentation_style = "auto" 38 | documentation_length = "full" 39 | line_endings = "LF" # also "CR", "CRLF", "Native" 40 | 41 | 42 | ############################# Codegen Options ################################## 43 | 44 | style = "both" 45 | sort_by = "Name" # default for `fn.sort_by` and `const.sort_by` 46 | usize_is_size_t = true 47 | 48 | [defines] 49 | # "target_os = freebsd" = "DEFINE_FREEBSD" 50 | # "feature = serde" = "DEFINE_SERDE" 51 | 52 | [export] 53 | include = [] 54 | exclude = [] 55 | # prefix = "CAPI_" 56 | item_types = [] 57 | renaming_overrides_prefixing = false 58 | 59 | [export.rename] 60 | "Db" = "void" 61 | 62 | [export.body] 63 | 64 | [export.mangle] 65 | 66 | [fn] 67 | rename_args = "None" 68 | # must_use = "MUST_USE_FUNC" 69 | # deprecated = "DEPRECATED_FUNC" 70 | # deprecated_with_note = "DEPRECATED_FUNC_WITH_NOTE" 71 | # no_return = "NO_RETURN" 72 | # prefix = "START_FUNC" 73 | # postfix = "END_FUNC" 74 | args = "auto" 75 | sort_by = "Name" 76 | 77 | [struct] 78 | rename_fields = "None" 79 | # must_use = "MUST_USE_STRUCT" 80 | # deprecated = "DEPRECATED_STRUCT" 81 | # deprecated_with_note = "DEPRECATED_STRUCT_WITH_NOTE" 82 | derive_constructor = false 83 | derive_eq = false 84 | derive_neq = false 85 | derive_lt = false 86 | derive_lte = false 87 | derive_gt = false 88 | derive_gte = false 89 | 90 | [enum] 91 | rename_variants = "None" 92 | # must_use = "MUST_USE_ENUM" 93 | # deprecated = "DEPRECATED_ENUM" 94 | # deprecated_with_note = "DEPRECATED_ENUM_WITH_NOTE" 95 | add_sentinel = false 96 | prefix_with_name = false 97 | derive_helper_methods = false 98 | derive_const_casts = false 99 | derive_mut_casts = false 100 | # cast_assert_name = "ASSERT" 101 | derive_tagged_enum_destructor = false 102 | derive_tagged_enum_copy_constructor = false 103 | enum_class = true 104 | private_default_tagged_enum_constructor = false 105 | 106 | 107 | 108 | 109 | [const] 110 | allow_static_const = true 111 | allow_constexpr = false 112 | sort_by = "Name" 113 | 114 | 115 | 116 | 117 | 
[macro_expansion] 118 | bitflags = false 119 | 120 | 121 | 122 | 123 | 124 | 125 | ############## Options for How Your Rust library Should Be Parsed ############## 126 | 127 | [parse] 128 | parse_deps = false 129 | # include = [] 130 | exclude = [] 131 | clean = false 132 | extra_bindings = [] 133 | 134 | 135 | 136 | [parse.expand] 137 | crates = [] 138 | all_features = false 139 | default_features = true 140 | features = [] 141 | -------------------------------------------------------------------------------- /ffi/firewood.go: -------------------------------------------------------------------------------- 1 | // Package firewood provides a Go wrapper around the [Firewood] database. 2 | // 3 | // [Firewood]: https://github.com/ava-labs/firewood 4 | package firewood 5 | 6 | // // Note that -lm is required on Linux but not on Mac. 7 | // #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/libs/x86_64-unknown-linux-gnu -lm 8 | // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/libs/aarch64-unknown-linux-gnu -lm 9 | // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/libs/x86_64-apple-darwin 10 | // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/libs/aarch64-apple-darwin 11 | // // XXX: last search path takes precedence, which means we prioritize 12 | // // local builds over pre-built and maxperf over release build 13 | // #cgo LDFLAGS: -L${SRCDIR}/../target/debug 14 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release 15 | // #cgo LDFLAGS: -L${SRCDIR}/../target/maxperf 16 | // #cgo LDFLAGS: -L/usr/local/lib -lfirewood_ffi 17 | // #include <stdlib.h> 18 | // #include "firewood.h" 19 | import "C" 20 | 21 | import ( 22 | "errors" 23 | "fmt" 24 | "strings" 25 | "unsafe" 26 | ) 27 | 28 | // These constants are used to identify errors returned by the Firewood Rust FFI. 29 | // These must be changed if the Rust FFI changes - should be reported by tests. 30 | const ( 31 | RootLength = 32 32 | rootHashNotFound = "IO error: Root hash not found" 33 | keyNotFound = "key not found" 34 | ) 35 | 36 | var errDBClosed = errors.New("firewood database already closed") 37 | 38 | // A Database is a handle to a Firewood database. 39 | // It is not safe to call these methods with a nil handle. 40 | type Database struct { 41 | // handle is returned and accepted by cgo functions. It MUST be treated as 42 | // an opaque value without special meaning. 43 | // https://en.wikipedia.org/wiki/Blinkenlights 44 | handle *C.DatabaseHandle 45 | } 46 | 47 | // Config configures the opening of a [Database]. 48 | type Config struct { 49 | Create bool 50 | NodeCacheEntries uint 51 | Revisions uint 52 | ReadCacheStrategy CacheStrategy 53 | MetricsPort uint16 54 | } 55 | 56 | // DefaultConfig returns a sensible default Config. 57 | func DefaultConfig() *Config { 58 | return &Config{ 59 | NodeCacheEntries: 1_000_000, 60 | Revisions: 100, 61 | ReadCacheStrategy: OnlyCacheWrites, 62 | MetricsPort: 3000, 63 | } 64 | } 65 | 66 | // A CacheStrategy represents the caching strategy used by a [Database]. 67 | type CacheStrategy uint8 68 | 69 | const ( 70 | OnlyCacheWrites CacheStrategy = iota 71 | CacheBranchReads 72 | CacheAllReads 73 | 74 | // invalidCacheStrategy MUST be the final value in the iota block to make it 75 | // the smallest value greater than all valid values. 76 | invalidCacheStrategy 77 | ) 78 | 79 | // New opens or creates a new Firewood database with the given configuration. If 80 | // a nil `Config` is provided, [DefaultConfig] will be used instead.
81 | func New(filePath string, conf *Config) (*Database, error) { 82 | if conf == nil { 83 | conf = DefaultConfig() 84 | } 85 | if conf.ReadCacheStrategy >= invalidCacheStrategy { 86 | return nil, fmt.Errorf("invalid %T (%[1]d)", conf.ReadCacheStrategy) 87 | } 88 | if conf.Revisions < 2 { 89 | return nil, fmt.Errorf("%T.Revisions must be >= 2", conf) 90 | } 91 | if conf.NodeCacheEntries < 1 { 92 | return nil, fmt.Errorf("%T.NodeCacheEntries must be >= 1", conf) 93 | } 94 | 95 | args := C.struct_CreateOrOpenArgs{ 96 | path: C.CString(filePath), 97 | cache_size: C.size_t(conf.NodeCacheEntries), 98 | revisions: C.size_t(conf.Revisions), 99 | strategy: C.uint8_t(conf.ReadCacheStrategy), 100 | metrics_port: C.uint16_t(conf.MetricsPort), 101 | } 102 | // Defer freeing the C string allocated to the heap on the other side 103 | // of the FFI boundary. 104 | defer C.free(unsafe.Pointer(args.path)) 105 | 106 | var db *C.DatabaseHandle 107 | if conf.Create { 108 | db = C.fwd_create_db(args) 109 | } else { 110 | db = C.fwd_open_db(args) 111 | } 112 | 113 | return &Database{handle: db}, nil 114 | } 115 | 116 | // Update applies a batch of updates to the database, returning the hash of the 117 | // root node after the batch is applied. 118 | // 119 | // WARNING: a consequence of prefix deletion is that calling Update with an empty 120 | // key and value will delete the entire database. 121 | func (db *Database) Update(keys, vals [][]byte) ([]byte, error) { 122 | if db.handle == nil { 123 | return nil, errDBClosed 124 | } 125 | 126 | ffiOps, cleanup := createOps(keys, vals) 127 | defer cleanup() 128 | 129 | hash := C.fwd_batch( 130 | db.handle, 131 | C.size_t(len(ffiOps)), 132 | unsafe.SliceData(ffiOps), // implicitly pinned 133 | ) 134 | return bytesFromValue(&hash) 135 | } 136 | 137 | func (db *Database) Propose(keys, vals [][]byte) (*Proposal, error) { 138 | if db.handle == nil { 139 | return nil, errDBClosed 140 | } 141 | 142 | ffiOps, cleanup := createOps(keys, vals) 143 | defer cleanup() 144 | 145 | val := C.fwd_propose_on_db( 146 | db.handle, 147 | C.size_t(len(ffiOps)), 148 | unsafe.SliceData(ffiOps), // implicitly pinned 149 | ) 150 | return newProposal(db.handle, &val) 151 | } 152 | 153 | // Get retrieves the value for the given key. It always returns a nil error. 154 | // If the key is not found, the return value will be (nil, nil). 155 | func (db *Database) Get(key []byte) ([]byte, error) { 156 | if db.handle == nil { 157 | return nil, errDBClosed 158 | } 159 | 160 | values, cleanup := newValueFactory() 161 | defer cleanup() 162 | val := C.fwd_get_latest(db.handle, values.from(key)) 163 | bytes, err := bytesFromValue(&val) 164 | 165 | // If the root hash is not found, return nil. 166 | if err != nil && strings.Contains(err.Error(), rootHashNotFound) { 167 | return nil, nil 168 | } 169 | 170 | return bytes, err 171 | } 172 | 173 | // Root returns the current root hash of the trie. 174 | // Empty trie must return common.Hash{}. 175 | func (db *Database) Root() ([]byte, error) { 176 | if db.handle == nil { 177 | return nil, errDBClosed 178 | } 179 | hash := C.fwd_root_hash(db.handle) 180 | bytes, err := bytesFromValue(&hash) 181 | 182 | // If the root hash is not found, return a zeroed slice. 183 | if err == nil && bytes == nil { 184 | bytes = make([]byte, RootLength) 185 | } 186 | return bytes, err 187 | } 188 | 189 | // Revision returns a historical revision of the database. 
190 | func (db *Database) Revision(root []byte) (*Revision, error) { 191 | return newRevision(db.handle, root) 192 | } 193 | 194 | // Close closes the database and releases all held resources. 195 | // Returns an error if already closed. 196 | func (db *Database) Close() error { 197 | if db.handle == nil { 198 | return errDBClosed 199 | } 200 | C.fwd_close_db(db.handle) 201 | db.handle = nil 202 | return nil 203 | } 204 | -------------------------------------------------------------------------------- /ffi/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ava-labs/firewood/ffi 2 | 3 | go 1.23 4 | 5 | toolchain go1.23.6 6 | 7 | require github.com/stretchr/testify v1.10.0 8 | 9 | require ( 10 | github.com/davecgh/go-spew v1.1.1 // indirect 11 | github.com/pmezard/go-difflib v1.0.0 // indirect 12 | gopkg.in/yaml.v3 v3.0.1 // indirect 13 | ) 14 | -------------------------------------------------------------------------------- /ffi/go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 5 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 6 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 7 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 8 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 9 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 10 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 11 | -------------------------------------------------------------------------------- /ffi/kvbackend.go: -------------------------------------------------------------------------------- 1 | package firewood 2 | 3 | // implement a specific interface for firewood 4 | // this is used for some of the firewood performance tests 5 | 6 | // Validate that Firewood implements the KVBackend interface 7 | var _ kVBackend = (*Database)(nil) 8 | 9 | type kVBackend interface { 10 | // Returns the current root hash of the trie. 11 | // Empty trie must return common.Hash{}. 12 | // Length of the returned slice must be common.HashLength. 13 | Root() ([]byte, error) 14 | 15 | // Get retrieves the value for the given key. 16 | // If the key does not exist, it must return (nil, nil). 17 | Get(key []byte) ([]byte, error) 18 | 19 | // Prefetch loads the intermediary nodes of the given key into memory. 20 | // The first return value is ignored. 21 | Prefetch(key []byte) ([]byte, error) 22 | 23 | // After this call, Root() should return the same hash as returned by this call. 24 | // Note when length of a particular value is zero, it means the corresponding 25 | // key should be deleted. 26 | // There may be duplicate keys in the batch provided, and the last one should 27 | // take effect. 28 | // Note after this call, the next call to Update must build on the returned root, 29 | // regardless of whether Commit is called. 30 | // Length of the returned root must be common.HashLength. 
31 | Update(keys, vals [][]byte) ([]byte, error) 32 | 33 | // After this call, changes related to [root] should be persisted to disk. 34 | // This may be implemented as no-op if Update already persists changes, or 35 | // commits happen on a rolling basis. 36 | // Length of the root slice is guaranteed to be common.HashLength. 37 | Commit(root []byte) error 38 | 39 | // Close closes the backend and releases all held resources. 40 | Close() error 41 | } 42 | 43 | // Prefetch is a no-op since we don't need to prefetch for Firewood. 44 | func (db *Database) Prefetch(_ []byte) ([]byte, error) { 45 | if db.handle == nil { 46 | return nil, errDBClosed 47 | } 48 | 49 | return nil, nil 50 | } 51 | 52 | // Commit is a no-op, since [Database.Update] already persists changes. 53 | func (db *Database) Commit(_ []byte) error { 54 | if db.handle == nil { 55 | return errDBClosed 56 | } 57 | 58 | return nil 59 | } 60 | -------------------------------------------------------------------------------- /ffi/memory.go: -------------------------------------------------------------------------------- 1 | // Package firewood provides a Go wrapper around the [Firewood] database. 2 | // 3 | // [Firewood]: https://github.com/ava-labs/firewood 4 | package firewood 5 | 6 | // // Note that -lm is required on Linux but not on Mac. 7 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release -L/usr/local/lib -lfirewood_ffi -lm 8 | // #include <stdlib.h> 9 | // #include "firewood.h" 10 | import "C" 11 | 12 | import ( 13 | "errors" 14 | "fmt" 15 | "runtime" 16 | "unsafe" 17 | ) 18 | 19 | var ( 20 | errNilBuffer = errors.New("firewood error: nil value returned from cgo") 21 | errBadValue = errors.New("firewood error: value from cgo formatted incorrectly") 22 | ) 23 | 24 | // KeyValue is a key-value pair. 25 | type KeyValue struct { 26 | Key []byte 27 | Value []byte 28 | } 29 | 30 | // hashAndIDFromValue converts the cgo `Value` payload into: 31 | // 32 | // case | data | len | meaning 33 | // 34 | // 1. | nil | 0 | invalid 35 | // 2. | nil | non-0 | proposal deleted everything 36 | // 3. | non-nil | 0 | error string 37 | // 4. | non-nil | non-0 | hash and id 38 | // 39 | // The value should never be nil. 40 | func hashAndIDFromValue(v *C.struct_Value) ([]byte, uint32, error) { 41 | // Pin the returned value to prevent it from being garbage collected. 42 | defer runtime.KeepAlive(v) 43 | 44 | if v == nil { 45 | return nil, 0, errNilBuffer 46 | } 47 | 48 | if v.data == nil { 49 | // Case 2 50 | if v.len != 0 { 51 | return nil, uint32(v.len), nil 52 | } 53 | 54 | // Case 1 55 | return nil, 0, errBadValue 56 | } 57 | 58 | // Case 3 59 | if v.len == 0 { 60 | errStr := C.GoString((*C.char)(unsafe.Pointer(v.data))) 61 | C.fwd_free_value(v) 62 | return nil, 0, fmt.Errorf("firewood error: %s", errStr) 63 | } 64 | 65 | // Case 4 66 | id := uint32(v.len) 67 | buf := C.GoBytes(unsafe.Pointer(v.data), RootLength) 68 | v.len = C.size_t(RootLength) // set the length to free 69 | C.fwd_free_value(v) 70 | return buf, id, nil 71 | } 72 | 73 | // errorFromValue converts the cgo `Value` payload into: 74 | // 75 | // case | data | len | meaning 76 | // 77 | // 1. | nil | 0 | empty 78 | // 2. | nil | non-0 | invalid 79 | // 3. | non-nil | 0 | error string 80 | // 4. | non-nil | non-0 | invalid 81 | // 82 | // The value should never be nil. 83 | func errorFromValue(v *C.struct_Value) error { 84 | // Pin the returned value to prevent it from being garbage collected.
85 | defer runtime.KeepAlive(v) 86 | 87 | if v == nil { 88 | return errNilBuffer 89 | } 90 | 91 | // Case 1 92 | if v.data == nil && v.len == 0 { 93 | return nil 94 | } 95 | 96 | // Case 3 97 | if v.len == 0 { 98 | errStr := C.GoString((*C.char)(unsafe.Pointer(v.data))) 99 | C.fwd_free_value(v) 100 | return fmt.Errorf("firewood error: %s", errStr) 101 | } 102 | 103 | // Case 2 and 4 104 | C.fwd_free_value(v) 105 | return errBadValue 106 | } 107 | 108 | // bytesFromValue converts the cgo `Value` payload to: 109 | // 110 | // case | data | len | meaning 111 | // 112 | // 1. | nil | 0 | empty 113 | // 2. | nil | non-0 | invalid 114 | // 3. | non-nil | 0 | error string 115 | // 4. | non-nil | non-0 | bytes (most common) 116 | // 117 | // The value should never be nil. 118 | func bytesFromValue(v *C.struct_Value) ([]byte, error) { 119 | // Pin the returned value to prevent it from being garbage collected. 120 | defer runtime.KeepAlive(v) 121 | 122 | if v == nil { 123 | return nil, errNilBuffer 124 | } 125 | 126 | // Case 4 127 | if v.len != 0 && v.data != nil { 128 | buf := C.GoBytes(unsafe.Pointer(v.data), C.int(v.len)) 129 | C.fwd_free_value(v) 130 | return buf, nil 131 | } 132 | 133 | // Case 1 134 | if v.len == 0 && v.data == nil { 135 | return nil, nil 136 | } 137 | 138 | // Case 3 139 | if v.len == 0 { 140 | errStr := C.GoString((*C.char)(unsafe.Pointer(v.data))) 141 | C.fwd_free_value(v) 142 | return nil, fmt.Errorf("firewood error: %s", errStr) 143 | } 144 | 145 | // Case 2 146 | return nil, errBadValue 147 | } 148 | 149 | // newValueFactory returns a factory for converting byte slices into cgo `Value` 150 | // structs that can be passed as arguments to cgo functions. The returned 151 | // cleanup function MUST be called when the constructed values are no longer 152 | // required, after which they can no longer be used as cgo arguments. 153 | func newValueFactory() (*valueFactory, func()) { 154 | f := new(valueFactory) 155 | return f, func() { f.pin.Unpin() } 156 | } 157 | 158 | type valueFactory struct { 159 | pin runtime.Pinner 160 | } 161 | 162 | func (f *valueFactory) from(data []byte) C.struct_Value { 163 | if len(data) == 0 { 164 | return C.struct_Value{0, nil} 165 | } 166 | ptr := (*C.uchar)(unsafe.SliceData(data)) 167 | f.pin.Pin(ptr) 168 | return C.struct_Value{C.size_t(len(data)), ptr} 169 | } 170 | 171 | // createOps creates a slice of cgo `KeyValue` structs from the given keys and 172 | // values and pins the memory of the underlying byte slices to prevent 173 | // garbage collection while the cgo function is using them. The returned cleanup 174 | // function MUST be called when the constructed values are no longer required, 175 | // after which they can no longer be used as cgo arguments. 176 | func createOps(keys, vals [][]byte) ([]C.struct_KeyValue, func()) { 177 | values, cleanup := newValueFactory() 178 | 179 | ffiOps := make([]C.struct_KeyValue, len(keys)) 180 | for i := range keys { 181 | ffiOps[i] = C.struct_KeyValue{ 182 | key: values.from(keys[i]), 183 | value: values.from(vals[i]), 184 | } 185 | } 186 | 187 | return ffiOps, cleanup 188 | } 189 | -------------------------------------------------------------------------------- /ffi/proposal.go: -------------------------------------------------------------------------------- 1 | // Package firewood provides a Go wrapper around the [Firewood] database. 2 | // 3 | // [Firewood]: https://github.com/ava-labs/firewood 4 | package firewood 5 | 6 | // // Note that -lm is required on Linux but not on Mac. 
7 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release -L/usr/local/lib -lfirewood_ffi -lm 8 | // #include <stdlib.h> 9 | // #include "firewood.h" 10 | import "C" 11 | 12 | import ( 13 | "errors" 14 | "unsafe" 15 | ) 16 | 17 | var errDroppedProposal = errors.New("proposal already dropped") 18 | 19 | type Proposal struct { 20 | // handle is returned and accepted by cgo functions. It MUST be treated as 21 | // an opaque value without special meaning. 22 | // https://en.wikipedia.org/wiki/Blinkenlights 23 | handle *C.DatabaseHandle 24 | 25 | // The proposal ID. 26 | // id = 0 is reserved for a dropped proposal. 27 | id uint32 28 | 29 | // The proposal root hash. 30 | root []byte 31 | } 32 | 33 | // newProposal creates a new Proposal from the given DatabaseHandle and Value. 34 | // The Value must be returned from a Firewood FFI function. 35 | // An error can only occur from parsing the Value. 36 | func newProposal(handle *C.DatabaseHandle, val *C.struct_Value) (*Proposal, error) { 37 | bytes, id, err := hashAndIDFromValue(val) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | // If the proposal root is nil, it means the proposal is empty. 43 | if bytes == nil { 44 | bytes = make([]byte, RootLength) 45 | } 46 | 47 | return &Proposal{ 48 | handle: handle, 49 | id: id, 50 | root: bytes, 51 | }, nil 52 | } 53 | 54 | // Root retrieves the root hash of the proposal. 55 | // If the proposal is empty (i.e. no keys in the database), 56 | // it returns a zeroed root hash. 57 | func (p *Proposal) Root() ([]byte, error) { 58 | if p.handle == nil { 59 | return nil, errDBClosed 60 | } 61 | 62 | if p.id == 0 { 63 | return nil, errDroppedProposal 64 | } 65 | 66 | // If the hash is empty, return the empty root hash. 67 | if p.root == nil { 68 | return make([]byte, RootLength), nil 69 | } 70 | 71 | // Get the root hash of the proposal. 72 | return p.root, nil 73 | } 74 | 75 | // Get retrieves the value for the given key. 76 | // If the key does not exist, it returns (nil, nil). 77 | func (p *Proposal) Get(key []byte) ([]byte, error) { 78 | if p.handle == nil { 79 | return nil, errDBClosed 80 | } 81 | 82 | if p.id == 0 { 83 | return nil, errDroppedProposal 84 | } 85 | values, cleanup := newValueFactory() 86 | defer cleanup() 87 | 88 | // Get the value for the given key. 89 | val := C.fwd_get_from_proposal(p.handle, C.uint32_t(p.id), values.from(key)) 90 | return bytesFromValue(&val) 91 | } 92 | 93 | // Propose creates a new proposal with the given keys and values. 94 | // The proposal is not committed until Commit is called. 95 | func (p *Proposal) Propose(keys, vals [][]byte) (*Proposal, error) { 96 | if p.handle == nil { 97 | return nil, errDBClosed 98 | } 99 | 100 | if p.id == 0 { 101 | return nil, errDroppedProposal 102 | } 103 | 104 | ffiOps, cleanup := createOps(keys, vals) 105 | defer cleanup() 106 | 107 | // Propose the keys and values. 108 | val := C.fwd_propose_on_proposal(p.handle, C.uint32_t(p.id), 109 | C.size_t(len(ffiOps)), 110 | unsafe.SliceData(ffiOps), 111 | ) 112 | 113 | return newProposal(p.handle, &val) 114 | } 115 | 116 | // Commit commits the proposal and returns any errors. 117 | // If an error occurs, the proposal is dropped and no longer valid. 118 | func (p *Proposal) Commit() error { 119 | if p.handle == nil { 120 | return errDBClosed 121 | } 122 | 123 | if p.id == 0 { 124 | return errDroppedProposal 125 | } 126 | 127 | // Commit the proposal and check for an error.
128 | errVal := C.fwd_commit(p.handle, C.uint32_t(p.id)) 129 | err := errorFromValue(&errVal) 130 | if err != nil { 131 | // this is unrecoverable due to Rust's ownership model 132 | // The underlying proposal is no longer valid. 133 | p.id = 0 134 | } 135 | return err 136 | } 137 | 138 | // Drop removes the proposal from memory in Firewood. 139 | // In the case of an error, the proposal can be assumed to be dropped. 140 | // An error is returned if the proposal was already dropped. 141 | func (p *Proposal) Drop() error { 142 | if p.handle == nil { 143 | return errDBClosed 144 | } 145 | 146 | if p.id == 0 { 147 | return errDroppedProposal 148 | } 149 | 150 | // Drop the proposal. 151 | val := C.fwd_drop_proposal(p.handle, C.uint32_t(p.id)) 152 | p.id = 0 153 | return errorFromValue(&val) 154 | } 155 | -------------------------------------------------------------------------------- /ffi/revision.go: -------------------------------------------------------------------------------- 1 | // Package firewood provides a Go wrapper around the [Firewood] database. 2 | // 3 | // [Firewood]: https://github.com/ava-labs/firewood 4 | package firewood 5 | 6 | // // Note that -lm is required on Linux but not on Mac. 7 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release -L/usr/local/lib -lfirewood_ffi -lm 8 | // #include <stdlib.h> 9 | // #include "firewood.h" 10 | import "C" 11 | 12 | import ( 13 | "errors" 14 | "fmt" 15 | ) 16 | 17 | var ( 18 | errRevisionNotFound = errors.New("firewood error: revision not found") 19 | errInvalidRootLength = fmt.Errorf("firewood error: root hash must be %d bytes", RootLength) 20 | ) 21 | 22 | type Revision struct { 23 | // handle is returned and accepted by cgo functions. It MUST be treated as 24 | // an opaque value without special meaning. 25 | // https://en.wikipedia.org/wiki/Blinkenlights 26 | handle *C.DatabaseHandle 27 | // The revision root 28 | root []byte 29 | } 30 | 31 | func newRevision(handle *C.DatabaseHandle, root []byte) (*Revision, error) { 32 | if handle == nil { 33 | return nil, errors.New("firewood error: nil handle or root") 34 | } 35 | 36 | // Check that the root is the correct length. 37 | if root == nil || len(root) != RootLength { 38 | return nil, errInvalidRootLength 39 | } 40 | 41 | // Attempt to get any value from the root. 42 | // This will verify that the root is valid and accessible. 43 | // If the root is not valid, this will return an error. 44 | values, cleanup := newValueFactory() 45 | defer cleanup() 46 | val := C.fwd_get_from_root(handle, values.from(root), values.from([]byte{})) 47 | _, err := bytesFromValue(&val) 48 | if err != nil { 49 | // Any error from this function indicates that the root is inaccessible. 50 | return nil, errRevisionNotFound 51 | } 52 | 53 | // All other verification of the root is done during use. 54 | return &Revision{ 55 | handle: handle, 56 | root: root, 57 | }, nil 58 | } 59 | 60 | func (r *Revision) Get(key []byte) ([]byte, error) { 61 | if r.handle == nil { 62 | return nil, errDBClosed 63 | } 64 | if r.root == nil { 65 | return nil, errRevisionNotFound 66 | } 67 | 68 | values, cleanup := newValueFactory() 69 | defer cleanup() 70 | 71 | val := C.fwd_get_from_root(r.handle, values.from(r.root), values.from(key)) 72 | value, err := bytesFromValue(&val) 73 | if err != nil { 74 | // Any error from this function indicates that the revision is inaccessible.
75 | r.root = nil 76 | } 77 | return value, err 78 | } 79 | -------------------------------------------------------------------------------- /ffi/src/metrics_setup.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::io::Write; 3 | use std::net::Ipv6Addr; 4 | use std::ops::Deref; 5 | use std::sync::atomic::Ordering; 6 | use std::sync::{Arc, Once}; 7 | use std::time::SystemTime; 8 | 9 | use oxhttp::Server; 10 | use oxhttp::model::{Body, Response, StatusCode}; 11 | use std::net::Ipv4Addr; 12 | use std::time::Duration; 13 | 14 | use chrono::{DateTime, Utc}; 15 | 16 | use metrics::Key; 17 | use metrics_util::registry::{AtomicStorage, Registry}; 18 | 19 | static INIT: Once = Once::new(); 20 | 21 | pub(crate) fn setup_metrics(metrics_port: u16) { 22 | INIT.call_once(|| { 23 | let inner: TextRecorderInner = TextRecorderInner { 24 | registry: Registry::atomic(), 25 | }; 26 | let recorder = TextRecorder { 27 | inner: Arc::new(inner), 28 | }; 29 | metrics::set_global_recorder(recorder.clone()).expect("failed to set recorder"); 30 | 31 | Server::new(move |request| { 32 | if request.method() == "GET" { 33 | Response::builder() 34 | .status(StatusCode::OK) 35 | .header("Content-Type", "text/plain") 36 | .body(Body::from(recorder.stats())) 37 | .expect("failed to build response") 38 | } else { 39 | Response::builder() 40 | .status(StatusCode::METHOD_NOT_ALLOWED) 41 | .body(Body::from("Method not allowed")) 42 | .expect("failed to build response") 43 | } 44 | }) 45 | .bind((Ipv4Addr::LOCALHOST, metrics_port)) 46 | .bind((Ipv6Addr::LOCALHOST, metrics_port)) 47 | .with_global_timeout(Duration::from_secs(60 * 60)) 48 | .with_max_concurrent_connections(2) 49 | .spawn() 50 | .expect("failed to spawn server"); 51 | }); 52 | } 53 | 54 | #[derive(Debug)] 55 | struct TextRecorderInner { 56 | registry: Registry<Key, AtomicStorage>, 57 | } 58 | 59 | #[derive(Debug, Clone)] 60 | struct TextRecorder { 61 | inner: Arc<TextRecorderInner>, 62 | } 63 | 64 | impl TextRecorder { 65 | fn stats(&self) -> String { 66 | let mut output = Vec::new(); 67 | let systemtime_now = SystemTime::now(); 68 | let utc_now: DateTime<Utc> = systemtime_now.into(); 69 | let epoch_duration = systemtime_now 70 | .duration_since(SystemTime::UNIX_EPOCH) 71 | .expect("system time is before Unix epoch"); 72 | let epoch_ms = epoch_duration.as_secs() * 1000 + u64::from(epoch_duration.subsec_millis()); 73 | writeln!(output, "# {utc_now}").unwrap(); 74 | 75 | let counters = self.registry.get_counter_handles(); 76 | let mut seen = HashSet::new(); 77 | for (key, counter) in counters { 78 | let sanitized_key_name = key.name().to_string().replace('.', "_"); 79 | if !seen.contains(&sanitized_key_name) { 80 | writeln!( 81 | output, 82 | "# TYPE {} counter", 83 | key.name().to_string().replace('.', "_") 84 | ) 85 | .expect("write error"); 86 | seen.insert(sanitized_key_name.clone()); 87 | } 88 | write!(output, "{sanitized_key_name}").expect("write error"); 89 | if key.labels().len() > 0 { 90 | write!( 91 | output, 92 | "{{{}}}", 93 | key.labels() 94 | .map(|label| format!("{}=\"{}\"", label.key(), label.value())) 95 | .collect::<Vec<_>>() 96 | .join(",") 97 | ) 98 | .expect("write error"); 99 | } 100 | writeln!(output, " {} {}", counter.load(Ordering::Relaxed), epoch_ms) 101 | .expect("write error"); 102 | } 103 | writeln!(output).expect("write error"); 104 | output.flush().expect("flush error"); 105 | 106 | std::str::from_utf8(output.as_slice()) 107 | .expect("failed to convert to string") 108 | .into() 109 | } 110 | } 111 |
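// For reference, `stats()` above renders each counter in the Prometheus text
// exposition format: a `# TYPE <name> counter` line followed by
// `<name>{<labels>} <value> <epoch_ms>`. Illustrative output only; the metric
// name, label, and numbers below are invented, not taken from a real run:
//
//   # 2025-01-01 00:00:00 UTC
//   # TYPE firewood_reads counter
//   firewood_reads{from="cache"} 42 1735689600000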
112 | impl Deref for TextRecorder { 113 | type Target = Arc<TextRecorderInner>; 114 | 115 | fn deref(&self) -> &Self::Target { 116 | &self.inner 117 | } 118 | } 119 | 120 | impl metrics::Recorder for TextRecorder { 121 | fn describe_counter( 122 | &self, 123 | _key: metrics::KeyName, 124 | _unit: Option<metrics::Unit>, 125 | _description: metrics::SharedString, 126 | ) { 127 | } 128 | 129 | fn describe_gauge( 130 | &self, 131 | _key: metrics::KeyName, 132 | _unit: Option<metrics::Unit>, 133 | _description: metrics::SharedString, 134 | ) { 135 | } 136 | 137 | fn describe_histogram( 138 | &self, 139 | _key: metrics::KeyName, 140 | _unit: Option<metrics::Unit>, 141 | _description: metrics::SharedString, 142 | ) { 143 | } 144 | 145 | fn register_counter( 146 | &self, 147 | key: &metrics::Key, 148 | _metadata: &metrics::Metadata<'_>, 149 | ) -> metrics::Counter { 150 | self.inner 151 | .registry 152 | .get_or_create_counter(key, |c| c.clone().into()) 153 | } 154 | 155 | fn register_gauge( 156 | &self, 157 | key: &metrics::Key, 158 | _metadata: &metrics::Metadata<'_>, 159 | ) -> metrics::Gauge { 160 | self.inner 161 | .registry 162 | .get_or_create_gauge(key, |c| c.clone().into()) 163 | } 164 | 165 | fn register_histogram( 166 | &self, 167 | key: &metrics::Key, 168 | _metadata: &metrics::Metadata<'_>, 169 | ) -> metrics::Histogram { 170 | self.inner 171 | .registry 172 | .get_or_create_histogram(key, |c| c.clone().into()) 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /ffi/tests/eth/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ava-labs/firewood/ffi/tests 2 | 3 | go 1.23.9 4 | 5 | toolchain go1.24.2 6 | 7 | require ( 8 | github.com/ava-labs/firewood-go/ffi v0.0.0 // this is replaced to use the parent folder 9 | github.com/ava-labs/libevm v1.13.14-0.2.0.release 10 | github.com/holiman/uint256 v1.3.2 11 | github.com/stretchr/testify v1.10.0 12 | ) 13 | 14 | require ( 15 | github.com/DataDog/zstd v1.5.2 // indirect 16 | github.com/VictoriaMetrics/fastcache v1.12.1 // indirect 17 | github.com/beorn7/perks v1.0.1 // indirect 18 | github.com/bits-and-blooms/bitset v1.10.0 // indirect 19 | github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect 20 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 21 | github.com/cockroachdb/errors v1.9.1 // indirect 22 | github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect 23 | github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect 24 | github.com/cockroachdb/redact v1.1.3 // indirect 25 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect 26 | github.com/consensys/bavard v0.1.13 // indirect 27 | github.com/consensys/gnark-crypto v0.12.1 // indirect 28 | github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect 29 | github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect 30 | github.com/davecgh/go-spew v1.1.1 // indirect 31 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect 32 | github.com/ethereum/c-kzg-4844 v0.4.0 // indirect 33 | github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect 34 | github.com/getsentry/sentry-go v0.18.0 // indirect 35 | github.com/go-ole/go-ole v1.3.0 // indirect 36 | github.com/gofrs/flock v0.8.1 // indirect 37 | github.com/gogo/protobuf v1.3.2 // indirect 38 | github.com/golang/protobuf v1.5.4 // indirect 39 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect 40 | github.com/gorilla/websocket v1.5.0 // indirect 41 |
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect 42 | github.com/klauspost/compress v1.15.15 // indirect 43 | github.com/kr/pretty v0.3.1 // indirect 44 | github.com/kr/text v0.2.0 // indirect 45 | github.com/mattn/go-runewidth v0.0.13 // indirect 46 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 47 | github.com/mmcloughlin/addchain v0.4.0 // indirect 48 | github.com/olekukonko/tablewriter v0.0.5 // indirect 49 | github.com/pkg/errors v0.9.1 // indirect 50 | github.com/pmezard/go-difflib v1.0.0 // indirect 51 | github.com/prometheus/client_golang v1.16.0 // indirect 52 | github.com/prometheus/client_model v0.3.0 // indirect 53 | github.com/prometheus/common v0.42.0 // indirect 54 | github.com/prometheus/procfs v0.10.1 // indirect 55 | github.com/rivo/uniseg v0.2.0 // indirect 56 | github.com/rogpeppe/go-internal v1.12.0 // indirect 57 | github.com/shirou/gopsutil v3.21.11+incompatible // indirect 58 | github.com/supranational/blst v0.3.14 // indirect 59 | github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect 60 | github.com/tklauser/go-sysconf v0.3.12 // indirect 61 | github.com/tklauser/numcpus v0.6.1 // indirect 62 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 63 | golang.org/x/crypto v0.35.0 // indirect 64 | golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect 65 | golang.org/x/sync v0.11.0 // indirect 66 | golang.org/x/sys v0.30.0 // indirect 67 | golang.org/x/text v0.22.0 // indirect 68 | google.golang.org/protobuf v1.33.0 // indirect 69 | gopkg.in/yaml.v3 v3.0.1 // indirect 70 | rsc.io/tmplfunc v0.0.3 // indirect 71 | ) 72 | 73 | replace github.com/ava-labs/firewood-go/ffi => ../../ 74 | -------------------------------------------------------------------------------- /ffi/tests/firewood/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ava-labs/firewood/ffi/tests 2 | 3 | go 1.23.9 4 | 5 | toolchain go1.24.2 6 | 7 | require ( 8 | github.com/ava-labs/firewood-go/ffi v0.0.0 // this is replaced to use the parent folder 9 | github.com/stretchr/testify v1.10.0 10 | ) 11 | 12 | require github.com/ava-labs/avalanchego v1.13.1 13 | 14 | require ( 15 | github.com/BurntSushi/toml v1.2.0 // indirect 16 | github.com/beorn7/perks v1.0.1 // indirect 17 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect 18 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 19 | github.com/davecgh/go-spew v1.1.1 // indirect 20 | github.com/go-logr/logr v1.4.1 // indirect 21 | github.com/go-logr/stdr v1.2.2 // indirect 22 | github.com/golang/protobuf v1.5.4 // indirect 23 | github.com/google/renameio/v2 v2.0.0 // indirect 24 | github.com/gorilla/rpc v1.2.0 // indirect 25 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect 26 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 27 | github.com/mr-tron/base58 v1.2.0 // indirect 28 | github.com/pmezard/go-difflib v1.0.0 // indirect 29 | github.com/prometheus/client_golang v1.16.0 // indirect 30 | github.com/prometheus/client_model v0.3.0 // indirect 31 | github.com/prometheus/common v0.42.0 // indirect 32 | github.com/prometheus/procfs v0.10.1 // indirect 33 | go.opentelemetry.io/otel v1.22.0 // indirect 34 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect 35 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect 36 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 // indirect 37 | go.opentelemetry.io/otel/metric v1.22.0 // indirect 38 | 
go.opentelemetry.io/otel/sdk v1.22.0 // indirect 39 | go.opentelemetry.io/otel/trace v1.22.0 // indirect 40 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect 41 | go.uber.org/multierr v1.11.0 // indirect 42 | go.uber.org/zap v1.26.0 // indirect 43 | golang.org/x/crypto v0.36.0 // indirect 44 | golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect 45 | golang.org/x/net v0.38.0 // indirect 46 | golang.org/x/sys v0.31.0 // indirect 47 | golang.org/x/term v0.30.0 // indirect 48 | golang.org/x/text v0.23.0 // indirect 49 | gonum.org/v1/gonum v0.11.0 // indirect 50 | google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect 51 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect 52 | google.golang.org/grpc v1.66.0 // indirect 53 | google.golang.org/protobuf v1.35.2 // indirect 54 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 55 | gopkg.in/yaml.v3 v3.0.1 // indirect 56 | ) 57 | 58 | replace github.com/ava-labs/firewood-go/ffi => ../../ 59 | -------------------------------------------------------------------------------- /firewood/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood" 3 | version = "0.0.5" 4 | edition = "2024" 5 | authors = [ 6 | "Ted Yin (@Determinant) ", 7 | "Dan Sover (@exdx) ", 8 | "Hao Hao (@haohao-os) ", 9 | "Gyuho Lee (@gyuho) ", 10 | "Sam Batschelet (@hexfusion) ", 11 | "Ron Kuris (@rkuris) ", 12 | ] 13 | description = "Firewood is an embedded key-value store, optimized to store blockchain state." 14 | license-file = "../LICENSE.md" 15 | homepage = "https://avalabs.org" 16 | readme = "../README.md" 17 | 18 | [dependencies] 19 | aquamarine = "0.6.0" 20 | async-trait = "0.1.77" 21 | futures = "0.3.30" 22 | hex = "0.4.3" 23 | metrics = "0.24.0" 24 | serde = { version = "1.0" } 25 | sha2 = "0.10.8" 26 | test-case = "3.3.1" 27 | thiserror = "2.0.3" 28 | typed-builder = "0.21.0" 29 | bincode = "1.3.3" 30 | integer-encoding = "4.0.0" 31 | smallvec = "1.6.1" 32 | fastrace = { version = "0.7.4" } 33 | 34 | [features] 35 | default = [] 36 | nightly = [] 37 | io-uring = ["storage/io-uring"] 38 | logger = ["storage/logger"] 39 | branch_factor_256 = [ "storage/branch_factor_256" ] 40 | ethhash = [ "storage/ethhash" ] 41 | 42 | [dev-dependencies] 43 | triehash = { version = "0.8.5", path = "../triehash" } 44 | criterion = { version = "0.6.0", features = ["async_tokio"] } 45 | rand = "0.9.0" 46 | rand_distr = "0.5.0" 47 | clap = { version = "4.5.0", features = ['derive'] } 48 | pprof = { version = "0.15.0", features = ["flamegraph"] } 49 | tempfile = "3.12.0" 50 | tokio = { version = "1.36.0", features = ["rt", "sync", "macros", "rt-multi-thread"] } 51 | ethereum-types = "0.15.1" 52 | sha3 = "0.10.8" 53 | plain_hasher = "0.2.3" 54 | hex-literal = "1.0.0" 55 | env_logger = "0.11.7" 56 | hash-db = "0.16.0" 57 | 58 | [[bench]] 59 | name = "hashops" 60 | harness = false 61 | 62 | [lints.clippy] 63 | unwrap_used = "warn" 64 | indexing_slicing = "warn" 65 | explicit_deref_methods = "warn" 66 | missing_const_for_fn = "warn" 67 | 68 | [target.'cfg(target_os = "linux")'.dependencies] 69 | storage = { path = "../storage", features = ["io-uring"] } 70 | 71 | [target.'cfg(not(target_os = "linux"))'.dependencies] 72 | storage = { path = "../storage" } 73 | -------------------------------------------------------------------------------- /firewood/benches/hashops.rs: 
-------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | // hash benchmarks; run with 'cargo bench' 5 | 6 | use criterion::profiler::Profiler; 7 | use criterion::{BatchSize, Criterion, criterion_group, criterion_main}; 8 | use firewood::db::{BatchOp, DbConfig}; 9 | use firewood::merkle::Merkle; 10 | use firewood::v2::api::{Db as _, Proposal as _}; 11 | use pprof::ProfilerGuard; 12 | use rand::rngs::StdRng; 13 | use rand::{Rng, SeedableRng}; 14 | use rand_distr::Alphanumeric; 15 | use std::fs::File; 16 | use std::iter::repeat_with; 17 | use std::os::raw::c_int; 18 | use std::path::Path; 19 | use std::sync::Arc; 20 | use storage::{MemStore, NodeStore}; 21 | 22 | // To enable flamegraph output 23 | // cargo bench --bench hashops -- --profile-time=N 24 | enum FlamegraphProfiler { 25 | Init(c_int), 26 | Active(ProfilerGuard<'static>), 27 | } 28 | 29 | fn file_error_panic<T, U>(path: &Path) -> impl FnOnce(T) -> U { 30 | |_| panic!("Error on file `{}`", path.display()) 31 | } 32 | 33 | impl Profiler for FlamegraphProfiler { 34 | #[expect(clippy::unwrap_used)] 35 | fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) { 36 | if let Self::Init(frequency) = self { 37 | let guard = ProfilerGuard::new(*frequency).unwrap(); 38 | *self = Self::Active(guard); 39 | } 40 | } 41 | 42 | #[expect(clippy::unwrap_used)] 43 | fn stop_profiling(&mut self, _benchmark_id: &str, benchmark_dir: &Path) { 44 | std::fs::create_dir_all(benchmark_dir).unwrap(); 45 | let filename = "firewood-flamegraph.svg"; 46 | let flamegraph_path = benchmark_dir.join(filename); 47 | let flamegraph_file = 48 | File::create(&flamegraph_path).unwrap_or_else(file_error_panic(&flamegraph_path)); 49 | 50 | #[expect(clippy::unwrap_used)] 51 | if let Self::Active(profiler) = self { 52 | profiler 53 | .report() 54 | .build() 55 | .unwrap() 56 | .flamegraph(flamegraph_file) 57 | .unwrap_or_else(file_error_panic(&flamegraph_path)); 58 | } 59 | } 60 | } 61 | 62 | // This benchmark peeks into the merkle layer and times how long it takes 63 | // to insert NKEYS with a key length of KEYSIZE 64 | fn bench_merkle<const NKEYS: usize, const KEYSIZE: usize>(criterion: &mut Criterion) { 65 | let mut rng = StdRng::seed_from_u64(1234); 66 | 67 | criterion 68 | .benchmark_group("Merkle") 69 | .sample_size(30) 70 | .bench_function("insert", |b| { 71 | b.iter_batched( 72 | || { 73 | let store = Arc::new(MemStore::new(vec![])); 74 | let nodestore = NodeStore::new_empty_proposal(store); 75 | let merkle = Merkle::from(nodestore); 76 | 77 | let keys: Vec<Vec<u8>> = repeat_with(|| { 78 | (&mut rng) 79 | .sample_iter(&Alphanumeric) 80 | .take(KEYSIZE) 81 | .collect() 82 | }) 83 | .take(NKEYS) 84 | .collect(); 85 | 86 | (merkle, keys) 87 | }, 88 | #[expect(clippy::unwrap_used)] 89 | |(mut merkle, keys)| { 90 | keys.into_iter() 91 | .for_each(|key| merkle.insert(&key, Box::new(*b"v")).unwrap()); 92 | let _frozen = merkle.hash(); 93 | }, 94 | BatchSize::SmallInput, 95 | ); 96 | }); 97 | } 98 | 99 | #[expect(clippy::unwrap_used)] 100 | fn bench_db<const N: usize>(criterion: &mut Criterion) { 101 | const KEY_LEN: usize = 4; 102 | let mut rng = StdRng::seed_from_u64(1234); 103 | 104 | criterion 105 | .benchmark_group("Db") 106 | .sample_size(30) 107 | .bench_function("commit", |b| { 108 | b.to_async(tokio::runtime::Runtime::new().unwrap()) 109 | .iter_batched( 110 | || { 111 | let batch_ops: Vec<_> = repeat_with(|| { 112 | (&mut rng) 113 | .sample_iter(&Alphanumeric) 114 |
115 | .collect() 116 | }) 117 | .map(|key: Vec<_>| BatchOp::Put { 118 | key, 119 | value: vec![b'v'], 120 | }) 121 | .take(N) 122 | .collect(); 123 | batch_ops 124 | }, 125 | |batch_ops| async { 126 | let db_path = std::env::temp_dir(); 127 | let db_path = db_path.join("benchmark_db"); 128 | let cfg = DbConfig::builder(); 129 | 130 | let db = firewood::db::Db::new(db_path, cfg.clone().truncate(true).build()) 131 | .await 132 | .unwrap(); 133 | 134 | db.propose(batch_ops).await.unwrap().commit().await.unwrap() 135 | }, 136 | BatchSize::SmallInput, 137 | ); 138 | }); 139 | } 140 | 141 | criterion_group! { 142 | name = benches; 143 | config = Criterion::default().with_profiler(FlamegraphProfiler::Init(100)); 144 | targets = bench_merkle::<3, 4>, bench_merkle::<3, 32>, bench_db::<100> 145 | } 146 | 147 | criterion_main!(benches); 148 | -------------------------------------------------------------------------------- /firewood/examples/insert.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | // This example isn't an actual benchmark; it's just an example of how to 5 | // insert some random keys using the front-end API. 6 | 7 | use clap::Parser; 8 | use std::borrow::BorrowMut as _; 9 | use std::collections::HashMap; 10 | use std::error::Error; 11 | use std::num::NonZeroUsize; 12 | use std::ops::RangeInclusive; 13 | use std::time::Instant; 14 | 15 | use firewood::db::{Batch, BatchOp, Db, DbConfig}; 16 | use firewood::manager::RevisionManagerConfig; 17 | use firewood::v2::api::{Db as _, DbView, Proposal as _}; 18 | use rand::{Rng, SeedableRng as _}; 19 | use rand_distr::Alphanumeric; 20 | 21 | #[derive(Parser, Debug)] 22 | struct Args { 23 | #[arg(short, long, default_value = "1-64", value_parser = string_to_range)] 24 | keylen: RangeInclusive<usize>, 25 | #[arg(short, long, default_value = "32", value_parser = string_to_range)] 26 | valuelen: RangeInclusive<usize>, 27 | #[arg(short, long, default_value_t = 1)] 28 | batch_size: usize, 29 | #[arg(short, long, default_value_t = 100)] 30 | number_of_batches: usize, 31 | #[arg(short = 'p', long, default_value_t = 0, value_parser = clap::value_parser!(u16).range(0..=100))] 32 | read_verify_percent: u16, 33 | #[arg(short, long)] 34 | seed: Option<u64>, 35 | #[arg(short, long, default_value_t = NonZeroUsize::new(20480).expect("is non-zero"))] 36 | cache_size: NonZeroUsize, 37 | #[arg(short, long, default_value_t = true)] 38 | truncate: bool, 39 | #[arg(short, long, default_value_t = 128)] 40 | revisions: usize, 41 | } 42 | 43 | fn string_to_range(input: &str) -> Result<RangeInclusive<usize>, Box<dyn Error>> { 44 | //::Err> { 45 | let parts: Vec<&str> = input.split('-').collect(); 46 | #[expect(clippy::indexing_slicing)] 47 | match parts.len() { 48 | 1 => Ok(input.parse()?..=input.parse()?), 49 | 2 => Ok(parts[0].parse()?..=parts[1].parse()?), 50 | _ => Err("Too many dashes in input string".into()), 51 | } 52 | } 53 | 54 | /// cargo run --release --example insert 55 | #[tokio::main(flavor = "multi_thread")] 56 | async fn main() -> Result<(), Box<dyn Error>> { 57 | let args = Args::parse(); 58 | 59 | let mgrcfg = RevisionManagerConfig::builder() 60 | .node_cache_size(args.cache_size) 61 | .max_revisions(args.revisions) 62 | .build(); 63 | let cfg = DbConfig::builder() 64 | .truncate(args.truncate) 65 | .manager(mgrcfg) 66 | .build(); 67 | 68 | let db = Db::new("rev_db", cfg) 69 | .await 70 | .expect("db initiation should succeed"); 71 | 72 | let keys = args.batch_size;
73 | let start = Instant::now(); 74 | 75 | let mut rng = if let Some(seed) = args.seed { 76 | rand::rngs::StdRng::seed_from_u64(seed) 77 | } else { 78 | rand::rngs::StdRng::from_os_rng() 79 | }; 80 | 81 | for _ in 0..args.number_of_batches { 82 | let keylen = rng.random_range(args.keylen.clone()); 83 | let valuelen = rng.random_range(args.valuelen.clone()); 84 | let batch: Batch<Vec<u8>, Vec<u8>> = (0..keys) 85 | .map(|_| { 86 | ( 87 | rng.borrow_mut() 88 | .sample_iter(&Alphanumeric) 89 | .take(keylen) 90 | .collect::<Vec<u8>>(), 91 | rng.borrow_mut() 92 | .sample_iter(&Alphanumeric) 93 | .take(valuelen) 94 | .collect::<Vec<u8>>(), 95 | ) 96 | }) 97 | .map(|(key, value)| BatchOp::Put { key, value }) 98 | .collect(); 99 | 100 | let verify = get_keys_to_verify(&batch, args.read_verify_percent); 101 | 102 | #[expect(clippy::unwrap_used)] 103 | let proposal = db.propose(batch).await.unwrap(); 104 | proposal.commit().await?; 105 | verify_keys(&db, verify).await?; 106 | } 107 | 108 | let duration = start.elapsed(); 109 | println!( 110 | "Generated and inserted {} batches of size {keys} in {duration:?}", 111 | args.number_of_batches 112 | ); 113 | 114 | Ok(()) 115 | } 116 | 117 | fn get_keys_to_verify(batch: &Batch<Vec<u8>, Vec<u8>>, pct: u16) -> HashMap<Vec<u8>, Box<[u8]>> { 118 | if pct == 0 { 119 | HashMap::new() 120 | } else { 121 | batch 122 | .iter() 123 | .filter(|_last_key| rand::rng().random_range(0..=(100 - pct)) == 0) 124 | .map(|op| { 125 | if let BatchOp::Put { key, value } = op { 126 | (key.clone(), value.clone().into_boxed_slice()) 127 | } else { 128 | unreachable!() 129 | } 130 | }) 131 | .collect() 132 | } 133 | } 134 | 135 | async fn verify_keys( 136 | db: &impl firewood::v2::api::Db, 137 | verify: HashMap<Vec<u8>, Box<[u8]>>, 138 | ) -> Result<(), firewood::v2::api::Error> { 139 | if !verify.is_empty() { 140 | let hash = db.root_hash().await?.expect("root hash should exist"); 141 | let revision = db.revision(hash).await?; 142 | for (key, value) in verify { 143 | assert_eq!(Some(value), revision.val(key).await?); 144 | } 145 | } 146 | Ok(()) 147 | } 148 | -------------------------------------------------------------------------------- /firewood/src/range_proof.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2024, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use storage::Hashable; 5 | 6 | use crate::proof::Proof; 7 | 8 | /// A range proof proves that a given set of key-value pairs 9 | /// are in the trie with a given root hash. 10 | #[derive(Debug)] 11 | pub struct RangeProof<K: AsRef<[u8]>, V: AsRef<[u8]>, H: Hashable> { 12 | #[expect(dead_code)] 13 | pub(crate) start_proof: Option<Proof<H>>, 14 | #[expect(dead_code)] 15 | pub(crate) end_proof: Option<Proof<H>>, 16 | #[expect(dead_code)] 17 | pub(crate) key_values: Box<[(K, V)]>, 18 | } 19 | -------------------------------------------------------------------------------- /firewood/src/v2/emptydb.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use crate::proof::{Proof, ProofNode}; 5 | use crate::range_proof::RangeProof; 6 | 7 | use super::api::{Batch, Db, DbView, Error, HashKey, KeyType, ValueType}; 8 | use super::propose::{Proposal, ProposalBase}; 9 | use async_trait::async_trait; 10 | use futures::Stream; 11 | use std::sync::Arc; 12 | 13 | /// An EmptyDb is a simple implementation of api::Db 14 | /// that doesn't store any data. It contains a single
15 | /// HistoricalImpl that has no keys or values 16 | #[derive(Debug)] 17 | pub struct EmptyDb; 18 | 19 | /// HistoricalImpl is always empty, and there is only one, 20 | /// since nothing can be committed to an EmptyDb. 21 | #[derive(Debug)] 22 | pub struct HistoricalImpl; 23 | 24 | #[async_trait] 25 | impl Db for EmptyDb { 26 | type Historical = HistoricalImpl; 27 | 28 | type Proposal<'p> = Proposal<HistoricalImpl>; 29 | 30 | async fn revision(&self, hash_key: HashKey) -> Result<Arc<Self::Historical>, Error> { 31 | Err(Error::HashNotFound { provided: hash_key }) 32 | } 33 | 34 | async fn root_hash(&self) -> Result<Option<HashKey>, Error> { 35 | Ok(None) 36 | } 37 | 38 | async fn propose<'p, K, V>( 39 | &'p self, 40 | data: Batch<K, V>, 41 | ) -> Result<Arc<Self::Proposal<'p>>, Error> 42 | where 43 | K: KeyType, 44 | V: ValueType, 45 | { 46 | Ok(Proposal::new( 47 | ProposalBase::View(HistoricalImpl.into()), 48 | data, 49 | )) 50 | } 51 | 52 | async fn all_hashes(&self) -> Result<Vec<HashKey>, Error> { 53 | Ok(vec![]) 54 | } 55 | } 56 | 57 | #[async_trait] 58 | impl DbView for HistoricalImpl { 59 | type Stream<'a> = EmptyStreamer; 60 | 61 | async fn root_hash(&self) -> Result<Option<HashKey>, Error> { 62 | Ok(None) 63 | } 64 | 65 | async fn val<K: KeyType>(&self, _key: K) -> Result<Option<Box<[u8]>>, Error> { 66 | Ok(None) 67 | } 68 | 69 | async fn single_key_proof<K: KeyType>(&self, _key: K) -> Result<Proof<ProofNode>, Error> { 70 | Err(Error::RangeProofOnEmptyTrie) 71 | } 72 | 73 | async fn range_proof<K: KeyType>( 74 | &self, 75 | _first_key: Option<K>, 76 | _last_key: Option<K>, 77 | _limit: Option<usize>, 78 | ) -> Result<Option<RangeProof<Box<[u8]>, Box<[u8]>, ProofNode>>, Error> { 79 | Ok(None) 80 | } 81 | 82 | fn iter_option<K: KeyType>(&self, _first_key: Option<K>) -> Result<Self::Stream<'_>, Error> { 83 | Ok(EmptyStreamer {}) 84 | } 85 | } 86 | 87 | #[derive(Debug)] 88 | /// An empty streamer that doesn't stream any data 89 | pub struct EmptyStreamer; 90 | 91 | impl Stream for EmptyStreamer { 92 | type Item = Result<(Box<[u8]>, Vec<u8>), Error>; 93 | 94 | fn poll_next( 95 | self: std::pin::Pin<&mut Self>, 96 | _cx: &mut std::task::Context<'_>, 97 | ) -> std::task::Poll<Option<Self::Item>> { 98 | std::task::Poll::Ready(None) 99 | } 100 | } 101 | 102 | #[cfg(test)] 103 | #[expect(clippy::unwrap_used)] 104 | mod tests { 105 | use super::*; 106 | use crate::v2::api::{BatchOp, Proposal}; 107 | 108 | #[tokio::test] 109 | async fn basic_proposal() -> Result<(), Error> { 110 | let db = EmptyDb; 111 | 112 | let batch = vec![ 113 | BatchOp::Put { 114 | key: b"k", 115 | value: b"v", 116 | }, 117 | BatchOp::Delete { key: b"z" }, 118 | ]; 119 | 120 | let proposal = db.propose(batch).await?; 121 | 122 | assert_eq!( 123 | proposal.val(b"k").await.unwrap().unwrap(), 124 | Box::from(b"v".as_slice()) 125 | ); 126 | 127 | assert!(proposal.val(b"z").await.unwrap().is_none()); 128 | 129 | Ok(()) 130 | } 131 | 132 | #[tokio::test] 133 | async fn nested_proposal() -> Result<(), Error> { 134 | let db = EmptyDb; 135 | // create proposal1 which adds key "k" with value "v" and deletes "z" 136 | let batch = vec![ 137 | BatchOp::Put { 138 | key: b"k", 139 | value: b"v", 140 | }, 141 | BatchOp::Delete { key: b"z" }, 142 | ]; 143 | 144 | let proposal1 = db.propose(batch).await?; 145 | 146 | // create proposal2 which adds key "z" with value "undo" 147 | let proposal2 = proposal1 148 | .clone() 149 | .propose(vec![BatchOp::Put { 150 | key: b"z", 151 | value: "undo", 152 | }]) 153 | .await?; 154 | // both proposals still have (k,v) 155 | assert_eq!(proposal1.val(b"k").await.unwrap().unwrap().to_vec(), b"v"); 156 | assert_eq!(proposal2.val(b"k").await.unwrap().unwrap().to_vec(), b"v"); 157 | // only proposal1 doesn't have z 158 | assert!(proposal1.val(b"z").await.unwrap().is_none());
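// (proposal2's put of b"z" is not visible to proposal1: reads consult a proposal's own delta first and only fall through to its base, so a child proposal never modifies its parent)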
159 | // proposal2 has z with value "undo" 160 | assert_eq!( 161 | proposal2.val(b"z").await.unwrap().unwrap().to_vec(), 162 | b"undo" 163 | ); 164 | 165 | // create a proposal3 by adding the two proposals together, keeping the originals 166 | let proposal3 = proposal1.as_ref() + proposal2.as_ref(); 167 | assert_eq!(proposal3.val(b"k").await.unwrap().unwrap().to_vec(), b"v"); 168 | assert_eq!( 169 | proposal3.val(b"z").await.unwrap().unwrap().to_vec(), 170 | b"undo" 171 | ); 172 | 173 | // now consume proposal1 and proposal2 174 | proposal2.commit().await?; 175 | 176 | Ok(()) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /firewood/src/v2/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | /// The public API 5 | pub mod api; 6 | 7 | /// The proposal 8 | pub mod propose; 9 | 10 | /// An empty database implementation for testing 11 | pub mod emptydb; 12 | -------------------------------------------------------------------------------- /firewood/src/v2/propose.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::collections::BTreeMap; 5 | use std::fmt::Debug; 6 | use std::sync::Arc; 7 | 8 | use async_trait::async_trait; 9 | use futures::stream::Empty; 10 | 11 | use super::api::{KeyType, ValueType}; 12 | use crate::proof::{Proof, ProofNode}; 13 | use crate::range_proof::RangeProof; 14 | use crate::v2::api; 15 | 16 | #[derive(Clone, Debug)] 17 | pub(crate) enum KeyOp<V> { 18 | Put(V), 19 | Delete, 20 | } 21 | 22 | #[derive(Debug)] 23 | pub(crate) enum ProposalBase<T> { 24 | Proposal(Arc<Proposal<T>>), 25 | View(Arc<T>), 26 | } 27 | 28 | // Implement Clone because T doesn't need to be Clone 29 | // so an automatically derived Clone won't work 30 | impl<T> Clone for ProposalBase<T> { 31 | fn clone(&self) -> Self { 32 | match self { 33 | Self::Proposal(arg0) => Self::Proposal(arg0.clone()), 34 | Self::View(arg0) => Self::View(arg0.clone()), 35 | } 36 | } 37 | } 38 | 39 | /// A proposal is created either from the [[crate::v2::api::Db]] object 40 | /// or from another proposal. Proposals are owned by the 41 | /// caller. A proposal can only be committed if it has a 42 | /// base of the current revision of the [[crate::v2::api::Db]]. 43 | #[cfg_attr(doc, aquamarine::aquamarine)] 44 | /// ```mermaid 45 | /// graph LR 46 | /// subgraph historical 47 | /// direction BT 48 | /// PH1 --> R1((R1)) 49 | /// PH2 --> R1 50 | /// PH3 --> PH2 51 | /// end 52 | /// R1 ~~~|"proposals on R1<br>may not be committed"| R1 53 | /// subgraph committed_head 54 | /// direction BT 55 | /// R2 ~~~|"proposals on R2<br>may be committed"| R2 56 | /// PC4 --> R2((R2)) 57 | /// PC6 --> PC5 58 | /// PC5 --> R2 59 | /// PC6 ~~~|"Committing PC6<br>creates two revisions"| PC6 60 | /// end 61 | /// subgraph new_committing 62 | /// direction BT 63 | /// PN --> R3((R3)) 64 | /// R3 ~~~|"R3 does not yet exist"| R3 65 | /// PN ~~~|"this proposal<br>is committing"<br>--<br>could be<br>PC4 or PC5| PN 66 | /// end 67 | /// historical ==> committed_head 68 | /// committed_head ==> new_committing 69 | /// ``` 70 | #[derive(Debug)] 71 | pub struct Proposal<T> { 72 | pub(crate) base: ProposalBase<T>, 73 | pub(crate) delta: BTreeMap<Box<[u8]>, KeyOp<Box<[u8]>>>, 74 | } 75 | 76 | // Implement Clone because T doesn't need to be Clone 77 | // so an automatically derived Clone won't work 78 | impl<T> Clone for Proposal<T> { 79 | fn clone(&self) -> Self { 80 | Self { 81 | base: self.base.clone(), 82 | delta: self.delta.clone(), 83 | } 84 | } 85 | } 86 | 87 | impl<T> Proposal<T> { 88 | pub(crate) fn new<K: KeyType, V: ValueType>( 89 | base: ProposalBase<T>, 90 | batch: api::Batch<K, V>, 91 | ) -> Arc<Self> { 92 | let delta = batch 93 | .into_iter() 94 | .map(|op| match op { 95 | api::BatchOp::Put { key, value } => ( 96 | key.as_ref().to_vec().into_boxed_slice(), 97 | KeyOp::Put(value.as_ref().to_vec().into_boxed_slice()), 98 | ), 99 | api::BatchOp::Delete { key } => { 100 | (key.as_ref().to_vec().into_boxed_slice(), KeyOp::Delete) 101 | } 102 | api::BatchOp::DeleteRange { prefix } => { 103 | (prefix.as_ref().to_vec().into_boxed_slice(), KeyOp::Delete) 104 | } 105 | }) 106 | .collect::<BTreeMap<_, _>>(); 107 | 108 | Arc::new(Self { base, delta }) 109 | } 110 | } 111 | 112 | #[async_trait] 113 | impl<T: api::DbView + Send + Sync> api::DbView for Proposal<T> { 114 | // TODO: Replace with the correct stream type for an in-memory proposal implementation 115 | type Stream<'a> 116 | = Empty<Result<(Box<[u8]>, Vec<u8>), api::Error>> 117 | where 118 | T: 'a; 119 | 120 | async fn root_hash(&self) -> Result<Option<api::HashKey>, api::Error> { 121 | todo!(); 122 | } 123 | 124 | async fn val<K: KeyType>(&self, key: K) -> Result<Option<Box<[u8]>>, api::Error> { 125 | // see if this key is in this proposal 126 | match self.delta.get(key.as_ref()) { 127 | Some(change) => match change { 128 | // key in proposal, check for Put or Delete 129 | KeyOp::Put(val) => Ok(Some(val.clone())), 130 | KeyOp::Delete => Ok(None), // key was deleted in this proposal 131 | }, 132 | None => match &self.base { 133 | // key not in this proposal, so delegate to base 134 | ProposalBase::Proposal(p) => p.val(key).await, 135 | ProposalBase::View(view) => view.val(key).await, 136 | }, 137 | } 138 | } 139 | 140 | async fn single_key_proof<K: KeyType>(&self, _key: K) -> Result<Proof<ProofNode>, api::Error> { 141 | todo!(); 142 | } 143 | 144 | async fn range_proof<K: KeyType>( 145 | &self, 146 | _first_key: Option<K>, 147 | _last_key: Option<K>, 148 | _limit: Option<usize>, 149 | ) -> Result<Option<RangeProof<Box<[u8]>, Box<[u8]>, ProofNode>>, api::Error> { 150 | todo!(); 151 | } 152 | 153 | fn iter_option<K: KeyType>( 154 | &self, 155 | _first_key: Option<K>, 156 | ) -> Result<Self::Stream<'_>, api::Error> { 157 | todo!(); 158 | } 159 | } 160 | 161 | #[async_trait] 162 | impl<T: api::DbView + Send + Sync> api::Proposal for Proposal<T> { 163 | type Proposal = Proposal<T>; 164 | 165 | async fn propose<K: KeyType, V: ValueType>( 166 | self: Arc<Self>, 167 | data: api::Batch<K, V>, 168 | ) -> Result<Arc<Self::Proposal>, api::Error> { 169 | // find the Arc for this base proposal from the parent 170 | Ok(Proposal::new(ProposalBase::Proposal(self), data)) 171 | } 172 | 173 | async fn commit(self: Arc<Self>) -> Result<(), api::Error> { 174 | match &self.base { 175 | ProposalBase::Proposal(base) => base.clone().commit().await, 176 | ProposalBase::View(_) => Ok(()), 177 | } 178 | } 179 | } 180 | 181 | impl<T> std::ops::Add for Proposal<T> { 182 | type Output = Arc<Proposal<T>>; 183 | 184 | fn add(self, rhs: Self) -> Self::Output { 185 | let mut delta = self.delta.clone(); 186 | 187 | delta.extend(rhs.delta); 188 | 189 | let proposal = Proposal { 190 | base: self.base, 191 | delta, 192 | }; 193 | 194 | Arc::new(proposal) 195 | } 196 | } 197 | 198 | impl<T> std::ops::Add for &Proposal<T> { 199 | type Output = Arc<Proposal<T>>; 200 | 201 | fn add(self, rhs: Self) -> Self::Output
{ 202 | let mut delta = self.delta.clone(); 203 | 204 | delta.extend(rhs.delta.clone()); 205 | 206 | let proposal = Proposal { 207 | base: self.base.clone(), 208 | delta, 209 | }; 210 | 211 | Arc::new(proposal) 212 | } 213 | } 214 | -------------------------------------------------------------------------------- /firewood/tests/common/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::env::temp_dir; 5 | use std::fs::remove_file; 6 | use std::ops::Deref; 7 | use std::path::PathBuf; 8 | 9 | use firewood::db::{Db, DbConfig}; 10 | use typed_builder::TypedBuilder; 11 | 12 | #[derive(Clone, Debug, TypedBuilder)] 13 | pub struct TestDbCreator { 14 | #[builder(setter(into))] 15 | _test_name: String, 16 | #[builder(default, setter(into))] 17 | path: Option<PathBuf>, 18 | #[builder(default = DbConfig::builder().truncate(true).build())] 19 | _cfg: DbConfig, 20 | } 21 | 22 | pub struct TestDb { 23 | creator: TestDbCreator, 24 | preserve_on_drop: bool, 25 | db: Db, 26 | } 27 | 28 | impl TestDbCreator { 29 | #[expect(clippy::unwrap_used)] 30 | pub async fn _create(self) -> TestDb { 31 | let path = self.path.clone().unwrap_or_else(|| { 32 | let mut path: PathBuf = std::env::var_os("CARGO_TARGET_DIR") 33 | .unwrap_or(temp_dir().into()) 34 | .into(); 35 | if path.join("tmp").is_dir() { 36 | path.push("tmp"); 37 | } 38 | path.join(&self._test_name) 39 | }); 40 | let mut creator = self.clone(); 41 | creator.path = path.clone().into(); 42 | let db = Db::new(&path, self._cfg).await.unwrap(); 43 | TestDb { 44 | creator, 45 | db, 46 | preserve_on_drop: false, 47 | } 48 | } 49 | } 50 | 51 | impl Deref for TestDb { 52 | type Target = Db; 53 | 54 | fn deref(&self) -> &Self::Target { 55 | &self.db 56 | } 57 | } 58 | 59 | impl TestDb { 60 | /// reopen the database, consuming the old TestDb and giving you a new one 61 | pub async fn _reopen(mut self) -> Self { 62 | let mut creator = self.creator.clone(); 63 | self.preserve_on_drop = true; 64 | drop(self); 65 | creator._cfg.truncate = false; 66 | creator._create().await 67 | } 68 | } 69 | 70 | impl Drop for TestDb { 71 | fn drop(&mut self) { 72 | if !self.preserve_on_drop { 73 | #[expect(clippy::unwrap_used)] 74 | remove_file(self.creator.path.as_ref().unwrap()).unwrap(); 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /firewood/tests/v2api.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms.
3 | 4 | pub mod common; 5 | 6 | // #[ignore = "unimplemented"] 7 | // #[tokio::test(flavor = "multi_thread")] 8 | // #[expect(clippy::unwrap_used)] 9 | // async fn smoke() -> Result<(), Box<dyn std::error::Error>> { 10 | // let cfg = DbConfig::builder().truncate(true).build(); 11 | // let db = TestDbCreator::builder() 12 | // .cfg(cfg) 13 | // .test_name("smoke") 14 | // .build() 15 | // .create() 16 | // .await; 17 | 18 | // let empty_hash = db.root_hash().await?; 19 | // assert_ne!(empty_hash, [0; 32]); 20 | 21 | // // insert a single key/value 22 | // let (key, value) = (b"smoke", b"test"); 23 | // let batch_put = BatchOp::Put { key, value }; 24 | // let proposal = db.propose(vec![batch_put]).await?; 25 | // proposal.commit().await?; 26 | 27 | // // ensure the latest hash is different 28 | // let latest = db.root_hash().await?; 29 | // assert_ne!(empty_hash, latest); 30 | 31 | // // fetch the view of the latest 32 | // let view = db.revision(latest).await.unwrap(); 33 | 34 | // // check that the key/value is there 35 | // let got_value = view.val(key).await.unwrap().unwrap(); 36 | // assert_eq!(got_value, value); 37 | 38 | // // TODO: also fetch view of empty; this currently does not work, as you can't reference 39 | // // the empty hash 40 | // // let empty_view = db.revision(empty_hash).await.unwrap(); 41 | // // let value = empty_view.val(b"smoke").await.unwrap(); 42 | // // assert_eq!(value, None); 43 | 44 | // Ok(()) 45 | // } 46 | -------------------------------------------------------------------------------- /fwdctl/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fwdctl" 3 | version = "0.0.5" 4 | edition = "2024" 5 | rust-version = "1.85.0" 6 | 7 | [dependencies] 8 | firewood = { version = "0.0.5", path = "../firewood" } 9 | clap = { version = "4.5.0", features = ["cargo", "derive"] } 10 | env_logger = "0.11.2" 11 | log = "0.4.20" 12 | tokio = { version = "1.36.0", features = ["full"] } 13 | futures-util = "0.3.30" 14 | hex = "0.4.3" 15 | csv = "1.3.1" 16 | 17 | [dev-dependencies] 18 | anyhow = "1.0.79" 19 | assert_cmd = "2.0.13" 20 | predicates = "3.1.0" 21 | serial_test = "3.0.0" 22 | 23 | [lints.rust] 24 | unsafe_code = "deny" 25 | 26 | [lints.clippy] 27 | unwrap_used = "warn" 28 | indexing_slicing = "warn" 29 | explicit_deref_methods = "warn" 30 | missing_const_for_fn = "warn" 31 | -------------------------------------------------------------------------------- /fwdctl/README.md: -------------------------------------------------------------------------------- 1 | # fwdctl 2 | 3 | `fwdctl` is a small CLI designed to make it easy to experiment with firewood locally. 4 | 5 | ## Building locally 6 | 7 | ```sh 8 | cargo build --release --bin fwdctl 9 | ``` 10 | 11 | To use 12 | 13 | ```sh 14 | ./target/release/fwdctl -h 15 | ``` 16 | 17 | ## Supported commands 18 | 19 | * `fwdctl create`: Create a new firewood database. 20 | * `fwdctl get`: Get the value associated with a key in the database. 21 | * `fwdctl insert`: Insert a key/value pair into the generic key/value store. 22 | * `fwdctl delete`: Delete a key/value pair from the database. 23 | * `fwdctl root`: Get the root hash of the key/value trie. 24 | * `fwdctl dump`: Dump the contents of the key/value store. 25 | 26 | ## Examples 27 | 28 | * fwdctl create 29 | 30 | ```sh 31 | # Check available options when creating a database, including the defaults. 32 | $ fwdctl create -h 33 | # Create a new, blank instance of firewood using the default name "firewood".
34 | $ fwdctl create firewood 35 | # Look inside; there are several folders representing different components of firewood, including the WAL. 36 | $ ls firewood 37 | ``` 38 | 39 | * fwdctl get KEY 40 | 41 | ```sh 42 | # Get the value associated with a key in the database, if it exists. 43 | fwdctl get KEY 44 | ``` 45 | 46 | * fwdctl insert KEY VALUE 47 | 48 | ```sh 49 | # Insert a key/value pair into the database. 50 | fwdctl insert KEY VALUE 51 | ``` 52 | 53 | * fwdctl delete KEY 54 | 55 | ```sh 56 | # Delete a key from the database, along with the associated value. 57 | fwdctl delete KEY 58 | ``` 59 | -------------------------------------------------------------------------------- /fwdctl/src/create.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::{Args, value_parser}; 5 | use firewood::db::{Db, DbConfig}; 6 | use firewood::v2::api; 7 | 8 | #[derive(Args)] 9 | pub struct Options { 10 | /// DB Options 11 | #[arg( 12 | required = true, 13 | value_name = "NAME", 14 | help = "A name for the database. A good default name is firewood." 15 | )] 16 | pub name: String, 17 | 18 | #[arg( 19 | long, 20 | required = false, 21 | value_parser = value_parser!(bool), 22 | default_missing_value = "false", 23 | default_value_t = true, 24 | value_name = "TRUNCATE", 25 | help = "Whether to truncate the DB when opening it. If set, the DB will be reset and all its 26 | existing contents will be lost" 27 | )] 28 | pub truncate: bool, 29 | 30 | /// WAL Config 31 | #[arg( 32 | long, 33 | required = false, 34 | default_value_t = 22, 35 | value_name = "WAL_FILE_NBIT", 36 | help = "Size of WAL file." 37 | )] 38 | file_nbit: u64, 39 | 40 | #[arg( 41 | long, 42 | required = false, 43 | default_value_t = 100, 44 | value_name = "WAL_MAX_REVISIONS", 45 | help = "Number of revisions to keep from the past. This preserves a rolling window 46 | of the past N commits to the database." 47 | )] 48 | max_revisions: u32, 49 | } 50 | 51 | pub(super) fn new(opts: &Options) -> DbConfig { 52 | DbConfig::builder().truncate(opts.truncate).build() 53 | } 54 | 55 | pub(super) async fn run(opts: &Options) -> Result<(), api::Error> { 56 | let db_config = new(opts); 57 | log::debug!("database configuration parameters: \n{:?}\n", db_config); 58 | 59 | Db::new(opts.name.clone(), db_config).await?; 60 | println!("created firewood database in {:?}", opts.name); 61 | Ok(()) 62 | } 63 | -------------------------------------------------------------------------------- /fwdctl/src/delete.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | use firewood::db::{BatchOp, Db, DbConfig}; 6 | use firewood::v2::api::{self, Db as _, Proposal as _}; 7 | 8 | #[derive(Debug, Args)] 9 | pub struct Options { 10 | /// The key to delete 11 | #[arg(required = true, value_name = "KEY", help = "Key to delete")] 12 | pub key: String, 13 | 14 | /// The database path. Defaults to "firewood".
15 | #[arg( 16 | long, 17 | required = false, 18 | value_name = "DB_NAME", 19 | default_value_t = String::from("firewood"), 20 | help = "Name of the database" 21 | )] 22 | pub db: String, 23 | } 24 | 25 | pub(super) async fn run(opts: &Options) -> Result<(), api::Error> { 26 | log::debug!("deleting key {:?}", opts); 27 | let cfg = DbConfig::builder().truncate(false); 28 | 29 | let db = Db::new(opts.db.clone(), cfg.build()).await?; 30 | 31 | let batch: Vec<BatchOp<String, Vec<u8>>> = vec![BatchOp::Delete { 32 | key: opts.key.clone(), 33 | }]; 34 | let proposal = db.propose(batch).await?; 35 | proposal.commit().await?; 36 | 37 | println!("key {} deleted successfully", opts.key); 38 | Ok(()) 39 | } 40 | -------------------------------------------------------------------------------- /fwdctl/src/get.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | use std::str; 6 | 7 | use firewood::db::{Db, DbConfig}; 8 | use firewood::v2::api::{self, Db as _, DbView as _}; 9 | 10 | #[derive(Debug, Args)] 11 | pub struct Options { 12 | /// The key to get the value for 13 | #[arg(required = true, value_name = "KEY", help = "Key to get")] 14 | pub key: String, 15 | 16 | /// The database path. Defaults to "firewood". 17 | #[arg( 18 | long, 19 | required = false, 20 | value_name = "DB_NAME", 21 | default_value_t = String::from("firewood"), 22 | help = "Name of the database" 23 | )] 24 | pub db: String, 25 | } 26 | 27 | pub(super) async fn run(opts: &Options) -> Result<(), api::Error> { 28 | log::debug!("get key value pair {:?}", opts); 29 | let cfg = DbConfig::builder().truncate(false); 30 | 31 | let db = Db::new(opts.db.clone(), cfg.build()).await?; 32 | 33 | let hash = db.root_hash().await?; 34 | 35 | let Some(hash) = hash else { 36 | println!("Database is empty"); 37 | return Ok(()); 38 | }; 39 | 40 | let rev = db.revision(hash).await?; 41 | 42 | match rev.val(opts.key.as_bytes()).await { 43 | Ok(Some(val)) => { 44 | let s = String::from_utf8_lossy(val.as_ref()); 45 | println!("{s:?}"); 46 | Ok(()) 47 | } 48 | Ok(None) => { 49 | eprintln!("Key '{}' not found", opts.key); 50 | Ok(()) 51 | } 52 | Err(e) => Err(e), 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /fwdctl/src/graph.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | use firewood::db::{Db, DbConfig}; 6 | use firewood::v2::api; 7 | use std::io::stdout; 8 | 9 | #[derive(Debug, Args)] 10 | pub struct Options { 11 | /// The database path. Defaults to "firewood".
12 | #[arg( 13 | value_name = "DB_NAME", 14 | default_value_t = String::from("firewood"), 15 | help = "Name of the database" 16 | )] 17 | pub db: String, 18 | } 19 | 20 | pub(super) async fn run(opts: &Options) -> Result<(), api::Error> { 21 | log::debug!("graph database {:?}", opts); 22 | let cfg = DbConfig::builder().truncate(false); 23 | 24 | let db = Db::new(opts.db.clone(), cfg.build()).await?; 25 | db.dump(&mut stdout()).await?; 26 | Ok(()) 27 | } 28 | -------------------------------------------------------------------------------- /fwdctl/src/insert.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | use firewood::db::{BatchOp, Db, DbConfig}; 6 | use firewood::v2::api::{self, Db as _, Proposal as _}; 7 | 8 | #[derive(Debug, Args)] 9 | pub struct Options { 10 | /// The key to insert 11 | #[arg(required = true, value_name = "KEY", help = "Key to insert")] 12 | pub key: String, 13 | 14 | /// The value to insert 15 | #[arg(required = true, value_name = "VALUE", help = "Value to insert")] 16 | pub value: String, 17 | 18 | /// The database path. Defaults to "firewood". 19 | #[arg( 20 | long, 21 | required = false, 22 | value_name = "DB_NAME", 23 | default_value_t = String::from("firewood"), 24 | help = "Name of the database" 25 | )] 26 | pub db: String, 27 | } 28 | 29 | pub(super) async fn run(opts: &Options) -> Result<(), api::Error> { 30 | log::debug!("inserting key value pair {:?}", opts); 31 | let cfg = DbConfig::builder().truncate(false); 32 | 33 | let db = Db::new(opts.db.clone(), cfg.build()).await?; 34 | 35 | let batch: Vec<BatchOp<Vec<u8>, Vec<u8>>> = vec![BatchOp::Put { 36 | key: opts.key.clone().into(), 37 | value: opts.value.bytes().collect(), 38 | }]; 39 | let proposal = db.propose(batch).await?; 40 | proposal.commit().await?; 41 | 42 | println!("{}", opts.key); 43 | Ok(()) 44 | } 45 | -------------------------------------------------------------------------------- /fwdctl/src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![doc = include_str!("../README.md")] 5 | 6 | use clap::{Parser, Subcommand}; 7 | use firewood::v2::api; 8 | 9 | pub mod create; 10 | pub mod delete; 11 | pub mod dump; 12 | pub mod get; 13 | pub mod graph; 14 | pub mod insert; 15 | pub mod root; 16 | 17 | #[derive(Parser)] 18 | #[command(author, version, about, long_about = None)] 19 | #[command(propagate_version = true)] 20 | struct Cli { 21 | #[command(subcommand)] 22 | command: Commands, 23 | #[arg( 24 | long, 25 | short = 'l', 26 | required = false, 27 | help = "Log level. Respects RUST_LOG.", 28 | value_name = "LOG_LEVEL", 29 | num_args = 1, 30 | value_parser = ["debug", "info"], 31 | default_value_t = String::from("info"), 32 | )] 33 | log_level: String, 34 | } 35 | 36 | #[derive(Subcommand)] 37 | enum Commands { 38 | /// Create a new firewood database 39 | Create(create::Options), 40 | /// Insert a key/value pair into the database 41 | Insert(insert::Options), 42 | /// Get values associated with a key 43 | Get(get::Options), 44 | /// Delete values associated with a key 45 | Delete(delete::Options), 46 | /// Display key/value trie root hash 47 | Root(root::Options), 48 | /// Dump contents of key/value store 49 | Dump(dump::Options), 50 | /// Produce a dot file of the database 51 | Graph(graph::Options), 52 | } 53 | 54 | #[tokio::main] 55 | async fn main() -> Result<(), api::Error> { 56 | let cli = Cli::parse(); 57 | 58 | env_logger::init_from_env( 59 | env_logger::Env::default() 60 | .filter_or(env_logger::DEFAULT_FILTER_ENV, cli.log_level.to_string()), 61 | ); 62 | 63 | match &cli.command { 64 | Commands::Create(opts) => create::run(opts).await, 65 | Commands::Insert(opts) => insert::run(opts).await, 66 | Commands::Get(opts) => get::run(opts).await, 67 | Commands::Delete(opts) => delete::run(opts).await, 68 | Commands::Root(opts) => root::run(opts).await, 69 | Commands::Dump(opts) => dump::run(opts).await, 70 | Commands::Graph(opts) => graph::run(opts).await, 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /fwdctl/src/root.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | use std::str; 6 | 7 | use firewood::db::{Db, DbConfig}; 8 | use firewood::v2::api::{self, Db as _}; 9 | 10 | #[derive(Debug, Args)] 11 | pub struct Options { 12 | /// The database path. Defaults to "firewood".
13 | #[arg( 14 | long, 15 | required = false, 16 | value_name = "DB_NAME", 17 | default_value_t = String::from("firewood"), 18 | help = "Name of the database" 19 | )] 20 | pub db: String, 21 | } 22 | 23 | pub(super) async fn run(opts: &Options) -> Result<(), api::Error> { 24 | let cfg = DbConfig::builder().truncate(false); 25 | 26 | let db = Db::new(opts.db.clone(), cfg.build()).await?; 27 | 28 | let hash = db.root_hash().await?; 29 | 30 | println!("{hash:?}"); 31 | Ok(()) 32 | } 33 | -------------------------------------------------------------------------------- /grpc-testtool/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rpc" 3 | version = "0.0.4" 4 | edition = "2024" 5 | rust-version = "1.85.0" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [[bin]] 10 | name = "process-server" 11 | test = false 12 | bench = false 13 | 14 | [[bin]] 15 | name = "client" 16 | test = false 17 | bench = false 18 | 19 | [dependencies] 20 | firewood = { version = "0.0.4", path = "../firewood" } 21 | prost = "0.13.1" 22 | tokio = { version = "1.36.0", features = ["sync", "rt-multi-thread"] } 23 | tonic = { version = "0.13.0", features = ["tls-ring"] } 24 | tracing = { version = "0.1.40" } 25 | clap = { version = "4.5.0", features = ["derive"] } 26 | log = "0.4.20" 27 | env_logger = "0.11.2" 28 | chrono = "0.4.34" 29 | serde_json = "1.0.113" 30 | serde = { version = "1.0.196", features = ["derive"] } 31 | 32 | [build-dependencies] 33 | tonic-build = "0.13.0" 34 | 35 | [dev-dependencies] 36 | criterion = {version = "0.6.0", features = ["async_tokio"]} 37 | rand = "0.9.1" 38 | rand_distr = "0.5.0" 39 | 40 | [lints.rust] 41 | unsafe_code = "deny" 42 | 43 | [[bench]] 44 | name = "insert" 45 | harness = false 46 | 47 | [lints.clippy] 48 | unwrap_used = "warn" 49 | indexing_slicing = "warn" 50 | explicit_deref_methods = "warn" 51 | missing_const_for_fn = "warn" 52 | 53 | [package.metadata.cargo-machete] 54 | ignored = ["prost", "tonic_build"] 55 | 56 | -------------------------------------------------------------------------------- /grpc-testtool/README.md: -------------------------------------------------------------------------------- 1 | # Firewood process-server implementation 2 | 3 | This service is a plug-in for the test tool orchestrator (currently closed source). 4 | The test tool is used for both performance and correctness testing, especially for the syncer. 5 | 6 | ```mermaid 7 | sequenceDiagram 8 | Orchestrator->>ProcessServer: Startup (via command line) 9 | ProcessServer->>Firewood: Open or create database 10 | Orchestrator->>+ProcessServer: GRPC request 11 | ProcessServer->>+Firewood: Native API Call 12 | Firewood->>-ProcessServer: Response 13 | ProcessServer->>-Orchestrator: Response 14 | ``` 15 | 16 | There are 3 RPC specs that must be implemented: 17 | 18 | 1. The rpcdb proto, which supports simple operations like Has, Get, Put, Delete, and some iterators. 19 | 2. The sync proto, which supports retrieving range and change proofs 20 | 3. The process-server proto, which currently only retrieves metrics 21 | 22 | ## Running 23 | 24 | To test the release version of firewood, just run `RUST_MIN_STACK=7000000 cargo bench`. If you make some changes and then 25 | run it again, it will give you a report showing how much it sped up or slowed down. 
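For example, a typical comparison run looks like this (criterion saves the first run as its baseline and reports the delta on the next run):

```sh
# establish a baseline
RUST_MIN_STACK=7000000 cargo bench
# ...make your changes to firewood...
# re-run; the report shows the speedup or slowdown relative to the baseline
RUST_MIN_STACK=7000000 cargo bench
```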
26 | 27 | If you want to run this against merkledb, first build the process-server following the instructions in 28 | the [merkledb-tester](https://github.com/ava-labs/merkledb-tester) repository, then modify your PATH so 29 | that the merkledb `process-server` executable is found first, then run `cargo bench`. 30 | -------------------------------------------------------------------------------- /grpc-testtool/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::path::PathBuf; 5 | 6 | fn main() -> Result<(), Box<dyn std::error::Error>> { 7 | // we want to import these proto files 8 | let import_protos = ["sync", "rpcdb", "process-server"]; 9 | 10 | let protos: Box<[PathBuf]> = import_protos 11 | .into_iter() 12 | .map(|proto| PathBuf::from(format!("proto/{proto}/{proto}.proto"))) 13 | .collect(); 14 | 15 | // go through each proto and build it; also let cargo know to rerun this if the file changes 16 | for proto in protos.iter() { 17 | tonic_build::compile_protos(proto)?; 18 | 19 | // this improves recompile times; we only rerun tonic if any of these files change 20 | println!("cargo:rerun-if-changed={}", proto.display()); 21 | } 22 | 23 | Ok(()) 24 | } 25 | -------------------------------------------------------------------------------- /grpc-testtool/proto/merkle/merkle.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package merkle; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | // Methods on this service return status code NOT_FOUND if a requested 8 | // view, iterator or root hash is not found. 9 | service Merkle { 10 | // --- Proposals --- 11 | rpc NewProposal(NewProposalRequest) returns (NewProposalResponse); 12 | rpc ProposalCommit(ProposalCommitRequest) returns (google.protobuf.Empty); 13 | 14 | // --- Views --- 15 | rpc NewView(NewViewRequest) returns (NewViewResponse); 16 | 17 | // --- Reads --- 18 | // The methods below may be called with an ID that corresponds to either a (committable) proposal 19 | // or (non-committable) historical view. 20 | rpc ViewHas(ViewHasRequest) returns (ViewHasResponse); 21 | rpc ViewGet(ViewGetRequest) returns (ViewGetResponse); 22 | 23 | // --- Iterators --- 24 | rpc ViewNewIteratorWithStartAndPrefix(ViewNewIteratorWithStartAndPrefixRequest) returns (ViewNewIteratorWithStartAndPrefixResponse); 25 | // Returns status code OUT_OF_RANGE when the iterator is done 26 | rpc IteratorNext(IteratorNextRequest) returns (IteratorNextResponse); 27 | rpc IteratorError(IteratorErrorRequest) returns (google.protobuf.Empty); 28 | // Iterator can't be used (even to check error) after release. 29 | rpc IteratorRelease(IteratorReleaseRequest) returns (google.protobuf.Empty); 30 | rpc ViewRelease(ViewReleaseRequest) returns (google.protobuf.Empty); 31 | } 32 | 33 | message NewProposalRequest { 34 | // If not given, the parent view is the current database revision. 35 | optional uint32 parent_id = 1; 36 | repeated PutRequest puts = 2; 37 | // The keys being deleted.
38 | repeated bytes deletes = 3; 39 | } 40 | 41 | message NewProposalResponse { 42 | uint32 id = 1; 43 | } 44 | 45 | message ProposalCommitRequest { 46 | uint32 id = 1; 47 | } 48 | 49 | message NewViewRequest { 50 | bytes root_hash = 1; 51 | } 52 | 53 | message NewViewResponse { 54 | uint32 id = 1; 55 | } 56 | 57 | message ViewHasRequest { 58 | uint32 id = 1; 59 | bytes key = 2; 60 | } 61 | 62 | message ViewHasResponse { 63 | bool has = 1; 64 | } 65 | 66 | message ViewGetRequest { 67 | uint32 id = 1; 68 | bytes key = 2; 69 | } 70 | 71 | message ViewGetResponse { 72 | bytes value = 1; 73 | } 74 | 75 | message ViewNewIteratorWithStartAndPrefixRequest { 76 | uint64 id = 1; 77 | bytes start = 2; 78 | bytes prefix = 3; 79 | } 80 | 81 | message ViewNewIteratorWithStartAndPrefixResponse { 82 | uint64 id = 1; 83 | } 84 | 85 | message IteratorNextRequest { 86 | uint64 id = 1; 87 | } 88 | 89 | message IteratorNextResponse { 90 | PutRequest data = 1; 91 | } 92 | 93 | message IteratorErrorRequest { 94 | uint64 id = 1; 95 | } 96 | 97 | message IteratorReleaseRequest { 98 | uint64 id = 1; 99 | } 100 | 101 | message ViewReleaseRequest { 102 | uint32 id = 1; 103 | } 104 | 105 | // TODO import this from the rpcdb package. 106 | message PutRequest { 107 | bytes key = 1; 108 | bytes value = 2; 109 | } 110 | -------------------------------------------------------------------------------- /grpc-testtool/proto/process-server/process-server.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package process; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | option go_package = "github.com/ava-labs/merkledb-tester/proto/pb/process"; 8 | 9 | service ProcessServerService { 10 | rpc Metrics(google.protobuf.Empty) returns (MetricsResponse); 11 | } 12 | 13 | message MetricsResponse { 14 | string metrics = 1; 15 | } 16 | -------------------------------------------------------------------------------- /grpc-testtool/proto/rpcdb/rpcdb.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package rpcdb; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | option go_package = "github.com/ava-labs/avalanchego/proto/pb/rpcdb"; 8 | 9 | service Database { 10 | rpc Has(HasRequest) returns (HasResponse); 11 | rpc Get(GetRequest) returns (GetResponse); 12 | rpc Put(PutRequest) returns (PutResponse); 13 | rpc Delete(DeleteRequest) returns (DeleteResponse); 14 | rpc Compact(CompactRequest) returns (CompactResponse); 15 | rpc Close(CloseRequest) returns (CloseResponse); 16 | rpc HealthCheck(google.protobuf.Empty) returns (HealthCheckResponse); 17 | rpc WriteBatch(WriteBatchRequest) returns (WriteBatchResponse); 18 | rpc NewIteratorWithStartAndPrefix(NewIteratorWithStartAndPrefixRequest) returns (NewIteratorWithStartAndPrefixResponse); 19 | rpc IteratorNext(IteratorNextRequest) returns (IteratorNextResponse); 20 | rpc IteratorError(IteratorErrorRequest) returns (IteratorErrorResponse); 21 | rpc IteratorRelease(IteratorReleaseRequest) returns (IteratorReleaseResponse); 22 | } 23 | 24 | enum Error { 25 | // ERROR_UNSPECIFIED is used to indicate that no error occurred. 
26 | ERROR_UNSPECIFIED = 0; 27 | ERROR_CLOSED = 1; 28 | ERROR_NOT_FOUND = 2; 29 | } 30 | 31 | message HasRequest { 32 | bytes key = 1; 33 | } 34 | 35 | message HasResponse { 36 | bool has = 1; 37 | Error err = 2; 38 | } 39 | 40 | message GetRequest { 41 | bytes key = 1; 42 | } 43 | 44 | message GetResponse { 45 | bytes value = 1; 46 | Error err = 2; 47 | } 48 | 49 | message PutRequest { 50 | bytes key = 1; 51 | bytes value = 2; 52 | } 53 | 54 | message PutResponse { 55 | Error err = 1; 56 | } 57 | 58 | message DeleteRequest { 59 | bytes key = 1; 60 | } 61 | 62 | message DeleteResponse { 63 | Error err = 1; 64 | } 65 | 66 | message CompactRequest { 67 | bytes start = 1; 68 | bytes limit = 2; 69 | } 70 | 71 | message CompactResponse { 72 | Error err = 1; 73 | } 74 | 75 | message CloseRequest {} 76 | 77 | message CloseResponse { 78 | Error err = 1; 79 | } 80 | 81 | message WriteBatchRequest { 82 | repeated PutRequest puts = 1; 83 | repeated DeleteRequest deletes = 2; 84 | } 85 | 86 | message WriteBatchResponse { 87 | Error err = 1; 88 | } 89 | 90 | message NewIteratorRequest {} 91 | 92 | message NewIteratorWithStartAndPrefixRequest { 93 | bytes start = 1; 94 | bytes prefix = 2; 95 | } 96 | 97 | message NewIteratorWithStartAndPrefixResponse { 98 | uint64 id = 1; 99 | } 100 | 101 | message IteratorNextRequest { 102 | uint64 id = 1; 103 | } 104 | 105 | message IteratorNextResponse { 106 | repeated PutRequest data = 1; 107 | } 108 | 109 | message IteratorErrorRequest { 110 | uint64 id = 1; 111 | } 112 | 113 | message IteratorErrorResponse { 114 | Error err = 1; 115 | } 116 | 117 | message IteratorReleaseRequest { 118 | uint64 id = 1; 119 | } 120 | 121 | message IteratorReleaseResponse { 122 | Error err = 1; 123 | } 124 | 125 | message HealthCheckResponse { 126 | bytes details = 1; 127 | } 128 | -------------------------------------------------------------------------------- /grpc-testtool/proto/sync/sync.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package sync; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | option go_package = "github.com/ava-labs/avalanchego/proto/pb/sync"; 8 | 9 | // Request represents a request for information during syncing. 10 | message Request { 11 | oneof message { 12 | SyncGetRangeProofRequest range_proof_request = 1; 13 | SyncGetChangeProofRequest change_proof_request = 2; 14 | } 15 | } 16 | 17 | // The interface required by an x/sync/SyncManager for syncing. 18 | // Note this service definition only exists for use in tests. 19 | // A database shouldn't expose this over the internet, as it 20 | // allows for reading/writing to the database. 
21 | service DB { 22 | rpc GetMerkleRoot(google.protobuf.Empty) returns (GetMerkleRootResponse); 23 | 24 | rpc GetProof(GetProofRequest) returns (GetProofResponse); 25 | 26 | rpc GetChangeProof(GetChangeProofRequest) returns (GetChangeProofResponse); 27 | rpc VerifyChangeProof(VerifyChangeProofRequest) returns (VerifyChangeProofResponse); 28 | rpc CommitChangeProof(CommitChangeProofRequest) returns (google.protobuf.Empty); 29 | 30 | rpc GetRangeProof(GetRangeProofRequest) returns (GetRangeProofResponse); 31 | rpc CommitRangeProof(CommitRangeProofRequest) returns (google.protobuf.Empty); 32 | } 33 | 34 | message GetMerkleRootResponse { 35 | bytes root_hash = 1; 36 | } 37 | 38 | message GetProofRequest { 39 | bytes key = 1; 40 | } 41 | 42 | message GetProofResponse { 43 | Proof proof = 1; 44 | } 45 | 46 | message Proof { 47 | bytes key = 1; 48 | MaybeBytes value = 2; 49 | repeated ProofNode proof = 3; 50 | } 51 | 52 | // For use in sync client, which has a restriction on the size of 53 | // the response. GetChangeProof in the DB service doesn't. 54 | message SyncGetChangeProofRequest { 55 | bytes start_root_hash = 1; 56 | bytes end_root_hash = 2; 57 | MaybeBytes start_key = 3; 58 | MaybeBytes end_key = 4; 59 | uint32 key_limit = 5; 60 | uint32 bytes_limit = 6; 61 | } 62 | 63 | message SyncGetChangeProofResponse { 64 | oneof response { 65 | ChangeProof change_proof = 1; 66 | RangeProof range_proof = 2; 67 | } 68 | } 69 | 70 | message GetChangeProofRequest { 71 | bytes start_root_hash = 1; 72 | bytes end_root_hash = 2; 73 | MaybeBytes start_key = 3; 74 | MaybeBytes end_key = 4; 75 | uint32 key_limit = 5; 76 | } 77 | 78 | message GetChangeProofResponse { 79 | oneof response { 80 | ChangeProof change_proof = 1; 81 | // True iff server errored with merkledb.ErrInsufficientHistory. 82 | bool root_not_present = 2; 83 | } 84 | } 85 | 86 | message VerifyChangeProofRequest { 87 | ChangeProof proof = 1; 88 | MaybeBytes start_key = 2; 89 | MaybeBytes end_key = 3; 90 | bytes expected_root_hash = 4; 91 | } 92 | 93 | message VerifyChangeProofResponse { 94 | // If empty, there was no error. 95 | string error = 1; 96 | } 97 | 98 | message CommitChangeProofRequest { 99 | ChangeProof proof = 1; 100 | } 101 | 102 | // For use in sync client, which has a restriction on the size of 103 | // the response. GetRangeProof in the DB service doesn't. 
104 | message SyncGetRangeProofRequest { 105 | bytes root_hash = 1; 106 | MaybeBytes start_key = 2; 107 | MaybeBytes end_key = 3; 108 | uint32 key_limit = 4; 109 | uint32 bytes_limit = 5; 110 | } 111 | 112 | message GetRangeProofRequest { 113 | bytes root_hash = 1; 114 | MaybeBytes start_key = 2; 115 | MaybeBytes end_key = 3; 116 | uint32 key_limit = 4; 117 | } 118 | 119 | message GetRangeProofResponse { 120 | RangeProof proof = 1; 121 | } 122 | 123 | message CommitRangeProofRequest { 124 | MaybeBytes start_key = 1; 125 | RangeProof range_proof = 2; 126 | } 127 | 128 | message ChangeProof { 129 | repeated ProofNode start_proof = 1; 130 | repeated ProofNode end_proof = 2; 131 | repeated KeyChange key_changes = 3; 132 | } 133 | 134 | message RangeProof { 135 | repeated ProofNode start = 1; 136 | repeated ProofNode end = 2; 137 | repeated KeyValue key_values = 3; 138 | } 139 | 140 | message ProofNode { 141 | SerializedPath key = 1; 142 | MaybeBytes value_or_hash = 2; 143 | map<uint32, bytes> children = 3; 144 | } 145 | 146 | message KeyChange { 147 | bytes key = 1; 148 | MaybeBytes value = 2; 149 | } 150 | 151 | message SerializedPath { 152 | uint64 nibble_length = 1; 153 | bytes value = 2; 154 | } 155 | 156 | message MaybeBytes { 157 | bytes value = 1; 158 | // If true, this is None. 159 | // Otherwise this is Some. 160 | bool is_nothing = 2; 161 | } 162 | 163 | message KeyValue { 164 | bytes key = 1; 165 | bytes value = 2; 166 | } 167 | -------------------------------------------------------------------------------- /grpc-testtool/src/bin/client.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | fn main() { 5 | println!("Hello from {}", file!()); 6 | } 7 | -------------------------------------------------------------------------------- /grpc-testtool/src/bin/process-server.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms.
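// Example invocation (a sketch; the long flag names follow from the clap field
// names below, and the port and paths are made up for illustration):
//
//   process-server --grpc-port 50051 --db-dir /tmp/firewood-test \
//       --log-level info --config '{"history_length": 100}'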
3 | 4 | use chrono::Local; 5 | use clap::Parser; 6 | use env_logger::Builder; 7 | use log::{LevelFilter, info}; 8 | use rpc::DatabaseService; 9 | use rpc::process_server::process_server_service_server::ProcessServerServiceServer; 10 | use rpc::rpcdb::database_server::DatabaseServer as RpcServer; 11 | use rpc::sync::db_server::DbServer as SyncServer; 12 | use serde::Deserialize; 13 | use std::error::Error; 14 | use std::io::Write; 15 | use std::net::IpAddr::V4; 16 | use std::net::Ipv4Addr; 17 | use std::path::PathBuf; 18 | use std::str::FromStr; 19 | use std::sync::Arc; 20 | use tonic::transport::Server; 21 | 22 | #[derive(Clone, Debug, Deserialize)] 23 | struct Options { 24 | #[serde(default = "Options::history_length_default")] 25 | history_length: u32, 26 | } 27 | 28 | impl Options { 29 | // used in two cases: 30 | // serde deserializes Options and there was no history_length 31 | // OR 32 | // Options was not present 33 | const fn history_length_default() -> u32 { 34 | 100 35 | } 36 | } 37 | 38 | impl FromStr for Options { 39 | type Err = String; 40 | 41 | fn from_str(s: &str) -> Result<Self, Self::Err> { 42 | serde_json::from_str(s).map_err(|e| format!("error parsing options: {}", e)) 43 | } 44 | } 45 | 46 | /// A gRPC server that can be plugged into the generic testing framework for merkledb 47 | 48 | #[derive(Debug, Parser)] 49 | #[command(author, version, about, long_about = None)] 50 | struct Opts { 51 | #[arg(short, long)] 52 | /// Port the gRPC server listens on 53 | grpc_port: u16, 54 | 55 | #[arg(short, long)] 56 | db_dir: PathBuf, 57 | 58 | #[arg(short, long, default_value_t = LevelFilter::Info)] 59 | log_level: LevelFilter, 60 | 61 | #[arg(short, long)] 62 | config: Option<Options>, 63 | } 64 | 65 | #[tokio::main] 66 | async fn main() -> Result<(), Box<dyn Error>> { 67 | // parse command line options 68 | let args = Opts::parse(); 69 | 70 | // configure the logger 71 | Builder::new() 72 | .format(|buf, record| { 73 | writeln!( 74 | buf, 75 | "{} [{}] - {}", 76 | Local::now().format("%Y-%m-%dT%H:%M:%S"), 77 | record.level(), 78 | record.args() 79 | ) 80 | }) 81 | .format_target(true) 82 | .filter(None, args.log_level) 83 | .init(); 84 | 85 | // tracing_subscriber::fmt::init(); 86 | 87 | // log to the file and to stderr 88 | info!("Starting up: Listening on {}", args.grpc_port); 89 | 90 | let svc = Arc::new( 91 | DatabaseService::new( 92 | args.db_dir, 93 | args.config 94 | .map(|o| o.history_length) 95 | .unwrap_or_else(Options::history_length_default), 96 | ) 97 | .await?, 98 | ); 99 | 100 | // TODO: graceful shutdown 101 | Ok(Server::builder() 102 | .trace_fn(|_m| tracing::debug_span!("process-server")) 103 | .add_service(RpcServer::from_arc(svc.clone())) 104 | .add_service(SyncServer::from_arc(svc.clone())) 105 | .add_service(ProcessServerServiceServer::from_arc(svc.clone())) 106 | .serve(std::net::SocketAddr::new( 107 | V4(Ipv4Addr::LOCALHOST), 108 | args.grpc_port, 109 | )) 110 | .await?) 111 | } 112 | -------------------------------------------------------------------------------- /grpc-testtool/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms.
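// The modules below wrap tonic's generated code for each proto package. A
// hypothetical client-side use of the generated stubs (module and method names
// follow tonic's codegen conventions; the address is an example):
//
//   let mut client =
//       rpcdb::database_client::DatabaseClient::connect("http://127.0.0.1:50051").await?;
//   let resp = client.get(rpcdb::GetRequest { key: b"k".to_vec() }).await?;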
3 | 4 | #![doc = include_str!("../README.md")] 5 | 6 | pub mod sync { 7 | #![expect(clippy::missing_const_for_fn)] 8 | tonic::include_proto!("sync"); 9 | } 10 | 11 | pub mod rpcdb { 12 | #![expect(clippy::missing_const_for_fn)] 13 | tonic::include_proto!("rpcdb"); 14 | } 15 | 16 | pub mod process_server { 17 | #![expect(clippy::missing_const_for_fn)] 18 | tonic::include_proto!("process"); 19 | } 20 | 21 | pub mod service; 22 | 23 | pub use service::Database as DatabaseService; 24 | -------------------------------------------------------------------------------- /grpc-testtool/src/service.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use firewood::db::{Db, DbConfig}; 5 | use firewood::v2::api::{Db as _, Error}; 6 | 7 | use std::collections::HashMap; 8 | use std::ops::Deref; 9 | use std::path::Path; 10 | use std::sync::Arc; 11 | use std::sync::atomic::{AtomicU64, Ordering}; 12 | use tokio::sync::Mutex; 13 | use tonic::Status; 14 | 15 | pub mod database; 16 | pub mod db; 17 | pub mod process; 18 | 19 | trait IntoStatusResultExt<T> { 20 | fn into_status_result(self) -> Result<T, Box<Status>>; 21 | } 22 | 23 | impl<T> IntoStatusResultExt<T> for Result<T, Error> { 24 | // We map errors from bad arguments into Status::invalid_argument; all other errors are Status::internal errors 25 | fn into_status_result(self) -> Result<T, Box<Status>> { 26 | self.map_err(|err| match err { 27 | Error::IncorrectRootHash { .. } | Error::HashNotFound { .. } | Error::RangeTooSmall => { 28 | Box::new(Status::invalid_argument(err.to_string())) 29 | } 30 | Error::IO { .. } | Error::InternalError { .. } => { 31 | Box::new(Status::internal(err.to_string())) 32 | } 33 | _ => Box::new(Status::internal(err.to_string())), 34 | }) 35 | } 36 | } 37 | 38 | #[derive(Debug)] 39 | pub struct Database { 40 | db: Db, 41 | iterators: Arc<Mutex<Iterators>>, 42 | } 43 | 44 | impl Database { 45 | pub async fn new<P: AsRef<Path>>(path: P, _history_length: u32) -> Result<Self, Error> { 46 | // try to create the parents for this directory, but it's okay if it fails; it will get caught in Db::new 47 | std::fs::create_dir_all(&path).ok(); 48 | // TODO: truncate should be false 49 | // see https://github.com/ava-labs/firewood/issues/418 50 | let cfg = DbConfig::builder().truncate(true).build(); 51 | 52 | let db = Db::new(path, cfg).await?; 53 | 54 | Ok(Self { 55 | db, 56 | iterators: Default::default(), 57 | }) 58 | } 59 | } 60 | 61 | impl Deref for Database { 62 | type Target = Db; 63 | 64 | fn deref(&self) -> &Self::Target { 65 | &self.db 66 | } 67 | } 68 | 69 | impl Database { 70 | async fn latest(&self) -> Result<Arc<<Db as firewood::v2::api::Db>::Historical>, Error> { 71 | let root_hash = self.root_hash().await?.ok_or(Error::LatestIsEmpty)?; 72 | self.revision(root_hash).await 73 | } 74 | } 75 | 76 | // TODO: implement Iterator 77 | #[derive(Debug)] 78 | struct Iter; 79 | 80 | #[derive(Default, Debug)] 81 | struct Iterators { 82 | map: HashMap<u64, Iter>, 83 | next_id: AtomicU64, 84 | } 85 | 86 | impl Iterators { 87 | fn insert(&mut self, iter: Iter) -> u64 { 88 | let id = self.next_id.fetch_add(1, Ordering::Relaxed); 89 | self.map.insert(id, iter); 90 | id 91 | } 92 | 93 | fn _get(&self, id: u64) -> Option<&Iter> { 94 | self.map.get(&id) 95 | } 96 | 97 | fn remove(&mut self, id: u64) { 98 | self.map.remove(&id); 99 | } 100 | } 101 | 102 | #[derive(Debug)] 103 | pub struct ProcessServer; 104 | -------------------------------------------------------------------------------- /grpc-testtool/src/service/db.rs:
-------------------------------------------------------------------------------- /grpc-testtool/src/service/db.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | use super::Database;
5 | use crate::sync::db_server::Db as DbServerTrait;
6 | use crate::sync::{
7 |     CommitChangeProofRequest, CommitRangeProofRequest, GetChangeProofRequest,
8 |     GetChangeProofResponse, GetMerkleRootResponse, GetProofRequest, GetProofResponse,
9 |     GetRangeProofRequest, GetRangeProofResponse, VerifyChangeProofRequest,
10 |     VerifyChangeProofResponse,
11 | };
12 | use tonic::{Request, Response, Status, async_trait};
13 | 
14 | #[async_trait]
15 | impl DbServerTrait for Database {
16 |     #[tracing::instrument(level = "trace")]
17 |     async fn get_merkle_root(
18 |         &self,
19 |         _request: Request<()>,
20 |     ) -> Result<Response<GetMerkleRootResponse>, Status> {
21 |         todo!()
22 |         // let root_hash = self.db.root_hash().await.into_status_result().map_err(|e| *e)?.to_vec();
23 |         //
24 |         // let response = GetMerkleRootResponse { root_hash };
25 |         //
26 |         // Ok(Response::new(response))
27 |     }
28 | 
29 |     #[tracing::instrument(level = "trace")]
30 |     async fn get_proof(
31 |         &self,
32 |         _request: Request<GetProofRequest>,
33 |     ) -> Result<Response<GetProofResponse>, Status> {
34 |         todo!()
35 |         // let GetProofRequest { key: _ } = request.into_inner();
36 |         // let _revision = self.latest().await.into_status_result().map_err(|e| *e)?;
37 |     }
38 | 
39 |     #[tracing::instrument(level = "trace")]
40 |     async fn get_change_proof(
41 |         &self,
42 |         _request: Request<GetChangeProofRequest>,
43 |     ) -> Result<Response<GetChangeProofResponse>, Status> {
44 |         todo!()
45 |         // let GetChangeProofRequest {
46 |         //     start_root_hash: _,
47 |         //     end_root_hash: _,
48 |         //     start_key: _,
49 |         //     end_key: _,
50 |         //     key_limit: _,
51 |         // } = request.into_inner();
52 | 
53 |         // let _revision = self.latest().await.into_status_result().map_err(|e| *e)?;
54 |     }
55 | 
56 |     #[tracing::instrument(level = "trace")]
57 |     async fn verify_change_proof(
58 |         &self,
59 |         _request: Request<VerifyChangeProofRequest>,
60 |     ) -> Result<Response<VerifyChangeProofResponse>, Status> {
61 |         todo!()
62 |         // let VerifyChangeProofRequest {
63 |         //     proof: _,
64 |         //     start_key: _,
65 |         //     end_key: _,
66 |         //     expected_root_hash: _,
67 |         // } = request.into_inner();
68 | 
69 |         // let _revision = self.latest().await.into_status_result().map_err(|e| *e)?;
70 |     }
71 | 
72 |     #[tracing::instrument(level = "trace")]
73 |     async fn commit_change_proof(
74 |         &self,
75 |         _request: Request<CommitChangeProofRequest>,
76 |     ) -> Result<Response<()>, Status> {
77 |         todo!()
78 |         // let CommitChangeProofRequest { proof: _ } = request.into_inner();
79 |     }
80 | 
81 |     #[tracing::instrument(level = "trace")]
82 |     async fn get_range_proof(
83 |         &self,
84 |         request: Request<GetRangeProofRequest>,
85 |     ) -> Result<Response<GetRangeProofResponse>, Status> {
86 |         let GetRangeProofRequest {
87 |             root_hash: _,
88 |             start_key: _,
89 |             end_key: _,
90 |             key_limit: _,
91 |         } = request.into_inner();
92 | 
93 |         todo!()
94 |     }
95 | 
96 |     #[tracing::instrument(level = "trace")]
97 |     async fn commit_range_proof(
98 |         &self,
99 |         request: Request<CommitRangeProofRequest>,
100 |     ) -> Result<Response<()>, Status> {
101 |         let CommitRangeProofRequest {
102 |             start_key: _,
103 |             range_proof: _,
104 |         } = request.into_inner();
105 | 
106 |         todo!()
107 |     }
108 | }
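// For reference, a sketch of what `get_merkle_root` could look like once
// implemented, following the commented-out lines above. This is an unverified
// sketch against the current `v2::api` surface, not the project's
// implementation; note that `root_hash` returns an Option, so an empty
// database is mapped to an empty byte vector here.
async fn get_merkle_root_sketch(db: &Database) -> Result<GetMerkleRootResponse, Status> {
    let root_hash = db
        .root_hash()
        .await
        .into_status_result()
        .map_err(|e| *e)?
        .map(|hash| hash.to_vec())
        .unwrap_or_default();
    Ok(GetMerkleRootResponse { root_hash })
}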
-------------------------------------------------------------------------------- /grpc-testtool/src/service/process.rs: --------------------------------------------------------------------------------
1 | use tonic::{Request, Response, Status, async_trait};
2 | 
3 | use crate::process_server::process_server_service_server::ProcessServerService as ProcessTrait;
4 | 
5 | use crate::process_server::MetricsResponse;
6 | 
7 | #[async_trait]
8 | impl ProcessTrait for super::Database {
9 |     async fn metrics(&self, _request: Request<()>) -> Result<Response<MetricsResponse>, Status> {
10 |         Err(Status::unimplemented("Metrics not yet supported"))
11 |         // TODO: collect the metrics here
12 |         // Ok(Response::new(MetricsResponse::default()))
13 |     }
14 | }
-------------------------------------------------------------------------------- /storage/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "storage"
3 | version = "0.0.5"
4 | edition = "2024"
5 | rust-version = "1.85.0"
6 | 
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 | 
9 | [dependencies]
10 | bincode = "1.3.3"
11 | bitflags = "2.5.0"
12 | enum-as-inner = "0.6.0"
13 | hex = "0.4.3"
14 | serde = { version = "1.0.199", features = ["derive"] }
15 | smallvec = { version = "1.13.2", features = ["serde", "write", "union"] }
16 | sha2 = "0.10.8"
17 | integer-encoding = "4.0.0"
18 | arc-swap = "1.7.1"
19 | lru = "0.14.0"
20 | metrics = "0.24.0"
21 | log = { version = "0.4.20", optional = true }
22 | bytemuck = "1.7.0"
23 | bytemuck_derive = "1.7.0"
24 | bitfield = "0.19.0"
25 | fastrace = { version = "0.7.4" }
26 | io-uring = { version = "0.7.4", optional = true }
27 | triomphe = "0.1.14"
28 | coarsetime = "0.1.35"
29 | rlp = { version = "0.6.1", optional = true }
30 | sha3 = { version = "0.10.8", optional = true }
31 | bytes = { version = "1.10.1", optional = true }
32 | 
33 | [dev-dependencies]
34 | rand = "0.9.0"
35 | test-case = "3.3.1"
36 | criterion = { version = "0.6.0", features = ["async_tokio", "html_reports"] }
37 | pprof = { version = "0.15.0", features = ["flamegraph"] }
38 | tempfile = "3.12.0"
39 | 
40 | [features]
41 | logger = ["log"]
42 | branch_factor_256 = []
43 | io-uring = ["dep:io-uring"]
44 | ethhash = [ "dep:rlp", "dep:sha3", "dep:bytes" ]
45 | 
46 | [[bench]]
47 | name = "serializer"
48 | harness = false
-------------------------------------------------------------------------------- /storage/benches/serializer.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | use std::array::from_fn;
5 | use std::fs::File;
6 | use std::num::NonZeroU64;
7 | use std::os::raw::c_int;
8 | 
9 | use bincode::Options;
10 | use criterion::profiler::Profiler;
11 | use criterion::{Criterion, criterion_group, criterion_main};
12 | use pprof::ProfilerGuard;
13 | use smallvec::SmallVec;
14 | use storage::{LeafNode, Node, Path};
15 | 
16 | use std::path::Path as FsPath;
17 | 
18 | // For flamegraphs:
19 | //   cargo bench --bench serializer -- --profile-time=5
20 | 
21 | enum FlamegraphProfiler {
22 |     Init(c_int),
23 |     Active(ProfilerGuard<'static>),
24 | }
25 | 
26 | fn file_error_panic<T, U>(path: &FsPath) -> impl FnOnce(T) -> U + '_ {
27 |     |_| panic!("Error on file `{}`", path.display())
28 | }
29 | 
30 | impl Profiler for FlamegraphProfiler {
31 |     #[expect(clippy::unwrap_used)]
32 |     fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &FsPath) {
33 |         if let Self::Init(frequency) = self {
34 |             let guard = ProfilerGuard::new(*frequency).unwrap();
35 |             *self = Self::Active(guard);
36 |         }
37 |     }
38 | 
39 |     #[expect(clippy::unwrap_used)]
40 |     fn stop_profiling(&mut self, _benchmark_id: &str, benchmark_dir: &FsPath) {
41 |         std::fs::create_dir_all(benchmark_dir).unwrap();
42 |         let filename = "firewood-flamegraph.svg";
43 |         let flamegraph_path = benchmark_dir.join(filename);
44 |         let flamegraph_file =
45 |             File::create(&flamegraph_path).unwrap_or_else(file_error_panic(&flamegraph_path));
46 | 
47 |         if let Self::Active(profiler) = self {
48 |             profiler
49 |                 .report()
50 |                 .build()
51 |                 .unwrap()
52 |                 .flamegraph(flamegraph_file)
53 |                 .unwrap_or_else(file_error_panic(&flamegraph_path));
54 |         }
55 |     }
56 | }
57 | 
58 | fn leaf(c: &mut Criterion) {
59 |     let mut group = c.benchmark_group("leaf");
60 |     let input = Node::Leaf(LeafNode {
61 |         partial_path: Path(SmallVec::from_slice(&[0, 1])),
62 |         value: Box::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
63 |     });
64 |     let serializer = bincode::DefaultOptions::new().with_varint_encoding();
65 |     group.bench_with_input("serde", &input, |b, input| {
66 |         b.iter(|| {
67 |             serializer.serialize(input).unwrap();
68 |         })
69 |     });
70 | 
71 |     group.bench_with_input("manual", &input, |b, input| {
72 |         b.iter(|| {
73 |             let mut bytes = Vec::<u8>::new();
74 |             input.as_bytes(0, &mut bytes);
75 |         })
76 |     });
77 |     group.finish();
78 | }
79 | 
80 | fn branch(c: &mut Criterion) {
81 |     let mut group = c.benchmark_group("has_value");
82 |     let mut input = Node::Branch(Box::new(storage::BranchNode {
83 |         partial_path: Path(SmallVec::from_slice(&[0, 1])),
84 |         value: Some(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice()),
85 |         children: from_fn(|i| {
86 |             if i == 0 {
87 |                 Some(storage::Child::AddressWithHash(
88 |                     NonZeroU64::new(1).unwrap(),
89 |                     storage::HashType::from([0; 32]),
90 |                 ))
91 |             } else {
92 |                 None
93 |             }
94 |         }),
95 |     }));
96 |     let serializer = bincode::DefaultOptions::new().with_varint_encoding();
97 |     let serde_serializer = |b: &mut criterion::Bencher, input: &storage::Node| {
98 |         b.iter(|| {
99 |             serializer.serialize(input).unwrap();
100 |         })
101 |     };
102 | 
103 |     let manual_serializer = |b: &mut criterion::Bencher, input: &storage::Node| {
104 |         b.iter(|| {
105 |             let mut bytes = Vec::new();
106 |             input.as_bytes(0, &mut bytes);
107 |         })
108 |     };
109 | 
110 |     group.bench_with_input("serde", &input, serde_serializer);
111 |     group.bench_with_input("manual", &input, manual_serializer);
112 |     group.finish();
113 | 
114 |     let mut group = c.benchmark_group("1_child");
115 |     input.as_branch_mut().unwrap().value = None;
116 |     group.bench_with_input("serde", &input, serde_serializer);
117 |     group.bench_with_input("manual", &input, manual_serializer);
118 |     let child = input.as_branch().unwrap().children[0].clone();
119 |     group.finish();
120 | 
121 |     let mut group = c.benchmark_group("2_child");
122 |     input.as_branch_mut().unwrap().children[1] = child.clone();
123 |     group.bench_with_input("serde", &input, serde_serializer);
124 |     group.bench_with_input("manual", &input, manual_serializer);
125 |     group.finish();
126 | 
127 |     let mut group = c.benchmark_group("16_child");
128 |     input.as_branch_mut().unwrap().children = std::array::from_fn(|_| child.clone());
129 |     group.bench_with_input("serde", &input, serde_serializer);
130 |     group.bench_with_input("manual", &input, manual_serializer);
131 |     group.finish();
132 | }
133 | 
134 | criterion_group!(
135 |     name = serializers;
136 |     config = Criterion::default().with_profiler(FlamegraphProfiler::Init(100));
137 |     targets = leaf, branch
138 | );
139 | criterion_main!(serializers);
-------------------------------------------------------------------------------- /storage/src/hashednode.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | use std::{
5 |     iter::{self},
6 |     ops::Deref,
7 | };
8 | 
9 | use smallvec::SmallVec;
10 | 
11 | use crate::{BranchNode, HashType, LeafNode, Node, Path};
12 | 
13 | /// Returns the hash of `node`, which is at the given `path_prefix`.
14 | pub fn hash_node(node: &Node, path_prefix: &Path) -> HashType {
15 |     match node {
16 |         Node::Branch(node) => {
17 |             // All child hashes should be filled in.
18 |             // TODO danlaine: Enforce this with the type system.
19 |             #[cfg(debug_assertions)]
20 |             debug_assert!(
21 |                 node.children
22 |                     .iter()
23 |                     .all(|c| !matches!(c, Some(crate::Child::Node(_)))),
24 |                 "branch children: {:?}",
25 |                 node.children
26 |             );
27 |             NodeAndPrefix {
28 |                 node: node.as_ref(),
29 |                 prefix: path_prefix,
30 |             }
31 |             .into()
32 |         }
33 |         Node::Leaf(node) => NodeAndPrefix {
34 |             node,
35 |             prefix: path_prefix,
36 |         }
37 |         .into(),
38 |     }
39 | }
40 | 
41 | /// Returns the serialized representation of `node` used as the pre-image
42 | /// when hashing the node. The node is at the given `path_prefix`.
43 | pub fn hash_preimage(node: &Node, path_prefix: &Path) -> Box<[u8]> {
44 |     // Key, 3 options, value digest
45 |     let est_len = node.partial_path().len() + path_prefix.len() + 3 + HashType::default().len();
46 |     let mut buf = Vec::with_capacity(est_len);
47 |     match node {
48 |         Node::Branch(node) => {
49 |             NodeAndPrefix {
50 |                 node: node.as_ref(),
51 |                 prefix: path_prefix,
52 |             }
53 |             .write(&mut buf);
54 |         }
55 |         Node::Leaf(node) => NodeAndPrefix {
56 |             node,
57 |             prefix: path_prefix,
58 |         }
59 |         .write(&mut buf),
60 |     }
61 |     buf.into_boxed_slice()
62 | }
63 | 
64 | pub trait HasUpdate {
65 |     fn update<T: AsRef<[u8]>>(&mut self, data: T);
66 | }
67 | 
68 | impl HasUpdate for Vec<u8> {
69 |     fn update<T: AsRef<[u8]>>(&mut self, data: T) {
70 |         self.extend(data.as_ref().iter().copied());
71 |     }
72 | }
73 | 
74 | // TODO: make it work with any size SmallVec
75 | // impl<T: AsRef<[u8]> + smallvec::Array> HasUpdate for SmallVec<T> {
76 | //     fn update<U: AsRef<[u8]>>(&mut self, data: U) {
77 | //         self.extend(data.as_ref());
78 | //     }
79 | // }
80 | 
81 | impl HasUpdate for SmallVec<[u8; 32]> {
82 |     fn update<T: AsRef<[u8]>>(&mut self, data: T) {
83 |         self.extend(data.as_ref().iter().copied());
84 |     }
85 | }
86 | 
87 | #[derive(Clone, Debug)]
88 | /// A ValueDigest is either a node's value or the hash of its value.
89 | pub enum ValueDigest<T> {
90 |     /// The node's value.
91 |     Value(T),
92 |     /// TODO this variant will be used when we deserialize a proof node
93 |     /// from a remote Firewood instance. The serialized proof node they
94 |     /// send us may be the hash of the value, not the value itself.
95 |     Hash(T),
96 | }
97 | 
98 | impl<T> Deref for ValueDigest<T> {
99 |     type Target = T;
100 | 
101 |     fn deref(&self) -> &Self::Target {
102 |         match self {
103 |             ValueDigest::Value(value) => value,
104 |             ValueDigest::Hash(hash) => hash,
105 |         }
106 |     }
107 | }
108 | 
109 | /// A node in the trie that can be hashed.
110 | pub trait Hashable {
111 |     /// The key of the node where each byte is a nibble.
112 |     fn key(&self) -> impl Iterator<Item = u8> + Clone;
113 |     /// The partial path of this node
114 |     #[cfg(feature = "ethhash")]
115 |     fn partial_path(&self) -> impl Iterator<Item = u8> + Clone;
116 |     /// The node's value or hash.
117 |     fn value_digest(&self) -> Option<ValueDigest<&[u8]>>;
118 |     /// Each element is a child's index and hash.
119 |     /// Yields 0 elements if the node is a leaf.
120 |     fn children(&self) -> impl Iterator<Item = (usize, &HashType)> + Clone;
121 | }
122 | 
123 | /// A preimage of a hash.
124 | pub trait Preimage {
125 |     /// Returns the hash of this preimage.
126 |     fn to_hash(&self) -> HashType;
127 |     /// Write this hash preimage to `buf`.
128 |     fn write(&self, buf: &mut impl HasUpdate);
129 | }
130 | 
131 | trait HashableNode {
132 |     fn partial_path(&self) -> impl Iterator<Item = u8> + Clone;
133 |     fn value(&self) -> Option<&[u8]>;
134 |     fn children_iter(&self) -> impl Iterator<Item = (usize, &HashType)> + Clone;
135 | }
136 | 
137 | impl HashableNode for BranchNode {
138 |     fn partial_path(&self) -> impl Iterator<Item = u8> + Clone {
139 |         self.partial_path.0.iter().copied()
140 |     }
141 | 
142 |     fn value(&self) -> Option<&[u8]> {
143 |         self.value.as_deref()
144 |     }
145 | 
146 |     fn children_iter(&self) -> impl Iterator<Item = (usize, &HashType)> + Clone {
147 |         self.children_iter()
148 |     }
149 | }
150 | 
151 | impl HashableNode for LeafNode {
152 |     fn partial_path(&self) -> impl Iterator<Item = u8> + Clone {
153 |         self.partial_path.0.iter().copied()
154 |     }
155 | 
156 |     fn value(&self) -> Option<&[u8]> {
157 |         Some(&self.value)
158 |     }
159 | 
160 |     fn children_iter(&self) -> impl Iterator<Item = (usize, &HashType)> + Clone {
161 |         iter::empty()
162 |     }
163 | }
164 | 
165 | struct NodeAndPrefix<'a, N: HashableNode> {
166 |     node: &'a N,
167 |     prefix: &'a Path,
168 | }
169 | 
170 | impl<'a, N: HashableNode> From<NodeAndPrefix<'a, N>> for HashType {
171 |     fn from(node: NodeAndPrefix<'a, N>) -> Self {
172 |         node.to_hash()
173 |     }
174 | }
175 | 
176 | impl<'a, N: HashableNode> Hashable for NodeAndPrefix<'a, N> {
177 |     fn key(&self) -> impl Iterator<Item = u8> + Clone {
178 |         self.prefix
179 |             .0
180 |             .iter()
181 |             .copied()
182 |             .chain(self.node.partial_path())
183 |     }
184 | 
185 |     #[cfg(feature = "ethhash")]
186 |     fn partial_path(&self) -> impl Iterator<Item = u8> + Clone {
187 |         self.node.partial_path()
188 |     }
189 | 
190 |     fn value_digest(&self) -> Option<ValueDigest<&[u8]>> {
191 |         self.node.value().map(ValueDigest::Value)
192 |     }
193 | 
194 |     fn children(&self) -> impl Iterator<Item = (usize, &HashType)> + Clone {
195 |         self.node.children_iter()
196 |     }
197 | }
198 | 
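// A quick sketch tying `hash_node` and `hash_preimage` together: under the
// default (non-ethhash) merkledb hasher, the node hash is exactly the Sha256
// of the pre-image bytes. Values below are illustrative only.
fn hash_node_example() {
    use sha2::{Digest, Sha256};
    use smallvec::SmallVec;
    use storage::{LeafNode, Node, Path, hash_node, hash_preimage};

    let leaf = Node::Leaf(LeafNode {
        partial_path: Path(SmallVec::from_slice(&[0xa, 0xb])), // nibbles, not bytes
        value: Box::new([1, 2, 3]),
    });
    let prefix = Path(SmallVec::from_slice(&[0x1])); // nibbles from the root to this node
    let hash = hash_node(&leaf, &prefix);
    let preimage = hash_preimage(&leaf, &prefix);
    assert_eq!(hash.as_ref(), Sha256::digest(&preimage).as_slice());
}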
-------------------------------------------------------------------------------- /storage/src/hashers/merkledb.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | use crate::hashednode::{HasUpdate, Hashable, Preimage};
5 | use crate::{TrieHash, ValueDigest};
6 | // Merkledb compatible hashing algorithm.
7 | use integer_encoding::VarInt;
8 | use sha2::{Digest, Sha256};
9 | 
10 | const MAX_VARINT_SIZE: usize = 10;
11 | const BITS_PER_NIBBLE: u64 = 4;
12 | 
13 | impl HasUpdate for Sha256 {
14 |     fn update<T: AsRef<[u8]>>(&mut self, data: T) {
15 |         sha2::Digest::update(self, data)
16 |     }
17 | }
18 | 
19 | impl<T: Hashable> Preimage for T {
20 |     fn to_hash(&self) -> TrieHash {
21 |         let mut hasher = Sha256::new();
22 | 
23 |         self.write(&mut hasher);
24 |         hasher.finalize().into()
25 |     }
26 | 
27 |     fn write(&self, buf: &mut impl HasUpdate) {
28 |         let children = self.children();
29 | 
30 |         let num_children = children.clone().count() as u64;
31 |         add_varint_to_buf(buf, num_children);
32 | 
33 |         for (index, hash) in children {
34 |             add_varint_to_buf(buf, index as u64);
35 |             buf.update(hash);
36 |         }
37 | 
38 |         // Add value digest (if any) to hash pre-image
39 |         add_value_digest_to_buf(buf, self.value_digest());
40 | 
41 |         // Add key length (in bits) to hash pre-image
42 |         let mut key = self.key();
43 |         // let mut key = key.as_ref().iter();
44 |         let key_bit_len = BITS_PER_NIBBLE * key.clone().count() as u64;
45 |         add_varint_to_buf(buf, key_bit_len);
46 | 
47 |         // Add key to hash pre-image
48 |         while let Some(high_nibble) = key.next() {
49 |             let low_nibble = key.next().unwrap_or(0);
50 |             let byte = (high_nibble << 4) | low_nibble;
51 |             buf.update([byte]);
52 |         }
53 |     }
54 | }
55 | 
56 | fn add_value_digest_to_buf<H: HasUpdate, T: AsRef<[u8]>>(
57 |     buf: &mut H,
58 |     value_digest: Option<ValueDigest<T>>,
59 | ) {
60 |     let Some(value_digest) = value_digest else {
61 |         let value_exists: u8 = 0;
62 |         buf.update([value_exists]);
63 |         return;
64 |     };
65 | 
66 |     let value_exists: u8 = 1;
67 |     buf.update([value_exists]);
68 | 
69 |     match value_digest {
70 |         ValueDigest::Value(value) if value.as_ref().len() >= 32 => {
71 |             let hash = Sha256::digest(value);
72 |             add_len_and_value_to_buf(buf, hash);
73 |         }
74 |         ValueDigest::Value(value) => {
75 |             add_len_and_value_to_buf(buf, value);
76 |         }
77 |         ValueDigest::Hash(hash) => {
78 |             add_len_and_value_to_buf(buf, hash);
79 |         }
80 |     }
81 | }
82 | 
83 | #[inline]
84 | /// Writes the length of `value` and `value` to `buf`.
85 | fn add_len_and_value_to_buf<H: HasUpdate, V: AsRef<[u8]>>(buf: &mut H, value: V) {
86 |     let value_len = value.as_ref().len();
87 |     buf.update([value_len as u8]);
88 |     buf.update(value);
89 | }
90 | 
91 | #[inline]
92 | /// Encodes `value` as a varint and writes it to `buf`.
93 | fn add_varint_to_buf<H: HasUpdate>(buf: &mut H, value: u64) {
94 |     let mut buf_arr = [0u8; MAX_VARINT_SIZE];
95 |     let len = value.encode_var(&mut buf_arr);
96 |     buf.update(
97 |         buf_arr
98 |             .get(..len)
99 |             .expect("length is always less than MAX_VARINT_SIZE"),
100 |     );
101 | }
-------------------------------------------------------------------------------- /storage/src/hashers/mod.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | #[cfg(feature = "ethhash")]
5 | mod ethhash;
6 | #[cfg(not(feature = "ethhash"))]
7 | mod merkledb;
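// A worked example of the pre-image layout that `write` in merkledb.rs above
// produces for a childless node: child-count varint, value marker and
// length-prefixed value, key bit-length varint, then the key nibbles packed
// two per byte. A sketch assuming the default (non-ethhash) merkledb hasher;
// the node contents are illustrative.
fn merkledb_preimage_layout_example() {
    use smallvec::SmallVec;
    use storage::{LeafNode, Node, Path, hash_preimage};

    let leaf = Node::Leaf(LeafNode {
        partial_path: Path(SmallVec::from_slice(&[0xa, 0xb])),
        value: Box::new([1, 2, 3]),
    });
    let preimage = hash_preimage(&leaf, &Path(SmallVec::new()));
    assert_eq!(
        preimage.as_ref(),
        [
            0x00,          // number of children (varint)
            0x01,          // value-exists marker
            0x03, 1, 2, 3, // value length, then the value (short values are stored raw)
            0x08,          // key length in bits (varint): 2 nibbles * 4
            0xab,          // nibbles 0xa and 0xb packed two per byte
        ]
        .as_slice()
    );
}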
-------------------------------------------------------------------------------- /storage/src/lib.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | #![warn(missing_debug_implementations, rust_2018_idioms, missing_docs)]
4 | #![deny(unsafe_code)]
5 | 
6 | //! # storage implements the storage of a [Node] on top of a LinearStore
7 | //!
8 | //! Nodes are stored at a [LinearAddress] within a [ReadableStorage].
9 | //!
10 | //! The [NodeStore] maintains a free list and the [LinearAddress] of a root node.
11 | //!
12 | //! A [NodeStore] is backed by a [ReadableStorage] which is persisted storage.
13 | 
14 | mod hashednode;
15 | mod hashers;
16 | mod linear;
17 | mod node;
18 | mod nodestore;
19 | mod trie_hash;
20 | 
21 | /// Logger module for handling logging functionality
22 | pub mod logger;
23 | 
24 | // re-export these so callers don't need to know where they are
25 | pub use hashednode::{Hashable, Preimage, ValueDigest, hash_node, hash_preimage};
26 | pub use linear::{FileIoError, ReadableStorage, WritableStorage};
27 | pub use node::path::{NibblesIterator, Path};
28 | pub use node::{BranchNode, Child, LeafNode, Node, PathIterItem, branch::HashType};
29 | pub use nodestore::{
30 |     Committed, HashedNodeReader, ImmutableProposal, LinearAddress, MutableProposal, NodeReader,
31 |     NodeStore, Parentable, ReadInMemoryNode, RootReader, TrieReader, UpdateError,
32 | };
33 | 
34 | pub use linear::filebacked::FileBacked;
35 | pub use linear::memory::MemStore;
36 | 
37 | pub use trie_hash::TrieHash;
38 | 
39 | /// A shared node, which is just a triomphe Arc of a node
40 | pub type SharedNode = triomphe::Arc<Node>;
41 | 
42 | /// The strategy for caching nodes that are read
43 | /// from the storage layer. Generally, we only want to
44 | /// cache write operations, but for some read-heavy workloads
45 | /// you can enable caching of branch reads or all reads.
46 | #[derive(Clone, Debug)]
47 | pub enum CacheReadStrategy {
48 |     /// Only cache writes (no reads will be cached)
49 |     WritesOnly,
50 | 
51 |     /// Cache branch reads (reads that are not leaf nodes)
52 |     BranchReads,
53 | 
54 |     /// Cache all reads (leaves and branches)
55 |     All,
56 | }
57 | 
58 | impl std::fmt::Display for CacheReadStrategy {
59 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
60 |         write!(f, "{self:?}")
61 |     }
62 | }
63 | 
64 | /// Returns the hash of an empty trie, which is the Keccak256 hash of the RLP encoding of an empty byte array.
65 | ///
66 | /// This function is slow, so callers should cache the result
67 | #[cfg(feature = "ethhash")]
68 | pub fn empty_trie_hash() -> TrieHash {
69 |     use sha3::Digest as _;
70 | 
71 |     sha3::Keccak256::digest(rlp::NULL_RLP)
72 |         .as_slice()
73 |         .try_into()
74 |         .expect("empty trie hash is 32 bytes")
75 | }
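// Sketch: the well-known constant `empty_trie_hash` computes, namely
// Keccak256(RLP("")) = Keccak256(0x80); only available with the `ethhash`
// feature enabled.
#[cfg(feature = "ethhash")]
fn empty_trie_hash_example() {
    let hash = storage::empty_trie_hash();
    // The canonical Ethereum empty-root constant:
    assert_eq!(
        hex::encode(hash),
        "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
    );
}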
-------------------------------------------------------------------------------- /storage/src/linear/memory.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | use super::{FileIoError, ReadableStorage, WritableStorage};
5 | use metrics::counter;
6 | use std::io::{Cursor, Read};
7 | use std::sync::Mutex;
8 | 
9 | #[derive(Debug, Default)]
10 | /// An in-memory implementation of [WritableStorage] and [ReadableStorage]
11 | pub struct MemStore {
12 |     bytes: Mutex<Vec<u8>>,
13 | }
14 | 
15 | impl MemStore {
16 |     /// Create a new, empty [MemStore]
17 |     pub const fn new(bytes: Vec<u8>) -> Self {
18 |         Self {
19 |             bytes: Mutex::new(bytes),
20 |         }
21 |     }
22 | }
23 | 
24 | impl WritableStorage for MemStore {
25 |     fn write(&self, offset: u64, object: &[u8]) -> Result<usize, FileIoError> {
26 |         let offset = offset as usize;
27 |         let mut guard = self.bytes.lock().expect("poisoned lock");
28 |         if offset + object.len() > guard.len() {
29 |             guard.resize(offset + object.len(), 0);
30 |         }
31 |         guard[offset..offset + object.len()].copy_from_slice(object);
32 |         Ok(object.len())
33 |     }
34 | }
35 | 
36 | impl ReadableStorage for MemStore {
37 |     fn stream_from(&self, addr: u64) -> Result<Box<dyn Read>, FileIoError> {
38 |         counter!("firewood.read_node", "from" => "memory").increment(1);
39 |         let bytes = self
40 |             .bytes
41 |             .lock()
42 |             .expect("poisoned lock")
43 |             .get(addr as usize..)
44 |             .unwrap_or_default()
45 |             .to_owned();
46 | 
47 |         Ok(Box::new(Cursor::new(bytes)))
48 |     }
49 | 
50 |     fn size(&self) -> Result<u64, FileIoError> {
51 |         Ok(self.bytes.lock().expect("poisoned lock").len() as u64)
52 |     }
53 | }
54 | 
55 | #[expect(clippy::unwrap_used)]
56 | #[cfg(test)]
57 | mod test {
58 |     use super::*;
59 |     use test_case::test_case;
60 | 
61 |     #[test_case(&[(0,&[1, 2, 3])],(0,&[1, 2, 3]); "write to empty store")]
62 |     #[test_case(&[(0,&[1, 2, 3])],(1,&[2, 3]); "read from middle of store")]
63 |     #[test_case(&[(0,&[1, 2, 3])],(2,&[3]); "read from end of store")]
64 |     #[test_case(&[(0,&[1, 2, 3])],(3,&[]); "read past end of store")]
65 |     #[test_case(&[(0,&[1, 2, 3]),(3,&[4,5,6])],(0,&[1, 2, 3,4,5,6]); "write to end of store")]
66 |     #[test_case(&[(0,&[1, 2, 3]),(0,&[4])],(0,&[4,2,3]); "overwrite start of store")]
67 |     #[test_case(&[(0,&[1, 2, 3]),(1,&[4])],(0,&[1,4,3]); "overwrite middle of store")]
68 |     #[test_case(&[(0,&[1, 2, 3]),(2,&[4])],(0,&[1,2,4]); "overwrite end of store")]
69 |     #[test_case(&[(0,&[1, 2, 3]),(2,&[4,5])],(0,&[1,2,4,5]); "overwrite/extend end of store")]
70 |     fn test_in_mem_write_linear_store(writes: &[(u64, &[u8])], expected: (u64, &[u8])) {
71 |         let store = MemStore {
72 |             bytes: Mutex::new(vec![]),
73 |         };
74 |         assert_eq!(store.size().unwrap(), 0);
75 | 
76 |         for write in writes {
77 |             store.write(write.0, write.1).unwrap();
78 |         }
79 | 
80 |         let mut reader = store.stream_from(expected.0).unwrap();
81 |         let mut read_bytes = vec![];
82 |         reader.read_to_end(&mut read_bytes).unwrap();
83 |         assert_eq!(read_bytes, expected.1);
84 |     }
85 | }
-------------------------------------------------------------------------------- /storage/src/linear/mod.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | //! A LinearStore provides a view of a set of bytes at
5 | //! a given time. A LinearStore has three different types,
6 | //! which refer to another base type, as follows:
7 | //! ```mermaid
8 | //! stateDiagram-v2
9 | //!     R1(Committed) --> R2(Committed)
10 | //!     R2(Committed) --> R3(FileBacked)
11 | //!     P1(Proposed) --> R3(FileBacked)
12 | //!     P2(Proposed) --> P1(Proposed)
13 | //! ```
14 | //!
15 | //! Each type is described in more detail below.
16 | 
17 | use std::fmt::Debug;
18 | use std::io::Read;
19 | use std::num::NonZero;
20 | use std::ops::Deref;
21 | use std::path::PathBuf;
22 | 
23 | use crate::{CacheReadStrategy, LinearAddress, SharedNode};
24 | pub(super) mod filebacked;
25 | pub mod memory;
26 | 
27 | /// An error that occurs when reading or writing to a [ReadableStorage] or [WritableStorage]
28 | ///
29 | /// This error is used to wrap errors that occur when reading or writing to a file.
30 | /// It contains the filename, offset, and context of the error.
31 | #[derive(Debug)]
32 | pub struct FileIoError {
33 |     inner: std::io::Error,
34 |     filename: Option<PathBuf>,
35 |     offset: u64,
36 |     context: Option<String>,
37 | }
38 | 
39 | impl FileIoError {
40 |     /// Create a new [FileIoError] from a generic error
41 |     ///
42 |     /// Only use this constructor if you do not have any file or line information.
43 |     ///
44 |     /// # Arguments
45 |     ///
46 |     /// * `error` - The error to wrap
47 |     pub fn from_generic_no_file<T: std::error::Error>(error: T, context: &str) -> Self {
48 |         Self {
49 |             inner: std::io::Error::other(error.to_string()),
50 |             filename: None,
51 |             offset: 0,
52 |             context: Some(context.into()),
53 |         }
54 |     }
55 | 
56 |     /// Create a new [FileIoError]
57 |     ///
58 |     /// # Arguments
59 |     ///
60 |     /// * `inner` - The inner error
61 |     /// * `filename` - The filename of the file that caused the error
62 |     /// * `offset` - The offset of the file that caused the error
63 |     /// * `context` - The context of this error
64 |     pub fn new(
65 |         inner: std::io::Error,
66 |         filename: Option<PathBuf>,
67 |         offset: u64,
68 |         context: Option<String>,
69 |     ) -> Self {
70 |         Self {
71 |             inner,
72 |             filename,
73 |             offset,
74 |             context,
75 |         }
76 |     }
77 | }
78 | 
79 | impl std::error::Error for FileIoError {
80 |     fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
81 |         Some(&self.inner)
82 |     }
83 | }
84 | 
85 | impl std::fmt::Display for FileIoError {
86 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
87 |         write!(
88 |             f,
89 |             "{inner} at offset {offset} of file '{filename}' {context}",
90 |             inner = self.inner,
91 |             offset = self.offset,
92 |             filename = self
93 |                 .filename
94 |                 .as_ref()
95 |                 .unwrap_or(&PathBuf::from("[unknown]"))
96 |                 .display(),
97 |             context = self.context.as_ref().unwrap_or(&String::from(""))
98 |         )
99 |     }
100 | }
101 | 
102 | impl Deref for FileIoError {
103 |     type Target = std::io::Error;
104 | 
105 |     fn deref(&self) -> &Self::Target {
106 |         &self.inner
107 |     }
108 | }
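// A small sketch of constructing a FileIoError by hand; the Display impl
// above interpolates the inner error, offset, filename, and context. The
// filename here is hypothetical.
fn file_io_error_example() {
    let err = FileIoError::new(
        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "short read"),
        Some(std::path::PathBuf::from("firewood.db")),
        4096,
        Some("reading node header".to_string()),
    );
    // Renders roughly as: "short read at offset 4096 of file 'firewood.db' reading node header"
    assert!(err.to_string().contains("offset 4096"));
}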
109 | /// Trait for readable storage.
110 | pub trait ReadableStorage: Debug + Sync + Send {
111 |     /// Stream data from the specified address.
112 |     ///
113 |     /// # Arguments
114 |     ///
115 |     /// * `addr` - The address from which to stream the data.
116 |     ///
117 |     /// # Returns
118 |     ///
119 |     /// A `Result` containing a boxed `Read` trait object, or an `Error` if the operation fails.
120 |     fn stream_from(&self, addr: u64) -> Result<Box<dyn Read>, FileIoError>;
121 | 
122 |     /// Return the size of the underlying storage, in bytes
123 |     fn size(&self) -> Result<u64, FileIoError>;
124 | 
125 |     /// Read a node from the cache (if any)
126 |     fn read_cached_node(&self, _addr: LinearAddress, _mode: &'static str) -> Option<SharedNode> {
127 |         None
128 |     }
129 | 
130 |     /// Fetch the next pointer from the freelist cache
131 |     fn free_list_cache(&self, _addr: LinearAddress) -> Option<Option<LinearAddress>> {
132 |         None
133 |     }
134 | 
135 |     /// Return the cache read strategy for this readable storage
136 |     fn cache_read_strategy(&self) -> &CacheReadStrategy {
137 |         &CacheReadStrategy::WritesOnly
138 |     }
139 | 
140 |     /// Cache a node for future reads
141 |     fn cache_node(&self, _addr: LinearAddress, _node: SharedNode) {}
142 | 
143 |     /// Return the filename of the underlying storage
144 |     fn filename(&self) -> Option<PathBuf> {
145 |         None
146 |     }
147 | 
148 |     /// Convert an io::Error into a FileIoError
149 |     fn file_io_error(
150 |         &self,
151 |         error: std::io::Error,
152 |         offset: u64,
153 |         context: Option<String>,
154 |     ) -> FileIoError {
155 |         FileIoError {
156 |             inner: error,
157 |             filename: self.filename(),
158 |             offset,
159 |             context,
160 |         }
161 |     }
162 | }
163 | 
164 | /// Trait for writable storage.
165 | pub trait WritableStorage: ReadableStorage {
166 |     /// Writes the given object at the specified offset.
167 |     ///
168 |     /// # Arguments
169 |     ///
170 |     /// * `offset` - The offset at which to write the object.
171 |     /// * `object` - The object to write.
172 |     ///
173 |     /// # Returns
174 |     ///
175 |     /// The number of bytes written, or an error if the write operation fails.
176 |     fn write(&self, offset: u64, object: &[u8]) -> Result<usize, FileIoError>;
177 | 
178 |     /// Write all nodes to the cache (if any)
179 |     fn write_cached_nodes<'a>(
180 |         &self,
181 |         _nodes: impl Iterator<Item = (&'a LinearAddress, &'a SharedNode)>,
182 |     ) -> Result<(), FileIoError> {
183 |         Ok(())
184 |     }
185 | 
186 |     /// Invalidate all nodes that are part of a specific revision, as these will never be referenced again
187 |     fn invalidate_cached_nodes<'a>(&self, _addresses: impl Iterator<Item = &'a LinearAddress>) {}
188 | 
189 |     /// Add a new entry to the freelist cache
190 |     fn add_to_free_list_cache(&self, _addr: LinearAddress, _next: Option<LinearAddress>) {}
191 | }
192 | 
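// The cache-related methods above are default no-ops, so a simple backend only
// has to provide `stream_from`, `size`, and `write`. A sketch of observing
// those defaults through a trait object; this holds for any backend (such as
// MemStore) that does not override them:
fn default_cache_methods_sketch(store: &dyn ReadableStorage) {
    assert!(matches!(
        *store.cache_read_strategy(),
        CacheReadStrategy::WritesOnly
    ));
    assert!(store.filename().is_none());
}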
-------------------------------------------------------------------------------- /storage/src/logger.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | // Supports making the logging operations a true runtime no-op
5 | // Since we're a library, we can't really use the logging level
6 | // static shortcut
7 | 
8 | #[cfg(feature = "logger")]
9 | pub use log::{debug, error, info, trace, warn};
10 | 
11 | /// Returns true if the trace log level is enabled
12 | #[cfg(feature = "logger")]
13 | pub fn trace_enabled() -> bool {
14 |     log::log_enabled!(log::Level::Trace)
15 | }
16 | 
17 | #[cfg(not(feature = "logger"))]
18 | pub use noop_logger::{debug, error, info, trace, trace_enabled, warn};
19 | 
20 | #[cfg(not(feature = "logger"))]
21 | mod noop_logger {
22 |     #[macro_export]
23 |     /// A noop logger, when the logger feature is disabled
24 |     macro_rules! noop {
25 |         ($($arg:tt)+) => {};
26 |     }
27 | 
28 |     pub use noop as debug;
29 |     pub use noop as error;
30 |     pub use noop as info;
31 |     pub use noop as trace;
32 |     pub use noop as warn;
33 | 
34 |     /// trace_enabled for a noop logger is always false
35 |     #[inline]
36 |     pub fn trace_enabled() -> bool {
37 |         false
38 |     }
39 | }
-------------------------------------------------------------------------------- /storage/src/node/leaf.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | use serde::{Deserialize, Serialize};
5 | 
6 | use std::fmt::{Debug, Error as FmtError, Formatter};
7 | 
8 | use crate::Path;
9 | 
10 | /// A leaf node
11 | #[derive(PartialEq, Eq, Clone, Serialize, Deserialize)]
12 | pub struct LeafNode {
13 |     /// The path of this leaf, but only the remaining nibbles
14 |     pub partial_path: Path,
15 | 
16 |     /// The value associated with this leaf
17 |     pub value: Box<[u8]>,
18 | }
19 | 
20 | impl Debug for LeafNode {
21 |     fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
22 |         write!(
23 |             f,
24 |             "[Leaf {:?} {}]",
25 |             self.partial_path,
26 |             hex::encode(&*self.value)
27 |         )
28 |     }
29 | }
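// Sketch: the custom Debug impl above renders the value as hex, e.g. a
// two-nibble path with value bytes [0xde, 0xad] prints as "[Leaf ... dead]".
// Values are illustrative.
fn leaf_debug_example() {
    use smallvec::SmallVec;
    let leaf = LeafNode {
        partial_path: Path(SmallVec::from_slice(&[0x0, 0x1])),
        value: Box::new([0xde, 0xad]),
    };
    let rendered = format!("{:?}", leaf);
    assert!(rendered.starts_with("[Leaf"));
    assert!(rendered.ends_with("dead]"));
}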
-------------------------------------------------------------------------------- /storage/src/trie_hash.rs: --------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 | 
4 | use std::fmt::{self, Debug, Display, Formatter};
5 | 
6 | use serde::de::Visitor;
7 | use serde::{Deserialize, Serialize};
8 | use sha2::digest::generic_array::GenericArray;
9 | use sha2::digest::typenum;
10 | 
11 | use crate::node::branch::Serializable;
12 | 
13 | /// A hash value inside a merkle trie
14 | /// We use the same type as returned by sha2 here to avoid copies
15 | #[derive(PartialEq, Eq, Clone, Default, Hash)]
16 | pub struct TrieHash(GenericArray<u8, typenum::U32>);
17 | 
18 | impl std::ops::Deref for TrieHash {
19 |     type Target = GenericArray<u8, typenum::U32>;
20 |     fn deref(&self) -> &Self::Target {
21 |         &self.0
22 |     }
23 | }
24 | 
25 | impl AsRef<[u8]> for TrieHash {
26 |     fn as_ref(&self) -> &[u8] {
27 |         &self.0
28 |     }
29 | }
30 | 
31 | impl Debug for TrieHash {
32 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
33 |         let width = f.precision().unwrap_or(64);
34 |         write!(f, "{:.*}", width, hex::encode(self.0))
35 |     }
36 | }
37 | impl Display for TrieHash {
38 |     fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
39 |         let width = f.precision().unwrap_or(64);
40 |         write!(f, "{:.*}", width, hex::encode(self.0))
41 |     }
42 | }
43 | 
44 | const TRIE_HASH_LEN: usize = std::mem::size_of::<TrieHash>();
45 | 
46 | impl From<[u8; 32]> for TrieHash {
47 |     fn from(value: [u8; TRIE_HASH_LEN]) -> Self {
48 |         TrieHash(value.into())
49 |     }
50 | }
51 | 
52 | impl TryFrom<&[u8]> for TrieHash {
53 |     type Error = &'static str;
54 | 
55 |     fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
56 |         if value.len() == Self::len() {
57 |             let mut hash = TrieHash::default();
58 |             hash.0.copy_from_slice(value);
59 |             Ok(hash)
60 |         } else {
61 |             Err("Invalid length")
62 |         }
63 |     }
64 | }
65 | 
66 | impl From<GenericArray<u8, typenum::U32>> for TrieHash {
67 |     fn from(value: GenericArray<u8, typenum::U32>) -> Self {
68 |         TrieHash(value)
69 |     }
70 | }
71 | 
72 | impl TrieHash {
73 |     /// Return the length of a TrieHash
74 |     pub(crate) const fn len() -> usize {
75 |         std::mem::size_of::<Self>()
76 |     }
77 | 
78 |     /// Some code needs a TrieHash even though it only has a HashType.
79 |     /// This function is a no-op, as HashType is a TrieHash in this context.
80 |     pub const fn into_triehash(self) -> Self {
81 |         self
82 |     }
83 | }
84 | 
85 | impl Serializable for TrieHash {
86 |     fn serialized_bytes(&self) -> Vec<u8> {
87 |         self.0.to_vec()
88 |     }
89 | 
90 |     fn from_reader<R: std::io::Read>(mut reader: R) -> Result<Self, std::io::Error>
91 |     where
92 |         Self: Sized,
93 |     {
94 |         let mut buf = [0u8; 32];
95 |         reader.read_exact(&mut buf)?;
96 |         Ok(TrieHash::from(buf))
97 |     }
98 | }
99 | 
100 | impl Serialize for TrieHash {
101 |     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
102 |     where
103 |         S: serde::Serializer,
104 |     {
105 |         serializer.serialize_bytes(&self.0)
106 |     }
107 | }
108 | 
109 | impl<'de> Deserialize<'de> for TrieHash {
110 |     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
111 |     where
112 |         D: serde::Deserializer<'de>,
113 |     {
114 |         deserializer.deserialize_bytes(TrieVisitor)
115 |     }
116 | }
117 | 
118 | struct TrieVisitor;
119 | 
120 | impl Visitor<'_> for TrieVisitor {
121 |     type Value = TrieHash;
122 | 
123 |     fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
124 |         formatter.write_str("an array of u8 hash bytes")
125 |     }
126 | 
127 |     fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
128 |     where
129 |         E: serde::de::Error,
130 |     {
131 |         let mut hash = TrieHash::default();
132 |         if v.len() == hash.0.len() {
133 |             hash.0.copy_from_slice(v);
134 |             Ok(hash)
135 |         } else {
136 |             Err(E::invalid_length(v.len(), &self))
137 |         }
138 |     }
139 | }
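// Sketch of the conversions above: exactly 32 bytes round-trip through
// TryFrom<&[u8]>; any other length is rejected with "Invalid length".
fn trie_hash_conversion_example() {
    let bytes = [0xab_u8; 32];
    let hash = TrieHash::try_from(&bytes[..]).expect("32 bytes always convert");
    assert_eq!(hash.as_ref(), &bytes[..]);
    assert!(TrieHash::try_from(&bytes[..31]).is_err());
}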
-------------------------------------------------------------------------------- /triehash/CHANGELOG.md: --------------------------------------------------------------------------------
1 | # Changelog
2 | 
3 | The format is based on [Keep a Changelog].
4 | 
5 | [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/
6 | 
7 | ## [Unreleased]
8 | 
9 | ## [0.8.5] - 2025-03-26
10 | 
11 | - Updated `hash-db` to 0.16.0
12 | - Updated `rlp` to 0.6
13 | - Updated `criterion` to 0.5.1
14 | - Updated `keccak-hasher` to 0.16.0
15 | - Updated `ethereum-types` to 0.15.1
16 | - Updated `trie-standardmap` to 0.16.0
17 | - Updated `hex-literal` to 1.0.0
18 | 
19 | ## [0.8.4] - 2020-01-08
20 | 
21 | - Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463)
22 | 
23 | ## [0.8.3] - 2020-03-16
24 | 
25 | - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342)
26 | 
27 | ## [0.8.2] - 2019-12-15
28 | 
29 | - Added no-std support. [#280](https://github.com/paritytech/parity-common/pull/280)
30 | 
31 | ## [0.8.1] - 2019-10-24
32 | 
33 | - Migrated to 2018 edition. [#214](https://github.com/paritytech/parity-common/pull/214)
34 | 
35 | ### Dependencies
36 | 
37 | - Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239)
-------------------------------------------------------------------------------- /triehash/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "triehash"
3 | version = "0.8.5"
4 | authors = ["Parity Technologies ", "Ron Kuris "]
5 | description = "In-memory patricia trie operations"
6 | repository = "https://github.com/paritytech/parity-common"
7 | license = "MIT OR Apache-2.0"
8 | edition = "2024"
9 | 
10 | [dependencies]
11 | hash-db = { version = "0.16.0", default-features = false }
12 | rlp = { version = "0.6", default-features = false }
13 | 
14 | [dev-dependencies]
15 | criterion = "0.6.0"
16 | keccak-hasher = "0.16.0"
17 | ethereum-types = { version = "0.15.1" }
18 | tiny-keccak = { version = "2.0", features = ["keccak"] }
19 | trie-standardmap = "0.16.0"
20 | hex-literal = "1.0.0"
21 | 
22 | [features]
23 | default = ["std"]
24 | std = [
25 |     "hash-db/std",
26 |     "rlp/std",
27 | ]
28 | 
29 | [[bench]]
30 | name = "triehash"
31 | path = "benches/triehash.rs"
32 | harness = false
-------------------------------------------------------------------------------- /triehash/README.md: --------------------------------------------------------------------------------
1 | # triehash
2 | 
3 | This crate provides utility functions to validate and initialize tries using flexible input.
4 | It is used extensively in `parity-ethereum` to validate blocks (mostly transactions and receipt roots).
-------------------------------------------------------------------------------- /triehash/benches/triehash.rs: --------------------------------------------------------------------------------
1 | // Copyright 2020 Parity Technologies
2 | //
3 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
5 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
6 | // option. This file may not be copied, modified, or distributed
7 | // except according to those terms.
8 | 
9 | use criterion::{Criterion, criterion_group, criterion_main};
10 | use ethereum_types::H256;
11 | use keccak_hasher::KeccakHasher;
12 | use tiny_keccak::{Hasher, Keccak};
13 | use trie_standardmap::{Alphabet, StandardMap, ValueMode};
14 | use triehash::trie_root;
15 | 
16 | fn keccak256(input: &[u8]) -> [u8; 32] {
17 |     let mut keccak256 = Keccak::v256();
18 |     let mut out = [0u8; 32];
19 |     keccak256.update(input);
20 |     keccak256.finalize(&mut out);
21 |     out
22 | }
23 | 
24 | fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> {
25 |     assert!(min_count + diff_count <= 32);
26 |     *seed = H256(keccak256(seed.as_bytes()));
27 |     let r = min_count + (seed[31] as usize % (diff_count + 1));
28 |     let mut ret: Vec<u8> = Vec::with_capacity(r);
29 |     for i in 0..r {
30 |         ret.push(alphabet[seed[i] as usize % alphabet.len()]);
31 |     }
32 |     ret
33 | }
34 | 
35 | fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> {
36 |     assert!(min_count + diff_count <= 32);
37 |     *seed = H256(keccak256(seed.as_bytes()));
38 |     let r = min_count + (seed[31] as usize % (diff_count + 1));
39 |     seed[0..r].to_vec()
40 | }
41 | 
42 | fn random_value(seed: &mut H256) -> Vec<u8> {
43 |     *seed = H256(keccak256(seed.as_bytes()));
44 |     match seed[0] % 2 {
45 |         1 => vec![seed[31]; 1],
46 |         _ => seed.as_bytes().to_vec(),
47 |     }
48 | }
49 | 
50 | fn bench_insertions(c: &mut Criterion) {
51 |     c.bench_function("32_mir_1k", |b| {
52 |         let st = StandardMap {
53 |             alphabet: Alphabet::All,
54 |             min_key: 32,
55 |             journal_key: 0,
56 |             value_mode: ValueMode::Mirror,
57 |             count: 1000,
58 |         };
59 |         let d = st.make();
60 |         b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
61 |     });
62 | 
63 |     c.bench_function("32_ran_1k", |b| {
64 |         let st = StandardMap {
65 |             alphabet: Alphabet::All,
66 |             min_key: 32,
67 |             journal_key: 0,
68 |             value_mode: ValueMode::Random,
69 |             count: 1000,
70 |         };
71 |         let d = st.make();
72 |         b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
73 |     });
74 | 
75 |     c.bench_function("six_high", |b| {
76 |         let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
77 |         let mut seed = H256::default();
78 |         for _ in 0..1000 {
79 |             let k = random_bytes(6, 0, &mut seed);
80 |             let v = random_value(&mut seed);
81 |             d.push((k, v))
82 |         }
83 |         b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
84 |     });
85 | 
86 |     c.bench_function("six_mid", |b| {
87 |         let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
88 |         let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
89 |         let mut seed = H256::default();
90 |         for _ in 0..1000 {
91 |             let k = random_word(alphabet, 6, 0, &mut seed);
92 |             let v = random_value(&mut seed);
93 |             d.push((k, v))
94 |         }
95 |         b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
96 |     });
97 | 
98 |     c.bench_function("random_mid", |b| {
99 |         let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
100 |         let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
101 |         let mut seed = H256::default();
102 |         for _ in 0..1000 {
103 |             let k = random_word(alphabet, 1, 5, &mut seed);
104 |             let v = random_value(&mut seed);
105 |             d.push((k, v))
106 |         }
107 |         b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
108 |     });
109 | 
110 |     c.bench_function("six_low", |b| {
111 |         let alphabet = b"abcdef";
112 |         let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
113 |         let mut seed = H256::default();
114 |         for _ in 0..1000 {
115 |             let k = random_word(alphabet, 6, 0, &mut seed);
116 |             let v = random_value(&mut seed);
117 |             d.push((k, v))
118 |         }
119 |         b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
120 |     });
121 | }
122 | 
123 | criterion_group!(benches, bench_insertions);
124 | criterion_main!(benches);
125 | 
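// A minimal, non-benchmark usage sketch of `trie_root`: keys and values are
// arbitrary byte strings, and instantiating with KeccakHasher yields the
// Ethereum-style root. The key/value pairs are illustrative only.
fn trie_root_usage_example() -> H256 {
    let kvs: Vec<(Vec<u8>, Vec<u8>)> = vec![
        (b"doe".to_vec(), b"reindeer".to_vec()),
        (b"dog".to_vec(), b"puppy".to_vec()),
        (b"dogglesworth".to_vec(), b"cat".to_vec()),
    ];
    // The same call the benchmarks above make, on a fixed map:
    trie_root::<KeccakHasher, _, _, _>(kvs)
}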
--------------------------------------------------------------------------------