├── .github
├── check-license-headers.yaml
├── dependabot.yml
├── license-header.txt
└── workflows
│ ├── attach-static-libs.yaml
│ ├── cache-cleanup.yaml
│ ├── ci.yaml
│ ├── conventional-commits.yaml
│ ├── default-branch-cache.yaml
│ ├── gh-pages.yaml
│ ├── pr-title.yaml
│ └── release.yaml
├── .gitignore
├── .markdownlint.json
├── .vscode
└── extensions.json
├── CHANGELOG.md
├── CODEOWNERS
├── CONTRIBUTING.md
├── Cargo.toml
├── LICENSE.md
├── README.docker.md
├── README.md
├── RELEASE.md
├── benchmark
├── Cargo.toml
├── Grafana-dashboard.json
├── README.md
├── cloud-config.txt
├── setup-scripts
│ ├── README.md
│ ├── build-environment.sh
│ ├── build-firewood.sh
│ ├── install-grafana.sh
│ └── run-benchmarks.sh
└── src
│ ├── create.rs
│ ├── main.rs
│ ├── single.rs
│ ├── tenkrandom.rs
│ └── zipf.rs
├── cliff.toml
├── docs
└── assets
│ └── architecture.svg
├── ffi
├── .gitignore
├── .golangci.yaml
├── Cargo.toml
├── README.md
├── build.rs
├── cbindgen.toml
├── firewood.go
├── firewood.h
├── firewood_test.go
├── go.mod
├── go.sum
├── kvbackend.go
├── memory.go
├── proposal.go
├── revision.go
├── src
│ ├── lib.rs
│ └── metrics_setup.rs
└── tests
│ ├── eth
│ ├── eth_compatibility_test.go
│ ├── go.mod
│ └── go.sum
│ └── firewood
│ ├── go.mod
│ ├── go.sum
│ └── merkle_compatibility_test.go
├── firewood
├── Cargo.toml
├── benches
│ └── hashops.rs
├── examples
│ └── insert.rs
├── src
│ ├── db.rs
│ ├── lib.rs
│ ├── manager.rs
│ ├── merkle.rs
│ ├── proof.rs
│ ├── range_proof.rs
│ ├── stream.rs
│ └── v2
│ │ ├── api.rs
│ │ ├── emptydb.rs
│ │ ├── mod.rs
│ │ └── propose.rs
└── tests
│ ├── common
│ └── mod.rs
│ ├── db.rs
│ └── v2api.rs
├── fwdctl
├── Cargo.toml
├── README.md
├── src
│ ├── create.rs
│ ├── delete.rs
│ ├── dump.rs
│ ├── get.rs
│ ├── graph.rs
│ ├── insert.rs
│ ├── main.rs
│ └── root.rs
└── tests
│ └── cli.rs
├── grpc-testtool
├── Cargo.toml
├── README.md
├── benches
│ └── insert.rs
├── build.rs
├── proto
│ ├── merkle
│ │ └── merkle.proto
│ ├── process-server
│ │ └── process-server.proto
│ ├── rpcdb
│ │ └── rpcdb.proto
│ └── sync
│ │ └── sync.proto
└── src
│ ├── bin
│ ├── client.rs
│ └── process-server.rs
│ ├── lib.rs
│ ├── service.rs
│ └── service
│ ├── database.rs
│ ├── db.rs
│ └── process.rs
├── storage
├── Cargo.toml
├── benches
│ └── serializer.rs
└── src
│ ├── hashednode.rs
│ ├── hashers
│ ├── ethhash.rs
│ ├── merkledb.rs
│ └── mod.rs
│ ├── lib.rs
│ ├── linear
│ ├── filebacked.rs
│ ├── memory.rs
│ └── mod.rs
│ ├── logger.rs
│ ├── node
│ ├── branch.rs
│ ├── leaf.rs
│ ├── mod.rs
│ └── path.rs
│ ├── nodestore.rs
│ └── trie_hash.rs
└── triehash
├── CHANGELOG.md
├── Cargo.toml
├── README.md
├── benches
└── triehash.rs
└── src
└── lib.rs
/.github/check-license-headers.yaml:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "include": [
4 | "**/**/*.rs"
5 | ],
6 | "exclude": [
7 | "target/**",
8 | "*/LICENSE*",
9 | "LICENSE.md",
10 | "RELEASE.md",
11 | "grpc-testtool/**",
12 | "README*",
13 | "**/README*",
14 | "Cargo.toml",
15 | "*/Cargo.toml",
16 | "docs/**",
17 | "CODEOWNERS",
18 | "CONTRIBUTING.md",
19 | "benchmark/**",
20 | "ffi/**",
21 | "triehash/**",
22 | "CHANGELOG.md",
23 | "cliff.toml"
24 | ],
25 | "license": "./.github/license-header.txt"
26 | },
27 | {
28 | "include": [
29 | "target/**",
30 | "*/LICENSE*",
31 | "LICENSE.md",
32 | "RELEASE.md",
33 | "grpc-testtool/**",
34 | "README*",
35 | "**/README*",
36 | "Cargo.toml",
37 | "*/Cargo.toml",
38 | "docs/**",
39 | "benchmark/**",
40 | "ffi/**",
41 | "CODEOWNERS",
42 | "CONTRIBUTING.md",
43 | "triehash/**",
44 | "CHANGELOG.md",
45 | "cliff.toml"
46 | ]
47 | }
48 | ]
49 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "cargo"
9 | directory: "/"
10 | schedule:
11 | interval: "daily"
12 | time: "05:00"
13 | timezone: "America/Los_Angeles"
14 | open-pull-requests-limit: 10
15 | allow:
16 | - dependency-type: "all"
17 |
--------------------------------------------------------------------------------
/.github/license-header.txt:
--------------------------------------------------------------------------------
1 | // Copyright (C) %year%, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
--------------------------------------------------------------------------------
/.github/workflows/cache-cleanup.yaml:
--------------------------------------------------------------------------------
1 | name: cleanup caches by a branch
2 | on:
3 | pull_request:
4 | types:
5 | - closed
6 |
7 | jobs:
8 | cleanup:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - name: Check out code
12 | uses: actions/checkout@v4
13 |
14 | - name: Cleanup
15 | run: |
16 | gh extension install actions/gh-actions-cache
17 |
18 | REPO=${{ github.repository }}
19 | BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge"
20 |
21 | echo "Fetching list of cache key"
22 | cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 )
23 |
24 | ## Setting this to not fail the workflow while deleting cache keys.
25 | set +e
26 | echo "Deleting caches..."
27 | for cacheKey in $cacheKeysForPR
28 | do
29 | gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm
30 | done
31 | echo "Done"
32 | env:
33 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
34 |
35 |
--------------------------------------------------------------------------------
/.github/workflows/conventional-commits.yaml:
--------------------------------------------------------------------------------
1 | name: Conventional Commits
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 |
7 | jobs:
8 | build:
9 | name: Conventional Commits
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 |
14 | - uses: webiny/action-conventional-commits@v1.3.0
15 | with:
16 | allowed-commit-types: "build,chore,ci,docs,feat,fix,perf,refactor,style,test"
17 |
--------------------------------------------------------------------------------
/.github/workflows/default-branch-cache.yaml:
--------------------------------------------------------------------------------
1 | # because apparently caches are isolated across branches
2 | name: default-branch-cache
3 |
4 | on:
5 | workflow_dispatch:
6 | push:
7 | branches:
8 | - main
9 |
10 | env:
11 | CARGO_TERM_COLOR: always
12 |
13 | jobs:
14 | build:
15 | runs-on: ubuntu-latest
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 | - uses: dtolnay/rust-toolchain@stable
20 | - uses: arduino/setup-protoc@v3
21 | with:
22 | repo-token: ${{ secrets.GITHUB_TOKEN }}
23 | - uses: Swatinem/rust-cache@v2
24 | with:
25 | save-if: "false"
26 | shared-key: "debug-no-features"
27 | - name: Check
28 | run: cargo check --workspace --tests --examples --benches
29 | - name: Build
30 | run: cargo build --workspace --tests --examples --benches
31 |
--------------------------------------------------------------------------------
/.github/workflows/gh-pages.yaml:
--------------------------------------------------------------------------------
1 | name: gh-pages
2 |
3 | on:
4 | push:
5 | branches:
6 | - "main"
7 | - "rkuris/gh-pages"
8 |
9 | env:
10 | CARGO_TERM_COLOR: always
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v4
17 | - uses: dtolnay/rust-toolchain@stable
18 | - uses: arduino/setup-protoc@v3
19 | with:
20 | repo-token: ${{ secrets.GITHUB_TOKEN }}
21 | # caution: this is the same restore as in ci.yaml
22 | - uses: Swatinem/rust-cache@v2
23 | with:
24 | save-if: "false"
25 | - name: Build
26 | run: cargo doc --document-private-items --no-deps
27 | - name: Set up _site redirect to firewood
28 | run: |
29 | rm -fr _site
30 | mkdir _site
31 | echo '<meta http-equiv="refresh" content="0; url=firewood/index.html">' > _site/index.html
32 | - name: Copy doc files to _site
33 | run: |
34 | cp -rv target/doc/* ./_site
35 | cp -rv docs/assets ./_site
36 | - uses: actions/upload-artifact@v4
37 | with:
38 | name: pages
39 | path: _site
40 | if-no-files-found: error
41 | overwrite: true
42 | include-hidden-files: true
43 | deploy:
44 | needs: build
45 | permissions:
46 | pages: write
47 | id-token: write
48 | environment:
49 | name: github-pages
50 | url: ${{ steps.deployment.outputs.page_url }}
51 | runs-on: ubuntu-latest
52 | steps:
53 | - name: Download pages artifact
54 | uses: actions/download-artifact@v4
55 | with:
56 | name: pages
57 | path: .
58 | - name: Setup Pages
59 | uses: actions/configure-pages@v3
60 | - name: Upload artifact
61 | uses: actions/upload-pages-artifact@v3
62 | with:
63 | path: .
64 | - name: Deploy to GitHub pages
65 | id: deployment
66 | uses: actions/deploy-pages@v4
67 |
--------------------------------------------------------------------------------
/.github/workflows/pr-title.yaml:
--------------------------------------------------------------------------------
1 | # Check that the PR title matches the conventional commit format
2 | name: pr-title
3 |
4 | permissions:
5 | pull-requests: write
6 |
7 | on:
8 | pull_request:
9 | types:
10 | - edited
11 | - opened
12 | - reopened
13 |
14 | jobs:
15 | check-pr-title:
16 | runs-on: ubuntu-latest
17 | permissions:
18 | pull-requests: read
19 | steps:
20 | - name: Check PR title follows conventional commits
21 | uses: amannn/action-semantic-pull-request@v5
22 | with:
23 | types: |
24 | build
25 | chore
26 | ci
27 | docs
28 | feat
29 | fix
30 | perf
31 | refactor
32 | style
33 | test
34 | # scope is not required ("feat: whatever" is okay)
35 | requireScope: false
36 | # if the PR only has one commit, we can validate the commit message
37 | # instead of the PR title
38 | validateSingleCommit: true
39 | subjectPattern: ^.{1,}$
40 | subjectPatternError: |
41 | The subject "{subject}" found in the pull request title "{title}"
42 | didn't match the configured pattern. Please ensure that the subject
43 | matches the conventional commit format.
44 | env:
45 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
46 |
47 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: release
2 |
3 | on:
4 | push:
5 | tags:
6 | - "v*.*.*"
7 |
8 | jobs:
9 | build:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout
13 | uses: actions/checkout@v4
14 | - name: Release
15 | uses: softprops/action-gh-release@v1
16 | with:
17 | draft: true
18 | generate_release_notes: true
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore VSCode directory
2 | .vscode
3 |
4 | compose-dev.yaml
5 |
6 | # ignore test databases
7 | *_db
8 |
9 | #### Below sections are auto-generated ####
10 |
11 | # Created by https://www.toptal.com/developers/gitignore/api/rust,visualstudiocode,vim,macos
12 | # Edit at https://www.toptal.com/developers/gitignore?templates=rust,visualstudiocode,vim,macos
13 |
14 | ### macOS ###
15 | # General
16 | .DS_Store
17 | .AppleDouble
18 | .LSOverride
19 |
20 | # Icon must end with two \r
21 | Icon
22 |
23 |
24 | # Thumbnails
25 | ._*
26 |
27 | # Files that might appear in the root of a volume
28 | .DocumentRevisions-V100
29 | .fseventsd
30 | .Spotlight-V100
31 | .TemporaryItems
32 | .Trashes
33 | .VolumeIcon.icns
34 | .com.apple.timemachine.donotpresent
35 |
36 | # Directories potentially created on remote AFP share
37 | .AppleDB
38 | .AppleDesktop
39 | Network Trash Folder
40 | Temporary Items
41 | .apdisk
42 |
43 | ### macOS Patch ###
44 | # iCloud generated files
45 | *.icloud
46 |
47 | ### Rust ###
48 | # Generated by Cargo
49 | # will have compiled files and executables
50 | debug/
51 | target/
52 |
53 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
54 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
55 | Cargo.lock
56 |
57 | # These are backup files generated by rustfmt
58 | **/*.rs.bk
59 |
60 | # MSVC Windows builds of rustc generate these, which store debugging information
61 | *.pdb
62 |
63 | ### Vim ###
64 | # Swap
65 | [._]*.s[a-v][a-z]
66 | # gitignore does not support inline comments; comment out the next line if you don't need vector files
66 | !*.svg
67 | [._]*.sw[a-p]
68 | [._]s[a-rt-v][a-z]
69 | [._]ss[a-gi-z]
70 | [._]sw[a-p]
71 |
72 | # Session
73 | Session.vim
74 | Sessionx.vim
75 |
76 | # Temporary
77 | .netrwhist
78 | *~
79 | # Auto-generated tag files
80 | tags
81 | # Persistent undo
82 | [._]*.un~
83 |
84 | ### VisualStudioCode ###
85 | .vscode/*
86 | !.vscode/settings.json
87 | !.vscode/tasks.json
88 | !.vscode/launch.json
89 | !.vscode/extensions.json
90 | !.vscode/*.code-snippets
91 |
92 | # Local History for Visual Studio Code
93 | .history/
94 |
95 | # Built Visual Studio Code Extensions
96 | *.vsix
97 |
98 | ### VisualStudioCode Patch ###
99 | # Ignore all local history of files
100 | .history
101 | .ionide
102 |
103 | # End of https://www.toptal.com/developers/gitignore/api/rust,visualstudiocode,vim,macos
104 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "line-length": false
3 | }
4 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": [
3 | "davidanson.vscode-markdownlint",
4 | "rust-lang.rust-analyzer",
5 | "vadimcn.vscode-lldb"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # CODEOWNERS
2 | * @rkuris @aaronbuchwald @demosdemon
3 | /ffi @alarso16
4 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Welcome contributors
2 |
3 | We are eager for contributions and happy you found yourself here.
4 | Please read through this document to familiarize yourself with our
5 | guidelines for contributing to firewood.
6 |
7 | ## Table of Contents
8 |
9 | * [Quick Links](#quick-links)
10 | * [Testing](#testing)
11 | * [How to submit changes](#how-to-submit-changes)
12 | * [Where can I ask for help?](#where-can-i-ask-for-help)
13 |
14 | ## [Quick Links]
15 |
16 | * [Setting up docker](README.docker.md)
17 | * [Auto-generated documentation](https://ava-labs.github.io/firewood/firewood/)
18 | * [Issue tracker](https://github.com/ava-labs/firewood/issues)
19 |
20 | ## [Testing]
21 |
22 | After submitting a PR, we'll run all the tests and verify your code meets our submission guidelines. To ensure it's more likely to pass these checks, you should run the following commands locally:
23 |
24 | cargo fmt
25 | cargo test
26 | cargo clippy
27 | cargo doc --no-deps
28 |
29 | Resolve any warnings or errors before making your PR.
30 |
31 | ## [How to submit changes]
32 |
33 | To create a PR, fork firewood, and use GitHub to create the PR. We typically prioritize reviews in the middle of our next work day,
34 | so you should expect a response during the week within 24 hours.
35 |
36 | ## [How to report a bug]
37 |
38 | Please use the [issue tracker](https://github.com/ava-labs/firewood/issues) for reporting issues.
39 |
40 | ## [First time fixes for contributors]
41 |
42 | The [issue tracker](https://github.com/ava-labs/firewood/issues) typically has some issues tagged for first-time contributors. If not,
43 | please reach out. We hope you work on an easy task before tackling a harder one.
44 |
45 | ## [How to request an enhancement]
46 |
47 | Just like bugs, please use the [issue tracker](https://github.com/ava-labs/firewood/issues) for requesting enhancements. Please tag the issue with the "enhancement" tag.
48 |
49 | ## [Style Guide / Coding Conventions]
50 |
51 | We generally follow the same rules that `cargo fmt` and `cargo clippy` will report as warnings, with a few notable exceptions as documented in the associated Cargo.toml file.
52 |
53 | By default, we prohibit bare `unwrap` calls and index dereferencing, as there are usually better ways to write this code. In the case where you can't, please use `expect` with a message explaining why it would be a bug, which we currently allow. For more information on our motivation, please read this great article on unwrap: [Using unwrap() in Rust is Okay](https://blog.burntsushi.net/unwrap) by [Andrew Gallant](https://blog.burntsushi.net).
54 |
55 | ## [Where can I ask for help]?
56 |
57 | Please reach out on X (formerly twitter) @rkuris for help or questions!
58 |
59 | ## Thank you
60 |
61 | We'd like to extend a pre-emptive "thank you" for reading through this and submitting your first contribution!
62 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = [
3 | "firewood",
4 | "fwdctl",
5 | "storage",
6 | "benchmark",
7 | "ffi",
8 | "triehash",
9 | ]
10 | resolver = "2"
11 |
12 | [profile.release]
13 | debug = true
14 |
15 | [profile.maxperf]
16 | panic = "abort"
17 | codegen-units = 1
18 | lto = "fat"
19 | debug = false
20 | inherits = "release"
21 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # Ecosystem License
2 |
3 | Version: 1.1
4 |
5 | Subject to the terms herein, Ava Labs, Inc. (**“Ava Labs”**) hereby grants you
6 | a limited, royalty-free, worldwide, non-sublicensable, non-transferable,
7 | non-exclusive license to use, copy, modify, create derivative works based on,
8 | and redistribute the Software, in source code, binary, or any other form,
9 | including any modifications or derivative works of the Software (collectively,
10 | **“Licensed Software”**), in each case subject to this Ecosystem License
11 | (**“License”**).
12 |
13 | This License applies to all copies, modifications, derivative works, and any
14 | other form or usage of the Licensed Software. You will include and display
15 | this License, without modification, with all uses of the Licensed Software,
16 | regardless of form.
17 |
18 | You will use the Licensed Software solely (i) in connection with the Avalanche
19 | Public Blockchain platform, having a NetworkID of 1 (Mainnet) or 5 (Fuji), and
20 | associated blockchains, comprised exclusively of the Avalanche X-Chain,
21 | C-Chain, P-Chain and any subnets linked to the P-Chain (“Avalanche Authorized
22 | Platform”) or (ii) for non-production, testing or research purposes within the
23 | Avalanche ecosystem, in each case, without any commercial application
24 | (“Non-Commercial Use”); provided that this License does not permit use of the
25 | Licensed Software in connection with (a) any forks of the Avalanche Authorized
26 | Platform or (b) in any manner not operationally connected to the Avalanche
27 | Authorized Platform other than, for the avoidance of doubt, the limited
28 | exception for Non-Commercial Use. Ava Labs may publicly announce changes or
29 | additions to the Avalanche Authorized Platform, which may expand or modify
30 | usage of the Licensed Software. Upon such announcement, the Avalanche
31 | Authorized Platform will be deemed to be the then-current iteration of such
32 | platform.
33 |
34 | You hereby acknowledge and agree to the terms set forth at
35 | .
36 |
37 | If you use the Licensed Software in violation of this License, this License
38 | will automatically terminate and Ava Labs reserves all rights to seek any
39 | remedy for such violation.
40 |
41 | Except for uses explicitly permitted in this License, Ava Labs retains all
42 | rights in the Licensed Software, including without limitation the ability to
43 | modify it.
44 |
45 | Except as required or explicitly permitted by this License, you will not use
46 | any Ava Labs names, logos, or trademarks without Ava Labs’ prior written
47 | consent.
48 |
49 | You may use this License for software other than the “Licensed Software”
50 | specified above, as long as the only change to this License is the definition
51 | of the term “Licensed Software.”
52 |
53 | The Licensed Software may reference third party components. You acknowledge
54 | and agree that these third party components may be governed by a separate
55 | license or terms and that you will comply with them.
56 |
57 | **TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE LICENSED SOFTWARE IS PROVIDED
58 | ON AN “AS IS” BASIS, AND AVA LABS EXPRESSLY DISCLAIMS AND EXCLUDES ALL
59 | REPRESENTATIONS, WARRANTIES AND OTHER TERMS AND CONDITIONS, WHETHER EXPRESS OR
60 | IMPLIED, INCLUDING WITHOUT LIMITATION BY OPERATION OF LAW OR BY CUSTOM,
61 | STATUTE OR OTHERWISE, AND INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTY,
62 | TERM, OR CONDITION OF NON-INFRINGEMENT, MERCHANTABILITY, TITLE, OR FITNESS FOR
63 | PARTICULAR PURPOSE. YOU USE THE LICENSED SOFTWARE AT YOUR OWN RISK. AVA LABS
64 | EXPRESSLY DISCLAIMS ALL LIABILITY (INCLUDING FOR ALL DIRECT, CONSEQUENTIAL OR
65 | OTHER DAMAGES OR LOSSES) RELATED TO ANY USE OF THE LICENSED SOFTWARE.**
66 |
--------------------------------------------------------------------------------
/README.docker.md:
--------------------------------------------------------------------------------
1 | # Docker on Mac Compatibility
2 |
3 | Note:
4 | Docker compatibility is a work in progress. Please PR any changes here if you find a better way to do this.
5 |
6 | ## Steps
7 |
8 | ### Step 1
9 |
10 | Install docker-desktop ([guide](https://docs.docker.com/desktop/install/mac-install/))
11 |
12 | ### Step 2
13 |
14 | Setup a dev-environment ([guide](https://docs.docker.com/desktop/dev-environments/set-up/#set-up-a-dev-environment))
15 |
16 | Here, you want to specifically pick a local-directory (the repo's directory)
17 |
18 | 
19 |
20 | This is best because you can still do all your `git` stuff from the host.
21 |
22 | ### Step 3
23 |
24 | You will need the `Dev Containers` VSCODE extension, authored by Microsoft for this next step.
25 |
26 | Open your dev-environment with VSCODE. Until you do this, the volume might not be properly mounted. If you (dear reader) know of a better way to do this, please open a PR. VSCODE is very useful for its step-by-step debugger, but other than that, you can run whatever IDE you would like in the host environment and just open a shell in the container to run the tests.
27 |
28 | 
29 |
30 | ### Step 4
31 |
32 | Open a terminal in vscode OR exec into the container directly as follows
33 |
34 | ```sh
35 | # you don't need to do this if you open the terminal from vscode
36 | # the container name here is "firewood-app-1", you should be able to see this in docker-desktop
37 | docker exec -it --privileged -u root firewood-app-1 zsh
38 | ```
39 |
40 | Once you're in the terminal you'll want to install the Rust toolset. You can [find instructions here](https://rustup.rs/)
41 |
42 | **!!! IMPORTANT !!!**
43 |
44 | Make sure you read the output of any commands that you run. `rustup` will likely ask you to `source` a file to add some tools to your `PATH`.
45 |
46 | You'll also need to install all the regular linux dependencies (if there is anything from this list that's missing, please add to this README)
47 |
48 | ```sh
49 | apt update
50 | apt install vim
51 | apt install build-essential
52 | apt install protobuf-compiler
53 | ```
54 |
55 | ### Step 5
56 |
57 | **!!! IMPORTANT !!!**
58 |
59 | You need to create a separate `CARGO_TARGET_DIR` that isn't volume mounted onto the host. `VirtioFS` (the default file-system) has some concurrency issues when dealing with sequential writes and reads to a volume that is mounted to the host. You can put a directory here for example: `/root/target`.
60 |
61 | For step-by-step debugging and development directly in the container, you will also **need to make sure that `rust-analyzer` is configured to point to the new target-directory instead of just default**.
62 |
63 | There are a couple of places where this can be setup. If you're a `zsh` user, you should add `export CARGO_TARGET_DIR=/root/target` to either `/root/.zshrc` or `/root/.bashrc`.
64 | After adding the line, don't forget to `source` the file to make sure your current session is updated.
65 |
66 | ### Step 6
67 |
68 | Navigate to `/com.docker.devenvironments.code` and run `cargo test`. If it worked, you are most of the way there! If it did not work, there are a couple of common issues. If the code will not compile, it's possible that your target directory isn't set up properly. Check inside `/root/target` to see if there are any build artifacts. If not, you might need to call `source ~/.zshrc` again (sub in whatever your preferred shell is).
69 |
70 | Now for vscode, you need to configure your `rust-analyzer` in the "remote-environment" (the Docker container). There are a couple of places to do this. First, you want to open `/root/.vscode-server/Machine/settings.json` and make sure that you have the following entry:
71 |
72 | ```json
73 | {
74 | "rust-analyzer.cargo.extraEnv": {
75 | "CARGO_TARGET_DIR": "/root/target"
76 | }
77 | }
78 | ```
79 |
80 | Then, you want to make sure that the terminal that's being used by the vscode instance (for the host system) is the same as your preferred terminal in the container to make sure that things work as expected. [Here are the docs](https://code.visualstudio.com/docs/terminal/profiles) to help you with setting up the proper profile.
81 |
82 | And that should be enough to get you started! Feel free to open an issue if you need any help debugging.
83 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Firewood: Compaction-Less Database Optimized for Efficiently Storing Recent Merkleized Blockchain State
2 |
3 | 
4 | [](./LICENSE.md)
5 |
6 | > :warning: Firewood is alpha-level software and is not ready for production
7 | > use. The Firewood API and on-disk state representation may change with
8 | > little to no warning.
9 |
10 | Firewood is an embedded key-value store, optimized to store recent Merkleized blockchain
11 | state with minimal overhead. Firewood is implemented from the ground up to directly
12 | store trie nodes on-disk. Unlike most state management approaches in the field,
13 | it is not built on top of a generic KV store such as LevelDB/RocksDB. Firewood, like a
14 | B+-tree based database, directly uses the trie structure as the index on-disk. Thus,
15 | there is no additional “emulation” of the logical trie to flatten out the data structure
16 | to feed into the underlying database that is unaware of the data being stored. The convenient
17 | byproduct of this approach is that iteration is still fast (for serving state sync queries)
18 | but compaction is not required to maintain the index. Firewood was first conceived to provide
19 | a very fast storage layer for the EVM but could be used on any blockchain that
20 | requires an authenticated state.
21 |
22 | Firewood only attempts to store recent revisions on-disk and will actively clean up
23 | unused data when revisions expire. Firewood keeps some configurable number of previous states in memory and on disk to power state sync (which may occur at a few roots behind the current state). To do this, a new root is always created for each revision that can reference either new nodes from this revision or nodes from a prior revision. When creating a revision, a list of nodes that are no longer needed are computed and saved to disk in a future-delete log (FDL) as well as kept in memory. When a revision expires, the nodes that were deleted when it was created are returned to the free space.
24 |
25 | Firewood guarantees recoverability by not referencing the new nodes in a new revision before they are flushed to disk, as well as carefully managing the free list during the creation and expiration of revisions.
26 |
27 | ## Architecture Diagram
28 |
29 | ![Architecture diagram](./docs/assets/architecture.svg)
30 |
31 | ## Terminology
32 |
33 | - `Revision` - A historical point-in-time state/version of the trie. This
34 | represents the entire trie, including all `Key`/`Value`s at that point
35 | in time, and all `Node`s.
36 | - `View` - This is the interface to read from a `Revision` or a `Proposal`.
37 | - `Node` - A node is a portion of a trie. A trie consists of nodes that are linked
38 | together. Nodes can point to other nodes and/or contain `Key`/`Value` pairs.
39 | - `Hash` - In this context, this refers to the merkle hash for a specific node.
40 | - `Root Hash` - The hash of the root node for a specific revision.
41 | - `Key` - Represents an individual byte array used to index into a trie. A `Key`
42 | usually has a specific `Value`.
43 | - `Value` - Represents a byte array for the value of a specific `Key`. Values can
44 | contain 0-N bytes. In particular, a zero-length `Value` is valid.
45 | - `Key Proof` - A proof that a `Key` exists within a specific revision of a trie.
46 | This includes the hash for the node containing the `Key` as well as all parents.
47 | - `Range Proof` - A proof that consists of two `Key Proof`s, one for the start of
48 | the range, and one for the end of the range, as well as a list of all `Key`/`Value`
49 | pairs in between the two. A `Range Proof` can be validated independently of an
50 | actual database by constructing a trie from the `Key`/`Value`s provided.
51 | - `Change Proof` - A proof that consists of a set of all changes between two
52 | revisions.
53 | - `Put` - An operation for a `Key`/`Value` pair. A put means "create if it doesn't
54 | exist, or update it if it does." A put operation is how you add a `Value` for a
55 | specific `Key`.
56 | - `Delete` - An operation indicating that a `Key` should be removed from the trie.
57 | - `Batch Operation` - An operation of either `Put` or `Delete`.
58 | - `Batch` - An ordered set of `Batch Operation`s.
59 | - `Proposal` - A proposal consists of a base `Root Hash` and a `Batch`, but is not
60 | yet committed to the trie. In Firewood's most recent API, a `Proposal` is required
61 | to `Commit`.
62 | - `Commit` - The operation of applying one or more `Proposal`s to the most recent
63 | `Revision`.
64 |
65 | ## Roadmap
66 |
67 | - [X] Complete the revision manager
68 | - [X] Complete the API implementation
69 | - [X] Implement a node cache
70 | - [ ] Complete the proof code
71 | - [ ] Hook up the RPC
72 |
73 | ## Build
74 |
75 | In order to build firewood, the following dependencies must be installed:
76 |
77 | - `protoc` See [installation instructions](https://grpc.io/docs/protoc-installation/).
78 | - `cargo` See [installation instructions](https://doc.rust-lang.org/cargo/getting-started/installation.html).
79 | - `make` See [download instructions](https://www.gnu.org/software/make/#download) or run `sudo apt install build-essential` on Linux.
80 |
81 | ## Run
82 |
83 | There are several examples, in the examples directory, that simulate real world
84 | use-cases. Try running them via the command-line, via `cargo run --release
85 | --example insert`.
86 |
87 | For maximum performance, use `cargo run --profile maxperf` instead, which enables maximum
88 | link time compiler optimizations, but takes a lot longer to compile.
89 |
90 | ## Logging
91 |
92 | If you want logging, enable the `logging` feature flag, and then set RUST\_LOG accordingly.
93 | See the documentation for [env\_logger](https://docs.rs/env_logger/latest/env_logger/) for specifics.
94 | We currently have very few logging statements, but this is useful for print-style debugging.
95 |
96 | ## Release
97 |
98 | See the [release documentation](./RELEASE.md) for detailed information on how to release Firewood.
99 |
100 | ## CLI
101 |
102 | Firewood comes with a CLI tool called `fwdctl` that enables one to create and interact with a local instance of a Firewood database. For more information, see the [fwdctl README](fwdctl/README.md).
103 |
104 | ## Test
105 |
106 | ```sh
107 | cargo test --release
108 | ```
109 |
110 | ## License
111 |
112 | Firewood is licensed by the Ecosystem License. For more information, see the
113 | [LICENSE file](./LICENSE.md).
114 |
--------------------------------------------------------------------------------
/RELEASE.md:
--------------------------------------------------------------------------------
1 | # Releasing firewood
2 |
3 | Releasing firewood is straightforward and can be done entirely in CI.
4 |
5 | Firewood is made up of several sub-projects in a workspace. Each project is in
6 | its own crate and has an independent version.
7 |
8 | The first step in drafting a release is ensuring all crates within the firewood
9 | project are using the version of the new release. There is a utility to ensure
10 | all versions are updated simultaneously in `cargo-workspace-version`. To use it
11 | to update to 0.0.5, for example:
12 |
13 | ```sh
14 | cargo install cargo-workspace-version
15 | cargo workspace-version update v0.0.5
16 | ```
17 |
18 | See the [source code](https://github.com/ava-labs/cargo-workspace-version) for
19 | more information on the tool.
20 |
21 | > ❗ Be sure to update the versions of all sub-projects before creating a new
22 | > release. Open a PR with the updated versions and merge it before continuing to
23 | > the next step.
24 |
25 | To trigger a release, simply push a semver-compatible tag to the main branch,
26 | for example `v0.0.5`. The CI will automatically publish a draft release which
27 | consists of release notes and changes.
28 |
29 | ## Changelog
30 |
31 | To build the changelog, see [git-cliff](https://git-cliff.org). Short version:
32 |
33 | ```sh
34 | cargo install git-cliff
35 | git cliff --tag v0.0.5 | sed -e 's/_/\\_/g' > CHANGELOG.md
36 | ```
37 |
--------------------------------------------------------------------------------
/benchmark/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "benchmark"
3 | version = "0.0.5"
4 | edition = "2024"
5 | rust-version = "1.85.0"
6 |
7 | [dependencies]
8 | firewood = { path = "../firewood" }
9 | hex = "0.4.3"
10 | clap = { version = "4.5.0", features = ['derive', 'string'] }
11 | sha2 = "0.10.8"
12 | metrics = "0.24.1"
13 | metrics-util = "0.19.0"
14 | metrics-exporter-prometheus = "0.17.0"
15 | tokio = { version = "1.36.0", features = ["rt", "sync", "macros", "rt-multi-thread"] }
16 | rand = "0.9.0"
17 | rand_distr = "0.5.0"
18 | pretty-duration = "0.1.1"
19 | env_logger = "0.11.5"
20 | log = "0.4.20"
21 | fastrace = { version = "0.7.4", features = ["enable"] }
22 | fastrace-opentelemetry = { version = "0.12.0" }
23 | opentelemetry-otlp = { version = "0.30.0", features = ["grpc-tonic"] }
24 | opentelemetry = "0.30.0"
25 | opentelemetry_sdk = "0.30.0"
26 | strum = "0.27.0"
27 |
28 | [target.'cfg(unix)'.dependencies]
29 | tikv-jemallocator = "0.6.0"
30 |
31 | [features]
32 | logger = ["firewood/logger"]
33 |
--------------------------------------------------------------------------------
/benchmark/cloud-config.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | write_files:
3 | - path: /etc/systemd/system/grafana-server.service.d/override.conf
4 | owner: root:root
5 | permissions: '0644'
6 | content: |
7 | [Service]
8 | CapabilityBoundingSet=CAP_NET_BIND_SERVICE
9 | AmbientCapabilities=CAP_NET_BIND_SERVICE
10 | PrivateUsers=false
11 | - path: /run/firewood/build-firewood.sh
12 | permissions: '0755'
13 | content: |
14 | #!/bin/bash
15 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
16 | . "$HOME/.cargo/env"
17 | git clone https://github.com/ava-labs/firewood.git
18 | cd firewood
19 | git checkout rkuris/prometheus
20 | cargo build --release
21 | - path: /etc/prometheus/prometheus.yml.addon
22 | content: |2
23 | - job_name: firewood
24 | static_configs:
25 | - targets: ['localhost:3000']
26 | apt:
27 | sources:
28 | grafana:
29 | source: deb https://apt.grafana.com stable main
30 | key: |
31 | -----BEGIN PGP PUBLIC KEY BLOCK-----
32 |
33 | mQGNBGTnhmkBDADUE+SzjRRyitIm1siGxiHlIlnn6KO4C4GfEuV+PNzqxvwYO+1r
34 | mcKlGDU0ugo8ohXruAOC77Kwc4keVGNU89BeHvrYbIftz/yxEneuPsCbGnbDMIyC
35 | k44UOetRtV9/59Gj5YjNqnsZCr+e5D/JfrHUJTTwKLv88A9eHKxskrlZr7Un7j3i
36 | Ef3NChlOh2Zk9Wfk8IhAqMMTferU4iTIhQk+5fanShtXIuzBaxU3lkzFSG7VuAH4
37 | CBLPWitKRMn5oqXUE0FZbRYL/6Qz0Gt6YCJsZbaQ3Am7FCwWCp9+ZHbR9yU+bkK0
38 | Dts4PNx4Wr9CktHIvbypT4Lk2oJEPWjcCJQHqpPQZXbnclXRlK5Ea0NVpaQdGK+v
39 | JS4HGxFFjSkvTKAZYgwOk93qlpFeDML3TuSgWxuw4NIDitvewudnaWzfl9tDIoVS
40 | Bb16nwJ8bMDzovC/RBE14rRKYtMLmBsRzGYHWd0NnX+FitAS9uURHuFxghv9GFPh
41 | eTaXvc4glM94HBUAEQEAAbQmR3JhZmFuYSBMYWJzIDxlbmdpbmVlcmluZ0BncmFm
42 | YW5hLmNvbT6JAdQEEwEKAD4WIQS1Oud7rbYwpoMEYAWWP6J3EEWFRQUCZOeGaQIb
43 | AwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCWP6J3EEWFRUiADACa
44 | i+xytv2keEFJWjXNnFAx6/obnHRcXOI3w6nH/zL8gNI7YN5jcdQT2NYvKVYTb3fW
45 | GuMsjHWgat5Gq3AtJrOKABpZ6qeYNPk0Axn/dKtOTwXjZ4pKX3bbUYvVfs0fCEZv
46 | B0HHIj2wI9kgMpoTrkj22LE8layZTPOoQ+3/FbLzS8hN3CYZj25mHN7bpZq8EbV3
47 | 8FW9EU0HM0tg6CvoxkRiVqAuAC0KnVIZAdhD4dlYKuncq64nMvT1A5wxSYbnE+uf
48 | mnWQQhhS6BOwRqN054yw1FrWNDFsvnOSHmr8dIiriv+aZYvx5JQFJ7oZP3LwdYyg
49 | ocQcAJA8HFTIk3P6uJiIF/zdDzocgdKs+IYDoId0hxX7sGCvqdrsveq8n3m7uQiN
50 | 7FvSiV0eXIdV4F7340kc8EKiYwpuYSaZX0UWKLenzlUvD+W4pZCWtoXzPsW7PKUt
51 | q1xdW0+NY+AGLCvSJCc5F4S5kFCObfBAYBbldjwwJFocdq/YOvvWYTPyV7kJeJS5
52 | AY0EZOeGaQEMALNIFUricEIwtZiX7vSDjwxobbqPKqzdek8x3ud0CyYlrbGHy0k+
53 | FDEXstjJQQ1s9rjJSu3sv5wyg9GDAUH3nzO976n/ZZvKPti3p2XU2UFx5gYkaaFV
54 | D56yYxqGY0YU5ft6BG+RUz3iEPg3UBUzt0sCIYnG9+CsDqGOnRYIIa46fu2/H9Vu
55 | 8JvvSq9xbsK9CfoQDkIcoQOixPuI4P7eHtswCeYR/1LUTWEnYQWsBCf57cEpzR6t
56 | 7mlQnzQo9z4i/kp4S0ybDB77wnn+isMADOS+/VpXO+M7Zj5tpfJ6PkKch3SGXdUy
57 | 3zht8luFOYpJr2lVzp7n3NwB4zW08RptTzTgFAaW/NH2JjYI+rDvQm4jNs08Dtsp
58 | nm4OQvBA9Df/6qwMEOZ9i10ixqk+55UpQFJ3nf4uKlSUM7bKXXVcD/odq804Y/K4
59 | y3csE059YVIyaPexEvYSYlHE2odJWRg2Q1VehmrOSC8Qps3xpU7dTHXD74ZpaYbr
60 | haViRS5v/lCsiwARAQABiQG8BBgBCgAmFiEEtTrne622MKaDBGAFlj+idxBFhUUF
61 | AmTnhmkCGwwFCQPCZwAACgkQlj+idxBFhUUNbQv8DCcfi3GbWfvp9pfY0EJuoFJX
62 | LNgci7z7smXq7aqDp2huYQ+MulnPAydjRCVW2fkHItF2Ks6l+2/8t5Xz0eesGxST
63 | xTyR31ARENMXaq78Lq+itZ+usOSDNuwJcEmJM6CceNMLs4uFkX2GRYhchkry7P0C
64 | lkLxUTiB43ooi+CqILtlNxH7kM1O4Ncs6UGZMXf2IiG9s3JDCsYVPkC5QDMOPkTy
65 | 2ZriF56uPerlJveF0dC61RZ6RlM3iSJ9Fwvea0Oy4rwkCcs5SHuwoDTFyxiyz0QC
66 | 9iqi3fG3iSbLvY9UtJ6X+BtDqdXLAT9Pq527mukPP3LwpEqFVyNQKnGLdLOu2YXc
67 | TWWWseSQkHRzBmjD18KTD74mg4aXxEabyT4snrXpi5+UGLT4KXGV5syQO6Lc0OGw
68 | 9O/0qAIU+YW7ojbKv8fr+NB31TGhGYWASjYlN1NvPotRAK6339O0/Rqr9xGgy3AY
69 | SR+ic2Y610IM7xccKuTVAW9UofKQwJZChqae9VVZ
70 | =J9CI
71 | -----END PGP PUBLIC KEY BLOCK-----
72 |
73 | package_update: true
74 | package_upgrade: true
75 | packages:
76 | - git
77 | - protobuf-compiler
78 | - build-essential
79 | - apt-transport-https
80 | - grafana
81 | - prometheus
82 | - net-tools
83 | runcmd:
84 | - [ perl, -pi, -e, "s/^;?http_port = .*/http_port = 80/", /etc/grafana/grafana.ini ]
85 | - [ dd, if=/etc/prometheus/prometheus.yml.addon, of=/etc/prometheus/prometheus.yml, conv=notrunc, oflag=append ]
86 | - [ systemctl, daemon-reload ]
87 | - [ systemctl, enable, grafana-server ]
88 | - [ systemctl, start, grafana-server ]
89 | - [ sudo, -l, -u, ubuntu, /run/firewood/build-firewood.sh ]
90 |
--------------------------------------------------------------------------------
/benchmark/setup-scripts/README.md:
--------------------------------------------------------------------------------
1 | # Setup Scripts
2 |
3 | This directory contains the scripts needed to set up the firewood benchmarks, as follows:
4 |
5 | ```bash
6 | sudo bash build-environment.sh
7 | ```
8 |
9 | This script sets up the build environment, including installing the firewood build dependencies.
10 |
11 | ```bash
12 | sudo bash install-grafana.sh
13 | ```
14 |
15 | This script sets up grafana to listen on port 3000 for firewood. It also sets up listening
16 | for coreth as well, on port 6060, with the special metrics path coreth expects.
17 |
18 | ```bash
19 | bash build-firewood.sh
20 | ```
21 |
22 | This script checks out and builds firewood. It assumes you have already set up the build environment earlier.
23 |
24 | The final script, `run-benchmarks.sh`, is a set of commands that can be copied/pasted to run individual
25 | benchmarks of different sizes.
26 |
--------------------------------------------------------------------------------
/benchmark/setup-scripts/build-environment.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# This script sets up the build environment, including installing the firewood build dependencies.
set -o errexit

# Everything below installs packages and writes to /etc, so require root.
if [ "$EUID" -ne 0 ]; then
    echo "This script must be run as root" >&2
    exit 1
fi

# Refresh the package index first; upgrading against a stale index on a
# fresh instance can miss (or fail to find) current package versions.
apt-get update
apt-get upgrade -y

# install the build dependency packages (only the ones not already present)
pkgs=(git protobuf-compiler build-essential apt-transport-https net-tools zfsutils-linux)
install_pkgs=()
for pkg in "${pkgs[@]}"; do
    if ! dpkg -s "$pkg" > /dev/null 2>&1; then
        install_pkgs+=("$pkg")
    fi
done
if [ "${#install_pkgs[@]}" -gt 0 ]; then
    apt-get install -y "${install_pkgs[@]}"
fi

# If there is an NVMe device, format it and mount it to /mnt/nvme/ubuntu/firewood
# this happens on amazon ec2 instances.
# -e makes realpath require the path to exist and -q silences the error, so
# when the glob matches nothing this prints nothing instead of echoing the
# canonicalized literal pattern (which would make the [ -n ... ] test pass
# and mkfs.ext4 abort the script on machines without instance storage).
NVME_DEV="$(realpath -qe /dev/disk/by-id/nvme-Amazon_EC2_NVMe_Instance_Storage_* | uniq || true)"
if [ -n "$NVME_DEV" ]; then
    mkfs.ext4 -E nodiscard -i 6291456 "$NVME_DEV"
    NVME_MOUNT=/mnt/nvme
    mkdir -p "$NVME_MOUNT"
    mount -o noatime "$NVME_DEV" "$NVME_MOUNT"
    # persist the mount across reboots
    echo "$NVME_DEV $NVME_MOUNT ext4 noatime 0 0" >> /etc/fstab
    mkdir -p "$NVME_MOUNT/ubuntu/firewood"
    chown ubuntu:ubuntu "$NVME_MOUNT/ubuntu" "$NVME_MOUNT/ubuntu/firewood"
    # build-firewood.sh clones into ~/firewood; point that at the fast disk
    ln -s "$NVME_MOUNT/ubuntu/firewood" /home/ubuntu/firewood
fi
37 |
--------------------------------------------------------------------------------
/benchmark/setup-scripts/build-firewood.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Checks out and builds firewood. Assumes the build environment
# (build-environment.sh) has already been set up.
set -o errexit

# rustup and the build must run as the benchmark user, not root
if [ "$EUID" -eq 0 ]; then
    echo "This script should be run as a non-root user" >&2
    exit 1
fi

# install rust
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
. "$HOME/.cargo/env"

# clone the firewood repository into ~/firewood, which may be a symlink
# onto NVMe storage created by build-environment.sh, so clone into the
# directory rather than replacing it
if [ ! -d "$HOME/firewood" ]; then
    mkdir -p "$HOME/firewood"
fi
pushd "$HOME/firewood"

# only clone on the first run so re-running the script is harmless
# (a second `git clone ... .` into a non-empty directory would fail and,
# with errexit, abort before the build)
if [ ! -d .git ]; then
    git clone https://github.com/ava-labs/firewood.git .
fi

# build the firewood binary with maximum link-time optimizations
cargo build --profile maxperf
popd
25 |
--------------------------------------------------------------------------------
/benchmark/setup-scripts/install-grafana.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -o errexit
3 |
4 | # install the keyrings needed to validate the grafana apt repository
5 | if ! [ -d /etc/apt/keyrings ]; then
6 | mkdir -p /etc/apt/keyrings/
7 | fi
8 | if ! [ -f /etc/apt/keyrings/grafana.gpg ]; then
9 | wget -q -O - https://apt.grafana.com/gpg.key | gpg --dearmor | sudo tee /etc/apt/keyrings/grafana.gpg > /dev/null
10 | echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list
11 | fi
12 | apt-get update
13 |
14 | # set up the systemd configuration to allow grafana to bind to port 80
15 | if ! [ -d /etc/systemd/system/grafana-server.service.d ]; then
16 | mkdir -p /etc/systemd/system/grafana-server.service.d
17 | fi
18 |
19 | if ! [ -f /etc/systemd/system/grafana-server.service.d/override.conf ]; then
20 | cat > /etc/systemd/system/grafana-server.service.d/override.conf < /dev/null 2>&1; then
37 | install_pkgs+=("$pkg")
38 | fi
39 | done
40 | if [ "${#install_pkgs[@]}" -gt 0 ]; then
41 | apt-get install -y "${install_pkgs[@]}"
42 | fi
43 |
44 | # configure grafana to listen on port 80
45 | if ! grep -q '^http_port = 80$' /etc/grafana/grafana.ini; then
46 | perl -pi -e 's/^;?http_port = .*/http_port = 80/' /etc/grafana/grafana.ini
47 | fi
48 |
49 | # configure prometheus to scrape firewood
50 | if ! grep -q '^ - job_name: firewood$' /etc/prometheus/prometheus.yml; then
51 | cat >> /etc/prometheus/prometheus.yml <> /etc/default/prometheus-node-exporter < Result<(), Box> {
22 | let keys = args.global_opts.batch_size;
23 | let start = Instant::now();
24 |
25 | for key in 0..args.global_opts.number_of_batches {
26 | let root = Span::root(func_path!(), SpanContext::random());
27 | let _guard = root.set_local_parent();
28 |
29 | let batch = Self::generate_inserts(key * keys, args.global_opts.batch_size).collect();
30 |
31 | let proposal = db.propose(batch).await.expect("proposal should succeed");
32 | proposal.commit().await?;
33 | }
34 | let duration = start.elapsed();
35 | info!(
36 | "Generated and inserted {} batches of size {keys} in {}",
37 | args.global_opts.number_of_batches,
38 | pretty_duration(&duration, None)
39 | );
40 |
41 | Ok(())
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/benchmark/src/single.rs:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
4 | use crate::TestRunner;
5 | use firewood::db::{BatchOp, Db};
6 | use firewood::v2::api::{Db as _, Proposal as _};
7 | use log::debug;
8 | use pretty_duration::pretty_duration;
9 | use sha2::{Digest, Sha256};
10 | use std::error::Error;
11 | use std::time::Instant;
12 |
13 | #[derive(Clone)]
14 | pub struct Single;
15 |
16 | impl TestRunner for Single {
17 | async fn run(&self, db: &Db, args: &crate::Args) -> Result<(), Box> {
18 | let start = Instant::now();
19 | let inner_keys: Vec<_> = (0..args.global_opts.batch_size)
20 | .map(|i| Sha256::digest(i.to_ne_bytes()))
21 | .collect();
22 | let mut batch_id = 0;
23 |
24 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes {
25 | let batch = inner_keys
26 | .iter()
27 | .map(|key| BatchOp::Put {
28 | key,
29 | value: vec![batch_id as u8],
30 | })
31 | .collect();
32 | let proposal = db.propose(batch).await.expect("proposal should succeed");
33 | proposal.commit().await?;
34 |
35 | if log::log_enabled!(log::Level::Debug) && batch_id % 1000 == 999 {
36 | debug!(
37 | "completed {} batches in {}",
38 | 1 + batch_id,
39 | pretty_duration(&start.elapsed(), None)
40 | );
41 | }
42 | batch_id += 1;
43 | }
44 | Ok(())
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/benchmark/src/tenkrandom.rs:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
4 | use std::error::Error;
5 | use std::time::Instant;
6 |
7 | use firewood::db::{BatchOp, Db};
8 | use firewood::logger::debug;
9 | use firewood::v2::api::{Db as _, Proposal as _};
10 |
11 | use crate::{Args, TestRunner};
12 | use sha2::{Digest, Sha256};
13 |
14 | #[derive(Clone, Default)]
15 | pub struct TenKRandom;
16 |
17 | impl TestRunner for TenKRandom {
18 | async fn run(&self, db: &Db, args: &Args) -> Result<(), Box> {
19 | let mut low = 0;
20 | let mut high = args.global_opts.number_of_batches * args.global_opts.batch_size;
21 | let twenty_five_pct = args.global_opts.batch_size / 4;
22 |
23 | let start = Instant::now();
24 |
25 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes {
26 | let batch: Vec> = Self::generate_inserts(high, twenty_five_pct)
27 | .chain(generate_deletes(low, twenty_five_pct))
28 | .chain(generate_updates(low + high / 2, twenty_five_pct * 2, low))
29 | .collect();
30 | let proposal = db.propose(batch).await.expect("proposal should succeed");
31 | proposal.commit().await?;
32 | low += twenty_five_pct;
33 | high += twenty_five_pct;
34 | }
35 | Ok(())
36 | }
37 | }
38 | fn generate_updates(
39 | start: u64,
40 | count: u64,
41 | low: u64,
42 | ) -> impl Iterator- , Box<[u8]>>> {
43 | let hash_of_low: Box<[u8]> = Sha256::digest(low.to_ne_bytes())[..].into();
44 | (start..start + count)
45 | .map(|inner_key| {
46 | let digest = Sha256::digest(inner_key.to_ne_bytes())[..].into();
47 | debug!(
48 | "updating {:?} with digest {} to {}",
49 | inner_key,
50 | hex::encode(&digest),
51 | hex::encode(&hash_of_low)
52 | );
53 | (digest, hash_of_low.clone())
54 | })
55 | .map(|(key, value)| BatchOp::Put { key, value })
56 | .collect::>()
57 | .into_iter()
58 | }
59 | fn generate_deletes(start: u64, count: u64) -> impl Iterator
- , Box<[u8]>>> {
60 | (start..start + count)
61 | .map(|key| {
62 | let digest = Sha256::digest(key.to_ne_bytes())[..].into();
63 | debug!("deleting {:?} with digest {}", key, hex::encode(&digest));
64 | #[allow(clippy::let_and_return)]
65 | digest
66 | })
67 | .map(|key| BatchOp::Delete { key })
68 | .collect::>()
69 | .into_iter()
70 | }
71 |
--------------------------------------------------------------------------------
/benchmark/src/zipf.rs:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
4 | use crate::TestRunner;
5 | use firewood::db::{BatchOp, Db};
6 | use firewood::v2::api::{Db as _, Proposal as _};
7 | use log::{debug, trace};
8 | use pretty_duration::pretty_duration;
9 | use rand::prelude::*;
10 | use sha2::{Digest, Sha256};
11 | use std::collections::HashSet;
12 | use std::error::Error;
13 | use std::time::Instant;
14 |
15 | #[derive(clap::Args, Debug, PartialEq)]
16 | pub struct Args { // CLI options specific to the zipf workload
17 |     #[arg(short, long, help = "zipf exponent", default_value_t = 1.2)]
18 |     exponent: f64, // exponent parameter passed to rand_distr::Zipf::new
19 | }
20 | 
21 | #[derive(Clone)]
22 | pub struct Zipf; // workload whose keys are drawn from a Zipf distribution
23 |
24 | impl TestRunner for Zipf {
25 | async fn run(&self, db: &Db, args: &crate::Args) -> Result<(), Box> {
26 | let exponent = if let crate::TestName::Zipf(args) = &args.test_name {
27 | args.exponent
28 | } else {
29 | unreachable!()
30 | };
31 | let rows = (args.global_opts.number_of_batches * args.global_opts.batch_size) as f64;
32 | let zipf = rand_distr::Zipf::new(rows, exponent).unwrap();
33 | let start = Instant::now();
34 | let mut batch_id = 0;
35 |
36 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes {
37 | let batch: Vec> =
38 | generate_updates(batch_id, args.global_opts.batch_size as usize, zipf).collect();
39 | if log::log_enabled!(log::Level::Debug) {
40 | let mut distinct = HashSet::new();
41 | for op in &batch {
42 | match op {
43 | BatchOp::Put { key, value: _ } => {
44 | distinct.insert(key);
45 | }
46 | _ => unreachable!(),
47 | }
48 | }
49 | debug!(
50 | "inserting batch {} with {} distinct data values",
51 | batch_id,
52 | distinct.len()
53 | );
54 | }
55 | let proposal = db.propose(batch).await.expect("proposal should succeed");
56 | proposal.commit().await?;
57 |
58 | if log::log_enabled!(log::Level::Debug) {
59 | debug!(
60 | "completed batch {} in {}",
61 | batch_id,
62 | pretty_duration(&start.elapsed(), None)
63 | );
64 | }
65 | batch_id += 1;
66 | }
67 | Ok(())
68 | }
69 | }
70 | fn generate_updates(
71 | batch_id: u32,
72 | batch_size: usize,
73 | zipf: rand_distr::Zipf,
74 | ) -> impl Iterator
- , Vec>> {
75 | let hash_of_batch_id = Sha256::digest(batch_id.to_ne_bytes()).to_vec();
76 | let rng = rand::rng();
77 | zipf.sample_iter(rng)
78 | .take(batch_size)
79 | .map(|inner_key| {
80 | let digest = Sha256::digest((inner_key as u64).to_ne_bytes()).to_vec();
81 | trace!(
82 | "updating {:?} with digest {} to {}",
83 | inner_key,
84 | hex::encode(&digest),
85 | hex::encode(&hash_of_batch_id)
86 | );
87 | (digest, hash_of_batch_id.clone())
88 | })
89 | .map(|(key, value)| BatchOp::Put { key, value })
90 | .collect::>()
91 | .into_iter()
92 | }
93 |
--------------------------------------------------------------------------------
/cliff.toml:
--------------------------------------------------------------------------------
1 | # git-cliff ~ configuration file
2 | # https://git-cliff.org/docs/configuration
3 |
4 |
5 | [changelog]
6 | # A Tera template to be rendered as the changelog's header.
7 | # See https://keats.github.io/tera/docs/#introduction
8 | header = """
9 | # Changelog\n
10 | All notable changes to this project will be documented in this file.\n
11 | """
12 | # A Tera template to be rendered for each release in the changelog.
13 | # See https://keats.github.io/tera/docs/#introduction
14 | body = """
15 | {% if version %}\
16 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
17 | {% else %}\
18 | ## [unreleased]
19 | {% endif %}\
20 | {% for group, commits in commits | group_by(attribute="group") %}
21 | ### {{ group | striptags | trim | upper_first }}
22 | {% for commit in commits %}
23 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
24 | {% if commit.breaking %}[**breaking**] {% endif %}\
25 | {{ commit.message | upper_first }}\
26 | {% endfor %}
27 | {% endfor %}\n
28 | """
29 | # A Tera template to be rendered as the changelog's footer.
30 | # See https://keats.github.io/tera/docs/#introduction
31 | footer = """
32 |
33 | """
34 | # Remove leading and trailing whitespaces from the changelog's body.
35 | trim = true
36 | # Render body even when there are no releases to process.
37 | render_always = true
38 | # An array of regex based postprocessors to modify the changelog.
39 | postprocessors = [
40 | # Replace the placeholder with a URL.
41 | #{ pattern = '', replace = "https://github.com/orhun/git-cliff" },
42 | ]
43 | # render body even when there are no releases to process
44 | # render_always = true
45 | # output file path
46 | # output = "test.md"
47 |
48 | [git]
49 | # Parse commits according to the conventional commits specification.
50 | # See https://www.conventionalcommits.org
51 | conventional_commits = true
52 | # Exclude commits that do not match the conventional commits specification.
53 | filter_unconventional = true
54 | # Require all commits to be conventional.
55 | # Takes precedence over filter_unconventional.
56 | require_conventional = false
57 | # Split commits on newlines, treating each line as an individual commit.
58 | split_commits = false
59 | # An array of regex based parsers to modify commit messages prior to further processing.
60 | commit_preprocessors = [
61 | # Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
62 | #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"},
63 | # Check spelling of the commit message using https://github.com/crate-ci/typos.
64 | # If the spelling is incorrect, it will be fixed automatically.
65 | #{ pattern = '.*', replace_command = 'typos --write-changes -' },
66 | ]
67 | # Prevent commits that are breaking from being excluded by commit parsers.
68 | protect_breaking_commits = false
69 | # An array of regex based parsers for extracting data from the commit message.
70 | # Assigns commits to groups.
71 | # Optionally sets the commit's scope and can decide to exclude commits from further processing.
72 | commit_parsers = [
73 | { message = "^feat", group = "🚀 Features" },
74 | { message = "^fix", group = "🐛 Bug Fixes" },
75 | { message = "^doc", group = "📚 Documentation" },
76 | { message = "^perf", group = "⚡ Performance" },
77 | { message = "^refactor", group = "🚜 Refactor" },
78 | { message = "^style", group = "🎨 Styling" },
79 | { message = "^test", group = "🧪 Testing" },
80 | { message = "^chore\\(release\\): prepare for", skip = true },
81 | { message = "^chore\\(deps.*\\)", skip = true },
82 | { message = "^chore\\(pr\\)", skip = true },
83 | { message = "^chore\\(pull\\)", skip = true },
84 | { message = "^chore|^ci", group = "⚙️ Miscellaneous Tasks" },
85 | { body = ".*security", group = "🛡️ Security" },
86 | { message = "^revert", group = "◀️ Revert" },
87 | { message = ".*", group = "💼 Other" },
88 | ]
89 | # Exclude commits that are not matched by any commit parser.
90 | filter_commits = false
91 | # An array of link parsers for extracting external references, and turning them into URLs, using regex.
92 | link_parsers = []
93 | # Include only the tags that belong to the current branch.
94 | use_branch_tags = true
95 | # Order releases topologically instead of chronologically.
96 | topo_order = false
97 | # Order commits within each release topologically instead of chronologically.
98 | topo_order_commits = true
99 | # Order of commits in each group/release within the changelog.
100 | # Allowed values: newest, oldest
101 | sort_commits = "oldest"
102 | # Process submodules commits
103 | recurse_submodules = false
104 | # Only process tags in this pattern
105 | tag_pattern = "v[0-9].*"
106 |
107 |
--------------------------------------------------------------------------------
/ffi/.gitignore:
--------------------------------------------------------------------------------
1 | dbtest
2 | _obj
3 |
--------------------------------------------------------------------------------
/ffi/.golangci.yaml:
--------------------------------------------------------------------------------
1 | # https://golangci-lint.run/usage/configuration/
2 | run:
3 | timeout: 10m
4 |
5 | # If set we pass it to "go list -mod={option}". From "go help modules":
6 | # If invoked with -mod=readonly, the go command is disallowed from the implicit
7 | # automatic updating of go.mod described above. Instead, it fails when any changes
8 | # to go.mod are needed. This setting is most useful to check that go.mod does
9 | # not need updates, such as in a continuous integration and testing system.
10 | # If invoked with -mod=vendor, the go command assumes that the vendor
11 | # directory holds the correct copies of dependencies and ignores
12 | # the dependency descriptions in go.mod.
13 | #
14 | # Allowed values: readonly|vendor|mod
15 | # By default, it isn't set.
16 | modules-download-mode: readonly
17 |
18 | issues:
19 | # Make issues output unique by line.
20 | # Default: true
21 | uniq-by-line: false
22 |
23 | # Maximum issues count per one linter.
24 | # Set to 0 to disable.
25 | # Default: 50
26 | max-issues-per-linter: 0
27 |
28 | # Maximum count of issues with the same text.
29 | # Set to 0 to disable.
30 | # Default: 3
31 | max-same-issues: 0
32 |
33 | # Enables skipping of directories:
34 | # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
35 | # Default: true
36 | exclude-dirs-use-default: false
37 |
38 | linters:
39 | disable-all: true
40 | enable:
41 | - asciicheck
42 | - bodyclose
43 | - copyloopvar
44 | - depguard
45 | - dupword
46 | - dupl
47 | - errcheck
48 | - errname
49 | - errorlint
50 | - forbidigo
51 | - gci
52 | - goconst
53 | - gocritic
54 | # - err113 - encourages wrapping static errors
55 | - gofmt
56 | - gofumpt
57 | # - mnd - unnecessary magic numbers
58 | - goprintffuncname
59 | - gosec
60 | - gosimple
61 | - govet
62 | - importas
63 | - ineffassign
64 | # - lll line length linter
65 | - misspell
66 | - nakedret
67 | - nilerr
68 | - noctx
69 | - nolintlint
70 | - perfsprint
71 | - prealloc
72 | - predeclared
73 | - revive
74 | - spancheck
75 | - staticcheck
76 | - stylecheck
77 | - tagalign
78 | - testifylint
79 | - typecheck
80 | - unconvert
81 | - unparam
82 | - unused
83 | - usestdlibvars
84 | - whitespace
85 |
86 | linters-settings:
87 | depguard:
88 | rules:
89 | packages:
90 | deny:
91 | - pkg: "github.com/golang/mock/gomock"
92 | desc: go.uber.org/mock/gomock should be used instead.
93 | - pkg: "github.com/stretchr/testify/assert"
94 | desc: github.com/stretchr/testify/require should be used instead.
95 | - pkg: "io/ioutil"
96 | desc: io/ioutil is deprecated. Use package io or os instead.
97 | errorlint:
98 | # Check for plain type assertions and type switches.
99 | asserts: false
100 | # Check for plain error comparisons.
101 | comparison: false
102 | forbidigo:
103 | # Forbid the following identifiers (list of regexp).
104 | forbid:
105 | - 'require\.Error$(# ErrorIs should be used instead)?'
106 | - 'require\.ErrorContains$(# ErrorIs should be used instead)?'
107 | - 'require\.EqualValues$(# Equal should be used instead)?'
108 | - 'require\.NotEqualValues$(# NotEqual should be used instead)?'
109 | - '^(t|b|tb|f)\.(Fatal|Fatalf|Error|Errorf)$(# the require library should be used instead)?'
110 | revive:
111 | rules:
112 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
113 | - name: bool-literal-in-expr
114 | disabled: false
115 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
116 | - name: early-return
117 | disabled: false
118 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
119 | - name: empty-lines
120 | disabled: false
121 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
122 | - name: string-format
123 | disabled: false
124 | arguments:
125 | - ["b.Logf[0]", "/.*%.*/", "no format directive, use b.Log instead"]
126 | - ["fmt.Errorf[0]", "/.*%.*/", "no format directive, use errors.New instead"]
127 | - ["fmt.Fprintf[1]", "/.*%.*/", "no format directive, use fmt.Fprint instead"]
128 | - ["fmt.Printf[0]", "/.*%.*/", "no format directive, use fmt.Print instead"]
129 | - ["fmt.Sprintf[0]", "/.*%.*/", "no format directive, use fmt.Sprint instead"]
130 | - ["log.Fatalf[0]", "/.*%.*/", "no format directive, use log.Fatal instead"]
131 | - ["log.Printf[0]", "/.*%.*/", "no format directive, use log.Print instead"]
132 | - ["t.Logf[0]", "/.*%.*/", "no format directive, use t.Log instead"]
133 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
134 | - name: struct-tag
135 | disabled: false
136 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-naming
137 | - name: unexported-naming
138 | disabled: false
139 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
140 | - name: unhandled-error
141 | # prefer the errcheck linter since it can be disabled directly with nolint directive
142 | # but revive's disable directive (e.g. //revive:disable:unhandled-error) is not
143 | # supported when run under golangci_lint
144 | disabled: true
145 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
146 | - name: unused-parameter
147 | disabled: false
148 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-receiver
149 | - name: unused-receiver
150 | disabled: false
151 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
152 | - name: useless-break
153 | disabled: false
154 | tagalign:
155 | align: true
156 | sort: true
157 | strict: true
158 | order:
159 | - serialize
160 | testifylint:
161 | # Enable all checkers (https://github.com/Antonboom/testifylint#checkers).
162 | # Default: false
163 | enable-all: true
164 | # Disable checkers by name
165 | # (in addition to default
166 | # suite-thelper
167 | # ).
168 | disable:
169 | - go-require
170 | - float-compare
171 |
--------------------------------------------------------------------------------
/ffi/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "firewood-ffi"
3 | version = "0.0.5"
4 | edition = "2024"
5 | rust-version = "1.85.0"
6 |
7 | [lib]
8 | crate-type = ["staticlib"]
9 |
10 | [dependencies]
11 | libc = "0.2.2"
12 | firewood = { path = "../firewood" }
13 | metrics = "0.24.1"
14 | metrics-util = "0.19.0"
15 | chrono = "0.4.39"
16 | oxhttp = "0.3.0"
17 | coarsetime = "0.1.35"
18 | env_logger = {version = "0.11.7", optional = true}
19 |
20 | [target.'cfg(unix)'.dependencies]
21 | tikv-jemallocator = "0.6.0"
22 |
23 | [features]
24 | logger = ["dep:env_logger", "firewood/logger"]
25 | ethhash = ["firewood/ethhash"]
26 |
27 | [build-dependencies]
28 | cbindgen = "0.29.0"
29 |
30 | [lints.clippy]
31 | unwrap_used = "warn"
32 | indexing_slicing = "warn"
33 | explicit_deref_methods = "warn"
34 | missing_const_for_fn = "warn"
35 | pedantic = "warn"
36 |
--------------------------------------------------------------------------------
/ffi/README.md:
--------------------------------------------------------------------------------
1 | # Firewood Golang FFI
2 |
3 | The FFI package provides a golang FFI layer for Firewood.
4 |
5 | ## Building Firewood Golang FFI
6 |
7 | The Golang FFI layer uses a CGO directive to locate a C-API compatible binary built from Firewood. Firewood supports both seamless local development and a single-step compilation process for Go projects that depend or transitively depend on Firewood.
8 |
9 | To do this, [firewood.go](./firewood.go) includes CGO directives to include multiple search paths for the Firewood binary in the local `target/` build directory and `ffi/libs`. For the latter, [attach-static-libs](../.github/workflows/attach-static-libs.yaml) GitHub Action pushes an FFI package with static libraries attached for the following supported architectures:
10 |
11 | - x86_64-unknown-linux-gnu
12 | - aarch64-unknown-linux-gnu
13 | - aarch64-apple-darwin
14 | - x86_64-apple-darwin
15 |
16 | to a separate repo [firewood-go](https://github.com/ava-labs/firewood-go) (to avoid including binaries in the Firewood repo).
17 |
18 | ### Local Development
19 |
20 | [firewood.go](./firewood.go) includes CGO directives to include builds in the `target/` directory.
21 |
22 | Firewood prioritizes builds in the following order:
23 |
24 | 1. maxperf
25 | 2. release
26 | 3. debug
27 |
28 | To use and test the Firewood FFI locally, you can run:
29 |
30 | ```bash
31 | cargo build --profile maxperf
32 | cd ffi
33 | go test
34 | ```
35 |
36 | To use a local build of Firewood for a project that depends on Firewood, you must redirect the `go.mod` to use the local version of Firewood FFI, for example:
37 |
38 | ```bash
39 | go mod edit -replace github.com/ava-labs/firewood-go/ffi=/path/to/firewood/ffi
40 | go mod tidy
41 | ```
42 |
43 | ### Production Development Flow
44 |
45 | Firewood pushes the FFI source code and attached static libraries to [firewood-go](https://github.com/ava-labs/firewood-go) via [attach-static-libs](../.github/workflows/attach-static-libs.yaml).
46 |
47 | This enables consumers to utilize it directly without forcing them to compile Firewood locally. Go programs running on supported architectures can utilize `firewood-go/ffi` just like any other dependency.
48 |
To trigger this build, [attach-static-libs](../.github/workflows/attach-static-libs.yaml) supports both manual GitHub Actions triggers and tag triggers, so you can create a mirror branch/tag on [firewood-go](https://github.com/ava-labs/firewood-go) either by triggering a manual GitHub Action and selecting your branch, or by pushing a tag to Firewood.
50 |
51 | ### Hash Mode
52 |
53 | Firewood implemented its own optimized merkle trie structure. To support Ethereum Merkle Trie hash compatibility, it also provides a feature flag `ethhash`.
54 |
55 | This is an optional feature (disabled by default). To enable it for a local build, compile with:
56 |
57 | ```sh
58 | cargo build -p firewood-ffi --features ethhash
59 | ```
60 |
61 | To support development in [Coreth](https://github.com/ava-labs/coreth), Firewood pushes static libraries to [firewood-go](https://github.com/ava-labs/firewood-go) with `ethhash` enabled by default.
62 |
63 | ## Development
64 |
65 | Iterative building is unintuitive for the ffi and some common sources of confusion are listed below.
66 |
67 | ### CGO Regeneration
68 |
69 | As you edit any Rust code and save the file in VS Code, the `firewood.h` file is automatically updated with edited function and struct definitions. However, the Go linter will not recognize these changes until you manually regenerate the cgo wrappers. To do this, you can run `go tool cgo firewood.go`. Alternatively, in VS Code, right above the `import "C"` definition, you can click on the small letters saying "regenerate CGO definitions". This will allow the linter to use the altered definitions.
70 |
71 | Because the C header file is autogenerated from the Rust code, the naming matches exactly (due to the `no_mangle` macro). However, the C definitions imported in Go do not match exactly, and are prefixed with `struct_`. Function naming is the same as the header file. These names are generated by the `go tool cgo` command above.
72 |
73 | ### Testing
74 |
Although the VS Code testing feature does work, there are some quirks in ensuring proper building. The Rust code must be compiled separately, and sometimes the `go test` command continues to use a cached result. Whenever testing after making changes to the Rust/C builds, the cache should be cleared if results don't seem correct. Do not compile with `--features ethhash`, as some tests will fail.
76 |
To ensure there are no memory leaks, the easiest way is to use your preferred CLI tool (e.g. `valgrind` for Linux, `leaks` for macOS) and compile the tests into a binary. Use a debug build rather than a release binary, so that the leak checker can track all memory. An example flow is given below.
78 |
79 | ```sh
80 | cd ffi
81 | cargo build # use debug
82 | go test -a -c -o binary_file # ignore cache
83 | leaks --nostacks --atExit -- ./binary_file
84 | ```
85 |
--------------------------------------------------------------------------------
/ffi/build.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | extern crate cbindgen;
4 |
5 | fn main() {
6 | let crate_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is not set");
7 |
8 | let config = cbindgen::Config::from_file("cbindgen.toml").expect("cbindgen.toml is present");
9 |
10 | cbindgen::Builder::new()
11 | .with_crate(crate_dir)
12 | // Add any additional configuration options here
13 | .with_config(config)
14 | .generate()
15 | .map_or_else(
16 | |error| match error {
17 | cbindgen::Error::ParseSyntaxError { .. } => {}
18 | e => panic!("{e:?}"),
19 | },
20 | |bindings| {
21 | bindings.write_to_file("firewood.h");
22 | },
23 | );
24 | }
25 |
--------------------------------------------------------------------------------
/ffi/cbindgen.toml:
--------------------------------------------------------------------------------
1 | # This is a template cbindgen.toml file with all of the default values.
2 | # Some values are commented out because their absence is the real default.
3 | #
4 | # See https://github.com/mozilla/cbindgen/blob/master/docs.md#cbindgentoml
5 | # for detailed documentation of every option here.
6 |
7 |
8 |
9 | language = "C"
10 |
11 |
12 |
13 | ############## Options for Wrapping the Contents of the Header #################
14 |
15 | # header = "/* Text to put at the beginning of the generated file. Probably a license. */"
16 | # trailer = "/* Text to put at the end of the generated file */"
17 | # include_guard = "my_bindings_h"
18 | # pragma_once = true
19 | # autogen_warning = "/* Warning, this file is autogenerated by cbindgen. Don't modify this manually. */"
20 | include_version = false
21 | # namespace = "my_namespace"
22 | namespaces = []
23 | using_namespaces = []
24 | sys_includes = []
25 | includes = []
26 | no_includes = false
27 | # cpp_compat = true
28 | after_includes = ""
29 |
30 |
31 | ############################ Code Style Options ################################
32 |
33 | braces = "SameLine"
34 | line_length = 100
35 | tab_width = 2
36 | documentation = true
37 | documentation_style = "auto"
38 | documentation_length = "full"
39 | line_endings = "LF" # also "CR", "CRLF", "Native"
40 |
41 |
42 | ############################# Codegen Options ##################################
43 |
44 | style = "both"
45 | sort_by = "Name" # default for `fn.sort_by` and `const.sort_by`
46 | usize_is_size_t = true
47 |
48 | [defines]
49 | # "target_os = freebsd" = "DEFINE_FREEBSD"
50 | # "feature = serde" = "DEFINE_SERDE"
51 |
52 | [export]
53 | include = []
54 | exclude = []
55 | # prefix = "CAPI_"
56 | item_types = []
57 | renaming_overrides_prefixing = false
58 |
59 | [export.rename]
60 | "Db" = "void"
61 |
62 | [export.body]
63 |
64 | [export.mangle]
65 |
66 | [fn]
67 | rename_args = "None"
68 | # must_use = "MUST_USE_FUNC"
69 | # deprecated = "DEPRECATED_FUNC"
70 | # deprecated_with_note = "DEPRECATED_FUNC_WITH_NOTE"
71 | # no_return = "NO_RETURN"
72 | # prefix = "START_FUNC"
73 | # postfix = "END_FUNC"
74 | args = "auto"
75 | sort_by = "Name"
76 |
77 | [struct]
78 | rename_fields = "None"
79 | # must_use = "MUST_USE_STRUCT"
80 | # deprecated = "DEPRECATED_STRUCT"
81 | # deprecated_with_note = "DEPRECATED_STRUCT_WITH_NOTE"
82 | derive_constructor = false
83 | derive_eq = false
84 | derive_neq = false
85 | derive_lt = false
86 | derive_lte = false
87 | derive_gt = false
88 | derive_gte = false
89 |
90 | [enum]
91 | rename_variants = "None"
92 | # must_use = "MUST_USE_ENUM"
93 | # deprecated = "DEPRECATED_ENUM"
94 | # deprecated_with_note = "DEPRECATED_ENUM_WITH_NOTE"
95 | add_sentinel = false
96 | prefix_with_name = false
97 | derive_helper_methods = false
98 | derive_const_casts = false
99 | derive_mut_casts = false
100 | # cast_assert_name = "ASSERT"
101 | derive_tagged_enum_destructor = false
102 | derive_tagged_enum_copy_constructor = false
103 | enum_class = true
104 | private_default_tagged_enum_constructor = false
105 |
106 |
107 |
108 |
109 | [const]
110 | allow_static_const = true
111 | allow_constexpr = false
112 | sort_by = "Name"
113 |
114 |
115 |
116 |
117 | [macro_expansion]
118 | bitflags = false
119 |
120 |
121 |
122 |
123 |
124 |
125 | ############## Options for How Your Rust library Should Be Parsed ##############
126 |
127 | [parse]
128 | parse_deps = false
129 | # include = []
130 | exclude = []
131 | clean = false
132 | extra_bindings = []
133 |
134 |
135 |
136 | [parse.expand]
137 | crates = []
138 | all_features = false
139 | default_features = true
140 | features = []
141 |
--------------------------------------------------------------------------------
/ffi/firewood.go:
--------------------------------------------------------------------------------
1 | // Package firewood provides a Go wrapper around the [Firewood] database.
2 | //
3 | // [Firewood]: https://github.com/ava-labs/firewood
4 | package firewood
5 |
6 | // // Note that -lm is required on Linux but not on Mac.
7 | // #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/libs/x86_64-unknown-linux-gnu -lm
8 | // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/libs/aarch64-unknown-linux-gnu -lm
9 | // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/libs/x86_64-apple-darwin
10 | // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/libs/aarch64-apple-darwin
11 | // // XXX: last search path takes precedence, which means we prioritize
12 | // // local builds over pre-built and maxperf over release build
13 | // #cgo LDFLAGS: -L${SRCDIR}/../target/debug
14 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release
15 | // #cgo LDFLAGS: -L${SRCDIR}/../target/maxperf
16 | // #cgo LDFLAGS: -L/usr/local/lib -lfirewood_ffi
17 | // #include
18 | // #include "firewood.h"
19 | import "C"
20 |
21 | import (
22 | "errors"
23 | "fmt"
24 | "strings"
25 | "unsafe"
26 | )
27 |
// These constants are used to identify errors returned by the Firewood Rust FFI.
// These must be changed if the Rust FFI changes - should be reported by tests.
const (
	// RootLength is the length in bytes of a root hash (see Root / Revision).
	RootLength = 32
	// rootHashNotFound is the FFI error substring reported when the database
	// has no root hash yet; Get treats it as "empty database".
	rootHashNotFound = "IO error: Root hash not found"
	// keyNotFound is the FFI error substring for a missing key.
	keyNotFound = "key not found"
)
35 |
// errDBClosed is returned by every method invoked after Close has succeeded.
var errDBClosed = errors.New("firewood database already closed")

// A Database is a handle to a Firewood database.
// It is not safe to call these methods with a nil handle.
type Database struct {
	// handle is returned and accepted by cgo functions. It MUST be treated as
	// an opaque value without special meaning.
	// https://en.wikipedia.org/wiki/Blinkenlights
	// A nil handle marks a closed database (set by Close).
	handle *C.DatabaseHandle
}
46 |
// Config configures the opening of a [Database].
type Config struct {
	// Create, when true, creates a new database; otherwise an existing one is opened.
	Create bool
	// NodeCacheEntries is forwarded to the FFI as cache_size (must be >= 1; see New).
	NodeCacheEntries uint
	// Revisions is forwarded to the FFI as revisions (must be >= 2; see New).
	Revisions uint
	// ReadCacheStrategy selects which reads populate the cache; must be one of
	// the CacheStrategy constants below.
	ReadCacheStrategy CacheStrategy
	// MetricsPort is forwarded to the FFI as metrics_port — presumably the
	// port the Rust side serves metrics on; semantics defined by the FFI.
	MetricsPort uint16
}
55 |
56 | // DefaultConfig returns a sensible default Config.
57 | func DefaultConfig() *Config {
58 | return &Config{
59 | NodeCacheEntries: 1_000_000,
60 | Revisions: 100,
61 | ReadCacheStrategy: OnlyCacheWrites,
62 | MetricsPort: 3000,
63 | }
64 | }
65 |
// A CacheStrategy represents the caching strategy used by a [Database].
// The numeric value is passed to the FFI as a uint8 (see New).
type CacheStrategy uint8

const (
	// OnlyCacheWrites (0) — presumably caches only written nodes; exact
	// semantics are defined by the Rust side.
	OnlyCacheWrites CacheStrategy = iota
	CacheBranchReads
	CacheAllReads

	// invalidCacheStrategy MUST be the final value in the iota block to make it
	// the smallest value greater than all valid values.
	invalidCacheStrategy
)
78 |
79 | // New opens or creates a new Firewood database with the given configuration. If
80 | // a nil `Config` is provided [DefaultConfig] will be used instead.
81 | func New(filePath string, conf *Config) (*Database, error) {
82 | if conf == nil {
83 | conf = DefaultConfig()
84 | }
85 | if conf.ReadCacheStrategy >= invalidCacheStrategy {
86 | return nil, fmt.Errorf("invalid %T (%[1]d)", conf.ReadCacheStrategy)
87 | }
88 | if conf.Revisions < 2 {
89 | return nil, fmt.Errorf("%T.Revisions must be >= 2", conf)
90 | }
91 | if conf.NodeCacheEntries < 1 {
92 | return nil, fmt.Errorf("%T.NodeCacheEntries must be >= 1", conf)
93 | }
94 |
95 | args := C.struct_CreateOrOpenArgs{
96 | path: C.CString(filePath),
97 | cache_size: C.size_t(conf.NodeCacheEntries),
98 | revisions: C.size_t(conf.Revisions),
99 | strategy: C.uint8_t(conf.ReadCacheStrategy),
100 | metrics_port: C.uint16_t(conf.MetricsPort),
101 | }
102 | // Defer freeing the C string allocated to the heap on the other side
103 | // of the FFI boundary.
104 | defer C.free(unsafe.Pointer(args.path))
105 |
106 | var db *C.DatabaseHandle
107 | if conf.Create {
108 | db = C.fwd_create_db(args)
109 | } else {
110 | db = C.fwd_open_db(args)
111 | }
112 |
113 | return &Database{handle: db}, nil
114 | }
115 |
116 | // Update applies a batch of updates to the database, returning the hash of the
117 | // root node after the batch is applied.
118 | //
119 | // WARNING: a consequence of prefix deletion is that calling Update with an empty
120 | // key and value will delete the entire database.
121 | func (db *Database) Update(keys, vals [][]byte) ([]byte, error) {
122 | if db.handle == nil {
123 | return nil, errDBClosed
124 | }
125 |
126 | ffiOps, cleanup := createOps(keys, vals)
127 | defer cleanup()
128 |
129 | hash := C.fwd_batch(
130 | db.handle,
131 | C.size_t(len(ffiOps)),
132 | unsafe.SliceData(ffiOps), // implicitly pinned
133 | )
134 | return bytesFromValue(&hash)
135 | }
136 |
137 | func (db *Database) Propose(keys, vals [][]byte) (*Proposal, error) {
138 | if db.handle == nil {
139 | return nil, errDBClosed
140 | }
141 |
142 | ffiOps, cleanup := createOps(keys, vals)
143 | defer cleanup()
144 |
145 | val := C.fwd_propose_on_db(
146 | db.handle,
147 | C.size_t(len(ffiOps)),
148 | unsafe.SliceData(ffiOps), // implicitly pinned
149 | )
150 | return newProposal(db.handle, &val)
151 | }
152 |
153 | // Get retrieves the value for the given key. It always returns a nil error.
154 | // If the key is not found, the return value will be (nil, nil).
155 | func (db *Database) Get(key []byte) ([]byte, error) {
156 | if db.handle == nil {
157 | return nil, errDBClosed
158 | }
159 |
160 | values, cleanup := newValueFactory()
161 | defer cleanup()
162 | val := C.fwd_get_latest(db.handle, values.from(key))
163 | bytes, err := bytesFromValue(&val)
164 |
165 | // If the root hash is not found, return nil.
166 | if err != nil && strings.Contains(err.Error(), rootHashNotFound) {
167 | return nil, nil
168 | }
169 |
170 | return bytes, err
171 | }
172 |
173 | // Root returns the current root hash of the trie.
174 | // Empty trie must return common.Hash{}.
175 | func (db *Database) Root() ([]byte, error) {
176 | if db.handle == nil {
177 | return nil, errDBClosed
178 | }
179 | hash := C.fwd_root_hash(db.handle)
180 | bytes, err := bytesFromValue(&hash)
181 |
182 | // If the root hash is not found, return a zeroed slice.
183 | if err == nil && bytes == nil {
184 | bytes = make([]byte, RootLength)
185 | }
186 | return bytes, err
187 | }
188 |
189 | // Revision returns a historical revision of the database.
190 | func (db *Database) Revision(root []byte) (*Revision, error) {
191 | return newRevision(db.handle, root)
192 | }
193 |
194 | // Close closes the database and releases all held resources.
195 | // Returns an error if already closed.
196 | func (db *Database) Close() error {
197 | if db.handle == nil {
198 | return errDBClosed
199 | }
200 | C.fwd_close_db(db.handle)
201 | db.handle = nil
202 | return nil
203 | }
204 |
--------------------------------------------------------------------------------
/ffi/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/ava-labs/firewood/ffi
2 |
3 | go 1.23
4 |
5 | toolchain go1.23.6
6 |
7 | require github.com/stretchr/testify v1.10.0
8 |
9 | require (
10 | github.com/davecgh/go-spew v1.1.1 // indirect
11 | github.com/pmezard/go-difflib v1.0.0 // indirect
12 | gopkg.in/yaml.v3 v3.0.1 // indirect
13 | )
14 |
--------------------------------------------------------------------------------
/ffi/go.sum:
--------------------------------------------------------------------------------
1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
3 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
5 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
6 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
7 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
8 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
9 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
10 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
11 |
--------------------------------------------------------------------------------
/ffi/kvbackend.go:
--------------------------------------------------------------------------------
1 | package firewood
2 |
// implement a specific interface for firewood
// this is used for some of the firewood performance tests

// Validate that Firewood implements the KVBackend interface.
// This is a compile-time assertion; the blank identifier discards the value.
var _ kVBackend = (*Database)(nil)

// kVBackend is the key-value backend contract exercised by the performance
// tests. It is unexported; *Database is asserted to satisfy it above.
type kVBackend interface {
	// Returns the current root hash of the trie.
	// Empty trie must return common.Hash{}.
	// Length of the returned slice must be common.HashLength.
	Root() ([]byte, error)

	// Get retrieves the value for the given key.
	// If the key does not exist, it must return (nil, nil).
	Get(key []byte) ([]byte, error)

	// Prefetch loads the intermediary nodes of the given key into memory.
	// The first return value is ignored.
	Prefetch(key []byte) ([]byte, error)

	// After this call, Root() should return the same hash as returned by this call.
	// Note when length of a particular value is zero, it means the corresponding
	// key should be deleted.
	// There may be duplicate keys in the batch provided, and the last one should
	// take effect.
	// Note after this call, the next call to Update must build on the returned root,
	// regardless of whether Commit is called.
	// Length of the returned root must be common.HashLength.
	Update(keys, vals [][]byte) ([]byte, error)

	// After this call, changes related to [root] should be persisted to disk.
	// This may be implemented as no-op if Update already persists changes, or
	// commits happen on a rolling basis.
	// Length of the root slice is guaranteed to be common.HashLength.
	Commit(root []byte) error

	// Close closes the backend and releases all held resources.
	Close() error
}
42 |
43 | // Prefetch is a no-op since we don't need to prefetch for Firewood.
44 | func (db *Database) Prefetch(_ []byte) ([]byte, error) {
45 | if db.handle == nil {
46 | return nil, errDBClosed
47 | }
48 |
49 | return nil, nil
50 | }
51 |
52 | // Commit is a no-op, since [Database.Update] already persists changes.
53 | func (db *Database) Commit(_ []byte) error {
54 | if db.handle == nil {
55 | return errDBClosed
56 | }
57 |
58 | return nil
59 | }
60 |
--------------------------------------------------------------------------------
/ffi/memory.go:
--------------------------------------------------------------------------------
1 | // Package firewood provides a Go wrapper around the [Firewood] database.
2 | //
3 | // [Firewood]: https://github.com/ava-labs/firewood
4 | package firewood
5 |
6 | // // Note that -lm is required on Linux but not on Mac.
7 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release -L/usr/local/lib -lfirewood_ffi -lm
8 | // #include
9 | // #include "firewood.h"
10 | import "C"
11 |
12 | import (
13 | "errors"
14 | "fmt"
15 | "runtime"
16 | "unsafe"
17 | )
18 |
var (
	// errNilBuffer reports that cgo handed back a nil Value pointer.
	errNilBuffer = errors.New("firewood error: nil value returned from cgo")
	// errBadValue reports a (data, len) combination no FFI call should produce.
	errBadValue = errors.New("firewood error: value from cgo formatted incorrectly")
)

// KeyValue is a key-value pair.
type KeyValue struct {
	Key []byte
	Value []byte
}
29 |
// hashAndIDFromValue converts the cgo `Value` payload into:
//
// case | data | len | meaning
//
// 1. | nil | 0 | invalid
// 2. | nil | non-0 | proposal deleted everything
// 3. | non-nil | 0 | error string
// 4. | non-nil | non-0 | hash and id
//
// The value should never be nil.
func hashAndIDFromValue(v *C.struct_Value) ([]byte, uint32, error) {
	// Keep v (a Go-allocated struct) reachable until the function returns so
	// it cannot be collected while its data pointer is still being read.
	// NOTE(review): runtime.KeepAlive extends liveness; it does not pin memory.
	defer runtime.KeepAlive(v)

	if v == nil {
		return nil, 0, errNilBuffer
	}

	if v.data == nil {
		// Case 2: no hash bytes but a non-zero len — len carries the proposal
		// id for a proposal that deleted everything. Nothing to free (data is nil).
		if v.len != 0 {
			return nil, uint32(v.len), nil
		}

		// Case 1: (nil, 0) is never produced by a successful FFI call.
		return nil, 0, errBadValue
	}

	// Case 3: non-nil data with zero len is a NUL-terminated error string that
	// must be released via fwd_free_value after copying.
	if v.len == 0 {
		errStr := C.GoString((*C.char)(unsafe.Pointer(v.data)))
		C.fwd_free_value(v)
		return nil, 0, fmt.Errorf("firewood error: %s", errStr)
	}

	// Case 4: len carries the proposal id and data points at a RootLength-byte
	// hash. Copy the hash out, then overwrite len so fwd_free_value releases
	// the correct number of bytes.
	id := uint32(v.len)
	buf := C.GoBytes(unsafe.Pointer(v.data), RootLength)
	v.len = C.size_t(RootLength) // set the length to free
	C.fwd_free_value(v)
	return buf, id, nil
}
72 |
73 | // errorFromValue converts the cgo `Value` payload into:
74 | //
75 | // case | data | len | meaning
76 | //
77 | // 1. | nil | 0 | empty
78 | // 2. | nil | non-0 | invalid
79 | // 3. | non-nil | 0 | error string
80 | // 4. | non-nil | non-0 | invalid
81 | //
82 | // The value should never be nil.
83 | func errorFromValue(v *C.struct_Value) error {
84 | // Pin the returned value to prevent it from being garbage collected.
85 | defer runtime.KeepAlive(v)
86 |
87 | if v == nil {
88 | return errNilBuffer
89 | }
90 |
91 | // Case 1
92 | if v.data == nil && v.len == 0 {
93 | return nil
94 | }
95 |
96 | // Case 3
97 | if v.len == 0 {
98 | errStr := C.GoString((*C.char)(unsafe.Pointer(v.data)))
99 | C.fwd_free_value(v)
100 | return fmt.Errorf("firewood error: %s", errStr)
101 | }
102 |
103 | // Case 2 and 4
104 | C.fwd_free_value(v)
105 | return errBadValue
106 | }
107 |
108 | // bytesFromValue converts the cgo `Value` payload to:
109 | //
110 | // case | data | len | meaning
111 | //
112 | // 1. | nil | 0 | empty
113 | // 2. | nil | non-0 | invalid
114 | // 3. | non-nil | 0 | error string
115 | // 4. | non-nil | non-0 | bytes (most common)
116 | //
117 | // The value should never be nil.
118 | func bytesFromValue(v *C.struct_Value) ([]byte, error) {
119 | // Pin the returned value to prevent it from being garbage collected.
120 | defer runtime.KeepAlive(v)
121 |
122 | if v == nil {
123 | return nil, errNilBuffer
124 | }
125 |
126 | // Case 4
127 | if v.len != 0 && v.data != nil {
128 | buf := C.GoBytes(unsafe.Pointer(v.data), C.int(v.len))
129 | C.fwd_free_value(v)
130 | return buf, nil
131 | }
132 |
133 | // Case 1
134 | if v.len == 0 && v.data == nil {
135 | return nil, nil
136 | }
137 |
138 | // Case 3
139 | if v.len == 0 {
140 | errStr := C.GoString((*C.char)(unsafe.Pointer(v.data)))
141 | C.fwd_free_value(v)
142 | return nil, fmt.Errorf("firewood error: %s", errStr)
143 | }
144 |
145 | // Case 2
146 | return nil, errBadValue
147 | }
148 |
149 | // newValueFactory returns a factory for converting byte slices into cgo `Value`
150 | // structs that can be passed as arguments to cgo functions. The returned
151 | // cleanup function MUST be called when the constructed values are no longer
152 | // required, after which they can no longer be used as cgo arguments.
153 | func newValueFactory() (*valueFactory, func()) {
154 | f := new(valueFactory)
155 | return f, func() { f.pin.Unpin() }
156 | }
157 |
// valueFactory builds C Values that reference Go byte slices without copying.
// A single runtime.Pinner covers every slice it pins; Unpin (via the cleanup
// function from newValueFactory) releases them all at once.
type valueFactory struct {
	pin runtime.Pinner
}

// from wraps data in a C Value without copying. An empty (or nil) slice maps
// to {0, nil}; otherwise the slice's backing array is pinned and its address
// handed to C. The Value is valid only until the factory's cleanup runs.
func (f *valueFactory) from(data []byte) C.struct_Value {
	if len(data) == 0 {
		return C.struct_Value{0, nil}
	}
	ptr := (*C.uchar)(unsafe.SliceData(data))
	f.pin.Pin(ptr)
	return C.struct_Value{C.size_t(len(data)), ptr}
}
170 |
171 | // createOps creates a slice of cgo `KeyValue` structs from the given keys and
172 | // values and pins the memory of the underlying byte slices to prevent
173 | // garbage collection while the cgo function is using them. The returned cleanup
174 | // function MUST be called when the constructed values are no longer required,
175 | // after which they can no longer be used as cgo arguments.
176 | func createOps(keys, vals [][]byte) ([]C.struct_KeyValue, func()) {
177 | values, cleanup := newValueFactory()
178 |
179 | ffiOps := make([]C.struct_KeyValue, len(keys))
180 | for i := range keys {
181 | ffiOps[i] = C.struct_KeyValue{
182 | key: values.from(keys[i]),
183 | value: values.from(vals[i]),
184 | }
185 | }
186 |
187 | return ffiOps, cleanup
188 | }
189 |
--------------------------------------------------------------------------------
/ffi/proposal.go:
--------------------------------------------------------------------------------
1 | // Package firewood provides a Go wrapper around the [Firewood] database.
2 | //
3 | // [Firewood]: https://github.com/ava-labs/firewood
4 | package firewood
5 |
6 | // // Note that -lm is required on Linux but not on Mac.
7 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release -L/usr/local/lib -lfirewood_ffi -lm
8 | // #include
9 | // #include "firewood.h"
10 | import "C"
11 |
12 | import (
13 | "errors"
14 | "unsafe"
15 | )
16 |
// errDroppedProposal is returned by Proposal methods once the proposal has
// been dropped (id reset to 0).
var errDroppedProposal = errors.New("proposal already dropped")

// A Proposal is a staged, uncommitted set of changes layered on a database
// (or on another proposal; see Propose).
type Proposal struct {
	// handle is returned and accepted by cgo functions. It MUST be treated as
	// an opaque value without special meaning.
	// https://en.wikipedia.org/wiki/Blinkenlights
	handle *C.DatabaseHandle

	// The proposal ID.
	// id = 0 is reserved for a dropped proposal.
	id uint32

	// The proposal root hash.
	root []byte
}
32 |
33 | // newProposal creates a new Proposal from the given DatabaseHandle and Value.
34 | // The Value must be returned from a Firewood FFI function.
35 | // An error can only occur from parsing the Value.
36 | func newProposal(handle *C.DatabaseHandle, val *C.struct_Value) (*Proposal, error) {
37 | bytes, id, err := hashAndIDFromValue(val)
38 | if err != nil {
39 | return nil, err
40 | }
41 |
42 | // If the proposal root is nil, it means the proposal is empty.
43 | if bytes == nil {
44 | bytes = make([]byte, RootLength)
45 | }
46 |
47 | return &Proposal{
48 | handle: handle,
49 | id: id,
50 | root: bytes,
51 | }, nil
52 | }
53 |
54 | // Root retrieves the root hash of the proposal.
55 | // If the proposal is empty (i.e. no keys in database),
56 | // it returns nil, nil.
57 | func (p *Proposal) Root() ([]byte, error) {
58 | if p.handle == nil {
59 | return nil, errDBClosed
60 | }
61 |
62 | if p.id == 0 {
63 | return nil, errDroppedProposal
64 | }
65 |
66 | // If the hash is empty, return the empty root hash.
67 | if p.root == nil {
68 | return make([]byte, RootLength), nil
69 | }
70 |
71 | // Get the root hash of the proposal.
72 | return p.root, nil
73 | }
74 |
75 | // Get retrieves the value for the given key.
76 | // If the key does not exist, it returns (nil, nil).
77 | func (p *Proposal) Get(key []byte) ([]byte, error) {
78 | if p.handle == nil {
79 | return nil, errDBClosed
80 | }
81 |
82 | if p.id == 0 {
83 | return nil, errDroppedProposal
84 | }
85 | values, cleanup := newValueFactory()
86 | defer cleanup()
87 |
88 | // Get the value for the given key.
89 | val := C.fwd_get_from_proposal(p.handle, C.uint32_t(p.id), values.from(key))
90 | return bytesFromValue(&val)
91 | }
92 |
93 | // Propose creates a new proposal with the given keys and values.
94 | // The proposal is not committed until Commit is called.
95 | func (p *Proposal) Propose(keys, vals [][]byte) (*Proposal, error) {
96 | if p.handle == nil {
97 | return nil, errDBClosed
98 | }
99 |
100 | if p.id == 0 {
101 | return nil, errDroppedProposal
102 | }
103 |
104 | ffiOps, cleanup := createOps(keys, vals)
105 | defer cleanup()
106 |
107 | // Propose the keys and values.
108 | val := C.fwd_propose_on_proposal(p.handle, C.uint32_t(p.id),
109 | C.size_t(len(ffiOps)),
110 | unsafe.SliceData(ffiOps),
111 | )
112 |
113 | return newProposal(p.handle, &val)
114 | }
115 |
116 | // Commit commits the proposal and returns any errors.
117 | // If an error occurs, the proposal is dropped and no longer valid.
118 | func (p *Proposal) Commit() error {
119 | if p.handle == nil {
120 | return errDBClosed
121 | }
122 |
123 | if p.id == 0 {
124 | return errDroppedProposal
125 | }
126 |
127 | // Commit the proposal and return the hash.
128 | errVal := C.fwd_commit(p.handle, C.uint32_t(p.id))
129 | err := errorFromValue(&errVal)
130 | if err != nil {
131 | // this is unrecoverable due to Rust's ownership model
132 | // The underlying proposal is no longer valid.
133 | p.id = 0
134 | }
135 | return err
136 | }
137 |
138 | // Drop removes the proposal from memory in Firewood.
139 | // In the case of an error, the proposal can assumed to be dropped.
140 | // An error is returned if the proposal was already dropped.
141 | func (p *Proposal) Drop() error {
142 | if p.handle == nil {
143 | return errDBClosed
144 | }
145 |
146 | if p.id == 0 {
147 | return errDroppedProposal
148 | }
149 |
150 | // Drop the proposal.
151 | val := C.fwd_drop_proposal(p.handle, C.uint32_t(p.id))
152 | p.id = 0
153 | return errorFromValue(&val)
154 | }
155 |
--------------------------------------------------------------------------------
/ffi/revision.go:
--------------------------------------------------------------------------------
1 | // Package firewood provides a Go wrapper around the [Firewood] database.
2 | //
3 | // [Firewood]: https://github.com/ava-labs/firewood
4 | package firewood
5 |
6 | // // Note that -lm is required on Linux but not on Mac.
7 | // #cgo LDFLAGS: -L${SRCDIR}/../target/release -L/usr/local/lib -lfirewood_ffi -lm
8 | // #include
9 | // #include "firewood.h"
10 | import "C"
11 |
12 | import (
13 | "errors"
14 | "fmt"
15 | )
16 |
17 | var (
18 | errRevisionNotFound = errors.New("firewood error: revision not found")
19 | errInvalidRootLength = fmt.Errorf("firewood error: root hash must be %d bytes", RootLength)
20 | )
21 |
// Revision provides read access to a single historical root of the database.
type Revision struct {
	// handle is returned and accepted by cgo functions. It MUST be treated as
	// an opaque value without special meaning.
	// https://en.wikipedia.org/wiki/Blinkenlights
	handle *C.DatabaseHandle
	// The revision root. Cleared (set to nil) once the revision is found to
	// be inaccessible; later calls then fail with errRevisionNotFound.
	root []byte
}
30 |
// newRevision validates that root names a readable revision in the database
// referenced by handle and wraps it in a *Revision.
func newRevision(handle *C.DatabaseHandle, root []byte) (*Revision, error) {
	if handle == nil {
		// NOTE(review): only the handle is checked on this path; a nil root
		// is reported separately as errInvalidRootLength below.
		return nil, errors.New("firewood error: nil handle or root")
	}

	// Check that the root is the correct length.
	if root == nil || len(root) != RootLength {
		return nil, errInvalidRootLength
	}

	// Attempt to get any value from the root.
	// This will verify that the root is valid and accessible.
	// If the root is not valid, this will return an error.
	values, cleanup := newValueFactory()
	defer cleanup()
	// Probe with the empty key; only reachability of the root matters here.
	val := C.fwd_get_from_root(handle, values.from(root), values.from([]byte{}))
	_, err := bytesFromValue(&val)
	if err != nil {
		// Any error from this function indicates that the root is inaccessible.
		return nil, errRevisionNotFound
	}

	// All other verification of the root is done during use.
	return &Revision{
		handle: handle,
		root:   root,
	}, nil
}
59 |
// Get returns the value stored at key in this revision.
// If the revision becomes inaccessible, it is invalidated and an error
// is returned; subsequent calls return errRevisionNotFound.
func (r *Revision) Get(key []byte) ([]byte, error) {
	if r.handle == nil {
		return nil, errDBClosed
	}
	// A nil root means a previous call already invalidated this revision.
	if r.root == nil {
		return nil, errRevisionNotFound
	}

	values, cleanup := newValueFactory()
	defer cleanup()

	val := C.fwd_get_from_root(r.handle, values.from(r.root), values.from(key))
	value, err := bytesFromValue(&val)
	if err != nil {
		// Any error from this function indicates that the revision is inaccessible.
		// Clear the root so later calls fail fast.
		r.root = nil
	}
	return value, err
}
79 |
--------------------------------------------------------------------------------
/ffi/src/metrics_setup.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashSet;
2 | use std::io::Write;
3 | use std::net::Ipv6Addr;
4 | use std::ops::Deref;
5 | use std::sync::atomic::Ordering;
6 | use std::sync::{Arc, Once};
7 | use std::time::SystemTime;
8 |
9 | use oxhttp::Server;
10 | use oxhttp::model::{Body, Response, StatusCode};
11 | use std::net::Ipv4Addr;
12 | use std::time::Duration;
13 |
14 | use chrono::{DateTime, Utc};
15 |
16 | use metrics::Key;
17 | use metrics_util::registry::{AtomicStorage, Registry};
18 |
static INIT: Once = Once::new();

/// Installs a global text-format metrics recorder and serves its output over
/// HTTP on `metrics_port`, bound to both IPv4 and IPv6 loopback.
///
/// Only the first call has any effect (guarded by `Once`); later calls are
/// no-ops. Panics if another global recorder is already installed or the
/// HTTP server cannot be spawned.
pub(crate) fn setup_metrics(metrics_port: u16) {
    INIT.call_once(|| {
        let inner: TextRecorderInner = TextRecorderInner {
            registry: Registry::atomic(),
        };
        let recorder = TextRecorder {
            inner: Arc::new(inner),
        };
        metrics::set_global_recorder(recorder.clone()).expect("failed to set recorder");

        // Serve the current stats for any GET request; reject other methods.
        Server::new(move |request| {
            if request.method() == "GET" {
                Response::builder()
                    .status(StatusCode::OK)
                    .header("Content-Type", "text/plain")
                    .body(Body::from(recorder.stats()))
                    .expect("failed to build response")
            } else {
                Response::builder()
                    .status(StatusCode::METHOD_NOT_ALLOWED)
                    .body(Body::from("Method not allowed"))
                    .expect("failed to build response")
            }
        })
        .bind((Ipv4Addr::LOCALHOST, metrics_port))
        .bind((Ipv6Addr::LOCALHOST, metrics_port))
        .with_global_timeout(Duration::from_secs(60 * 60))
        .with_max_concurrent_connections(2)
        .spawn()
        .expect("failed to spawn server");
    });
}
53 |
54 | #[derive(Debug)]
55 | struct TextRecorderInner {
56 | registry: Registry,
57 | }
58 |
59 | #[derive(Debug, Clone)]
60 | struct TextRecorder {
61 | inner: Arc,
62 | }
63 |
64 | impl TextRecorder {
65 | fn stats(&self) -> String {
66 | let mut output = Vec::new();
67 | let systemtime_now = SystemTime::now();
68 | let utc_now: DateTime = systemtime_now.into();
69 | let epoch_duration = systemtime_now
70 | .duration_since(SystemTime::UNIX_EPOCH)
71 | .expect("system time is before Unix epoch");
72 | let epoch_ms = epoch_duration.as_secs() * 1000 + u64::from(epoch_duration.subsec_millis());
73 | writeln!(output, "# {utc_now}").unwrap();
74 |
75 | let counters = self.registry.get_counter_handles();
76 | let mut seen = HashSet::new();
77 | for (key, counter) in counters {
78 | let sanitized_key_name = key.name().to_string().replace('.', "_");
79 | if !seen.contains(&sanitized_key_name) {
80 | writeln!(
81 | output,
82 | "# TYPE {} counter",
83 | key.name().to_string().replace('.', "_")
84 | )
85 | .expect("write error");
86 | seen.insert(sanitized_key_name.clone());
87 | }
88 | write!(output, "{sanitized_key_name}").expect("write error");
89 | if key.labels().len() > 0 {
90 | write!(
91 | output,
92 | "{{{}}}",
93 | key.labels()
94 | .map(|label| format!("{}=\"{}\"", label.key(), label.value()))
95 | .collect::>()
96 | .join(",")
97 | )
98 | .expect("write error");
99 | }
100 | writeln!(output, " {} {}", counter.load(Ordering::Relaxed), epoch_ms)
101 | .expect("write error");
102 | }
103 | writeln!(output).expect("write error");
104 | output.flush().expect("flush error");
105 |
106 | std::str::from_utf8(output.as_slice())
107 | .expect("failed to convert to string")
108 | .into()
109 | }
110 | }
111 |
112 | impl Deref for TextRecorder {
113 | type Target = Arc;
114 |
115 | fn deref(&self) -> &Self::Target {
116 | &self.inner
117 | }
118 | }
119 |
120 | impl metrics::Recorder for TextRecorder {
121 | fn describe_counter(
122 | &self,
123 | _key: metrics::KeyName,
124 | _unit: Option,
125 | _description: metrics::SharedString,
126 | ) {
127 | }
128 |
129 | fn describe_gauge(
130 | &self,
131 | _key: metrics::KeyName,
132 | _unit: Option,
133 | _description: metrics::SharedString,
134 | ) {
135 | }
136 |
137 | fn describe_histogram(
138 | &self,
139 | _key: metrics::KeyName,
140 | _unit: Option,
141 | _description: metrics::SharedString,
142 | ) {
143 | }
144 |
145 | fn register_counter(
146 | &self,
147 | key: &metrics::Key,
148 | _metadata: &metrics::Metadata<'_>,
149 | ) -> metrics::Counter {
150 | self.inner
151 | .registry
152 | .get_or_create_counter(key, |c| c.clone().into())
153 | }
154 |
155 | fn register_gauge(
156 | &self,
157 | key: &metrics::Key,
158 | _metadata: &metrics::Metadata<'_>,
159 | ) -> metrics::Gauge {
160 | self.inner
161 | .registry
162 | .get_or_create_gauge(key, |c| c.clone().into())
163 | }
164 |
165 | fn register_histogram(
166 | &self,
167 | key: &metrics::Key,
168 | _metadata: &metrics::Metadata<'_>,
169 | ) -> metrics::Histogram {
170 | self.inner
171 | .registry
172 | .get_or_create_histogram(key, |c| c.clone().into())
173 | }
174 | }
175 |
--------------------------------------------------------------------------------
/ffi/tests/eth/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/ava-labs/firewood/ffi/tests
2 |
3 | go 1.23.9
4 |
5 | toolchain go1.24.2
6 |
7 | require (
8 | github.com/ava-labs/firewood-go/ffi v0.0.0 // this is replaced to use the parent folder
9 | github.com/ava-labs/libevm v1.13.14-0.2.0.release
10 | github.com/holiman/uint256 v1.3.2
11 | github.com/stretchr/testify v1.10.0
12 | )
13 |
14 | require (
15 | github.com/DataDog/zstd v1.5.2 // indirect
16 | github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
17 | github.com/beorn7/perks v1.0.1 // indirect
18 | github.com/bits-and-blooms/bitset v1.10.0 // indirect
19 | github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
20 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
21 | github.com/cockroachdb/errors v1.9.1 // indirect
22 | github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
23 | github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect
24 | github.com/cockroachdb/redact v1.1.3 // indirect
25 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
26 | github.com/consensys/bavard v0.1.13 // indirect
27 | github.com/consensys/gnark-crypto v0.12.1 // indirect
28 | github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect
29 | github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
30 | github.com/davecgh/go-spew v1.1.1 // indirect
31 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
32 | github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
33 | github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
34 | github.com/getsentry/sentry-go v0.18.0 // indirect
35 | github.com/go-ole/go-ole v1.3.0 // indirect
36 | github.com/gofrs/flock v0.8.1 // indirect
37 | github.com/gogo/protobuf v1.3.2 // indirect
38 | github.com/golang/protobuf v1.5.4 // indirect
39 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
40 | github.com/gorilla/websocket v1.5.0 // indirect
41 | github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
42 | github.com/klauspost/compress v1.15.15 // indirect
43 | github.com/kr/pretty v0.3.1 // indirect
44 | github.com/kr/text v0.2.0 // indirect
45 | github.com/mattn/go-runewidth v0.0.13 // indirect
46 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
47 | github.com/mmcloughlin/addchain v0.4.0 // indirect
48 | github.com/olekukonko/tablewriter v0.0.5 // indirect
49 | github.com/pkg/errors v0.9.1 // indirect
50 | github.com/pmezard/go-difflib v1.0.0 // indirect
51 | github.com/prometheus/client_golang v1.16.0 // indirect
52 | github.com/prometheus/client_model v0.3.0 // indirect
53 | github.com/prometheus/common v0.42.0 // indirect
54 | github.com/prometheus/procfs v0.10.1 // indirect
55 | github.com/rivo/uniseg v0.2.0 // indirect
56 | github.com/rogpeppe/go-internal v1.12.0 // indirect
57 | github.com/shirou/gopsutil v3.21.11+incompatible // indirect
58 | github.com/supranational/blst v0.3.14 // indirect
59 | github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
60 | github.com/tklauser/go-sysconf v0.3.12 // indirect
61 | github.com/tklauser/numcpus v0.6.1 // indirect
62 | github.com/yusufpapurcu/wmi v1.2.4 // indirect
63 | golang.org/x/crypto v0.35.0 // indirect
64 | golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
65 | golang.org/x/sync v0.11.0 // indirect
66 | golang.org/x/sys v0.30.0 // indirect
67 | golang.org/x/text v0.22.0 // indirect
68 | google.golang.org/protobuf v1.33.0 // indirect
69 | gopkg.in/yaml.v3 v3.0.1 // indirect
70 | rsc.io/tmplfunc v0.0.3 // indirect
71 | )
72 |
73 | replace github.com/ava-labs/firewood-go/ffi => ../../
74 |
--------------------------------------------------------------------------------
/ffi/tests/firewood/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/ava-labs/firewood/ffi/tests
2 |
3 | go 1.23.9
4 |
5 | toolchain go1.24.2
6 |
7 | require (
8 | github.com/ava-labs/firewood-go/ffi v0.0.0 // this is replaced to use the parent folder
9 | github.com/stretchr/testify v1.10.0
10 | )
11 |
12 | require github.com/ava-labs/avalanchego v1.13.1
13 |
14 | require (
15 | github.com/BurntSushi/toml v1.2.0 // indirect
16 | github.com/beorn7/perks v1.0.1 // indirect
17 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect
18 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
19 | github.com/davecgh/go-spew v1.1.1 // indirect
20 | github.com/go-logr/logr v1.4.1 // indirect
21 | github.com/go-logr/stdr v1.2.2 // indirect
22 | github.com/golang/protobuf v1.5.4 // indirect
23 | github.com/google/renameio/v2 v2.0.0 // indirect
24 | github.com/gorilla/rpc v1.2.0 // indirect
25 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
26 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
27 | github.com/mr-tron/base58 v1.2.0 // indirect
28 | github.com/pmezard/go-difflib v1.0.0 // indirect
29 | github.com/prometheus/client_golang v1.16.0 // indirect
30 | github.com/prometheus/client_model v0.3.0 // indirect
31 | github.com/prometheus/common v0.42.0 // indirect
32 | github.com/prometheus/procfs v0.10.1 // indirect
33 | go.opentelemetry.io/otel v1.22.0 // indirect
34 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
35 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
36 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 // indirect
37 | go.opentelemetry.io/otel/metric v1.22.0 // indirect
38 | go.opentelemetry.io/otel/sdk v1.22.0 // indirect
39 | go.opentelemetry.io/otel/trace v1.22.0 // indirect
40 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect
41 | go.uber.org/multierr v1.11.0 // indirect
42 | go.uber.org/zap v1.26.0 // indirect
43 | golang.org/x/crypto v0.36.0 // indirect
44 | golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect
45 | golang.org/x/net v0.38.0 // indirect
46 | golang.org/x/sys v0.31.0 // indirect
47 | golang.org/x/term v0.30.0 // indirect
48 | golang.org/x/text v0.23.0 // indirect
49 | gonum.org/v1/gonum v0.11.0 // indirect
50 | google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
51 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect
52 | google.golang.org/grpc v1.66.0 // indirect
53 | google.golang.org/protobuf v1.35.2 // indirect
54 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
55 | gopkg.in/yaml.v3 v3.0.1 // indirect
56 | )
57 |
58 | replace github.com/ava-labs/firewood-go/ffi => ../../
59 |
--------------------------------------------------------------------------------
/firewood/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "firewood"
3 | version = "0.0.5"
4 | edition = "2024"
5 | authors = [
6 | "Ted Yin (@Determinant) ",
7 | "Dan Sover (@exdx) ",
8 | "Hao Hao (@haohao-os) ",
9 | "Gyuho Lee (@gyuho) ",
10 | "Sam Batschelet (@hexfusion) ",
11 | "Ron Kuris (@rkuris) ",
12 | ]
13 | description = "Firewood is an embedded key-value store, optimized to store blockchain state."
14 | license-file = "../LICENSE.md"
15 | homepage = "https://avalabs.org"
16 | readme = "../README.md"
17 |
18 | [dependencies]
19 | aquamarine = "0.6.0"
20 | async-trait = "0.1.77"
21 | futures = "0.3.30"
22 | hex = "0.4.3"
23 | metrics = "0.24.0"
24 | serde = { version = "1.0" }
25 | sha2 = "0.10.8"
26 | test-case = "3.3.1"
27 | thiserror = "2.0.3"
28 | typed-builder = "0.21.0"
29 | bincode = "1.3.3"
30 | integer-encoding = "4.0.0"
31 | smallvec = "1.6.1"
32 | fastrace = { version = "0.7.4" }
33 |
34 | [features]
35 | default = []
36 | nightly = []
37 | io-uring = ["storage/io-uring"]
38 | logger = ["storage/logger"]
39 | branch_factor_256 = [ "storage/branch_factor_256" ]
40 | ethhash = [ "storage/ethhash" ]
41 |
42 | [dev-dependencies]
43 | triehash = { version = "0.8.5", path = "../triehash" }
44 | criterion = { version = "0.6.0", features = ["async_tokio"] }
45 | rand = "0.9.0"
46 | rand_distr = "0.5.0"
47 | clap = { version = "4.5.0", features = ['derive'] }
48 | pprof = { version = "0.15.0", features = ["flamegraph"] }
49 | tempfile = "3.12.0"
50 | tokio = { version = "1.36.0", features = ["rt", "sync", "macros", "rt-multi-thread"] }
51 | ethereum-types = "0.15.1"
52 | sha3 = "0.10.8"
53 | plain_hasher = "0.2.3"
54 | hex-literal = "1.0.0"
55 | env_logger = "0.11.7"
56 | hash-db = "0.16.0"
57 |
58 | [[bench]]
59 | name = "hashops"
60 | harness = false
61 |
62 | [lints.clippy]
63 | unwrap_used = "warn"
64 | indexing_slicing = "warn"
65 | explicit_deref_methods = "warn"
66 | missing_const_for_fn = "warn"
67 |
68 | [target.'cfg(target_os = "linux")'.dependencies]
69 | storage = { path = "../storage", features = ["io-uring"] }
70 |
71 | [target.'cfg(not(target_os = "linux"))'.dependencies]
72 | storage = { path = "../storage" }
73 |
--------------------------------------------------------------------------------
/firewood/benches/hashops.rs:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
4 | // hash benchmarks; run with 'cargo bench'
5 |
6 | use criterion::profiler::Profiler;
7 | use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
8 | use firewood::db::{BatchOp, DbConfig};
9 | use firewood::merkle::Merkle;
10 | use firewood::v2::api::{Db as _, Proposal as _};
11 | use pprof::ProfilerGuard;
12 | use rand::rngs::StdRng;
13 | use rand::{Rng, SeedableRng};
14 | use rand_distr::Alphanumeric;
15 | use std::fs::File;
16 | use std::iter::repeat_with;
17 | use std::os::raw::c_int;
18 | use std::path::Path;
19 | use std::sync::Arc;
20 | use storage::{MemStore, NodeStore};
21 |
// To enable flamegraph output
// cargo bench --bench hashops -- --profile-time=N
enum FlamegraphProfiler {
    Init(c_int),                 // sampling frequency, before profiling starts
    Active(ProfilerGuard<'static>), // live profiler once start_profiling ran
}
28 |
29 | fn file_error_panic(path: &Path) -> impl FnOnce(T) -> U {
30 | |_| panic!("Error on file `{}`", path.display())
31 | }
32 |
impl Profiler for FlamegraphProfiler {
    #[expect(clippy::unwrap_used)]
    fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) {
        // Transition Init -> Active when profiling begins; the stored
        // frequency becomes the sampling rate of the guard.
        if let Self::Init(frequency) = self {
            let guard = ProfilerGuard::new(*frequency).unwrap();
            *self = Self::Active(guard);
        }
    }

    #[expect(clippy::unwrap_used)]
    fn stop_profiling(&mut self, _benchmark_id: &str, benchmark_dir: &Path) {
        std::fs::create_dir_all(benchmark_dir).unwrap();
        let filename = "firewood-flamegraph.svg";
        let flamegraph_path = benchmark_dir.join(filename);
        let flamegraph_file =
            File::create(&flamegraph_path).unwrap_or_else(file_error_panic(&flamegraph_path));

        // Write the SVG report only if profiling actually started.
        #[expect(clippy::unwrap_used)]
        if let Self::Active(profiler) = self {
            profiler
                .report()
                .build()
                .unwrap()
                .flamegraph(flamegraph_file)
                .unwrap_or_else(file_error_panic(&flamegraph_path));
        }
    }
}
61 |
62 | // This benchmark peeks into the merkle layer and times how long it takes
63 | // to insert NKEYS with a key length of KEYSIZE
64 | fn bench_merkle(criterion: &mut Criterion) {
65 | let mut rng = StdRng::seed_from_u64(1234);
66 |
67 | criterion
68 | .benchmark_group("Merkle")
69 | .sample_size(30)
70 | .bench_function("insert", |b| {
71 | b.iter_batched(
72 | || {
73 | let store = Arc::new(MemStore::new(vec![]));
74 | let nodestore = NodeStore::new_empty_proposal(store);
75 | let merkle = Merkle::from(nodestore);
76 |
77 | let keys: Vec> = repeat_with(|| {
78 | (&mut rng)
79 | .sample_iter(&Alphanumeric)
80 | .take(KEYSIZE)
81 | .collect()
82 | })
83 | .take(NKEYS)
84 | .collect();
85 |
86 | (merkle, keys)
87 | },
88 | #[expect(clippy::unwrap_used)]
89 | |(mut merkle, keys)| {
90 | keys.into_iter()
91 | .for_each(|key| merkle.insert(&key, Box::new(*b"v")).unwrap());
92 | let _frozen = merkle.hash();
93 | },
94 | BatchSize::SmallInput,
95 | );
96 | });
97 | }
98 |
99 | #[expect(clippy::unwrap_used)]
100 | fn bench_db(criterion: &mut Criterion) {
101 | const KEY_LEN: usize = 4;
102 | let mut rng = StdRng::seed_from_u64(1234);
103 |
104 | criterion
105 | .benchmark_group("Db")
106 | .sample_size(30)
107 | .bench_function("commit", |b| {
108 | b.to_async(tokio::runtime::Runtime::new().unwrap())
109 | .iter_batched(
110 | || {
111 | let batch_ops: Vec<_> = repeat_with(|| {
112 | (&mut rng)
113 | .sample_iter(&Alphanumeric)
114 | .take(KEY_LEN)
115 | .collect()
116 | })
117 | .map(|key: Vec<_>| BatchOp::Put {
118 | key,
119 | value: vec![b'v'],
120 | })
121 | .take(N)
122 | .collect();
123 | batch_ops
124 | },
125 | |batch_ops| async {
126 | let db_path = std::env::temp_dir();
127 | let db_path = db_path.join("benchmark_db");
128 | let cfg = DbConfig::builder();
129 |
130 | let db = firewood::db::Db::new(db_path, cfg.clone().truncate(true).build())
131 | .await
132 | .unwrap();
133 |
134 | db.propose(batch_ops).await.unwrap().commit().await.unwrap()
135 | },
136 | BatchSize::SmallInput,
137 | );
138 | });
139 | }
140 |
141 | criterion_group! {
142 | name = benches;
143 | config = Criterion::default().with_profiler(FlamegraphProfiler::Init(100));
144 | targets = bench_merkle::<3, 4>, bench_merkle<3, 32>, bench_db::<100>
145 | }
146 |
147 | criterion_main!(benches);
148 |
--------------------------------------------------------------------------------
/firewood/examples/insert.rs:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
4 | // This example isn't an actual benchmark, it's just an example of how to
5 | // insert some random keys using the front-end API.
6 |
7 | use clap::Parser;
8 | use std::borrow::BorrowMut as _;
9 | use std::collections::HashMap;
10 | use std::error::Error;
11 | use std::num::NonZeroUsize;
12 | use std::ops::RangeInclusive;
13 | use std::time::Instant;
14 |
15 | use firewood::db::{Batch, BatchOp, Db, DbConfig};
16 | use firewood::manager::RevisionManagerConfig;
17 | use firewood::v2::api::{Db as _, DbView, Proposal as _};
18 | use rand::{Rng, SeedableRng as _};
19 | use rand_distr::Alphanumeric;
20 |
21 | #[derive(Parser, Debug)]
22 | struct Args {
23 | #[arg(short, long, default_value = "1-64", value_parser = string_to_range)]
24 | keylen: RangeInclusive,
25 | #[arg(short, long, default_value = "32", value_parser = string_to_range)]
26 | valuelen: RangeInclusive,
27 | #[arg(short, long, default_value_t = 1)]
28 | batch_size: usize,
29 | #[arg(short, long, default_value_t = 100)]
30 | number_of_batches: usize,
31 | #[arg(short = 'p', long, default_value_t = 0, value_parser = clap::value_parser!(u16).range(0..=100))]
32 | read_verify_percent: u16,
33 | #[arg(short, long)]
34 | seed: Option,
35 | #[arg(short, long, default_value_t = NonZeroUsize::new(20480).expect("is non-zero"))]
36 | cache_size: NonZeroUsize,
37 | #[arg(short, long, default_value_t = true)]
38 | truncate: bool,
39 | #[arg(short, long, default_value_t = 128)]
40 | revisions: usize,
41 | }
42 |
/// Parses "N" into `N..=N` and "LO-HI" into `LO..=HI`.
///
/// # Errors
/// Returns an error if any part fails to parse as `usize` or if the input
/// contains more than one dash.
/// (Return type restored — angle-bracket content was stripped; slice-pattern
/// match replaces indexed access and the duplicated parse of the single-part
/// case.)
fn string_to_range(input: &str) -> Result<RangeInclusive<usize>, Box<dyn Error>> {
    let parts: Vec<&str> = input.split('-').collect();
    match parts.as_slice() {
        [single] => {
            let n = single.parse()?;
            Ok(n..=n)
        }
        [lo, hi] => Ok(lo.parse()?..=hi.parse()?),
        _ => Err("Too many dashes in input string".into()),
    }
}
53 |
54 | /// cargo run --release --example insert
55 | #[tokio::main(flavor = "multi_thread")]
56 | async fn main() -> Result<(), Box> {
57 | let args = Args::parse();
58 |
59 | let mgrcfg = RevisionManagerConfig::builder()
60 | .node_cache_size(args.cache_size)
61 | .max_revisions(args.revisions)
62 | .build();
63 | let cfg = DbConfig::builder()
64 | .truncate(args.truncate)
65 | .manager(mgrcfg)
66 | .build();
67 |
68 | let db = Db::new("rev_db", cfg)
69 | .await
70 | .expect("db initiation should succeed");
71 |
72 | let keys = args.batch_size;
73 | let start = Instant::now();
74 |
75 | let mut rng = if let Some(seed) = args.seed {
76 | rand::rngs::StdRng::seed_from_u64(seed)
77 | } else {
78 | rand::rngs::StdRng::from_os_rng()
79 | };
80 |
81 | for _ in 0..args.number_of_batches {
82 | let keylen = rng.random_range(args.keylen.clone());
83 | let valuelen = rng.random_range(args.valuelen.clone());
84 | let batch: Batch, Vec> = (0..keys)
85 | .map(|_| {
86 | (
87 | rng.borrow_mut()
88 | .sample_iter(&Alphanumeric)
89 | .take(keylen)
90 | .collect::>(),
91 | rng.borrow_mut()
92 | .sample_iter(&Alphanumeric)
93 | .take(valuelen)
94 | .collect::>(),
95 | )
96 | })
97 | .map(|(key, value)| BatchOp::Put { key, value })
98 | .collect();
99 |
100 | let verify = get_keys_to_verify(&batch, args.read_verify_percent);
101 |
102 | #[expect(clippy::unwrap_used)]
103 | let proposal = db.propose(batch).await.unwrap();
104 | proposal.commit().await?;
105 | verify_keys(&db, verify).await?;
106 | }
107 |
108 | let duration = start.elapsed();
109 | println!(
110 | "Generated and inserted {} batches of size {keys} in {duration:?}",
111 | args.number_of_batches
112 | );
113 |
114 | Ok(())
115 | }
116 |
117 | fn get_keys_to_verify(batch: &Batch, Vec>, pct: u16) -> HashMap, Box<[u8]>> {
118 | if pct == 0 {
119 | HashMap::new()
120 | } else {
121 | batch
122 | .iter()
123 | .filter(|_last_key| rand::rng().random_range(0..=(100 - pct)) == 0)
124 | .map(|op| {
125 | if let BatchOp::Put { key, value } = op {
126 | (key.clone(), value.clone().into_boxed_slice())
127 | } else {
128 | unreachable!()
129 | }
130 | })
131 | .collect()
132 | }
133 | }
134 |
135 | async fn verify_keys(
136 | db: &impl firewood::v2::api::Db,
137 | verify: HashMap, Box<[u8]>>,
138 | ) -> Result<(), firewood::v2::api::Error> {
139 | if !verify.is_empty() {
140 | let hash = db.root_hash().await?.expect("root hash should exist");
141 | let revision = db.revision(hash).await?;
142 | for (key, value) in verify {
143 | assert_eq!(Some(value), revision.val(key).await?);
144 | }
145 | }
146 | Ok(())
147 | }
148 |
--------------------------------------------------------------------------------
/firewood/src/range_proof.rs:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2024, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
4 | use storage::Hashable;
5 |
6 | use crate::proof::Proof;
7 |
8 | /// A range proof proves that a given set of key-value pairs
9 | /// are in the trie with a given root hash.
10 | #[derive(Debug)]
11 | pub struct RangeProof, V: AsRef<[u8]>, H: Hashable> {
12 | #[expect(dead_code)]
13 | pub(crate) start_proof: Option>,
14 | #[expect(dead_code)]
15 | pub(crate) end_proof: Option>,
16 | #[expect(dead_code)]
17 | pub(crate) key_values: Box<[(K, V)]>,
18 | }
19 |
--------------------------------------------------------------------------------
/firewood/src/v2/emptydb.rs:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 | // See the file LICENSE.md for licensing terms.
3 |
4 | use crate::proof::{Proof, ProofNode};
5 | use crate::range_proof::RangeProof;
6 |
7 | use super::api::{Batch, Db, DbView, Error, HashKey, KeyType, ValueType};
8 | use super::propose::{Proposal, ProposalBase};
9 | use async_trait::async_trait;
10 | use futures::Stream;
11 | use std::sync::Arc;
12 |
/// An `EmptyDb` is a simple implementation of `api::Db`
/// that doesn't store any data. It contains a single
/// [`HistoricalImpl`] that has no keys or values.
#[derive(Debug)]
pub struct EmptyDb;
18 |
/// `HistoricalImpl` is always empty, and there is only one,
/// since nothing can be committed to an [`EmptyDb`].
#[derive(Debug)]
pub struct HistoricalImpl;
23 |
24 | #[async_trait]
25 | impl Db for EmptyDb {
26 | type Historical = HistoricalImpl;
27 |
28 | type Proposal<'p> = Proposal;
29 |
30 | async fn revision(&self, hash_key: HashKey) -> Result, Error> {
31 | Err(Error::HashNotFound { provided: hash_key })
32 | }
33 |
34 | async fn root_hash(&self) -> Result