├── CLAUDE.md ├── .markdownlint.json ├── ffi ├── .gitignore ├── src │ ├── proofs.rs │ ├── value │ │ ├── hash_key.rs │ │ ├── display_hex.rs │ │ └── kvp.rs │ ├── iterator.rs │ ├── revision.rs │ ├── value.rs │ ├── arc_cache.rs │ └── logging.rs ├── build.rs ├── go.mod ├── Cargo.toml ├── maybe.go ├── test-build-equivalency.sh ├── keepalive.go ├── tests │ ├── firewood │ │ └── go.mod │ └── eth │ │ └── go.mod ├── metrics_test.go ├── metrics.go ├── go.sum ├── cbindgen.toml ├── flake.lock └── generate_cgo.go ├── .github ├── .gitignore ├── pull_request_template.md ├── license-header.txt ├── workflows │ ├── release.yaml │ ├── ffi-nix.yaml │ ├── default-branch-cache.yaml │ ├── expected-golangci-yaml-diff.yaml │ ├── cache-cleanup.yaml │ ├── pr-title.yaml │ ├── gh-pages.yaml │ ├── publish.yaml │ ├── label-pull-requests.yaml │ └── metrics-check.yaml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml ├── check-license-headers.yaml ├── .golangci.yaml.patch └── scripts │ └── verify_golangci_yaml_changes.sh ├── .gitattributes ├── .cargo └── config.toml ├── .vscode └── extensions.json ├── firewood-macros ├── tests │ ├── compile_fail │ │ ├── invalid_args.stderr │ │ ├── no_args.rs │ │ ├── invalid_args.rs │ │ ├── non_result_return.stderr │ │ ├── non_result_return.rs │ │ └── no_args.stderr │ └── compile_pass │ │ ├── basic_usage.rs │ │ └── with_attributes.rs ├── Cargo.toml └── README.md ├── firewood ├── src │ ├── db │ │ └── tests.rs │ ├── v2 │ │ └── mod.rs │ ├── merkle │ │ └── tests │ │ │ └── triehash.rs │ └── proofs │ │ └── mod.rs ├── Cargo.toml └── examples │ └── insert.rs ├── triehash ├── README.md ├── Cargo.toml ├── CHANGELOG.md └── benches │ └── triehash.rs ├── CODEOWNERS ├── storage ├── src │ ├── hashers │ │ ├── mod.rs │ │ └── merkledb.rs │ ├── node │ │ └── leaf.rs │ ├── hashtype.rs │ ├── macros_test.rs │ ├── logger.rs │ ├── hashedshunt.rs │ ├── iter.rs │ ├── macros.rs │ ├── linear │ │ └── memory.rs │ ├── path │ │ └── joined.rs │ └── hashtype │ │ └── 
trie_hash.rs └── Cargo.toml ├── .config └── nextest.toml ├── benchmark ├── setup-scripts │ ├── build-firewood.sh │ ├── README.md │ ├── run-benchmarks.sh │ ├── install-golang.sh │ ├── build-environment.sh │ └── install-grafana.sh ├── src │ ├── create.rs │ ├── single.rs │ ├── tenkrandom.rs │ └── zipf.rs ├── Cargo.toml └── bootstrap │ └── README.md ├── fwdctl ├── src │ ├── graph.rs │ ├── root.rs │ ├── delete.rs │ ├── insert.rs │ ├── get.rs │ ├── create.rs │ └── main.rs ├── build.rs ├── README.md └── Cargo.toml ├── scripts └── run-just.sh ├── clippy.toml ├── .gitignore ├── Cargo.toml ├── LICENSE.md ├── README.docker.md └── cliff.toml /CLAUDE.md: -------------------------------------------------------------------------------- 1 | AGENTS.md -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "line-length": false, 3 | } 4 | -------------------------------------------------------------------------------- /ffi/.gitignore: -------------------------------------------------------------------------------- 1 | dbtest 2 | _obj 3 | 4 | # Nix output 5 | result 6 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | .golangci.yaml 2 | .golangci.yaml.orig 3 | .golangci.yaml.rej 4 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Hint that ffi/firewood.h is a generated file for GitHub Linguist 2 | ffi/firewood.h linguist-generated=true 3 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Why this should be merged 2 | 3 | ## How 
this works 4 | 5 | ## How this was tested 6 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | build-static-ffi = "build --frozen --profile maxperf --package firewood-ffi --features ethhash,logger" 3 | -------------------------------------------------------------------------------- /.github/license-header.txt: -------------------------------------------------------------------------------- 1 | // Copyright (C) %year%, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "davidanson.vscode-markdownlint", 4 | "rust-lang.rust-analyzer", 5 | "vadimcn.vscode-lldb" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /ffi/src/proofs.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | mod change; 5 | mod range; 6 | 7 | pub use self::change::*; 8 | pub use self::range::*; 9 | -------------------------------------------------------------------------------- /firewood-macros/tests/compile_fail/invalid_args.stderr: -------------------------------------------------------------------------------- 1 | error: Expected string literal(s) for metric name and optional description 2 | --> tests/compile_fail/invalid_args.rs:4:11 3 | | 4 | 4 | #[metrics(123)] 5 | | ^^^ 6 | -------------------------------------------------------------------------------- /firewood/src/db/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![expect(clippy::unwrap_used)] 5 | 6 | mod merge; 7 | 8 | use super::test::TestDb; 9 | -------------------------------------------------------------------------------- /triehash/README.md: -------------------------------------------------------------------------------- 1 | # triehash 2 | 3 | This crate provides utility functions to validate and initialize tries using flexible input. 4 | It is used extensively in `parity-ethereum` to validate blocks (mostly transactions and receipt roots). 5 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS 2 | # Note: more specific rules overrule wildcard 3 | * @rkuris @demosdemon @RodrigoVillar 4 | /ffi @rkuris @demosdemon @alarso16 @RodrigoVillar 5 | /.github @rkuris @demosdemon @aaronbuchwald @RodrigoVillar 6 | -------------------------------------------------------------------------------- /firewood/src/v2/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 
2 | // See the file LICENSE.md for licensing terms. 3 | 4 | /// The public API 5 | pub mod api; 6 | 7 | /// A batch operation and associated types 8 | mod batch_op; 9 | -------------------------------------------------------------------------------- /storage/src/hashers/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #[cfg(feature = "ethhash")] 5 | mod ethhash; 6 | #[cfg(not(feature = "ethhash"))] 7 | mod merkledb; 8 | -------------------------------------------------------------------------------- /firewood-macros/tests/compile_fail/no_args.rs: -------------------------------------------------------------------------------- 1 | // Test that metrics macro fails when no arguments are provided 2 | use firewood_macros::metrics; 3 | 4 | #[metrics()] 5 | fn function_without_args() -> Result<(), &'static str> { 6 | Ok(()) 7 | } 8 | 9 | fn main() {} 10 | -------------------------------------------------------------------------------- /firewood-macros/tests/compile_fail/invalid_args.rs: -------------------------------------------------------------------------------- 1 | // Test that metrics macro fails with invalid arguments 2 | use firewood_macros::metrics; 3 | 4 | #[metrics(123)] 5 | fn function_with_invalid_arg() -> Result<(), &'static str> { 6 | Ok(()) 7 | } 8 | 9 | fn main() {} 10 | -------------------------------------------------------------------------------- /firewood-macros/tests/compile_fail/non_result_return.stderr: -------------------------------------------------------------------------------- 1 | error: Function must return a Result to use #[metrics] attribute 2 | --> tests/compile_fail/non_result_return.rs:5:33 3 | | 4 | 5 | fn function_without_result() -> i32 { 5 | | ^^^ 6 | -------------------------------------------------------------------------------- 
/firewood-macros/tests/compile_fail/non_result_return.rs: -------------------------------------------------------------------------------- 1 | // Test that metrics macro fails when function doesn't return Result 2 | use firewood_macros::metrics; 3 | 4 | #[metrics("test.invalid")] 5 | fn function_without_result() -> i32 { 6 | 42 7 | } 8 | 9 | fn main() {} 10 | -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [profile.default] 2 | # Skip tests prefixed with test_slow in default profile 3 | default-filter = "not test(test_slow)" 4 | 5 | [profile.ci] 6 | # Do not cancel the test run on the first failure. 7 | fail-fast = false 8 | # Override default filter to run all tests including slow ones 9 | default-filter = "all()" -------------------------------------------------------------------------------- /firewood-macros/tests/compile_fail/no_args.stderr: -------------------------------------------------------------------------------- 1 | error: Expected string literal for metric prefix, e.g., #[metrics("my.operation")] or #[metrics("my.operation", "description")] 2 | --> tests/compile_fail/no_args.rs:5:1 3 | | 4 | 5 | / fn function_without_args() -> Result<(), &'static str> { 5 | 6 | | Ok(()) 6 | 7 | | } 7 | | |_^ 8 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v4 14 | - name: Release 15 | uses: softprops/action-gh-release@v1 16 | with: 17 | draft: true 18 | generate_release_notes: true 19 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Possible Solutions** 20 | If known, give some ideas of what may be done to fix the problem. 21 | 22 | **Additional context** 23 | Add any other context about the problem here. 24 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/build-firewood.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | 4 | if [ "$EUID" -eq 0 ]; then 5 | echo "This script should be run as a non-root user" 6 | exit 1 7 | fi 8 | 9 | # install rust 10 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 11 | . "$HOME/.cargo/env" 12 | 13 | # clone the firewood repository 14 | if [ ! -d "$HOME/firewood" ]; then 15 | mkdir -p "$HOME/firewood" 16 | fi 17 | pushd "$HOME/firewood" 18 | 19 | git clone https://github.com/ava-labs/firewood.git . 
20 | 21 | # build the firewood binary 22 | cargo build --profile maxperf 23 | popd 24 | 25 | -------------------------------------------------------------------------------- /.github/workflows/ffi-nix.yaml: -------------------------------------------------------------------------------- 1 | name: ffi-nix 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | 7 | jobs: 8 | ffi-nix: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 #v20 13 | - uses: DeterminateSystems/magic-nix-cache-action@565684385bcd71bad329742eefe8d12f2e765b39 #v13 14 | - name: Check that FFI flake is up-to-date 15 | run: ./scripts/run-just.sh check-ffi-flake 16 | - name: Test nix build of Golang FFI bindings 17 | run: ./scripts/run-just.sh test-ffi-nix 18 | 19 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /firewood-macros/tests/compile_pass/basic_usage.rs: -------------------------------------------------------------------------------- 1 | // Test that basic metrics macro usage compiles correctly 2 | use firewood_macros::metrics; 3 | 4 | #[metrics("test.basic")] 5 | fn test_basic_function() -> Result<(), &'static str> { 6 | Ok(()) 7 | } 8 | 9 | #[metrics("test.with_description", "test operation")] 10 | fn test_function_with_description() -> Result> { 11 | Ok("success".to_string()) 12 | } 13 | 14 | #[metrics("test.complex")] 15 | async fn test_async_function() -> Result, std::io::Error> { 16 | Ok(vec![1, 2, 3]) 17 | } 18 | 19 | fn main() { 20 | // These functions should compile but we don't need to call them 21 | // since this is just a compilation test 22 | } 23 | -------------------------------------------------------------------------------- /fwdctl/src/graph.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | use firewood::db::{Db, DbConfig}; 6 | use firewood::v2::api; 7 | use std::io::stdout; 8 | 9 | use crate::DatabasePath; 10 | 11 | #[derive(Debug, Args)] 12 | pub struct Options { 13 | #[command(flatten)] 14 | pub database: DatabasePath, 15 | } 16 | 17 | pub(super) fn run(opts: &Options) -> Result<(), api::Error> { 18 | log::debug!("dump database {opts:?}"); 19 | let cfg = DbConfig::builder().create_if_missing(false).truncate(false); 20 | 21 | let db = Db::new(opts.database.dbpath.clone(), cfg.build())?; 22 | db.dump(&mut stdout())?; 23 | Ok(()) 24 | } 25 | -------------------------------------------------------------------------------- /fwdctl/src/root.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 
2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | 6 | use firewood::db::{Db, DbConfig}; 7 | use firewood::v2::api::{self, Db as _}; 8 | 9 | use crate::DatabasePath; 10 | 11 | #[derive(Debug, Args)] 12 | pub struct Options { 13 | #[command(flatten)] 14 | pub database_dir: DatabasePath, 15 | } 16 | 17 | pub(super) fn run(opts: &Options) -> Result<(), api::Error> { 18 | let cfg = DbConfig::builder().create_if_missing(false).truncate(false); 19 | 20 | let db = Db::new(opts.database_dir.dbpath.clone(), cfg.build())?; 21 | 22 | let hash = db.root_hash()?; 23 | 24 | println!("{hash:?}"); 25 | Ok(()) 26 | } 27 | -------------------------------------------------------------------------------- /.github/workflows/default-branch-cache.yaml: -------------------------------------------------------------------------------- 1 | # because apparently caches are isolated across branches 2 | name: default-branch-cache 3 | 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - main 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | - uses: dtolnay/rust-toolchain@stable 20 | with: 21 | components: clippy,rustfmt 22 | - uses: Swatinem/rust-cache@v2 23 | with: 24 | save-if: "false" 25 | shared-key: "debug-no-features" 26 | - name: Check 27 | run: cargo check --workspace --all-targets 28 | - name: Build 29 | run: cargo build --workspace --all-targets 30 | -------------------------------------------------------------------------------- /firewood-macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood-macros" 3 | version.workspace = true 4 | edition.workspace = true 5 | authors = [ 6 | "Ron Kuris ", 7 | ] 8 | description = "Proc macros for Firewood metrics" 9 | license-file.workspace = true 10 | homepage.workspace = true 11 | repository.workspace = true 12 | 
rust-version.workspace = true 13 | 14 | [lib] 15 | proc-macro = true 16 | 17 | [dependencies] 18 | # Regular dependencies 19 | proc-macro2 = "1.0" 20 | quote = "1.0" 21 | syn = { version = "2.0", features = ["full", "extra-traits"] } 22 | 23 | [dev-dependencies] 24 | # Workspace dependencies 25 | coarsetime.workspace = true 26 | metrics.workspace = true 27 | # Regular dependencies 28 | trybuild = "1.0" 29 | 30 | [lints] 31 | workspace = true 32 | -------------------------------------------------------------------------------- /storage/src/node/leaf.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::fmt::{Debug, Error as FmtError, Formatter}; 5 | 6 | use crate::Path; 7 | 8 | /// A leaf node 9 | #[derive(PartialEq, Eq, Clone)] 10 | pub struct LeafNode { 11 | /// The path of this leaf, but only the remaining nibbles 12 | pub partial_path: Path, 13 | 14 | /// The value associated with this leaf 15 | pub value: Box<[u8]>, 16 | } 17 | 18 | impl Debug for LeafNode { 19 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> { 20 | write!( 21 | f, 22 | "[Leaf {:?} {}]", 23 | self.partial_path, 24 | super::DisplayTruncatedHex(&self.value) 25 | ) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" 9 | directory: "/" 10 | schedule: 11 | interval: "daily" 12 | time: "05:00" 13 | timezone: "America/Los_Angeles" 14 | open-pull-requests-limit: 0 # Disable non-security version updates 15 | - package-ecosystem: github-actions 16 | directory: "/" 17 | schedule: 18 | interval: weekly 19 | open-pull-requests-limit: 0 # Disable non-security version updates 20 | -------------------------------------------------------------------------------- /scripts/run-just.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | if command -v just &> /dev/null; then 5 | exec just "$@" 6 | elif command -v nix &> /dev/null; then 7 | exec nix run nixpkgs#just -- "$@" 8 | else 9 | echo "Error: Neither 'just' nor 'nix' is installed." 
>&2 10 | echo "" >&2 11 | echo "Please install one of the following:" >&2 12 | echo "" >&2 13 | echo "Option 1 - Install just:" >&2 14 | echo " - Visit: https://github.com/casey/just#installation" >&2 15 | echo " - Or use cargo: cargo install just" >&2 16 | echo "" >&2 17 | echo "Option 2 - Install nix:" >&2 18 | echo " - Visit: https://github.com/DeterminateSystems/nix-installer" >&2 19 | echo " - Or run: curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install" >&2 20 | exit 1 21 | fi 22 | -------------------------------------------------------------------------------- /triehash/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood-triehash" 3 | version = "0.0.16" 4 | authors = ["Parity Technologies ", "Ron Kuris "] 5 | description = "In-memory patricia trie operations" 6 | repository.workspace = true 7 | license = "MIT OR Apache-2.0" 8 | edition.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | # Regular dependencies 13 | hash-db = "0.16.0" 14 | rlp = "0.6" 15 | 16 | [dev-dependencies] 17 | # Workspace dependencies 18 | criterion.workspace = true 19 | ethereum-types.workspace = true 20 | hex-literal.workspace = true 21 | # Regular dependencies 22 | keccak-hasher = "0.16.0" 23 | tiny-keccak = { version = "2.0", features = ["keccak"] } 24 | trie-standardmap = "0.16.0" 25 | 26 | [[bench]] 27 | name = "triehash" 28 | path = "benches/triehash.rs" 29 | harness = false 30 | 31 | [lints] 32 | workspace = true 33 | -------------------------------------------------------------------------------- /ffi/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | use std::env; 5 | 6 | extern crate cbindgen; 7 | 8 | fn main() { 9 | let crate_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is not set"); 10 | 11 | let config = cbindgen::Config::from_file("cbindgen.toml").expect("cbindgen.toml is present"); 12 | 13 | cbindgen::Builder::new() 14 | .with_crate(crate_dir) 15 | // Add any additional configuration options here 16 | .with_config(config) 17 | .generate() 18 | .map_or_else( 19 | |error| match error { 20 | cbindgen::Error::ParseSyntaxError { .. } => {} 21 | e => panic!("{e:?}"), 22 | }, 23 | |bindings| { 24 | bindings.write_to_file("firewood.h"); 25 | }, 26 | ); 27 | } 28 | -------------------------------------------------------------------------------- /firewood-macros/tests/compile_pass/with_attributes.rs: -------------------------------------------------------------------------------- 1 | // Test that metrics macro works with other function attributes 2 | use firewood_macros::metrics; 3 | 4 | #[derive(Debug)] 5 | struct TestError; 6 | 7 | impl std::fmt::Display for TestError { 8 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 9 | write!(f, "TestError") 10 | } 11 | } 12 | 13 | impl std::error::Error for TestError {} 14 | 15 | #[metrics("test.with_doc", "documented function")] 16 | /// This function has documentation 17 | pub fn documented_function() -> Result { 18 | Ok(42) 19 | } 20 | 21 | #[inline] 22 | #[metrics("test.inline")] 23 | fn inline_function() -> Result<(), TestError> { 24 | Ok(()) 25 | } 26 | 27 | #[allow(dead_code)] 28 | #[metrics("test.allowed", "function with allow attribute")] 29 | fn function_with_allow() -> Result { 30 | Ok(true) 31 | } 32 | 33 | fn main() {} 34 | -------------------------------------------------------------------------------- /.github/workflows/expected-golangci-yaml-diff.yaml: -------------------------------------------------------------------------------- 1 | name: expected golangci.yaml diff 2 | 3 | on: 4 | pull_request: 5 | paths: 
6 | - .github/scripts/verify_golangci_yaml_changes.sh 7 | - .github/workflows/expected-golangci-yaml-diff.yaml 8 | - .github/.golangci.yaml.patch 9 | - ffi/.golangci.yaml 10 | # if updating the general CI workflow, go ahead and update the golangci.yaml 11 | - .github/workflows/ci.yaml 12 | # for good measure, if someone is editing the go.mod or go.sum files then 13 | # they should make sure the golangci.yaml is up to date 14 | - ffi/go.mod 15 | - ffi/go.sum 16 | push: 17 | branches: 18 | - "main" 19 | 20 | jobs: 21 | expected-golangci-yaml-diff: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v4 25 | - name: Validate expected golangci-lint changes 26 | run: .github/scripts/verify_golangci_yaml_changes.sh 27 | -------------------------------------------------------------------------------- /.github/workflows/cache-cleanup.yaml: -------------------------------------------------------------------------------- 1 | name: cleanup caches by a branch 2 | on: 3 | pull_request: 4 | types: 5 | - closed 6 | 7 | jobs: 8 | cleanup: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Check out code 12 | uses: actions/checkout@v4 13 | 14 | - name: Cleanup 15 | run: | 16 | gh extension install actions/gh-actions-cache 17 | 18 | REPO=${{ github.repository }} 19 | BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge" 20 | 21 | echo "Fetching list of cache key" 22 | cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 ) 23 | 24 | ## Setting this to not fail the workflow while deleting cache keys. 25 | set +e 26 | echo "Deleting caches..." 
27 | for cacheKey in $cacheKeysForPR 28 | do 29 | gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm 30 | done 31 | echo "Done" 32 | env: 33 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 34 | 35 | -------------------------------------------------------------------------------- /fwdctl/src/delete.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | use firewood::db::{BatchOp, Db, DbConfig}; 6 | use firewood::v2::api::{self, Db as _, Proposal as _}; 7 | 8 | use crate::DatabasePath; 9 | 10 | #[derive(Debug, Args)] 11 | pub struct Options { 12 | #[command(flatten)] 13 | pub database: DatabasePath, 14 | 15 | /// The key to delete 16 | #[arg(required = true, value_name = "KEY", help = "Key to delete")] 17 | pub key: String, 18 | } 19 | 20 | pub(super) fn run(opts: &Options) -> Result<(), api::Error> { 21 | log::debug!("deleting key {opts:?}"); 22 | let cfg = DbConfig::builder().create_if_missing(false).truncate(false); 23 | 24 | let db = Db::new(opts.database.dbpath.clone(), cfg.build())?; 25 | 26 | let batch: Vec> = vec![BatchOp::Delete { 27 | key: opts.key.clone(), 28 | }]; 29 | let proposal = db.propose(batch)?; 30 | proposal.commit()?; 31 | 32 | println!("key {} deleted successfully", opts.key); 33 | Ok(()) 34 | } 35 | -------------------------------------------------------------------------------- /triehash/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | This changelog is deprecatated. Please see the changelog at the top 4 | level. 5 | 6 | The format is based on [Keep a Changelog]. 
7 | 8 | [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ 9 | 10 | ## [Unreleased] 11 | 12 | ## [0.8.5] - 2025-03-26 13 | 14 | - Updated `hash-db` to 0.16.0 15 | - Updated `rlp` to 0.6 16 | - Updated `criterion` to 0.5.1 17 | - Updated `keccak-hasher` to 0.16.0 18 | - Updated `ethereum-types` to 0.15.1 19 | - Updated `trie-standardmap` to 0.16.0 20 | - Updated `hex-literal` to 1.0.0 21 | 22 | ## [0.8.4] - 2020-01-08 23 | 24 | - Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) 25 | 26 | ## [0.8.3] - 2020-03-16 27 | 28 | - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) 29 | 30 | ## [0.8.2] - 2019-12-15 31 | 32 | - Added no-std support. [#280](https://github.com/paritytech/parity-common/pull/280) 33 | 34 | ## [0.8.1] - 2019-10-24 35 | 36 | - Migrated to 2018 edition. [#214](https://github.com/paritytech/parity-common/pull/214) 37 | 38 | ### Dependencies 39 | 40 | - Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) 41 | -------------------------------------------------------------------------------- /ffi/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ava-labs/firewood/ffi 2 | 3 | go 1.24 4 | 5 | // Changes to the toolchain version should be replicated in: 6 | // - ffi/go.mod (here) 7 | // - ffi/flake.nix (update golang.url to a version of avalanchego's nix/go/flake.nix that uses the desired version and run `just update-ffi-flake`) 8 | // - ffi/tests/eth/go.mod 9 | // - ffi/tests/firewood/go.mod 10 | // `just check-golang-version` validates that these versions are in sync and will run in CI as part of the ffi-nix job. 
11 | toolchain go1.24.9 12 | 13 | require ( 14 | github.com/prometheus/client_golang v1.22.0 15 | github.com/prometheus/client_model v0.6.1 16 | github.com/prometheus/common v0.62.0 17 | github.com/stretchr/testify v1.10.0 18 | ) 19 | 20 | require ( 21 | github.com/beorn7/perks v1.0.1 // indirect 22 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 23 | github.com/davecgh/go-spew v1.1.1 // indirect 24 | github.com/kr/text v0.2.0 // indirect 25 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 26 | github.com/pmezard/go-difflib v1.0.0 // indirect 27 | github.com/prometheus/procfs v0.15.1 // indirect 28 | golang.org/x/sys v0.30.0 // indirect 29 | google.golang.org/protobuf v1.36.5 // indirect 30 | gopkg.in/yaml.v3 v3.0.1 // indirect 31 | ) 32 | -------------------------------------------------------------------------------- /fwdctl/src/insert.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | use clap::Args; 5 | use firewood::db::{BatchOp, Db, DbConfig}; 6 | use firewood::v2::api::{self, Db as _, Proposal as _}; 7 | 8 | use crate::DatabasePath; 9 | 10 | #[derive(Debug, Args)] 11 | pub struct Options { 12 | #[command(flatten)] 13 | pub database: DatabasePath, 14 | 15 | /// The key to insert 16 | #[arg(required = true, value_name = "KEY", help = "Key to insert")] 17 | pub key: String, 18 | 19 | /// The value to insert 20 | #[arg(required = true, value_name = "VALUE", help = "Value to insert")] 21 | pub value: String, 22 | } 23 | 24 | pub(super) fn run(opts: &Options) -> Result<(), api::Error> { 25 | log::debug!("inserting key value pair {opts:?}"); 26 | let cfg = DbConfig::builder().create_if_missing(false).truncate(false); 27 | 28 | let db = Db::new(opts.database.dbpath.clone(), cfg.build())?; 29 | 30 | let batch: Vec, Vec>> = vec![BatchOp::Put { 31 | key: opts.key.clone().into(), 32 | value: opts.value.bytes().collect(), 33 | }]; 34 | let proposal = db.propose(batch)?; 35 | proposal.commit()?; 36 | 37 | println!("{}", opts.key); 38 | Ok(()) 39 | } 40 | -------------------------------------------------------------------------------- /fwdctl/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | use std::process::Command; 5 | 6 | fn main() { 7 | // Get the git commit SHA 8 | let git_sha = match Command::new("git").args(["rev-parse", "HEAD"]).output() { 9 | Ok(output) => { 10 | if output.status.success() { 11 | String::from_utf8_lossy(&output.stdout).trim().to_string() 12 | } else { 13 | let error_msg = String::from_utf8_lossy(&output.stderr); 14 | format!("git error: {}", error_msg.trim()) 15 | } 16 | } 17 | Err(e) => { 18 | format!("git not found: {e}") 19 | } 20 | }; 21 | 22 | // Check if ethhash feature is enabled 23 | let ethhash_feature = if cfg!(feature = "ethhash") { 24 | "ethhash" 25 | } else { 26 | "-ethhash" 27 | }; 28 | 29 | // Make the git SHA and ethhash status available to the main.rs file 30 | println!("cargo:rustc-env=GIT_COMMIT_SHA={git_sha}"); 31 | println!("cargo:rustc-env=ETHHASH_FEATURE={ethhash_feature}"); 32 | 33 | // Re-run this build script if the git HEAD changes 34 | println!("cargo:rerun-if-changed=../.git/HEAD"); 35 | println!("cargo:rerun-if-changed=../.git/index"); 36 | } 37 | -------------------------------------------------------------------------------- /.github/workflows/pr-title.yaml: -------------------------------------------------------------------------------- 1 | # Check that the PR title matches the conventional commit format 2 | name: pr-title 3 | 4 | permissions: 5 | pull-requests: write 6 | 7 | on: 8 | pull_request: 9 | types: 10 | - edited 11 | - opened 12 | - reopened 13 | 14 | jobs: 15 | check-pr-title: 16 | runs-on: ubuntu-latest 17 | permissions: 18 | pull-requests: read 19 | steps: 20 | - name: Check PR title follows conventional commits 21 | uses: amannn/action-semantic-pull-request@v5 22 | with: 23 | types: | 24 | build 25 | chore 26 | ci 27 | docs 28 | feat 29 | fix 30 | perf 31 | refactor 32 | style 33 | test 34 | # scope is not required ("feat: whatever" is okay) 35 | requireScope: false 36 | # if the PR only has one commit, we can validate the commit message 37 | # instead of the PR title 38 
| validateSingleCommit: true 39 | subjectPattern: ^.{1,}$ 40 | subjectPatternError: | 41 | The subject "{subject}" found in the pull request title "{title}" 42 | didn't match the configured pattern. Please ensure that the subject 43 | matches the conventional commit format. 44 | env: 45 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 46 | 47 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/README.md: -------------------------------------------------------------------------------- 1 | # Setup Scripts 2 | 3 | This directory contains the scripts needed to set up the firewood benchmarks, as follows: 4 | 5 | ```bash 6 | sudo bash build-environment.sh 7 | ``` 8 | 9 | This script sets up the build environment, including installing the firewood build dependencies. 10 | 11 | By default, it sets the bytes-per-inode to 2097152 (2MB) when creating the ext4 filesystem. This default works well for workloads that create many small files (such as LevelDB with AvalancheGo). 12 | 13 | If you're not using LevelDB (for example, just using Firewood without AvalancheGo), you don't need as many inodes, which gives you more room for the database itself. In this case, you can and should use a larger value with the `--bytes-per-inode` option: 14 | 15 | ```bash 16 | sudo bash build-environment.sh --bytes-per-inode 6291456 17 | ``` 18 | 19 | ```bash 20 | sudo bash install-grafana.sh 21 | ``` 22 | 23 | This script sets up grafana to listen on port 3000 for firewood. It also sets up listening 24 | for coreth as well, on port 6060, with the special metrics path coreth expects. 25 | 26 | ```bash 27 | bash build-firewood.sh 28 | ``` 29 | 30 | This script checks out and builds firewood. It assumes you have already set up the build environment earlier. 31 | 32 | The final script, `run-benchmarks.sh`, is a set of commands that can be copied/pasted to run individual 33 | benchmarks of different sizes. 
34 | -------------------------------------------------------------------------------- /fwdctl/src/get.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use clap::Args; 5 | 6 | use firewood::db::{Db, DbConfig}; 7 | use firewood::v2::api::{self, Db as _, DbView as _}; 8 | 9 | use crate::DatabasePath; 10 | 11 | #[derive(Debug, Args)] 12 | pub struct Options { 13 | #[command(flatten)] 14 | pub database: DatabasePath, 15 | 16 | /// The key to get the value for 17 | #[arg(required = true, value_name = "KEY", help = "Key to get")] 18 | pub key: String, 19 | } 20 | 21 | pub(super) fn run(opts: &Options) -> Result<(), api::Error> { 22 | log::debug!("get key value pair {opts:?}"); 23 | let cfg = DbConfig::builder().create_if_missing(false).truncate(false); 24 | 25 | let db = Db::new(opts.database.dbpath.clone(), cfg.build())?; 26 | 27 | let hash = db.root_hash()?; 28 | 29 | let Some(hash) = hash else { 30 | println!("Database is empty"); 31 | return Ok(()); 32 | }; 33 | 34 | let rev = db.revision(hash)?; 35 | 36 | match rev.val(opts.key.as_bytes()) { 37 | Ok(Some(val)) => { 38 | let s = String::from_utf8_lossy(val.as_ref()); 39 | println!("{s:?}"); 40 | Ok(()) 41 | } 42 | Ok(None) => { 43 | eprintln!("Key '{}' not found", opts.key); 44 | Ok(()) 45 | } 46 | Err(e) => Err(e), 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /fwdctl/README.md: -------------------------------------------------------------------------------- 1 | # fwdctl 2 | 3 | `fwdctl` is a small CLI designed to make it easy to experiment with firewood locally. 
4 | 5 | ## Building locally 6 | 7 | ```sh 8 | cargo build --release --bin fwdctl 9 | ``` 10 | 11 | To use 12 | 13 | ```sh 14 | ./target/release/fwdctl -h 15 | ``` 16 | 17 | ## Supported commands 18 | 19 | * `fwdctl create`: Create a new firewood database. 20 | * `fwdctl get`: Get the code associated with a key in the database. 21 | * `fwdctl insert`: Insert a key/value pair into the generic key/value store. 22 | * `fwdctl delete`: Delete a key/value pair from the database. 23 | * `fwdctl root`: Get the root hash of the key/value trie. 24 | * `fwdctl dump`: Dump the contents of the key/value store. 25 | 26 | ## Examples 27 | 28 | * fwdctl create 29 | 30 | ```sh 31 | # Check available options when creating a database, including the defaults. 32 | $ fwdctl create -h 33 | # Create a new, blank instance of firewood using the default directory name "firewood". 34 | $ fwdctl create firewood 35 | ``` 36 | 37 | * fwdctl get KEY 38 | 39 | ```sh 40 | # Get the value associated with a key in the database, if it exists. 41 | fwdctl get KEY 42 | ``` 43 | 44 | * fwdctl insert KEY VALUE 45 | 46 | ```sh 47 | # Insert a key/value pair into the database. 48 | fwdctl insert KEY VALUE 49 | ``` 50 | 51 | * fwdctl delete KEY 52 | 53 | ```sh 54 | # Delete a key from the database, along with the associated value. 55 | fwdctl delete KEY 56 | ``` 57 | -------------------------------------------------------------------------------- /storage/src/hashtype.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #[cfg(feature = "ethhash")] 5 | mod ethhash; 6 | mod trie_hash; 7 | 8 | pub use trie_hash::{InvalidTrieHashLength, TrieHash}; 9 | 10 | /// The type of a hash. For ethereum compatible hashes, this might be a RLP encoded 11 | /// value if it's small enough to fit in less than 32 bytes. 
For merkledb compatible 12 | /// hashes, it's always a `TrieHash`. 13 | #[cfg(feature = "ethhash")] 14 | pub type HashType = ethhash::HashOrRlp; 15 | 16 | #[cfg(not(feature = "ethhash"))] 17 | /// The type of a hash. For non-ethereum compatible hashes, this is always a `TrieHash`. 18 | pub type HashType = crate::TrieHash; 19 | 20 | /// A trait to convert a value into a [`HashType`]. 21 | /// 22 | /// This is used to allow different hash types to be conditionally used, e.g., when the 23 | /// `ethhash` feature is enabled. When not enabled, this suppresses the clippy warnings 24 | /// about useless `.into()` calls. 25 | pub trait IntoHashType { 26 | /// Converts the value into a `HashType`. 27 | #[must_use] 28 | fn into_hash_type(self) -> HashType; 29 | } 30 | 31 | impl IntoHashType for crate::TrieHash { 32 | #[inline] 33 | fn into_hash_type(self) -> HashType { 34 | #[cfg(feature = "ethhash")] 35 | { 36 | self.into() 37 | } 38 | 39 | #[cfg(not(feature = "ethhash"))] 40 | { 41 | self 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /.github/check-license-headers.yaml: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "include": [ 4 | "**/**/*.rs", 5 | "**/**/*.go", 6 | "**/**/*.h", 7 | ], 8 | "exclude": [ 9 | "target/**", 10 | "*/LICENSE*", 11 | "LICENSE.md", 12 | "RELEASE.md", 13 | "grpc-testtool/**", 14 | "README*", 15 | "**/README*", 16 | "METRICS.md", 17 | "Cargo.toml", 18 | "Cargo.lock", 19 | "*/Cargo.toml", 20 | "docs/**", 21 | "CODEOWNERS", 22 | "CONTRIBUTING.md", 23 | "benchmark/**", 24 | "triehash/**", 25 | "CHANGELOG.md", 26 | "cliff.toml", 27 | "clippy.toml", 28 | "**/tests/compile_*/**", 29 | "**/go.mod", 30 | "**/go.mod", 31 | "**/cbindgen.toml", 32 | ], 33 | "license": "./.github/license-header.txt" 34 | }, 35 | { 36 | "include": [ 37 | "target/**", 38 | "*/LICENSE*", 39 | "LICENSE.md", 40 | "RELEASE.md", 41 | "grpc-testtool/**", 42 | "README*", 43 | 
"**/README*", 44 | "METRICS.md", 45 | "Cargo.toml", 46 | "Cargo.lock", 47 | "*/Cargo.toml", 48 | "docs/**", 49 | "benchmark/**", 50 | "ffi/**", 51 | "CODEOWNERS", 52 | "CONTRIBUTING.md", 53 | "triehash/**", 54 | "CHANGELOG.md", 55 | "AGENTS.md", 56 | "CLAUDE.md", 57 | "cliff.toml", 58 | "clippy.toml", 59 | "**/tests/compile_*/**", 60 | "justfile", 61 | "scripts/run-just.sh", 62 | ], 63 | } 64 | ] 65 | -------------------------------------------------------------------------------- /benchmark/src/create.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![expect( 5 | clippy::arithmetic_side_effects, 6 | reason = "Found 1 occurrences after enabling the lint." 7 | )] 8 | 9 | use std::error::Error; 10 | use std::time::Instant; 11 | 12 | use fastrace::prelude::SpanContext; 13 | use fastrace::{Span, func_path}; 14 | use firewood::db::Db; 15 | use firewood::v2::api::{Db as _, Proposal as _}; 16 | use log::info; 17 | 18 | use pretty_duration::pretty_duration; 19 | 20 | use crate::{Args, TestRunner}; 21 | 22 | #[derive(Clone)] 23 | pub struct Create; 24 | 25 | impl TestRunner for Create { 26 | fn run(&self, db: &Db, args: &Args) -> Result<(), Box> { 27 | let keys = args.global_opts.batch_size; 28 | let start = Instant::now(); 29 | 30 | for key in 0..args.global_opts.number_of_batches { 31 | let root = Span::root(func_path!(), SpanContext::random()); 32 | let _guard = root.set_local_parent(); 33 | 34 | let batch = Self::generate_inserts(key * keys, args.global_opts.batch_size); 35 | 36 | let proposal = db.propose(batch).expect("proposal should succeed"); 37 | proposal.commit()?; 38 | } 39 | let duration = start.elapsed(); 40 | info!( 41 | "Generated and inserted {} batches of size {keys} in {}", 42 | args.global_opts.number_of_batches, 43 | pretty_duration(&duration, None) 44 | ); 45 | 46 | Ok(()) 47 | } 48 | } 49 | 
-------------------------------------------------------------------------------- /ffi/src/value/hash_key.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::fmt; 5 | 6 | /// A database hash key, used in FFI functions that require hashes. 7 | /// This type requires no allocation and can be copied freely and 8 | /// dropped without any additional overhead. 9 | /// 10 | /// This is useful because it is the same size as 4 words which is equivalent 11 | /// to 2 heap-allocated slices (pointer + length each), or 1.5 vectors (which 12 | /// uses an extra word for allocation capacity) and it can be passed around 13 | /// without needing to allocate or deallocate memory. 14 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] 15 | // Must use `repr(C)` instead of `repr(transparent)` to ensure it is a struct 16 | // with one field instead of a type alias of an array of 32 element, which is 17 | // necessary for FFI compatibility so that `HashKey` can be passed by value; 18 | // otherwise, it would look like a pointer to an array of 32 bytes. 
19 | #[repr(C)] 20 | pub struct HashKey([u8; 32]); 21 | 22 | impl fmt::Display for HashKey { 23 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 24 | super::DisplayHex(&self.0).fmt(f) 25 | } 26 | } 27 | 28 | impl From for HashKey { 29 | fn from(value: firewood::v2::api::HashKey) -> Self { 30 | Self(value.into()) 31 | } 32 | } 33 | 34 | impl From for firewood::v2::api::HashKey { 35 | fn from(value: HashKey) -> Self { 36 | value.0.into() 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /ffi/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood-ffi" 3 | version.workspace = true 4 | edition.workspace = true 5 | authors = [ 6 | "Aaron Buchwald ", 7 | "Arran Schlosberg <519948+ARR4N@users.noreply.github.com>", 8 | "Austin Larson <78000745+alarso16@users.noreply.github.com>", 9 | "Darioush Jalali ", 10 | "Ron Kuris ", 11 | ] 12 | description = "C FFI bindings for Firewood, an embedded key-value store optimized for blockchain state." 
13 | license-file.workspace = true 14 | homepage.workspace = true 15 | repository.workspace = true 16 | readme.workspace = true 17 | rust-version.workspace = true 18 | 19 | [lib] 20 | crate-type = ["staticlib"] 21 | 22 | [dependencies] 23 | # Workspace dependencies 24 | coarsetime.workspace = true 25 | firewood.workspace = true 26 | firewood-storage.workspace = true 27 | metrics.workspace = true 28 | metrics-util.workspace = true 29 | parking_lot.workspace = true 30 | # Regular dependencies 31 | chrono = "0.4.42" 32 | oxhttp = "0.3.1" 33 | tikv-jemallocator = "0.6.1" 34 | # Optional dependencies 35 | env_logger = { workspace = true, optional = true } 36 | derive-where = "1.6.0" 37 | 38 | [dev-dependencies] 39 | # Workspace dependencies 40 | test-case.workspace = true 41 | 42 | [features] 43 | logger = ["dep:env_logger", "firewood/logger"] 44 | ethhash = ["firewood/ethhash"] 45 | io-uring = ["firewood/io-uring", "firewood-storage/io-uring"] 46 | 47 | [build-dependencies] 48 | cbindgen = "0.29.2" 49 | 50 | [lints] 51 | workspace = true 52 | 53 | [package.metadata.cargo-machete] 54 | ignored = ["cbindgen"] 55 | -------------------------------------------------------------------------------- /fwdctl/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood-fwdctl" 3 | version.workspace = true 4 | edition.workspace = true 5 | authors = [ 6 | "Dan Laine ", 7 | "Dan Sover ", 8 | "Hao Hao ", 9 | "Richard Pringle ", 10 | "Ron Kuris ", 11 | "Sam Batschelet ", 12 | "xinifinity <113067541+xinifinity@users.noreply.github.com>", 13 | "zdf ", 14 | ] 15 | description = "Command-line tool for Firewood, an embedded key-value store optimized for blockchain state." 
16 | license-file.workspace = true 17 | homepage.workspace = true 18 | repository.workspace = true 19 | rust-version.workspace = true 20 | 21 | [[bin]] 22 | name = "fwdctl" 23 | path = "src/main.rs" 24 | 25 | [dependencies] 26 | # Workspace dependencies 27 | clap = { workspace = true, features = ["cargo", "string"] } 28 | env_logger.workspace = true 29 | firewood.workspace = true 30 | firewood-storage.workspace = true 31 | hex.workspace = true 32 | log.workspace = true 33 | nonzero_ext.workspace = true 34 | # Regular dependencies 35 | csv = "1.4.0" 36 | indicatif = "0.18.3" 37 | askama = "0.14.0" 38 | num-format = "0.4.4" 39 | 40 | [features] 41 | ethhash = ["firewood/ethhash"] 42 | logger = ["firewood/logger"] 43 | 44 | [dev-dependencies] 45 | # Workspace dependencies 46 | firewood-storage = { workspace = true, features = ["test_utils"] } 47 | rand.workspace = true 48 | tempfile.workspace = true 49 | # Regular dependencies 50 | assert_cmd = "2.1.1" 51 | predicates = "3.1.3" 52 | 53 | [lints] 54 | workspace = true 55 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/run-benchmarks.sh: -------------------------------------------------------------------------------- 1 | DB="$HOME/firewood/benchmark-db" 2 | # 10M rows: 3 | nohup time cargo run --profile maxperf --bin benchmark -- -n 1000 -d "$DB"-10M create & 4 | wait 5 | nohup time cargo run --profile maxperf --bin benchmark -- -n 1000 -d "$DB"-10M zipf & 6 | wait 7 | nohup time cargo run --profile maxperf --bin benchmark -- -n 1000 -d "$DB"-10M single & 8 | wait 9 | 10 | # 50M rows: 11 | nohup time cargo run --profile maxperf --bin benchmark -- -n 5000 -d "$DB"-50M create & 12 | wait 13 | nohup time cargo run --profile maxperf --bin benchmark -- -n 5000 -d "$DB"-50M zipf & 14 | wait 15 | nohup time cargo run --profile maxperf --bin benchmark -- -n 5000 -d "$DB"-50M single & 16 | wait 17 | 18 | # 100M rows: 19 | nohup time cargo run --profile maxperf --bin 
benchmark -- -n 10000 -d "$DB"-100M create & 20 | wait 21 | nohup time cargo run --profile maxperf --bin benchmark -- -n 10000 -d "$DB"-100M zipf & 22 | wait 23 | nohup time cargo run --profile maxperf --bin benchmark -- -n 10000 -d "$DB"-100M single & 24 | wait 25 | 26 | # 500M rows: 27 | nohup time cargo run --profile maxperf --bin benchmark -- -n 50000 -d "$DB"-500M create & 28 | wait 29 | nohup time cargo run --profile maxperf --bin benchmark -- -n 50000 -d "$DB"-500M zipf & 30 | wait 31 | nohup time cargo run --profile maxperf --bin benchmark -- -n 50000 -d "$DB"-500M single & 32 | wait 33 | 34 | # 1B rows: 35 | nohup time cargo run --profile maxperf --bin benchmark -- -n 100000 -d "$DB"-1B create & 36 | wait 37 | nohup time cargo run --profile maxperf --bin benchmark -- -n 100000 -d "$DB"-1B zipf & 38 | wait 39 | nohup time cargo run --profile maxperf --bin benchmark -- -n 100000 -d "$DB"-1B single & 40 | wait 41 | -------------------------------------------------------------------------------- /ffi/src/iterator.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use derive_where::derive_where; 5 | use firewood::merkle; 6 | use firewood::v2::api::{self, ArcDynDbView, BoxKeyValueIter}; 7 | use std::iter::FusedIterator; 8 | 9 | type KeyValueItem = (merkle::Key, merkle::Value); 10 | 11 | /// An opaque wrapper around a [`BoxKeyValueIter`] and a reference 12 | /// to the [`ArcDynDbView`] backing it, preventing the view from 13 | /// being dropped while iteration is in progress. 
14 | #[derive(Default)] 15 | #[derive_where(Debug)] 16 | #[derive_where(skip_inner)] 17 | pub struct IteratorHandle<'view>(Option<(BoxKeyValueIter<'view>, ArcDynDbView)>); 18 | 19 | impl Iterator for IteratorHandle<'_> { 20 | type Item = Result; 21 | 22 | fn next(&mut self) -> Option { 23 | let out = self.0.as_mut()?.0.next(); 24 | if out.is_none() { 25 | // iterator exhausted; drop it so the NodeStore can be released 26 | self.0 = None; 27 | } 28 | out.map(|res| res.map_err(api::Error::from)) 29 | } 30 | } 31 | 32 | impl FusedIterator for IteratorHandle<'_> {} 33 | 34 | #[expect(clippy::missing_errors_doc)] 35 | impl<'view> IteratorHandle<'view> { 36 | pub fn iter_next_n(&mut self, n: usize) -> Result, api::Error> { 37 | self.by_ref().take(n).collect() 38 | } 39 | 40 | pub fn new(view: ArcDynDbView, iter: BoxKeyValueIter<'view>) -> Self { 41 | IteratorHandle(Some((iter, view))) 42 | } 43 | } 44 | 45 | #[derive(Debug, Default)] 46 | pub struct CreateIteratorResult<'db>(pub IteratorHandle<'db>); 47 | -------------------------------------------------------------------------------- /storage/src/macros_test.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | #[cfg(test)] 5 | mod tests { 6 | 7 | #[test] 8 | fn test_firewood_counter_macro() { 9 | // Test counter without labels 10 | let counter = crate::firewood_counter!("test.counter.simple", "A simple test counter"); 11 | counter.increment(1); 12 | 13 | // Test counter with labels 14 | let counter_with_labels = crate::firewood_counter!("test.counter.labeled", "A labeled test counter", "env" => "test"); 15 | counter_with_labels.increment(5); 16 | } 17 | 18 | #[test] 19 | fn test_firewood_gauge_macro() { 20 | // Test gauge without labels 21 | let gauge = crate::firewood_gauge!("test.gauge.simple", "A simple test gauge"); 22 | gauge.set(42.0); 23 | gauge.increment(10.0); 24 | gauge.decrement(5.0); 25 | 26 | // Test gauge with labels 27 | let gauge_with_labels = 28 | crate::firewood_gauge!("test.gauge.labeled", "A labeled test gauge", "env" => "test"); 29 | gauge_with_labels.set(100.0); 30 | } 31 | 32 | #[test] 33 | fn test_macro_description_registration() { 34 | // Verify that calling the macro multiple times doesn't panic 35 | // (the static ONCE guard should ensure describe_* is only called once) 36 | for i in 0..10 { 37 | let counter = crate::firewood_counter!("test.counter.multi", "Multi-call test counter"); 38 | counter.increment(1); 39 | 40 | let gauge = crate::firewood_gauge!("test.gauge.multi", "Multi-call test gauge"); 41 | gauge.set(f64::from(i)); 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /clippy.toml: -------------------------------------------------------------------------------- 1 | # See https://doc.rust-lang.org/clippy/lint_configuration.html 2 | # for full configuration options. 3 | 4 | msrv = "1.91" 5 | 6 | doc-valid-idents = [ 7 | "MerkleDB", 8 | # this list must end with ".." 
so that it does not truncate the default list 9 | "..", 10 | ] 11 | 12 | disallowed-methods = [ 13 | { path = "rand::rng", replacement = "firewood_storage::SeededRng::from_env_or_random", reason = "use a prng with a user-defined seed instead", allow-invalid = true }, 14 | ] 15 | 16 | disallowed-types = [ 17 | { path = "rand::SeedableRng", replacement = "firewood_storage::SeededRng", reason = "use a prng with a user-defined seed instead", allow-invalid = true }, 18 | { path = "rand::rngs::StdRng", replacement = "firewood_storage::SeededRng", reason = "use a prng with a user-defined seed instead", allow-invalid = true }, 19 | { path = "std::sync::Mutex", replacement = "parking_lot::Mutex", reason = "prefer parking_lot locking primitives for consistency and performance", allow-invalid = true }, 20 | { path = "std::sync::MutexGuard", replacement = "parking_lot::MutexGuard", reason = "prefer parking_lot locking primitives for consistency and performance", allow-invalid = true }, 21 | { path = "std::sync::RwLock", replacement = "parking_lot::RwLock", reason = "prefer parking_lot locking primitives for consistency and performance", allow-invalid = true }, 22 | { path = "std::sync::RwLockReadGuard", replacement = "parking_lot::RwLockReadGuard", reason = "prefer parking_lot locking primitives for consistency and performance", allow-invalid = true }, 23 | { path = "std::sync::RwLockWriteGuard", replacement = "parking_lot::RwLockWriteGuard", reason = "prefer parking_lot locking primitives for consistency and performance", allow-invalid = true }, 24 | ] 25 | -------------------------------------------------------------------------------- /ffi/maybe.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | package ffi 5 | 6 | // #include 7 | // #include "firewood.h" 8 | import "C" 9 | 10 | import ( 11 | "unsafe" 12 | ) 13 | 14 | // Maybe is an interface that represents an optional value of type T. 15 | // 16 | // Maybe is a drop-in replacement for the Maybe type defined in avalanchego/utils/maybe. 17 | // This interface is used to avoid importing avalanchego packages into the ffi 18 | // package, which would create a circular dependency. 19 | // 20 | // 21 | // 22 | // The avalanchego implementation of Maybe implements this interface. 23 | type Maybe[T any] interface { 24 | // HasValue returns true if the Maybe contains a value. 25 | HasValue() bool 26 | // Value returns the value contained in the Maybe. 27 | // 28 | // Implementations may panic if the Maybe contains no value but can also 29 | // return the zero value of T. 30 | Value() T 31 | } 32 | 33 | func newMaybeBorrowedBytes(maybe Maybe[[]byte], pinner Pinner) C.Maybe_BorrowedBytes { 34 | var cMaybe C.Maybe_BorrowedBytes 35 | 36 | if maybe != nil && maybe.HasValue() { 37 | cMaybeBorrowedBytesPtr := (*C.BorrowedBytes)(unsafe.Pointer(&cMaybe.anon0)) 38 | *cMaybeBorrowedBytesPtr = newBorrowedBytes(maybe.Value(), pinner) 39 | 40 | cMaybe.tag = C.Maybe_BorrowedBytes_Some_BorrowedBytes 41 | } else { 42 | cMaybe.tag = C.Maybe_BorrowedBytes_None_BorrowedBytes 43 | } 44 | 45 | return cMaybe 46 | } 47 | 48 | func (b *ownedBytes) HasValue() bool { 49 | return b != nil 50 | } 51 | 52 | func (b *ownedBytes) Value() *ownedBytes { 53 | return b 54 | } 55 | -------------------------------------------------------------------------------- /benchmark/src/single.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![expect( 5 | clippy::arithmetic_side_effects, 6 | reason = "Found 2 occurrences after enabling the lint." 
7 | )] 8 | #![expect( 9 | clippy::cast_sign_loss, 10 | reason = "Found 1 occurrences after enabling the lint." 11 | )] 12 | 13 | use crate::TestRunner; 14 | use firewood::db::{BatchOp, Db}; 15 | use firewood::v2::api::{Db as _, Proposal as _}; 16 | use log::debug; 17 | use pretty_duration::pretty_duration; 18 | use sha2::{Digest, Sha256}; 19 | use std::error::Error; 20 | use std::time::Instant; 21 | 22 | #[derive(Clone)] 23 | pub struct Single; 24 | 25 | impl TestRunner for Single { 26 | fn run(&self, db: &Db, args: &crate::Args) -> Result<(), Box> { 27 | let start = Instant::now(); 28 | let inner_keys: Vec<_> = (0..args.global_opts.batch_size) 29 | .map(|i| Sha256::digest(i.to_ne_bytes())) 30 | .collect(); 31 | let mut batch_id = 0; 32 | 33 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes { 34 | let batch = inner_keys.iter().map(|key| BatchOp::Put { 35 | key, 36 | value: vec![batch_id as u8], 37 | }); 38 | let proposal = db.propose(batch).expect("proposal should succeed"); 39 | proposal.commit()?; 40 | 41 | if log::log_enabled!(log::Level::Debug) && batch_id % 1000 == 999 { 42 | debug!( 43 | "completed {} batches in {}", 44 | 1 + batch_id, 45 | pretty_duration(&start.elapsed(), None) 46 | ); 47 | } 48 | batch_id += 1; 49 | } 50 | Ok(()) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /firewood/src/merkle/tests/triehash.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | use super::*; 5 | use test_case::test_case; 6 | 7 | #[test_case(vec![], None; "empty trie")] 8 | #[test_case(vec![(&[0],&[0])], Some("073615413d814b23383fc2c8d8af13abfffcb371b654b98dbf47dd74b1e4d1b9"); "root")] 9 | #[test_case(vec![(&[0,1],&[0,1])], Some("28e67ae4054c8cdf3506567aa43f122224fe65ef1ab3e7b7899f75448a69a6fd"); "root with partial path")] 10 | #[test_case(vec![(&[0],&[1;32])], Some("ba0283637f46fa807280b7d08013710af08dfdc236b9b22f9d66e60592d6c8a3"); "leaf value >= 32 bytes")] 11 | #[test_case(vec![(&[0],&[0]),(&[0,1],&[1;32])], Some("3edbf1fdd345db01e47655bcd0a9a456857c4093188cf35c5c89b8b0fb3de17e"); "branch value >= 32 bytes")] 12 | #[test_case(vec![(&[0],&[0]),(&[0,1],&[0,1])], Some("c3bdc20aff5cba30f81ffd7689e94e1dbeece4a08e27f0104262431604cf45c6"); "root with leaf child")] 13 | #[test_case(vec![(&[0],&[0]),(&[0,1],&[0,1]),(&[0,1,2],&[0,1,2])], Some("229011c50ad4d5c2f4efe02b8db54f361ad295c4eee2bf76ea4ad1bb92676f97"); "root with branch child")] 14 | #[test_case(vec![(&[0],&[0]),(&[0,1],&[0,1]),(&[0,8],&[0,8]),(&[0,1,2],&[0,1,2])], Some("a683b4881cb540b969f885f538ba5904699d480152f350659475a962d6240ef9"); "root with branch child and leaf child")] 15 | fn test_root_hash_merkledb_compatible(kvs: Vec<(&[u8], &[u8])>, expected_hash: Option<&str>) { 16 | let merkle = init_merkle(kvs); 17 | let Some(got_hash) = merkle.nodestore.root_hash() else { 18 | assert!(expected_hash.is_none()); 19 | return; 20 | }; 21 | 22 | let expected_hash = expected_hash.unwrap(); 23 | 24 | // This hash is from merkledb 25 | let expected_hash: [u8; 32] = hex::decode(expected_hash).unwrap().try_into().unwrap(); 26 | 27 | assert_eq!(got_hash, TrieHash::from(expected_hash)); 28 | } 29 | -------------------------------------------------------------------------------- /.github/workflows/gh-pages.yaml: -------------------------------------------------------------------------------- 1 | name: gh-pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | - "rkuris/gh-pages" 8 | 9 | 
env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: dtolnay/rust-toolchain@stable 18 | # caution: this is the same restore as in ci.yaml 19 | - uses: Swatinem/rust-cache@v2 20 | with: 21 | save-if: "false" 22 | - name: Build 23 | run: cargo doc --document-private-items --no-deps 24 | - name: Set up _site redirect to firewood 25 | run: | 26 | rm -fr _site 27 | mkdir _site 28 | echo "" > _site/index.html 29 | - name: Copy doc files to _site 30 | run: | 31 | cp -rv target/doc/* ./_site 32 | cp -rv docs/assets ./_site 33 | - uses: actions/upload-artifact@v4 34 | with: 35 | name: pages 36 | path: _site 37 | if-no-files-found: error 38 | overwrite: true 39 | include-hidden-files: true 40 | deploy: 41 | needs: build 42 | permissions: 43 | pages: write 44 | id-token: write 45 | environment: 46 | name: github-pages 47 | url: ${{ steps.deployment.outputs.page_url }} 48 | runs-on: ubuntu-latest 49 | steps: 50 | - name: Download pages artifact 51 | uses: actions/download-artifact@v4 52 | with: 53 | name: pages 54 | path: . 55 | - name: Setup Pages 56 | uses: actions/configure-pages@v3 57 | - name: Upload artifact 58 | uses: actions/upload-pages-artifact@v3 59 | with: 60 | path: . 61 | - name: Deploy to GitHub pages 62 | id: deployment 63 | uses: actions/deploy-pages@v4 64 | -------------------------------------------------------------------------------- /fwdctl/src/create.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | use clap::{Args, value_parser}; 5 | use firewood::db::{Db, DbConfig}; 6 | use firewood::v2::api; 7 | 8 | use crate::DatabasePath; 9 | 10 | #[derive(Args, Debug)] 11 | pub struct Options { 12 | #[command(flatten)] 13 | pub database: DatabasePath, 14 | 15 | #[arg( 16 | long, 17 | required = false, 18 | value_parser = value_parser!(bool), 19 | default_missing_value = "false", 20 | default_value_t = true, 21 | value_name = "TRUNCATE", 22 | help = "Whether to truncate the DB when opening it. If set, the DB will be reset and all its 23 | existing contents will be lost" 24 | )] 25 | pub truncate: bool, 26 | 27 | /// WAL Config 28 | #[arg( 29 | long, 30 | required = false, 31 | default_value_t = 22, 32 | value_name = "WAL_FILE_NBIT", 33 | help = "Size of WAL file." 34 | )] 35 | file_nbit: u64, 36 | 37 | #[arg( 38 | long, 39 | required = false, 40 | default_value_t = 100, 41 | value_name = "Wal_MAX_REVISIONS", 42 | help = "Number of revisions to keep from the past. This preserves a rolling window 43 | of the past N commits to the database." 
44 | )] 45 | max_revisions: u32, 46 | } 47 | 48 | pub(super) fn new(opts: &Options) -> DbConfig { 49 | DbConfig::builder().truncate(opts.truncate).build() 50 | } 51 | 52 | pub(super) fn run(opts: &Options) -> Result<(), api::Error> { 53 | let db_config = new(opts); 54 | log::debug!("database configuration parameters: \n{db_config:?}\n"); 55 | 56 | Db::new(opts.database.dbpath.clone(), db_config)?; 57 | println!( 58 | "created firewood database in {}", 59 | opts.database.dbpath.display() 60 | ); 61 | Ok(()) 62 | } 63 | -------------------------------------------------------------------------------- /benchmark/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood-benchmark" 3 | version.workspace = true 4 | edition.workspace = true 5 | authors = [ 6 | "Aaron Buchwald ", 7 | "Ron Kuris ", 8 | ] 9 | description = "Benchmarking tool for Firewood, an embedded key-value store optimized for blockchain state." 10 | license-file.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | readme.workspace = true 14 | rust-version.workspace = true 15 | 16 | [package.metadata.cargo-machete] 17 | ignored = ["opentelemetry-proto"] 18 | 19 | [[bin]] 20 | name = "benchmark" 21 | path = "src/main.rs" 22 | 23 | [dependencies] 24 | # Workspace dependencies 25 | clap = { workspace = true, features = ['string'] } 26 | env_logger.workspace = true 27 | fastrace = { workspace = true, features = ["enable"] } 28 | firewood.workspace = true 29 | firewood-storage = { workspace = true, features = ["test_utils"] } 30 | hex.workspace = true 31 | log.workspace = true 32 | metrics.workspace = true 33 | metrics-util = { workspace = true, optional = true } 34 | rand.workspace = true 35 | rand_distr.workspace = true 36 | sha2.workspace = true 37 | # Regular dependencies 38 | fastrace-opentelemetry = { version = "=0.14.0" } 39 | metrics-exporter-prometheus = { version = "0.18.1", optional = true } 40 | 
opentelemetry = "=0.31.0" 41 | opentelemetry-otlp = { version = "=0.31.0", features = ["grpc-tonic"] } 42 | opentelemetry-proto = "=0.31.0" 43 | opentelemetry_sdk = "=0.31.0" 44 | pretty-duration = "0.1.1" 45 | tikv-jemallocator = "0.6.1" 46 | 47 | [dependencies.tokio] 48 | optional = true 49 | version = "1.48.0" 50 | features = ["mio", "net", "parking_lot", "rt", "time"] 51 | 52 | [features] 53 | default = ["prometheus"] 54 | logger = ["firewood/logger"] 55 | prometheus = [ 56 | "dep:metrics-exporter-prometheus", 57 | "dep:metrics-util", 58 | "dep:tokio", 59 | ] 60 | 61 | [lints] 62 | workspace = true 63 | -------------------------------------------------------------------------------- /storage/src/logger.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | // Supports making the logging operations a true runtime no-op 5 | // Since we're a library, we can't really use the logging level 6 | // static shortcut 7 | 8 | #[cfg(feature = "logger")] 9 | pub use log::{debug, error, info, trace, warn}; 10 | 11 | /// Returns true if the trace log level is enabled 12 | #[cfg(feature = "logger")] 13 | #[must_use] 14 | pub fn trace_enabled() -> bool { 15 | log::log_enabled!(log::Level::Trace) 16 | } 17 | 18 | #[cfg(not(feature = "logger"))] 19 | pub use noop_logger::{debug, error, info, trace, trace_enabled, warn}; 20 | 21 | #[cfg(not(feature = "logger"))] 22 | mod noop_logger { 23 | #[macro_export] 24 | /// A noop logger, when the logger feature is disabled 25 | macro_rules! noop { 26 | ($($arg:tt)+) => { 27 | if false { 28 | // This is a no-op. If we had an empty macro, the compiler and 29 | // clippy would generate warnings about variables in the 30 | // expressions passed into the macro going unused. 31 | // 32 | // This is a workaround to avoid that. 
The `false` branch will 33 | // never be execute, the expressions passed in will never be 34 | // evaluated, this string will never be constructed, and the 35 | // compiler will completely eliminate this branch when any 36 | // level of optimization is enabled. 37 | let _ = format!($($arg)+); 38 | } 39 | }; 40 | } 41 | 42 | pub use noop as debug; 43 | pub use noop as error; 44 | pub use noop as info; 45 | pub use noop as trace; 46 | pub use noop as warn; 47 | 48 | /// `trace_enabled` for a noop logger is always false 49 | #[inline] 50 | #[must_use] 51 | pub const fn trace_enabled() -> bool { 52 | false 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: publish 2 | 3 | on: 4 | workflow_dispatch: 5 | release: 6 | types: [published] 7 | 8 | jobs: 9 | publish-firewood-crate: 10 | name: firewood-lib 11 | runs-on: ubuntu-latest 12 | if: startsWith(github.event.release.tag_name, 'v') 13 | steps: 14 | - uses: actions/checkout@v1 15 | - uses: dtolnay/rust-toolchain@stable 16 | ## NOTE: keep these packages sorted in reverse topological order! 
17 | ## cargo tree --workspace -e all | grep firewood 18 | - name: publish firewood-macros crate 19 | continue-on-error: false 20 | run: | 21 | cargo login ${{ secrets.CARGO_TOKEN }} 22 | cargo publish -p firewood-macros 23 | # TODO(demosdemon): detect when version is bumped and only publish then 24 | # - name: publish firewood-triehash crate 25 | # continue-on-error: false 26 | # run: | 27 | # cargo login ${{ secrets.CARGO_TOKEN }} 28 | # cargo publish -p firewood-triehash 29 | - name: publish firewood-storage crate 30 | continue-on-error: false 31 | run: | 32 | cargo login ${{ secrets.CARGO_TOKEN }} 33 | cargo publish -p firewood-storage 34 | - name: publish firewood crate 35 | continue-on-error: false 36 | run: | 37 | cargo login ${{ secrets.CARGO_TOKEN }} 38 | cargo publish -p firewood 39 | - name: publish firewood-ffi crate 40 | continue-on-error: false 41 | run: | 42 | cargo login ${{ secrets.CARGO_TOKEN }} 43 | cargo publish -p firewood-ffi 44 | - name: publish firewood-fwdctl crate 45 | continue-on-error: false 46 | run: | 47 | cargo login ${{ secrets.CARGO_TOKEN }} 48 | cargo publish -p firewood-fwdctl 49 | - name: publish firewood-benchmark crate 50 | continue-on-error: false 51 | run: | 52 | cargo login ${{ secrets.CARGO_TOKEN }} 53 | cargo publish -p firewood-benchmark 54 | -------------------------------------------------------------------------------- /ffi/test-build-equivalency.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # Always work from the repo root 6 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" 8 | cd "$REPO_ROOT" 9 | 10 | # Define paths to libraries (relative to repo root) 11 | NIX_LIB="ffi/result/lib/libfirewood_ffi.a" # Default path for the nix build 12 | CARGO_LIB="target/maxperf/libfirewood_ffi.a" 13 | 14 | # Create temporary directory and ensure cleanup on exit 15 | TMPDIR=$(mktemp -d) 16 | trap "rm -rf $TMPDIR" EXIT 17 | 18 | echo "Building with cargo (using nix dev shell)..." 19 | nix develop ./ffi#default --command bash -c "cargo fetch --locked --verbose && cargo build-static-ffi" 20 | 21 | echo "Building with nix..." 22 | cd ffi && nix build .#firewood-ffi && cd .. 23 | 24 | echo "" 25 | echo "=== File Size Comparison ===" 26 | ls -lh "$CARGO_LIB" "$NIX_LIB" 27 | 28 | echo "" 29 | echo "=== Symbol Count Comparison ===" 30 | # Extract symbols to temporary files for comparison 31 | nm "$NIX_LIB" | sort > "$TMPDIR/nix-symbols.txt" 32 | nm "$CARGO_LIB" | sort > "$TMPDIR/cargo-symbols.txt" 33 | 34 | NIX_SYMBOLS=$(wc -l < "$TMPDIR/nix-symbols.txt") 35 | CARGO_SYMBOLS=$(wc -l < "$TMPDIR/cargo-symbols.txt") 36 | echo "Nix build: $NIX_SYMBOLS symbols" 37 | echo "Cargo build: $CARGO_SYMBOLS symbols" 38 | if [ "$NIX_SYMBOLS" -eq "$CARGO_SYMBOLS" ]; then 39 | echo "✅ Symbol counts are both $NIX_SYMBOLS" 40 | else 41 | echo "❌ Symbol counts differ" 42 | echo "" 43 | echo "=== Symbol Differences ===" 44 | echo "Symbols only in Nix build:" 45 | # Show lines that exist in the old file (nix) but not in the new file (cargo) 46 | diff --unchanged-line-format="" --old-line-format="%L" --new-line-format="" "$TMPDIR/nix-symbols.txt" "$TMPDIR/cargo-symbols.txt" || true 47 | echo "" 48 | echo "Symbols only in Cargo build:" 49 | # Show lines that exist in the new file (cargo) but not in the old file (nix) 50 | diff --unchanged-line-format="" --old-line-format="" --new-line-format="%L" "$TMPDIR/nix-symbols.txt" "$TMPDIR/cargo-symbols.txt" || true 51 | fi 52 | -------------------------------------------------------------------------------- 
/.github/workflows/label-pull-requests.yaml: -------------------------------------------------------------------------------- 1 | name: Label pull requests 2 | 3 | on: pull_request_target 4 | 5 | jobs: 6 | add_label: 7 | runs-on: ubuntu-latest 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | steps: 12 | - name: Check if pull request is from a fork 13 | id: check_is_form 14 | run: | 15 | if [[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]]; then 16 | echo "is_fork=true" >> "$GITHUB_OUTPUT" 17 | else 18 | echo "is_fork=false" >> "$GITHUB_OUTPUT" 19 | fi 20 | 21 | - name: Add label to third-party pull request 22 | uses: actions/github-script@v7 23 | with: 24 | script: | 25 | const label = '3rd party contributor'; 26 | const isFork = ${{ steps.check_is_form.outputs.is_fork }}; 27 | const { data: issue } = await github.rest.issues.get({ 28 | owner: context.repo.owner, 29 | repo: context.repo.repo, 30 | issue_number: context.issue.number, 31 | }); 32 | const isLabeled = issue.labels.some(l => l.name === label); 33 | // add the label if a fork (and not already labeled), 34 | // remove if not a fork and is labeled 35 | if (isFork && !isLabeled) { 36 | console.log(`Adding label: ${label}`); 37 | await github.rest.issues.addLabels({ 38 | owner: context.repo.owner, 39 | repo: context.repo.repo, 40 | issue_number: context.issue.number, 41 | labels: [label], 42 | }); 43 | } else if (!isFork && isLabeled) { 44 | console.log(`Removing label: ${label}`); 45 | await github.rest.issues.removeLabel({ 46 | owner: context.repo.owner, 47 | repo: context.repo.repo, 48 | issue_number: context.issue.number, 49 | name: label, 50 | }); 51 | } 52 | console.log(`Label ${isFork ? 
'added' : 'removed'}: ${label}`); 53 | -------------------------------------------------------------------------------- /storage/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood-storage" 3 | version.workspace = true 4 | edition.workspace = true 5 | authors = [ 6 | "Aaron Buchwald ", 7 | "Ron Kuris ", 8 | "Suyan Qu <36519575+qusuyan@users.noreply.github.com>", 9 | ] 10 | description = "Storage layer for Firewood, an embedded key-value store optimized for blockchain state." 11 | license-file.workspace = true 12 | homepage.workspace = true 13 | repository.workspace = true 14 | readme.workspace = true 15 | rust-version.workspace = true 16 | 17 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 18 | 19 | [dependencies] 20 | # Workspace dependencies 21 | aquamarine.workspace = true 22 | bytemuck.workspace = true 23 | bytemuck_derive.workspace = true 24 | coarsetime.workspace = true 25 | fastrace.workspace = true 26 | hex.workspace = true 27 | integer-encoding.workspace = true 28 | metrics.workspace = true 29 | nonzero_ext.workspace = true 30 | rand = { workspace = true, optional = true } 31 | sha2.workspace = true 32 | smallvec.workspace = true 33 | thiserror.workspace = true 34 | # Regular dependencies 35 | bitfield = "0.19.4" 36 | bitflags = "2.10.0" 37 | derive-where = "1.6.0" 38 | enum-as-inner = "0.6.1" 39 | indicatif = "0.18.3" 40 | lru = "0.16.2" 41 | semver = "1.0.27" 42 | triomphe = "0.1.15" 43 | parking_lot.workspace = true 44 | # Optional dependencies 45 | bytes = { version = "1.11.0", optional = true } 46 | io-uring = { version = "0.7.11", optional = true } 47 | log = { version = "0.4.29", optional = true } 48 | rlp = { version = "0.6.1", optional = true } 49 | sha3 = { version = "0.10.8", optional = true } 50 | bumpalo = { version = "3.19.1", features = ["collections", "std"] } 51 | 52 | [dev-dependencies] 53 | cfg-if = "1.0.4" 54 | 
# Workspace dependencies 55 | criterion = { workspace = true, features = ["html_reports"] } 56 | pprof = { workspace = true, features = ["flamegraph"] } 57 | rand.workspace = true 58 | tempfile.workspace = true 59 | test-case.workspace = true 60 | 61 | [features] 62 | logger = ["log"] 63 | io-uring = ["dep:io-uring"] 64 | ethhash = ["dep:rlp", "dep:sha3", "dep:bytes"] 65 | test_utils = ["dep:rand"] 66 | 67 | [[bench]] 68 | name = "serializer" 69 | harness = false 70 | 71 | [lints] 72 | workspace = true 73 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore VSCode directory 2 | .vscode 3 | .idea 4 | 5 | compose-dev.yaml 6 | 7 | # ignore test databases 8 | *_db 9 | 10 | #### Below sections are auto-generated #### 11 | 12 | # Created by https://www.toptal.com/developers/gitignore/api/rust,visualstudiocode,vim,macos 13 | # Edit at https://www.toptal.com/developers/gitignore?templates=rust,visualstudiocode,vim,macos 14 | 15 | ### macOS ### 16 | # General 17 | .DS_Store 18 | .AppleDouble 19 | .LSOverride 20 | 21 | # Icon must end with two \r 22 | Icon 23 | 24 | 25 | # Thumbnails 26 | ._* 27 | 28 | # Files that might appear in the root of a volume 29 | .DocumentRevisions-V100 30 | .fseventsd 31 | .Spotlight-V100 32 | .TemporaryItems 33 | .Trashes 34 | .VolumeIcon.icns 35 | .com.apple.timemachine.donotpresent 36 | 37 | # Directories potentially created on remote AFP share 38 | .AppleDB 39 | .AppleDesktop 40 | Network Trash Folder 41 | Temporary Items 42 | .apdisk 43 | 44 | ### macOS Patch ### 45 | # iCloud generated files 46 | *.icloud 47 | 48 | ### Rust ### 49 | # Generated by Cargo 50 | # will have compiled files and executables 51 | debug/ 52 | target/ 53 | 54 | ### Golang ### 55 | go.work 56 | go.work.sum 57 | 58 | # These are backup files generated by rustfmt 59 | **/*.rs.bk 60 | 61 | # MSVC Windows builds of rustc generate these, 
which store debugging information 62 | *.pdb 63 | 64 | ### Vim ### 65 | # Swap 66 | [._]*.s[a-v][a-z] 67 | !*.svg # comment out if you don't need vector files 68 | [._]*.sw[a-p] 69 | [._]s[a-rt-v][a-z] 70 | [._]ss[a-gi-z] 71 | [._]sw[a-p] 72 | 73 | # Session 74 | Session.vim 75 | Sessionx.vim 76 | 77 | # Temporary 78 | .netrwhist 79 | *~ 80 | # Auto-generated tag files 81 | tags 82 | # Persistent undo 83 | [._]*.un~ 84 | 85 | ### VisualStudioCode ### 86 | .vscode/* 87 | !.vscode/settings.json 88 | !.vscode/tasks.json 89 | !.vscode/launch.json 90 | !.vscode/extensions.json 91 | !.vscode/*.code-snippets 92 | 93 | # Local History for Visual Studio Code 94 | .history/ 95 | 96 | # Built Visual Studio Code Extensions 97 | *.vsix 98 | 99 | ### VisualStudioCode Patch ### 100 | # Ignore all local history of files 101 | .history 102 | .ionide 103 | 104 | # End of https://www.toptal.com/developers/gitignore/api/rust,visualstudiocode,vim,macos 105 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/install-golang.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | INSTALL_DIR="/usr/local/go" 5 | 6 | # Check write permission 7 | if [ -d "$INSTALL_DIR" ]; then 8 | if [ ! -w "$INSTALL_DIR" ]; then 9 | echo "Error: $INSTALL_DIR exists but is not writable." >&2 10 | exit 1 11 | fi 12 | else 13 | if [ ! -w "$(dirname "$INSTALL_DIR")" ]; then 14 | echo "Error: Cannot create $INSTALL_DIR. $(dirname "$INSTALL_DIR") is not writable." >&2 15 | exit 1 16 | fi 17 | fi 18 | 19 | # Detect latest Go version 20 | LATEST_VERSION=$(curl -s https://go.dev/dl/?mode=json | \ 21 | grep -oE '"version": ?"go[0-9]+\.[0-9]+(\.[0-9]+)?"' | \ 22 | head -n1 | cut -d\" -f4) 23 | 24 | if [ -z "$LATEST_VERSION" ]; then 25 | echo "Error: Could not detect latest Go version." 
>&2 26 | exit 1 27 | fi 28 | 29 | # Detect platform 30 | UNAME_OS="$(uname -s | tr '[:upper:]' '[:lower:]')" 31 | UNAME_ARCH="$(uname -m)" 32 | 33 | # Map to Go arch 34 | case "$UNAME_ARCH" in 35 | x86_64) ARCH="amd64" ;; 36 | aarch64 | arm64) ARCH="arm64" ;; 37 | armv6l | armv7l) ARCH="arm" ;; 38 | *) echo "Unsupported architecture: $UNAME_ARCH" >&2; exit 1 ;; 39 | esac 40 | 41 | # Map to Go OS 42 | case "$UNAME_OS" in 43 | linux | darwin) OS="$UNAME_OS" ;; 44 | *) echo "Unsupported OS: $UNAME_OS" >&2; exit 1 ;; 45 | esac 46 | 47 | # Build tarball name and URL 48 | TARBALL="${LATEST_VERSION}.${OS}-${ARCH}.tar.gz" 49 | URL="https://go.dev/dl/${TARBALL}" 50 | 51 | # Validate URL 52 | echo "Checking URL: $URL" 53 | if ! curl --head --fail --silent "$URL" >/dev/null; then 54 | echo "Error: Go tarball not found at $URL" >&2 55 | exit 1 56 | fi 57 | 58 | # Download and install 59 | TMP_DIR=$(mktemp -d) 60 | cd "$TMP_DIR" 61 | echo "Downloading $TARBALL..." 62 | curl -fLO "$URL" 63 | 64 | # Validate archive format 65 | if ! file "$TARBALL" | grep -q 'gzip compressed data'; then 66 | echo "Error: Downloaded file is not a valid tar.gz archive." >&2 67 | exit 1 68 | fi 69 | 70 | echo "Removing any existing Go installation in $INSTALL_DIR..." 71 | rm -rf "$INSTALL_DIR" 72 | 73 | echo "Extracting Go to $INSTALL_DIR..." 
74 | tar -C "$(dirname "$INSTALL_DIR")" -xzf "$TARBALL" 75 | 76 | rm -rf "$TMP_DIR" 77 | 78 | echo "✅ Go $LATEST_VERSION installed to $INSTALL_DIR" 79 | echo "➕ Add to PATH if needed:" 80 | echo " export PATH=\$PATH:/usr/local/go/bin" 81 | 82 | -------------------------------------------------------------------------------- /firewood/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "firewood" 3 | version.workspace = true 4 | edition.workspace = true 5 | authors = [ 6 | "Angel Leon ", 7 | "Austin Larson <78000745+alarso16@users.noreply.github.com>", 8 | "Cesar <137245636+nytzuga@users.noreply.github.com>", 9 | "Dan Laine ", 10 | "Dan Sover ", 11 | "Hao Hao ", 12 | "Patrick O'Grady ", 13 | "Richard Pringle ", 14 | "Ron Kuris ", 15 | "Sam Batschelet ", 16 | "xinifinity <113067541+xinifinity@users.noreply.github.com>", 17 | ] 18 | description = "Firewood is an embedded key-value store, optimized to store blockchain state." 19 | license-file.workspace = true 20 | homepage.workspace = true 21 | repository.workspace = true 22 | readme.workspace = true 23 | rust-version.workspace = true 24 | 25 | [dependencies] 26 | # Workspace dependencies 27 | bytemuck_derive.workspace = true 28 | bytemuck.workspace = true 29 | coarsetime.workspace = true 30 | fastrace.workspace = true 31 | firewood-macros.workspace = true 32 | firewood-storage.workspace = true 33 | hex.workspace = true 34 | integer-encoding.workspace = true 35 | metrics.workspace = true 36 | thiserror.workspace = true 37 | # Regular dependencies 38 | typed-builder = "0.23.2" 39 | rayon = "1.11.0" 40 | parking_lot.workspace = true 41 | fjall = "2.11.2" 42 | derive-where = "1.6.0" 43 | weak-table = "0.3.2" 44 | 45 | [features] 46 | default = [] 47 | nightly = [] 48 | io-uring = ["firewood-storage/io-uring"] 49 | logger = ["firewood-storage/logger"] 50 | ethhash = ["firewood-storage/ethhash"] 51 | 52 | [dev-dependencies] 53 | # Workspace dependencies 54 | 
clap = { workspace = true, features = ['derive'] } 55 | criterion.workspace = true 56 | env_logger.workspace = true 57 | ethereum-types.workspace = true 58 | firewood-storage = { workspace = true, features = ["test_utils"] } 59 | firewood-triehash.workspace = true 60 | hex-literal.workspace = true 61 | pprof = { workspace = true, features = ["flamegraph"] } 62 | rand.workspace = true 63 | tempfile.workspace = true 64 | test-case.workspace = true 65 | # Regular dependencies 66 | ctor = "0.6.3" 67 | hash-db = "0.16.0" 68 | plain_hasher = "0.2.3" 69 | rlp = "0.6.1" 70 | sha3 = "0.10.8" 71 | 72 | [[bench]] 73 | name = "hashops" 74 | harness = false 75 | 76 | [lints] 77 | workspace = true 78 | 79 | [package.metadata.cargo-machete] 80 | ignored = ["coarsetime", "hex-literal"] 81 | -------------------------------------------------------------------------------- /ffi/keepalive.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | package ffi 5 | 6 | import "sync" 7 | 8 | // databaseKeepAliveHandle is added to types that hold a lease on the database 9 | // to ensure it is not closed while those types are still in use. 10 | // 11 | // This is necessary to prevent use-after-free bugs where a type holding a 12 | // reference to the database outlives the database itself. Even attempting to 13 | // free those objects after the database has been closed will lead to undefined 14 | // behavior, as a part of the underling Rust object will have already been freed. 15 | type databaseKeepAliveHandle struct { 16 | mu sync.Mutex 17 | // [Database.Close] blocks on this WaitGroup, which is set and incremented 18 | // by [newKeepAliveHandle], and decremented by 19 | // [databaseKeepAliveHandle.disown]. 20 | outstandingHandles *sync.WaitGroup 21 | } 22 | 23 | // init initializes the keep-alive handle to track a new outstanding handle. 
24 | func (h *databaseKeepAliveHandle) init(wg *sync.WaitGroup) { 25 | // lock not necessary today, but will be necessary in the future for types 26 | // that initialize the handle at some point after construction (#1429). 27 | h.mu.Lock() 28 | defer h.mu.Unlock() 29 | 30 | if h.outstandingHandles != nil { 31 | // setting the finalizer twice will also panic, so we're panicking 32 | // early to provide better context 33 | panic("keep-alive handle already initialized") 34 | } 35 | 36 | h.outstandingHandles = wg 37 | h.outstandingHandles.Add(1) 38 | } 39 | 40 | // disown indicates that the object owning this handle is no longer keeping the 41 | // database alive. If [attemptDisown] returns an error, disowning will only occur 42 | // if [disownEvenOnErr] is true. 43 | // 44 | // This method is safe to call multiple times; subsequent calls after the first 45 | // will continue to invoke [attemptDisown] but will not decrement the wait group 46 | // unless [databaseKeepAliveHandle.init] was called again in the meantime. 47 | func (h *databaseKeepAliveHandle) disown(disownEvenOnErr bool, attemptDisown func() error) error { 48 | h.mu.Lock() 49 | defer h.mu.Unlock() 50 | 51 | err := attemptDisown() 52 | 53 | if (err == nil || disownEvenOnErr) && h.outstandingHandles != nil { 54 | h.outstandingHandles.Done() 55 | // prevent calling `Done` multiple times if disown is called again, which 56 | // may happen when the finalizer runs after an explicit call to Drop or Commit. 57 | h.outstandingHandles = nil 58 | } 59 | 60 | return err 61 | } 62 | -------------------------------------------------------------------------------- /ffi/src/revision.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | use crate::{CreateIteratorResult, IteratorHandle}; 5 | use firewood::v2::api; 6 | use firewood::v2::api::{ArcDynDbView, BoxKeyValueIter, DbView, HashKey}; 7 | 8 | #[derive(Debug)] 9 | pub struct RevisionHandle { 10 | view: ArcDynDbView, 11 | } 12 | 13 | impl RevisionHandle { 14 | /// Creates a new revision handle for the provided database view. 15 | pub(crate) fn new(view: ArcDynDbView) -> RevisionHandle { 16 | RevisionHandle { view } 17 | } 18 | 19 | /// Creates an iterator on the revision starting from the given key. 20 | #[must_use] 21 | #[allow(clippy::missing_panics_doc)] 22 | pub fn iter_from(&self, first_key: Option<&[u8]>) -> CreateIteratorResult<'_> { 23 | let it = self 24 | .view 25 | .iter_option(first_key) 26 | .expect("infallible; see issue #1329"); 27 | CreateIteratorResult(IteratorHandle::new(self.view.clone(), it)) 28 | } 29 | } 30 | 31 | impl DbView for RevisionHandle { 32 | type Iter<'view> 33 | = BoxKeyValueIter<'view> 34 | where 35 | Self: 'view; 36 | 37 | fn root_hash(&self) -> Result, api::Error> { 38 | self.view.root_hash() 39 | } 40 | 41 | fn val(&self, key: K) -> Result, api::Error> { 42 | self.view.val(key.as_ref()) 43 | } 44 | 45 | fn single_key_proof(&self, key: K) -> Result { 46 | self.view.single_key_proof(key.as_ref()) 47 | } 48 | 49 | fn range_proof( 50 | &self, 51 | first_key: Option, 52 | last_key: Option, 53 | limit: Option, 54 | ) -> Result { 55 | self.view.range_proof( 56 | first_key.as_ref().map(AsRef::as_ref), 57 | last_key.as_ref().map(AsRef::as_ref), 58 | limit, 59 | ) 60 | } 61 | 62 | fn iter_option( 63 | &self, 64 | first_key: Option, 65 | ) -> Result, api::Error> { 66 | self.view.iter_option(first_key.as_ref().map(AsRef::as_ref)) 67 | } 68 | 69 | fn dump_to_string(&self) -> Result { 70 | self.view.dump_to_string() 71 | } 72 | } 73 | 74 | #[derive(Debug)] 75 | pub struct GetRevisionResult { 76 | pub handle: RevisionHandle, 77 | pub root_hash: HashKey, 78 | } 79 | 
-------------------------------------------------------------------------------- /benchmark/bootstrap/README.md: -------------------------------------------------------------------------------- 1 | # Bootstrap Testing Script 2 | 3 | This directory contains tools for automated Firewood blockchain database benchmarking on AWS. The `aws-launch.sh` script creates EC2 instances, sets up the complete testing environment, and executes C-chain (Avalanche) block bootstrapping tests. 4 | 5 | ## Prerequisites 6 | 7 | Before running the script, you'll need: 8 | 9 | - AWS CLI installed and configured on your machine 10 | - Authenticated AWS session: `aws sso login` 11 | - Your session should be configured to use the `Experimental` account. 12 | 13 | ## What It Does 14 | 15 | The `aws-launch.sh` script automatically: 16 | 17 | 1. Launches an EC2 instance with the specified instance type and configuration 18 | 2. Sets up the environment with all necessary dependencies (Git, Rust, Go, build tools, Grafana) 19 | 3. Creates user accounts with SSH access for the team 20 | 4. Clones and builds: 21 | - Firewood (from specified branch or default) 22 | - AvalancheGo (from specified branch or default) 23 | - LibEVM (from specified branch or default) 24 | 5. Downloads pre-existing blockchain data from S3 (1M, 10M, or 50M blocks) 25 | 6. 
Executes the bootstrapping benchmark to test Firewood's performance 26 | 27 | ## Usage 28 | 29 | ```bash 30 | ./aws-launch.sh [OPTIONS] 31 | ``` 32 | 33 | For a complete list of options, run: 34 | 35 | ```bash 36 | ./aws-launch.sh --help 37 | ``` 38 | 39 | ## Examples 40 | 41 | ### Run a large benchmark with spot pricing 42 | 43 | ```bash 44 | ./aws-launch.sh --instance-type i4i.xlarge --nblocks 10m --spot 45 | ``` 46 | 47 | ### Test multiple component branches together 48 | 49 | ```bash 50 | ./aws-launch.sh --firewood-branch my-firewood-branch --avalanchego-branch foo --libevm-commit bar 51 | ``` 52 | 53 | ### Preview a configuration without launching 54 | 55 | ```bash 56 | ./aws-launch.sh --dry-run --firewood-branch my-branch --nblocks 1m 57 | ``` 58 | 59 | ## Monitoring Results 60 | 61 | After launching, the script outputs an instance ID. You can: 62 | 63 | 1. **SSH to the instance** - Only authorized team members (rkuris, austin, aaron, brandon, amin, bernard, rodrigo) can SSH using their configured GPG hardware keys. Note: Your GPG agent must be properly configured for SSH support on your local machine. 64 | 65 | ```bash 66 | ssh @ 67 | ``` 68 | 69 | 2. **Monitor benchmark progress**: 70 | 71 | ```bash 72 | tail -f /var/log/bootstrap.log 73 | ``` 74 | 75 | 3. 
**Check build logs**: 76 | 77 | ```bash 78 | tail -f /mnt/nvme/ubuntu/firewood/build.log 79 | tail -f /mnt/nvme/ubuntu/avalanchego/build.log 80 | ``` 81 | -------------------------------------------------------------------------------- /ffi/tests/firewood/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ava-labs/firewood/ffi/tests/firewood 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.9 6 | 7 | require ( 8 | github.com/ava-labs/firewood-go/ffi v0.0.0 // this is replaced to use the parent folder 9 | github.com/stretchr/testify v1.10.0 10 | ) 11 | 12 | require github.com/ava-labs/avalanchego v1.13.1 13 | 14 | require ( 15 | github.com/BurntSushi/toml v1.2.0 // indirect 16 | github.com/beorn7/perks v1.0.1 // indirect 17 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect 18 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 19 | github.com/davecgh/go-spew v1.1.1 // indirect 20 | github.com/go-logr/logr v1.4.1 // indirect 21 | github.com/go-logr/stdr v1.2.2 // indirect 22 | github.com/google/renameio/v2 v2.0.0 // indirect 23 | github.com/gorilla/rpc v1.2.0 // indirect 24 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect 25 | github.com/mr-tron/base58 v1.2.0 // indirect 26 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 27 | github.com/pmezard/go-difflib v1.0.0 // indirect 28 | github.com/prometheus/client_golang v1.22.0 // indirect 29 | github.com/prometheus/client_model v0.6.1 // indirect 30 | github.com/prometheus/common v0.62.0 // indirect 31 | github.com/prometheus/procfs v0.15.1 // indirect 32 | go.opentelemetry.io/otel v1.22.0 // indirect 33 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect 34 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect 35 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 // indirect 36 | go.opentelemetry.io/otel/metric v1.22.0 // indirect 37 | 
go.opentelemetry.io/otel/sdk v1.22.0 // indirect 38 | go.opentelemetry.io/otel/trace v1.22.0 // indirect 39 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect 40 | go.uber.org/multierr v1.11.0 // indirect 41 | go.uber.org/zap v1.26.0 // indirect 42 | golang.org/x/crypto v0.45.0 // indirect 43 | golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect 44 | golang.org/x/net v0.47.0 // indirect 45 | golang.org/x/sys v0.38.0 // indirect 46 | golang.org/x/term v0.37.0 // indirect 47 | golang.org/x/text v0.31.0 // indirect 48 | gonum.org/v1/gonum v0.11.0 // indirect 49 | google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect 50 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect 51 | google.golang.org/grpc v1.66.0 // indirect 52 | google.golang.org/protobuf v1.36.5 // indirect 53 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 54 | gopkg.in/yaml.v3 v3.0.1 // indirect 55 | ) 56 | 57 | replace github.com/ava-labs/firewood-go/ffi => ../../ 58 | -------------------------------------------------------------------------------- /storage/src/hashedshunt.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use crate::{Children, HashType, Hashable, JoinedPath, SplitPath, ValueDigest}; 5 | 6 | /// A shunt for a hasheable trie that we can use to compute the hash of a node 7 | /// using component parts. 8 | pub struct HashableShunt<'a, P1, P2> { 9 | parent_prefix: P1, 10 | partial_path: P2, 11 | value: Option>, 12 | child_hashes: Children>, 13 | } 14 | 15 | impl<'a, P1: SplitPath, P2: SplitPath> HashableShunt<'a, P1, P2> { 16 | /// Creates a new [`HashableShunt`]. 
17 | #[must_use] 18 | pub const fn new( 19 | parent_prefix: P1, 20 | partial_path: P2, 21 | value: Option>, 22 | child_hashes: Children>, 23 | ) -> Self { 24 | Self { 25 | parent_prefix, 26 | partial_path, 27 | value, 28 | child_hashes, 29 | } 30 | } 31 | 32 | /// Calculates the hash of this shunt. 33 | pub fn to_hash(&self) -> HashType { 34 | crate::Preimage::to_hash(self) 35 | } 36 | } 37 | 38 | impl std::fmt::Debug for HashableShunt<'_, P1, P2> { 39 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 40 | f.debug_struct("HashableShunt") 41 | .field("parent_prefix", &self.parent_prefix.display()) 42 | .field("partial_path", &self.partial_path.display()) 43 | .field( 44 | "value", 45 | &self.value.as_ref().map(|v| v.as_ref().map(hex::encode)), 46 | ) 47 | .field("child_hashes", &self.child_hashes) 48 | .field("hash", &self.to_hash()) 49 | .finish() 50 | } 51 | } 52 | 53 | impl Hashable for HashableShunt<'_, P1, P2> { 54 | type LeadingPath<'a> 55 | = P1 56 | where 57 | Self: 'a; 58 | 59 | type PartialPath<'a> 60 | = P2 61 | where 62 | Self: 'a; 63 | 64 | type FullPath<'a> 65 | = JoinedPath 66 | where 67 | Self: 'a; 68 | 69 | fn parent_prefix_path(&self) -> Self::LeadingPath<'_> { 70 | self.parent_prefix 71 | } 72 | 73 | fn partial_path(&self) -> Self::PartialPath<'_> { 74 | self.partial_path 75 | } 76 | 77 | fn full_path(&self) -> Self::FullPath<'_> { 78 | self.parent_prefix_path().append(self.partial_path()) 79 | } 80 | 81 | fn value_digest(&self) -> Option> { 82 | self.value.clone() 83 | } 84 | 85 | fn children(&self) -> Children> { 86 | self.child_hashes.clone() 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /benchmark/src/tenkrandom.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | #![expect( 5 | clippy::arithmetic_side_effects, 6 | reason = "Found 7 occurrences after enabling the lint." 7 | )] 8 | 9 | use std::error::Error; 10 | use std::time::Instant; 11 | 12 | use firewood::db::{BatchOp, Db}; 13 | use firewood::logger::debug; 14 | use firewood::v2::api::{Db as _, Proposal as _}; 15 | 16 | use crate::{Args, TestRunner}; 17 | use sha2::{Digest, Sha256}; 18 | 19 | #[derive(Clone, Default)] 20 | pub struct TenKRandom; 21 | 22 | impl TestRunner for TenKRandom { 23 | fn run(&self, db: &Db, args: &Args) -> Result<(), Box> { 24 | let mut low = 0; 25 | let mut high = args.global_opts.number_of_batches * args.global_opts.batch_size; 26 | let twenty_five_pct = args.global_opts.batch_size / 4; 27 | 28 | let start = Instant::now(); 29 | 30 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes { 31 | let batch: Vec> = Self::generate_inserts(high, twenty_five_pct) 32 | .chain(generate_deletes(low, twenty_five_pct)) 33 | .chain(generate_updates(low + high / 2, twenty_five_pct * 2, low)) 34 | .collect(); 35 | let proposal = db.propose(batch).expect("proposal should succeed"); 36 | proposal.commit()?; 37 | low += twenty_five_pct; 38 | high += twenty_five_pct; 39 | } 40 | Ok(()) 41 | } 42 | } 43 | fn generate_updates( 44 | start: u64, 45 | count: u64, 46 | low: u64, 47 | ) -> impl Iterator, Box<[u8]>>> { 48 | let hash_of_low: Box<[u8]> = Sha256::digest(low.to_ne_bytes())[..].into(); 49 | (start..start + count) 50 | .map(|inner_key| { 51 | let digest = Sha256::digest(inner_key.to_ne_bytes())[..].into(); 52 | debug!( 53 | "updating {:?} with digest {} to {}", 54 | inner_key, 55 | hex::encode(&digest), 56 | hex::encode(&hash_of_low) 57 | ); 58 | (digest, hash_of_low.clone()) 59 | }) 60 | .map(|(key, value)| BatchOp::Put { key, value }) 61 | .collect::>() 62 | .into_iter() 63 | } 64 | fn generate_deletes(start: u64, count: u64) -> impl Iterator, Box<[u8]>>> { 65 | (start..start + count) 66 | .map(|key| { 67 | let digest = 
Sha256::digest(key.to_ne_bytes())[..].into(); 68 | debug!("deleting {:?} with digest {}", key, hex::encode(&digest)); 69 | digest 70 | }) 71 | .map(|key| BatchOp::Delete { key }) 72 | .collect::>() 73 | .into_iter() 74 | } 75 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "benchmark", 4 | "ffi", 5 | "firewood-macros", 6 | "firewood", 7 | "fwdctl", 8 | "storage", 9 | "triehash", 10 | ] 11 | resolver = "2" 12 | 13 | [workspace.package] 14 | # NOTE: when bumping to 0.1.0, this will be removed and each crate will have its 15 | # version set independently. 16 | version = "0.0.18" 17 | edition = "2024" 18 | license-file = "LICENSE.md" 19 | homepage = "https://avalabs.org" 20 | repository = "https://github.com/ava-labs/firewood" 21 | readme = "README.md" 22 | rust-version = "1.91.0" 23 | 24 | [profile.release] 25 | debug = true 26 | 27 | [profile.maxperf] 28 | panic = "abort" 29 | codegen-units = 1 30 | lto = "fat" 31 | debug = false 32 | inherits = "release" 33 | 34 | [workspace.lints.rust] 35 | unsafe_code = "deny" 36 | missing_debug_implementations = "warn" 37 | 38 | [workspace.lints.clippy] 39 | unwrap_used = "warn" 40 | indexing_slicing = "warn" 41 | explicit_deref_methods = "warn" 42 | missing_const_for_fn = "warn" 43 | arithmetic_side_effects = "warn" 44 | undocumented_unsafe_blocks = "deny" 45 | # lower the priority of pedantic to allow overriding the lints it includes 46 | pedantic = { level = "warn", priority = -1 } 47 | # These lints are from pedantic but allowed. They are a bit too pedantic and 48 | # encourage making backwards incompatible changes. 49 | needless_pass_by_value = "allow" 50 | unnecessary_wraps = "allow" 51 | unused_self = "allow" 52 | # Ignore interger casts. 
This is to avoid unnecessary `try_into` calls for usize 53 | # to u64 and vice versa and should be re-enabled if/when clippy has a separate 54 | # lint for usize vs non-usize truncation. 55 | cast_possible_truncation = "allow" 56 | 57 | [workspace.dependencies] 58 | # workspace local packages 59 | firewood = { path = "firewood", version = "0.0.18" } 60 | firewood-macros = { path = "firewood-macros", version = "0.0.18" } 61 | firewood-storage = { path = "storage", version = "0.0.18" } 62 | firewood-ffi = { path = "ffi", version = "0.0.18" } 63 | 64 | # no longer bumping with workspace 65 | firewood-triehash = { path = "triehash", version = "0.0.16" } 66 | 67 | # common dependencies 68 | aquamarine = "0.6.0" 69 | bytemuck = "1.24.0" 70 | bytemuck_derive = "1.10.2" 71 | clap = { version = "4.5.53", features = ["derive"] } 72 | coarsetime = "0.1.36" 73 | env_logger = "0.11.8" 74 | fastrace = "0.7.15" 75 | hex = "0.4.3" 76 | integer-encoding = "4.1.0" 77 | log = "0.4.29" 78 | metrics = "0.24.3" 79 | metrics-util = "0.20.1" 80 | nonzero_ext = "0.3.0" 81 | rand_distr = "0.5.1" 82 | sha2 = "0.10.9" 83 | smallvec = { version = "1.15.1", features = ["write", "union", "const_new"] } 84 | test-case = "3.3.1" 85 | thiserror = "2.0.17" 86 | parking_lot = "0.12.5" 87 | 88 | # common dev dependencies 89 | criterion = "0.8.1" 90 | ethereum-types = "0.16.0" 91 | hex-literal = "1.1.0" 92 | pprof = "0.15.0" 93 | rand = "0.9.2" 94 | tempfile = "3.23.0" 95 | -------------------------------------------------------------------------------- /ffi/metrics_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | package ffi 5 | 6 | import ( 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "os" 11 | "path/filepath" 12 | "testing" 13 | "time" 14 | 15 | "github.com/stretchr/testify/require" 16 | 17 | dto "github.com/prometheus/client_model/go" 18 | ) 19 | 20 | // Test calling metrics exporter along with gathering metrics 21 | // This lives under one test as we can only instantiate the global recorder once 22 | func TestMetrics(t *testing.T) { 23 | r := require.New(t) 24 | ctx := t.Context() 25 | 26 | // test params 27 | var ( 28 | logPath = filepath.Join(t.TempDir(), "firewood.log") 29 | metricsPort = uint16(3000) 30 | ) 31 | 32 | db := newTestDatabase(t) 33 | r.NoError(StartMetricsWithExporter(metricsPort)) 34 | 35 | logConfig := &LogConfig{ 36 | Path: logPath, 37 | FilterLevel: "trace", 38 | } 39 | 40 | var logsDisabled bool 41 | if err := StartLogs(logConfig); err != nil { 42 | r.Contains(err.Error(), "Logging is not available") 43 | logsDisabled = true 44 | } 45 | 46 | // Populate DB 47 | keys, vals := kvForTest(10) 48 | _, err := db.Update(keys, vals) 49 | r.NoError(err) 50 | 51 | req, err := http.NewRequestWithContext( 52 | ctx, 53 | http.MethodGet, 54 | fmt.Sprintf("http://localhost:%d", metricsPort), 55 | nil, 56 | ) 57 | r.NoError(err) 58 | 59 | client := &http.Client{Timeout: 10 * time.Second} 60 | resp, err := client.Do(req) 61 | r.NoError(err) 62 | 63 | body, err := io.ReadAll(resp.Body) 64 | r.NoError(err) 65 | r.NoError(resp.Body.Close()) 66 | 67 | // Check that batch op was recorded 68 | r.Contains(string(body), "firewood_ffi_batch 1") 69 | 70 | g := Gatherer{} 71 | metricsFamily, err := g.Gather() 72 | r.NoError(err) 73 | 74 | expectedMetrics := map[string]dto.MetricType{ 75 | "firewood_ffi_batch": dto.MetricType_COUNTER, 76 | "firewood_proposal_commit": dto.MetricType_COUNTER, 77 | "firewood_proposal_commit_ms": dto.MetricType_COUNTER, 78 | "firewood_ffi_propose_ms": dto.MetricType_COUNTER, 79 | "firewood_ffi_commit_ms": dto.MetricType_COUNTER, 80 | 
"firewood_ffi_batch_ms": dto.MetricType_COUNTER, 81 | "firewood_flush_nodes": dto.MetricType_COUNTER, 82 | "firewood_insert": dto.MetricType_COUNTER, 83 | "firewood_space_from_end": dto.MetricType_COUNTER, 84 | } 85 | 86 | for k, v := range expectedMetrics { 87 | var d *dto.MetricFamily 88 | for _, m := range metricsFamily { 89 | if *m.Name == k { 90 | d = m 91 | } 92 | } 93 | r.NotNil(d) 94 | r.Equal(v, *d.Type) 95 | } 96 | 97 | if !logsDisabled { 98 | // logs should be non-empty if logging with trace filter level 99 | f, err := os.ReadFile(logPath) 100 | r.NoError(err) 101 | r.NotEmpty(f) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /ffi/src/value.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | mod borrowed; 5 | mod display_hex; 6 | mod hash_key; 7 | mod kvp; 8 | mod owned; 9 | mod results; 10 | 11 | pub use self::borrowed::{BorrowedBytes, BorrowedKeyValuePairs, BorrowedSlice}; 12 | use self::display_hex::DisplayHex; 13 | pub use self::hash_key::HashKey; 14 | pub use self::kvp::{KeyValuePair, OwnedKeyValueBatch, OwnedKeyValuePair}; 15 | pub use self::owned::{OwnedBytes, OwnedSlice}; 16 | pub(crate) use self::results::{CResult, NullHandleResult}; 17 | pub use self::results::{ 18 | ChangeProofResult, HandleResult, HashResult, IteratorResult, KeyValueBatchResult, 19 | KeyValueResult, NextKeyRangeResult, ProposalResult, RangeProofResult, RevisionResult, 20 | ValueResult, VoidResult, 21 | }; 22 | 23 | /// Maybe is a C-compatible optional type using a tagged union pattern. 24 | /// 25 | /// FFI methods and types can use this to represent optional values where `Optional` 26 | /// does not work due to it not having a C-compatible layout. 
27 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] 28 | #[repr(C)] 29 | pub enum Maybe { 30 | /// No value present. 31 | None, 32 | /// A value is present. 33 | Some(T), 34 | } 35 | 36 | impl Maybe { 37 | /// Returns true if the `Maybe` contains a value. 38 | pub const fn is_some(&self) -> bool { 39 | matches!(self, Maybe::Some(_)) 40 | } 41 | 42 | /// Returns true if the `Maybe` does not contain a value. 43 | pub const fn is_none(&self) -> bool { 44 | matches!(self, Maybe::None) 45 | } 46 | 47 | /// Converts from `&Maybe` to `Maybe<&T>`. 48 | pub const fn as_ref(&self) -> Maybe<&T> { 49 | match self { 50 | Maybe::None => Maybe::None, 51 | Maybe::Some(v) => Maybe::Some(v), 52 | } 53 | } 54 | 55 | /// Converts from `&mut Maybe` to `Maybe<&mut T>`. 56 | pub const fn as_mut(&mut self) -> Maybe<&mut T> { 57 | match self { 58 | Maybe::None => Maybe::None, 59 | Maybe::Some(v) => Maybe::Some(v), 60 | } 61 | } 62 | 63 | /// Maps a `Maybe` to `Maybe` by applying a function to a contained value. 64 | pub fn map U>(self, f: F) -> Maybe { 65 | match self { 66 | Maybe::None => Maybe::None, 67 | Maybe::Some(v) => Maybe::Some(f(v)), 68 | } 69 | } 70 | 71 | /// Converts from `Maybe` to `Option`. 72 | pub fn into_option(self) -> Option { 73 | match self { 74 | Maybe::None => None, 75 | Maybe::Some(v) => Some(v), 76 | } 77 | } 78 | } 79 | 80 | impl From> for Maybe { 81 | fn from(opt: Option) -> Self { 82 | match opt { 83 | None => Maybe::None, 84 | Some(v) => Maybe::Some(v), 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /ffi/metrics.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | package ffi 5 | 6 | //go:generate go run generate_cgo.go 7 | 8 | // #include 9 | // #include "firewood.h" 10 | import "C" 11 | 12 | import ( 13 | "runtime" 14 | "strings" 15 | 16 | "github.com/prometheus/client_golang/prometheus" 17 | "github.com/prometheus/common/expfmt" 18 | 19 | dto "github.com/prometheus/client_model/go" 20 | ) 21 | 22 | var _ prometheus.Gatherer = (*Gatherer)(nil) 23 | 24 | type Gatherer struct{} 25 | 26 | func (Gatherer) Gather() ([]*dto.MetricFamily, error) { 27 | metrics, err := GatherMetrics() 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | reader := strings.NewReader(metrics) 33 | 34 | var parser expfmt.TextParser 35 | parsedMetrics, err := parser.TextToMetricFamilies(reader) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | lst := make([]*dto.MetricFamily, 0, len(parsedMetrics)) 41 | for _, v := range parsedMetrics { 42 | lst = append(lst, v) 43 | } 44 | 45 | return lst, nil 46 | } 47 | 48 | // Starts global recorder for metrics. 49 | // This function only needs to be called once. 50 | // An error is returned if this method is called a second time, or if it is 51 | // called after StartMetricsWithExporter. 52 | // This is best used in conjunction with the [Gatherer] type to collect metrics. 53 | func StartMetrics() error { 54 | return getErrorFromVoidResult(C.fwd_start_metrics()) 55 | } 56 | 57 | // Start global recorder for metrics along with an HTTP exporter. 58 | // This function only needs to be called once. 59 | // An error is returned if this method is called a second time, if it is 60 | // called after StartMetrics, or if the exporter failed to start. 61 | func StartMetricsWithExporter(metricsPort uint16) error { 62 | return getErrorFromVoidResult(C.fwd_start_metrics_with_exporter(C.uint16_t(metricsPort))) 63 | } 64 | 65 | // Collect metrics from global recorder 66 | // Returns an error if the global recorder is not initialized. 
67 | // This method must be called after StartMetrics or StartMetricsWithExporter 68 | func GatherMetrics() (string, error) { 69 | bytes, err := getValueFromValueResult(C.fwd_gather()) 70 | if err != nil { 71 | return "", err 72 | } 73 | 74 | return string(bytes), nil 75 | } 76 | 77 | // LogConfig configures logs for this process. 78 | type LogConfig struct { 79 | Path string 80 | FilterLevel string 81 | } 82 | 83 | // Starts global logs. 84 | // This function only needs to be called once. 85 | // An error is returned if this method is called a second time. 86 | func StartLogs(config *LogConfig) error { 87 | var pinner runtime.Pinner 88 | defer pinner.Unpin() 89 | 90 | args := C.struct_LogArgs{ 91 | path: newBorrowedBytes([]byte(config.Path), &pinner), 92 | filter_level: newBorrowedBytes([]byte(config.FilterLevel), &pinner), 93 | } 94 | 95 | return getErrorFromVoidResult(C.fwd_start_logs(args)) 96 | } 97 | -------------------------------------------------------------------------------- /fwdctl/src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![doc = include_str!("../README.md")] 5 | 6 | use std::path::PathBuf; 7 | 8 | use clap::{Parser, Subcommand}; 9 | use firewood::v2::api; 10 | 11 | pub mod check; 12 | pub mod create; 13 | pub mod delete; 14 | pub mod dump; 15 | pub mod get; 16 | pub mod graph; 17 | pub mod insert; 18 | pub mod root; 19 | 20 | #[derive(Clone, Debug, Parser)] 21 | pub struct DatabasePath { 22 | /// The database path. 
Defaults to firewood 23 | #[arg( 24 | long = "db", 25 | short = 'd', 26 | required = false, 27 | value_name = "DB_DIR_NAME", 28 | default_value_os_t = default_db_path(), 29 | help = "Name of the database directory" 30 | )] 31 | pub dbpath: PathBuf, 32 | } 33 | 34 | #[derive(Parser)] 35 | #[command(author, version, about, long_about = None)] 36 | #[command(propagate_version = true)] 37 | #[command(version = concat!(env!("CARGO_PKG_VERSION"), " (", env!("GIT_COMMIT_SHA"), ", ", env!("ETHHASH_FEATURE"), ")"))] 38 | struct Cli { 39 | #[command(subcommand)] 40 | command: Commands, 41 | #[arg( 42 | long, 43 | short = 'l', 44 | required = false, 45 | help = "Log level. Respects RUST_LOG.", 46 | value_name = "LOG_LEVEL", 47 | num_args = 1, 48 | value_parser = ["debug", "info"], 49 | default_value_t = String::from("info"), 50 | )] 51 | log_level: String, 52 | } 53 | 54 | #[derive(Subcommand)] 55 | enum Commands { 56 | /// Create a new firewood database 57 | Create(create::Options), 58 | /// Insert a key/value pair into the database 59 | Insert(insert::Options), 60 | /// Get values associated with a key 61 | Get(get::Options), 62 | /// Delete values associated with a key 63 | Delete(delete::Options), 64 | /// Display key/value trie root hash 65 | Root(root::Options), 66 | /// Dump contents of key/value store 67 | Dump(dump::Options), 68 | /// Produce a dot file of the database 69 | Graph(graph::Options), 70 | /// Runs the checker on the database 71 | Check(check::Options), 72 | } 73 | 74 | fn main() -> Result<(), api::Error> { 75 | let cli = Cli::parse(); 76 | 77 | env_logger::init_from_env( 78 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, cli.log_level.clone()), 79 | ); 80 | 81 | match &cli.command { 82 | Commands::Create(opts) => create::run(opts), 83 | Commands::Insert(opts) => insert::run(opts), 84 | Commands::Get(opts) => get::run(opts), 85 | Commands::Delete(opts) => delete::run(opts), 86 | Commands::Root(opts) => root::run(opts), 87 | 
Commands::Dump(opts) => dump::run(opts), 88 | Commands::Graph(opts) => graph::run(opts), 89 | Commands::Check(opts) => check::run(opts), 90 | } 91 | } 92 | 93 | fn default_db_path() -> PathBuf { 94 | PathBuf::from("firewood") 95 | } 96 | -------------------------------------------------------------------------------- /.github/.golangci.yaml.patch: -------------------------------------------------------------------------------- 1 | --- .github/.golangci.yaml 2025-11-26 17:52:36.814736814 +0000 2 | +++ ffi/.golangci.yaml 2025-11-26 17:52:31.624079933 +0000 3 | @@ -40,7 +40,7 @@ 4 | - standard 5 | - default 6 | - blank 7 | - - prefix(github.com/ava-labs/avalanchego) 8 | + - prefix(github.com/ava-labs/firewood/ffi) 9 | - alias 10 | - dot 11 | custom-order: true 12 | @@ -93,8 +93,6 @@ 13 | rules: 14 | packages: 15 | deny: 16 | - - pkg: container/list 17 | - desc: github.com/ava-labs/avalanchego/utils/linked should be used instead. 18 | - pkg: github.com/golang/mock/gomock 19 | desc: go.uber.org/mock/gomock should be used instead. 20 | - pkg: github.com/stretchr/testify/assert 21 | @@ -109,29 +107,10 @@ 22 | forbidigo: 23 | # Forbid the following identifiers (list of regexp). 24 | forbid: 25 | - - pattern: require\.Error$(# ErrorIs should be used instead)? 26 | - - pattern: require\.ErrorContains$(# ErrorIs should be used instead)? 27 | - - pattern: require\.EqualValues$(# Equal should be used instead)? 28 | - - pattern: require\.NotEqualValues$(# NotEqual should be used instead)? 29 | - pattern: ^(t|b|tb|f)\.(Fatal|Fatalf|Error|Errorf)$(# the require library should be used instead)? 30 | - pattern: ^sort\.(Slice|Strings)$(# the slices package should be used instead)? 31 | # Exclude godoc examples from forbidigo checks. 
32 | exclude-godoc-examples: false 33 | - gosec: 34 | - excludes: 35 | - - G107 # Url provided to HTTP request as taint input https://securego.io/docs/rules/g107 36 | - - G115 # TODO(marun) Enable this ruleset in a follow-up PR 37 | - importas: 38 | - # Do not allow unaliased imports of aliased packages. 39 | - no-unaliased: false 40 | - # Do not allow non-required aliases. 41 | - no-extra-aliases: false 42 | - # List of aliases 43 | - alias: 44 | - - pkg: github.com/ava-labs/avalanchego/utils/math 45 | - alias: safemath 46 | - - pkg: github.com/ava-labs/avalanchego/utils/json 47 | - alias: avajson 48 | revive: 49 | rules: 50 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr 51 | @@ -195,17 +174,6 @@ 52 | # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break 53 | - name: useless-break 54 | disabled: false 55 | - spancheck: 56 | - # https://github.com/jjti/go-spancheck#checks 57 | - checks: 58 | - - end 59 | - staticcheck: 60 | - # https://staticcheck.io/docs/options#checks 61 | - checks: 62 | - - all 63 | - - -SA6002A # Storing non-pointer values in sync.Pool allocates memory 64 | - - -SA1019 # Using a deprecated function, variable, constant or field 65 | - - -QF1008 # Unnecessary embedded expressions 66 | tagalign: 67 | align: true 68 | sort: true 69 | -------------------------------------------------------------------------------- /storage/src/hashers/merkledb.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![cfg_attr( 5 | not(feature = "ethhash"), 6 | expect( 7 | clippy::arithmetic_side_effects, 8 | reason = "Found 1 occurrences after enabling the lint." 
9 | ) 10 | )] 11 | 12 | use crate::hashednode::{HasUpdate, Hashable, Preimage}; 13 | use crate::{TrieHash, TriePath, TriePathAsPackedBytes, ValueDigest}; 14 | /// Merkledb compatible hashing algorithm. 15 | use integer_encoding::VarInt; 16 | use sha2::{Digest, Sha256}; 17 | 18 | const MAX_VARINT_SIZE: usize = 10; 19 | const BITS_PER_NIBBLE: u64 = 4; 20 | 21 | impl HasUpdate for Sha256 { 22 | fn update>(&mut self, data: T) { 23 | sha2::Digest::update(self, data); 24 | } 25 | } 26 | 27 | impl Preimage for T { 28 | fn to_hash(&self) -> TrieHash { 29 | let mut hasher = Sha256::new(); 30 | 31 | self.write(&mut hasher); 32 | hasher.finalize().into() 33 | } 34 | 35 | fn write(&self, buf: &mut impl HasUpdate) { 36 | let children = self.children(); 37 | 38 | let num_children = children.count() as u64; 39 | 40 | add_varint_to_buf(buf, num_children); 41 | 42 | for (index, hash) in &children { 43 | if let Some(hash) = hash { 44 | add_varint_to_buf(buf, u64::from(index.as_u8())); 45 | buf.update(hash); 46 | } 47 | } 48 | 49 | // Add value digest (if any) to hash pre-image 50 | add_value_digest_to_buf(buf, self.value_digest()); 51 | 52 | // Add key length (in bits) to hash pre-image 53 | let key = self.full_path(); 54 | let key_bit_len = BITS_PER_NIBBLE * key.len() as u64; 55 | add_varint_to_buf(buf, key_bit_len); 56 | // Add key to hash pre-image 57 | key.as_packed_bytes().for_each(|byte| buf.update([byte])); 58 | } 59 | } 60 | 61 | fn add_value_digest_to_buf>( 62 | buf: &mut H, 63 | value_digest: Option>, 64 | ) { 65 | let Some(value_digest) = value_digest else { 66 | let value_exists: u8 = 0; 67 | buf.update([value_exists]); 68 | return; 69 | }; 70 | 71 | let value_exists: u8 = 1; 72 | buf.update([value_exists]); 73 | 74 | add_len_and_value_to_buf(buf, value_digest.make_hash()); 75 | } 76 | 77 | #[inline] 78 | /// Writes the length of `value` and `value` to `buf`. 
79 | fn add_len_and_value_to_buf>(buf: &mut H, value: V) { 80 | let value_len = value.as_ref().len(); 81 | buf.update([value_len as u8]); 82 | buf.update(value); 83 | } 84 | 85 | #[inline] 86 | /// Encodes `value` as a varint and writes it to `buf`. 87 | fn add_varint_to_buf(buf: &mut H, value: u64) { 88 | let mut buf_arr = [0u8; MAX_VARINT_SIZE]; 89 | let len = value.encode_var(&mut buf_arr); 90 | buf.update( 91 | buf_arr 92 | .get(..len) 93 | .expect("length is always less than MAX_VARINT_SIZE"), 94 | ); 95 | } 96 | -------------------------------------------------------------------------------- /storage/src/iter.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | //! Internal utilities for working with iterators. 5 | 6 | /// Writes a limited number of items to a writer, separated by a specified separator. 7 | /// 8 | /// - If `limit` is `Some(0)`, it will only write the number of hidden items. 9 | /// - If `limit` is `Some(n)`, it will write at most `n` items, followed by a message 10 | /// indicating how many more items were not written. 11 | /// - If `limit` is `None`, it will write all items without any limit. 12 | /// Caution: if limit is None, this function will not work with iterators that do not terminate. 13 | /// 14 | /// # Arguments 15 | /// - `writer`: The writer to which the items will be written. 16 | /// - `iter`: An iterator of items that implement `std::fmt::Display`. 17 | /// - `sep`: A separator that will be used between items. 18 | /// - `limit`: An optional limit on the number of items to write. 19 | /// 20 | /// # Returns 21 | /// A `std::fmt::Result` indicating success or failure of the write operation. 
22 | pub(crate) fn write_limited_with_sep( 23 | writer: &mut (impl std::fmt::Write + ?Sized), 24 | iter: impl IntoIterator, 25 | sep: impl std::fmt::Display, 26 | limit: Option, 27 | ) -> std::fmt::Result { 28 | match limit { 29 | Some(0) => { 30 | let hidden_count = iter.into_iter().count(); 31 | write!(writer, "({hidden_count} hidden)") 32 | } 33 | Some(limit) => { 34 | let mut iter = iter.into_iter(); 35 | let to_display_iter = iter.by_ref().take(limit); 36 | write_all_with_sep(writer, to_display_iter, &sep)?; 37 | 38 | let hidden_count = iter.count(); 39 | if hidden_count > 0 { 40 | write!(writer, "{sep}... ({hidden_count} more hidden)")?; 41 | } 42 | Ok(()) 43 | } 44 | None => write_all_with_sep(writer, iter, &sep), 45 | } 46 | } 47 | 48 | // Helper function that writes all items in the iterator. 49 | // Caution: this function will not work with iterators that do not terminate. 50 | fn write_all_with_sep( 51 | writer: &mut (impl std::fmt::Write + ?Sized), 52 | iter: impl IntoIterator, 53 | sep: &impl std::fmt::Display, 54 | ) -> std::fmt::Result { 55 | let mut iter = iter.into_iter(); 56 | if let Some(item) = iter.next() { 57 | write!(writer, "{item}")?; 58 | for item in iter { 59 | write!(writer, "{sep}{item}")?; 60 | } 61 | } 62 | Ok(()) 63 | } 64 | 65 | #[cfg(test)] 66 | mod tests { 67 | #![expect(clippy::unwrap_used)] 68 | 69 | use super::*; 70 | use test_case::test_case; 71 | 72 | #[test_case(Some(0usize), "(4 hidden)"; "with limit 0")] 73 | #[test_case(Some(2usize), "apple, banana, ... 
(2 more hidden)"; "with limit 2")] 74 | #[test_case(None, "apple, banana, cherry, date"; "without limit")] 75 | fn test_write_iter(limit: Option, expected: &str) { 76 | let mut output = String::new(); 77 | let items = ["apple", "banana", "cherry", "date"]; 78 | write_limited_with_sep(&mut output, &items, ", ", limit).unwrap(); 79 | assert_eq!(output, expected); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /ffi/src/arc_cache.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | //! A simple single-item cache for database views. 5 | //! 6 | //! This module provides [`ArcCache`], a thread-safe cache that holds at most one 7 | //! key-value pair. It's specifically designed to cache database views in the FFI 8 | //! layer to improve performance by avoiding repeated view creation for the same 9 | //! root hash during database operations. 10 | 11 | use parking_lot::{Mutex, MutexGuard}; 12 | use std::sync::Arc; 13 | 14 | /// A thread-safe single-item cache that stores key-value pairs as `Arc`. 15 | /// 16 | /// This cache is optimized for scenarios where you frequently access the same 17 | /// item and want to avoid expensive recomputation. It holds at most one cached 18 | /// entry and replaces it when a different key is requested. 19 | /// 20 | /// The cache is thread-safe and uses a mutex to protect concurrent access. 21 | /// Values are stored as `Arc` to allow cheap cloning and sharing across 22 | /// threads. 23 | #[derive(Debug)] 24 | pub struct ArcCache { 25 | cache: Mutex)>>, 26 | } 27 | 28 | impl ArcCache { 29 | pub const fn new() -> Self { 30 | ArcCache { 31 | cache: Mutex::new(None), 32 | } 33 | } 34 | 35 | /// Gets the cached value for the given key, or creates and caches a new value. 
36 | /// 37 | /// If the cache contains an entry with a key equal to the provided key, 38 | /// returns a clone of the cached `Arc`. Otherwise, calls the factory 39 | /// function to create a new value, caches it, and returns it. 40 | /// 41 | /// # Cache Behavior 42 | /// 43 | /// - Cache hit: Returns the cached value immediately 44 | /// - Cache miss: Clears any existing cache entry, calls factory, caches the result 45 | /// - Factory error: Cache is cleared and the error is propagated 46 | /// 47 | /// # Arguments 48 | /// 49 | /// * `key` - The key to look up or cache 50 | /// * `factory` - A function that creates the value if not cached. It receives 51 | /// a reference to the key as an argument. 52 | /// 53 | /// # Errors 54 | /// 55 | /// Returns any error produced by the factory function. 56 | pub fn get_or_try_insert_with( 57 | &self, 58 | key: K, 59 | factory: impl FnOnce(&K) -> Result, E>, 60 | ) -> Result, E> { 61 | let mut cache = self.lock(); 62 | if let Some((cached_key, value)) = cache.as_ref() 63 | && *cached_key == key 64 | { 65 | return Ok(Arc::clone(value)); 66 | } 67 | 68 | // clear the cache before running the factory in case it fails 69 | *cache = None; 70 | 71 | let value = factory(&key)?; 72 | *cache = Some((key, Arc::clone(&value))); 73 | 74 | Ok(value) 75 | } 76 | 77 | /// Clears the cache, removing any stored key-value pair. 78 | pub fn clear(&self) { 79 | self.lock().take(); 80 | } 81 | 82 | fn lock(&self) -> MutexGuard<'_, Option<(K, Arc)>> { 83 | self.cache.lock() 84 | } 85 | } 86 | 87 | impl Default for ArcCache { 88 | fn default() -> Self { 89 | Self::new() 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /storage/src/macros.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | #[macro_export] 5 | /// Macro to register and use a counter metric with description and labels. 6 | /// This macro is a wrapper around the `metrics` crate's `counter!` and `describe_counter!` 7 | /// macros. It ensures that the description is registered just once. 8 | /// 9 | /// Usage: 10 | /// `firewood_counter!("metric_name", "description")` 11 | /// `firewood_counter!("metric_name", "description", "label" => "value")` 12 | /// 13 | /// Call `.increment(val)` or `.absolute(val)` on the result as appropriate. 14 | macro_rules! firewood_counter { 15 | // With labels 16 | ($name:expr, $desc:expr, $($labels:tt)+) => { 17 | { 18 | static ONCE: std::sync::Once = std::sync::Once::new(); 19 | ONCE.call_once(|| { 20 | metrics::describe_counter!($name, $desc); 21 | }); 22 | metrics::counter!($name, $($labels)+) 23 | } 24 | }; 25 | // No labels 26 | ($name:expr, $desc:expr) => { 27 | { 28 | static ONCE: std::sync::Once = std::sync::Once::new(); 29 | ONCE.call_once(|| { 30 | metrics::describe_counter!($name, $desc); 31 | }); 32 | metrics::counter!($name) 33 | } 34 | }; 35 | } 36 | 37 | #[macro_export] 38 | /// Macro to register and use a gauge metric with description and labels. 39 | /// This macro is a wrapper around the `metrics` crate's `gauge!` and `describe_gauge!` 40 | /// macros. It ensures that the description is registered just once. 41 | /// 42 | /// Usage: 43 | /// `firewood_gauge!("metric_name", "description")` 44 | /// `firewood_gauge!("metric_name", "description", "label" => "value")` 45 | /// 46 | /// Call `.set(val)`, `.increment(val)`, or `.decrement(val)` on the result as appropriate. 47 | macro_rules! 
firewood_gauge { 48 | // With labels 49 | ($name:expr, $desc:expr, $($labels:tt)+) => { 50 | { 51 | static ONCE: std::sync::Once = std::sync::Once::new(); 52 | ONCE.call_once(|| { 53 | metrics::describe_gauge!($name, $desc); 54 | }); 55 | metrics::gauge!($name, $($labels)+) 56 | } 57 | }; 58 | // No labels 59 | ($name:expr, $desc:expr) => { 60 | { 61 | static ONCE: std::sync::Once = std::sync::Once::new(); 62 | ONCE.call_once(|| { 63 | metrics::describe_gauge!($name, $desc); 64 | }); 65 | metrics::gauge!($name) 66 | } 67 | }; 68 | } 69 | 70 | #[macro_export] 71 | #[cfg(test)] 72 | /// Macro to create an `AreaIndex` from a literal value at compile time. 73 | /// This macro performs bounds checking at compile time and panics if the value is out of bounds. 74 | /// 75 | /// Usage: 76 | /// `area_index!(0)` - creates an `AreaIndex` with value 0 77 | /// `area_index!(23)` - creates an `AreaIndex` with value 23 78 | /// 79 | /// The macro will panic at compile time if the value is negative or >= `NUM_AREA_SIZES`. 80 | macro_rules! area_index { 81 | ($v:expr) => { 82 | const { 83 | match $crate::nodestore::primitives::AreaIndex::new($v as u8) { 84 | Some(v) => v, 85 | None => panic!("Constant area index out of bounds"), 86 | } 87 | } 88 | }; 89 | } 90 | -------------------------------------------------------------------------------- /ffi/src/value/display_hex.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::fmt; 5 | 6 | /// Implementation of `Display` for a slice that displays the bytes in hexadecimal format. 7 | /// 8 | /// If the `precision` is set, it will display only that many bytes in hex, 9 | /// followed by an ellipsis and the number of remaining bytes. 
10 | pub struct DisplayHex<'a>(pub(super) &'a [u8]); 11 | 12 | impl fmt::Display for DisplayHex<'_> { 13 | #[inline] 14 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 15 | #![expect(clippy::indexing_slicing, clippy::arithmetic_side_effects)] 16 | 17 | match f.precision() { 18 | Some(p) if p < self.0.len() => { 19 | display_hex_bytes(&self.0[..p], f)?; 20 | f.write_fmt(format_args!("... ({} remaining bytes)", self.0.len() - p))?; 21 | } 22 | _ => display_hex_bytes(self.0, f)?, 23 | } 24 | 25 | Ok(()) 26 | } 27 | } 28 | 29 | #[inline] 30 | fn display_hex_bytes(bytes: &[u8], f: &mut fmt::Formatter<'_>) -> fmt::Result { 31 | const WIDTH: usize = size_of::() * 2; 32 | 33 | // SAFETY: it is trivially safe to transmute integer types, as long as the 34 | // offset is aligned, which `align_to` guarantees. 35 | let (before, aligned, after) = unsafe { bytes.align_to::() }; 36 | 37 | for &byte in before { 38 | write!(f, "{byte:02x}")?; 39 | } 40 | 41 | for &word in aligned { 42 | let word = usize::from_be(word); 43 | write!(f, "{word:0WIDTH$x}")?; 44 | } 45 | 46 | for &byte in after { 47 | write!(f, "{byte:02x}")?; 48 | } 49 | 50 | Ok(()) 51 | } 52 | 53 | #[cfg(test)] 54 | mod tests { 55 | use super::*; 56 | 57 | #[cfg(feature = "ethhash")] 58 | use firewood::v2::api::HashKeyExt; 59 | use test_case::test_case; 60 | 61 | #[test_case(&[], "", None; "empty slice")] 62 | #[test_case(&[], "", Some(42); "empty slice with precision")] 63 | #[test_case(b"abc", "616263", None; "short slice")] 64 | #[test_case(b"abc", "61... (2 remaining bytes)", Some(1); "short slice with precision")] 65 | #[test_case(b"abc", "616263", Some(16); "short slice with long precision")] 66 | #[test_case(firewood_storage::TrieHash::empty().as_ref(), "0000000000000000000000000000000000000000000000000000000000000000", None; "empty trie hash")] 67 | #[test_case(firewood_storage::TrieHash::empty().as_ref(), "00000000000000000000000000000000... 
(16 remaining bytes)", Some(16); "empty trie hash with precision")] 68 | #[cfg_attr(feature = "ethhash", test_case(firewood_storage::TrieHash::default_root_hash().as_deref().expect("feature = \"ethhash\""), "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", None; "empty rlp hash"))] 69 | #[cfg_attr(feature = "ethhash", test_case(firewood_storage::TrieHash::default_root_hash().as_deref().expect("feature = \"ethhash\""), "56e81f171bcc55a6ff8345e692c0f86e... (16 remaining bytes)", Some(16); "empty rlp hash with precision"))] 70 | fn test_display_hex(input: &[u8], expected: &str, precision: Option) { 71 | let input = DisplayHex(input); 72 | if let Some(p) = precision { 73 | assert_eq!(format!("{input:.p$}"), expected); 74 | } else { 75 | assert_eq!(format!("{input}"), expected); 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /ffi/go.sum: -------------------------------------------------------------------------------- 1 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 2 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 3 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 4 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 5 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 7 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 9 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 10 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 11 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 12 | github.com/kr/text 
v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 13 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 14 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 15 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 16 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 17 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 18 | github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 19 | github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= 20 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 21 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 22 | github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= 23 | github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= 24 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 25 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 26 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 27 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 28 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 29 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 30 | golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= 31 | golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 32 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 33 | 
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 34 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 35 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 36 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 37 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 38 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 39 | -------------------------------------------------------------------------------- /ffi/src/value/kvp.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::fmt; 5 | 6 | use crate::value::BorrowedBytes; 7 | use crate::{OwnedBytes, OwnedSlice}; 8 | use firewood::v2::api; 9 | 10 | /// A type alias for a rust-owned byte slice. 11 | pub type OwnedKeyValueBatch = OwnedSlice; 12 | 13 | /// A `KeyValue` represents a key-value pair, passed to the FFI. 
14 | #[repr(C)] 15 | #[derive(Debug, Clone, Copy)] 16 | pub struct KeyValuePair<'a> { 17 | pub key: BorrowedBytes<'a>, 18 | pub value: BorrowedBytes<'a>, 19 | } 20 | 21 | impl<'a> KeyValuePair<'a> { 22 | pub fn new((key, value): &'a (impl AsRef<[u8]>, impl AsRef<[u8]>)) -> Self { 23 | Self { 24 | key: BorrowedBytes::from_slice(key.as_ref()), 25 | value: BorrowedBytes::from_slice(value.as_ref()), 26 | } 27 | } 28 | } 29 | 30 | impl fmt::Display for KeyValuePair<'_> { 31 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 32 | let precision = f.precision().unwrap_or(64); 33 | write!( 34 | f, 35 | "Key: {:.precision$}, Value: {:.precision$}", 36 | self.key, self.value 37 | ) 38 | } 39 | } 40 | 41 | impl<'a> api::TryIntoBatch for KeyValuePair<'a> { 42 | type Key = BorrowedBytes<'a>; 43 | type Value = BorrowedBytes<'a>; 44 | type Error = std::convert::Infallible; 45 | 46 | #[inline] 47 | fn try_into_batch(self) -> Result, Self::Error> { 48 | // Check if the value pointer is null (nil slice in Go) 49 | // vs non-null but empty (empty slice []byte{} in Go) 50 | Ok(if self.value.is_null() { 51 | api::BatchOp::DeleteRange { prefix: self.key } 52 | } else { 53 | api::BatchOp::Put { 54 | key: self.key, 55 | value: self.value, 56 | } 57 | }) 58 | } 59 | } 60 | 61 | impl api::KeyValuePair for KeyValuePair<'_> { 62 | #[inline] 63 | fn try_into_tuple(self) -> Result<(Self::Key, Self::Value), Self::Error> { 64 | Ok((self.key, self.value)) 65 | } 66 | } 67 | 68 | impl<'a> api::TryIntoBatch for &KeyValuePair<'a> { 69 | type Key = BorrowedBytes<'a>; 70 | type Value = BorrowedBytes<'a>; 71 | type Error = std::convert::Infallible; 72 | 73 | #[inline] 74 | fn try_into_batch(self) -> Result, Self::Error> { 75 | (*self).try_into_batch() 76 | } 77 | } 78 | 79 | impl api::KeyValuePair for &KeyValuePair<'_> { 80 | #[inline] 81 | fn try_into_tuple(self) -> Result<(Self::Key, Self::Value), Self::Error> { 82 | (*self).try_into_tuple() 83 | } 84 | } 85 | 86 | /// Owned version of 
`KeyValuePair`, returned to ffi callers. 87 | /// 88 | /// C callers must free this using [`crate::fwd_free_owned_kv_pair`], 89 | /// not the C standard library's `free` function. 90 | #[repr(C)] 91 | #[derive(Debug, Clone)] 92 | pub struct OwnedKeyValuePair { 93 | pub key: OwnedBytes, 94 | pub value: OwnedBytes, 95 | } 96 | 97 | impl From<(Box<[u8]>, Box<[u8]>)> for OwnedKeyValuePair { 98 | fn from(value: (Box<[u8]>, Box<[u8]>)) -> Self { 99 | OwnedKeyValuePair { 100 | key: value.0.into(), 101 | value: value.1.into(), 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /storage/src/linear/memory.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![expect( 5 | clippy::arithmetic_side_effects, 6 | reason = "Found 3 occurrences after enabling the lint." 7 | )] 8 | #![expect( 9 | clippy::indexing_slicing, 10 | reason = "Found 1 occurrences after enabling the lint." 
11 | )] 12 | 13 | use super::{FileIoError, OffsetReader, ReadableStorage, WritableStorage}; 14 | use crate::firewood_counter; 15 | use parking_lot::Mutex; 16 | use std::io::Cursor; 17 | 18 | #[derive(Debug, Default)] 19 | /// An in-memory impelementation of [`WritableStorage`] and [`ReadableStorage`] 20 | pub struct MemStore { 21 | bytes: Mutex>, 22 | } 23 | 24 | impl MemStore { 25 | /// Create a new, empty [`MemStore`] 26 | #[must_use] 27 | pub const fn new(bytes: Vec) -> Self { 28 | Self { 29 | bytes: Mutex::new(bytes), 30 | } 31 | } 32 | } 33 | 34 | impl WritableStorage for MemStore { 35 | fn write(&self, offset: u64, object: &[u8]) -> Result { 36 | let offset = offset as usize; 37 | let mut guard = self.bytes.lock(); 38 | if offset + object.len() > guard.len() { 39 | guard.resize(offset + object.len(), 0); 40 | } 41 | guard[offset..offset + object.len()].copy_from_slice(object); 42 | Ok(object.len()) 43 | } 44 | } 45 | 46 | impl ReadableStorage for MemStore { 47 | fn stream_from(&self, addr: u64) -> Result { 48 | firewood_counter!("firewood.read_node", "Number of node reads", "from" => "memory") 49 | .increment(1); 50 | let bytes = self 51 | .bytes 52 | .lock() 53 | .get(addr as usize..) 
54 | .unwrap_or_default() 55 | .to_owned(); 56 | 57 | Ok(Cursor::new(bytes)) 58 | } 59 | 60 | fn size(&self) -> Result { 61 | Ok(self.bytes.lock().len() as u64) 62 | } 63 | } 64 | 65 | #[expect(clippy::unwrap_used)] 66 | #[cfg(test)] 67 | mod test { 68 | use super::*; 69 | use std::io::Read; 70 | use test_case::test_case; 71 | 72 | #[test_case(&[(0,&[1, 2, 3])],(0,&[1, 2, 3]); "write to empty store")] 73 | #[test_case(&[(0,&[1, 2, 3])],(1,&[2, 3]); "read from middle of store")] 74 | #[test_case(&[(0,&[1, 2, 3])],(2,&[3]); "read from end of store")] 75 | #[test_case(&[(0,&[1, 2, 3])],(3,&[]); "read past end of store")] 76 | #[test_case(&[(0,&[1, 2, 3]),(3,&[4,5,6])],(0,&[1, 2, 3,4,5,6]); "write to end of store")] 77 | #[test_case(&[(0,&[1, 2, 3]),(0,&[4])],(0,&[4,2,3]); "overwrite start of store")] 78 | #[test_case(&[(0,&[1, 2, 3]),(1,&[4])],(0,&[1,4,3]); "overwrite middle of store")] 79 | #[test_case(&[(0,&[1, 2, 3]),(2,&[4])],(0,&[1,2,4]); "overwrite end of store")] 80 | #[test_case(&[(0,&[1, 2, 3]),(2,&[4,5])],(0,&[1,2,4,5]); "overwrite/extend end of store")] 81 | fn test_in_mem_write_linear_store(writes: &[(u64, &[u8])], expected: (u64, &[u8])) { 82 | let store = MemStore { 83 | bytes: Mutex::new(vec![]), 84 | }; 85 | assert_eq!(store.size().unwrap(), 0); 86 | 87 | for write in writes { 88 | store.write(write.0, write.1).unwrap(); 89 | } 90 | 91 | let mut reader = store.stream_from(expected.0).unwrap(); 92 | let mut read_bytes = vec![]; 93 | reader.read_to_end(&mut read_bytes).unwrap(); 94 | assert_eq!(read_bytes, expected.1); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /.github/workflows/metrics-check.yaml: -------------------------------------------------------------------------------- 1 | name: Metrics Change Check 2 | 3 | on: 4 | pull_request: 5 | types: [opened, synchronize, reopened] 6 | 7 | concurrency: 8 | group: metrics-check-${{ github.event.pull_request.number }} 9 | cancel-in-progress: true 10 | 
11 | jobs: 12 | check-metrics-changes: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout code 16 | uses: actions/checkout@v4 17 | with: 18 | fetch-depth: 0 # Fetch full history to compare changes 19 | 20 | - name: Check for metric-related changes 21 | id: check-metrics 22 | run: | 23 | # Get the base commit for comparison 24 | if [ "${{ github.event_name }}" = "pull_request" ]; then 25 | BASE_COMMIT="${{ github.event.pull_request.base.sha }}" 26 | else 27 | BASE_COMMIT="${{ github.event.before }}" 28 | fi 29 | 30 | # Check if Grafana dashboard was modified - exit early if so 31 | if git diff --name-only $BASE_COMMIT..HEAD | grep -q "benchmark/Grafana-dashboard.json"; then 32 | echo "benchmark/Grafana-dashboard.json was modified - skipping metrics check" 33 | exit 0 34 | fi 35 | 36 | # Regex pattern to match metric-related code changes 37 | METRIC_REGEX_PATTERN="(counter!|gauge!|histogram!|#\\[metrics\\])" # ci trigger 38 | 39 | # Check for metric-related changes 40 | METRIC_CHANGES=$(git diff $BASE_COMMIT..HEAD --unified=0 | grep -E "$METRIC_REGEX_PATTERN" || true) 41 | 42 | if [ -n "$METRIC_CHANGES" ]; then 43 | echo "⚠️ WARNING: Found metric-related changes, but no dashboard modification:" 44 | echo "$METRIC_CHANGES" 45 | else 46 | echo "✅ No metric-related changes found" 47 | fi 48 | 49 | # Set output variables for the comment step 50 | echo "metric_changes_found=$([ -n "$METRIC_CHANGES" ] && echo "true" || echo "false")" >> $GITHUB_OUTPUT 51 | echo "metric_changes<> $GITHUB_OUTPUT 52 | echo "$METRIC_CHANGES" >> $GITHUB_OUTPUT 53 | echo "EOF" >> $GITHUB_OUTPUT 54 | 55 | - name: Look for previous comment 56 | if: github.repository == github.event.pull_request.head.repo.full_name && steps.check-metrics.outputs.metric_changes_found == 'true' 57 | id: find-comment 58 | uses: peter-evans/find-comment@v3 59 | with: 60 | issue-number: ${{ github.event.pull_request.number }} 61 | comment-author: github-actions[bot] 62 | body-includes: "## Metrics Change 
Detection ⚠️" 63 | 64 | - name: Comment on PR (if applicable) 65 | if: github.repository == github.event.pull_request.head.repo.full_name && steps.check-metrics.outputs.metric_changes_found == 'true' 66 | uses: peter-evans/create-or-update-comment@v4 67 | with: 68 | issue-number: ${{ github.event.pull_request.number }} 69 | body: | 70 | ## Metrics Change Detection ⚠️ 71 | 72 | This PR contains changes related to metrics: 73 | 74 | ``` 75 | ${{ steps.check-metrics.outputs.metric_changes }} 76 | ``` 77 | 78 | However, the dashboard was not modified. 79 | 80 | You may need to update `benchmark/Grafana-dashboard.json` accordingly. 81 | 82 | --- 83 | 84 | *This check is automated to help maintain the dashboard.* 85 | edit-mode: replace 86 | comment-id: ${{ steps.find-comment.outputs.comment-id }} 87 | -------------------------------------------------------------------------------- /ffi/tests/eth/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ava-labs/firewood/ffi/tests/eth 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.9 6 | 7 | require ( 8 | github.com/ava-labs/firewood-go-ethhash/ffi v0.0.0 // this is replaced to use the parent folder 9 | github.com/ava-labs/libevm v1.13.14-0.2.0.release 10 | github.com/holiman/uint256 v1.3.2 11 | github.com/stretchr/testify v1.10.0 12 | ) 13 | 14 | require ( 15 | github.com/DataDog/zstd v1.5.2 // indirect 16 | github.com/VictoriaMetrics/fastcache v1.12.1 // indirect 17 | github.com/beorn7/perks v1.0.1 // indirect 18 | github.com/bits-and-blooms/bitset v1.10.0 // indirect 19 | github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect 20 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 21 | github.com/cockroachdb/errors v1.9.1 // indirect 22 | github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect 23 | github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect 24 | github.com/cockroachdb/redact v1.1.3 // indirect 25 | 
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect 26 | github.com/consensys/bavard v0.1.13 // indirect 27 | github.com/consensys/gnark-crypto v0.12.1 // indirect 28 | github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect 29 | github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect 30 | github.com/davecgh/go-spew v1.1.1 // indirect 31 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect 32 | github.com/ethereum/c-kzg-4844 v0.4.0 // indirect 33 | github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect 34 | github.com/getsentry/sentry-go v0.18.0 // indirect 35 | github.com/go-ole/go-ole v1.3.0 // indirect 36 | github.com/gofrs/flock v0.8.1 // indirect 37 | github.com/gogo/protobuf v1.3.2 // indirect 38 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect 39 | github.com/gorilla/websocket v1.5.0 // indirect 40 | github.com/holiman/bloomfilter/v2 v2.0.3 // indirect 41 | github.com/klauspost/compress v1.18.0 // indirect 42 | github.com/kr/pretty v0.3.1 // indirect 43 | github.com/kr/text v0.2.0 // indirect 44 | github.com/mattn/go-runewidth v0.0.13 // indirect 45 | github.com/mmcloughlin/addchain v0.4.0 // indirect 46 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 47 | github.com/olekukonko/tablewriter v0.0.5 // indirect 48 | github.com/pkg/errors v0.9.1 // indirect 49 | github.com/pmezard/go-difflib v1.0.0 // indirect 50 | github.com/prometheus/client_golang v1.22.0 // indirect 51 | github.com/prometheus/client_model v0.6.1 // indirect 52 | github.com/prometheus/common v0.62.0 // indirect 53 | github.com/prometheus/procfs v0.15.1 // indirect 54 | github.com/rivo/uniseg v0.2.0 // indirect 55 | github.com/rogpeppe/go-internal v1.12.0 // indirect 56 | github.com/shirou/gopsutil v3.21.11+incompatible // indirect 57 | github.com/supranational/blst v0.3.14 // indirect 58 | github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a 
// indirect 59 | github.com/tklauser/go-sysconf v0.3.12 // indirect 60 | github.com/tklauser/numcpus v0.6.1 // indirect 61 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 62 | golang.org/x/crypto v0.45.0 // indirect 63 | golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect 64 | golang.org/x/sync v0.18.0 // indirect 65 | golang.org/x/sys v0.38.0 // indirect 66 | golang.org/x/text v0.31.0 // indirect 67 | google.golang.org/protobuf v1.36.5 // indirect 68 | gopkg.in/yaml.v3 v3.0.1 // indirect 69 | rsc.io/tmplfunc v0.0.3 // indirect 70 | ) 71 | 72 | replace github.com/ava-labs/firewood-go-ethhash/ffi => ../../ 73 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # Ecosystem License 2 | 3 | Version: 1.1 4 | 5 | Subject to the terms herein, Ava Labs, Inc. (**“Ava Labs”**) hereby grants you 6 | a limited, royalty-free, worldwide, non-sublicensable, non-transferable, 7 | non-exclusive license to use, copy, modify, create derivative works based on, 8 | and redistribute the Software, in source code, binary, or any other form, 9 | including any modifications or derivative works of the Software (collectively, 10 | **“Licensed Software”**), in each case subject to this Ecosystem License 11 | (**“License”**). 12 | 13 | This License applies to all copies, modifications, derivative works, and any 14 | other form or usage of the Licensed Software. You will include and display 15 | this License, without modification, with all uses of the Licensed Software, 16 | regardless of form. 
17 | 18 | You will use the Licensed Software solely (i) in connection with the Avalanche 19 | Public Blockchain platform, having a NetworkID of 1 (Mainnet) or 5 (Fuji), and 20 | associated blockchains, comprised exclusively of the Avalanche X-Chain, 21 | C-Chain, P-Chain and any subnets linked to the P-Chain (“Avalanche Authorized 22 | Platform”) or (ii) for non-production, testing or research purposes within the 23 | Avalanche ecosystem, in each case, without any commercial application 24 | (“Non-Commercial Use”); provided that this License does not permit use of the 25 | Licensed Software in connection with (a) any forks of the Avalanche Authorized 26 | Platform or (b) in any manner not operationally connected to the Avalanche 27 | Authorized Platform other than, for the avoidance of doubt, the limited 28 | exception for Non-Commercial Use. Ava Labs may publicly announce changes or 29 | additions to the Avalanche Authorized Platform, which may expand or modify 30 | usage of the Licensed Software. Upon such announcement, the Avalanche 31 | Authorized Platform will be deemed to be the then-current iteration of such 32 | platform. 33 | 34 | You hereby acknowledge and agree to the terms set forth at 35 | . 36 | 37 | If you use the Licensed Software in violation of this License, this License 38 | will automatically terminate and Ava Labs reserves all rights to seek any 39 | remedy for such violation. 40 | 41 | Except for uses explicitly permitted in this License, Ava Labs retains all 42 | rights in the Licensed Software, including without limitation the ability to 43 | modify it. 44 | 45 | Except as required or explicitly permitted by this License, you will not use 46 | any Ava Labs names, logos, or trademarks without Ava Labs’ prior written 47 | consent. 
48 | 49 | You may use this License for software other than the “Licensed Software” 50 | specified above, as long as the only change to this License is the definition 51 | of the term “Licensed Software.” 52 | 53 | The Licensed Software may reference third party components. You acknowledge 54 | and agree that these third party components may be governed by a separate 55 | license or terms and that you will comply with them. 56 | 57 | **TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE LICENSED SOFTWARE IS PROVIDED 58 | ON AN “AS IS” BASIS, AND AVA LABS EXPRESSLY DISCLAIMS AND EXCLUDES ALL 59 | REPRESENTATIONS, WARRANTIES AND OTHER TERMS AND CONDITIONS, WHETHER EXPRESS OR 60 | IMPLIED, INCLUDING WITHOUT LIMITATION BY OPERATION OF LAW OR BY CUSTOM, 61 | STATUTE OR OTHERWISE, AND INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTY, 62 | TERM, OR CONDITION OF NON-INFRINGEMENT, MERCHANTABILITY, TITLE, OR FITNESS FOR 63 | PARTICULAR PURPOSE. YOU USE THE LICENSED SOFTWARE AT YOUR OWN RISK. AVA LABS 64 | EXPRESSLY DISCLAIMS ALL LIABILITY (INCLUDING FOR ALL DIRECT, CONSEQUENTIAL OR 65 | OTHER DAMAGES OR LOSSES) RELATED TO ANY USE OF THE LICENSED SOFTWARE.** 66 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/build-environment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script sets up the build environment, including installing the firewood build dependencies. 
3 | set -o errexit 4 | 5 | if [ "$EUID" -ne 0 ]; then 6 | echo "This script must be run as root" >&2 7 | exit 1 8 | fi 9 | 10 | # Default bytes-per-inode for ext4 filesystem (2MB) 11 | BYTES_PER_INODE=2097152 12 | 13 | # Parse command line arguments 14 | show_usage() { 15 | echo "Usage: $0 [OPTIONS]" 16 | echo "" 17 | echo "Options:" 18 | echo " --bytes-per-inode BYTES Set bytes-per-inode for ext4 filesystem (default: 2097152)" 19 | echo " --help Show this help message" 20 | } 21 | 22 | while [[ $# -gt 0 ]]; do 23 | case $1 in 24 | --bytes-per-inode) 25 | BYTES_PER_INODE="$2" 26 | shift 2 27 | ;; 28 | --help) 29 | show_usage 30 | exit 0 31 | ;; 32 | *) 33 | echo "Error: Unknown option $1" >&2 34 | show_usage 35 | exit 1 36 | ;; 37 | esac 38 | done 39 | 40 | apt upgrade -y 41 | 42 | # install the build dependency packages 43 | pkgs=(git protobuf-compiler build-essential apt-transport-https net-tools zfsutils-linux mdadm) 44 | install_pkgs=() 45 | for pkg in "${pkgs[@]}"; do 46 | if ! dpkg -s "$pkg" > /dev/null 2>&1; then 47 | install_pkgs+=("$pkg") 48 | fi 49 | done 50 | if [ "${#install_pkgs[@]}" -gt 0 ]; then 51 | apt-get install -y "${install_pkgs[@]}" 52 | fi 53 | 54 | # If there are NVMe devices, set up RAID if multiple, or use single device 55 | mapfile -t NVME_DEVS < <(realpath /dev/disk/by-id/nvme-Amazon_EC2_NVMe_Instance_Storage_* 2>/dev/null | sort | uniq) 56 | if [ "${#NVME_DEVS[@]}" -gt 0 ]; then 57 | DEVICE_TO_USE="" 58 | 59 | if [ "${#NVME_DEVS[@]}" -eq 1 ]; then 60 | # Single device, use it directly 61 | DEVICE_TO_USE="${NVME_DEVS[0]}" 62 | echo "Using single NVMe device: $DEVICE_TO_USE" 63 | elif [ "${#NVME_DEVS[@]}" -eq 2 ]; then 64 | # Two devices, create RAID1 65 | echo "Creating RAID1 array with 2 devices: ${NVME_DEVS[*]}" 66 | mdadm --create /dev/md0 --level=1 --raid-devices=2 "${NVME_DEVS[@]}" 67 | DEVICE_TO_USE="/dev/md0" 68 | elif [ "${#NVME_DEVS[@]}" -eq 3 ]; then 69 | # Three devices, create RAID5 70 | echo "Creating RAID5 array with 3 
devices: ${NVME_DEVS[*]}" 71 | mdadm --create /dev/md0 --level=5 --raid-devices=3 "${NVME_DEVS[@]}" 72 | DEVICE_TO_USE="/dev/md0" 73 | elif [ "${#NVME_DEVS[@]}" -eq 4 ]; then 74 | # Four devices, create RAID10 75 | echo "Creating RAID10 array with 4 devices: ${NVME_DEVS[*]}" 76 | mdadm --create /dev/md0 --level=10 --raid-devices=4 "${NVME_DEVS[@]}" 77 | DEVICE_TO_USE="/dev/md0" 78 | else 79 | echo "Unsupported number of NVMe devices: ${#NVME_DEVS[@]}. Using first device only." 80 | DEVICE_TO_USE="${NVME_DEVS[0]}" 81 | fi 82 | 83 | # Wait for RAID array to be ready (if created) 84 | if [[ "$DEVICE_TO_USE" == "/dev/md0" ]]; then 85 | echo "Waiting for RAID array to be ready..." 86 | while [ ! -e "$DEVICE_TO_USE" ]; do 87 | sleep 1 88 | done 89 | # Save RAID configuration 90 | mdadm --detail --scan >> /etc/mdadm/mdadm.conf 91 | update-initramfs -u 92 | fi 93 | 94 | # Format and mount the device 95 | mkfs.ext4 -E nodiscard -i "$BYTES_PER_INODE" "$DEVICE_TO_USE" 96 | NVME_MOUNT=/mnt/nvme 97 | mkdir -p "$NVME_MOUNT" 98 | mount -o noatime "$DEVICE_TO_USE" "$NVME_MOUNT" 99 | echo "$DEVICE_TO_USE $NVME_MOUNT ext4 noatime 0 0" >> /etc/fstab 100 | mkdir -p "$NVME_MOUNT/ubuntu/firewood" 101 | chown ubuntu:ubuntu "$NVME_MOUNT/ubuntu" "$NVME_MOUNT/ubuntu/firewood" 102 | ln -s "$NVME_MOUNT/ubuntu/firewood" /home/ubuntu/firewood 103 | fi 104 | -------------------------------------------------------------------------------- /storage/src/path/joined.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use std::iter::Chain; 5 | 6 | use super::{SplitPath, TriePath}; 7 | 8 | /// Joins two path segments into a single path, retaining the original segments 9 | /// without needing to allocate a new contiguous array. 
10 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] 11 | pub struct JoinedPath { 12 | /// The prefix segment of the path. 13 | pub prefix: P, 14 | 15 | /// The suffix segment of the path. 16 | pub suffix: S, 17 | } 18 | 19 | impl JoinedPath { 20 | /// Creates a new joined path from the given prefix and suffix. 21 | /// 22 | /// This does not allocate and takes ownership of the input segments. 23 | pub const fn new(prefix: P, suffix: S) -> Self { 24 | Self { prefix, suffix } 25 | } 26 | } 27 | 28 | impl TriePath for JoinedPath { 29 | type Components<'a> 30 | = Chain, S::Components<'a>> 31 | where 32 | Self: 'a; 33 | 34 | fn len(&self) -> usize { 35 | self.prefix 36 | .len() 37 | .checked_add(self.suffix.len()) 38 | .expect("joined path length overflowed usize") 39 | } 40 | 41 | fn is_empty(&self) -> bool { 42 | self.prefix.is_empty() && self.suffix.is_empty() 43 | } 44 | 45 | fn components(&self) -> Self::Components<'_> { 46 | self.prefix.components().chain(self.suffix.components()) 47 | } 48 | 49 | fn as_component_slice(&self) -> super::PartialPath<'_> { 50 | if self.prefix.is_empty() { 51 | self.suffix.as_component_slice() 52 | } else if self.suffix.is_empty() { 53 | self.prefix.as_component_slice() 54 | } else { 55 | let mut buf = super::PathBuf::with_capacity(self.len()); 56 | buf.extend(self.prefix.components()); 57 | buf.extend(self.suffix.components()); 58 | super::PartialPath::Owned(buf) 59 | } 60 | } 61 | } 62 | 63 | impl SplitPath for JoinedPath { 64 | fn split_at(self, mid: usize) -> (Self, Self) { 65 | if let Some(mid) = mid.checked_sub(self.prefix.len()) { 66 | let (a_suffix, b_suffix) = self.suffix.split_at(mid); 67 | let prefix: Self = Self { 68 | prefix: self.prefix, 69 | suffix: a_suffix, 70 | }; 71 | let suffix = Self { 72 | prefix: P::default(), 73 | suffix: b_suffix, 74 | }; 75 | (prefix, suffix) 76 | } else { 77 | let (a_prefix, b_prefix) = self.prefix.split_at(mid); 78 | let prefix = Self { 79 | prefix: a_prefix, 
80 | suffix: S::default(), 81 | }; 82 | let suffix: Self = Self { 83 | prefix: b_prefix, 84 | suffix: self.suffix, 85 | }; 86 | (prefix, suffix) 87 | } 88 | } 89 | 90 | fn split_first(self) -> Option<(super::PathComponent, Self)> { 91 | if let Some((first, prefix)) = self.prefix.split_first() { 92 | Some(( 93 | first, 94 | Self { 95 | prefix, 96 | suffix: self.suffix, 97 | }, 98 | )) 99 | } else if let Some((first, suffix)) = self.suffix.split_first() { 100 | Some(( 101 | first, 102 | Self { 103 | prefix: P::default(), 104 | suffix, 105 | }, 106 | )) 107 | } else { 108 | None 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /ffi/cbindgen.toml: -------------------------------------------------------------------------------- 1 | # This is a template cbindgen.toml file with all of the default values. 2 | # Some values are commented out because their absence is the real default. 3 | # 4 | # See https://github.com/mozilla/cbindgen/blob/master/docs.md#cbindgentoml 5 | # for detailed documentation of every option here. 6 | 7 | 8 | language = "C" 9 | 10 | 11 | ############## Options for Wrapping the Contents of the Header ################# 12 | 13 | # header = "/* Text to put at the beginning of the generated file. Probably a license. */" 14 | header = """// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 15 | // See the file LICENSE.md for licensing terms. 16 | 17 | // This file was @generated by cbindgen. Do not edit this file manually. 18 | // Run `cargo build --target firewood-ffi` to regenerate this file.""" 19 | 20 | # trailer = "/* Text to put at the end of the generated file */" 21 | # include_guard = "my_bindings_h" 22 | # pragma_once = true 23 | # autogen_warning = "/* Warning, this file is autogenerated by cbindgen. Don't modify this manually. 
*/" 24 | include_version = false 25 | # namespace = "my_namespace" 26 | namespaces = [] 27 | using_namespaces = [] 28 | sys_includes = [] 29 | includes = [] 30 | no_includes = false 31 | # cpp_compat = true 32 | after_includes = "" 33 | 34 | 35 | ############################ Code Style Options ################################ 36 | 37 | braces = "SameLine" 38 | line_length = 100 39 | tab_width = 2 40 | documentation = true 41 | documentation_style = "auto" 42 | documentation_length = "full" 43 | line_endings = "LF" # also "CR", "CRLF", "Native" 44 | 45 | 46 | ############################# Codegen Options ################################## 47 | 48 | style = "both" 49 | sort_by = "Name" # default for `fn.sort_by` and `const.sort_by` 50 | usize_is_size_t = true 51 | 52 | [defines] 53 | # "target_os = freebsd" = "DEFINE_FREEBSD" 54 | # "feature = serde" = "DEFINE_SERDE" 55 | 56 | [export] 57 | include = [] 58 | exclude = [] 59 | # prefix = "CAPI_" 60 | item_types = [] 61 | renaming_overrides_prefixing = false 62 | 63 | [export.rename] 64 | "Db" = "void" 65 | 66 | [export.body] 67 | 68 | [export.mangle] 69 | 70 | [fn] 71 | rename_args = "None" 72 | # must_use = "MUST_USE_FUNC" 73 | # deprecated = "DEPRECATED_FUNC" 74 | # deprecated_with_note = "DEPRECATED_FUNC_WITH_NOTE" 75 | # no_return = "NO_RETURN" 76 | # prefix = "START_FUNC" 77 | # postfix = "END_FUNC" 78 | args = "auto" 79 | sort_by = "Name" 80 | 81 | [struct] 82 | rename_fields = "None" 83 | # must_use = "MUST_USE_STRUCT" 84 | # deprecated = "DEPRECATED_STRUCT" 85 | # deprecated_with_note = "DEPRECATED_STRUCT_WITH_NOTE" 86 | derive_constructor = false 87 | derive_eq = false 88 | derive_neq = false 89 | derive_lt = false 90 | derive_lte = false 91 | derive_gt = false 92 | derive_gte = false 93 | 94 | [enum] 95 | rename_variants = "None" 96 | # must_use = "MUST_USE_ENUM" 97 | # deprecated = "DEPRECATED_ENUM" 98 | # deprecated_with_note = "DEPRECATED_ENUM_WITH_NOTE" 99 | add_sentinel = false 100 | prefix_with_name = 
true 101 | derive_helper_methods = false 102 | derive_const_casts = false 103 | derive_mut_casts = false 104 | # cast_assert_name = "ASSERT" 105 | derive_tagged_enum_destructor = false 106 | derive_tagged_enum_copy_constructor = false 107 | enum_class = true 108 | private_default_tagged_enum_constructor = false 109 | 110 | 111 | [const] 112 | allow_static_const = true 113 | allow_constexpr = false 114 | sort_by = "Name" 115 | 116 | 117 | [macro_expansion] 118 | bitflags = false 119 | 120 | 121 | ############## Options for How Your Rust library Should Be Parsed ############## 122 | 123 | [parse] 124 | parse_deps = false 125 | # include = [] 126 | exclude = [] 127 | clean = false 128 | extra_bindings = [] 129 | 130 | 131 | [parse.expand] 132 | crates = [] 133 | all_features = false 134 | default_features = true 135 | features = [] 136 | -------------------------------------------------------------------------------- /benchmark/src/zipf.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | #![expect( 5 | clippy::arithmetic_side_effects, 6 | reason = "Found 2 occurrences after enabling the lint." 7 | )] 8 | #![expect( 9 | clippy::cast_precision_loss, 10 | reason = "Found 1 occurrences after enabling the lint." 11 | )] 12 | #![expect( 13 | clippy::cast_sign_loss, 14 | reason = "Found 1 occurrences after enabling the lint." 15 | )] 16 | #![expect( 17 | clippy::unwrap_used, 18 | reason = "Found 1 occurrences after enabling the lint." 
19 | )] 20 | 21 | use crate::TestRunner; 22 | use firewood::db::{BatchOp, Db}; 23 | use firewood::v2::api::{Db as _, Proposal as _}; 24 | use log::{debug, trace}; 25 | use pretty_duration::pretty_duration; 26 | use rand::prelude::*; 27 | use sha2::{Digest, Sha256}; 28 | use std::collections::HashSet; 29 | use std::error::Error; 30 | use std::time::Instant; 31 | 32 | #[derive(clap::Args, Debug, PartialEq)] 33 | pub struct Args { 34 | #[arg(short, long, help = "zipf exponent", default_value_t = 1.2)] 35 | exponent: f64, 36 | } 37 | 38 | #[derive(Clone)] 39 | pub struct Zipf; 40 | 41 | impl TestRunner for Zipf { 42 | fn run(&self, db: &Db, args: &crate::Args) -> Result<(), Box> { 43 | let exponent = if let crate::TestName::Zipf(args) = &args.test_name { 44 | args.exponent 45 | } else { 46 | unreachable!() 47 | }; 48 | let rows = (args.global_opts.number_of_batches * args.global_opts.batch_size) as f64; 49 | let zipf = rand_distr::Zipf::new(rows, exponent).unwrap(); 50 | let start = Instant::now(); 51 | let mut batch_id = 0; 52 | 53 | let rng = firewood_storage::SeededRng::from_env_or_random(); 54 | while start.elapsed().as_secs() / 60 < args.global_opts.duration_minutes { 55 | let batch: Vec> = 56 | generate_updates(&rng, batch_id, args.global_opts.batch_size as usize, zipf) 57 | .collect(); 58 | if log::log_enabled!(log::Level::Debug) { 59 | let mut distinct = HashSet::new(); 60 | for op in &batch { 61 | match op { 62 | BatchOp::Put { key, value: _ } => { 63 | distinct.insert(key); 64 | } 65 | _ => unreachable!(), 66 | } 67 | } 68 | debug!( 69 | "inserting batch {} with {} distinct data values", 70 | batch_id, 71 | distinct.len() 72 | ); 73 | } 74 | let proposal = db.propose(batch).expect("proposal should succeed"); 75 | proposal.commit()?; 76 | 77 | if log::log_enabled!(log::Level::Debug) { 78 | debug!( 79 | "completed batch {} in {}", 80 | batch_id, 81 | pretty_duration(&start.elapsed(), None) 82 | ); 83 | } 84 | batch_id += 1; 85 | } 86 | Ok(()) 87 | } 88 | } 89 | 
fn generate_updates( 90 | rng: &firewood_storage::SeededRng, 91 | batch_id: u32, 92 | batch_size: usize, 93 | zipf: rand_distr::Zipf, 94 | ) -> impl Iterator, Vec>> { 95 | let hash_of_batch_id = Sha256::digest(batch_id.to_ne_bytes()).to_vec(); 96 | zipf.sample_iter(rng) 97 | .take(batch_size) 98 | .map(|inner_key| { 99 | let digest = Sha256::digest((inner_key as u64).to_ne_bytes()).to_vec(); 100 | trace!( 101 | "updating {:?} with digest {} to {}", 102 | inner_key, 103 | hex::encode(&digest), 104 | hex::encode(&hash_of_batch_id) 105 | ); 106 | (digest, hash_of_batch_id.clone()) 107 | }) 108 | .map(|(key, value)| BatchOp::Put { key, value }) 109 | .collect::>() 110 | .into_iter() 111 | } 112 | -------------------------------------------------------------------------------- /storage/src/hashtype/trie_hash.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use crate::node::ExtendableBytes; 5 | use crate::node::branch::Serializable; 6 | use sha2::digest::generic_array::GenericArray; 7 | use sha2::digest::typenum; 8 | use std::fmt::{self, Debug, Display, Formatter}; 9 | 10 | /// An error that occurs when trying to convert a slice to a `TrieHash` 11 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, thiserror::Error)] 12 | #[error("could not convert a slice of {0} bytes to TrieHash (an array of 32 bytes)")] 13 | #[non_exhaustive] 14 | pub struct InvalidTrieHashLength(pub usize); 15 | 16 | /// A hash value inside a merkle trie 17 | /// We use the same type as returned by sha2 here to avoid copies 18 | #[derive(PartialEq, Eq, Clone, Hash)] 19 | pub struct TrieHash([u8; 32]); 20 | 21 | /// Intentionally, there is no [`Default`] implementation for [`TrieHash`] to force 22 | /// the user to explicitly decide between an empty RLP hash or a hash of all zeros. 
23 | /// 24 | /// These unfortunately cannot be `const` because the [`GenericArray`] type does 25 | /// provide a const constructor. 26 | impl TrieHash { 27 | /// Creates a new `TrieHash` from the default value, which is the all zeros. 28 | /// 29 | /// ``` 30 | /// assert_eq!( 31 | /// firewood_storage::TrieHash::empty(), 32 | /// firewood_storage::TrieHash::from([0; 32]), 33 | /// ) 34 | /// ``` 35 | #[must_use] 36 | pub const fn empty() -> Self { 37 | TrieHash([0; TRIE_HASH_LEN]) 38 | } 39 | } 40 | 41 | impl std::ops::Deref for TrieHash { 42 | type Target = [u8; 32]; 43 | fn deref(&self) -> &Self::Target { 44 | &self.0 45 | } 46 | } 47 | 48 | impl AsRef<[u8]> for TrieHash { 49 | fn as_ref(&self) -> &[u8] { 50 | &self.0 51 | } 52 | } 53 | 54 | impl Debug for TrieHash { 55 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 56 | let width = f.precision().unwrap_or(64); 57 | write!(f, "{:.*}", width, hex::encode(self.0)) 58 | } 59 | } 60 | impl Display for TrieHash { 61 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { 62 | let width = f.precision().unwrap_or(64); 63 | write!(f, "{:.*}", width, hex::encode(self.0)) 64 | } 65 | } 66 | 67 | const TRIE_HASH_LEN: usize = std::mem::size_of::(); 68 | 69 | impl From<[u8; TRIE_HASH_LEN]> for TrieHash { 70 | fn from(value: [u8; TRIE_HASH_LEN]) -> Self { 71 | TrieHash(value) 72 | } 73 | } 74 | 75 | impl From for [u8; TRIE_HASH_LEN] { 76 | fn from(value: TrieHash) -> Self { 77 | value.0 78 | } 79 | } 80 | 81 | impl TryFrom<&[u8]> for TrieHash { 82 | type Error = InvalidTrieHashLength; 83 | 84 | fn try_from(value: &[u8]) -> Result { 85 | match value.try_into() { 86 | Ok(array) => Ok(Self::from_bytes(array)), 87 | Err(_) => Err(InvalidTrieHashLength(value.len())), 88 | } 89 | } 90 | } 91 | 92 | impl From> for TrieHash { 93 | fn from(value: GenericArray) -> Self { 94 | TrieHash(value.into()) 95 | } 96 | } 97 | 98 | impl TrieHash { 99 | /// Some code needs a `TrieHash` even though it only has 
a `HashType`. 100 | /// This function is a no-op, as `HashType` is a `TrieHash` in this context. 101 | #[must_use] 102 | pub const fn into_triehash(self) -> Self { 103 | self 104 | } 105 | 106 | /// Creates a new `TrieHash` from an array of bytes. 107 | #[must_use] 108 | pub fn from_bytes(bytes: [u8; TRIE_HASH_LEN]) -> Self { 109 | bytes.into() 110 | } 111 | } 112 | 113 | impl Serializable for TrieHash { 114 | fn write_to(&self, vec: &mut W) { 115 | vec.extend_from_slice(&self.0); 116 | } 117 | 118 | fn from_reader(mut reader: R) -> Result 119 | where 120 | Self: Sized, 121 | { 122 | let mut buf = [0u8; 32]; 123 | reader.read_exact(&mut buf)?; 124 | Ok(TrieHash::from(buf)) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /README.docker.md: -------------------------------------------------------------------------------- 1 | # Docker on Mac Compatibility 2 | 3 | Note: 4 | Docker compatiblitiy is a work in progress. Please PR any changes here if you find a better way to do this. 5 | 6 | ## Steps 7 | 8 | ### Step 1 9 | 10 | Install docker-desktop ([guide](https://docs.docker.com/desktop/install/mac-install/)) 11 | 12 | ### Step 2 13 | 14 | Setup a dev-environment ([guide](https://docs.docker.com/desktop/dev-environments/set-up/#set-up-a-dev-environment)) 15 | 16 | Here, you want to specifically pick a local-directory (the repo's directory) 17 | 18 | ![image](https://github.com/ava-labs/firewood/assets/3286504/83d6b66d-19e3-4b59-bc73-f67cf68d7329) 19 | 20 | This is best because you can still do all your `git` stuff from the host. 21 | 22 | ### Step 3 23 | 24 | You will need the `Dev Containers` VSCODE extension, authored by Microsoft for this next step. 25 | 26 | Open your dev-environment with VSCODE. Until you do this, the volume might not be properly mounted. If you (dear reader) know of a better way to do this, please open a PR. 
VSCODE is very useful for its step-by-step debugger, but other than that, you can run whatever IDE you would like in the host environment and just open a shell in the container to run the tests. 27 | 28 | ![image](https://github.com/ava-labs/firewood/assets/3286504/88c981cb-42b9-4b99-acec-fbca31cca652) 29 | 30 | ### Step 4 31 | 32 | Open a terminal in vscode OR exec into the container directly as follows 33 | 34 | ```sh 35 | # you don't need to do this if you open the terminal from vscode 36 | # the container name here is "firewood-app-1", you should be able to see this in docker-desktop 37 | docker exec -it --privileged -u root firewood-app-1 zsh 38 | ``` 39 | 40 | Once you're in the terminal you'll want to install the Rust toolset. You can [find instructions here](https://rustup.rs/) 41 | 42 | **!!! IMPORTANT !!!** 43 | 44 | Make sure you read the output of any commands that you run. `rustup` will likely ask you to `source` a file to add some tools to your `PATH`. 45 | 46 | You'll also need to install all the regular linux dependencies (if there is anything from this list that's missing, please add to this README) 47 | 48 | ```sh 49 | apt update 50 | apt install vim 51 | apt install build-essential 52 | apt install protobuf-compiler 53 | ``` 54 | 55 | ### Step 5 56 | 57 | **!!! IMPORTANT !!!** 58 | 59 | You need to create a separate `CARGO_TARGET_DIR` that isn't volume mounted onto the host. `VirtioFS` (the default file-system) has some concurrency issues when dealing with sequential writes and reads to a volume that is mounted to the host. You can put a directory here for example: `/root/target`. 60 | 61 | For step-by-step debugging and development directly in the container, you will also **need to make sure that `rust-analyzer` is configured to point to the new target-directory instead of just default**. 62 | 63 | There are a couple of places where this can be setup. 
If you're a `zsh` user, you should add `export CARGO_TARGET_DIR=/root/target` to either `/root/.zshrc` or `/root/.bashrc`. 64 | After adding the line, don't forget to `source` the file to make sure your current session is updated. 65 | 66 | ### Step 6 67 | 68 | Navigate to `/com.docker.devenvironments.code` and run `cargo test`. If it worked, you are most of the way there! If it did not work, there are a couple of common issues. If the code will not compile, it's possible that your target directory isn't set up properly. Check inside `/root/target` to see if there are any build artifacts. If not, you might need to call `source ~/.zshrc` again (sub in whatever your preferred shell is). 69 | 70 | Now for vscode, you need to configure your `rust-analyzer` in the "remote-environment" (the Docker container). There are a couple of places to do this. First, you want to open `/root/.vscode-server/Machine/settings.json` and make sure that you have the following entry: 71 | 72 | ```json 73 | { 74 | "rust-analyzer.cargo.extraEnv": { 75 | "CARGO_TARGET_DIR": "/root/target" 76 | } 77 | } 78 | ``` 79 | 80 | Then, you want to make sure that the terminal that's being used by the vscode instance (for the host system) is the same as your preferred terminal in the container to make sure that things work as expected. [Here are the docs](https://code.visualstudio.com/docs/terminal/profiles) to help you with setting up the proper profile. 81 | 82 | And that should be enough to get your started! Feel free to open an issue if you need any help debugging. 83 | -------------------------------------------------------------------------------- /ffi/src/logging.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | use crate::BorrowedBytes; 5 | 6 | /// Arguments for initializing logging for the Firewood FFI. 
7 | #[repr(C)] 8 | #[derive(Debug)] 9 | pub struct LogArgs<'a> { 10 | /// The file path where logs for this process are stored. 11 | /// 12 | /// If empty, this is set to `${TMPDIR}/firewood-log.txt`. 13 | /// 14 | /// This is required to be a valid UTF-8 string. 15 | pub path: BorrowedBytes<'a>, 16 | 17 | /// The filter level for logs. 18 | /// 19 | /// If empty, this is set to `info`. 20 | /// 21 | /// This is required to be a valid UTF-8 string. 22 | pub filter_level: BorrowedBytes<'a>, 23 | } 24 | 25 | #[cfg(feature = "logger")] 26 | impl LogArgs<'_> { 27 | fn path(&self) -> std::io::Result> { 28 | let path = self.path.as_str().map_err(|err| { 29 | std::io::Error::new( 30 | std::io::ErrorKind::InvalidInput, 31 | format!("log path contains invalid utf-8: {err}"), 32 | ) 33 | })?; 34 | if path.is_empty() { 35 | Ok(std::borrow::Cow::Owned( 36 | std::env::temp_dir().join("firewood-log.txt"), 37 | )) 38 | } else { 39 | Ok(std::borrow::Cow::Borrowed(std::path::Path::new(path))) 40 | } 41 | } 42 | 43 | fn log_level(&self) -> std::io::Result<&str> { 44 | let level = self.filter_level.as_str().map_err(|err| { 45 | std::io::Error::new( 46 | std::io::ErrorKind::InvalidInput, 47 | format!("log level contains invalid utf-8: {err}"), 48 | ) 49 | })?; 50 | if level.is_empty() { 51 | Ok("info") 52 | } else { 53 | Ok(level) 54 | } 55 | } 56 | 57 | /// Starts logging to the specified file path with the given filter level. 58 | /// 59 | /// # Errors 60 | /// 61 | /// If the log file cannot be created or opened, or if the log level is invalid, 62 | /// this will return an error. 
63 | pub fn start_logging(&self) -> std::io::Result<()> { 64 | use env_logger::Target::Pipe; 65 | use std::fs::OpenOptions; 66 | 67 | let log_path = self.path()?; 68 | 69 | if let Some(log_dir) = log_path.parent() { 70 | std::fs::create_dir_all(log_dir).map_err(|e| { 71 | std::io::Error::new( 72 | e.kind(), 73 | format!( 74 | "failed to create log directory `{}`: {e}", 75 | log_dir.display() 76 | ), 77 | ) 78 | })?; 79 | } 80 | 81 | let level = self.log_level()?; 82 | let level = level.parse().map_err(|e| { 83 | std::io::Error::new( 84 | std::io::ErrorKind::InvalidInput, 85 | format!("invalid log level `{level}`: {e}"), 86 | ) 87 | })?; 88 | 89 | let file = OpenOptions::new() 90 | .create(true) 91 | .write(true) 92 | .truncate(false) 93 | .open(&log_path) 94 | .map_err(|e| { 95 | std::io::Error::new( 96 | e.kind(), 97 | format!("failed to open log file `{}`: {e}", log_path.display()), 98 | ) 99 | })?; 100 | 101 | env_logger::Builder::new() 102 | .filter_level(level) 103 | .target(Pipe(Box::new(file))) 104 | .try_init() 105 | .map_err(|e| std::io::Error::other(format!("failed to initialize logger: {e}")))?; 106 | 107 | Ok(()) 108 | } 109 | } 110 | 111 | #[cfg(not(feature = "logger"))] 112 | impl LogArgs<'_> { 113 | /// Starts logging to the specified file path with the given filter level. 114 | /// 115 | /// # Errors 116 | /// 117 | /// This method will always return an error because the `logger` feature is not enabled. 118 | pub fn start_logging(&self) -> std::io::Result<()> { 119 | Err(std::io::Error::new( 120 | std::io::ErrorKind::Unsupported, 121 | "firewood-ffi was compiled without the `logger` feature. 
Logging is not available.", 122 | )) 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /benchmark/setup-scripts/install-grafana.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | 4 | # install the keyrings needed to validate the grafana apt repository 5 | if ! [ -d /etc/apt/keyrings ]; then 6 | mkdir -p /etc/apt/keyrings/ 7 | fi 8 | if ! [ -f /etc/apt/keyrings/grafana.gpg ]; then 9 | wget -q -O - https://apt.grafana.com/gpg.key | gpg --dearmor > /etc/apt/keyrings/grafana.gpg 10 | echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com stable main" >> /etc/apt/sources.list.d/grafana.list 11 | fi 12 | apt-get update 13 | 14 | # set up the systemd configuration to allow grafana to bind to port 80 15 | if ! [ -d /etc/systemd/system/grafana-server.service.d ]; then 16 | mkdir -p /etc/systemd/system/grafana-server.service.d 17 | fi 18 | 19 | if ! [ -f /etc/systemd/system/grafana-server.service.d/override.conf ]; then 20 | cat > /etc/systemd/system/grafana-server.service.d/override.conf < /dev/null 2>&1; then 37 | install_pkgs+=("$pkg") 38 | fi 39 | done 40 | if [ "${#install_pkgs[@]}" -gt 0 ]; then 41 | apt-get install -y "${install_pkgs[@]}" 42 | fi 43 | 44 | # configure grafana to listen on port 80 45 | if ! 
grep -q '^http_port = 80$' /etc/grafana/grafana.ini; then 46 | perl -pi -e 's/^;?http_port = .*/http_port = 80/' /etc/grafana/grafana.ini 47 | fi 48 | 49 | # configure username and password 50 | # TODO(amin): auto-generate some password for more security 51 | # TODO(amin): another possible option here is enabling google oauth, and this could give access 52 | # to anyone within our org emails 53 | sed -i -E "s|^;?\s*admin_user\s*=.*|admin_user = admin|" /etc/grafana/grafana.ini 54 | sed -i -E "s|^;?\s*admin_password\s*=.*|admin_password = firewood_is_fast|" /etc/grafana/grafana.ini 55 | 56 | # provision data source and dashboards 57 | cat > /etc/grafana/provisioning/datasources/prometheus.yml < /etc/grafana/provisioning/dashboards/dashboards.yaml <> /etc/prometheus/prometheus.yml <> /etc/default/prometheus-node-exporter <&2 22 | exit "$code" 23 | } 24 | 25 | function @usage() { 26 | cat <"$dest" 2>/dev/null; then 63 | @die 3 "'$golangci_yaml' has no differences '$upstream_yaml'; this is unexpected! At least package name must be different." 64 | fi 65 | } 66 | 67 | function @apply-patch-to-upstream() { 68 | if ! patch -t "$upstream_yaml" "$expected_patch"; then 69 | @die 4 "Failed to apply the patch from $expected_patch. Please review the changes manually." 
70 | fi 71 | } 72 | 73 | function @apply() { 74 | local backup 75 | backup=$(mktemp) 76 | trap 'rm -f "$backup"' EXIT 77 | 78 | # make a copy of the upstream yaml before applying the patch so we can refresh the patch file later 79 | cp -f "$upstream_yaml" "$backup" 80 | @apply-patch-to-upstream 81 | 82 | # We cleanly applied the patch, so we can now replace the local file with the patched version 83 | cp -f "$upstream_yaml" "$golangci_yaml" 84 | cp -f "$backup" "$upstream_yaml" 85 | echo "Successfully applied the patch from $expected_patch to $golangci_yaml" 86 | 87 | # refresh the patch so `check` apply later can use it 88 | @update 89 | } 90 | 91 | function @check() { 92 | @apply-patch-to-upstream 93 | 94 | local patch 95 | patch=$(mktemp) 96 | trap 'rm -f "$patch"' EXIT 97 | 98 | if diff -Nau "$upstream_yaml" "$golangci_yaml" >"$patch" 2>&1; then 99 | echo "'$golangci_yaml' is up to date with AvalancheGo's .golangci.yaml." 100 | exit 0 101 | fi 102 | 103 | { 104 | echo "'$golangci_yaml' has unexpected changes from AvalancheGo." 105 | echo "View the upstream changes at: $history_url and apply them if necessary." 106 | echo "" 107 | echo "Current changes:" 108 | cat "$patch" 109 | } >&2 110 | 111 | exit 5 112 | } 113 | 114 | function @update() { 115 | @generate-patch "$expected_patch" 116 | echo "Updated expected changes in $expected_patch" 117 | exit 0 118 | } 119 | 120 | case "${1:-check}" in 121 | apply | check | update) 122 | # make sure we are in the root of the repository 123 | cd "$(git rev-parse --show-toplevel)" 124 | 125 | @download-upstream 126 | 127 | case "${1:-check}" in 128 | apply) @apply ;; 129 | check) @check ;; 130 | update) @update ;; 131 | esac 132 | 133 | ;; 134 | *) @usage ;; 135 | esac 136 | -------------------------------------------------------------------------------- /firewood/src/proofs/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 
2 | // See the file LICENSE.md for licensing terms. 3 | 4 | //! Cryptographic proof system for Merkle tries. 5 | //! 6 | //! This module provides a complete proof system for verifying the presence or absence 7 | //! of key-value pairs in Firewood's Merkle trie implementation without requiring access 8 | //! to the entire trie structure. The proof system enables efficient verification of trie 9 | //! state using cryptographic hashes. 10 | //! 11 | //! # Overview 12 | //! 13 | //! Firewood's proof system consists of several key components: 14 | //! 15 | //! - **Single-key proofs** ([`Proof`]): Verify that a specific key-value pair exists 16 | //! (or doesn't exist) in a trie with a given root hash. 17 | //! - **Range proofs** ([`RangeProof`]): Verify that a contiguous set of key-value pairs 18 | //! exists within a specific key range. 19 | //! - **Serialization format**: A compact binary format for transmitting proofs over the 20 | //! network or storing them persistently. 21 | //! 22 | //! # Architecture 23 | //! 24 | //! The proof system is organized into several submodules: 25 | //! 26 | //! - `types`: Core proof types including [`Proof`], [`ProofNode`], [`ProofError`], and 27 | //! [`ProofCollection`]. 28 | //! - `range`: Range proof implementation for verifying multiple consecutive keys. 29 | //! - `header`: Proof format headers and validation. 30 | //! - `reader`: Proof reading and deserialization utilities. 31 | //! - `ser`: Proof serialization implementation (internal). 32 | //! - `de`: Proof deserialization implementation (internal). 33 | //! - `childmap`: Compact bitmap for tracking present children (internal). 34 | //! - `magic`: Magic constants for proof format identification (internal). 35 | //! 36 | //! # Usage 37 | //! 38 | //! For most use cases, import proof types directly from the top level of the crate: 39 | //! 40 | //! ```rust,ignore 41 | //! use firewood::{Proof, ProofNode, RangeProof}; 42 | //! 43 | //! // Verify a single key 44 | //! 
let proof: Proof<Vec<ProofNode>> = /* ... */; 45 | //! proof.verify(b"key", Some(b"value"), &root_hash)?; 46 | //! 47 | //! // Verify a key range 48 | //! let range_proof: RangeProof<Vec<u8>, Vec<u8>, Vec<u8>> = /* ... */;
91 | 92 | /// Magic header bytes identifying a Firewood proof: `b"fwdproof"` 93 | pub const PROOF_HEADER: &[u8; 8] = b"fwdproof"; 94 | 95 | /// Current proof format version: `0` 96 | pub const PROOF_VERSION: u8 = 0; 97 | 98 | /// Hash mode identifier for SHA-256 hashing 99 | #[cfg(not(feature = "ethhash"))] 100 | pub const HASH_MODE: u8 = 0; 101 | 102 | /// Hash mode identifier for Keccak-256 hashing (Ethereum-compatible) 103 | #[cfg(feature = "ethhash")] 104 | pub const HASH_MODE: u8 = 1; 105 | 106 | /// Returns the human-readable name for a hash mode identifier. 107 | pub const fn hash_mode_name(v: u8) -> &'static str { 108 | match v { 109 | 0 => "sha256", 110 | 1 => "keccak256", 111 | _ => "unknown", 112 | } 113 | } 114 | 115 | /// Branching factor identifier for branch factor 16 116 | pub const BRANCH_FACTOR: u8 = 16; 117 | } 118 | -------------------------------------------------------------------------------- /ffi/flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "crane": { 4 | "locked": { 5 | "lastModified": 1762538466, 6 | "narHash": "sha256-8zrIPl6J+wLm9MH5ksHcW7BUHo7jSNOu0/hA0ohOOaM=", 7 | "owner": "ipetkov", 8 | "repo": "crane", 9 | "rev": "0cea393fffb39575c46b7a0318386467272182fe", 10 | "type": "github" 11 | }, 12 | "original": { 13 | "owner": "ipetkov", 14 | "repo": "crane", 15 | "type": "github" 16 | } 17 | }, 18 | "flake-utils": { 19 | "inputs": { 20 | "systems": "systems" 21 | }, 22 | "locked": { 23 | "lastModified": 1731533236, 24 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 25 | "owner": "numtide", 26 | "repo": "flake-utils", 27 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "numtide", 32 | "repo": "flake-utils", 33 | "type": "github" 34 | } 35 | }, 36 | "golang": { 37 | "inputs": { 38 | "nixpkgs": "nixpkgs" 39 | }, 40 | "locked": { 41 | "dir": "nix/go", 42 | "lastModified": 1760973838, 43 
| "narHash": "sha256-UnngvRB45lUeWwot7cvB0MaedaQEQmcw+q8Y6WbeGtE=", 44 | "owner": "ava-labs", 45 | "repo": "avalanchego", 46 | "rev": "f10757d594eedf0f016bc1400739788c542f005f", 47 | "type": "github" 48 | }, 49 | "original": { 50 | "dir": "nix/go", 51 | "owner": "ava-labs", 52 | "ref": "f10757d594eedf0f016bc1400739788c542f005f", 53 | "repo": "avalanchego", 54 | "type": "github" 55 | } 56 | }, 57 | "nixpkgs": { 58 | "locked": { 59 | "lastModified": 1759735786, 60 | "narHash": "sha256-a0+h02lyP2KwSNrZz4wLJTu9ikujNsTWIC874Bv7IJ0=", 61 | "rev": "20c4598c84a671783f741e02bf05cbfaf4907cff", 62 | "revCount": 810859, 63 | "type": "tarball", 64 | "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2505.810859%2Brev-20c4598c84a671783f741e02bf05cbfaf4907cff/0199bc43-02e2-7036-8e2c-e43f6d6b4ede/source.tar.gz" 65 | }, 66 | "original": { 67 | "type": "tarball", 68 | "url": "https://flakehub.com/f/NixOS/nixpkgs/0.2505.%2A.tar.gz" 69 | } 70 | }, 71 | "nixpkgs_2": { 72 | "locked": { 73 | "lastModified": 1762498405, 74 | "narHash": "sha256-Zg/SCgCaAioc0/SVZQJxuECGPJy+OAeBcGeA5okdYDc=", 75 | "rev": "6faeb062ee4cf4f105989d490831713cc5a43ee1", 76 | "revCount": 812554, 77 | "type": "tarball", 78 | "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2505.812554%2Brev-6faeb062ee4cf4f105989d490831713cc5a43ee1/019a5f75-0159-79b8-b171-f9b6d2148da2/source.tar.gz" 79 | }, 80 | "original": { 81 | "type": "tarball", 82 | "url": "https://flakehub.com/f/NixOS/nixpkgs/0.2505.%2A.tar.gz" 83 | } 84 | }, 85 | "nixpkgs_3": { 86 | "locked": { 87 | "lastModified": 1744536153, 88 | "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", 89 | "owner": "NixOS", 90 | "repo": "nixpkgs", 91 | "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", 92 | "type": "github" 93 | }, 94 | "original": { 95 | "owner": "NixOS", 96 | "ref": "nixpkgs-unstable", 97 | "repo": "nixpkgs", 98 | "type": "github" 99 | } 100 | }, 101 | "root": { 102 | "inputs": { 103 | "crane": "crane", 104 | "flake-utils": 
"flake-utils", 105 | "golang": "golang", 106 | "nixpkgs": "nixpkgs_2", 107 | "rust-overlay": "rust-overlay" 108 | } 109 | }, 110 | "rust-overlay": { 111 | "inputs": { 112 | "nixpkgs": "nixpkgs_3" 113 | }, 114 | "locked": { 115 | "lastModified": 1762742448, 116 | "narHash": "sha256-XMxV0h13gg63s0sV6beihCIqdpcJhtbse6DHI743nvo=", 117 | "owner": "oxalica", 118 | "repo": "rust-overlay", 119 | "rev": "7f3556887e3375dc26ff1601b57c93ee286f2c5e", 120 | "type": "github" 121 | }, 122 | "original": { 123 | "owner": "oxalica", 124 | "repo": "rust-overlay", 125 | "type": "github" 126 | } 127 | }, 128 | "systems": { 129 | "locked": { 130 | "lastModified": 1681028828, 131 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 132 | "owner": "nix-systems", 133 | "repo": "default", 134 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 135 | "type": "github" 136 | }, 137 | "original": { 138 | "owner": "nix-systems", 139 | "repo": "default", 140 | "type": "github" 141 | } 142 | } 143 | }, 144 | "root": "root", 145 | "version": 7 146 | } 147 | -------------------------------------------------------------------------------- /triehash/benches/triehash.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Parity Technologies 2 | // 3 | // Licensed under the Apache License, Version 2.0 or the MIT license 5 | // , at your 6 | // option. This file may not be copied, modified, or distributed 7 | // except according to those terms. 8 | 9 | #![expect( 10 | clippy::arithmetic_side_effects, 11 | reason = "Found 5 occurrences after enabling the lint." 12 | )] 13 | #![expect( 14 | clippy::indexing_slicing, 15 | reason = "Found 1 occurrences after enabling the lint." 
16 | )] 17 | 18 | use criterion::{Criterion, criterion_group, criterion_main}; 19 | use ethereum_types::H256; 20 | use firewood_triehash::trie_root; 21 | use keccak_hasher::KeccakHasher; 22 | use tiny_keccak::{Hasher, Keccak}; 23 | use trie_standardmap::{Alphabet, StandardMap, ValueMode}; 24 | 25 | fn keccak256(input: &[u8]) -> [u8; 32] { 26 | let mut keccak256 = Keccak::v256(); 27 | let mut out = [0u8; 32]; 28 | keccak256.update(input); 29 | keccak256.finalize(&mut out); 30 | out 31 | } 32 | 33 | fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { 34 | assert!(min_count + diff_count <= 32); 35 | *seed = H256(keccak256(seed.as_bytes())); 36 | let r = min_count + (seed[31] as usize % (diff_count + 1)); 37 | let mut ret: Vec = Vec::with_capacity(r); 38 | for i in 0..r { 39 | ret.push(alphabet[seed[i] as usize % alphabet.len()]); 40 | } 41 | ret 42 | } 43 | 44 | fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { 45 | assert!(min_count + diff_count <= 32); 46 | *seed = H256(keccak256(seed.as_bytes())); 47 | let r = min_count + (seed[31] as usize % (diff_count + 1)); 48 | seed[0..r].to_vec() 49 | } 50 | 51 | fn random_value(seed: &mut H256) -> Vec { 52 | *seed = H256(keccak256(seed.as_bytes())); 53 | match seed[0] % 2 { 54 | 1 => vec![seed[31]; 1], 55 | _ => seed.as_bytes().to_vec(), 56 | } 57 | } 58 | 59 | fn bench_insertions(c: &mut Criterion) { 60 | c.bench_function("32_mir_1k", |b| { 61 | let st = StandardMap { 62 | alphabet: Alphabet::All, 63 | min_key: 32, 64 | journal_key: 0, 65 | value_mode: ValueMode::Mirror, 66 | count: 1000, 67 | }; 68 | let d = st.make(); 69 | b.iter(|| trie_root::(d.clone())); 70 | }); 71 | 72 | c.bench_function("32_ran_1k", |b| { 73 | let st = StandardMap { 74 | alphabet: Alphabet::All, 75 | min_key: 32, 76 | journal_key: 0, 77 | value_mode: ValueMode::Random, 78 | count: 1000, 79 | }; 80 | let d = st.make(); 81 | b.iter(|| trie_root::(d.clone())); 82 | }); 83 | 84 | 
c.bench_function("six_high", |b| { 85 | let mut d: Vec<(Vec, Vec)> = Vec::new(); 86 | let mut seed = H256::default(); 87 | for _ in 0..1000 { 88 | let k = random_bytes(6, 0, &mut seed); 89 | let v = random_value(&mut seed); 90 | d.push((k, v)); 91 | } 92 | b.iter(|| trie_root::(d.clone())); 93 | }); 94 | 95 | c.bench_function("six_mid", |b| { 96 | let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_"; 97 | let mut d: Vec<(Vec, Vec)> = Vec::new(); 98 | let mut seed = H256::default(); 99 | for _ in 0..1000 { 100 | let k = random_word(alphabet, 6, 0, &mut seed); 101 | let v = random_value(&mut seed); 102 | d.push((k, v)); 103 | } 104 | b.iter(|| trie_root::(d.clone())); 105 | }); 106 | 107 | c.bench_function("random_mid", |b| { 108 | let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_"; 109 | let mut d: Vec<(Vec, Vec)> = Vec::new(); 110 | let mut seed = H256::default(); 111 | for _ in 0..1000 { 112 | let k = random_word(alphabet, 1, 5, &mut seed); 113 | let v = random_value(&mut seed); 114 | d.push((k, v)); 115 | } 116 | b.iter(|| trie_root::(d.clone())); 117 | }); 118 | 119 | c.bench_function("six_low", |b| { 120 | let alphabet = b"abcdef"; 121 | let mut d: Vec<(Vec, Vec)> = Vec::new(); 122 | let mut seed = H256::default(); 123 | for _ in 0..1000 { 124 | let k = random_word(alphabet, 6, 0, &mut seed); 125 | let v = random_value(&mut seed); 126 | d.push((k, v)); 127 | } 128 | b.iter(|| trie_root::(d.clone())); 129 | }); 130 | } 131 | 132 | criterion_group!(benches, bench_insertions); 133 | criterion_main!(benches); 134 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # git-cliff ~ configuration file 2 | # https://git-cliff.org/docs/configuration 3 | 4 | 5 | [changelog] 6 | # A Tera template to be rendered as the changelog's footer. 
7 | # See https://keats.github.io/tera/docs/#introduction 8 | header = """ 9 | # Changelog\n 10 | All notable changes to this project will be documented in this file.\n 11 | """ 12 | # A Tera template to be rendered for each release in the changelog. 13 | # See https://keats.github.io/tera/docs/#introduction 14 | body = """ 15 | {% if version %}\ 16 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 17 | {% else %}\ 18 | ## [unreleased] 19 | {% endif %}\ 20 | {% for group, commits in commits | group_by(attribute="group") %} 21 | ### {{ group | striptags | trim | upper_first }} 22 | {% for commit in commits %} 23 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\ 24 | {% if commit.breaking %}[**breaking**] {% endif %}\ 25 | {{ commit.message | upper_first }}\ 26 | {% endfor %} 27 | {% endfor %}\n 28 | """ 29 | # A Tera template to be rendered as the changelog's footer. 30 | # See https://keats.github.io/tera/docs/#introduction 31 | footer = """ 32 | 33 | """ 34 | # Remove leading and trailing whitespaces from the changelog's body. 35 | trim = true 36 | # Render body even when there are no releases to process. 37 | render_always = true 38 | # An array of regex based postprocessors to modify the changelog. 39 | postprocessors = [ 40 | # Replace the placeholder with a URL. 41 | { pattern = '', replace = "https://github.com/ava-labs/firewood" }, 42 | ] 43 | # render body even when there are no releases to process 44 | # render_always = true 45 | # output file path 46 | # output = "test.md" 47 | 48 | [git] 49 | # Parse commits according to the conventional commits specification. 50 | # See https://www.conventionalcommits.org 51 | conventional_commits = true 52 | # Exclude commits that do not match the conventional commits specification. 53 | filter_unconventional = true 54 | # Require all commits to be conventional. 55 | # Takes precedence over filter_unconventional. 
56 | require_conventional = false 57 | # Split commits on newlines, treating each line as an individual commit. 58 | split_commits = false 59 | # An array of regex based parsers to modify commit messages prior to further processing. 60 | commit_preprocessors = [ 61 | # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. 62 | #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, 63 | # Check spelling of the commit message using https://github.com/crate-ci/typos. 64 | # If the spelling is incorrect, it will be fixed automatically. 65 | #{ pattern = '.*', replace_command = 'typos --write-changes -' }, 66 | # Squash merges end with the pull request number (e.g., `(#12345)`). Replace those with a link to the pull request. 67 | { pattern = '\(#([0-9]+)\)', replace = '([#${1}](/pull/${1}))' }, 68 | ] 69 | # Prevent commits that are breaking from being excluded by commit parsers. 70 | protect_breaking_commits = false 71 | # An array of regex based parsers for extracting data from the commit message. 72 | # Assigns commits to groups. 73 | # Optionally sets the commit's scope and can decide to exclude commits from further processing. 
74 | commit_parsers = [ 75 | { message = "^feat", group = "🚀 Features" }, 76 | { message = "^fix", group = "🐛 Bug Fixes" }, 77 | { message = "^doc", group = "📚 Documentation" }, 78 | { message = "^perf", group = "⚡ Performance" }, 79 | { message = "^refactor", group = "🚜 Refactor" }, 80 | { message = "^style", group = "🎨 Styling" }, 81 | { message = "^test", group = "🧪 Testing" }, 82 | { message = "^chore\\(release\\): prepare for", skip = true }, 83 | { message = "^chore\\(deps.*\\)", skip = true }, 84 | { message = "^chore\\(pr\\)", skip = true }, 85 | { message = "^chore\\(pull\\)", skip = true }, 86 | { message = "^chore|^ci", group = "⚙️ Miscellaneous Tasks" }, 87 | { body = ".*security", group = "🛡️ Security" }, 88 | { message = "^revert", group = "◀️ Revert" }, 89 | { message = ".*", group = "💼 Other" }, 90 | ] 91 | # Exclude commits that are not matched by any commit parser. 92 | filter_commits = false 93 | # An array of link parsers for extracting external references, and turning them into URLs, using regex. 94 | link_parsers = [] 95 | # Include only the tags that belong to the current branch. 96 | use_branch_tags = true 97 | # Order releases topologically instead of chronologically. 98 | topo_order = false 99 | # Order releases topologically instead of chronologically. 100 | topo_order_commits = true 101 | # Order of commits in each group/release within the changelog. 102 | # Allowed values: newest, oldest 103 | sort_commits = "oldest" 104 | # Process submodules commits 105 | recurse_submodules = false 106 | # Only process tags in this pattern 107 | tag_pattern = "v[0-9].*" 108 | -------------------------------------------------------------------------------- /ffi/generate_cgo.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 
3 | 4 | //go:build ignore 5 | 6 | // go generate script 7 | // 8 | // This script fixes up a go file to enable/disable the correct cgo directives, 9 | // tailored for use in firewood.go to eliminate linker warnings for production builds. 10 | // 11 | // It scans for blocks of cgo directives, using marker lines like this: 12 | // FIREWOOD_CGO_BEGIN_ 13 | // cgo line 1 14 | // ... 15 | // cgo line n 16 | // FIREWOOD_CGO_END_ 17 | // 18 | // FIREWOOD_LD_MODE is an environment variable that decides which blocks are activated. 19 | // The default value for FIREWOOD_LD_MODE is "LOCAL_LIBS" for local development. 20 | // When building production static libraries, FIREWOOD_LD_MODE is set to "STATIC_LIBS" 21 | // in the github actions workflow. 22 | // 23 | // The script enables CGO directives for the target mode and comments out CGO directives 24 | // that do not match. 25 | // 26 | // CGO directives are already comments and CGO does not allow interleaving regular 27 | // comments with CGO directives. To disable, we must double escape the CGO directives 28 | // with: 29 | // 30 | // // #cgo ... 31 | // 32 | // The go file may contain multiple such blocks, but nesting is not allowed. 33 | 34 | package main 35 | 36 | import ( 37 | "errors" 38 | "fmt" 39 | "log" 40 | "os" 41 | "strings" 42 | ) 43 | 44 | const ( 45 | defaultMode = "LOCAL_LIBS" 46 | ) 47 | 48 | var errGoFileNotSet = errors.New("GOFILE is not set") 49 | 50 | func main() { 51 | mode := getFirewoodLdMode() 52 | 53 | targetFile, err := getTargetFile() 54 | if err != nil { 55 | log.Fatalf("Error switching CGO mode to %s:\n%v", mode, err) 56 | } 57 | 58 | if err := changeCgoDirectivesForFile(mode, targetFile); err != nil { 59 | log.Fatalf("Error switching CGO mode to %s:\n%v", mode, err) 60 | } 61 | 62 | fmt.Printf("Successfully switched CGO directives to %s mode\n", mode) 63 | } 64 | 65 | // getFirewoodLdMode returns the FIREWOOD_LD_MODE environment variable. 66 | // Defaults to "LOCAL_LIBS". 
67 | func getFirewoodLdMode() string { 68 | mode, ok := os.LookupEnv("FIREWOOD_LD_MODE") 69 | if !ok { 70 | mode = "LOCAL_LIBS" 71 | } 72 | return mode 73 | } 74 | 75 | func getTargetFile() (string, error) { 76 | targetFile, ok := os.LookupEnv("GOFILE") 77 | if !ok { 78 | return "", errGoFileNotSet 79 | } 80 | return targetFile, nil 81 | } 82 | 83 | func changeCgoDirectivesForFile(targetMode string, targetFile string) error { 84 | originalFileContent, err := os.ReadFile(targetFile) 85 | if err != nil { 86 | return fmt.Errorf("failed to read %s: %w", targetFile, err) 87 | } 88 | 89 | fileLines := strings.Split(string(originalFileContent), "\n") 90 | 91 | // Initial state is "None" which does not process any lines 92 | currentBlockName := "None" 93 | for i, line := range fileLines { 94 | // process state transitions 95 | // if the line starts with "// FIREWOOD_CGO_BEGIN_", set the state to the text after the prefix 96 | if newBlockName, ok := strings.CutPrefix(line, "// // FIREWOOD_CGO_BEGIN_"); ok { 97 | if currentBlockName != "None" { 98 | return fmt.Errorf("[ERROR] %s:%d: nested CGO blocks not allowed (found %s after %s)", targetFile, i+1, newBlockName, currentBlockName) 99 | } 100 | currentBlockName = newBlockName 101 | continue 102 | } else if line == fmt.Sprintf("// // FIREWOOD_CGO_END_%s", currentBlockName) { 103 | currentBlockName = "None" 104 | continue 105 | } 106 | 107 | // If we are in a block, process the line 108 | if currentBlockName != "None" { 109 | if !isCGODirective(line) { 110 | return fmt.Errorf("[ERROR] %s:%d: invalid CGO directive in %s section:\n===\n%s\n===\n", targetFile, i+1, currentBlockName, line) 111 | } 112 | if currentBlockName == targetMode { 113 | fileLines[i] = activateCGOLine(fileLines[i]) 114 | } else { 115 | fileLines[i] = deactivateCGOLine(fileLines[i]) 116 | } 117 | } 118 | } 119 | 120 | if currentBlockName != "None" { 121 | return fmt.Errorf("[ERROR] %s: unterminated CGO block ended in %s", targetFile, currentBlockName) 122 | 
} 123 | 124 | // If the contents changed, write it back to the file 125 | newContents := strings.Join(fileLines, "\n") 126 | if newContents == string(originalFileContent) { 127 | fmt.Printf("[INFO] No changes needed to %s\n", targetFile) 128 | return nil 129 | } 130 | return os.WriteFile(targetFile, []byte(newContents), 0644) 131 | } 132 | 133 | func isCGODirective(line string) bool { 134 | trimmed := strings.TrimSpace(line) 135 | return strings.HasPrefix(trimmed, "// #cgo") || strings.HasPrefix(trimmed, "// // #cgo") 136 | } 137 | 138 | func activateCGOLine(line string) string { 139 | // Convert "// // #cgo" to "// #cgo" 140 | return strings.Replace(line, "// // #cgo", "// #cgo", 1) 141 | } 142 | func deactivateCGOLine(line string) string { 143 | // Convert "// #cgo" to "// // #cgo" (but not "// // #cgo" to "// // // #cgo") 144 | if strings.Contains(line, "// #cgo") && !strings.Contains(line, "// // #cgo") { 145 | return strings.Replace(line, "// #cgo", "// // #cgo", 1) 146 | } 147 | // Already deactivated 148 | return line 149 | } 150 | -------------------------------------------------------------------------------- /firewood/examples/insert.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. 2 | // See the file LICENSE.md for licensing terms. 3 | 4 | // This example isn't an actual benchmark, it's just an example of how to 5 | // insert some random keys using the front-end API. 
6 | 7 | use clap::Parser; 8 | use std::collections::HashMap; 9 | use std::error::Error; 10 | use std::num::NonZeroUsize; 11 | use std::ops::RangeInclusive; 12 | use std::time::Instant; 13 | 14 | use firewood::db::{BatchOp, Db, DbConfig}; 15 | use firewood::manager::RevisionManagerConfig; 16 | use firewood::v2::api::{Db as _, DbView, KeyType, Proposal as _, ValueType}; 17 | use rand::{Rng, distr::Alphanumeric}; 18 | 19 | #[derive(Parser, Debug)] 20 | struct Args { 21 | #[arg(short, long, default_value = "1-64", value_parser = string_to_range)] 22 | keylen: RangeInclusive, 23 | #[arg(short, long, default_value = "32", value_parser = string_to_range)] 24 | valuelen: RangeInclusive, 25 | #[arg(short, long, default_value_t = 1)] 26 | batch_size: usize, 27 | #[arg(short, long, default_value_t = 100)] 28 | number_of_batches: usize, 29 | #[arg(short = 'p', long, default_value_t = 0, value_parser = clap::value_parser!(u16).range(0..=100))] 30 | read_verify_percent: u16, 31 | #[arg(short, long)] 32 | seed: Option, 33 | #[arg(short, long, default_value_t = NonZeroUsize::new(20480).expect("is non-zero"))] 34 | cache_size: NonZeroUsize, 35 | #[arg(short, long, default_value_t = true)] 36 | truncate: bool, 37 | #[arg(short, long, default_value_t = 128)] 38 | revisions: usize, 39 | } 40 | 41 | fn string_to_range(input: &str) -> Result, Box> { 42 | //::Err> { 43 | let parts: Vec<&str> = input.split('-').collect(); 44 | #[expect(clippy::indexing_slicing)] 45 | match parts.len() { 46 | 1 => Ok(input.parse()?..=input.parse()?), 47 | 2 => Ok(parts[0].parse()?..=parts[1].parse()?), 48 | _ => Err("Too many dashes in input string".into()), 49 | } 50 | } 51 | 52 | /// cargo run --release --example insert 53 | fn main() -> Result<(), Box> { 54 | let args = Args::parse(); 55 | 56 | let mgrcfg = RevisionManagerConfig::builder() 57 | .node_cache_size(args.cache_size) 58 | .max_revisions(args.revisions) 59 | .build(); 60 | let cfg = DbConfig::builder() 61 | .truncate(args.truncate) 62 | 
.manager(mgrcfg) 63 | .build(); 64 | 65 | let db = Db::new("firewood", cfg).expect("db initiation should succeed"); 66 | 67 | let keys = args.batch_size; 68 | let start = Instant::now(); 69 | 70 | let rng = &firewood_storage::SeededRng::from_option(args.seed); 71 | 72 | for _ in 0..args.number_of_batches { 73 | let keylen = rng.random_range(args.keylen.clone()); 74 | let valuelen = rng.random_range(args.valuelen.clone()); 75 | let batch = (0..keys) 76 | .map(|_| { 77 | ( 78 | rng.sample_iter(&Alphanumeric) 79 | .take(keylen) 80 | .collect::>(), 81 | rng.sample_iter(&Alphanumeric) 82 | .take(valuelen) 83 | .collect::>(), 84 | ) 85 | }) 86 | .map(|(key, value)| BatchOp::Put { key, value }) 87 | .collect::>(); 88 | 89 | let verify = get_keys_to_verify(rng, &batch, args.read_verify_percent); 90 | 91 | #[expect(clippy::unwrap_used)] 92 | let proposal = db.propose(batch.clone()).unwrap(); 93 | proposal.commit()?; 94 | verify_keys(&db, verify)?; 95 | } 96 | 97 | let duration = start.elapsed(); 98 | println!( 99 | "Generated and inserted {} batches of size {keys} in {duration:?}", 100 | args.number_of_batches 101 | ); 102 | 103 | Ok(()) 104 | } 105 | 106 | fn get_keys_to_verify<'a, K: KeyType + 'a, V: ValueType + 'a>( 107 | rng: &firewood_storage::SeededRng, 108 | batch: impl IntoIterator>, 109 | pct: u16, 110 | ) -> HashMap<&'a [u8], &'a [u8]> { 111 | if pct == 0 { 112 | HashMap::new() 113 | } else { 114 | batch 115 | .into_iter() 116 | .filter(|_last_key| rng.random_range(0..=100u16.saturating_sub(pct)) == 0) 117 | .map(|op| { 118 | if let BatchOp::Put { key, value } = op { 119 | (key.as_ref(), value.as_ref()) 120 | } else { 121 | unreachable!() 122 | } 123 | }) 124 | .collect() 125 | } 126 | } 127 | 128 | fn verify_keys( 129 | db: &impl firewood::v2::api::Db, 130 | verify: HashMap<&[u8], &[u8]>, 131 | ) -> Result<(), firewood::v2::api::Error> { 132 | if !verify.is_empty() { 133 | let hash = db.root_hash()?.expect("root hash should exist"); 134 | let revision = 
db.revision(hash)?; 135 | for (key, value) in verify { 136 | assert_eq!(Some(value), revision.val(key)?.as_deref()); 137 | } 138 | } 139 | Ok(()) 140 | } 141 | -------------------------------------------------------------------------------- /firewood-macros/README.md: -------------------------------------------------------------------------------- 1 | # Firewood Macros 2 | 3 | A Rust procedural macro crate providing zero-allocation metrics instrumentation for the Firewood database. 4 | 5 | ## Overview 6 | 7 | This crate provides the `#[metrics]` attribute macro that automatically instruments functions with performance metrics collection. The macro is designed for high-performance applications where allocation overhead during metrics collection is unacceptable. 8 | 9 | ## Features 10 | 11 | - **Zero Runtime Allocations**: Uses compile-time string concatenation and static label arrays 12 | - **Automatic Timing**: Measures function execution time with microsecond precision 13 | - **Success/Failure Tracking**: Automatically labels metrics based on `Result` return values 14 | - **Metric Descriptions**: Optional human-readable descriptions for better observability 15 | - **Compile-time Validation**: Ensures functions return `Result` types 16 | 17 | ## Usage 18 | 19 | Add the dependency to your `Cargo.toml`: 20 | 21 | ```toml 22 | [dependencies] 23 | firewood-macros.workspace = true 24 | metrics = "0.24" 25 | coarsetime = "0.1" 26 | ``` 27 | 28 | ### Basic Usage 29 | 30 | ```rust 31 | use firewood_macros::metrics; 32 | 33 | #[metrics("firewood.example")] 34 | fn example() -> Result, DatabaseError> { 35 | // Your function implementation 36 | Ok(vec![]) 37 | } 38 | ``` 39 | 40 | ### With Description 41 | 42 | ```rust 43 | #[metrics("firewood.example", "example operation")] 44 | fn example(user: User) -> Result<(), DatabaseError> { 45 | // Your function implementation 46 | Ok(()) 47 | } 48 | ``` 49 | 50 | ## Generated Metrics 51 | 52 | For each instrumented function, the 
macro generates two metrics: 53 | 54 | 1. **Count Metric** (base name): Tracks the number of function calls 55 | 2. **Timing Metric** (base name + "_ms"): Tracks execution time in milliseconds 56 | 57 | Both metrics include a `success` label: 58 | 59 | - `success="true"` for `Ok(_)` results 60 | - `success="false"` for `Err(_)` results 61 | 62 | ### Example Output 63 | 64 | For `#[metrics("firewood.query", "data retrieval")]`: 65 | 66 | - `firewood.example{success="true"}` - Count of successful queries 67 | - `firewood.example{success="false"}` - Count of failed queries 68 | - `firewood.example_ms{success="true"}` - Timing of successful queries 69 | - `firewood.example_ms{success="false"}` - Timing of failed queries 70 | 71 | ## Requirements 72 | 73 | - Functions must return a `Result` type 74 | - The `metrics` and `coarsetime` crates must be available in scope 75 | - Rust 1.70+ (for `is_some_and` method) 76 | 77 | ## Performance Characteristics 78 | 79 | ### Zero Allocations 80 | 81 | The macro generates code that avoids all runtime allocations: 82 | 83 | ```rust 84 | // Static label arrays (no allocation) 85 | static __METRICS_LABELS_SUCCESS: &[(&str, &str)] = &[("success", "true")]; 86 | static __METRICS_LABELS_ERROR: &[(&str, &str)] = &[("success", "false")]; 87 | 88 | // Compile-time string concatenation (no allocation) 89 | metrics::counter!(concat!("my.metric", "_ms"), labels) 90 | ``` 91 | 92 | ### Minimal Overhead 93 | 94 | - Single timestamp capture at function start, using the coarsetime crate, which is known to be extremely fast 95 | - Branch-free label selection based on `Result::is_err()` 96 | - Direct counter increments without intermediate allocations 97 | 98 | ## Implementation Details 99 | 100 | ### Code Generation 101 | 102 | The macro transforms this: 103 | 104 | ```rust 105 | #[metrics("my.operation")] 106 | fn my_function() -> Result { 107 | Ok("result".to_string()) 108 | } 109 | ``` 110 | 111 | Into approximately this: 112 | 113 | ```rust 114 
| fn my_function() -> Result { 115 | // Register metrics (once per process) 116 | static __METRICS_REGISTERED: std::sync::Once = std::sync::Once::new(); 117 | __METRICS_REGISTERED.call_once(|| { 118 | metrics::describe_counter!("my.operation", "Operation counter"); 119 | metrics::describe_counter!(concat!("my.operation", "_ms"), "Operation timing"); 120 | }); 121 | 122 | // Start timing 123 | let __metrics_start = coarsetime::Instant::now(); 124 | 125 | // Execute original function 126 | let __metrics_result = (|| { 127 | Ok("result".to_string()) 128 | })(); 129 | 130 | // Record metrics 131 | static __METRICS_LABELS_SUCCESS: &[(&str, &str)] = &[("success", "true")]; 132 | static __METRICS_LABELS_ERROR: &[(&str, &str)] = &[("success", "false")]; 133 | let __metrics_labels = if __metrics_result.is_err() { 134 | __METRICS_LABELS_ERROR 135 | } else { 136 | __METRICS_LABELS_SUCCESS 137 | }; 138 | 139 | metrics::counter!("my.operation", __metrics_labels).increment(1); 140 | metrics::counter!(concat!("my.operation", "_ms"), __metrics_labels) 141 | .increment(__metrics_start.elapsed().as_millis()); 142 | 143 | __metrics_result 144 | } 145 | ``` 146 | 147 | ## Testing 148 | 149 | The crate includes comprehensive tests: 150 | 151 | ```bash 152 | cargo nextest -p firewood-macros 153 | ``` 154 | 155 | ## License 156 | 157 | This crate is part of the Firewood project and follows the same licensing terms. 158 | See LICENSE.md at the top level for details. 159 | --------------------------------------------------------------------------------