├── .config └── nextest.toml ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md ├── scripts │ └── install_test_binaries.sh └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── cliff.toml ├── clippy.toml ├── deny.toml ├── release.toml ├── rustfmt.toml ├── scripts ├── changelog.sh └── check_no_std.sh ├── src ├── backend.rs ├── cache.rs ├── error.rs └── lib.rs └── test-data └── storage.json /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [profile.default] 2 | retries = { backoff = "exponential", count = 2, delay = "2s", jitter = true } 3 | slow-timeout = { period = "30s", terminate-after = 4 } 4 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @danipopes @mattsse @klkvr 2 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 12 | 13 | 14 | 15 | ## Motivation 16 | 17 | 22 | 23 | ## Solution 24 | 25 | 29 | 30 | ## PR Checklist 31 | 32 | - [ ] Added Tests 33 | - [ ] Added Documentation 34 | - [ ] Breaking changes 35 | -------------------------------------------------------------------------------- /.github/scripts/install_test_binaries.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Installs Solc and Geth binaries 3 | # Note: intended for use only with CI (x86_64 Ubuntu, MacOS or Windows) 4 | set -e 5 | 6 | GETH_BUILD=${GETH_BUILD:-"1.11.2-73b01f40"} 7 | 8 | BIN_DIR=${BIN_DIR:-"$HOME/bin"} 9 | 10 | PLATFORM="$(uname -s | awk '{print tolower($0)}')" 11 | if [ "$PLATFORM" != "linux" ] && [ "$PLATFORM" != "darwin" ]; then 12 | EXT=".exe" 13 | fi 14 | 15 | main() { 16 | mkdir -p "$BIN_DIR" 17 | cd "$BIN_DIR" 18 | export PATH="$BIN_DIR:$PATH" 19 | if [ "$GITHUB_PATH" ]; then 20 | echo "$BIN_DIR" >> "$GITHUB_PATH" 21 | fi 22 | 23 | install_geth 24 | 25 | echo "" 26 | echo "Installed Geth:" 27 | geth version 28 | } 29 | 30 | # Installs geth from https://geth.ethereum.org/downloads 31 | install_geth() { 32 | case "$PLATFORM" in 33 | linux|darwin) 34 | name="geth-$PLATFORM-amd64-$GETH_BUILD" 35 | curl -s "https://gethstore.blob.core.windows.net/builds/$name.tar.gz" | tar -xzf - 36 | mv -f "$name/geth" ./ 37 | rm -rf "$name" 38 | chmod +x geth 39 | ;; 40 | *) 41 | name="geth-windows-amd64-$GETH_BUILD" 42 | zip="$name.zip" 43 | curl -so "$zip" "https://gethstore.blob.core.windows.net/builds/$zip" 44 | unzip "$zip" 45 | mv -f "$name/geth.exe" ./ 46 | rm -rf "$name" "$zip" 47 | ;; 48 | esac 49 | } 50 | 51 | main 52 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | test: 17 | name: test ${{ matrix.rust }} ${{ matrix.flags }} 18 | runs-on: ubuntu-latest 19 | timeout-minutes: 30 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | rust: ["stable", "nightly", "1.83"] # MSRV 24 | flags: 
["--no-default-features", "", "--all-features"] 25 | exclude: 26 | # Some features have higher MSRV. 27 | - rust: "1.83" # MSRV 28 | flags: "--all-features" 29 | steps: 30 | - uses: actions/checkout@v3 31 | - uses: dtolnay/rust-toolchain@master 32 | with: 33 | toolchain: ${{ matrix.rust }} 34 | - name: Install Anvil 35 | uses: foundry-rs/foundry-toolchain@v1 36 | with: 37 | version: nightly 38 | - name: Install test binaries 39 | shell: bash 40 | run: ./.github/scripts/install_test_binaries.sh 41 | - uses: Swatinem/rust-cache@v2 42 | with: 43 | cache-on-failure: true 44 | # Only run tests on latest stable and above 45 | - name: Install cargo-nextest 46 | if: ${{ matrix.rust != '1.83' }} # MSRV 47 | uses: taiki-e/install-action@nextest 48 | - name: build 49 | if: ${{ matrix.rust == '1.83' }} # MSRV 50 | run: cargo build --workspace ${{ matrix.flags }} 51 | - name: test 52 | if: ${{ matrix.rust != '1.83' }} # MSRV 53 | run: cargo nextest run --workspace ${{ matrix.flags }} 54 | 55 | doctest: 56 | runs-on: ubuntu-latest 57 | timeout-minutes: 30 58 | steps: 59 | - uses: actions/checkout@v4 60 | - uses: dtolnay/rust-toolchain@stable 61 | - uses: Swatinem/rust-cache@v2 62 | with: 63 | cache-on-failure: true 64 | - run: cargo test --workspace --doc 65 | - run: cargo test --all-features --workspace --doc 66 | 67 | no-std: 68 | runs-on: ubuntu-latest 69 | timeout-minutes: 30 70 | steps: 71 | - uses: actions/checkout@v3 72 | - uses: dtolnay/rust-toolchain@stable 73 | with: 74 | target: riscv32imac-unknown-none-elf 75 | - uses: taiki-e/install-action@cargo-hack 76 | - uses: Swatinem/rust-cache@v2 77 | with: 78 | cache-on-failure: true 79 | - name: check 80 | run: ./scripts/check_no_std.sh 81 | 82 | feature-checks: 83 | runs-on: ubuntu-latest 84 | timeout-minutes: 30 85 | steps: 86 | - uses: actions/checkout@v3 87 | - uses: dtolnay/rust-toolchain@stable 88 | - uses: taiki-e/install-action@cargo-hack 89 | - uses: Swatinem/rust-cache@v2 90 | with: 91 | cache-on-failure: true 92 | - name: cargo hack 93 | run: cargo hack check --feature-powerset --depth 1 94 | 95 | clippy: 96 | runs-on: ubuntu-latest 97 | timeout-minutes: 30 98 | steps: 99 | - uses: actions/checkout@v4 100 | - uses: dtolnay/rust-toolchain@master 101 | with: 102 | toolchain: stable 103 | components: clippy 104 | - uses: Swatinem/rust-cache@v2 105 | with: 106 | cache-on-failure: true 107 | - run: cargo +stable clippy --workspace --all-targets --all-features 108 | env: 109 | RUSTFLAGS: -Dwarnings 110 | 111 | docs: 112 | runs-on: ubuntu-latest 113 | timeout-minutes: 30 114 | steps: 115 | - uses: actions/checkout@v3 116 | - uses: dtolnay/rust-toolchain@nightly 117 | - uses: Swatinem/rust-cache@v2 118 | with: 119 | cache-on-failure: true 120 | - run: cargo doc --workspace --all-features --no-deps --document-private-items 121 | env: 122 | RUSTDOCFLAGS: --cfg docsrs -D warnings --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options 123 | 124 | fmt: 125 | runs-on: ubuntu-latest 126 | timeout-minutes: 30 127 | steps: 128 | - uses: actions/checkout@v3 129 | - uses: dtolnay/rust-toolchain@nightly 130 | with: 131 | components: rustfmt 132 | - run: cargo fmt --all --check 133 | 134 | deny: 135 | uses: ithacaxyz/ci/.github/workflows/deny.yml@main 136 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | .vscode 4 | .idea 5 | .env 6 | .DS_Store 7 | 
-------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [0.15.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.15.0) - 2025-05-23 9 | 10 | ### Dependencies 11 | 12 | - Bump revm to 24.0.0 ([#50](https://github.com/foundry-rs/foundry-fork-db/issues/50)) 13 | 14 | ## [0.14.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.14.0) - 2025-05-15 15 | 16 | ### Miscellaneous Tasks 17 | 18 | - Release 0.14.0 19 | - Alloy 1.0 ([#49](https://github.com/foundry-rs/foundry-fork-db/issues/49)) 20 | 21 | ## [0.13.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.13.0) - 2025-05-08 22 | 23 | ### Dependencies 24 | 25 | - [deps] Alloy 0.15 ([#46](https://github.com/foundry-rs/foundry-fork-db/issues/46)) 26 | 27 | ### Features 28 | 29 | - Bump revm to `21.0.0` and alloy to `0.13.0` ([#44](https://github.com/foundry-rs/foundry-fork-db/issues/44)) 30 | 31 | ### Miscellaneous Tasks 32 | 33 | - Release 0.13.0 34 | 35 | ## [0.12.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.12.0) - 2025-03-07 36 | 37 | ### Dependencies 38 | 39 | - [deps] Alloy 0.12 ([#43](https://github.com/foundry-rs/foundry-fork-db/issues/43)) 40 | 41 | ### Miscellaneous Tasks 42 | 43 | - Release 0.12.0 44 | 45 | ## [0.11.1](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.11.1) - 2025-02-18 46 | 47 | ### Features 48 | 49 | - Expose cache_path for JsonBlockCacheDB ([#42](https://github.com/foundry-rs/foundry-fork-db/issues/42)) 50 | 51 | ### Miscellaneous Tasks 52 | 53 | - Release 0.11.1 54 | 55 | ## [0.11.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.11.0) - 2025-01-31 56 | 57 | ### Dependencies 58 | 59 | - Bump alloy 0.11 ([#41](https://github.com/foundry-rs/foundry-fork-db/issues/41)) 60 | 61 | ### Miscellaneous Tasks 62 | 63 | - Release 0.11.0 64 | 65 | ## [0.10.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.10.0) - 2024-12-30 66 | 67 | ### Features 68 | 69 | - Update revm 19 alloy 09 ([#39](https://github.com/foundry-rs/foundry-fork-db/issues/39)) 70 | 71 | ### Miscellaneous Tasks 72 | 73 | - Release 0.10.0 74 | 75 | ## [0.9.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.9.0) - 2024-12-10 76 | 77 | ### Dependencies 78 | 79 | - Bump alloy 0.8 ([#38](https://github.com/foundry-rs/foundry-fork-db/issues/38)) 80 | - Bump MSRV to 1.81 ([#37](https://github.com/foundry-rs/foundry-fork-db/issues/37)) 81 | - Bump breaking deps ([#36](https://github.com/foundry-rs/foundry-fork-db/issues/36)) 82 | 83 | ### Miscellaneous Tasks 84 | 85 | - Release 0.9.0 86 | - Update deny.toml ([#35](https://github.com/foundry-rs/foundry-fork-db/issues/35)) 87 | 88 | ### Other 89 | 90 | - Move deny to ci ([#34](https://github.com/foundry-rs/foundry-fork-db/issues/34)) 91 | 92 | ## [0.8.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.8.0) - 2024-11-28 93 | 94 | ### Dependencies 95 | 96 | - Bump alloy ([#33](https://github.com/foundry-rs/foundry-fork-db/issues/33)) 97 | 98 | ### Miscellaneous Tasks 99 | 100 | - Release 0.8.0 101 | 102 | ## [0.7.2](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.7.2) - 2024-11-27 
103 | 104 | ### Documentation 105 | 106 | - Fix typo in changelog generator 2 107 | - Fix typo in changelog generator 108 | 109 | ### Features 110 | 111 | - [backend] Add support for arbitrary provider requests with AnyRequest ([#32](https://github.com/foundry-rs/foundry-fork-db/issues/32)) 112 | 113 | ### Miscellaneous Tasks 114 | 115 | - Release 0.7.2 116 | 117 | ## [0.7.1](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.7.1) - 2024-11-09 118 | 119 | ### Bug Fixes 120 | 121 | - Accept generic header in meta builder ([#30](https://github.com/foundry-rs/foundry-fork-db/issues/30)) 122 | 123 | ### Miscellaneous Tasks 124 | 125 | - Release 0.7.1 126 | 127 | ## [0.7.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.7.0) - 2024-11-08 128 | 129 | ### Dependencies 130 | 131 | - [deps] Bump alloy 0.6.2 ([#29](https://github.com/foundry-rs/foundry-fork-db/issues/29)) 132 | 133 | ### Documentation 134 | 135 | - Update docs 136 | 137 | ### Miscellaneous Tasks 138 | 139 | - Release 0.7.0 140 | 141 | ## [0.6.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.6.0) - 2024-10-23 142 | 143 | ### Dependencies 144 | 145 | - Bump revm ([#27](https://github.com/foundry-rs/foundry-fork-db/issues/27)) 146 | 147 | ### Miscellaneous Tasks 148 | 149 | - Release 0.6.0 150 | 151 | ## [0.5.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.5.0) - 2024-10-18 152 | 153 | ### Dependencies 154 | 155 | - Bump alloy 0.5 ([#26](https://github.com/foundry-rs/foundry-fork-db/issues/26)) 156 | 157 | ### Miscellaneous Tasks 158 | 159 | - Release 0.5.0 160 | 161 | ## [0.4.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.4.0) - 2024-09-30 162 | 163 | ### Dependencies 164 | 165 | - Bump alloy 0.4 ([#24](https://github.com/foundry-rs/foundry-fork-db/issues/24)) 166 | 167 | ### Miscellaneous Tasks 168 | 169 | - Release 0.4.0 170 | 171 | ## [0.3.2](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.3.2) - 2024-09-29 172 | 173 | ### Features 174 | 175 | - BlockchainDbMeta builder ([#22](https://github.com/foundry-rs/foundry-fork-db/issues/22)) 176 | 177 | ### Miscellaneous Tasks 178 | 179 | - Release 0.3.2 180 | - Use more alloy_primitives::map 181 | 182 | ## [0.3.1](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.3.1) - 2024-09-21 183 | 184 | ### Dependencies 185 | 186 | - [deps] Disable default features for revm ([#20](https://github.com/foundry-rs/foundry-fork-db/issues/20)) 187 | 188 | ### Miscellaneous Tasks 189 | 190 | - Release 0.3.1 191 | 192 | ### Other 193 | 194 | - Don't deploy docs 195 | 196 | ## [0.3.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.3.0) - 2024-08-29 197 | 198 | ### Bug Fixes 199 | 200 | - Fix fmt 201 | 202 | ### Dependencies 203 | 204 | - Merge pull request [#19](https://github.com/foundry-rs/foundry-fork-db/issues/19) from foundry-rs/matt/bump-alloy03 205 | - Bump alloy 206 | 207 | ### Miscellaneous Tasks 208 | 209 | - Release 0.3.0 210 | 211 | ### Other 212 | 213 | - Update 214 | - Merge pull request [#18](https://github.com/foundry-rs/foundry-fork-db/issues/18) from nkysg/unbound_channel 215 | - Rm clone 216 | - Replace bounded channel with unbounded channel 217 | 218 | ## [0.2.1](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.2.1) - 2024-08-08 219 | 220 | ### Bug Fixes 221 | 222 | - Fix clippy 223 | - Fix-tests after checking 224 | 225 | ### Dependencies 226 | 227 | - Merge pull request [#17](https://github.com/foundry-rs/foundry-fork-db/issues/17) from 
foundry-rs/matt/bump-revm13 228 | - Bump revm 13 229 | - Undo bump version 230 | - Bump version of crate 231 | - Merge bump-revm 232 | 233 | ### Documentation 234 | 235 | - Docs to functions 236 | - Docs 237 | 238 | ### Miscellaneous Tasks 239 | 240 | - Release 0.2.1 241 | 242 | ### Other 243 | 244 | - Merge pull request [#16](https://github.com/foundry-rs/foundry-fork-db/issues/16) from m1stoyanov/patch-1 245 | - Remove the unnecessary result from the helper functions 246 | - Provide helper methods for MemDb data 247 | - Merge pull request [#13](https://github.com/foundry-rs/foundry-fork-db/issues/13) from nkysg/sharedbackend_behaviour 248 | - Update process logic 249 | - Add BlockingMod::Block process 250 | - add configure for SharedBackend block_in_place or not 251 | - Merge pull request [#10](https://github.com/foundry-rs/foundry-fork-db/issues/10) from Ethanol48/update_state 252 | - Eliminated tmp ETH_RPC 253 | - Added tmp file for testing 254 | - Eliminate reduntant code 255 | - Add tests to verify if the data was properly updated 256 | - Added db to test to verify data 257 | - Add minor changes 258 | - Update block hashes 259 | - Typo 260 | - Update address in db 261 | - Update revm 262 | - Merge pull request [#12](https://github.com/foundry-rs/foundry-fork-db/issues/12) from Ethanol48/flush_to_file 263 | - Change to &Path 264 | - Eliminate reduntant code 265 | - Merge branch 'main' of https://github.com/Ethanol48/foundry-fork-db into flush_to_file 266 | 267 | ### Refactor 268 | 269 | - Refactor and storage update 270 | - Refactoring 271 | 272 | ## [0.2.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.2.0) - 2024-07-17 273 | 274 | ### Dependencies 275 | 276 | - Merge pull request [#8](https://github.com/foundry-rs/foundry-fork-db/issues/8) from foundry-rs/klkvr/bump-revm 277 | - Bump revm 278 | - Merge pull request [#7](https://github.com/foundry-rs/foundry-fork-db/issues/7) from foundry-rs/matt/bump-revm-alloy 279 | - Bump alloy and revm 280 | 281 | ### Miscellaneous Tasks 282 | 283 | - Release 0.2.0 284 | 285 | ### Other 286 | 287 | - Formating 288 | - Add documentation 289 | - Add flush to arbitrary file 290 | 291 | ## [0.1.1](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.1.1) - 2024-07-15 292 | 293 | ### Dependencies 294 | 295 | - Merge pull request [#5](https://github.com/foundry-rs/foundry-fork-db/issues/5) from foundry-rs/matt/bump-msrv 296 | - Bump msrv 79 297 | - Merge pull request [#4](https://github.com/foundry-rs/foundry-fork-db/issues/4) from m1stoyanov/main 298 | - Bump alloy [provider, rpc-types, serde, transport, rpc-client, transport-http] to 0.1.4, alloy-primitives to 0.7.7 and revm to 11.0.0 299 | 300 | ### Miscellaneous Tasks 301 | 302 | - Release 0.1.1 303 | 304 | ### Other 305 | 306 | - Remove redundant check 307 | - Update Cargo.toml according to the reviews 308 | 309 | ## [0.1.0](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v0.1.0) - 2024-07-02 310 | 311 | ### Bug Fixes 312 | 313 | - Clippy 314 | - Cargo deny 315 | - Clippy + fmt 316 | - Tests 317 | 318 | ### Miscellaneous Tasks 319 | 320 | - Release 0.1.0 321 | - Init changelog 322 | - Fix cliff.toml 323 | - Add description 324 | 325 | ### Other 326 | 327 | - Update naming ([#2](https://github.com/foundry-rs/foundry-fork-db/issues/2)) 328 | - Merge pull request [#1](https://github.com/foundry-rs/foundry-fork-db/issues/1) from klkvr/klkvr/init 329 | - DatabaseError -> BackendError 330 | - Initial commit 331 | - Update readme 332 | - Update name 333 | - Initial 
commit 334 | 335 | 336 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contributing to Foundry 2 | 3 | Thanks for your interest in improving Foundry! 4 | 5 | There are multiple opportunities to contribute at any level. It doesn't matter if you are just getting started with Rust or are the most weathered expert, we can use your help. 6 | 7 | This document will help you get started. **Do not let the document intimidate you**. 8 | It should be considered as a guide to help you navigate the process. 9 | 10 | The [dev Telegram][dev-tg] is available for any concerns you may have that are not covered in this guide. 11 | 12 | ### Code of Conduct 13 | 14 | The Foundry project adheres to the [Rust Code of Conduct][rust-coc]. This code of conduct describes the _minimum_ behavior expected from all contributors. 15 | 16 | Instances of violations of the Code of Conduct can be reported by contacting the team at [me@gakonst.com](mailto:me@gakonst.com). 17 | 18 | ### Ways to contribute 19 | 20 | There are fundamentally three ways an individual can contribute: 21 | 22 | 1. **By opening an issue:** For example, if you believe that you have uncovered a bug 23 | in Foundry, creating a new issue in the issue tracker is the way to report it. 24 | 2. **By adding context:** Providing additional context to existing issues, 25 | such as screenshots and code snippets, which help resolve issues. 26 | 3. **By resolving issues:** Typically this is done in the form of either 27 | demonstrating that the issue reported is not a problem after all, or more often, 28 | by opening a pull request that fixes the underlying problem, in a concrete and 29 | reviewable manner. 30 | 31 | **Anybody can participate in any stage of contribution**. We urge you to participate in the discussion 32 | around bugs and participate in reviewing PRs. 33 | 34 | ### Contributions Related to Spelling and Grammar 35 | 36 | At this time, we will not be accepting contributions that only fix spelling or grammatical errors in documentation, code or 37 | elsewhere. 38 | 39 | ### Asking for help 40 | 41 | If you have reviewed existing documentation and still have questions, or you are having problems, you can get help in the following ways: 42 | 43 | - **Asking in the support Telegram:** The [Foundry Support Telegram][support-tg] is a fast and easy way to ask questions. 44 | - **Opening a discussion:** This repository comes with a discussions board where you can also ask for help. Click the "Discussions" tab at the top. 45 | 46 | As Foundry is still in heavy development, the documentation can be a bit scattered. 47 | The [Foundry Book][foundry-book] is our current best-effort attempt at keeping up-to-date information. 48 | 49 | ### Submitting a bug report 50 | 51 | When filing a new bug report in the issue tracker, you will be presented with a basic form to fill out. 52 | 53 | If you believe that you have uncovered a bug, please fill out the form to the best of your ability. Do not worry if you cannot answer every detail; just fill in what you can. Contributors will ask follow-up questions if something is unclear.
54 | 55 | The most important pieces of information we need in a bug report are: 56 | 57 | - The Foundry version you are on (and that it is up to date) 58 | - The platform you are on (Windows, macOS, an M1 Mac or Linux) 59 | - Code snippets if this is happening in relation to testing or building code 60 | - Concrete steps to reproduce the bug 61 | 62 | In order to rule out the possibility of the bug being in your project, the code snippets should be as minimal 63 | as possible. It is better if you can reproduce the bug with a small snippet as opposed to an entire project! 64 | 65 | See [this guide][mcve] on how to create a minimal, complete, and verifiable example. 66 | 67 | ### Submitting a feature request 68 | 69 | When adding a feature request in the issue tracker, you will be presented with a basic form to fill out. 70 | 71 | Please include as detailed of an explanation as possible of the feature you would like, adding additional context if necessary. 72 | 73 | If you have examples of other tools that have the feature you are requesting, please include them as well. 74 | 75 | ### Resolving an issue 76 | 77 | Pull requests are the way concrete changes are made to the code, documentation, and dependencies of Foundry. 78 | 79 | Even minor pull requests, such as those fixing wording, are greatly appreciated. Before making a large change, it is usually 80 | a good idea to first open an issue describing the change to solicit feedback and guidance. This will increase 81 | the likelihood of the PR getting merged. 82 | 83 | Please also make sure that the following commands pass if you have changed the code: 84 | 85 | ```sh 86 | cargo check --all 87 | cargo test --all --all-features 88 | cargo +nightly fmt -- --check 89 | cargo +nightly clippy --all --all-targets --all-features -- -D warnings 90 | ``` 91 | 92 | If you are working in VSCode, we recommend you install the [rust-analyzer](https://rust-analyzer.github.io/) extension, and use the following VSCode user settings: 93 | 94 | ```json 95 | "editor.formatOnSave": true, 96 | "rust-analyzer.rustfmt.extraArgs": ["+nightly"], 97 | "[rust]": { 98 | "editor.defaultFormatter": "rust-lang.rust-analyzer" 99 | } 100 | ``` 101 | 102 | If you are working on a larger feature, we encourage you to open up a draft pull request, to make sure that other contributors are not duplicating work. 103 | 104 | If you would like to test the binaries built from your change, see [foundryup](https://github.com/foundry-rs/foundry/tree/master/foundryup). 105 | 106 | If you would like to use a debugger with breakpoints to debug a patch you might be working on, keep in mind we currently strip debug info for faster builds, which is *not* the default. Therefore, to use a debugger, you need to enable it on the workspace [`Cargo.toml`'s `dev` profile](https://github.com/foundry-rs/foundry/tree/master/Cargo.toml#L15-L18). 107 | 108 | #### Adding tests 109 | 110 | If the change being proposed alters code, it is either adding new functionality to Foundry, or fixing existing, broken functionality. 111 | In both of these cases, the pull request should include one or more tests to ensure that Foundry does not regress 112 | in the future. 113 | 114 | Types of tests include: 115 | 116 | - **Unit tests**: Functions which have very specific tasks should be unit tested. 117 | - **Integration tests**: For general purpose, far reaching functionality, integration tests should be added. 118 | The best way to add a new integration test is to look at existing ones and follow the style. 
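For orientation, here is a sketch of the shape such an integration test might take; the environment variable, the `BlockchainDbMeta::default()` construction, and the test body are illustrative assumptions, not code copied from this repo's test suite:

```rust
use alloy_provider::{network::AnyNetwork, ProviderBuilder};
use foundry_fork_db::{cache::BlockchainDbMeta, BlockchainDb, SharedBackend};
use revm::database::DatabaseRef;

#[test]
fn test_fork_caches_basic_account() {
    // Assumed env var for the RPC endpoint; adjust to the harness you use.
    let url = std::env::var("ETH_RPC_URL").expect("ETH_RPC_URL must be set");
    let provider =
        ProviderBuilder::new().network::<AnyNetwork>().connect_http(url.parse().unwrap());

    // In-memory cache; the backend handler runs on its own thread.
    let db = BlockchainDb::new(BlockchainDbMeta::default(), None);
    let backend = SharedBackend::spawn_backend_thread(provider, db, None);

    let addr = alloy_primitives::Address::ZERO;
    let first = backend.basic_ref(addr).unwrap();
    // The second read is answered from the cache and must agree with the first.
    let second = backend.basic_ref(addr).unwrap();
    assert_eq!(first, second);
}
```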
119 | 120 | Tests that use forking must contain "fork" in their name. 121 | 122 | #### Commits 123 | 124 | It is a recommended best practice to keep your changes as logically grouped as possible within individual commits. There is no limit to the number of commits any single pull request may have, and many contributors find it easier to review changes that are split across multiple commits. 125 | 126 | That said, if you have a number of commits that are "checkpoints" and don't represent a single logical change, please squash those together. 127 | 128 | #### Opening the pull request 129 | 130 | From within GitHub, opening a new pull request will present you with a template that should be filled out. Please try your best at filling out the details, but feel free to skip parts if you're not sure what to put. 131 | 132 | #### Discuss and update 133 | 134 | You will probably get feedback or requests for changes to your pull request. 135 | This is a big part of the submission process, so don't be discouraged! Some contributors may sign off on the pull request right away, others may have more detailed comments or feedback. 136 | This is a necessary part of the process in order to evaluate whether the changes are correct and necessary. 137 | 138 | **Any community member can review a PR, so you might get conflicting feedback**. 139 | Keep an eye out for comments from code owners to provide guidance on conflicting feedback. 140 | 141 | #### Reviewing pull requests 142 | 143 | **Any Foundry community member is welcome to review any pull request**. 144 | 145 | All contributors who choose to review and provide feedback on pull requests have a responsibility to both the project and the individual making the contribution. Reviews and feedback must be helpful, insightful, and geared towards improving the contribution as opposed to simply blocking it. If there are reasons why you feel the PR should not be merged, explain what those are. Do not expect to be able to block a PR from advancing simply because you say "no" without giving an explanation. Be open to having your mind changed. Be open to working _with_ the contributor to make the pull request better. 146 | 147 | Reviews that are dismissive or disrespectful of the contributor or any other reviewers are strictly counter to the Code of Conduct. 148 | 149 | When reviewing a pull request, the primary goals are for the codebase to improve and for the person submitting the request to succeed. **Even if a pull request is not merged, the submitter should come away from the experience feeling like their effort was appreciated**. Every PR from a new contributor is an opportunity to grow the community. 150 | 151 | ##### Review a bit at a time 152 | 153 | Do not overwhelm new contributors. 154 | 155 | It is tempting to micro-optimize and make everything about relative performance, perfect grammar, or exact style matches. Do not succumb to that temptation. 156 | 157 | Focus first on the most significant aspects of the change: 158 | 159 | 1. Does this change make sense for Foundry? 160 | 2. Does this change make Foundry better, even if only incrementally? 161 | 3. Are there clear bugs or larger scale issues that need attending? 162 | 4. Are the commit messages readable and correct? If it contains a breaking change, is it clear enough? 163 | 164 | Note that only **incremental** improvement is needed to land a PR. This means that the PR does not need to be perfect, only better than the status quo. Follow-up PRs may be opened to continue iterating.
165 | 166 | When changes are necessary, _request_ them, do not _demand_ them, and **do not assume that the submitter already knows how to add a test or run a benchmark**. 167 | 168 | Specific performance optimization techniques, coding styles and conventions change over time. The first impression you give to a new contributor never does. 169 | 170 | Nits (requests for small changes that are not essential) are fine, but try to avoid stalling the pull request. Most nits can typically be fixed by the Foundry maintainers merging the pull request, but they can also be an opportunity for the contributor to learn a bit more about the project. 171 | 172 | It is always good to clearly indicate nits when you comment, e.g.: `Nit: change foo() to bar(). But this is not blocking`. 173 | 174 | If your comments were addressed but were not folded after new commits, or if they proved to be mistaken, please [hide them][hiding-a-comment] with the appropriate reason to keep the conversation flow concise and relevant. 175 | 176 | ##### Be aware of the person behind the code 177 | 178 | Be aware that _how_ you communicate requests and reviews in your feedback can have a significant impact on the success of the pull request. Yes, we may merge a particular change that makes Foundry better, but the individual might just not want to have anything to do with Foundry ever again. The goal is not just having good code. 179 | 180 | ##### Abandoned or stale pull requests 181 | 182 | If a pull request appears to be abandoned or stalled, it is polite to first check with the contributor to see if they intend to continue the work before checking if they would mind if you took it over (especially if it just has nits left). When doing so, it is courteous to give the original contributor credit for the work they started, either by preserving their name and e-mail address in the commit log, or by using the `Author: ` or `Co-authored-by: ` metadata tag in the commits. 183 | 184 | _Adapted from the [ethers-rs contributing guide](https://github.com/gakonst/ethers-rs/blob/master/CONTRIBUTING.md)_. 185 | 186 | ### Releasing 187 | 188 | Releases are automatically done by the release workflow when a tag is pushed; however, these steps still need to be taken: 189 | 190 | 1. Ensure that the versions in the relevant `Cargo.toml` files are up-to-date. 191 | 2. Update documentation links. 192 | 3. Perform a final audit for breaking changes.
193 | 194 | [rust-coc]: https://github.com/rust-lang/rust/blob/master/CODE_OF_CONDUCT.md 195 | [dev-tg]: https://t.me/foundry_rs 196 | [foundry-book]: https://github.com/foundry-rs/foundry-book 197 | [support-tg]: https://t.me/foundry_support 198 | [mcve]: https://stackoverflow.com/help/mcve 199 | [hiding-a-comment]: https://help.github.com/articles/managing-disruptive-comments/#hiding-a-comment -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "foundry-fork-db" 3 | description = "Fork database used by Foundry" 4 | version = "0.15.0" 5 | edition = "2021" 6 | # Remember to update clippy.toml as well 7 | rust-version = "1.83" 8 | authors = ["Foundry Contributors"] 9 | license = "MIT OR Apache-2.0" 10 | homepage = "https://github.com/foundry-rs/foundry-fork-db" 11 | repository = "https://github.com/foundry-rs/foundry-fork-db" 12 | 13 | [lints] 14 | rust.unused_must_use = "deny" 15 | rust.rust_2018_idioms = { level = "deny", priority = -1 } 16 | rustdoc.all = "warn" 17 | 18 | [lints.clippy] 19 | missing_const_for_fn = "allow" # TODO: https://github.com/rust-lang/rust-clippy/issues/14020 20 | use_self = "warn" 21 | option_if_let_else = "warn" 22 | 23 | [package.metadata.docs.rs] 24 | all-features = true 25 | rustdoc-args = ["--cfg", "docsrs"] 26 | 27 | [dependencies] 28 | alloy-primitives = { version = "1.0", features = ["map"] } 29 | alloy-provider = { version = "1.0.3", default-features = false } 30 | alloy-rpc-types = { version = "1.0.3", features = ["eth"] } 31 | alloy-consensus = { version = "1.0.3", default-features = false } 32 | 33 | eyre = "0.6" 34 | futures = "0.3" 35 | 36 | parking_lot = "0.12" 37 | 38 | revm = { version = "24.0.0", features = ["std", "serde"] } 39 | 40 | serde = "1.0" 41 | serde_json = "1.0" 42 | 43 | thiserror = "2" 44 | tokio = { version = "1", features = ["rt-multi-thread"] } 45 | tracing = "0.1" 46 | 47 | url = "2" 48 | 49 | [dev-dependencies] 50 | alloy-rpc-client = "1.0.3" 51 | tiny_http = "0.12" 52 | 53 | # [patch.crates-io] 54 | # alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "a4e7099" } 55 | # alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "a4e7099" } 56 | # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "a4e7099" } 57 | # alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "a4e7099" } 58 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # foundry-fork-db 2 | 3 | #### License 4 | 5 | 6 | Licensed under either of Apache License, Version 7 | 2.0 or MIT license at your option. 8 | 9 | 10 |
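The crate exposes a `SharedBackend` that caches and deduplicates RPC requests on top of a `BlockchainDb`. Below is a minimal usage sketch; the endpoint is a placeholder, and the exact re-export paths and the `BlockchainDbMeta::default()` construction are assumptions about the public API rather than verbatim documentation.

```rust
use alloy_primitives::Address;
use alloy_provider::{network::AnyNetwork, ProviderBuilder};
use foundry_fork_db::{cache::BlockchainDbMeta, BlockchainDb, SharedBackend};
use revm::database::DatabaseRef;

fn main() -> eyre::Result<()> {
    // Placeholder endpoint; point this at an archive node to fork older blocks.
    let provider = ProviderBuilder::new()
        .network::<AnyNetwork>()
        .connect_http("http://localhost:8545".parse()?);

    // `None` keeps the cache in memory; pass a path to persist it as JSON.
    let db = BlockchainDb::new(BlockchainDbMeta::default(), None);

    // Spawns the backend handler on a dedicated thread; the returned handle is
    // cheap to clone, and identical concurrent requests are deduplicated.
    let backend = SharedBackend::spawn_backend_thread(provider, db, None);

    let account = backend.basic_ref(Address::ZERO)?;
    println!("{account:?}");
    Ok(())
}
```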
11 | 12 | 13 | Unless you explicitly state otherwise, any contribution intentionally submitted 14 | for inclusion in these crates by you, as defined in the Apache-2.0 license, 15 | shall be dual licensed as above, without any additional terms or conditions. 16 | 17 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # Configuration file for [`git-cliff`](https://github.com/orhun/git-cliff) 2 | # See https://git-cliff.org/docs/configuration 3 | 4 | [changelog] 5 | header = """ 6 | # Changelog 7 | 8 | All notable changes to this project will be documented in this file. 9 | 10 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), 11 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n 12 | """ 13 | # https://tera.netlify.app/docs/#introduction 14 | body = """ 15 | {% if version %}\ 16 | ## [{{ version | trim_start_matches(pat="v") }}](https://github.com/foundry-rs/foundry-fork-db/releases/tag/v{{ version | trim_start_matches(pat="v") }}) - {{ timestamp | date(format="%Y-%m-%d") }} 17 | {% endif %}\ 18 | {% for group, commits in commits | group_by(attribute="group") %} 19 | ### {{ group | title }} 20 | {% for commit in commits %} 21 | - {% if commit.scope %}[{{ commit.scope }}] {% endif %}{{ commit.message | upper_first | split(pat="\\n") | first }}\ 22 | {% endfor %} 23 | {% endfor %}\n 24 | """ 25 | trim = true 26 | footer = "" 27 | 28 | [git] 29 | conventional_commits = true 30 | filter_unconventional = false 31 | commit_preprocessors = [ 32 | { pattern = '#(\d+)', replace = "[#$1](https://github.com/foundry-rs/foundry-fork-db/issues/$1)" }, 33 | ] 34 | commit_parsers = [ 35 | { message = "^[Ff]eat", group = "Features" }, 36 | { message = "^[Ff]ix", group = "Bug Fixes" }, 37 | { message = "^[Dd]oc", group = "Documentation" }, 38 | { message = ".*\\b([Dd]eps|[Dd]ependencies|[Bb]ump)\\b", group = "Dependencies" }, 39 | { message = "^[Pp]erf", group = "Performance" }, 40 | { message = "^[Rr]efactor", group = "Refactor" }, 41 | { message = ".*\\b([Ss]tyle|[Ff]mt|[Ff]ormat)\\b", group = "Styling" }, 42 | { message = "^[Tt]est", group = "Testing" }, 43 | { message = "^[Cc]hore", group = "Miscellaneous Tasks" }, 44 | 45 | { message = ".*", group = "Other" }, 46 | ] 47 | protect_breaking_commits = false 48 | filter_commits = false 49 | tag_pattern = "v[0-9]*" 50 | skip_tags = "beta|alpha" 51 | ignore_tags = "rc" 52 | sort_commits = "newest" 53 | -------------------------------------------------------------------------------- /clippy.toml: -------------------------------------------------------------------------------- 1 | msrv = "1.83" 2 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [advisories] 2 | version = 2 3 | yanked = "warn" 4 | ignore = [ 5 | # https://github.com/dtolnay/paste 6 | "RUSTSEC-2024-0436", 7 | ] 8 | 9 | [bans] 10 | multiple-versions = "warn" 11 | wildcards = "deny" 12 | highlight = "all" 13 | 14 | [licenses] 15 | confidence-threshold = 0.9 16 | # copyleft = "deny" 17 | 18 | allow = [ 19 | "MIT", 20 | "MIT-0", 21 | "Apache-2.0", 22 | "Apache-2.0 WITH LLVM-exception", 23 | "BSD-2-Clause", 24 | "BSD-3-Clause", 25 | "ISC", 26 | "Unicode-DFS-2016", 27 | "Unicode-3.0", 28 | "Unlicense", 29 | "MPL-2.0", 30 | "Zlib", 31 | # 
https://github.com/briansmith/ring/issues/902 32 | "LicenseRef-ring", 33 | # https://github.com/briansmith/webpki/issues/148 34 | "LicenseRef-webpki", 35 | ] 36 | 37 | exceptions = [ 38 | # CC0 is a permissive license but somewhat unclear status for source code 39 | # so we prefer to not have dependencies using it 40 | # https://tldrlegal.com/license/creative-commons-cc0-1.0-universal 41 | { allow = ["CC0-1.0"], name = "tiny-keccak" }, 42 | { allow = ["CC0-1.0"], name = "aurora-engine-modexp" }, 43 | { allow = ["CC0-1.0"], name = "secp256k1" }, 44 | { allow = ["CC0-1.0"], name = "secp256k1-sys" }, 45 | ] 46 | 47 | [[licenses.clarify]] 48 | name = "ring" 49 | expression = "LicenseRef-ring" 50 | license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] 51 | 52 | [[licenses.clarify]] 53 | name = "webpki" 54 | expression = "LicenseRef-webpki" 55 | license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] 56 | 57 | [sources] 58 | unknown-registry = "deny" 59 | unknown-git = "deny" 60 | -------------------------------------------------------------------------------- /release.toml: -------------------------------------------------------------------------------- 1 | # Configuration file for [`cargo-release`](https://github.com/crate-ci/cargo-release) 2 | # See: https://github.com/crate-ci/cargo-release/blob/master/docs/reference.md 3 | 4 | allow-branch = ["main"] 5 | sign-commit = true 6 | sign-tag = true 7 | shared-version = true 8 | pre-release-commit-message = "chore: release {{version}}" 9 | tag-prefix = "" # tag only once instead of per every crate 10 | pre-release-hook = ["sh", "-c", "$WORKSPACE_ROOT/scripts/changelog.sh --tag {{version}}"] 11 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | use_field_init_shorthand = true 3 | use_small_heuristics = "Max" 4 | 5 | # Nightly 6 | max_width = 100 7 | comment_width = 100 8 | imports_granularity = "Crate" 9 | wrap_comments = true 10 | format_code_in_doc_comments = true 11 | doc_comment_code_block_width = 100 12 | format_macro_matchers = true 13 | -------------------------------------------------------------------------------- /scripts/changelog.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e -o pipefail 3 | 4 | root=$(dirname "$(dirname "$0")") 5 | cmd=(git cliff --workdir "$root" --output "$root/CHANGELOG.md" "$@") 6 | 7 | if [ "$DRY_RUN" = "true" ]; then 8 | echo "skipping due to dry run: ${cmd[*]}" >&2 9 | exit 0 10 | else 11 | "${cmd[@]}" 12 | fi -------------------------------------------------------------------------------- /scripts/check_no_std.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | no_std_packages=( 5 | ) 6 | 7 | for package in "${no_std_packages[@]}"; do 8 | cmd="cargo +stable build -p $package --target riscv32imac-unknown-none-elf --no-default-features" 9 | if [ -n "$CI" ]; then 10 | echo "::group::$cmd" 11 | else 12 | printf "\n%s:\n %s\n" "$package" "$cmd" 13 | fi 14 | 15 | $cmd 16 | 17 | if [ -n "$CI" ]; then 18 | echo "::endgroup::" 19 | fi 20 | done 21 | -------------------------------------------------------------------------------- /src/backend.rs: -------------------------------------------------------------------------------- 1 | //! 
Smart caching and deduplication of requests when using a forking provider.
2 | 
3 | use crate::{
4 |     cache::{BlockchainDb, FlushJsonBlockCacheDB, MemDb, StorageInfo},
5 |     error::{DatabaseError, DatabaseResult},
6 | };
7 | use alloy_primitives::{keccak256, Address, Bytes, B256, U256};
8 | use alloy_provider::{
9 |     network::{AnyNetwork, AnyRpcBlock, AnyRpcTransaction},
10 |     Provider,
11 | };
12 | use alloy_rpc_types::BlockId;
13 | use eyre::WrapErr;
14 | use futures::{
15 |     channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender},
16 |     stream::Stream,
17 |     task::{Context, Poll},
18 |     Future, FutureExt,
19 | };
20 | use revm::{
21 |     database::DatabaseRef,
22 |     primitives::{
23 |         map::{hash_map::Entry, AddressHashMap, HashMap},
24 |         KECCAK_EMPTY,
25 |     },
26 |     state::{AccountInfo, Bytecode},
27 | };
28 | use std::{
29 |     collections::VecDeque,
30 |     fmt,
31 |     future::IntoFuture,
32 |     path::Path,
33 |     pin::Pin,
34 |     sync::{
35 |         mpsc::{channel as oneshot_channel, Sender as OneshotSender},
36 |         Arc,
37 |     },
38 | };
39 | 
40 | /// Logged when an error is indicative that the user is trying to fork from a non-archive node.
41 | pub const NON_ARCHIVE_NODE_WARNING: &str = "\
42 | It looks like you're trying to fork from an older block with a non-archive node which is not \
43 | supported. Please try to change your RPC url to an archive node if the issue persists.";
44 | 
45 | // Various future/request type aliases
46 | 
47 | type AccountFuture<Err> =
48 |     Pin<Box<dyn Future<Output = (AccountInfoSender, Result<AccountInfo, Err>, Address)> + Send>>;
49 | type StorageFuture<Err> = Pin<Box<dyn Future<Output = (StorageSender, Result<U256, Err>, Address, U256)> + Send>>;
50 | type BlockHashFuture<Err> = Pin<Box<dyn Future<Output = (BlockHashSender, Result<B256, Err>, u64)> + Send>>;
51 | type FullBlockFuture<Err> = Pin<
52 |     Box<dyn Future<Output = (FullBlockSender, Result<Option<AnyRpcBlock>, Err>, BlockId)> + Send>,
53 | >;
54 | type TransactionFuture<Err> =
55 |     Pin<Box<dyn Future<Output = (TransactionSender, Result<AnyRpcTransaction, Err>, B256)> + Send>>;
56 | 
57 | type AccountInfoSender = OneshotSender<DatabaseResult<AccountInfo>>;
58 | type StorageSender = OneshotSender<DatabaseResult<U256>>;
59 | type BlockHashSender = OneshotSender<DatabaseResult<B256>>;
60 | type FullBlockSender = OneshotSender<DatabaseResult<AnyRpcBlock>>;
61 | type TransactionSender = OneshotSender<DatabaseResult<AnyRpcTransaction>>;
62 | 
63 | type AddressData = AddressHashMap<AccountInfo>;
64 | type StorageData = AddressHashMap<StorageInfo>;
65 | type BlockHashData = HashMap<U256, B256>;
66 | 
67 | struct AnyRequestFuture<T, Err> {
68 |     sender: OneshotSender<Result<T, Err>>,
69 |     future: Pin<Box<dyn Future<Output = Result<T, Err>> + Send>>,
70 | }
71 | 
72 | impl<T, Err> fmt::Debug for AnyRequestFuture<T, Err> {
73 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
74 |         f.debug_tuple("AnyRequestFuture").field(&self.sender).finish()
75 |     }
76 | }
77 | 
78 | trait WrappedAnyRequest: Unpin + Send + fmt::Debug {
79 |     fn poll_inner(&mut self, cx: &mut Context<'_>) -> Poll<()>;
80 | }
81 | 
82 | /// @dev Implements `WrappedAnyRequest` for `AnyRequestFuture<T, Err>`.
83 | ///
84 | /// - `poll_inner` is similar to `Future` polling but intentionally consumes the `Future<Output = T>`
85 | ///   and returns `Future<Output = ()>`
86 | /// - This design avoids storing `Future<Output = T>` directly, as its type may not be known at
87 | ///   compile time.
88 | /// - Instead, the result (`Result<T, Err>`) is sent via the `sender` channel, which enforces type
89 | ///   safety.
90 | impl<T, Err> WrappedAnyRequest for AnyRequestFuture<T, Err>
91 | where
92 |     T: fmt::Debug + Send + 'static,
93 |     Err: fmt::Debug + Send + 'static,
94 | {
95 |     fn poll_inner(&mut self, cx: &mut Context<'_>) -> Poll<()> {
96 |         match self.future.poll_unpin(cx) {
97 |             Poll::Ready(result) => {
98 |                 let _ = self.sender.send(result);
99 |                 Poll::Ready(())
100 |             }
101 |             Poll::Pending => Poll::Pending,
102 |         }
103 |     }
104 | }
105 | 
106 | /// Request variants that are executed by the provider
107 | enum ProviderRequest<Err> {
108 |     Account(AccountFuture<Err>),
109 |     Storage(StorageFuture<Err>),
110 |     BlockHash(BlockHashFuture<Err>),
111 |     FullBlock(FullBlockFuture<Err>),
112 |     Transaction(TransactionFuture<Err>),
113 |     AnyRequest(Box<dyn WrappedAnyRequest>),
114 | }
115 | 
116 | /// The Request type the Backend listens for
117 | #[derive(Debug)]
118 | enum BackendRequest {
119 |     /// Fetch the account info
120 |     Basic(Address, AccountInfoSender),
121 |     /// Fetch a storage slot
122 |     Storage(Address, U256, StorageSender),
123 |     /// Fetch a block hash
124 |     BlockHash(u64, BlockHashSender),
125 |     /// Fetch an entire block with transactions
126 |     FullBlock(BlockId, FullBlockSender),
127 |     /// Fetch a transaction
128 |     Transaction(B256, TransactionSender),
129 |     /// Sets the pinned block to fetch data from
130 |     SetPinnedBlock(BlockId),
131 | 
132 |     /// Update Address data
133 |     UpdateAddress(AddressData),
134 |     /// Update Storage data
135 |     UpdateStorage(StorageData),
136 |     /// Update Block Hashes
137 |     UpdateBlockHash(BlockHashData),
138 |     /// Any other request
139 |     AnyRequest(Box<dyn WrappedAnyRequest>),
140 | }
141 | 
142 | /// Handles an internal provider and listens for requests.
143 | ///
144 | /// This handler will remain active as long as it is reachable (request channel still open) and
145 | /// requests are in progress.
146 | #[must_use = "futures do nothing unless polled"]
147 | pub struct BackendHandler<P> {
148 |     provider: P,
149 |     /// Stores all the data.
150 |     db: BlockchainDb,
151 |     /// Requests currently in progress
152 |     pending_requests: Vec<ProviderRequest<eyre::Report>>,
153 |     /// Listeners that wait for a `get_account` related response
154 |     account_requests: HashMap<Address, Vec<AccountInfoSender>>,
155 |     /// Listeners that wait for a `get_storage_at` response
156 |     storage_requests: HashMap<(Address, U256), Vec<StorageSender>>,
157 |     /// Listeners that wait for a `get_block` response
158 |     block_requests: HashMap<u64, Vec<BlockHashSender>>,
159 |     /// Incoming commands.
160 |     incoming: UnboundedReceiver<BackendRequest>,
161 |     /// unprocessed queued requests
162 |     queued_requests: VecDeque<BackendRequest>,
163 |     /// The block to fetch data from.
164 |     // This is an `Option` so that we can have less code churn in the functions below
165 |     block_id: Option<BlockId>,
166 | }
167 | 
168 | impl<P> BackendHandler<P>
169 | where 170 | P: Provider + Clone + Unpin + 'static, 171 | { 172 | fn new( 173 | provider: P, 174 | db: BlockchainDb, 175 | rx: UnboundedReceiver, 176 | block_id: Option, 177 | ) -> Self { 178 | Self { 179 | provider, 180 | db, 181 | pending_requests: Default::default(), 182 | account_requests: Default::default(), 183 | storage_requests: Default::default(), 184 | block_requests: Default::default(), 185 | queued_requests: Default::default(), 186 | incoming: rx, 187 | block_id, 188 | } 189 | } 190 | 191 | /// handle the request in queue in the future. 192 | /// 193 | /// We always check: 194 | /// 1. if the requested value is already stored in the cache, then answer the sender 195 | /// 2. otherwise, fetch it via the provider but check if a request for that value is already in 196 | /// progress (e.g. another Sender just requested the same account) 197 | fn on_request(&mut self, req: BackendRequest) { 198 | match req { 199 | BackendRequest::Basic(addr, sender) => { 200 | trace!(target: "backendhandler", "received request basic address={:?}", addr); 201 | let acc = self.db.accounts().read().get(&addr).cloned(); 202 | if let Some(basic) = acc { 203 | let _ = sender.send(Ok(basic)); 204 | } else { 205 | self.request_account(addr, sender); 206 | } 207 | } 208 | BackendRequest::BlockHash(number, sender) => { 209 | let hash = self.db.block_hashes().read().get(&U256::from(number)).cloned(); 210 | if let Some(hash) = hash { 211 | let _ = sender.send(Ok(hash)); 212 | } else { 213 | self.request_hash(number, sender); 214 | } 215 | } 216 | BackendRequest::FullBlock(number, sender) => { 217 | self.request_full_block(number, sender); 218 | } 219 | BackendRequest::Transaction(tx, sender) => { 220 | self.request_transaction(tx, sender); 221 | } 222 | BackendRequest::Storage(addr, idx, sender) => { 223 | // account is already stored in the cache 224 | let value = 225 | self.db.storage().read().get(&addr).and_then(|acc| acc.get(&idx).copied()); 226 | if let Some(value) = value { 227 | let _ = sender.send(Ok(value)); 228 | } else { 229 | // account present but not storage -> fetch storage 230 | self.request_account_storage(addr, idx, sender); 231 | } 232 | } 233 | BackendRequest::SetPinnedBlock(block_id) => { 234 | self.block_id = Some(block_id); 235 | } 236 | BackendRequest::UpdateAddress(address_data) => { 237 | for (address, data) in address_data { 238 | self.db.accounts().write().insert(address, data); 239 | } 240 | } 241 | BackendRequest::UpdateStorage(storage_data) => { 242 | for (address, data) in storage_data { 243 | self.db.storage().write().insert(address, data); 244 | } 245 | } 246 | BackendRequest::UpdateBlockHash(block_hash_data) => { 247 | for (block, hash) in block_hash_data { 248 | self.db.block_hashes().write().insert(block, hash); 249 | } 250 | } 251 | BackendRequest::AnyRequest(fut) => { 252 | self.pending_requests.push(ProviderRequest::AnyRequest(fut)); 253 | } 254 | } 255 | } 256 | 257 | /// process a request for account's storage 258 | fn request_account_storage(&mut self, address: Address, idx: U256, listener: StorageSender) { 259 | match self.storage_requests.entry((address, idx)) { 260 | Entry::Occupied(mut entry) => { 261 | entry.get_mut().push(listener); 262 | } 263 | Entry::Vacant(entry) => { 264 | trace!(target: "backendhandler", %address, %idx, "preparing storage request"); 265 | entry.insert(vec![listener]); 266 | let provider = self.provider.clone(); 267 | let block_id = self.block_id.unwrap_or_default(); 268 | let fut = Box::pin(async move { 269 | let storage = provider 
270 | .get_storage_at(address, idx) 271 | .block_id(block_id) 272 | .await 273 | .map_err(Into::into); 274 | (storage, address, idx) 275 | }); 276 | self.pending_requests.push(ProviderRequest::Storage(fut)); 277 | } 278 | } 279 | } 280 | 281 | /// returns the future that fetches the account data 282 | fn get_account_req(&self, address: Address) -> ProviderRequest { 283 | trace!(target: "backendhandler", "preparing account request, address={:?}", address); 284 | let provider = self.provider.clone(); 285 | let block_id = self.block_id.unwrap_or_default(); 286 | let fut = Box::pin(async move { 287 | let balance = provider.get_balance(address).block_id(block_id).into_future(); 288 | let nonce = provider.get_transaction_count(address).block_id(block_id).into_future(); 289 | let code = provider.get_code_at(address).block_id(block_id).into_future(); 290 | let resp = tokio::try_join!(balance, nonce, code).map_err(Into::into); 291 | (resp, address) 292 | }); 293 | ProviderRequest::Account(fut) 294 | } 295 | 296 | /// process a request for an account 297 | fn request_account(&mut self, address: Address, listener: AccountInfoSender) { 298 | match self.account_requests.entry(address) { 299 | Entry::Occupied(mut entry) => { 300 | entry.get_mut().push(listener); 301 | } 302 | Entry::Vacant(entry) => { 303 | entry.insert(vec![listener]); 304 | self.pending_requests.push(self.get_account_req(address)); 305 | } 306 | } 307 | } 308 | 309 | /// process a request for an entire block 310 | fn request_full_block(&mut self, number: BlockId, sender: FullBlockSender) { 311 | let provider = self.provider.clone(); 312 | let fut = Box::pin(async move { 313 | let block = provider 314 | .get_block(number) 315 | .full() 316 | .await 317 | .wrap_err(format!("could not fetch block {number:?}")); 318 | (sender, block, number) 319 | }); 320 | 321 | self.pending_requests.push(ProviderRequest::FullBlock(fut)); 322 | } 323 | 324 | /// process a request for a transactions 325 | fn request_transaction(&mut self, tx: B256, sender: TransactionSender) { 326 | let provider = self.provider.clone(); 327 | let fut = Box::pin(async move { 328 | let block = provider 329 | .get_transaction_by_hash(tx) 330 | .await 331 | .wrap_err_with(|| format!("could not get transaction {tx}")) 332 | .and_then(|maybe| { 333 | maybe.ok_or_else(|| eyre::eyre!("could not get transaction {tx}")) 334 | }); 335 | (sender, block, tx) 336 | }); 337 | 338 | self.pending_requests.push(ProviderRequest::Transaction(fut)); 339 | } 340 | 341 | /// process a request for a block hash 342 | fn request_hash(&mut self, number: u64, listener: BlockHashSender) { 343 | match self.block_requests.entry(number) { 344 | Entry::Occupied(mut entry) => { 345 | entry.get_mut().push(listener); 346 | } 347 | Entry::Vacant(entry) => { 348 | trace!(target: "backendhandler", number, "preparing block hash request"); 349 | entry.insert(vec![listener]); 350 | let provider = self.provider.clone(); 351 | let fut = Box::pin(async move { 352 | let block = provider 353 | .get_block_by_number(number.into()) 354 | .hashes() 355 | .await 356 | .wrap_err("failed to get block"); 357 | 358 | let block_hash = match block { 359 | Ok(Some(block)) => Ok(block.header.hash), 360 | Ok(None) => { 361 | warn!(target: "backendhandler", ?number, "block not found"); 362 | // if no block was returned then the block does not exist, in which case 363 | // we return empty hash 364 | Ok(KECCAK_EMPTY) 365 | } 366 | Err(err) => { 367 | error!(target: "backendhandler", %err, ?number, "failed to get block"); 368 | 
Err(err) 369 | } 370 | }; 371 | (block_hash, number) 372 | }); 373 | self.pending_requests.push(ProviderRequest::BlockHash(fut)); 374 | } 375 | } 376 | } 377 | } 378 | 379 | impl
<P> Future for BackendHandler<P>
380 | where 381 | P: Provider + Clone + Unpin + 'static, 382 | { 383 | type Output = (); 384 | 385 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 386 | let pin = self.get_mut(); 387 | loop { 388 | // Drain queued requests first. 389 | while let Some(req) = pin.queued_requests.pop_front() { 390 | pin.on_request(req) 391 | } 392 | 393 | // receive new requests to delegate to the underlying provider 394 | loop { 395 | match Pin::new(&mut pin.incoming).poll_next(cx) { 396 | Poll::Ready(Some(req)) => { 397 | pin.queued_requests.push_back(req); 398 | } 399 | Poll::Ready(None) => { 400 | trace!(target: "backendhandler", "last sender dropped, ready to drop (&flush cache)"); 401 | return Poll::Ready(()); 402 | } 403 | Poll::Pending => break, 404 | } 405 | } 406 | 407 | // poll all requests in progress 408 | for n in (0..pin.pending_requests.len()).rev() { 409 | let mut request = pin.pending_requests.swap_remove(n); 410 | match &mut request { 411 | ProviderRequest::Account(fut) => { 412 | if let Poll::Ready((resp, addr)) = fut.poll_unpin(cx) { 413 | // get the response 414 | let (balance, nonce, code) = match resp { 415 | Ok(res) => res, 416 | Err(err) => { 417 | let err = Arc::new(err); 418 | if let Some(listeners) = pin.account_requests.remove(&addr) { 419 | listeners.into_iter().for_each(|l| { 420 | let _ = l.send(Err(DatabaseError::GetAccount( 421 | addr, 422 | Arc::clone(&err), 423 | ))); 424 | }) 425 | } 426 | continue; 427 | } 428 | }; 429 | 430 | // convert it to revm-style types 431 | let (code, code_hash) = if !code.is_empty() { 432 | (code.clone(), keccak256(&code)) 433 | } else { 434 | (Bytes::default(), KECCAK_EMPTY) 435 | }; 436 | 437 | // update the cache 438 | let acc = AccountInfo { 439 | nonce, 440 | balance, 441 | code: Some(Bytecode::new_raw(code)), 442 | code_hash, 443 | }; 444 | pin.db.accounts().write().insert(addr, acc.clone()); 445 | 446 | // notify all listeners 447 | if let Some(listeners) = pin.account_requests.remove(&addr) { 448 | listeners.into_iter().for_each(|l| { 449 | let _ = l.send(Ok(acc.clone())); 450 | }) 451 | } 452 | continue; 453 | } 454 | } 455 | ProviderRequest::Storage(fut) => { 456 | if let Poll::Ready((resp, addr, idx)) = fut.poll_unpin(cx) { 457 | let value = match resp { 458 | Ok(value) => value, 459 | Err(err) => { 460 | // notify all listeners 461 | let err = Arc::new(err); 462 | if let Some(listeners) = 463 | pin.storage_requests.remove(&(addr, idx)) 464 | { 465 | listeners.into_iter().for_each(|l| { 466 | let _ = l.send(Err(DatabaseError::GetStorage( 467 | addr, 468 | idx, 469 | Arc::clone(&err), 470 | ))); 471 | }) 472 | } 473 | continue; 474 | } 475 | }; 476 | 477 | // update the cache 478 | pin.db.storage().write().entry(addr).or_default().insert(idx, value); 479 | 480 | // notify all listeners 481 | if let Some(listeners) = pin.storage_requests.remove(&(addr, idx)) { 482 | listeners.into_iter().for_each(|l| { 483 | let _ = l.send(Ok(value)); 484 | }) 485 | } 486 | continue; 487 | } 488 | } 489 | ProviderRequest::BlockHash(fut) => { 490 | if let Poll::Ready((block_hash, number)) = fut.poll_unpin(cx) { 491 | let value = match block_hash { 492 | Ok(value) => value, 493 | Err(err) => { 494 | let err = Arc::new(err); 495 | // notify all listeners 496 | if let Some(listeners) = pin.block_requests.remove(&number) { 497 | listeners.into_iter().for_each(|l| { 498 | let _ = l.send(Err(DatabaseError::GetBlockHash( 499 | number, 500 | Arc::clone(&err), 501 | ))); 502 | }) 503 | } 504 | continue; 505 | } 506 | }; 507 | 508 | // update the 
cache 509 | pin.db.block_hashes().write().insert(U256::from(number), value); 510 | 511 | // notify all listeners 512 | if let Some(listeners) = pin.block_requests.remove(&number) { 513 | listeners.into_iter().for_each(|l| { 514 | let _ = l.send(Ok(value)); 515 | }) 516 | } 517 | continue; 518 | } 519 | } 520 | ProviderRequest::FullBlock(fut) => { 521 | if let Poll::Ready((sender, resp, number)) = fut.poll_unpin(cx) { 522 | let msg = match resp { 523 | Ok(Some(block)) => Ok(block), 524 | Ok(None) => Err(DatabaseError::BlockNotFound(number)), 525 | Err(err) => { 526 | let err = Arc::new(err); 527 | Err(DatabaseError::GetFullBlock(number, err)) 528 | } 529 | }; 530 | let _ = sender.send(msg); 531 | continue; 532 | } 533 | } 534 | ProviderRequest::Transaction(fut) => { 535 | if let Poll::Ready((sender, tx, tx_hash)) = fut.poll_unpin(cx) { 536 | let msg = match tx { 537 | Ok(tx) => Ok(tx), 538 | Err(err) => { 539 | let err = Arc::new(err); 540 | Err(DatabaseError::GetTransaction(tx_hash, err)) 541 | } 542 | }; 543 | let _ = sender.send(msg); 544 | continue; 545 | } 546 | } 547 | ProviderRequest::AnyRequest(fut) => { 548 | if fut.poll_inner(cx).is_ready() { 549 | continue; 550 | } 551 | } 552 | } 553 | // not ready, insert and poll again 554 | pin.pending_requests.push(request); 555 | } 556 | 557 | // If no new requests have been queued, break to 558 | // be polled again later. 559 | if pin.queued_requests.is_empty() { 560 | return Poll::Pending; 561 | } 562 | } 563 | } 564 | } 565 | 566 | /// Mode for how the `SharedBackend` blocks in the non-async [`DatabaseRef`] when interacting with 567 | /// [`BackendHandler`]. 568 | #[derive(Default, Clone, Debug, PartialEq)] 569 | pub enum BlockingMode { 570 | /// This mode uses `tokio::task::block_in_place()` to block in place. 571 | /// 572 | /// This should be used when blocking on the call site is disallowed. 573 | #[default] 574 | BlockInPlace, 575 | /// This mode blocks the current task. 576 | /// 577 | /// This can be used if blocking on the call site is allowed, e.g. on a tokio blocking task. 578 | Block, 579 | } 580 | 581 | impl BlockingMode { 582 | /// Runs the given closure according to the configured blocking mode. 583 | pub fn run<F, R>(&self, f: F) -> R 584 | where 585 | F: FnOnce() -> R, 586 | { 587 | match self { 588 | Self::BlockInPlace => tokio::task::block_in_place(f), 589 | Self::Block => f(), 590 | } 591 | } 592 | } 593 | 594 | /// A cloneable backend type that shares access to the backend data with all its clones. 595 | /// 596 | /// This backend type is connected to the `BackendHandler` via an unbounded mpsc channel. The 597 | /// `BackendHandler` is spawned on a tokio task and listens for incoming commands on the receiver 598 | /// half of the channel. A `SharedBackend` holds a sender for that channel, which is `Clone`, so 599 | /// there can be multiple `SharedBackend`s communicating with the same `BackendHandler`, hence this 600 | /// `Backend` type is thread safe. 601 | /// 602 | /// All `Backend` trait functions are delegated as a `BackendRequest` via the channel to the 603 | /// `BackendHandler`. All `BackendRequest` variants include a sender half of an additional channel 604 | /// that is used by the `BackendHandler` to send the result of an executed `BackendRequest` back to 605 | /// the `SharedBackend`. 606 | /// 607 | /// The `BackendHandler` holds a `Provider` to look up missing accounts or storage slots 608 | /// from remote (e.g. infura). It detects duplicate requests from multiple `SharedBackend`s and 609 | /// bundles them together, so that only a single provider request is executed for identical lookups. 610 | /// For example, if two `SharedBackend`s, `A` and `B`, both request the basic account info of account 611 | /// `0xasd9sa7d...` at the same time, then after the `BackendHandler` receives the request from `A`, it 612 | /// sends a new provider request to the provider's endpoint; when it then reads the identical request 613 | /// from `B`, it simply adds it as an additional listener for the request already in progress, 614 | /// instead of sending another one. Once the provider returns the response, all listeners 615 | /// (`A` and `B`) are notified. 616 | // **Note**: the implementation makes use of [tokio::task::block_in_place()] when interacting with 617 | // the underlying [BackendHandler] which runs on a separate spawned tokio task. 618 | // [tokio::task::block_in_place()] 619 | // > Runs the provided blocking function on the current thread without blocking the executor. 620 | // This prevents issues (hangs) we ran into where the [SharedBackend] itself is called from a spawned 621 | // task. 622 | #[derive(Clone, Debug)] 623 | pub struct SharedBackend { 624 | /// channel used for sending commands related to database operations 625 | backend: UnboundedSender<BackendRequest>, 626 | /// Ensures that the underlying cache gets flushed once the last `SharedBackend` is dropped. 627 | /// 628 | /// There is only one instance of the type, so as soon as the last `SharedBackend` is deleted, 629 | /// `FlushJsonBlockCacheDB` is also deleted and the cache is flushed. 630 | cache: Arc<FlushJsonBlockCacheDB>, 631 | 632 | /// The mode for the `SharedBackend` to block in place or not 633 | blocking_mode: BlockingMode, 634 | } 635 | 636 | impl SharedBackend { 637 | /// _Spawns_ a new `BackendHandler` on a `tokio::task` that listens for requests from any 638 | /// `SharedBackend`. Missing values get inserted into the `db`. 639 | /// 640 | /// The spawned `BackendHandler` finishes once the last `SharedBackend` connected to it is 641 | /// dropped. 642 | /// 643 | /// NOTE: this should be called with `Arc<Provider>` 644 | pub async fn spawn_backend
<P>(provider: P, db: BlockchainDb, pin_block: Option<BlockId>) -> Self 645 | where 646 | P: Provider<AnyNetwork> + Unpin + 'static + Clone, 647 | { 648 | let (shared, handler) = Self::new(provider, db, pin_block); 649 | // spawn the provider handler to a task 650 | trace!(target: "backendhandler", "spawning Backendhandler task"); 651 | tokio::spawn(handler); 652 | shared 653 | } 654 | 655 | /// Same as `Self::spawn_backend` but spawns the `BackendHandler` on a separate `std::thread` in 656 | /// its own `tokio::Runtime` 657 | pub fn spawn_backend_thread
<P>( 658 | provider: P, 659 | db: BlockchainDb, 660 | pin_block: Option<BlockId>, 661 | ) -> Self 662 | where 663 | P: Provider<AnyNetwork> + Unpin + 'static + Clone, 664 | { 665 | let (shared, handler) = Self::new(provider, db, pin_block); 666 | 667 | // spawn a light-weight thread with a thread-local async runtime just for 668 | // sending and receiving data from the remote client 669 | std::thread::Builder::new() 670 | .name("fork-backend".into()) 671 | .spawn(move || { 672 | let rt = tokio::runtime::Builder::new_current_thread() 673 | .enable_all() 674 | .build() 675 | .expect("failed to build tokio runtime"); 676 | 677 | rt.block_on(handler); 678 | }) 679 | .expect("failed to spawn thread"); 680 | trace!(target: "backendhandler", "spawned Backendhandler thread"); 681 | 682 | shared 683 | } 684 | 685 | /// Returns a new `SharedBackend` and the `BackendHandler` 686 | pub fn new<P>
( 687 | provider: P, 688 | db: BlockchainDb, 689 | pin_block: Option<BlockId>, 690 | ) -> (Self, BackendHandler<P>
) 691 | where 692 | P: Provider<AnyNetwork> + Unpin + 'static + Clone, 693 | { 694 | let (backend, backend_rx) = unbounded(); 695 | let cache = Arc::new(FlushJsonBlockCacheDB(Arc::clone(db.cache()))); 696 | let handler = BackendHandler::new(provider, db, backend_rx, pin_block); 697 | (Self { backend, cache, blocking_mode: Default::default() }, handler) 698 | } 699 | 700 | /// Returns a copy of this `SharedBackend` with a specific blocking mode 701 | pub fn with_blocking_mode(&self, mode: BlockingMode) -> Self { 702 | Self { backend: self.backend.clone(), cache: self.cache.clone(), blocking_mode: mode } 703 | } 704 | 705 | /// Updates the pinned block to fetch data from 706 | pub fn set_pinned_block(&self, block: impl Into<BlockId>) -> eyre::Result<()> { 707 | let req = BackendRequest::SetPinnedBlock(block.into()); 708 | self.backend.unbounded_send(req).map_err(|e| eyre::eyre!("{:?}", e)) 709 | } 710 | 711 | /// Returns the full block for the given block identifier 712 | pub fn get_full_block(&self, block: impl Into<BlockId>) -> DatabaseResult<AnyRpcBlock> { 713 | self.blocking_mode.run(|| { 714 | let (sender, rx) = oneshot_channel(); 715 | let req = BackendRequest::FullBlock(block.into(), sender); 716 | self.backend.unbounded_send(req)?; 717 | rx.recv()? 718 | }) 719 | } 720 | 721 | /// Returns the transaction for the hash 722 | pub fn get_transaction(&self, tx: B256) -> DatabaseResult<AnyRpcTransaction> { 723 | self.blocking_mode.run(|| { 724 | let (sender, rx) = oneshot_channel(); 725 | let req = BackendRequest::Transaction(tx, sender); 726 | self.backend.unbounded_send(req)?; 727 | rx.recv()? 728 | }) 729 | } 730 | 731 | fn do_get_basic(&self, address: Address) -> DatabaseResult<Option<AccountInfo>> { 732 | self.blocking_mode.run(|| { 733 | let (sender, rx) = oneshot_channel(); 734 | let req = BackendRequest::Basic(address, sender); 735 | self.backend.unbounded_send(req)?; 736 | rx.recv()?.map(Some) 737 | }) 738 | } 739 | 740 | fn do_get_storage(&self, address: Address, index: U256) -> DatabaseResult<U256> { 741 | self.blocking_mode.run(|| { 742 | let (sender, rx) = oneshot_channel(); 743 | let req = BackendRequest::Storage(address, index, sender); 744 | self.backend.unbounded_send(req)?; 745 | rx.recv()? 746 | }) 747 | } 748 | 749 | fn do_get_block_hash(&self, number: u64) -> DatabaseResult<B256> { 750 | self.blocking_mode.run(|| { 751 | let (sender, rx) = oneshot_channel(); 752 | let req = BackendRequest::BlockHash(number, sender); 753 | self.backend.unbounded_send(req)?; 754 | rx.recv()?
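// recv() parks this thread until the spawned BackendHandler answers on the oneshot
// channel; `blocking_mode.run` decides how that blocking is performed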
755 | }) 756 | } 757 | 758 | /// Inserts or updates data for multiple addresses 759 | pub fn insert_or_update_address(&self, address_data: AddressData) { 760 | let req = BackendRequest::UpdateAddress(address_data); 761 | let err = self.backend.unbounded_send(req); 762 | match err { 763 | Ok(_) => (), 764 | Err(e) => { 765 | error!(target: "sharedbackend", "Failed to send update address request: {:?}", e) 766 | } 767 | } 768 | } 769 | 770 | /// Inserts or updates data for multiple storage slots 771 | pub fn insert_or_update_storage(&self, storage_data: StorageData) { 772 | let req = BackendRequest::UpdateStorage(storage_data); 773 | let err = self.backend.unbounded_send(req); 774 | match err { 775 | Ok(_) => (), 776 | Err(e) => { 777 | error!(target: "sharedbackend", "Failed to send update storage request: {:?}", e) 778 | } 779 | } 780 | } 781 | 782 | /// Inserts or updates data for multiple block hashes 783 | pub fn insert_or_update_block_hashes(&self, block_hash_data: BlockHashData) { 784 | let req = BackendRequest::UpdateBlockHash(block_hash_data); 785 | let err = self.backend.unbounded_send(req); 786 | match err { 787 | Ok(_) => (), 788 | Err(e) => { 789 | error!(target: "sharedbackend", "Failed to send update block hash request: {:?}", e) 790 | } 791 | } 792 | } 793 | 794 | /// Executes an arbitrary request on the provider and returns its result 795 | pub fn do_any_request<F, T>(&mut self, fut: F) -> DatabaseResult<T> 796 | where 797 | F: Future<Output = Result<T, eyre::Report>> + Send + 'static, 798 | T: fmt::Debug + Send + 'static, 799 | { 800 | self.blocking_mode.run(|| { 801 | let (sender, rx) = oneshot_channel::<Result<T, eyre::Report>>(); 802 | let req = BackendRequest::AnyRequest(Box::new(AnyRequestFuture { 803 | sender, 804 | future: Box::pin(fut), 805 | })); 806 | self.backend.unbounded_send(req)?; 807 | rx.recv()?.map_err(|err| DatabaseError::AnyRequest(Arc::new(err))) 808 | }) 809 | } 810 | 811 | /// Flushes the DB to disk if caching is enabled 812 | pub fn flush_cache(&self) { 813 | self.cache.0.flush(); 814 | } 815 | 816 | /// Flushes the DB to a specific file 817 | pub fn flush_cache_to(&self, cache_path: &Path) { 818 | self.cache.0.flush_to(cache_path); 819 | } 820 | 821 | /// Returns the DB 822 | pub fn data(&self) -> Arc<MemDb> { 823 | self.cache.0.db().clone() 824 | } 825 | 826 | /// Returns the DB accounts 827 | pub fn accounts(&self) -> AddressData { 828 | self.cache.0.db().accounts.read().clone() 829 | } 830 | 831 | /// Returns the DB accounts length 832 | pub fn accounts_len(&self) -> usize { 833 | self.cache.0.db().accounts.read().len() 834 | } 835 | 836 | /// Returns the DB storage 837 | pub fn storage(&self) -> StorageData { 838 | self.cache.0.db().storage.read().clone() 839 | } 840 | 841 | /// Returns the DB storage length 842 | pub fn storage_len(&self) -> usize { 843 | self.cache.0.db().storage.read().len() 844 | } 845 | 846 | /// Returns the DB block_hashes 847 | pub fn block_hashes(&self) -> BlockHashData { 848 | self.cache.0.db().block_hashes.read().clone() 849 | } 850 | 851 | /// Returns the DB block_hashes length 852 | pub fn block_hashes_len(&self) -> usize { 853 | self.cache.0.db().block_hashes.read().len() 854 | } 855 | } 856 | 857 | impl DatabaseRef for SharedBackend { 858 | type Error = DatabaseError; 859 | 860 | fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> { 861 | trace!(target: "sharedbackend", %address, "request basic"); 862 | self.do_get_basic(address).map_err(|err| { 863 | error!(target: "sharedbackend", %err, %address, "Failed to send/recv `basic`"); 864 | if err.is_possibly_non_archive_node_error() { 865 | error!(target:
"sharedbackend", "{NON_ARCHIVE_NODE_WARNING}"); 866 | } 867 | err 868 | }) 869 | } 870 | 871 | fn code_by_hash_ref(&self, hash: B256) -> Result { 872 | Err(DatabaseError::MissingCode(hash)) 873 | } 874 | 875 | fn storage_ref(&self, address: Address, index: U256) -> Result { 876 | trace!(target: "sharedbackend", "request storage {:?} at {:?}", address, index); 877 | self.do_get_storage(address, index).map_err(|err| { 878 | error!(target: "sharedbackend", %err, %address, %index, "Failed to send/recv `storage`"); 879 | if err.is_possibly_non_archive_node_error() { 880 | error!(target: "sharedbackend", "{NON_ARCHIVE_NODE_WARNING}"); 881 | } 882 | err 883 | }) 884 | } 885 | 886 | fn block_hash_ref(&self, number: u64) -> Result { 887 | trace!(target: "sharedbackend", "request block hash for number {:?}", number); 888 | self.do_get_block_hash(number).map_err(|err| { 889 | error!(target: "sharedbackend", %err, %number, "Failed to send/recv `block_hash`"); 890 | if err.is_possibly_non_archive_node_error() { 891 | error!(target: "sharedbackend", "{NON_ARCHIVE_NODE_WARNING}"); 892 | } 893 | err 894 | }) 895 | } 896 | } 897 | 898 | #[cfg(test)] 899 | mod tests { 900 | use super::*; 901 | use crate::cache::{BlockchainDbMeta, JsonBlockCacheDB}; 902 | use alloy_provider::ProviderBuilder; 903 | use alloy_rpc_client::ClientBuilder; 904 | use serde::Deserialize; 905 | use std::{collections::BTreeSet, fs, path::PathBuf}; 906 | use tiny_http::{Response, Server}; 907 | 908 | pub fn get_http_provider(endpoint: &str) -> impl Provider + Clone { 909 | ProviderBuilder::new() 910 | .network::() 911 | .connect_client(ClientBuilder::default().http(endpoint.parse().unwrap())) 912 | } 913 | 914 | const ENDPOINT: Option<&str> = option_env!("ETH_RPC_URL"); 915 | 916 | #[tokio::test(flavor = "multi_thread")] 917 | async fn test_builder() { 918 | let Some(endpoint) = ENDPOINT else { return }; 919 | let provider = get_http_provider(endpoint); 920 | 921 | let any_rpc_block = provider.get_block(BlockId::latest()).hashes().await.unwrap().unwrap(); 922 | let _meta = BlockchainDbMeta::default().with_block(&any_rpc_block.inner); 923 | } 924 | 925 | #[tokio::test(flavor = "multi_thread")] 926 | async fn shared_backend() { 927 | let Some(endpoint) = ENDPOINT else { return }; 928 | 929 | let provider = get_http_provider(endpoint); 930 | let meta = BlockchainDbMeta { 931 | block_env: Default::default(), 932 | hosts: BTreeSet::from([endpoint.to_string()]), 933 | }; 934 | 935 | let db = BlockchainDb::new(meta, None); 936 | let backend = SharedBackend::spawn_backend(Arc::new(provider), db.clone(), None).await; 937 | 938 | // some rng contract from etherscan 939 | let address: Address = "63091244180ae240c87d1f528f5f269134cb07b3".parse().unwrap(); 940 | 941 | let idx = U256::from(0u64); 942 | let value = backend.storage_ref(address, idx).unwrap(); 943 | let account = backend.basic_ref(address).unwrap().unwrap(); 944 | 945 | let mem_acc = db.accounts().read().get(&address).unwrap().clone(); 946 | assert_eq!(account.balance, mem_acc.balance); 947 | assert_eq!(account.nonce, mem_acc.nonce); 948 | let slots = db.storage().read().get(&address).unwrap().clone(); 949 | assert_eq!(slots.len(), 1); 950 | assert_eq!(slots.get(&idx).copied().unwrap(), value); 951 | 952 | let num = 10u64; 953 | let hash = backend.block_hash_ref(num).unwrap(); 954 | let mem_hash = *db.block_hashes().read().get(&U256::from(num)).unwrap(); 955 | assert_eq!(hash, mem_hash); 956 | 957 | let max_slots = 5; 958 | let handle = std::thread::spawn(move || { 959 | for i in 
1..max_slots { 960 | let idx = U256::from(i); 961 | let _ = backend.storage_ref(address, idx); 962 | } 963 | }); 964 | handle.join().unwrap(); 965 | let slots = db.storage().read().get(&address).unwrap().clone(); 966 | assert_eq!(slots.len() as u64, max_slots); 967 | } 968 | 969 | #[test] 970 | fn can_read_cache() { 971 | let cache_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("test-data/storage.json"); 972 | let json = JsonBlockCacheDB::load(cache_path).unwrap(); 973 | assert!(!json.db().accounts.read().is_empty()); 974 | } 975 | 976 | #[tokio::test(flavor = "multi_thread")] 977 | async fn can_modify_address() { 978 | let Some(endpoint) = ENDPOINT else { return }; 979 | 980 | let provider = get_http_provider(endpoint); 981 | let meta = BlockchainDbMeta { 982 | block_env: Default::default(), 983 | hosts: BTreeSet::from([endpoint.to_string()]), 984 | }; 985 | 986 | let db = BlockchainDb::new(meta, None); 987 | let backend = SharedBackend::spawn_backend(Arc::new(provider), db.clone(), None).await; 988 | 989 | // some rng contract from etherscan 990 | let address: Address = "63091244180ae240c87d1f528f5f269134cb07b3".parse().unwrap(); 991 | 992 | let new_acc = AccountInfo { 993 | nonce: 1000u64, 994 | balance: U256::from(2000), 995 | code: None, 996 | code_hash: KECCAK_EMPTY, 997 | }; 998 | let mut account_data = AddressData::default(); 999 | account_data.insert(address, new_acc.clone()); 1000 | 1001 | backend.insert_or_update_address(account_data); 1002 | 1003 | let max_slots = 5; 1004 | let handle = std::thread::spawn(move || { 1005 | for i in 1..max_slots { 1006 | let idx = U256::from(i); 1007 | let result_address = backend.basic_ref(address).unwrap(); 1008 | match result_address { 1009 | Some(acc) => { 1010 | assert_eq!( 1011 | acc.nonce, new_acc.nonce, 1012 | "The nonce was not changed in instance of index {}", 1013 | idx 1014 | ); 1015 | assert_eq!( 1016 | acc.balance, new_acc.balance, 1017 | "The balance was not changed in instance of index {}", 1018 | idx 1019 | ); 1020 | 1021 | // comparing with db 1022 | let db_address = { 1023 | let accounts = db.accounts().read(); 1024 | accounts.get(&address).unwrap().clone() 1025 | }; 1026 | 1027 | assert_eq!( 1028 | db_address.nonce, new_acc.nonce, 1029 | "The nonce was not changed in instance of index {}", 1030 | idx 1031 | ); 1032 | assert_eq!( 1033 | db_address.balance, new_acc.balance, 1034 | "The balance was not changed in instance of index {}", 1035 | idx 1036 | ); 1037 | } 1038 | None => panic!("Account not found"), 1039 | } 1040 | } 1041 | }); 1042 | handle.join().unwrap(); 1043 | } 1044 | 1045 | #[tokio::test(flavor = "multi_thread")] 1046 | async fn can_modify_storage() { 1047 | let Some(endpoint) = ENDPOINT else { return }; 1048 | 1049 | let provider = get_http_provider(endpoint); 1050 | let meta = BlockchainDbMeta { 1051 | block_env: Default::default(), 1052 | hosts: BTreeSet::from([endpoint.to_string()]), 1053 | }; 1054 | 1055 | let db = BlockchainDb::new(meta, None); 1056 | let backend = SharedBackend::spawn_backend(Arc::new(provider), db.clone(), None).await; 1057 | 1058 | // some rng contract from etherscan 1059 | let address: Address = "63091244180ae240c87d1f528f5f269134cb07b3".parse().unwrap(); 1060 | 1061 | let mut storage_data = StorageData::default(); 1062 | let mut storage_info = StorageInfo::default(); 1063 | storage_info.insert(U256::from(20), U256::from(10)); 1064 | storage_info.insert(U256::from(30), U256::from(15)); 1065 | storage_info.insert(U256::from(40), U256::from(20)); 1066 | 1067 | 
storage_data.insert(address, storage_info); 1068 | 1069 | backend.insert_or_update_storage(storage_data.clone()); 1070 | 1071 | let max_slots = 5; 1072 | let handle = std::thread::spawn(move || { 1073 | for _ in 1..max_slots { 1074 | for (address, info) in &storage_data { 1075 | for (index, value) in info { 1076 | let result_storage = backend.do_get_storage(*address, *index); 1077 | match result_storage { 1078 | Ok(stg_db) => { 1079 | assert_eq!( 1080 | stg_db, *value, 1081 | "Storage in slot number {} in address {} do not have the same value", index, address 1082 | ); 1083 | 1084 | let db_result = { 1085 | let storage = db.storage().read(); 1086 | let address_storage = storage.get(address).unwrap(); 1087 | *address_storage.get(index).unwrap() 1088 | }; 1089 | 1090 | assert_eq!( 1091 | stg_db, db_result, 1092 | "Storage in slot number {} in address {} do not have the same value", index, address 1093 | ) 1094 | } 1095 | 1096 | Err(err) => { 1097 | panic!("There was a database error: {}", err) 1098 | } 1099 | } 1100 | } 1101 | } 1102 | } 1103 | }); 1104 | handle.join().unwrap(); 1105 | } 1106 | 1107 | #[tokio::test(flavor = "multi_thread")] 1108 | async fn can_modify_block_hashes() { 1109 | let Some(endpoint) = ENDPOINT else { return }; 1110 | 1111 | let provider = get_http_provider(endpoint); 1112 | let meta = BlockchainDbMeta { 1113 | block_env: Default::default(), 1114 | hosts: BTreeSet::from([endpoint.to_string()]), 1115 | }; 1116 | 1117 | let db = BlockchainDb::new(meta, None); 1118 | let backend = SharedBackend::spawn_backend(Arc::new(provider), db.clone(), None).await; 1119 | 1120 | // some rng contract from etherscan 1121 | // let address: Address = "63091244180ae240c87d1f528f5f269134cb07b3".parse().unwrap(); 1122 | 1123 | let mut block_hash_data = BlockHashData::default(); 1124 | block_hash_data.insert(U256::from(1), B256::from(U256::from(1))); 1125 | block_hash_data.insert(U256::from(2), B256::from(U256::from(2))); 1126 | block_hash_data.insert(U256::from(3), B256::from(U256::from(3))); 1127 | block_hash_data.insert(U256::from(4), B256::from(U256::from(4))); 1128 | block_hash_data.insert(U256::from(5), B256::from(U256::from(5))); 1129 | 1130 | backend.insert_or_update_block_hashes(block_hash_data.clone()); 1131 | 1132 | let max_slots: u64 = 5; 1133 | let handle = std::thread::spawn(move || { 1134 | for i in 1..max_slots { 1135 | let key = U256::from(i); 1136 | let result_hash = backend.do_get_block_hash(i); 1137 | match result_hash { 1138 | Ok(hash) => { 1139 | assert_eq!( 1140 | hash, 1141 | *block_hash_data.get(&key).unwrap(), 1142 | "The hash in block {} did not match", 1143 | key 1144 | ); 1145 | 1146 | let db_result = { 1147 | let hashes = db.block_hashes().read(); 1148 | *hashes.get(&key).unwrap() 1149 | }; 1150 | 1151 | assert_eq!(hash, db_result, "The hash in block {} did not match", key); 1152 | } 1153 | Err(err) => panic!("Hash not found, error: {}", err), 1154 | } 1155 | } 1156 | }); 1157 | handle.join().unwrap(); 1158 | } 1159 | 1160 | #[tokio::test(flavor = "multi_thread")] 1161 | async fn can_modify_storage_with_cache() { 1162 | let Some(endpoint) = ENDPOINT else { return }; 1163 | 1164 | let provider = get_http_provider(endpoint); 1165 | let meta = BlockchainDbMeta { 1166 | block_env: Default::default(), 1167 | hosts: BTreeSet::from([endpoint.to_string()]), 1168 | }; 1169 | 1170 | // create a temporary file 1171 | fs::copy("test-data/storage.json", "test-data/storage-tmp.json").unwrap(); 1172 | 1173 | let cache_path = 1174 | 
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("test-data/storage-tmp.json"); 1175 | 1176 | let db = BlockchainDb::new(meta.clone(), Some(cache_path)); 1177 | let backend = 1178 | SharedBackend::spawn_backend(Arc::new(provider.clone()), db.clone(), None).await; 1179 | 1180 | // some rng contract from etherscan 1181 | let address: Address = "63091244180ae240c87d1f528f5f269134cb07b3".parse().unwrap(); 1182 | 1183 | let mut storage_data = StorageData::default(); 1184 | let mut storage_info = StorageInfo::default(); 1185 | storage_info.insert(U256::from(1), U256::from(10)); 1186 | storage_info.insert(U256::from(2), U256::from(15)); 1187 | storage_info.insert(U256::from(3), U256::from(20)); 1188 | storage_info.insert(U256::from(4), U256::from(20)); 1189 | storage_info.insert(U256::from(5), U256::from(15)); 1190 | storage_info.insert(U256::from(6), U256::from(10)); 1191 | 1192 | let mut address_data = backend.basic_ref(address).unwrap().unwrap(); 1193 | address_data.code = None; 1194 | 1195 | storage_data.insert(address, storage_info); 1196 | 1197 | backend.insert_or_update_storage(storage_data.clone()); 1198 | 1199 | let mut new_acc = backend.basic_ref(address).unwrap().unwrap(); 1200 | // overwrite the account code with some custom bytes 1201 | new_acc.code = Some(Bytecode::new_raw(([10, 20, 30, 40]).into())); 1202 | 1203 | let mut account_data = AddressData::default(); 1204 | account_data.insert(address, new_acc.clone()); 1205 | 1206 | backend.insert_or_update_address(account_data); 1207 | 1208 | let backend_clone = backend.clone(); 1209 | 1210 | let max_slots = 5; 1211 | let handle = std::thread::spawn(move || { 1212 | for _ in 1..max_slots { 1213 | for (address, info) in &storage_data { 1214 | for (index, value) in info { 1215 | let result_storage = backend.do_get_storage(*address, *index); 1216 | match result_storage { 1217 | Ok(stg_db) => { 1218 | assert_eq!( 1219 | stg_db, *value, 1220 | "Storage in slot number {} in address {} doesn't have the same value", index, address 1221 | ); 1222 | 1223 | let db_result = { 1224 | let storage = db.storage().read(); 1225 | let address_storage = storage.get(address).unwrap(); 1226 | *address_storage.get(index).unwrap() 1227 | }; 1228 | 1229 | assert_eq!( 1230 | stg_db, db_result, 1231 | "Storage in slot number {} in address {} doesn't have the same value", index, address 1232 | ); 1233 | } 1234 | 1235 | Err(err) => { 1236 | panic!("There was a database error: {}", err) 1237 | } 1238 | } 1239 | } 1240 | } 1241 | } 1242 | 1243 | backend_clone.flush_cache(); 1244 | }); 1245 | handle.join().unwrap(); 1246 | 1247 | // read json and confirm the changes to the data 1248 | 1249 | let cache_path = 1250 | PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("test-data/storage-tmp.json"); 1251 | 1252 | let json_db = BlockchainDb::new(meta, Some(cache_path)); 1253 | 1254 | let mut storage_data = StorageData::default(); 1255 | let mut storage_info = StorageInfo::default(); 1256 | storage_info.insert(U256::from(1), U256::from(10)); 1257 | storage_info.insert(U256::from(2), U256::from(15)); 1258 | storage_info.insert(U256::from(3), U256::from(20)); 1259 | storage_info.insert(U256::from(4), U256::from(20)); 1260 | storage_info.insert(U256::from(5), U256::from(15)); 1261 | storage_info.insert(U256::from(6), U256::from(10)); 1262 | 1263 | storage_data.insert(address, storage_info); 1264 | 1265 | // redo the checks with the data extracted from the json file 1266 | let max_slots = 5; 1267 | let handle = std::thread::spawn(move || { 1268 | for _ in 1..max_slots { 1269 | for (address, info) in &storage_data {
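// every slot flushed to disk above must be present in the freshly loaded json db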
1270 | for (index, value) in info { 1271 | let result_storage = { 1272 | let storage = json_db.storage().read(); 1273 | let address_storage = storage.get(address).unwrap().clone(); 1274 | *address_storage.get(index).unwrap() 1275 | }; 1276 | 1277 | assert_eq!( 1278 | result_storage, *value, 1279 | "Storage in slot number {} in address {} doesn't have the same value", 1280 | index, address 1281 | ); 1282 | } 1283 | } 1284 | } 1285 | }); 1286 | 1287 | handle.join().unwrap(); 1288 | 1289 | // erase the temporary file 1290 | fs::remove_file("test-data/storage-tmp.json").unwrap(); 1291 | } 1292 | 1293 | #[tokio::test(flavor = "multi_thread")] 1294 | async fn shared_backend_any_request() { 1295 | let expected_response_bytes: Bytes = vec![0xff, 0xee].into(); 1296 | let server = Server::http("0.0.0.0:0").expect("failed starting in-memory http server"); 1297 | let endpoint = format!("http://{}", server.server_addr()); 1298 | 1299 | // Spin an in-memory server that responds to "foo_callCustomMethod" rpc call. 1300 | let expected_bytes_innner = expected_response_bytes.clone(); 1301 | let server_handle = std::thread::spawn(move || { 1302 | #[derive(Debug, Deserialize)] 1303 | struct Request { 1304 | method: String, 1305 | } 1306 | let mut request = server.recv().unwrap(); 1307 | let rpc_request: Request = 1308 | serde_json::from_reader(request.as_reader()).expect("failed parsing request"); 1309 | 1310 | match rpc_request.method.as_str() { 1311 | "foo_callCustomMethod" => request 1312 | .respond(Response::from_string(format!( 1313 | r#"{{"result": "{}"}}"#, 1314 | alloy_primitives::hex::encode_prefixed(expected_bytes_innner), 1315 | ))) 1316 | .unwrap(), 1317 | _ => request 1318 | .respond(Response::from_string(r#"{"error": "invalid request"}"#)) 1319 | .unwrap(), 1320 | }; 1321 | }); 1322 | 1323 | let provider = get_http_provider(&endpoint); 1324 | let meta = BlockchainDbMeta { 1325 | block_env: Default::default(), 1326 | hosts: BTreeSet::from([endpoint.to_string()]), 1327 | }; 1328 | 1329 | let db = BlockchainDb::new(meta, None); 1330 | let provider_inner = provider.clone(); 1331 | let mut backend = SharedBackend::spawn_backend(Arc::new(provider), db.clone(), None).await; 1332 | 1333 | let actual_response_bytes = backend 1334 | .do_any_request(async move { 1335 | let bytes: alloy_primitives::Bytes = 1336 | provider_inner.raw_request("foo_callCustomMethod".into(), vec!["0001"]).await?; 1337 | Ok(bytes) 1338 | }) 1339 | .expect("failed performing any request"); 1340 | 1341 | assert_eq!(actual_response_bytes, expected_response_bytes); 1342 | 1343 | server_handle.join().unwrap(); 1344 | } 1345 | } 1346 | -------------------------------------------------------------------------------- /src/cache.rs: -------------------------------------------------------------------------------- 1 | //! 
Cache related abstraction 2 | use alloy_consensus::BlockHeader; 3 | use alloy_primitives::{Address, B256, U256}; 4 | use alloy_provider::network::TransactionResponse; 5 | use parking_lot::RwLock; 6 | use revm::{ 7 | context::BlockEnv, 8 | context_interface::block::BlobExcessGasAndPrice, 9 | primitives::{ 10 | map::{AddressHashMap, HashMap}, 11 | KECCAK_EMPTY, 12 | }, 13 | state::{Account, AccountInfo, AccountStatus}, 14 | DatabaseCommit, 15 | }; 16 | use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; 17 | use std::{ 18 | collections::BTreeSet, 19 | fs, 20 | io::{BufWriter, Write}, 21 | path::{Path, PathBuf}, 22 | sync::Arc, 23 | }; 24 | use url::Url; 25 | 26 | pub type StorageInfo = HashMap; 27 | 28 | /// A shareable Block database 29 | #[derive(Clone, Debug)] 30 | pub struct BlockchainDb { 31 | /// Contains all the data 32 | db: Arc, 33 | /// metadata of the current config 34 | meta: Arc>, 35 | /// the cache that can be flushed 36 | cache: Arc, 37 | } 38 | 39 | impl BlockchainDb { 40 | /// Creates a new instance of the [BlockchainDb]. 41 | /// 42 | /// If a `cache_path` is provided it attempts to load a previously stored [JsonBlockCacheData] 43 | /// and will try to use the cached entries it holds. 44 | /// 45 | /// This will return a new and empty [MemDb] if 46 | /// - `cache_path` is `None` 47 | /// - the file the `cache_path` points to, does not exist 48 | /// - the file contains malformed data, or if it couldn't be read 49 | /// - the provided `meta` differs from [BlockchainDbMeta] that's stored on disk 50 | pub fn new(meta: BlockchainDbMeta, cache_path: Option) -> Self { 51 | Self::new_db(meta, cache_path, false) 52 | } 53 | 54 | /// Creates a new instance of the [BlockchainDb] and skips check when comparing meta 55 | /// This is useful for offline-start mode when we don't want to fetch metadata of `block`. 56 | /// 57 | /// if a `cache_path` is provided it attempts to load a previously stored [JsonBlockCacheData] 58 | /// and will try to use the cached entries it holds. 
59 | /// 60 | /// This will return a new and empty [MemDb] if 61 | /// - `cache_path` is `None` 62 | /// - the file the `cache_path` points to, does not exist 63 | /// - the file contains malformed data, or if it couldn't be read 64 | /// - the provided `meta` differs from [BlockchainDbMeta] that's stored on disk 65 | pub fn new_skip_check(meta: BlockchainDbMeta, cache_path: Option) -> Self { 66 | Self::new_db(meta, cache_path, true) 67 | } 68 | 69 | fn new_db(meta: BlockchainDbMeta, cache_path: Option, skip_check: bool) -> Self { 70 | trace!(target: "forge::cache", cache=?cache_path, "initialising blockchain db"); 71 | // read cache and check if metadata matches 72 | let cache = cache_path 73 | .as_ref() 74 | .and_then(|p| { 75 | JsonBlockCacheDB::load(p).ok().filter(|cache| { 76 | if skip_check { 77 | return true; 78 | } 79 | let mut existing = cache.meta().write(); 80 | existing.hosts.extend(meta.hosts.clone()); 81 | if meta != *existing { 82 | warn!(target: "cache", "non-matching block metadata"); 83 | false 84 | } else { 85 | true 86 | } 87 | }) 88 | }) 89 | .unwrap_or_else(|| JsonBlockCacheDB::new(Arc::new(RwLock::new(meta)), cache_path)); 90 | 91 | Self { db: Arc::clone(cache.db()), meta: Arc::clone(cache.meta()), cache: Arc::new(cache) } 92 | } 93 | 94 | /// Returns the map that holds the account related info 95 | pub fn accounts(&self) -> &RwLock> { 96 | &self.db.accounts 97 | } 98 | 99 | /// Returns the map that holds the storage related info 100 | pub fn storage(&self) -> &RwLock> { 101 | &self.db.storage 102 | } 103 | 104 | /// Returns the map that holds all the block hashes 105 | pub fn block_hashes(&self) -> &RwLock> { 106 | &self.db.block_hashes 107 | } 108 | 109 | /// Returns the Env related metadata 110 | pub const fn meta(&self) -> &Arc> { 111 | &self.meta 112 | } 113 | 114 | /// Returns the inner cache 115 | pub const fn cache(&self) -> &Arc { 116 | &self.cache 117 | } 118 | 119 | /// Returns the underlying storage 120 | pub const fn db(&self) -> &Arc { 121 | &self.db 122 | } 123 | } 124 | 125 | /// relevant identifying markers in the context of [BlockchainDb] 126 | #[derive(Clone, Debug, Eq, Serialize, Default)] 127 | pub struct BlockchainDbMeta { 128 | /// The block environment 129 | pub block_env: BlockEnv, 130 | /// All the hosts used to connect to 131 | pub hosts: BTreeSet, 132 | } 133 | 134 | impl BlockchainDbMeta { 135 | /// Creates a new instance 136 | pub fn new(block_env: BlockEnv, url: String) -> Self { 137 | let host = Url::parse(&url) 138 | .ok() 139 | .and_then(|url| url.host().map(|host| host.to_string())) 140 | .unwrap_or(url); 141 | 142 | Self { block_env, hosts: BTreeSet::from([host]) } 143 | } 144 | 145 | /// Sets the [BlockEnv] of this instance using the provided [alloy_rpc_types::Block] 146 | pub fn with_block( 147 | mut self, 148 | block: &alloy_rpc_types::Block, 149 | ) -> Self { 150 | self.block_env = BlockEnv { 151 | number: block.header.number(), 152 | beneficiary: block.header.beneficiary(), 153 | timestamp: block.header.timestamp(), 154 | difficulty: U256::from(block.header.difficulty()), 155 | basefee: block.header.base_fee_per_gas().unwrap_or_default(), 156 | gas_limit: block.header.gas_limit(), 157 | prevrandao: block.header.mix_hash(), 158 | blob_excess_gas_and_price: Some(BlobExcessGasAndPrice::new( 159 | block.header.excess_blob_gas().unwrap_or_default(), 160 | false, 161 | )), 162 | }; 163 | 164 | self 165 | } 166 | 167 | /// Infers the host from the provided url and adds it to the set of hosts 168 | pub fn with_url(mut self, url: &str) 
-> Self { 169 | let host = Url::parse(url) 170 | .ok() 171 | .and_then(|url| url.host().map(|host| host.to_string())) 172 | .unwrap_or(url.to_string()); 173 | self.hosts.insert(host); 174 | self 175 | } 176 | 177 | /// Sets the [BlockEnv] of this instance 178 | pub fn set_block_env(mut self, block_env: revm::context::BlockEnv) { 179 | self.block_env = block_env; 180 | } 181 | } 182 | 183 | // ignore hosts to not invalidate the cache when different endpoints are used, as it's commonly the 184 | // case for http vs ws endpoints 185 | impl PartialEq for BlockchainDbMeta { 186 | fn eq(&self, other: &Self) -> bool { 187 | self.block_env == other.block_env 188 | } 189 | } 190 | 191 | impl<'de> Deserialize<'de> for BlockchainDbMeta { 192 | fn deserialize(deserializer: D) -> Result 193 | where 194 | D: Deserializer<'de>, 195 | { 196 | /// A backwards compatible representation of [revm::primitives::BlockEnv] 197 | /// 198 | /// This prevents deserialization errors of cache files caused by breaking changes to the 199 | /// default [revm::primitives::BlockEnv], for example enabling an optional feature. 200 | /// By hand rolling deserialize impl we can prevent cache file issues 201 | struct BlockEnvBackwardsCompat { 202 | inner: revm::context::BlockEnv, 203 | } 204 | 205 | impl<'de> Deserialize<'de> for BlockEnvBackwardsCompat { 206 | fn deserialize(deserializer: D) -> Result 207 | where 208 | D: Deserializer<'de>, 209 | { 210 | let mut value = serde_json::Value::deserialize(deserializer)?; 211 | 212 | // we check for any missing fields here 213 | if let Some(obj) = value.as_object_mut() { 214 | let default_value = 215 | serde_json::to_value(revm::context::BlockEnv::default()).unwrap(); 216 | for (key, value) in default_value.as_object().unwrap() { 217 | if !obj.contains_key(key) { 218 | obj.insert(key.to_string(), value.clone()); 219 | } 220 | } 221 | } 222 | 223 | let cfg_env: revm::context::BlockEnv = 224 | serde_json::from_value(value).map_err(serde::de::Error::custom)?; 225 | Ok(Self { inner: cfg_env }) 226 | } 227 | } 228 | 229 | // custom deserialize impl to not break existing cache files 230 | #[derive(Deserialize)] 231 | struct Meta { 232 | block_env: BlockEnvBackwardsCompat, 233 | /// all the hosts used to connect to 234 | #[serde(alias = "host")] 235 | hosts: Hosts, 236 | } 237 | 238 | #[derive(Deserialize)] 239 | #[serde(untagged)] 240 | enum Hosts { 241 | Multi(BTreeSet), 242 | Single(String), 243 | } 244 | 245 | let Meta { block_env, hosts } = Meta::deserialize(deserializer)?; 246 | Ok(Self { 247 | block_env: block_env.inner, 248 | hosts: match hosts { 249 | Hosts::Multi(hosts) => hosts, 250 | Hosts::Single(host) => BTreeSet::from([host]), 251 | }, 252 | }) 253 | } 254 | } 255 | 256 | /// In Memory cache containing all fetched accounts and storage slots 257 | /// and their values from RPC 258 | #[derive(Debug, Default)] 259 | pub struct MemDb { 260 | /// Account related data 261 | pub accounts: RwLock>, 262 | /// Storage related data 263 | pub storage: RwLock>, 264 | /// All retrieved block hashes 265 | pub block_hashes: RwLock>, 266 | } 267 | 268 | impl MemDb { 269 | /// Clears all data stored in this db 270 | pub fn clear(&self) { 271 | self.accounts.write().clear(); 272 | self.storage.write().clear(); 273 | self.block_hashes.write().clear(); 274 | } 275 | 276 | // Inserts the account, replacing it if it exists already 277 | pub fn do_insert_account(&self, address: Address, account: AccountInfo) { 278 | self.accounts.write().insert(address, account); 279 | } 280 | 281 | /// The 
implementation of [DatabaseCommit::commit()] 282 | pub fn do_commit(&self, changes: HashMap) { 283 | let mut storage = self.storage.write(); 284 | let mut accounts = self.accounts.write(); 285 | for (add, mut acc) in changes { 286 | if acc.is_empty() || acc.is_selfdestructed() { 287 | accounts.remove(&add); 288 | storage.remove(&add); 289 | } else { 290 | // insert account 291 | if let Some(code_hash) = acc 292 | .info 293 | .code 294 | .as_ref() 295 | .filter(|code| !code.is_empty()) 296 | .map(|code| code.hash_slow()) 297 | { 298 | acc.info.code_hash = code_hash; 299 | } else if acc.info.code_hash.is_zero() { 300 | acc.info.code_hash = KECCAK_EMPTY; 301 | } 302 | accounts.insert(add, acc.info); 303 | 304 | let acc_storage = storage.entry(add).or_default(); 305 | if acc.status.contains(AccountStatus::Created) { 306 | acc_storage.clear(); 307 | } 308 | for (index, value) in acc.storage { 309 | if value.present_value().is_zero() { 310 | acc_storage.remove(&index); 311 | } else { 312 | acc_storage.insert(index, value.present_value()); 313 | } 314 | } 315 | if acc_storage.is_empty() { 316 | storage.remove(&add); 317 | } 318 | } 319 | } 320 | } 321 | } 322 | 323 | impl Clone for MemDb { 324 | fn clone(&self) -> Self { 325 | Self { 326 | storage: RwLock::new(self.storage.read().clone()), 327 | accounts: RwLock::new(self.accounts.read().clone()), 328 | block_hashes: RwLock::new(self.block_hashes.read().clone()), 329 | } 330 | } 331 | } 332 | 333 | impl DatabaseCommit for MemDb { 334 | fn commit(&mut self, changes: HashMap) { 335 | self.do_commit(changes) 336 | } 337 | } 338 | 339 | /// A DB that stores the cached content in a json file 340 | #[derive(Debug)] 341 | pub struct JsonBlockCacheDB { 342 | /// Where this cache file is stored. 343 | /// 344 | /// If this is a [None] then caching is disabled 345 | cache_path: Option, 346 | /// Object that's stored in a json file 347 | data: JsonBlockCacheData, 348 | } 349 | 350 | impl JsonBlockCacheDB { 351 | /// Creates a new instance. 352 | fn new(meta: Arc>, cache_path: Option) -> Self { 353 | Self { cache_path, data: JsonBlockCacheData { meta, data: Arc::new(Default::default()) } } 354 | } 355 | 356 | /// Loads the contents of the diskmap file and returns the read object 357 | /// 358 | /// # Errors 359 | /// This will fail if 360 | /// - the `path` does not exist 361 | /// - the format does not match [JsonBlockCacheData] 362 | pub fn load(path: impl Into) -> eyre::Result { 363 | let path = path.into(); 364 | trace!(target: "cache", ?path, "reading json cache"); 365 | let contents = std::fs::read_to_string(&path).map_err(|err| { 366 | warn!(?err, ?path, "Failed to read cache file"); 367 | err 368 | })?; 369 | let data = serde_json::from_str(&contents).map_err(|err| { 370 | warn!(target: "cache", ?err, ?path, "Failed to deserialize cache data"); 371 | err 372 | })?; 373 | Ok(Self { cache_path: Some(path), data }) 374 | } 375 | 376 | /// Returns the [MemDb] it holds access to 377 | pub const fn db(&self) -> &Arc { 378 | &self.data.data 379 | } 380 | 381 | /// Metadata stored alongside the data 382 | pub const fn meta(&self) -> &Arc> { 383 | &self.data.meta 384 | } 385 | 386 | /// Returns `true` if this is a transient cache and nothing will be flushed 387 | pub const fn is_transient(&self) -> bool { 388 | self.cache_path.is_none() 389 | } 390 | 391 | /// Flushes the DB to disk if caching is enabled. 
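/// 
/// Flushing is a no-op for transient caches (no `cache_path` configured); it is also 
/// triggered automatically when the last [FlushJsonBlockCacheDB] holding this cache is dropped. 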
392 | #[instrument(level = "warn", skip_all, fields(path = ?self.cache_path))] 393 | pub fn flush(&self) { 394 | let Some(path) = &self.cache_path else { return }; 395 | self.flush_to(path.as_path()); 396 | } 397 | 398 | /// Flushes the DB to a specific file 399 | pub fn flush_to(&self, cache_path: &Path) { 400 | let path: &Path = cache_path; 401 | 402 | trace!(target: "cache", "saving json cache"); 403 | 404 | if let Some(parent) = path.parent() { 405 | let _ = fs::create_dir_all(parent); 406 | } 407 | 408 | let file = match fs::File::create(path) { 409 | Ok(file) => file, 410 | Err(e) => return warn!(target: "cache", %e, "Failed to open json cache for writing"), 411 | }; 412 | 413 | let mut writer = BufWriter::new(file); 414 | if let Err(e) = serde_json::to_writer(&mut writer, &self.data) { 415 | return warn!(target: "cache", %e, "Failed to write to json cache"); 416 | } 417 | if let Err(e) = writer.flush() { 418 | return warn!(target: "cache", %e, "Failed to flush to json cache"); 419 | } 420 | 421 | trace!(target: "cache", "saved json cache"); 422 | } 423 | 424 | /// Returns the cache path. 425 | pub fn cache_path(&self) -> Option<&Path> { 426 | self.cache_path.as_deref() 427 | } 428 | } 429 | 430 | /// The Data the [JsonBlockCacheDB] can read and flush 431 | /// 432 | /// This will be deserialized in a JSON object with the keys: 433 | /// `["meta", "accounts", "storage", "block_hashes"]` 434 | #[derive(Debug)] 435 | pub struct JsonBlockCacheData { 436 | pub meta: Arc>, 437 | pub data: Arc, 438 | } 439 | 440 | impl Serialize for JsonBlockCacheData { 441 | fn serialize(&self, serializer: S) -> Result 442 | where 443 | S: Serializer, 444 | { 445 | let mut map = serializer.serialize_map(Some(4))?; 446 | 447 | map.serialize_entry("meta", &*self.meta.read())?; 448 | map.serialize_entry("accounts", &*self.data.accounts.read())?; 449 | map.serialize_entry("storage", &*self.data.storage.read())?; 450 | map.serialize_entry("block_hashes", &*self.data.block_hashes.read())?; 451 | 452 | map.end() 453 | } 454 | } 455 | 456 | impl<'de> Deserialize<'de> for JsonBlockCacheData { 457 | fn deserialize(deserializer: D) -> Result 458 | where 459 | D: Deserializer<'de>, 460 | { 461 | #[derive(Deserialize)] 462 | struct Data { 463 | meta: BlockchainDbMeta, 464 | accounts: AddressHashMap, 465 | storage: AddressHashMap>, 466 | block_hashes: HashMap, 467 | } 468 | 469 | let Data { meta, accounts, storage, block_hashes } = Data::deserialize(deserializer)?; 470 | 471 | Ok(Self { 472 | meta: Arc::new(RwLock::new(meta)), 473 | data: Arc::new(MemDb { 474 | accounts: RwLock::new(accounts), 475 | storage: RwLock::new(storage), 476 | block_hashes: RwLock::new(block_hashes), 477 | }), 478 | }) 479 | } 480 | } 481 | 482 | /// A type that flushes a `JsonBlockCacheDB` on drop 483 | /// 484 | /// This type intentionally does not implement `Clone` since it's intended that there's only once 485 | /// instance that will flush the cache. 
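///
/// A minimal sketch of the intended wiring (hypothetical; `SharedBackend::new` in
/// `src/backend.rs` does exactly this with the shared cache handle):
///
/// ```ignore
/// // keep exactly one flusher alive next to the shared cache handle;
/// // dropping it flushes the json cache to disk via the `Drop` impl below
/// let flusher = Arc::new(FlushJsonBlockCacheDB(Arc::clone(db.cache())));
/// ```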
486 | #[derive(Debug)] 487 | pub struct FlushJsonBlockCacheDB(pub Arc); 488 | 489 | impl Drop for FlushJsonBlockCacheDB { 490 | fn drop(&mut self) { 491 | trace!(target: "fork::cache", "flushing cache"); 492 | self.0.flush(); 493 | trace!(target: "fork::cache", "flushed cache"); 494 | } 495 | } 496 | 497 | #[cfg(test)] 498 | mod tests { 499 | use super::*; 500 | 501 | #[test] 502 | fn can_deserialize_cache() { 503 | let s = r#"{ 504 | "meta": { 505 | "cfg_env": { 506 | "chain_id": 1337, 507 | "perf_analyse_created_bytecodes": "Analyse", 508 | "limit_contract_code_size": 18446744073709551615, 509 | "memory_limit": 4294967295, 510 | "disable_block_gas_limit": false, 511 | "disable_eip3607": false, 512 | "disable_base_fee": false 513 | }, 514 | "block_env": { 515 | "number": 15547871, 516 | "coinbase": "0x0000000000000000000000000000000000000000", 517 | "timestamp": 1663351871, 518 | "difficulty": "0x0", 519 | "basefee": 12448539171, 520 | "gas_limit": 30000000, 521 | "prevrandao": "0x0000000000000000000000000000000000000000000000000000000000000000" 522 | }, 523 | "hosts": [ 524 | "eth-mainnet.alchemyapi.io" 525 | ] 526 | }, 527 | "accounts": { 528 | "0xb8ffc3cd6e7cf5a098a1c92f48009765b24088dc": { 529 | "balance": "0x0", 530 | "nonce": 10, 531 | "code_hash": "0x3ac64c95eedf82e5d821696a12daac0e1b22c8ee18a9fd688b00cfaf14550aad", 532 | "code": { 533 | "LegacyAnalyzed": { 534 | "bytecode": "0x00", 535 | "original_len": 0, 536 | "jump_table": { 537 | "order": "bitvec::order::Lsb0", 538 | "head": { 539 | "width": 8, 540 | "index": 0 541 | }, 542 | "bits": 1, 543 | "data": [0] 544 | } 545 | } 546 | } 547 | } 548 | }, 549 | "storage": { 550 | "0xa354f35829ae975e850e23e9615b11da1b3dc4de": { 551 | "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e564": "0x5553444320795661756c74000000000000000000000000000000000000000000", 552 | "0x10": "0x37fd60ff8346", 553 | "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563": "0xb", 554 | "0x6": "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", 555 | "0x5": "0x36ff5b93162e", 556 | "0x14": "0x29d635a8e000", 557 | "0x11": "0x63224c73", 558 | "0x2": "0x6" 559 | } 560 | }, 561 | "block_hashes": { 562 | "0xed3deb": "0xbf7be3174b261ea3c377b6aba4a1e05d5fae7eee7aab5691087c20cf353e9877", 563 | "0xed3de9": "0xba1c3648e0aee193e7d00dffe4e9a5e420016b4880455641085a4731c1d32eef", 564 | "0xed3de8": "0x61d1491c03a9295fb13395cca18b17b4fa5c64c6b8e56ee9cc0a70c3f6cf9855", 565 | "0xed3de7": "0xb54560b5baeccd18350d56a3bee4035432294dc9d2b7e02f157813e1dee3a0be", 566 | "0xed3dea": "0x816f124480b9661e1631c6ec9ee39350bda79f0cbfc911f925838d88e3d02e4b" 567 | } 568 | }"#; 569 | 570 | let cache: JsonBlockCacheData = serde_json::from_str(s).unwrap(); 571 | assert_eq!(cache.data.accounts.read().len(), 1); 572 | assert_eq!(cache.data.storage.read().len(), 1); 573 | assert_eq!(cache.data.block_hashes.read().len(), 5); 574 | 575 | let _s = serde_json::to_string(&cache).unwrap(); 576 | } 577 | 578 | #[test] 579 | fn can_deserialize_cache_post_4844() { 580 | let s = r#"{ 581 | "meta": { 582 | "cfg_env": { 583 | "chain_id": 1, 584 | "kzg_settings": "Default", 585 | "perf_analyse_created_bytecodes": "Analyse", 586 | "limit_contract_code_size": 18446744073709551615, 587 | "memory_limit": 134217728, 588 | "disable_block_gas_limit": false, 589 | "disable_eip3607": true, 590 | "disable_base_fee": false, 591 | "optimism": false 592 | }, 593 | "block_env": { 594 | "number": 18651580, 595 | "coinbase": "0x4838b106fce9647bdf1e7877bf73ce8b0bad5f97", 596 | "timestamp": 1700950019, 597 | 
"gas_limit": 30000000, 598 | "basefee": 26886078239, 599 | "difficulty": "0xc6b1a299886016dea3865689f8393b9bf4d8f4fe8c0ad25f0058b3569297c057", 600 | "prevrandao": "0xc6b1a299886016dea3865689f8393b9bf4d8f4fe8c0ad25f0058b3569297c057", 601 | "blob_excess_gas_and_price": { 602 | "excess_blob_gas": 0, 603 | "blob_gasprice": 1 604 | } 605 | }, 606 | "hosts": [ 607 | "eth-mainnet.alchemyapi.io" 608 | ] 609 | }, 610 | "accounts": { 611 | "0x4838b106fce9647bdf1e7877bf73ce8b0bad5f97": { 612 | "balance": "0x8e0c373cfcdfd0eb", 613 | "nonce": 128912, 614 | "code_hash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", 615 | "code": { 616 | "LegacyAnalyzed": { 617 | "bytecode": "0x00", 618 | "original_len": 0, 619 | "jump_table": { 620 | "order": "bitvec::order::Lsb0", 621 | "head": { 622 | "width": 8, 623 | "index": 0 624 | }, 625 | "bits": 1, 626 | "data": [0] 627 | } 628 | } 629 | } 630 | } 631 | }, 632 | "storage": {}, 633 | "block_hashes": {} 634 | }"#; 635 | 636 | let cache: JsonBlockCacheData = serde_json::from_str(s).unwrap(); 637 | assert_eq!(cache.data.accounts.read().len(), 1); 638 | 639 | let _s = serde_json::to_string(&cache).unwrap(); 640 | } 641 | 642 | #[test] 643 | fn can_return_cache_path_if_set() { 644 | // set 645 | let cache_db = JsonBlockCacheDB::new( 646 | Arc::new(RwLock::new(BlockchainDbMeta::default())), 647 | Some(PathBuf::from("/tmp/foo")), 648 | ); 649 | assert_eq!(Some(Path::new("/tmp/foo")), cache_db.cache_path()); 650 | 651 | // unset 652 | let cache_db = 653 | JsonBlockCacheDB::new(Arc::new(RwLock::new(BlockchainDbMeta::default())), None); 654 | assert_eq!(None, cache_db.cache_path()); 655 | } 656 | } 657 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::{Address, B256, U256}; 2 | use alloy_rpc_types::BlockId; 3 | use futures::channel::mpsc::{SendError, TrySendError}; 4 | use std::{ 5 | convert::Infallible, 6 | sync::{mpsc::RecvError, Arc}, 7 | }; 8 | 9 | /// Result alias with `DatabaseError` as error 10 | pub type DatabaseResult = Result; 11 | 12 | /// Errors that can happen when working with [`revm::Database`] 13 | #[derive(Debug, thiserror::Error)] 14 | #[allow(missing_docs)] 15 | pub enum DatabaseError { 16 | #[error("missing bytecode for code hash {0}")] 17 | MissingCode(B256), 18 | #[error(transparent)] 19 | Recv(#[from] RecvError), 20 | #[error(transparent)] 21 | Send(#[from] SendError), 22 | #[error("failed to get account for {0}: {1}")] 23 | GetAccount(Address, Arc), 24 | #[error("failed to get storage for {0} at {1}: {2}")] 25 | GetStorage(Address, U256, Arc), 26 | #[error("failed to get block hash for {0}: {1}")] 27 | GetBlockHash(u64, Arc), 28 | #[error("failed to get full block for {0:?}: {1}")] 29 | GetFullBlock(BlockId, Arc), 30 | #[error("block {0:?} does not exist")] 31 | BlockNotFound(BlockId), 32 | #[error("failed to get transaction {0}: {1}")] 33 | GetTransaction(B256, Arc), 34 | #[error("failed to process AnyRequest: {0}")] 35 | AnyRequest(Arc), 36 | } 37 | 38 | impl DatabaseError { 39 | fn get_rpc_error(&self) -> Option<&eyre::Error> { 40 | match self { 41 | Self::GetAccount(_, err) => Some(err), 42 | Self::GetStorage(_, _, err) => Some(err), 43 | Self::GetBlockHash(_, err) => Some(err), 44 | Self::GetFullBlock(_, err) => Some(err), 45 | Self::GetTransaction(_, err) => Some(err), 46 | Self::AnyRequest(err) => Some(err), 47 | // Enumerate explicitly to make sure errors are 
48 |             Self::MissingCode(_) | Self::Recv(_) | Self::Send(_) | Self::BlockNotFound(_) => None,
49 |         }
50 |     }
51 | 
52 |     /// Whether the error is potentially caused by the user forking from an older block in a
53 |     /// non-archive node.
54 |     pub fn is_possibly_non_archive_node_error(&self) -> bool {
55 |         static GETH_MESSAGE: &str = "missing trie node";
56 | 
57 |         self.get_rpc_error()
58 |             .map(|err| err.to_string().to_lowercase().contains(GETH_MESSAGE))
59 |             .unwrap_or(false)
60 |     }
61 | }
62 | 
63 | impl<T> From<TrySendError<T>> for DatabaseError {
64 |     fn from(value: TrySendError<T>) -> Self {
65 |         value.into_send_error().into()
66 |     }
67 | }
68 | 
69 | impl From<Infallible> for DatabaseError {
70 |     fn from(value: Infallible) -> Self {
71 |         match value {}
72 |     }
73 | }
74 | 
75 | impl revm::database::DBErrorMarker for DatabaseError {}
76 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![doc = include_str!("../README.md")]
2 | #![cfg_attr(not(test), warn(unused_crate_dependencies))]
3 | #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
4 | 
5 | #[macro_use]
6 | extern crate tracing;
7 | 
8 | pub mod backend;
9 | pub mod cache;
10 | pub mod error;
11 | 
12 | pub use backend::{BackendHandler, SharedBackend};
13 | pub use cache::BlockchainDb;
14 | pub use error::{DatabaseError, DatabaseResult};
15 | 
--------------------------------------------------------------------------------
/test-data/storage.json:
--------------------------------------------------------------------------------
1 | {"meta":{"cfg_env":{"chain_id":1,"spec_id":"LATEST","perf_all_precompiles_have_balance":false,"memory_limit":4294967295,"perf_analyse_created_bytecodes":"Analyse","limit_contract_code_size":24576,"disable_coinbase_tip":false},"block_env":{"number":14435000,"coinbase":"0x0000000000000000000000000000000000000000","timestamp":0,"difficulty":"0","basefee":0,"gas_limit":18446744073709551615},"hosts":["mainnet.infura.io"]},"accounts":{"0x63091244180ae240c87d1f528f5f269134cb07b3":{"balance":"0x0","code_hash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","code":null,"nonce":0}},"storage":{"0x63091244180ae240c87d1f528f5f269134cb07b3":{"0x0":"0x0","0x1":"0x0","0x2":"0x0","0x3":"0x0","0x4":"0x0","0x5":"0x0","0x6":"0x0","0x7":"0x0","0x8":"0x0","0x9":"0x0"}},"block_hashes":{}}
--------------------------------------------------------------------------------