├── .github └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── README.md ├── bin ├── adapter │ ├── Cargo.toml │ ├── README.md │ ├── hive.patch │ └── src │ │ └── main.rs ├── ress │ ├── Cargo.toml │ ├── build.rs │ └── src │ │ ├── cli.rs │ │ ├── launch.rs │ │ ├── lib.rs │ │ ├── main.rs │ │ └── rpc.rs └── reth │ ├── Cargo.toml │ └── src │ └── main.rs ├── crates ├── engine │ ├── Cargo.toml │ └── src │ │ ├── download │ │ ├── futs.rs │ │ └── mod.rs │ │ ├── engine.rs │ │ ├── lib.rs │ │ └── tree │ │ ├── block_buffer.rs │ │ ├── mod.rs │ │ ├── outcome.rs │ │ └── root.rs ├── evm │ ├── Cargo.toml │ └── src │ │ ├── db.rs │ │ ├── executor.rs │ │ └── lib.rs ├── network │ ├── Cargo.toml │ └── src │ │ ├── handle.rs │ │ ├── lib.rs │ │ └── manager.rs ├── primitives │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ ├── witness.rs │ │ └── witness_rpc.rs ├── provider │ ├── Cargo.toml │ └── src │ │ ├── chain_state.rs │ │ ├── database.rs │ │ ├── lib.rs │ │ └── provider.rs └── testing │ ├── Cargo.toml │ └── src │ ├── lib.rs │ └── rpc_adapter.rs ├── deny.toml ├── etc └── grafana │ └── dashboards │ └── ress.json ├── justfile └── rustfmt.toml /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | fmt: 17 | runs-on: ubuntu-latest 18 | timeout-minutes: 30 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: dtolnay/rust-toolchain@nightly 22 | with: 23 | components: rustfmt 24 | - run: cargo fmt --all --check 25 | 26 | clippy: 27 | runs-on: ubuntu-latest 28 | timeout-minutes: 30 29 | steps: 30 | - uses: actions/checkout@v4 31 | - uses: dtolnay/rust-toolchain@nightly 32 | with: 33 | components: clippy 34 | - uses: Swatinem/rust-cache@v2 35 | with: 36 | cache-on-failure: true 37 | - run: cargo clippy --workspace --all-targets --all-features 38 | env: 39 | RUSTFLAGS: -Dwarnings 40 | 41 | test: 42 | runs-on: ubuntu-latest 43 | timeout-minutes: 30 44 | steps: 45 | - uses: actions/checkout@v4 46 | - uses: dtolnay/rust-toolchain@stable 47 | - uses: Swatinem/rust-cache@v2 48 | with: 49 | cache-on-failure: true 50 | - name: test 51 | run: cargo test --workspace ${{ matrix.flags }} 52 | 53 | docs: 54 | runs-on: ubuntu-latest 55 | timeout-minutes: 30 56 | steps: 57 | - uses: actions/checkout@v4 58 | - uses: dtolnay/rust-toolchain@nightly 59 | - uses: Swatinem/rust-cache@v2 60 | with: 61 | cache-on-failure: true 62 | - name: Build documentation 63 | run: cargo doc --workspace --all-features --no-deps --document-private-items 64 | env: 65 | RUSTDOCFLAGS: --cfg docsrs -D warnings # -Zunstable-options --show-type-layout --generate-link-to-definition 66 | 67 | deny: 68 | uses: ithacaxyz/ci/.github/workflows/deny.yml@main 69 | 70 | ci-success: 71 | name: ci success 72 | runs-on: ubuntu-latest 73 | if: always() 74 | needs: 75 | - test 76 | - clippy 77 | - docs 78 | - fmt 79 | - deny 80 | steps: 81 | - name: Decide whether the needed jobs succeeded or failed 82 | uses: re-actors/alls-green@release/v1 83 | with: 84 | jobs: ${{ toJSON(needs) }} 85 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .env 3 | *.db 4 | /fixtures/* 5 | proptest-regressions/ 
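The checks in `ci.yml` above map directly onto local commands. As a convenience, here is a sketch of reproducing the same checks locally (assuming rustup-managed stable and nightly toolchains with the `rustfmt` and `clippy` components installed, mirroring the toolchain pins and env vars in the workflow):

```bash
# fmt job: nightly rustfmt, check only
cargo +nightly fmt --all --check

# clippy job: all targets and features, warnings denied as in CI
RUSTFLAGS="-Dwarnings" cargo +nightly clippy --workspace --all-targets --all-features

# test job: stable toolchain
cargo test --workspace

# docs job: private items documented, rustdoc warnings denied
RUSTDOCFLAGS="--cfg docsrs -D warnings" cargo +nightly doc --workspace --all-features --no-deps --document-private-items
```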
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace.package] 2 | version = "0.1.0" 3 | edition = "2021" 4 | rust-version = "1.83" 5 | license = "MIT OR Apache-2.0" 6 | homepage = "https://github.com/ithacaxyz/reth-stateless" 7 | repository = "https://github.com/ithacaxyz/reth-stateless" 8 | exclude = [".github/"] 9 | 10 | [workspace] 11 | members = [ 12 | "bin/adapter", 13 | "bin/ress", 14 | "bin/reth", 15 | 16 | "crates/engine", 17 | "crates/evm", 18 | "crates/network", 19 | "crates/primitives", 20 | "crates/provider", 21 | 22 | "crates/testing", 23 | ] 24 | 25 | # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 26 | # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html 27 | resolver = "2" 28 | 29 | [workspace.lints.rust] 30 | missing_debug_implementations = "warn" 31 | missing_docs = "warn" 32 | rust_2018_idioms = { level = "deny", priority = -1 } 33 | unreachable_pub = "warn" 34 | unused_must_use = "deny" 35 | 36 | [workspace.lints.rustdoc] 37 | all = "warn" 38 | 39 | # Speed up compilation time for dev builds by reducing emitted debug info. 40 | # NOTE: Debuggers may provide less useful information with this setting. 41 | # Uncomment this section if you're using a debugger. 42 | [profile.dev] 43 | # https://davidlattimore.github.io/posts/2024/02/04/speeding-up-the-rust-edit-build-run-cycle.html 44 | debug = "line-tables-only" 45 | split-debuginfo = "unpacked" 46 | 47 | # Meant for testing - all optimizations, but with debug assertions and overflow checks. 48 | [profile.hivetests] 49 | inherits = "test" 50 | opt-level = 3 51 | lto = "thin" 52 | 53 | [profile.release] 54 | opt-level = 3 55 | lto = "thin" 56 | debug = "none" 57 | strip = "symbols" 58 | panic = "unwind" 59 | codegen-units = 16 60 | 61 | [profile.profiling] 62 | inherits = "release" 63 | debug = "full" 64 | strip = "none" 65 | 66 | [profile.bench] 67 | inherits = "profiling" 68 | 69 | [profile.maxperf] 70 | inherits = "release" 71 | lto = "fat" 72 | codegen-units = 1 73 | 74 | [workspace.dependencies] 75 | # ress 76 | ress-engine = { path = "crates/engine" } 77 | ress-network = { path = "crates/network" } 78 | ress-provider = { path = "crates/provider" } 79 | ress-evm = { path = "crates/evm" } 80 | ress-primitives = { path = "crates/primitives" } 81 | ress-testing = { path = "crates/testing" } 82 | 83 | # reth ress 84 | reth-ress-protocol = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 85 | 86 | # reth 87 | reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 88 | 89 | ## primitives/types 90 | reth-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 91 | reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 92 | reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 93 | reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 94 | reth-storage-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 95 | reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 96 | 97 | ## db 98 | reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 99 | reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 100 | reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.3.10" } 101 | reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10", features = [ 102 | "test-utils", 103 | ] } 104 | 105 | ## network 106 | reth-eth-wire = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 107 | reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 108 | reth-network = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 109 | reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 110 | reth-discv4 = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 111 | 112 | ## trie 113 | reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 114 | reth-trie-sparse = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 115 | 116 | ## evm 117 | reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 118 | reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 119 | reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 120 | 121 | ## consensus 122 | reth-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 123 | reth-consensus-debug-client = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 124 | reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 125 | reth-ethereum-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 126 | reth-engine-tree = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 127 | reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 128 | 129 | ## rpc 130 | reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 131 | reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 132 | reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 133 | reth-rpc-layer = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 134 | reth-rpc-types-compat = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 135 | reth-rpc-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 136 | reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 137 | 138 | ## node 139 | reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 140 | reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 141 | reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 142 | reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 143 | reth-node-events = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 144 | reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 145 | reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10", features = [ 146 | "test-utils", 147 | ] } 148 | reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 149 | 150 | ## util 151 | reth-tokio-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 152 | reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 153 | reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 154 | reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 155 | reth-cli-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" } 156 
| reth-ethereum-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" }
157 | reth-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" }
158 | reth-testing-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.3.10" }
159 |
160 | # alloy
161 | alloy-primitives = { version = "1.0.0", default-features = false }
162 | alloy-trie = { version = "0.8.1", default-features = false }
163 | alloy-rlp = { version = "0.3.10", default-features = false }
164 |
165 | alloy-eips = { version = "0.14.0", default-features = false }
166 | alloy-rpc-types = { version = "0.14.0", default-features = false }
167 | alloy-rpc-types-eth = { version = "0.14.0", default-features = false }
168 | alloy-rpc-types-engine = { version = "0.14.0", default-features = false }
169 | alloy-rpc-types-debug = { version = "0.14.0", default-features = false }
170 | alloy-rpc-client = { version = "0.14.0", default-features = false }
171 | alloy-provider = { version = "0.14.0", default-features = false, features = [
172 |     "reqwest",
173 |     "reqwest-rustls-tls",
174 | ] }
175 | alloy-transport-http = { version = "0.14.0", default-features = false }
176 | alloy-serde = { version = "0.14.0", default-features = false }
177 | alloy-consensus = { version = "0.14.0", default-features = false }
178 | alloy-network = { version = "0.14.0", default-features = false }
179 |
180 | # misc
181 | clap = { version = "4.4", features = ["derive"] }
182 | tokio = { version = "1.39", default-features = false }
183 | tokio-stream = "0.1.11"
184 | futures = "0.3"
185 | eyre = "0.6"
186 | rand = "0.8.5"
187 | tracing = "0.1.0"
188 | tracing-subscriber = "0.3"
189 | thiserror = "2.0.9"
190 | rayon = "1.8"
191 | serde = { version = "1.0", features = ["derive"] }
192 | bincode = "1.3"
193 | parking_lot = "0.12"
194 | serde_json = "1.0.1"
195 | reqwest = { version = "0.12", features = [
196 |     "json",
197 |     "rustls-tls",
198 | ], default-features = false }
199 | dotenvy = "0.15.7"
200 | itertools = "0.14"
201 | metrics = "0.24.0"
202 | derive_more = { version = "2", default-features = false, features = ["full"] }
203 | strum = "0.27"
204 | strum_macros = "0.27"
205 | schnellru = "0.2"
206 |
207 | # testing
208 | arbitrary = "1.3"
209 | proptest = "1.4"
210 | proptest-derive = "0.5"
211 | proptest-arbitrary-interop = "0.1.0"
212 | assert_matches = "1.5.0"
213 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | ![image](https://github.com/user-attachments/assets/ac01e368-4872-4983-afc0-a7fd218dd342)
2 |
3 | See the [accompanying blog on Paradigm Research](https://www.paradigm.xyz/2025/03/stateless-reth-nodes).
4 |
5 | # ress (reth stateless)
6 |
7 | Ress (shorthand for Reth Stateless) is a fully validating stateless Ethereum Execution Layer client that requires only 14GB of disk.
8 | Stateless nodes matter not only for improving Ethereum’s decentralization, but also for scaling the L1 gas limit,
9 | scaling optimistic L2s, and implementing Native Rollups to improve the L2 ecosystem’s security & interoperability.
10 |
11 | > [!CAUTION]
12 | > Ress is experimental software that has not been fully tested in production-grade settings.
13 | > At this moment, we do not recommend using ress nodes in validator setups.
14 |
15 | ## Run
16 |
17 | ### Install
18 |
19 | ```bash
20 | cargo install --git https://github.com/paradigmxyz/ress ress --locked
21 | ```
22 |
23 | ### Run Consensus Layer client
24 |
25 | See https://reth.rs/run/mainnet.html#running-the-consensus-layer for instructions; they're the same as for Reth.
26 |
27 | The only difference is that the default JWT token location is `~/.local/share/ress/NETWORK/jwt.hex`
28 | instead of `~/.local/share/reth/NETWORK/jwt.hex`.
29 |
30 | ### (Optional) Run stateful Reth client
31 |
32 | Follow the [instructions](https://reth.rs/run/run-a-node.html) to install Reth.
33 | Start a node with the `--ress.enable` flag to enable support for the `ress` subprotocol.
34 |
35 | ```bash
36 | reth --ress.enable
37 | ```
38 |
39 | ### Run Ress client
40 |
41 | We host several public nodes that you can peer with if you're unable to run a Reth node yourself.
42 |
43 | ```bash
44 | ress --trusted-peers \
45 | "enode://04bcda1f0a750ce5fd986187825ffcd7aa1ad3641027f646707c2121443e85ae309e047f228c0067aac382f0c0cab21e91a2852e10b4f7724187b0185bb78b2b@100.126.2.26:30303,\
46 | enode://04e4e548eee6f042685ad0b6793de36a4c5c6a3107cdf54cbfeedbeb0df4138d4c65b534fb700072341098644eb5a6b125e63c36464d8f57c19b6e26ca36ae7c@100.75.245.88:30303,\
47 | enode://2c464310c41c2a9d7be44783d38ab2b8517e4c5133719de1a4b605294cfd201f33b2c5b4158054b171a3ba26837f85a97f6a8553622ea1033d70c98fc1b70fa0@69.67.151.138:30303,\
48 | enode://065cffdc5c824d42c23f933ca615daad8b887f2330b1313e8c1a5d850be93d3b6e95698d4f774bbf7b2639ac6d6d870645156eeb805bd3448107806cc0a6e5f9@69.67.151.138:30303"
49 | ```
50 |
51 | If you've started a Reth node yourself in the previous step, you can fetch the enode from it
52 | and use it as an argument to the `--trusted-peers` flag.
53 | ```bash
54 | cast rpc admin_nodeInfo -r http://localhost:9545 | jq .enode
55 | ```
56 |
57 | ## How it works
58 |
59 | Live sync works the same way it does on any stateful node: Ress receives a new payload from the consensus client
60 | and fetches the necessary state data (witness, block, bytecodes) from a stateful Reth client
61 | via an [RLPx subprotocol dedicated to Ress](https://github.com/paradigmxyz/reth/tree/main/crates/ress/protocol).
62 | It verifies the payload and calculates the new state root entirely in memory.
63 |
64 | ```mermaid
65 | sequenceDiagram
66 |     CL->>Ress: NewPayload
67 |     Ress->>Reth: GetWitness
68 |     Reth-->>Ress: Witness
69 |     Ress->>Reth: GetBytecode
70 |     Note over Ress,Reth: (only missing)
71 |     Reth-->>Ress: Bytecode
72 |     Ress->>Ress: Validate Payload
73 |     Ress-->>CL: PayloadStatus
74 | ```
75 |
76 | ### Components
77 |
78 | To run a ress node successfully, you need the following components:
79 | 1. Ress node (stateless) connected to a Consensus Client
80 | 2. Reth node (stateful) connected to a Consensus Client
81 |
82 | ```mermaid
83 | flowchart TB
84 |     subgraph Stateful["Stateful Client Pair"]
85 |         SFCL["Consensus Layer"]
86 |         Reth["Reth Node"]
87 |         SFCL -->|Engine API| Reth
88 |     end
89 |
90 |     subgraph Stateless["Stateless Client Pair"]
91 |         SLCL["Consensus Layer"]
92 |         Ress["Ress Node"]
93 |         SLCL -->|Engine API| Ress
94 |     end
95 |
96 |     Ress -->|ress RLPx subprotocol| Reth
97 | ```
98 |
99 | ## How it was tested
100 |
101 | ### Hive Tests
102 |
103 | Ress was tested with [hive](https://github.com/ethereum/hive).
104 | A simulator sends requests to `adapter`, which proxies `engine/*` requests to `ress` and other requests to `reth`.
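For example, a hypothetical probe against the adapter (the method names here are illustrative, not taken from the test suite): a plain `eth_*` call on the adapter's unauthenticated port is forwarded to `reth` only, while `engine_*` methods other than `engine_get*` are also replayed against `ress`, per the `is_engine_method` check in `bin/adapter/src/main.rs`:

```bash
# Forwarded to reth only (non-engine method); an engine_newPayloadV3 call on
# the authenticated port (8551) would be mirrored to ress as well.
curl -s http://127.0.0.1:8545 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_chainId","params":[]}'
```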
105 |
106 | - `reth`: auth port (8651), rpc port (8544), subnetwork (30303)
107 | - `ress`: auth port (8552), rpc port (-), subnetwork (61398)
108 |
109 | We ran the Cancun test suite; of the 226 tests, ress passes `206` successfully:
110 | ```
111 | Mar 13 09:46:28.033 INF simulation ethereum/engine finished suites=1 tests=226 failed=20
112 | ```
113 |
114 | ### Holesky validators
115 | For our proof of concept, we successfully [ran Ress-backed Ethereum validators](https://light-holesky.beaconcha.in/validator/1919380?v=attestations) on the Holesky testnet and correctly attested to block validity.
116 |
-------------------------------------------------------------------------------- /bin/adapter/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "adapter"
3 | version.workspace = true
4 | edition.workspace = true
5 | rust-version.workspace = true
6 | license.workspace = true
7 | homepage.workspace = true
8 | repository.workspace = true
9 |
10 | [dependencies]
11 | # alloy
12 | alloy-rpc-types-engine = { workspace = true, features = ["serde"] }
13 |
14 | tokio = { workspace = true, features = ["full"] }
15 | bytes = "1.2"
16 | hyper = { version = "1", features = ["full"] }
17 | hyper-rustls = { version = "0.26", features = ["webpki-roots"] }
18 | http-body-util = "0.1"
19 | hyper-util = { version = "0.1", features = ["full"] }
20 | serde_json.workspace = true
21 | serde.workspace = true
22 | tracing.workspace = true
23 | tracing-subscriber.workspace = true
24 | futures-util = "0.3"
25 |
-------------------------------------------------------------------------------- /bin/adapter/README.md: --------------------------------------------------------------------------------
1 | # hive-adapter
2 |
3 | Acts as a proxy/mitm between hive and reth/ress:
4 | * If it receives a non-engine request: it forwards it to reth, gets its response, and sends it back to hive.
5 | * If it receives an engine request: it forwards it to reth, gets its response, THEN sends that same request to ress and gets its response. Only then does it send the reth response back to hive.
6 |
7 | ### How to run hive
8 |
9 | 0. Install Docker.
10 | 1. `git clone https://github.com/ethereum/hive && cd hive && go build .`
11 | 2. Apply [hive.patch](./hive.patch) to the hive repo.
12 | 3. Build `reth`, `ress`, and `adapter`, and copy them into `hive/clients/reth/`.
13 | 4. From inside the hive repo, run: `rm -rf workspace/ && ./hive --sim ethereum/engine --sim.limit api --client reth`
14 |
15 | ### Tips
16 | * Logs can be checked under the workspace folder created after a hive run. Example: `hive/workspace/logs/reth/client-fc9f9ef80a74664e877fd59008e59e24fba35c43d02bbc30e2530952cc07a907.log`
17 | * Tracing levels can be changed in the entrypoint: `hive/clients/reth/reth.sh`
18 |
-------------------------------------------------------------------------------- /bin/adapter/hive.patch: --------------------------------------------------------------------------------
1 | diff --git a/clients/reth/Dockerfile b/clients/reth/Dockerfile
2 | index 2f70b6aa..07fcbed7 100644
3 | --- a/clients/reth/Dockerfile
4 | +++ b/clients/reth/Dockerfile
5 | @@ -1,28 +1,41 @@
6 | -ARG baseimage=ghcr.io/paradigmxyz/reth
7 | -ARG tag=latest
8 | +### Build Reth Locally:
9 | +## Requires a copy of / -> hive/clients/reth/
10 |
11 | -FROM $baseimage:$tag as builder
12 | +## Builder stage: Compiles reth from a git repository
13 | +FROM rust:latest as builder
14 |
15 | -# Install script tools.
16 | -RUN apt-get update -y 17 | -RUN apt-get install -y bash curl jq 18 | +# Default local client path: clients/reth/ 19 | +ARG local_path=reth 20 | +COPY $local_path reth 21 | 22 | -# Add genesis mapper script. 23 | -ADD genesis.json /genesis.json 24 | -ADD mapper.jq /mapper.jq 25 | +RUN apt-get update && apt-get install -y libclang-dev pkg-config build-essential 26 | + # && cd reth && cargo build --release \ 27 | + # && cp target/release/reth /usr/local/bin/reth 28 | 29 | -# Add the startup script. 30 | -ADD reth.sh /reth.sh 31 | -RUN chmod +x /reth.sh 32 | +## Final stage: Sets up the environment for running reth 33 | +FROM debian:latest 34 | +RUN apt-get update && apt-get install -y bash curl jq \ 35 | + && apt-get clean && rm -rf /var/lib/apt/lists/* 36 | 37 | -# Add the enode URL retriever script. 38 | -ADD enode.sh /hive-bin/enode.sh 39 | -RUN chmod +x /hive-bin/enode.sh 40 | +# Copy compiled binary from builder 41 | +COPY reth /usr/local/bin/reth 42 | +COPY ress /usr/local/bin/ress 43 | +COPY adapter /usr/local/bin/adapter 44 | + 45 | +# Add genesis mapper script, startup script, and enode URL retriever script 46 | +COPY discovery-secret /discovery-secret 47 | +COPY genesis.json /genesis.json 48 | +COPY mapper.jq /mapper.jq 49 | +COPY reth.sh /reth.sh 50 | +COPY enode.sh /hive-bin/enode.sh 51 | + 52 | +# Set execute permissions for scripts 53 | +RUN chmod +x /reth.sh /hive-bin/enode.sh 54 | 55 | # Create version.txt 56 | -RUN /usr/local/bin/reth --version | sed -e 's/reth \(.*\)/\1/' > /version.txt 57 | +RUN /usr/local/bin/reth --version | head -1 > /version.txt 58 | 59 | -# Export the usual networking ports to allow outside access to the node. 60 | +# Export the usual networking ports 61 | EXPOSE 8545 8546 30303 30303/udp 62 | 63 | ENTRYPOINT ["/reth.sh"] 64 | diff --git a/clients/reth/discovery-secret b/clients/reth/discovery-secret 65 | new file mode 100644 66 | index 00000000..266cb554 67 | --- /dev/null 68 | +++ b/clients/reth/discovery-secret 69 | @@ -0,0 +1 @@ 70 | +24ff0cc5a8d69a3e8901cf744ecdc59c9b2795b309979a34d947e4fa34b7fd12 71 | \ No newline at end of file 72 | diff --git a/clients/reth/reth.sh b/clients/reth/reth.sh 73 | index 40db5e37..1abca61e 100644 74 | --- a/clients/reth/reth.sh 75 | +++ b/clients/reth/reth.sh 76 | @@ -159,6 +159,16 @@ fi 77 | # Configure NAT 78 | FLAGS="$FLAGS --nat none" 79 | 80 | +# Configure reth so that everything goes through the proxy + ress peer 81 | +FLAGS="$FLAGS --authrpc.port 8651 --http.port 8544 -d --ress.enable --trusted-peers enode://4d4b6cd1361032ca9bd2aeb9d900aa4d45d9ead80ac9423374c451a7254d07662a3eada2d0fe208b6d257ceb0f064284662e857f57b66b54c198bd310ded36d0@127.0.0.1:61398" 82 | + 83 | +# Ensure reth has always the same PeerId 84 | +cp /discovery-secret $DATADIR 85 | + 86 | + 87 | # Launch the main client. 88 | echo "Running reth with flags: $FLAGS" 89 | -RUST_LOG=info $reth node $FLAGS 90 | + 91 | +RUST_LOG=info,reth::ress_provider=trace,engine::tree=trace $reth node $FLAGS & sleep 1 && RUST_LOG=info,ress=trace /usr/local/bin/ress --trusted-peers "enode://060bb5ab4a20bbb2465a4db24de7a740db00207e34044454504bf004d6396bd9b03bf08b1df3f1f468366a2c0b809dee7aa54069af94fa11bdb26b9103ee76d6@127.0.0.1:30303" --chain /genesis.json --port 30304 --authrpc.port 8552 --authrpc.jwtsecret=/jwt.secret & while ! 
resp=$(curl -s --max-time 5 http://127.0.0.1:8552); do sleep 1; done; echo "$resp" && RUST_LOG=info adapter 92 | -------------------------------------------------------------------------------- /bin/adapter/src/main.rs: -------------------------------------------------------------------------------- 1 | use alloy_rpc_types_engine::{PayloadId, PayloadStatus}; 2 | use bytes::Bytes; 3 | use http_body_util::{combinators::BoxBody, BodyExt, Full}; 4 | use hyper::{body::Incoming, server::conn::http1, service::service_fn, Request, Response}; 5 | use hyper_rustls::HttpsConnectorBuilder; 6 | use hyper_util::{ 7 | client::legacy::Client, 8 | rt::{TokioExecutor, TokioIo}, 9 | }; 10 | use serde::{Deserialize, Serialize}; 11 | use serde_json::Value; 12 | use std::net::SocketAddr; 13 | use tokio::{self, net::TcpListener}; 14 | use tracing::{error, info, Level}; 15 | 16 | const RETH_AUTH: &str = "http://127.0.0.1:8651"; 17 | const RETH_HTTP: &str = "http://127.0.0.1:8544"; 18 | 19 | const RESS_AUTH: &str = "http://127.0.0.1:8552"; 20 | 21 | #[derive(Debug, Deserialize, Serialize)] 22 | #[serde(rename_all = "camelCase")] 23 | struct RethPayloadResponse { 24 | #[serde(rename = "payloadStatus")] 25 | pub payload_status: PayloadStatus, 26 | #[serde(rename = "payloadId")] 27 | pub payload_id: Option, 28 | } 29 | 30 | async fn forward_request( 31 | req: Request, 32 | is_auth_server: bool, 33 | ) -> Result>, hyper::Error> { 34 | let reth_uri = if is_auth_server { RETH_AUTH } else { RETH_HTTP }; 35 | let req_method = req.method().clone(); 36 | let req_headers = req.headers().clone(); 37 | let whole_body = req.collect().await?.to_bytes(); 38 | let request_body: serde_json::Value = serde_json::from_slice(&whole_body).unwrap(); 39 | 40 | // `is_engine_method` as true if: 41 | // 1) method starts with "engine" 42 | // 2) method does NOT start with "engine_get" 43 | let is_engine_method = request_body["method"] 44 | .as_str() 45 | .map(|method| method.starts_with("engine") && !method.starts_with("engine_get")) 46 | .unwrap_or(false); 47 | 48 | let https = 49 | HttpsConnectorBuilder::new().with_webpki_roots().https_or_http().enable_http1().build(); 50 | let client = Client::builder(TokioExecutor::new()).build(https); 51 | let build_request = |uri: &str| { 52 | let mut builder = Request::builder().method(req_method.clone()).uri(uri); 53 | for (key, value) in req_headers.iter() { 54 | builder = builder.header(key, value); 55 | } 56 | builder.body(Full::new(whole_body.clone()).boxed()).unwrap() 57 | }; 58 | 59 | info!(target: "adapter", "Sending request to reth"); 60 | let reth_req = build_request(reth_uri); 61 | let reth_res = client.request(reth_req).await.unwrap(); 62 | let (parts, body) = reth_res.into_parts(); 63 | // if it's not engine method, return reth response 64 | if !is_engine_method { 65 | let boxed_body = BoxBody::new(body); 66 | info!(target: "adapter", "Sending response from reth"); 67 | return Ok(Response::from_parts(parts, boxed_body)); 68 | } 69 | 70 | let reth_body_bytes = body.collect().await?.to_bytes(); 71 | info!(target: "adapter", "Sending request to ress"); 72 | let ress_req = build_request(RESS_AUTH); 73 | let ress_res = client.request(ress_req).await.unwrap(); 74 | let (mut ress_parts, ress_body) = ress_res.into_parts(); 75 | let ress_body_bytes = ress_body.collect().await?.to_bytes(); 76 | 77 | // Process payload ID replacement 78 | let top_level: Value = serde_json::from_slice(&reth_body_bytes).unwrap(); 79 | if let Some(result_value) = top_level.get("result") { 80 | if let Ok(reth_response) 
= 81 | serde_json::from_value::(result_value.clone()) 82 | { 83 | if let Some(reth_payload_id) = reth_response.payload_id { 84 | let mut ress_top_level: Value = serde_json::from_slice(&ress_body_bytes).unwrap(); 85 | if let Some(result_obj) = ress_top_level.get_mut("result") { 86 | if let Some(result_map) = result_obj.as_object_mut() { 87 | result_map.insert( 88 | "payloadId".to_string(), 89 | serde_json::Value::String(reth_payload_id.to_string()), 90 | ); 91 | } 92 | } 93 | let new_body = serde_json::to_vec(&ress_top_level).unwrap(); 94 | ress_parts.headers.remove(hyper::header::CONTENT_LENGTH); 95 | ress_parts.headers.insert( 96 | hyper::header::CONTENT_LENGTH, 97 | new_body.len().to_string().parse().unwrap(), 98 | ); 99 | info!(target: "adapter", "Sending response from ress"); 100 | return Ok(Response::from_parts(ress_parts, full(new_body))); 101 | } 102 | } 103 | } 104 | info!(target: "adapter", "Sending response from ress"); 105 | Ok(Response::from_parts(ress_parts, full(ress_body_bytes))) 106 | } 107 | 108 | pub(crate) fn full>(chunk: T) -> BoxBody { 109 | Full::new(chunk.into()).map_err(|never| match never {}).boxed() 110 | } 111 | 112 | #[tokio::main] 113 | async fn main() -> Result<(), Box> { 114 | tracing_subscriber::fmt().with_max_level(Level::INFO).init(); 115 | 116 | let auth_addr: SocketAddr = ([0, 0, 0, 0], 8551).into(); 117 | let http_addr: SocketAddr = ([0, 0, 0, 0], 8545).into(); 118 | 119 | let auth_listener = TcpListener::bind(auth_addr).await?; 120 | let http_listener = TcpListener::bind(http_addr).await?; 121 | info!("Listening on http://{} and http://{}", auth_addr, http_addr); 122 | 123 | let auth_task = tokio::spawn(async move { 124 | loop { 125 | let (auth_tcp, _) = auth_listener.accept().await.unwrap(); 126 | let auth_io = TokioIo::new(auth_tcp); 127 | tokio::spawn(async move { 128 | if let Err(err) = http1::Builder::new() 129 | .serve_connection(auth_io, service_fn(|req| forward_request(req, true))) 130 | .await 131 | { 132 | error!("Error serving auth connection: {:?}", err); 133 | } 134 | }); 135 | } 136 | }); 137 | let http_task = tokio::spawn(async move { 138 | loop { 139 | let (http_tcp, _) = http_listener.accept().await.unwrap(); 140 | let http_io = TokioIo::new(http_tcp); 141 | tokio::spawn(async move { 142 | if let Err(err) = http1::Builder::new() 143 | .serve_connection(http_io, service_fn(|req| forward_request(req, false))) 144 | .await 145 | { 146 | error!("Error serving http connection: {:?}", err); 147 | } 148 | }); 149 | } 150 | }); 151 | tokio::try_join!(auth_task, http_task)?; 152 | Ok(()) 153 | } 154 | -------------------------------------------------------------------------------- /bin/ress/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ress" 3 | version.workspace = true 4 | edition.workspace = true 5 | rust-version.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | [lints] 11 | workspace = true 12 | 13 | [dependencies] 14 | # ress 15 | ress-provider.workspace = true 16 | ress-network.workspace = true 17 | ress-engine.workspace = true 18 | ress-testing.workspace = true 19 | 20 | # alloy 21 | alloy-primitives.workspace = true 22 | alloy-eips.workspace = true 23 | alloy-rpc-types-engine.workspace = true 24 | alloy-rpc-types-eth.workspace = true 25 | alloy-network.workspace = true 26 | alloy-serde.workspace = true 27 | 28 | # reth 29 | reth-ress-protocol.workspace = true 30 | reth-ethereum-primitives.workspace = 
true 31 | reth-chainspec.workspace = true 32 | reth-network.workspace = true 33 | reth-network-peers.workspace = true 34 | reth-discv4.workspace = true 35 | reth-db-api.workspace = true 36 | reth-storage-api.workspace = true 37 | reth-node-api.workspace = true 38 | reth-node-core.workspace = true 39 | reth-node-builder.workspace = true 40 | reth-node-ethereum.workspace = true 41 | reth-node-events.workspace = true 42 | reth-node-metrics.workspace = true 43 | reth-engine-tree.workspace = true 44 | reth-rpc-eth-types.workspace = true 45 | reth-rpc-engine-api.workspace = true 46 | reth-rpc-api.workspace = true 47 | reth-rpc-builder.workspace = true 48 | reth-rpc.workspace = true 49 | reth-consensus-debug-client.workspace = true 50 | reth-payload-builder.workspace = true 51 | reth-transaction-pool.workspace = true 52 | reth-cli.workspace = true 53 | reth-cli-util.workspace = true 54 | reth-ethereum-cli.workspace = true 55 | reth-tasks.workspace = true 56 | 57 | # misc 58 | clap.workspace = true 59 | eyre.workspace = true 60 | tokio = { workspace = true, features = ["rt-multi-thread"] } 61 | tokio-stream.workspace = true 62 | tracing.workspace = true 63 | tracing-subscriber.workspace = true 64 | futures.workspace = true 65 | metrics.workspace = true 66 | dirs-next = "2.0" 67 | shellexpand = "3.0" 68 | http = "1.0" 69 | tower = "0.4" 70 | jsonrpsee = "0.24" 71 | jsonrpsee-server = "0.24" 72 | async-trait = "0.1.68" 73 | 74 | [build-dependencies] 75 | vergen = { version = "9.0", features = ["build"] } 76 | vergen-git2 = "1.0" 77 | -------------------------------------------------------------------------------- /bin/ress/build.rs: -------------------------------------------------------------------------------- 1 | #![allow(missing_docs)] 2 | 3 | use std::{env, error::Error}; 4 | use vergen::{BuildBuilder, CargoBuilder, Emitter}; 5 | use vergen_git2::Git2Builder; 6 | 7 | fn main() -> Result<(), Box> { 8 | let mut emitter = Emitter::default(); 9 | 10 | let build_builder = BuildBuilder::default().build_timestamp(true).build()?; 11 | 12 | emitter.add_instructions(&build_builder)?; 13 | 14 | let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?; 15 | 16 | emitter.add_instructions(&cargo_builder)?; 17 | 18 | let git_builder = 19 | Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?; 20 | 21 | emitter.add_instructions(&git_builder)?; 22 | 23 | emitter.emit_and_set()?; 24 | let sha = env::var("VERGEN_GIT_SHA")?; 25 | 26 | // Set short SHA 27 | println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]); 28 | 29 | Ok(()) 30 | } 31 | -------------------------------------------------------------------------------- /bin/ress/src/cli.rs: -------------------------------------------------------------------------------- 1 | use alloy_rpc_types_engine::{JwtError, JwtSecret}; 2 | use clap::{Args, Parser}; 3 | use reth_chainspec::{Chain, ChainSpec}; 4 | use reth_cli::chainspec::ChainSpecParser; 5 | use reth_cli_util::parse_socket_address; 6 | use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; 7 | use reth_ethereum_cli::chainspec::EthereumChainSpecParser; 8 | use reth_network_peers::TrustedPeer; 9 | use reth_node_core::{ 10 | dirs::{ChainPath, PlatformPath, XdgPath}, 11 | utils::get_or_create_jwt_secret_from_path, 12 | }; 13 | use reth_rpc_builder::constants::DEFAULT_AUTH_PORT; 14 | use std::{ 15 | env::VarError, 16 | fmt, 17 | net::{IpAddr, Ipv4Addr, SocketAddr}, 18 | path::PathBuf, 19 | str::FromStr, 20 | sync::Arc, 21 | }; 22 | 23 
| /// Ress CLI interface.
24 | #[derive(Clone, Debug, Parser)]
25 | #[command(author, version, about = "Ress", long_about = None)]
26 | pub struct RessArgs {
27 |     /// The chain this node is running.
28 |     ///
29 |     /// Possible values are either a built-in chain or the path to a chain specification file.
30 |     #[arg(
31 |         long,
32 |         value_name = "CHAIN_OR_PATH",
33 |         long_help = EthereumChainSpecParser::help_message(),
34 |         default_value = EthereumChainSpecParser::SUPPORTED_CHAINS[0],
35 |         value_parser = EthereumChainSpecParser::parser()
36 |     )]
37 |     pub chain: Arc<ChainSpec>,
38 |
39 |     /// The path to the data dir for all ress files and subdirectories.
40 |     ///
41 |     /// Defaults to the OS-specific data directory:
42 |     ///
43 |     /// - Linux: `$XDG_DATA_HOME/ress/` or `$HOME/.local/share/ress/`
44 |     /// - Windows: `{FOLDERID_RoamingAppData}/ress/`
45 |     /// - macOS: `$HOME/Library/Application Support/ress/`
46 |     #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)]
47 |     pub datadir: MaybePlatformPath<DataDirPath>,
48 |
49 |     /// Network args.
50 |     #[clap(flatten)]
51 |     pub network: RessNetworkArgs,
52 |
53 |     /// RPC args.
54 |     #[clap(flatten)]
55 |     pub rpc: RessRpcArgs,
56 |
57 |     /// Debug args.
58 |     #[clap(flatten)]
59 |     pub debug: DebugArgs,
60 |
61 |     /// Enable Prometheus metrics.
62 |     ///
63 |     /// The metrics will be served at the given interface and port.
64 |     #[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")]
65 |     pub metrics: Option<SocketAddr>,
66 | }
67 |
68 | /// Ress networking args.
69 | #[derive(Clone, Debug, Args)]
70 | pub struct RessNetworkArgs {
71 |     /// Network listening address.
72 |     #[arg(long = "addr", value_name = "ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
73 |     pub addr: IpAddr,
74 |
75 |     /// Network listening port.
76 |     #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
77 |     pub port: u16,
78 |
79 |     /// Secret key to use for this node.
80 |     ///
81 |     /// This will also deterministically set the peer ID. If not specified, it will be set in the
82 |     /// data dir for the chain being used.
83 |     #[arg(long, value_name = "PATH")]
84 |     pub p2p_secret_key: Option<PathBuf>,
85 |
86 |     /// Maximum active connections for the `ress` subprotocol.
87 |     #[arg(long, default_value_t = 256)]
88 |     pub max_active_connections: u64,
89 |
90 |     #[allow(clippy::doc_markdown)]
91 |     /// Comma-separated enode URLs of trusted peers for P2P connections.
92 |     ///
93 |     /// --trusted-peers enode://abcd@192.168.0.1:30303
94 |     #[arg(long, value_delimiter = ',')]
95 |     pub trusted_peers: Vec<TrustedPeer>,
96 | }
97 |
98 | impl RessNetworkArgs {
99 |     /// Returns the network socket address.
100 |     pub fn listener_addr(&self) -> SocketAddr {
101 |         SocketAddr::new(self.addr, self.port)
102 |     }
103 |
104 |     /// Returns the path to the network secret.
105 |     pub fn network_secret_path(&self, data_dir: &ChainPath<DataDirPath>) -> PathBuf {
106 |         self.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret())
107 |     }
108 | }
109 |
110 | /// Ress RPC args.
111 | #[derive(Clone, Debug, Args)]
112 | pub struct RessRpcArgs {
113 |     /// Auth server address to listen on.
114 |     #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
115 |     pub auth_addr: IpAddr,
116 |
117 |     /// Auth server port to listen on.
118 |     #[arg(long = "authrpc.port", default_value_t = DEFAULT_AUTH_PORT)]
119 |     pub auth_port: u16,
120 |
121 |     /// Path to a JWT secret to use for the authenticated engine-API RPC server.
122 |     ///
123 |     /// This will enforce JWT authentication for all requests coming from the consensus layer.
124 |     ///
125 |     /// If no path is provided, a secret will be generated and stored in the datadir under
126 |     /// `<CHAIN>/jwt.hex`. For mainnet this would be `~/.local/share/ress/mainnet/jwt.hex` by default.
127 |     #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)]
128 |     pub auth_jwtsecret: Option<PathBuf>,
129 | }
130 |
131 | impl RessRpcArgs {
132 |     /// Returns the auth RPC socket address.
133 |     pub fn auth_rpc_addr(&self) -> SocketAddr {
134 |         SocketAddr::new(self.auth_addr, self.auth_port)
135 |     }
136 |
137 |     /// Reads and returns the JWT secret at the user-provided path _or_
138 |     /// reads or creates and returns the JWT secret at the default path.
139 |     pub fn auth_jwt_secret(&self, default_jwt_path: PathBuf) -> Result<JwtSecret, JwtError> {
140 |         match self.auth_jwtsecret.as_ref() {
141 |             Some(fpath) => {
142 |                 tracing::debug!(target: "ress::cli", user_path=?fpath, "Reading JWT auth secret file");
143 |                 JwtSecret::from_file(fpath)
144 |             }
145 |             None => get_or_create_jwt_secret_from_path(&default_jwt_path),
146 |         }
147 |     }
148 | }
149 |
150 | /// Ress debug args.
151 | #[derive(Clone, Debug, Args)]
152 | pub struct DebugArgs {
153 |     /// URL for the debug consensus client.
154 |     #[arg(long = "debug.debug-consensus-url")]
155 |     pub debug_consensus_url: Option<String>,
156 |
157 |     /// URL for the RPC adapter.
158 |     #[arg(long = "debug.rpc-network-adapter-url")]
159 |     pub rpc_network_adapter_url: Option<String>,
160 | }
161 |
162 | /// The ress data dir path.
163 | ///
164 | /// The data dir should contain a subdirectory for each chain, and those chain directories will
165 | /// include all information for that chain, such as the p2p secret.
166 | #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
167 | #[non_exhaustive]
168 | pub struct DataDirPath;
169 |
170 | impl XdgPath for DataDirPath {
171 |     fn resolve() -> Option<PathBuf> {
172 |         data_dir()
173 |     }
174 | }
175 |
176 | /// Returns the path to the ress data directory.
177 | ///
178 | /// Refer to [`dirs_next::data_dir`] for cross-platform behavior.
179 | pub fn data_dir() -> Option<PathBuf> {
180 |     dirs_next::data_dir().map(|root| root.join("ress"))
181 | }
182 |
183 | /// Returns the path to the ress database.
184 | ///
185 | /// Refer to [`dirs_next::data_dir`] for cross-platform behavior.
186 | pub fn database_path() -> Option<PathBuf> {
187 |     data_dir().map(|root| root.join("db"))
188 | }
189 |
190 | /// An optional wrapper type around [`PlatformPath`].
191 | ///
192 | /// This is useful for when a path is optional, such as the `--data-dir` flag.
193 | #[derive(Clone, Debug, PartialEq, Eq)]
194 | pub struct MaybePlatformPath<D>(Option<PlatformPath<D>>);
195 |
196 | // === impl MaybePlatformPath ===
197 |
198 | impl<D: XdgPath> MaybePlatformPath<D> {
199 |     /// Returns the path if it is set, otherwise returns the default path for the given chain.
200 |     pub fn unwrap_or_chain_default(&self, chain: Chain) -> ChainPath<D> {
201 |         ChainPath::new(
202 |             self.0.clone().unwrap_or_else(|| PlatformPath::default().join(chain.to_string())),
203 |             chain,
204 |             Default::default(),
205 |         )
206 |     }
207 | }
208 |
209 | impl<D> fmt::Display for MaybePlatformPath<D> {
210 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
211 |         if let Some(path) = &self.0 {
212 |             path.fmt(f)
213 |         } else {
214 |             // NOTE: this is a workaround for making it work with clap's `default_value_t` which
215 |             // computes the default value via `Default -> Display -> FromStr`
216 |             write!(f, "default")
217 |         }
218 |     }
219 | }
220 |
221 | impl<D> Default for MaybePlatformPath<D> {
222 |     fn default() -> Self {
223 |         Self(None)
224 |     }
225 | }
226 |
227 | impl<D> FromStr for MaybePlatformPath<D> {
228 |     type Err = shellexpand::LookupError<VarError>;
229 |
230 |     fn from_str(s: &str) -> Result<Self, Self::Err> {
231 |         let p = match s {
232 |             "default" => {
233 |                 // NOTE: this is a workaround for making it work with clap's `default_value_t` which
234 |                 // computes the default value via `Default -> Display -> FromStr`
235 |                 None
236 |             }
237 |             _ => Some(PlatformPath::from_str(s)?),
238 |         };
239 |         Ok(Self(p))
240 |     }
241 | }
242 |
243 | // impl<D> From<PathBuf> for MaybePlatformPath<D> {
244 | //     fn from(path: PathBuf) -> Self {
245 | //         Self(Some(PlatformPath(path, std::marker::PhantomData)))
246 | //     }
247 | // }
248 |
-------------------------------------------------------------------------------- /bin/ress/src/launch.rs: --------------------------------------------------------------------------------
1 | use alloy_network::Ethereum;
2 | use alloy_primitives::keccak256;
3 | use alloy_rpc_types_engine::{ClientCode, ClientVersionV1, JwtSecret};
4 | use futures::StreamExt;
5 | use http::{header::CONTENT_TYPE, HeaderValue, Response};
6 | use ress_engine::engine::ConsensusEngine;
7 | use ress_network::{RessNetworkHandle, RessNetworkManager};
8 | use ress_provider::{RessDatabase, RessProvider};
9 | use ress_testing::rpc_adapter::RpcNetworkAdapter;
10 | use reth_chainspec::ChainSpec;
11 | use reth_consensus_debug_client::{DebugConsensusClient, RpcBlockProvider};
12 | use reth_db_api::database_metrics::DatabaseMetrics;
13 | use reth_ethereum_primitives::EthPrimitives;
14 | use reth_network::{
15 |     config::SecretKey, protocol::IntoRlpxSubProtocol, EthNetworkPrimitives, NetworkConfig,
16 |     NetworkInfo, NetworkManager, PeersInfo,
17 | };
18 | use reth_network_peers::TrustedPeer;
19 | use reth_node_api::BeaconConsensusEngineHandle;
20 | use reth_node_core::primitives::{Bytecode, RecoveredBlock, SealedBlock};
21 | use reth_node_ethereum::{
22 |     consensus::EthBeaconConsensus, node::EthereumEngineValidator, EthEngineTypes,
23 | };
24 | use reth_node_events::node::handle_events;
25 | use reth_node_metrics::recorder::install_prometheus_recorder;
26 | use reth_payload_builder::{noop::NoopPayloadBuilderService, PayloadStore};
27 | use reth_ress_protocol::{NodeType, ProtocolState, RessProtocolHandler, RessProtocolProvider};
28 | use reth_rpc_api::EngineEthApiServer;
29 | use reth_rpc_builder::auth::{AuthRpcModule, AuthServerConfig, AuthServerHandle};
30 | use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi};
31 | use reth_storage_api::noop::NoopProvider;
32 | use reth_tasks::TokioTaskExecutor;
33 | use reth_transaction_pool::noop::NoopTransactionPool;
34 | use std::{convert::Infallible, net::SocketAddr, sync::Arc};
35 | use tokio::sync::mpsc;
36 | use tokio_stream::wrappers::UnboundedReceiverStream;
37 | use tracing::*;
38 |
39 | use 
crate::{cli::RessArgs, rpc::RessEthRpc}; 40 | 41 | /// The human readable name of the client 42 | pub const NAME_CLIENT: &str = "Ress"; 43 | 44 | /// The latest version from Cargo.toml. 45 | pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); 46 | 47 | /// The 8 character short SHA of the latest commit. 48 | pub const VERGEN_GIT_SHA: &str = env!("VERGEN_GIT_SHA_SHORT"); 49 | 50 | /// Ress node launcher 51 | #[derive(Debug)] 52 | pub struct NodeLauncher { 53 | /// Ress configuration. 54 | args: RessArgs, 55 | } 56 | 57 | impl NodeLauncher { 58 | /// Create new node launcher 59 | pub fn new(args: RessArgs) -> Self { 60 | Self { args } 61 | } 62 | } 63 | 64 | impl NodeLauncher { 65 | /// Launch ress node. 66 | pub async fn launch(self) -> eyre::Result<()> { 67 | let data_dir = self.args.datadir.unwrap_or_chain_default(self.args.chain.chain()); 68 | 69 | // Open database. 70 | let db_path = data_dir.db(); 71 | debug!(target: "ress", path = %db_path.display(), "Opening database"); 72 | let database = RessDatabase::new(&db_path)?; 73 | info!(target: "ress", path = %db_path.display(), "Database opened"); 74 | let provider = RessProvider::new(self.args.chain.clone(), database.clone()); 75 | 76 | // Install the recorder to ensure that upkeep is run periodically and 77 | // start the metrics server. 78 | install_prometheus_recorder().spawn_upkeep(); 79 | if let Some(addr) = self.args.metrics { 80 | info!(target: "ress", ?addr, "Starting metrics endpoint"); 81 | self.start_prometheus_server(addr, database).await?; 82 | } 83 | 84 | // Insert genesis block. 85 | let genesis_hash = self.args.chain.genesis_hash(); 86 | let genesis_header = self.args.chain.genesis_header().clone(); 87 | provider.insert_block( 88 | RecoveredBlock::new_sealed( 89 | SealedBlock::from_parts_unchecked(genesis_header, Default::default(), genesis_hash), 90 | Vec::new(), 91 | ), 92 | None, 93 | ); 94 | provider.insert_canonical_hash(0, genesis_hash); 95 | info!(target: "ress", %genesis_hash, "Inserted genesis block"); 96 | for account in self.args.chain.genesis().alloc.values() { 97 | if let Some(code) = account.code.clone() { 98 | let code_hash = keccak256(&code); 99 | provider.insert_bytecode(code_hash, Bytecode::new_raw(code))?; 100 | } 101 | } 102 | info!(target: "ress", %genesis_hash, "Inserted genesis bytecodes"); 103 | 104 | // Launch network. 105 | let network_secret_path = self.args.network.network_secret_path(&data_dir); 106 | let network_secret = reth_cli_util::get_secret_key(&network_secret_path)?; 107 | 108 | let network_handle = self 109 | .launch_network( 110 | provider.clone(), 111 | network_secret, 112 | self.args.network.max_active_connections, 113 | self.args.network.trusted_peers.clone(), 114 | ) 115 | .await?; 116 | info!(target: "ress", peer_id = %network_handle.inner().peer_id(), addr = %network_handle.inner().local_addr(), enode = %network_handle.inner().local_node_record().to_string(), "Network launched"); 117 | 118 | // Spawn consensus engine. 
119 | let (to_engine, from_auth_rpc) = mpsc::unbounded_channel(); 120 | let engine_validator = EthereumEngineValidator::new(self.args.chain.clone()); 121 | let (engine_events_tx, engine_events_rx) = mpsc::unbounded_channel(); 122 | let consensus_engine = ConsensusEngine::new( 123 | provider.clone(), 124 | EthBeaconConsensus::new(self.args.chain.clone()), 125 | engine_validator.clone(), 126 | network_handle.clone(), 127 | from_auth_rpc, 128 | engine_events_tx, 129 | ); 130 | let _consensus_engine_handle = tokio::spawn(consensus_engine); 131 | info!(target: "ress", "Consensus engine spawned"); 132 | 133 | // Start auth RPC server. 134 | let jwt_key = self.args.rpc.auth_jwt_secret(data_dir.jwt())?; 135 | let beacon_consensus_engine_handle = 136 | BeaconConsensusEngineHandle::::new(to_engine); 137 | let auth_server_handle = self 138 | .start_auth_server( 139 | jwt_key, 140 | provider, 141 | engine_validator, 142 | beacon_consensus_engine_handle.clone(), 143 | ) 144 | .await?; 145 | info!(target: "ress", addr = %auth_server_handle.local_addr(), "Auth RPC server started"); 146 | 147 | // Start debug consensus. 148 | if let Some(url) = self.args.debug.debug_consensus_url { 149 | let rpc_to_primitive_block = |rpc_block: alloy_rpc_types_eth::Block| { 150 | let alloy_rpc_types_eth::Block { header, transactions, withdrawals, .. } = 151 | rpc_block; 152 | reth_ethereum_primitives::Block { 153 | header: header.inner, 154 | body: reth_ethereum_primitives::BlockBody { 155 | transactions: transactions 156 | .into_transactions() 157 | .map(|tx| tx.inner.into_inner().into()) 158 | .collect(), 159 | ommers: Default::default(), 160 | withdrawals, 161 | }, 162 | } 163 | }; 164 | let provider = Arc::new( 165 | RpcBlockProvider::::new( 166 | &url, 167 | rpc_to_primitive_block, 168 | ) 169 | .await?, 170 | ); 171 | tokio::spawn(DebugConsensusClient::new(beacon_consensus_engine_handle, provider).run()); 172 | info!(target: "ress", %url, "Debug consensus started"); 173 | } 174 | 175 | handle_events::<_, EthPrimitives>( 176 | Some(Box::new(network_handle.inner().clone())), 177 | None, 178 | UnboundedReceiverStream::from(engine_events_rx).map(Into::into), 179 | ) 180 | .await; 181 | 182 | Ok(()) 183 | } 184 | 185 | async fn launch_network
<P>
( 186 | &self, 187 | protocol_provider: P, 188 | secret_key: SecretKey, 189 | max_active_connections: u64, 190 | trusted_peers: Vec, 191 | ) -> eyre::Result 192 | where 193 | P: RessProtocolProvider + Clone + Unpin + 'static, 194 | { 195 | // Configure and instantiate the network 196 | let config = NetworkConfig::builder(secret_key) 197 | .listener_addr(self.args.network.listener_addr()) 198 | .disable_discovery() 199 | .build_with_noop_provider(self.args.chain.clone()); 200 | let mut manager = NetworkManager::::new(config).await?; 201 | 202 | let (events_sender, protocol_events) = mpsc::unbounded_channel(); 203 | let protocol_handler = RessProtocolHandler { 204 | provider: protocol_provider, 205 | node_type: NodeType::Stateless, 206 | peers_handle: manager.peers_handle(), 207 | max_active_connections, 208 | state: ProtocolState { events_sender, active_connections: Arc::default() }, 209 | }; 210 | manager.add_rlpx_sub_protocol(protocol_handler.into_rlpx_sub_protocol()); 211 | 212 | for trusted_peer in trusted_peers { 213 | let trusted_peer_addr = trusted_peer.resolve_blocking()?.tcp_addr(); 214 | manager.peers_handle().add_peer(trusted_peer.id, trusted_peer_addr); 215 | } 216 | 217 | // get a handle to the network to interact with it 218 | let network_handle = manager.handle().clone(); 219 | // spawn the network 220 | tokio::spawn(manager); 221 | 222 | let (peer_requests_tx, peer_requests_rx) = mpsc::unbounded_channel(); 223 | let peer_request_stream = UnboundedReceiverStream::from(peer_requests_rx); 224 | if let Some(rpc_url) = self.args.debug.rpc_network_adapter_url.clone() { 225 | info!(target: "ress", url = %rpc_url, "Using RPC network adapter"); 226 | tokio::spawn(RpcNetworkAdapter::new(&rpc_url).await?.run(peer_request_stream)); 227 | } else { 228 | // spawn ress network manager 229 | tokio::spawn(RessNetworkManager::new( 230 | UnboundedReceiverStream::from(protocol_events), 231 | peer_request_stream, 232 | )); 233 | } 234 | 235 | Ok(RessNetworkHandle::new(network_handle, peer_requests_tx)) 236 | } 237 | 238 | async fn start_auth_server( 239 | &self, 240 | jwt_key: JwtSecret, 241 | provider: RessProvider, 242 | engine_validator: EthereumEngineValidator, 243 | beacon_engine_handle: BeaconConsensusEngineHandle, 244 | ) -> eyre::Result { 245 | let (_, payload_builder_handle) = NoopPayloadBuilderService::::new(); 246 | let client_version = ClientVersionV1 { 247 | code: ClientCode::RH, 248 | name: NAME_CLIENT.to_string(), 249 | version: CARGO_PKG_VERSION.to_string(), 250 | commit: VERGEN_GIT_SHA.to_string(), 251 | }; 252 | let engine_api = EngineApi::new( 253 | NoopProvider::::new(self.args.chain.clone()), 254 | self.args.chain.clone(), 255 | beacon_engine_handle, 256 | PayloadStore::new(payload_builder_handle), 257 | NoopTransactionPool::default(), 258 | Box::::default(), 259 | client_version, 260 | EngineCapabilities::default(), 261 | engine_validator, 262 | false, 263 | ); 264 | let auth_socket = self.args.rpc.auth_rpc_addr(); 265 | let config = AuthServerConfig::builder(jwt_key).socket_addr(auth_socket).build(); 266 | 267 | let mut module = AuthRpcModule::new(engine_api); 268 | module.merge_auth_methods(RessEthRpc::new(provider).into_rpc())?; 269 | Ok(module.start_server(config).await?) 270 | } 271 | 272 | /// This launches the prometheus server. 273 | pub async fn start_prometheus_server( 274 | &self, 275 | addr: SocketAddr, 276 | database: RessDatabase, 277 | ) -> eyre::Result<()> { 278 | // Register version. 
279 | let _gauge = metrics::gauge!("info", &[("version", env!("CARGO_PKG_VERSION"))]); 280 | 281 | let listener = tokio::net::TcpListener::bind(addr).await?; 282 | tokio::spawn(async move { 283 | loop { 284 | let io = match listener.accept().await { 285 | Ok((stream, _remote_addr)) => stream, 286 | Err(error) => { 287 | tracing::error!(target: "ress", %error, "failed to accept connection"); 288 | continue; 289 | } 290 | }; 291 | 292 | let database_ = database.clone(); 293 | let handle = install_prometheus_recorder(); 294 | let service = tower::service_fn(move |_| { 295 | database_.report_metrics(); 296 | let metrics = handle.handle().render(); 297 | let mut response = Response::new(metrics); 298 | let content_type = HeaderValue::from_static("text/plain"); 299 | response.headers_mut().insert(CONTENT_TYPE, content_type); 300 | async move { Ok::<_, Infallible>(response) } 301 | }); 302 | 303 | tokio::spawn(async move { 304 | let _ = jsonrpsee_server::serve(io, service).await.inspect_err( 305 | |error| tracing::debug!(target: "ress", %error, "failed to serve request"), 306 | ); 307 | }); 308 | } 309 | }); 310 | Ok(()) 311 | } 312 | } 313 | -------------------------------------------------------------------------------- /bin/ress/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Stateless Ethereum execution layer node based on reth client. 2 | 3 | /// Ress CLI arguments. 4 | pub mod cli; 5 | 6 | /// Node launcher. 7 | pub mod launch; 8 | 9 | /// Eth RPC implementation. 10 | pub mod rpc; 11 | -------------------------------------------------------------------------------- /bin/ress/src/main.rs: -------------------------------------------------------------------------------- 1 | //! Main ress executable. 2 | 3 | use clap::Parser; 4 | use ress::{cli::RessArgs, launch::NodeLauncher}; 5 | use tracing::level_filters::LevelFilter; 6 | use tracing_subscriber::EnvFilter; 7 | 8 | #[tokio::main] 9 | async fn main() -> eyre::Result<()> { 10 | let orig_hook = std::panic::take_hook(); 11 | std::panic::set_hook(Box::new(move |panic_info| { 12 | orig_hook(panic_info); 13 | std::process::exit(1); 14 | })); 15 | 16 | tracing_subscriber::fmt() 17 | .with_env_filter( 18 | EnvFilter::builder().with_default_directive(LevelFilter::INFO.into()).from_env_lossy(), 19 | ) 20 | .init(); 21 | 22 | NodeLauncher::new(RessArgs::parse()).launch().await?; 23 | Ok(()) 24 | } 25 | -------------------------------------------------------------------------------- /bin/ress/src/rpc.rs: -------------------------------------------------------------------------------- 1 | use alloy_eips::{BlockId, BlockNumberOrTag}; 2 | use alloy_network::Ethereum; 3 | use alloy_primitives::{Address, Bytes, B256, U256, U64}; 4 | use alloy_rpc_types_eth::{ 5 | state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, 6 | TransactionRequest, 7 | }; 8 | use alloy_serde::JsonStorageKey; 9 | use jsonrpsee::core::RpcResult as Result; 10 | use ress_provider::RessProvider; 11 | use reth_rpc_api::{ 12 | eth::{RpcBlock, RpcReceipt}, 13 | EngineEthApiServer, 14 | }; 15 | use reth_rpc_eth_types::EthApiError; 16 | 17 | /// Implementation of minimal eth RPC interface for Engine API. 18 | #[derive(Debug)] 19 | pub struct RessEthRpc(RessProvider); 20 | 21 | impl RessEthRpc { 22 | /// Creates new ress RPC provider. 23 | pub fn new(provider: RessProvider) -> Self { 24 | Self(provider) 25 | } 26 | } 27 | 28 | /// Minimal eth RPC interface for Engine API. 
29 | /// Ref:
30 | #[async_trait::async_trait]
31 | impl EngineEthApiServer<RpcBlock<Ethereum>, RpcReceipt<Ethereum>> for RessEthRpc {
32 |     /// Handler for: `eth_syncing`
33 |     fn syncing(&self) -> Result<SyncStatus> {
34 |         Ok(SyncStatus::None)
35 |     }
36 | 
37 |     /// Handler for: `eth_chainId`
38 |     async fn chain_id(&self) -> Result<Option<U64>> {
39 |         Ok(Some(U64::from(self.0.chain_spec().chain.id())))
40 |     }
41 | 
42 |     /// Handler for: `eth_blockNumber`
43 |     fn block_number(&self) -> Result<U256> {
44 |         Err(EthApiError::Unsupported("method not supported").into())
45 |     }
46 | 
47 |     /// Handler for: `eth_call`
48 |     async fn call(
49 |         &self,
50 |         _request: TransactionRequest,
51 |         _block_id: Option<BlockId>,
52 |         _state_overrides: Option<StateOverride>,
53 |         _block_overrides: Option<Box<BlockOverrides>>,
54 |     ) -> Result<Bytes> {
55 |         Err(EthApiError::Unsupported("method not supported").into())
56 |     }
57 | 
58 |     /// Handler for: `eth_getCode`
59 |     async fn get_code(&self, _address: Address, _block_id: Option<BlockId>) -> Result<Bytes> {
60 |         Err(EthApiError::Unsupported("method not supported").into())
61 |     }
62 | 
63 |     /// Handler for: `eth_getBlockByHash`
64 |     async fn block_by_hash(&self, _hash: B256, _full: bool) -> Result<Option<RpcBlock<Ethereum>>> {
65 |         Err(EthApiError::Unsupported("method not supported").into())
66 |     }
67 | 
68 |     /// Handler for: `eth_getBlockByNumber`
69 |     async fn block_by_number(
70 |         &self,
71 |         _number: BlockNumberOrTag,
72 |         _full: bool,
73 |     ) -> Result<Option<RpcBlock<Ethereum>>> {
74 |         Err(EthApiError::Unsupported("method not supported").into())
75 |     }
76 | 
77 |     async fn block_receipts(
78 |         &self,
79 |         _block_id: BlockId,
80 |     ) -> Result<Option<Vec<RpcReceipt<Ethereum>>>> {
81 |         Err(EthApiError::Unsupported("method not supported").into())
82 |     }
83 | 
84 |     /// Handler for: `eth_sendRawTransaction`
85 |     async fn send_raw_transaction(&self, _bytes: Bytes) -> Result<B256> {
86 |         Err(EthApiError::Unsupported("method not supported").into())
87 |     }
88 | 
89 |     async fn transaction_receipt(&self, _hash: B256) -> Result<Option<RpcReceipt<Ethereum>>> {
90 |         Err(EthApiError::Unsupported("method not supported").into())
91 |     }
92 | 
93 |     /// Handler for `eth_getLogs`
94 |     async fn logs(&self, _filter: Filter) -> Result<Vec<Log>> {
95 |         Err(EthApiError::Unsupported("method not supported").into())
96 |     }
97 | 
98 |     /// Handler for `eth_getProof`
99 |     async fn get_proof(
100 |         &self,
101 |         _address: Address,
102 |         _keys: Vec<JsonStorageKey>,
103 |         _block_number: Option<BlockId>,
104 |     ) -> Result<EIP1186AccountProofResponse> {
105 |         Err(EthApiError::Unsupported("method not supported").into())
106 |     }
107 | }
108 | 
-------------------------------------------------------------------------------- /bin/reth/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "reth"
3 | version.workspace = true
4 | edition.workspace = true
5 | rust-version.workspace = true
6 | license.workspace = true
7 | homepage.workspace = true
8 | repository.workspace = true
9 | 
10 | [lints]
11 | workspace = true
12 | 
13 | [dependencies]
14 | # reth
15 | reth.workspace = true
16 | reth-node-builder.workspace = true
17 | reth-node-ethereum.workspace = true
18 | 
19 | # misc
20 | clap.workspace = true
21 | eyre.workspace = true
22 | 
-------------------------------------------------------------------------------- /bin/reth/src/main.rs: --------------------------------------------------------------------------------
1 | //! Reth node that supports ress subprotocol.
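//!
//! Rough flow of the `main` below (names are taken from the code itself): parse the reth
//! CLI with `RessArgs` as the extension type, launch a regular stateful `EthereumNode`,
//! and, only if `ress_args.enabled` is set, install the ress RLPx subprotocol on the
//! running node's network before awaiting node exit.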
2 | 
3 | use clap::Parser;
4 | use reth::{args::RessArgs, chainspec::EthereumChainSpecParser};
5 | use reth_node_builder::NodeHandle;
6 | 
7 | fn main() -> eyre::Result<()> {
8 |     reth::cli::Cli::<EthereumChainSpecParser, RessArgs>::parse().run(
9 |         |builder, ress_args| async move {
10 |             // launch the stateful node
11 |             let NodeHandle { node, node_exit_future } =
12 |                 builder.node(reth_node_ethereum::EthereumNode::default()).launch().await?;
13 | 
14 |             // Install ress subprotocol.
15 |             if ress_args.enabled {
16 |                 reth::ress::install_ress_subprotocol(
17 |                     ress_args,
18 |                     node.provider,
19 |                     node.block_executor,
20 |                     node.network,
21 |                     node.task_executor,
22 |                     node.add_ons_handle.engine_events.new_listener(),
23 |                 )?;
24 |             }
25 | 
26 |             node_exit_future.await
27 |         },
28 |     )
29 | }
30 | 
-------------------------------------------------------------------------------- /crates/engine/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "ress-engine"
3 | version.workspace = true
4 | edition.workspace = true
5 | rust-version.workspace = true
6 | license.workspace = true
7 | homepage.workspace = true
8 | repository.workspace = true
9 | 
10 | [lints]
11 | workspace = true
12 | 
13 | [dependencies]
14 | # ress
15 | ress-primitives.workspace = true
16 | ress-provider.workspace = true
17 | ress-evm.workspace = true
18 | ress-network.workspace = true
19 | 
20 | # reth
21 | reth-ress-protocol.workspace = true
22 | reth-primitives.workspace = true
23 | reth-primitives-traits.workspace = true
24 | reth-chainspec.workspace = true
25 | reth-errors.workspace = true
26 | reth-provider.workspace = true
27 | reth-consensus.workspace = true
28 | reth-trie.workspace = true
29 | reth-trie-sparse.workspace = true
30 | reth-engine-tree.workspace = true
31 | reth-chain-state.workspace = true
32 | reth-ethereum-engine-primitives.workspace = true
33 | reth-node-api.workspace = true
34 | reth-node-ethereum.workspace = true
35 | reth-metrics.workspace = true
36 | 
37 | # alloy
38 | alloy-primitives.workspace = true
39 | alloy-rlp.workspace = true
40 | alloy-eips.workspace = true
41 | alloy-consensus.workspace = true
42 | alloy-rpc-types-engine.workspace = true
43 | 
44 | # misc
45 | futures.workspace = true
46 | tokio.workspace = true
47 | tokio-stream.workspace = true
48 | tracing.workspace = true
49 | rayon.workspace = true
50 | itertools.workspace = true
51 | metrics.workspace = true
52 | strum_macros.workspace = true
53 | schnellru.workspace = true
54 | humansize = "2.0"
55 | 
56 | [dev-dependencies]
57 | reth-testing-utils.workspace = true
58 | alloy-eips.workspace = true
-------------------------------------------------------------------------------- /crates/engine/src/download/futs.rs: --------------------------------------------------------------------------------
1 | use alloy_primitives::{Bytes, B256};
2 | use alloy_rlp::Encodable;
3 | use futures::FutureExt;
4 | use ress_network::{PeerRequestError, RessNetworkHandle};
5 | use ress_primitives::witness::ExecutionWitness;
6 | use reth_chainspec::ChainSpec;
7 | use reth_consensus::{Consensus, HeaderValidator};
8 | use reth_node_ethereum::consensus::EthBeaconConsensus;
9 | use reth_primitives::{Block, BlockBody, Bytecode, Header, SealedBlock, SealedHeader};
10 | use reth_ress_protocol::GetHeaders;
11 | use std::{
12 |     future::Future,
13 |     pin::Pin,
14 |     task::{ready, Context, Poll},
15 |     time::{Duration, Instant},
16 | };
17 | use tracing::*;
18 | 
19 | type DownloadFut<T> = Pin<Box<dyn Future<Output = Result<T, PeerRequestError>> + Send + Sync>>;
20 | 
21 | /// A future that downloads a full block from the network.
22 | ///
23 | /// This will attempt to fetch both the header and body for the given block hash at the same time.
24 | /// When both requests succeed, the future will yield the full block.
25 | #[must_use = "futures do nothing unless polled"]
26 | pub struct FetchFullBlockFuture {
27 |     network: RessNetworkHandle,
28 |     consensus: EthBeaconConsensus<ChainSpec>,
29 |     retry_delay: Duration,
30 |     block_hash: B256,
31 |     started_at: Instant,
32 |     pending_header_request: Option<DownloadFut<Option<Header>>>,
33 |     pending_body_request: Option<DownloadFut<Option<BlockBody>>>,
34 |     header: Option<SealedHeader>,
35 |     body: Option<BlockBody>,
36 | }
37 | 
38 | impl FetchFullBlockFuture {
39 |     /// Create new fetch full block future.
40 |     pub fn new(
41 |         network: RessNetworkHandle,
42 |         consensus: EthBeaconConsensus<ChainSpec>,
43 |         retry_delay: Duration,
44 |         block_hash: B256,
45 |     ) -> Self {
46 |         let mut this = FetchFullBlockFuture {
47 |             network,
48 |             consensus,
49 |             retry_delay,
50 |             block_hash,
51 |             started_at: Instant::now(),
52 |             pending_header_request: None,
53 |             pending_body_request: None,
54 |             header: None,
55 |             body: None,
56 |         };
57 |         this.pending_header_request = Some(this.header_request(Duration::default()));
58 |         this.pending_body_request = Some(this.body_request(Duration::default()));
59 |         this
60 |     }
61 | 
62 |     /// Returns the hash of the block being requested.
63 |     pub const fn block_hash(&self) -> B256 {
64 |         self.block_hash
65 |     }
66 | 
67 |     /// The duration elapsed since request was started.
68 |     pub fn elapsed(&self) -> Duration {
69 |         self.started_at.elapsed()
70 |     }
71 | 
72 |     fn header_request(&self, delay: Duration) -> DownloadFut<Option<Header>> {
73 |         let network = self.network.clone();
74 |         let hash = self.block_hash;
75 |         Box::pin(async move {
76 |             tokio::time::sleep(delay).await;
77 |             let request = GetHeaders { start_hash: hash, limit: 1 };
78 |             network.fetch_headers(request).await.map(|res| res.into_iter().next())
79 |         })
80 |     }
81 | 
82 |     fn body_request(&self, delay: Duration) -> DownloadFut<Option<BlockBody>> {
83 |         let network = self.network.clone();
84 |         let hash = self.block_hash;
85 |         Box::pin(async move {
86 |             tokio::time::sleep(delay).await;
87 |             let request = Vec::from([hash]);
88 |             network.fetch_block_bodies(request).await.map(|res| res.into_iter().next())
89 |         })
90 |     }
91 | 
92 |     fn on_header_response(&mut self, response: Result<Option<Header>, PeerRequestError>) {
93 |         match response {
94 |             Ok(Some(header)) => {
95 |                 let header = SealedHeader::seal_slow(header);
96 |                 if header.hash() == self.block_hash {
97 |                     self.header = Some(header);
98 |                 } else {
99 |                     trace!(target: "ress::engine::downloader", expected = %self.block_hash, received = %header.hash(), "Received wrong header");
100 |                 }
101 |             }
102 |             Ok(None) => {
103 |                 trace!(target: "ress::engine::downloader", block_hash = %self.block_hash, "No header received");
104 |             }
105 |             Err(error) => {
106 |                 trace!(target: "ress::engine::downloader", %error, %self.block_hash, "Header download failed");
107 |             }
108 |         };
109 |     }
110 | 
111 |     fn on_body_response(&mut self, response: Result<Option<BlockBody>, PeerRequestError>) {
112 |         match response {
113 |             Ok(Some(body)) => {
114 |                 self.body = Some(body);
115 |             }
116 |             Ok(None) => {
117 |                 trace!(target: "ress::engine::downloader", block_hash = %self.block_hash, "No body received");
118 |             }
119 |             Err(error) => {
120 |                 trace!(target: "ress::engine::downloader", %error, %self.block_hash, "Body download failed");
121 |             }
122 |         }
123 |     }
124 | }
125 | 
126 | impl Future for FetchFullBlockFuture {
127 |     type Output = SealedBlock;
128 | 
129 |     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
130 |         let this = self.get_mut();
131 | 
132 |         loop {
133 |             if let Some(fut) = &mut this.pending_header_request {
134 |                 if let Poll::Ready(response) = fut.poll_unpin(cx) {
135 |                     this.pending_header_request.take();
136 |                     this.on_header_response(response);
137 |                     if this.header.is_none() {
138 |                         this.pending_header_request = Some(this.header_request(this.retry_delay));
139 |                         continue
140 |                     }
141 |                 }
142 |             }
143 | 
144 |             if let Some(fut) = &mut this.pending_body_request {
145 |                 if let Poll::Ready(response) = fut.poll_unpin(cx) {
146 |                     this.pending_body_request.take();
147 |                     this.on_body_response(response);
148 |                     if this.body.is_none() {
149 |                         this.pending_body_request = Some(this.body_request(this.retry_delay));
150 |                         continue
151 |                     }
152 |                 }
153 |             }
154 | 
155 |             if this.header.is_some() && this.body.is_some() {
156 |                 let header = this.header.take().unwrap();
157 |                 let body = this.body.take().unwrap();
158 | 
159 |                 // ensure the block is valid, else retry
160 |                 if let Err(error) = <EthBeaconConsensus<ChainSpec> as Consensus<Block>>::validate_body_against_header(&this.consensus, &body, &header) {
161 |                     trace!(target: "ress::engine::downloader", %error, hash = %header.hash(), "Received wrong body");
162 |                     this.header = Some(header);
163 |                     this.pending_body_request = Some(this.body_request(this.retry_delay));
164 |                     continue
165 |                 }
166 | 
167 |                 return Poll::Ready(SealedBlock::from_sealed_parts(header, body))
168 |             }
169 | 
170 |             return Poll::Pending
171 |         }
172 |     }
173 | }
174 | 
175 | /// A future that downloads a range of headers.
176 | #[must_use = "futures do nothing unless polled"]
177 | pub struct FetchHeadersRangeFuture {
178 |     network: RessNetworkHandle,
179 |     consensus: EthBeaconConsensus<ChainSpec>,
180 |     retry_delay: Duration,
181 |     request: GetHeaders,
182 |     started_at: Instant,
183 |     pending: DownloadFut<Vec<Header>>,
184 | }
185 | 
186 | impl FetchHeadersRangeFuture {
187 |     /// Create new fetch headers range future.
188 |     pub fn new(
189 |         network: RessNetworkHandle,
190 |         consensus: EthBeaconConsensus<ChainSpec>,
191 |         retry_delay: Duration,
192 |         request: GetHeaders,
193 |     ) -> Self {
194 |         let network_ = network.clone();
195 |         Self {
196 |             network,
197 |             consensus,
198 |             retry_delay,
199 |             request,
200 |             started_at: Instant::now(),
201 |             pending: Box::pin(async move { network_.fetch_headers(request).await }),
202 |         }
203 |     }
204 | 
205 |     /// Returns the get headers request.
206 |     pub fn request(&self) -> GetHeaders {
207 |         self.request
208 |     }
209 | 
210 |     /// The duration elapsed since request was started.
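    /// Note that retries are included in this measurement: `started_at` is set once when
    /// the future is constructed and is never reset when the request is re-issued.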
211 | pub fn elapsed(&self) -> Duration { 212 | self.started_at.elapsed() 213 | } 214 | 215 | fn request_headers(&self) -> DownloadFut> { 216 | let network = self.network.clone(); 217 | let request = self.request; 218 | let delay = self.retry_delay; 219 | Box::pin(async move { 220 | tokio::time::sleep(delay).await; 221 | network.fetch_headers(request).await 222 | }) 223 | } 224 | 225 | fn on_response( 226 | &mut self, 227 | response: Result, PeerRequestError>, 228 | ) -> Option> { 229 | let headers = match response { 230 | Ok(headers) => headers, 231 | Err(error) => { 232 | trace!(target: "ress::engine::downloader", %error, ?self.request, "Headers download failed"); 233 | return None 234 | } 235 | }; 236 | 237 | if headers.len() < self.request.limit as usize { 238 | trace!(target: "ress::engine::downloader", len = headers.len(), request = ?self.request, "Invalid headers response length"); 239 | return None 240 | } 241 | 242 | let headers_falling = headers.into_iter().map(SealedHeader::seal_slow).collect::>(); 243 | if headers_falling[0].hash() != self.request.start_hash { 244 | trace!(target: "ress::engine::downloader", expected = %self.request.start_hash, received = %headers_falling[0].hash(), "Invalid start hash"); 245 | return None 246 | } 247 | 248 | let headers_rising = headers_falling.iter().rev().cloned().collect::>(); 249 | // check if the downloaded headers are valid 250 | match self.consensus.validate_header_range(&headers_rising) { 251 | Ok(()) => Some(headers_falling), 252 | Err(error) => { 253 | trace!(target: "ress::engine::downloader", %error, ?self.request, "Received bad header response"); 254 | None 255 | } 256 | } 257 | } 258 | } 259 | 260 | impl Future for FetchHeadersRangeFuture { 261 | type Output = Vec; 262 | 263 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 264 | let this = self.get_mut(); 265 | 266 | loop { 267 | let response = ready!(this.pending.poll_unpin(cx)); 268 | if let Some(headers) = this.on_response(response) { 269 | return Poll::Ready(headers) 270 | } 271 | this.pending = this.request_headers(); 272 | } 273 | } 274 | } 275 | 276 | enum FullBlockWithAncestorsDownloadState { 277 | FullBlock(FetchFullBlockFuture), 278 | Ancestors(SealedBlock, FetchHeadersRangeFuture), 279 | } 280 | 281 | /// A future that downloads full block and the headers of its ancestors. 282 | #[must_use = "futures do nothing unless polled"] 283 | pub struct FetchFullBlockWithAncestorsFuture { 284 | block_hash: B256, 285 | ancestor_count: u64, 286 | state: FullBlockWithAncestorsDownloadState, 287 | started_at: Instant, 288 | } 289 | 290 | impl FetchFullBlockWithAncestorsFuture { 291 | /// Create new fetch full block with ancestors future. 292 | pub fn new( 293 | network: RessNetworkHandle, 294 | consensus: EthBeaconConsensus, 295 | retry_delay: Duration, 296 | block_hash: B256, 297 | ancestor_count: u64, 298 | ) -> Self { 299 | let state = FullBlockWithAncestorsDownloadState::FullBlock(FetchFullBlockFuture::new( 300 | network, 301 | consensus, 302 | retry_delay, 303 | block_hash, 304 | )); 305 | Self { block_hash, ancestor_count, state, started_at: Instant::now() } 306 | } 307 | 308 | /// Returns the hash of the block being requested. 309 | pub const fn block_hash(&self) -> B256 { 310 | self.block_hash 311 | } 312 | 313 | /// The duration elapsed since request was started. 
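    /// The measurement spans both phases of the download: the full block itself and the
    /// subsequent ancestor-headers request.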
314 | pub fn elapsed(&self) -> Duration { 315 | self.started_at.elapsed() 316 | } 317 | } 318 | 319 | impl Future for FetchFullBlockWithAncestorsFuture { 320 | type Output = (SealedBlock, Vec); 321 | 322 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 323 | let this = self.get_mut(); 324 | loop { 325 | match &mut this.state { 326 | FullBlockWithAncestorsDownloadState::FullBlock(fut) => { 327 | let block = ready!(fut.poll_unpin(cx)); 328 | let ancestors_fut = FetchHeadersRangeFuture::new( 329 | fut.network.clone(), 330 | fut.consensus.clone(), 331 | fut.retry_delay, 332 | GetHeaders { start_hash: block.parent_hash, limit: this.ancestor_count }, 333 | ); 334 | this.state = 335 | FullBlockWithAncestorsDownloadState::Ancestors(block, ancestors_fut); 336 | } 337 | FullBlockWithAncestorsDownloadState::Ancestors(block, fut) => { 338 | let ancestors = ready!(fut.poll_unpin(cx)); 339 | return Poll::Ready((std::mem::take(block), ancestors)) 340 | } 341 | } 342 | } 343 | } 344 | } 345 | 346 | enum FullBlockRangeDownloadState { 347 | Headers { fut: FetchHeadersRangeFuture }, 348 | Bodies(FullBlockRangeBodiesDownloadState), 349 | } 350 | 351 | struct FullBlockRangeBodiesDownloadState { 352 | headers: Vec, 353 | fut: DownloadFut>, 354 | bodies: Vec, 355 | } 356 | 357 | impl FullBlockRangeBodiesDownloadState { 358 | fn missing(&self) -> impl Iterator + '_ { 359 | self.headers.iter().skip(self.bodies.len()).map(|h| h.hash()) 360 | } 361 | 362 | fn take_blocks(&mut self) -> impl Iterator { 363 | std::mem::take(&mut self.headers) 364 | .into_iter() 365 | .zip(std::mem::take(&mut self.bodies)) 366 | .map(|(header, body)| SealedBlock::from_sealed_parts(header, body)) 367 | } 368 | } 369 | 370 | /// A future that downloads full block range. 371 | #[must_use = "futures do nothing unless polled"] 372 | pub struct FetchFullBlockRangeFuture { 373 | network: RessNetworkHandle, 374 | consensus: EthBeaconConsensus, 375 | retry_delay: Duration, 376 | request: GetHeaders, 377 | started_at: Instant, 378 | state: FullBlockRangeDownloadState, 379 | } 380 | 381 | impl FetchFullBlockRangeFuture { 382 | /// Create new fetch full block range future. 383 | pub fn new( 384 | network: RessNetworkHandle, 385 | consensus: EthBeaconConsensus, 386 | retry_delay: Duration, 387 | request: GetHeaders, 388 | ) -> Self { 389 | let fut = 390 | FetchHeadersRangeFuture::new(network.clone(), consensus.clone(), retry_delay, request); 391 | Self { 392 | network, 393 | consensus, 394 | retry_delay, 395 | request, 396 | started_at: fut.started_at, 397 | state: FullBlockRangeDownloadState::Headers { fut }, 398 | } 399 | } 400 | 401 | /// Returns the get headers request. 402 | pub fn request(&self) -> GetHeaders { 403 | self.request 404 | } 405 | 406 | /// The duration elapsed since request was started. 
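    /// `started_at` is inherited from the inner headers future, so this covers the header
    /// phase as well as every bodies retry round.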
407 | pub fn elapsed(&self) -> Duration { 408 | self.started_at.elapsed() 409 | } 410 | 411 | fn request_bodies( 412 | network: RessNetworkHandle, 413 | request: impl IntoIterator, 414 | delay: Duration, 415 | ) -> DownloadFut> { 416 | let request = request.into_iter().collect(); 417 | Box::pin(async move { 418 | tokio::time::sleep(delay).await; 419 | network.fetch_block_bodies(request).await 420 | }) 421 | } 422 | } 423 | 424 | impl Future for FetchFullBlockRangeFuture { 425 | type Output = Vec; 426 | 427 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 428 | let this = self.get_mut(); 429 | 430 | loop { 431 | match &mut this.state { 432 | FullBlockRangeDownloadState::Headers { fut } => { 433 | let headers = ready!(fut.poll_unpin(cx)); 434 | let fut = Self::request_bodies( 435 | this.network.clone(), 436 | headers.iter().map(|h| h.hash()), 437 | Default::default(), 438 | ); 439 | this.state = 440 | FullBlockRangeDownloadState::Bodies(FullBlockRangeBodiesDownloadState { 441 | headers, 442 | fut, 443 | bodies: Vec::new(), 444 | }); 445 | } 446 | FullBlockRangeDownloadState::Bodies(state) => { 447 | let response = ready!(state.fut.poll_unpin(cx)); 448 | let pending_bodies = match response { 449 | Ok(pending) => { 450 | if pending.is_empty() { 451 | trace!(target: "ress::engine::downloader", request = ?this.request, "Empty bodies response"); 452 | state.fut = Self::request_bodies( 453 | this.network.clone(), 454 | state.missing(), 455 | this.retry_delay, 456 | ); 457 | continue 458 | } 459 | pending 460 | } 461 | Err(error) => { 462 | trace!(target: "ress::engine::downloader", %error, ?this.request, "Bodies download failed"); 463 | state.fut = Self::request_bodies( 464 | this.network.clone(), 465 | state.missing(), 466 | this.retry_delay, 467 | ); 468 | continue 469 | } 470 | }; 471 | 472 | let mut pending_bodies = pending_bodies.into_iter(); 473 | for header in &state.headers[state.bodies.len()..] { 474 | if let Some(body) = pending_bodies.next() { 475 | if let Err(error) = as Consensus< 476 | Block, 477 | >>::validate_body_against_header( 478 | &this.consensus, &body, header 479 | ) { 480 | trace!(target: "ress::engine::downloader", %error, ?this.request, "Invalid body response"); 481 | state.fut = Self::request_bodies( 482 | this.network.clone(), 483 | state.missing(), 484 | this.retry_delay, 485 | ); 486 | continue 487 | } 488 | 489 | state.bodies.push(body); 490 | } 491 | } 492 | 493 | let remaining_hashes = state.missing().collect::>(); 494 | if !remaining_hashes.is_empty() { 495 | state.fut = Self::request_bodies( 496 | this.network.clone(), 497 | remaining_hashes, 498 | Default::default(), 499 | ); 500 | continue 501 | } 502 | 503 | return Poll::Ready(state.take_blocks().collect()) 504 | } 505 | } 506 | } 507 | } 508 | } 509 | 510 | /// A future that downloads a bytecode from the network. 511 | #[must_use = "futures do nothing unless polled"] 512 | pub struct FetchBytecodeFuture { 513 | network: RessNetworkHandle, 514 | retry_delay: Duration, 515 | code_hash: B256, 516 | started_at: Instant, 517 | pending: DownloadFut, 518 | } 519 | 520 | impl FetchBytecodeFuture { 521 | /// Create new fetch bytecode future. 
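    ///
    /// The first request is issued immediately; `retry_delay` only applies to follow-up
    /// attempts after an error or a code-hash mismatch. A minimal usage sketch (the
    /// `network` handle is hypothetical):
    ///
    /// ```ignore
    /// let fut = FetchBytecodeFuture::new(network, Duration::from_millis(50), code_hash);
    /// let bytecode = fut.await; // resolves only once a matching bytecode arrives
    /// ```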
522 | pub fn new(network: RessNetworkHandle, retry_delay: Duration, code_hash: B256) -> Self { 523 | let network_ = network.clone(); 524 | Self { 525 | network, 526 | retry_delay, 527 | code_hash, 528 | started_at: Instant::now(), 529 | pending: Box::pin(async move { network_.fetch_bytecode(code_hash).await }), 530 | } 531 | } 532 | 533 | /// Returns the code hash of the bytecode being requested. 534 | pub fn code_hash(&self) -> B256 { 535 | self.code_hash 536 | } 537 | 538 | /// The duration elapsed since request was started. 539 | pub fn elapsed(&self) -> Duration { 540 | self.started_at.elapsed() 541 | } 542 | 543 | fn bytecode_request(&self) -> DownloadFut { 544 | let network = self.network.clone(); 545 | let hash = self.code_hash; 546 | let delay = self.retry_delay; 547 | Box::pin(async move { 548 | tokio::time::sleep(delay).await; 549 | network.fetch_bytecode(hash).await 550 | }) 551 | } 552 | } 553 | 554 | impl Future for FetchBytecodeFuture { 555 | type Output = Bytecode; 556 | 557 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 558 | let this = self.get_mut(); 559 | 560 | loop { 561 | match ready!(this.pending.poll_unpin(cx)) { 562 | Ok(bytecode) => { 563 | let bytecode = Bytecode::new_raw(bytecode); 564 | let code_hash = bytecode.hash_slow(); 565 | if code_hash == this.code_hash { 566 | return Poll::Ready(bytecode) 567 | } else { 568 | trace!(target: "ress::engine::downloader", expected = %this.code_hash, received = %code_hash, "Received wrong bytecode"); 569 | } 570 | } 571 | Err(error) => { 572 | trace!(target: "ress::engine::downloader", %error, %this.code_hash, "Bytecode download failed"); 573 | } 574 | }; 575 | this.pending = this.bytecode_request(); 576 | } 577 | } 578 | } 579 | 580 | /// A future that downloads a witness from the network. 581 | #[must_use = "futures do nothing unless polled"] 582 | pub struct FetchWitnessFuture { 583 | network: RessNetworkHandle, 584 | block_hash: B256, 585 | retry_delay: Duration, 586 | started_at: Instant, 587 | pending: DownloadFut>, 588 | } 589 | 590 | impl FetchWitnessFuture { 591 | /// Create new fetch witness future. 592 | pub fn new(network: RessNetworkHandle, retry_delay: Duration, block_hash: B256) -> Self { 593 | let network_ = network.clone(); 594 | Self { 595 | network, 596 | retry_delay, 597 | block_hash, 598 | started_at: Instant::now(), 599 | pending: Box::pin(async move { network_.fetch_witness(block_hash).await }), 600 | } 601 | } 602 | 603 | /// Returns the hash of the block the witness is being requested for. 604 | pub fn block_hash(&self) -> B256 { 605 | self.block_hash 606 | } 607 | 608 | /// The duration elapsed since request was started. 
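    /// As with the other download futures, retry rounds (failed requests or empty witness
    /// responses) are counted towards the elapsed time.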
609 | pub fn elapsed(&self) -> Duration { 610 | self.started_at.elapsed() 611 | } 612 | 613 | fn witness_request(&self) -> DownloadFut> { 614 | let network = self.network.clone(); 615 | let hash = self.block_hash; 616 | let delay = self.retry_delay; 617 | Box::pin(async move { 618 | tokio::time::sleep(delay).await; 619 | network.fetch_witness(hash).await 620 | }) 621 | } 622 | } 623 | 624 | impl Future for FetchWitnessFuture { 625 | type Output = ExecutionWitness; 626 | 627 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 628 | let this = self.get_mut(); 629 | 630 | loop { 631 | match ready!(this.pending.poll_unpin(cx)) { 632 | Ok(witness) => { 633 | if witness.is_empty() { 634 | trace!(target: "ress::engine::downloader", block_hash = %this.block_hash, "Received empty witness"); 635 | } else { 636 | let rlp_size_bytes = witness.length(); 637 | let valid = { 638 | // TODO: 639 | // for StateWitnessEntry { hash, bytes } in witness { 640 | // let entry_hash = keccak256(&bytes); 641 | // if hash == entry_hash { 642 | // state_witness.insert(hash, bytes); 643 | // } else { 644 | // trace!(target: "ress::engine::downloader", block_hash = 645 | // %this.block_hash, expected = %entry_hash, received = %hash, "Invalid 646 | // witness entry"); break 'witness 647 | // false } 648 | // } 649 | true 650 | }; 651 | if valid { 652 | return Poll::Ready(ExecutionWitness::new(witness, rlp_size_bytes)) 653 | } 654 | } 655 | } 656 | Err(error) => { 657 | trace!(target: "ress::engine::downloader", %error, %this.block_hash, "Witness download failed"); 658 | } 659 | }; 660 | this.pending = this.witness_request(); 661 | } 662 | } 663 | } 664 | -------------------------------------------------------------------------------- /crates/engine/src/download/mod.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::{map::HashMap, B256}; 2 | use futures::FutureExt; 3 | use metrics::{Counter, Gauge, Histogram}; 4 | use ress_network::RessNetworkHandle; 5 | use ress_primitives::witness::ExecutionWitness; 6 | use reth_chainspec::ChainSpec; 7 | use reth_metrics::Metrics; 8 | use reth_node_ethereum::consensus::EthBeaconConsensus; 9 | use reth_primitives::{Bytecode, SealedBlock, SealedHeader}; 10 | use std::{ 11 | collections::VecDeque, 12 | task::{Context, Poll}, 13 | time::Duration, 14 | }; 15 | use tracing::*; 16 | 17 | /// Futures for fetching and validating blockchain data. 18 | #[allow(missing_debug_implementations)] 19 | pub mod futs; 20 | use futs::*; 21 | 22 | /// Struct for downloading chain data from the network. 23 | #[allow(missing_debug_implementations)] 24 | pub struct EngineDownloader { 25 | network: RessNetworkHandle, 26 | consensus: EthBeaconConsensus, 27 | retry_delay: Duration, 28 | 29 | inflight_full_block_requests: Vec, 30 | inflight_bytecode_requests: Vec, 31 | inflight_witness_requests: Vec, 32 | inflight_finalized_block_requests: Vec, 33 | outcomes: VecDeque, 34 | 35 | metrics: EngineDownloaderMetrics, 36 | } 37 | 38 | impl EngineDownloader { 39 | /// Create new engine downloader. 
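    ///
    /// All request futures spawned by this downloader share a fixed 50ms retry delay (see
    /// `retry_delay` below); there is currently no exponential backoff.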
40 | pub fn new(network: RessNetworkHandle, consensus: EthBeaconConsensus) -> Self { 41 | Self { 42 | network, 43 | consensus, 44 | retry_delay: Duration::from_millis(50), 45 | inflight_full_block_requests: Vec::new(), 46 | inflight_witness_requests: Vec::new(), 47 | inflight_bytecode_requests: Vec::new(), 48 | inflight_finalized_block_requests: Vec::new(), 49 | outcomes: VecDeque::new(), 50 | metrics: EngineDownloaderMetrics::default(), 51 | } 52 | } 53 | 54 | /// Download full block by block hash. 55 | pub fn download_full_block(&mut self, block_hash: B256) { 56 | if self.inflight_full_block_requests.iter().any(|req| req.block_hash() == block_hash) { 57 | return 58 | } 59 | 60 | debug!(target: "ress::engine::downloader", %block_hash, "Downloading full block"); 61 | let fut = FetchFullBlockFuture::new( 62 | self.network.clone(), 63 | self.consensus.clone(), 64 | self.retry_delay, 65 | block_hash, 66 | ); 67 | self.inflight_full_block_requests.push(fut); 68 | self.metrics.inc_total(RequestMetricTy::FullBlock); 69 | self.metrics.set_inflight(RequestMetricTy::FullBlock, self.inflight_witness_requests.len()); 70 | } 71 | 72 | /// Download bytecode by code hash. 73 | pub fn download_bytecode(&mut self, code_hash: B256) { 74 | if self.inflight_bytecode_requests.iter().any(|req| req.code_hash() == code_hash) { 75 | return 76 | } 77 | 78 | debug!(target: "ress::engine::downloader", %code_hash, "Downloading bytecode"); 79 | let fut = FetchBytecodeFuture::new(self.network.clone(), self.retry_delay, code_hash); 80 | self.inflight_bytecode_requests.push(fut); 81 | self.metrics.inc_total(RequestMetricTy::Bytecode); 82 | self.metrics.set_inflight(RequestMetricTy::Bytecode, self.inflight_bytecode_requests.len()); 83 | } 84 | 85 | /// Download witness by block hash. 86 | pub fn download_witness(&mut self, block_hash: B256) { 87 | if self.inflight_witness_requests.iter().any(|req| req.block_hash() == block_hash) { 88 | return 89 | } 90 | 91 | debug!(target: "ress::engine::downloader", %block_hash, "Downloading witness"); 92 | let fut = FetchWitnessFuture::new(self.network.clone(), self.retry_delay, block_hash); 93 | self.inflight_witness_requests.push(fut); 94 | self.metrics.inc_total(RequestMetricTy::Witness); 95 | self.metrics.set_inflight(RequestMetricTy::Witness, self.inflight_witness_requests.len()); 96 | } 97 | 98 | /// Download finalized block with 256 ancestors. 99 | pub fn download_finalized_with_ancestors(&mut self, block_hash: B256) { 100 | if self.inflight_finalized_block_requests.iter().any(|req| req.block_hash() == block_hash) { 101 | return 102 | } 103 | 104 | debug!(target: "ress::engine::downloader", %block_hash, "Downloading finalized"); 105 | let fut = FetchFullBlockWithAncestorsFuture::new( 106 | self.network.clone(), 107 | self.consensus.clone(), 108 | self.retry_delay, 109 | block_hash, 110 | 256, 111 | ); 112 | self.inflight_finalized_block_requests.push(fut); 113 | self.metrics.inc_total(RequestMetricTy::Finalized); 114 | self.metrics 115 | .set_inflight(RequestMetricTy::Finalized, self.inflight_finalized_block_requests.len()); 116 | } 117 | 118 | /// Poll downloader. 
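    ///
    /// Outcomes are drained FIFO: a previously queued outcome is returned first, then all
    /// in-flight requests are advanced in order (finalized blocks, full blocks, witnesses,
    /// bytecodes) and completions are queued before the next outcome is popped. A caller
    /// sketch (hypothetical surrounding future with `cx` in scope):
    ///
    /// ```ignore
    /// while let Poll::Ready(outcome) = downloader.poll(cx) {
    ///     handle_outcome(outcome); // hypothetical handler for the DownloadOutcome
    /// }
    /// ```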
119 | pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll { 120 | if let Some(outcome) = self.outcomes.pop_front() { 121 | return Poll::Ready(outcome) 122 | } 123 | 124 | // advance all full block range requests 125 | for idx in (0..self.inflight_finalized_block_requests.len()).rev() { 126 | let mut request = self.inflight_finalized_block_requests.swap_remove(idx); 127 | if let Poll::Ready((block, ancestors)) = request.poll_unpin(cx) { 128 | let elapsed = request.elapsed(); 129 | self.metrics.record_elapsed(RequestMetricTy::Finalized, elapsed); 130 | trace!(target: "ress::engine::downloader", block=?block.num_hash(), ancestors_len = ancestors.len(), ?elapsed, "Received finalized block"); 131 | self.outcomes.push_back(DownloadOutcome::new( 132 | DownloadData::FinalizedBlock(block, ancestors), 133 | elapsed, 134 | )); 135 | } else { 136 | self.inflight_finalized_block_requests.push(request); 137 | } 138 | } 139 | self.metrics 140 | .set_inflight(RequestMetricTy::Finalized, self.inflight_finalized_block_requests.len()); 141 | 142 | // advance all full block requests 143 | for idx in (0..self.inflight_full_block_requests.len()).rev() { 144 | let mut request = self.inflight_full_block_requests.swap_remove(idx); 145 | if let Poll::Ready(block) = request.poll_unpin(cx) { 146 | let elapsed = request.elapsed(); 147 | self.metrics.record_elapsed(RequestMetricTy::FullBlock, elapsed); 148 | trace!(target: "ress::engine::downloader", block = ?block.num_hash(), ?elapsed, "Received single full block"); 149 | self.outcomes 150 | .push_back(DownloadOutcome::new(DownloadData::FullBlock(block), elapsed)); 151 | } else { 152 | self.inflight_full_block_requests.push(request); 153 | } 154 | } 155 | self.metrics 156 | .set_inflight(RequestMetricTy::FullBlock, self.inflight_full_block_requests.len()); 157 | 158 | // advance all witness requests 159 | for idx in (0..self.inflight_witness_requests.len()).rev() { 160 | let mut request = self.inflight_witness_requests.swap_remove(idx); 161 | if let Poll::Ready(witness) = request.poll_unpin(cx) { 162 | let elapsed = request.elapsed(); 163 | self.metrics.record_elapsed(RequestMetricTy::Witness, elapsed); 164 | trace!(target: "ress::engine::downloader", block_hash = %request.block_hash(), ?elapsed, "Received witness"); 165 | self.outcomes.push_back(DownloadOutcome::new( 166 | DownloadData::Witness(request.block_hash(), witness), 167 | elapsed, 168 | )); 169 | } else { 170 | self.inflight_witness_requests.push(request); 171 | } 172 | } 173 | self.metrics.set_inflight(RequestMetricTy::Witness, self.inflight_witness_requests.len()); 174 | 175 | // advance all bytecode requests 176 | for idx in (0..self.inflight_bytecode_requests.len()).rev() { 177 | let mut request = self.inflight_bytecode_requests.swap_remove(idx); 178 | if let Poll::Ready(bytecode) = request.poll_unpin(cx) { 179 | let elapsed = request.elapsed(); 180 | self.metrics.record_elapsed(RequestMetricTy::Bytecode, elapsed); 181 | trace!(target: "ress::engine::downloader", code_hash = %request.code_hash(), ?elapsed, "Received bytecode"); 182 | self.outcomes.push_back(DownloadOutcome::new( 183 | DownloadData::Bytecode(request.code_hash(), bytecode), 184 | elapsed, 185 | )); 186 | } else { 187 | self.inflight_bytecode_requests.push(request); 188 | } 189 | } 190 | self.metrics.set_inflight(RequestMetricTy::Bytecode, self.inflight_bytecode_requests.len()); 191 | 192 | if let Some(outcome) = self.outcomes.pop_front() { 193 | return Poll::Ready(outcome) 194 | } 195 | 196 | Poll::Pending 197 | } 198 | } 199 | 200 | /// 
Download outcome.
201 | #[derive(Debug)]
202 | pub struct DownloadOutcome {
203 |     /// Downloaded data.
204 |     pub data: DownloadData,
205 |     /// Time elapsed since download started.
206 |     pub elapsed: Duration,
207 | }
208 | 
209 | impl DownloadOutcome {
210 |     /// Create new download outcome.
211 |     pub fn new(data: DownloadData, elapsed: Duration) -> Self {
212 |         Self { data, elapsed }
213 |     }
214 | }
215 | 
216 | /// Download data.
217 | #[derive(Debug)]
218 | pub enum DownloadData {
219 |     /// Downloaded full block.
220 |     FullBlock(SealedBlock),
221 |     /// Downloaded bytecode.
222 |     Bytecode(B256, Bytecode),
223 |     /// Downloaded execution witness.
224 |     Witness(B256, ExecutionWitness),
225 |     /// Downloaded full block with ancestors.
226 |     FinalizedBlock(SealedBlock, Vec<SealedHeader>),
227 | }
228 | 
229 | #[derive(Default, Debug)]
230 | struct EngineDownloaderMetrics {
231 |     by_type: HashMap<RequestMetricTy, DownloadRequestTypeMetrics>,
232 | }
233 | 
234 | impl EngineDownloaderMetrics {
235 |     fn for_type(&mut self, ty: RequestMetricTy) -> &DownloadRequestTypeMetrics {
236 |         self.by_type.entry(ty).or_insert_with(|| {
237 |             DownloadRequestTypeMetrics::new_with_labels(&[("type", ty.to_string())])
238 |         })
239 |     }
240 | 
241 |     fn inc_total(&mut self, ty: RequestMetricTy) {
242 |         self.for_type(ty).total.increment(1);
243 |     }
244 | 
245 |     fn set_inflight(&mut self, ty: RequestMetricTy, count: usize) {
246 |         self.for_type(ty).inflight.set(count as f64);
247 |     }
248 | 
249 |     fn record_elapsed(&mut self, ty: RequestMetricTy, elapsed: Duration) {
250 |         self.for_type(ty).elapsed.record(elapsed.as_secs_f64());
251 |     }
252 | }
253 | 
254 | #[derive(Metrics)]
255 | #[metrics(scope = "engine.downloader")]
256 | struct DownloadRequestTypeMetrics {
257 |     /// The total number of requests.
258 |     total: Counter,
259 |     /// The number of inflight requests.
260 |     inflight: Gauge,
261 |     /// The number of seconds the request took to complete.
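    /// Recorded once per completed request and, like the other fields in this scope,
    /// labeled by request `type`.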
262 | elapsed: Histogram, 263 | } 264 | 265 | #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, strum_macros::Display)] 266 | #[strum(serialize_all = "snake_case")] 267 | enum RequestMetricTy { 268 | FullBlock, 269 | Bytecode, 270 | Witness, 271 | Finalized, 272 | } 273 | -------------------------------------------------------------------------------- /crates/engine/src/engine.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | download::{DownloadData, DownloadOutcome, EngineDownloader}, 3 | tree::{DownloadRequest, EngineTree, TreeAction, TreeEvent}, 4 | }; 5 | use alloy_primitives::{map::B256HashSet, B256}; 6 | use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; 7 | use futures::{FutureExt, StreamExt}; 8 | use metrics::Histogram; 9 | use ress_network::RessNetworkHandle; 10 | use ress_primitives::witness::ExecutionWitness; 11 | use ress_provider::RessProvider; 12 | use reth_chainspec::ChainSpec; 13 | use reth_engine_tree::tree::error::InsertBlockFatalError; 14 | use reth_errors::ProviderError; 15 | use reth_metrics::Metrics; 16 | use reth_node_api::{BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError}; 17 | use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEngineTypes, EthereumEngineValidator}; 18 | use std::{ 19 | future::Future, 20 | pin::Pin, 21 | task::{Context, Poll}, 22 | time::{Duration, Instant}, 23 | }; 24 | use tokio::{ 25 | sync::{mpsc, oneshot}, 26 | time::Sleep, 27 | }; 28 | use tokio_stream::wrappers::UnboundedReceiverStream; 29 | use tracing::*; 30 | 31 | /// Metrics for the consensus engine 32 | #[derive(Metrics)] 33 | #[metrics(scope = "engine")] 34 | pub(crate) struct ConsensusEngineMetrics { 35 | /// Histogram of witness sizes in bytes. 36 | pub witness_size_bytes: Histogram, 37 | /// Histogram of witness node counts. 38 | pub witness_nodes_count: Histogram, 39 | } 40 | 41 | /// Ress consensus engine. 42 | #[allow(missing_debug_implementations)] 43 | pub struct ConsensusEngine { 44 | tree: EngineTree, 45 | downloader: EngineDownloader, 46 | from_beacon_engine: UnboundedReceiverStream>, 47 | parked_payload_timeout: Duration, 48 | parked_payload: Option, 49 | metrics: ConsensusEngineMetrics, 50 | } 51 | 52 | impl ConsensusEngine { 53 | /// Initialize consensus engine. 
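    ///
    /// Payloads that arrive before their witness are parked for at most 3 seconds
    /// (`parked_payload_timeout`); on timeout a `SYNCING` status is returned instead.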
54 | pub fn new( 55 | provider: RessProvider, 56 | consensus: EthBeaconConsensus, 57 | engine_validator: EthereumEngineValidator, 58 | network: RessNetworkHandle, 59 | from_beacon_engine: mpsc::UnboundedReceiver>, 60 | engine_events_sender: mpsc::UnboundedSender, 61 | ) -> Self { 62 | Self { 63 | tree: EngineTree::new( 64 | provider, 65 | consensus.clone(), 66 | engine_validator, 67 | engine_events_sender, 68 | ), 69 | downloader: EngineDownloader::new(network, consensus), 70 | from_beacon_engine: UnboundedReceiverStream::from(from_beacon_engine), 71 | parked_payload_timeout: Duration::from_secs(3), 72 | parked_payload: None, 73 | metrics: Default::default(), 74 | } 75 | } 76 | 77 | fn on_maybe_tree_event(&mut self, maybe_event: Option) { 78 | if let Some(event) = maybe_event { 79 | self.on_tree_event(event); 80 | } 81 | } 82 | 83 | fn on_tree_event(&mut self, event: TreeEvent) { 84 | match event { 85 | TreeEvent::Download(DownloadRequest::Block { block_hash }) => { 86 | self.downloader.download_full_block(block_hash); 87 | if !self.tree.block_buffer.witnesses.contains_key(&block_hash) { 88 | self.downloader.download_witness(block_hash); 89 | } 90 | } 91 | TreeEvent::Download(DownloadRequest::Witness { block_hash }) => { 92 | self.downloader.download_witness(block_hash); 93 | } 94 | TreeEvent::Download(DownloadRequest::Finalized { block_hash }) => { 95 | self.downloader.download_finalized_with_ancestors(block_hash); 96 | } 97 | TreeEvent::TreeAction(TreeAction::MakeCanonical { sync_target_head }) => { 98 | self.tree.make_canonical(sync_target_head); 99 | } 100 | } 101 | } 102 | 103 | fn on_download_outcome( 104 | &mut self, 105 | outcome: DownloadOutcome, 106 | ) -> Result<(), InsertBlockFatalError> { 107 | let elapsed = outcome.elapsed; 108 | let mut unlocked_block_hashes = B256HashSet::default(); 109 | match outcome.data { 110 | DownloadData::FinalizedBlock(block, ancestors) => { 111 | let block_num_hash = block.num_hash(); 112 | info!(target: "ress::engine", ?block_num_hash, ancestors_len = ancestors.len(), "Downloaded finalized block"); 113 | 114 | let recovered = block.try_recover().map_err(|_| { 115 | InsertBlockFatalError::Provider(ProviderError::SenderRecoveryError) 116 | })?; 117 | self.tree.set_canonical_head(block_num_hash); 118 | self.tree.provider.insert_canonical_hash(recovered.number, recovered.hash()); 119 | self.tree.provider.insert_block(recovered, None); 120 | for header in ancestors { 121 | self.tree.provider.insert_canonical_hash(header.number, header.hash()); 122 | } 123 | unlocked_block_hashes.insert(block_num_hash.hash); 124 | } 125 | DownloadData::FullBlock(block) => { 126 | let block_num_hash = block.num_hash(); 127 | trace!(target: "ress::engine", ?block_num_hash, ?elapsed, "Downloaded block"); 128 | let recovered = match block.try_recover() { 129 | Ok(block) => block, 130 | Err(_error) => { 131 | debug!(target: "ress::engine", ?block_num_hash, "Error recovering downloaded block"); 132 | return Ok(()) 133 | } 134 | }; 135 | self.tree.block_buffer.insert_block(recovered); 136 | unlocked_block_hashes.insert(block_num_hash.hash); 137 | } 138 | DownloadData::Witness(block_hash, witness) => { 139 | let code_hashes = witness.bytecode_hashes().clone(); 140 | let missing_code_hashes = 141 | self.tree.provider.missing_code_hashes(code_hashes).map_err(|error| { 142 | InsertBlockFatalError::Provider(ProviderError::Database(error)) 143 | })?; 144 | let missing_bytecodes_len = missing_code_hashes.len(); 145 | let rlp_size = humansize::format_size(witness.rlp_size_bytes(), 
humansize::DECIMAL); 146 | let witness_nodes_count = witness.state_witness().len(); 147 | 148 | // Record witness metrics before inserting the witness 149 | self.record_witness_metrics(&witness); 150 | 151 | self.tree.block_buffer.insert_witness( 152 | block_hash, 153 | witness, 154 | missing_code_hashes.clone(), 155 | ); 156 | 157 | if Some(block_hash) == self.parked_payload.as_ref().map(|parked| parked.block_hash) 158 | { 159 | info!(target: "ress::engine", %block_hash, missing_bytecodes_len, %rlp_size, witness_nodes_count, ?elapsed, "Downloaded for parked payload"); 160 | } else { 161 | trace!(target: "ress::engine", %block_hash, missing_bytecodes_len, %rlp_size, witness_nodes_count, ?elapsed, "Downloaded witness"); 162 | } 163 | if missing_code_hashes.is_empty() { 164 | unlocked_block_hashes.insert(block_hash); 165 | } else { 166 | for code_hash in missing_code_hashes { 167 | self.downloader.download_bytecode(code_hash); 168 | } 169 | } 170 | } 171 | DownloadData::Bytecode(code_hash, bytecode) => { 172 | trace!(target: "ress::engine", %code_hash, ?elapsed, "Downloaded bytecode"); 173 | match self.tree.provider.insert_bytecode(code_hash, bytecode) { 174 | Ok(()) => { 175 | unlocked_block_hashes 176 | .extend(self.tree.block_buffer.on_bytecode_received(code_hash)); 177 | } 178 | Err(error) => { 179 | error!(target: "ress::engine", %error, "Failed to insert the bytecode"); 180 | } 181 | }; 182 | } 183 | }; 184 | 185 | for unlocked_hash in unlocked_block_hashes { 186 | let Some((block, witness)) = self.tree.block_buffer.remove_block(&unlocked_hash) else { 187 | continue 188 | }; 189 | let block_num_hash = block.num_hash(); 190 | trace!(target: "ress::engine", block = ?block_num_hash, "Inserting block after download"); 191 | let mut result = self 192 | .tree 193 | .on_downloaded_block(block, witness) 194 | .map_err(BeaconOnNewPayloadError::internal); 195 | match &mut result { 196 | Ok(outcome) => { 197 | self.on_maybe_tree_event(outcome.event.take()); 198 | } 199 | Err(error) => { 200 | error!(target: "ress::engine", block = ?block_num_hash, %error, "Error inserting downloaded block"); 201 | } 202 | }; 203 | if self 204 | .parked_payload 205 | .as_ref() 206 | .is_some_and(|parked| parked.block_hash == block_num_hash.hash) 207 | { 208 | let parked = self.parked_payload.take().unwrap(); 209 | trace!(target: "ress::engine", block = ?block_num_hash, elapsed = ?parked.parked_at.elapsed(), "Sending response for parked payload"); 210 | if let Err(error) = parked.tx.send(result.map(|o| o.outcome)) { 211 | error!(target: "ress::engine", block = ?block_num_hash, ?error, "Failed to send payload status"); 212 | } 213 | } 214 | } 215 | 216 | Ok(()) 217 | } 218 | 219 | fn on_engine_message(&mut self, message: BeaconEngineMessage) { 220 | match message { 221 | BeaconEngineMessage::NewPayload { payload, tx } => { 222 | let block_hash = payload.block_hash(); 223 | let block_number = payload.block_number(); 224 | let maybe_witness = self.tree.block_buffer.remove_witness(&payload.block_hash()); 225 | let has_witness = maybe_witness.is_some(); 226 | debug!(target: "ress::engine", block_number, %block_hash, has_witness, "Inserting new payload"); 227 | let mut result = self 228 | .tree 229 | .on_new_payload(payload, maybe_witness) 230 | .map_err(BeaconOnNewPayloadError::internal); 231 | if let Ok(outcome) = &mut result { 232 | if let Some(event) = outcome.event.take() { 233 | self.on_tree_event(event.clone()); 234 | if let Some(block_hash) = 235 | event.as_witness_download().filter(|_| 
outcome.outcome.is_syncing()) 236 | { 237 | debug!(target: "ress::engine", block_number, %block_hash, "Parking payload due to missing witness"); 238 | self.parked_payload = Some(ParkedPayload::new( 239 | block_hash, 240 | tx, 241 | self.parked_payload_timeout, 242 | )); 243 | return 244 | } 245 | if !has_witness { 246 | self.on_tree_event(TreeEvent::download_witness(block_hash)); 247 | } 248 | } 249 | } 250 | let outcome_result = result.map(|o| o.outcome); 251 | debug!(target: "ress::engine", block_number, %block_hash, result = ?outcome_result, "Returning payload result"); 252 | if let Err(error) = tx.send(outcome_result) { 253 | error!(target: "ress::engine", ?error, "Failed to send payload status"); 254 | } 255 | } 256 | BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx, version } => { 257 | debug!(target: "ress::engine", head = %state.head_block_hash, safe = %state.safe_block_hash, finalized = %state.finalized_block_hash, "Updating forkchoice state"); 258 | let mut result = self.tree.on_forkchoice_updated(state, payload_attrs, version); 259 | if let Ok(outcome) = &mut result { 260 | // track last received forkchoice state 261 | let status = outcome.outcome.forkchoice_status(); 262 | self.tree.forkchoice_state_tracker.set_latest(state, status); 263 | self.tree 264 | .emit_event(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); 265 | self.on_maybe_tree_event(outcome.event.take()); 266 | } 267 | let outcome_result = result.map(|o| o.outcome); 268 | debug!(target: "ress::engine", ?state, result = ?outcome_result, "Returning forkchoice update result"); 269 | if let Err(error) = tx.send(outcome_result) { 270 | error!(target: "ress::engine", ?error, "Failed to send forkchoice outcome"); 271 | } 272 | } 273 | BeaconEngineMessage::TransitionConfigurationExchanged => { 274 | warn!(target: "ress::engine", "Received unsupported `TransitionConfigurationExchanged` message"); 275 | } 276 | } 277 | } 278 | 279 | /// Record witness metrics 280 | fn record_witness_metrics(&self, witness: &ExecutionWitness) { 281 | let witness_size_bytes = witness.rlp_size_bytes(); 282 | let witness_nodes_count = witness.state_witness().len(); 283 | 284 | self.metrics.witness_size_bytes.record(witness_size_bytes as f64); 285 | self.metrics.witness_nodes_count.record(witness_nodes_count as f64); 286 | } 287 | } 288 | 289 | impl Future for ConsensusEngine { 290 | type Output = Result<(), InsertBlockFatalError>; 291 | 292 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 293 | let this = self.get_mut(); 294 | 295 | loop { 296 | if let Poll::Ready(outcome) = this.downloader.poll(cx) { 297 | this.on_download_outcome(outcome)?; 298 | continue; 299 | } 300 | 301 | if let Some(parked) = &mut this.parked_payload { 302 | if parked.timeout.poll_unpin(cx).is_ready() { 303 | let parked = this.parked_payload.take().unwrap(); 304 | warn!(target: "ress::engine", block_hash = %parked.block_hash, "Could not download missing payload data in time"); 305 | let status = PayloadStatus::from_status(PayloadStatusEnum::Syncing); 306 | if let Err(error) = parked.tx.send(Ok(status)) { 307 | error!(target: "ress::engine", ?error, "Failed to send parked payload status"); 308 | } 309 | } else { 310 | return Poll::Pending 311 | } 312 | } 313 | 314 | if let Poll::Ready(Some(message)) = this.from_beacon_engine.poll_next_unpin(cx) { 315 | this.on_engine_message(message); 316 | continue; 317 | } 318 | 319 | return Poll::Pending 320 | } 321 | } 322 | } 323 | 324 | struct ParkedPayload { 325 | block_hash: B256, 326 | 
tx: oneshot::Sender>, 327 | parked_at: Instant, 328 | timeout: Pin>, 329 | } 330 | 331 | impl ParkedPayload { 332 | fn new( 333 | block_hash: B256, 334 | tx: oneshot::Sender>, 335 | timeout: Duration, 336 | ) -> Self { 337 | Self { 338 | block_hash, 339 | tx, 340 | parked_at: Instant::now(), 341 | timeout: Box::pin(tokio::time::sleep(timeout)), 342 | } 343 | } 344 | } 345 | -------------------------------------------------------------------------------- /crates/engine/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Ress consensus engine. 2 | 3 | #![cfg_attr(not(test), warn(unused_crate_dependencies))] 4 | 5 | /// Engine tree. 6 | pub mod tree; 7 | 8 | /// Engine downloader. 9 | pub mod download; 10 | 11 | /// Consensus engine. 12 | pub mod engine; 13 | -------------------------------------------------------------------------------- /crates/engine/src/tree/block_buffer.rs: -------------------------------------------------------------------------------- 1 | use alloy_consensus::BlockHeader; 2 | use alloy_primitives::{map::B256HashSet, BlockHash, BlockNumber, B256}; 3 | use metrics::Gauge; 4 | use ress_primitives::witness::ExecutionWitness; 5 | use reth_metrics::Metrics; 6 | use reth_primitives_traits::{Block, RecoveredBlock}; 7 | use schnellru::{ByLength, LruMap}; 8 | use std::collections::{BTreeMap, HashMap, HashSet}; 9 | 10 | /// Metrics for the blockchain tree block buffer 11 | #[derive(Metrics)] 12 | #[metrics(scope = "engine.tree.block_buffer")] 13 | pub(crate) struct BlockBufferMetrics { 14 | /// Total blocks in the block buffer. 15 | pub blocks: Gauge, 16 | /// Total witnesses in the block buffer. 17 | pub witnesses: Gauge, 18 | } 19 | 20 | /// Contains the tree of pending blocks that cannot be executed due to missing parent. 21 | /// It allows to store unconnected blocks for potential future inclusion. 22 | /// 23 | /// The buffer has three main functionalities: 24 | /// * [`BlockBuffer::insert_block`] for inserting blocks inside the buffer. 25 | /// * [`BlockBuffer::remove_block_with_children`] for connecting blocks if the parent gets received 26 | /// and inserted. 27 | /// * [`BlockBuffer::evict_old_blocks`] to evict old blocks that precede the finalized number. 28 | /// 29 | /// Note: Buffer is limited by number of blocks that it can contain and eviction of the block 30 | /// is done by last recently used block. 31 | #[derive(Debug)] 32 | pub struct BlockBuffer { 33 | /// All blocks in the buffer stored by their block hash. 34 | pub(crate) blocks: HashMap>, 35 | /// All witnesses stored by their block hash. 36 | pub(crate) witnesses: HashMap, 37 | /// Missing bytecodes by block hash. 38 | pub(crate) missing_bytecodes: HashMap, 39 | /// Map of any parent block hash (even the ones not currently in the buffer) 40 | /// to the buffered children. 41 | /// Allows connecting buffered blocks by parent. 42 | pub(crate) parent_to_child: HashMap>, 43 | /// `BTreeMap` tracking the earliest blocks by block number. 44 | /// Used for removal of old blocks that precede finalization. 45 | pub(crate) earliest_blocks: BTreeMap>, 46 | /// LRU used for tracing oldest inserted blocks that are going to be 47 | /// first in line for evicting if `max_blocks` limit is hit. 48 | /// 49 | /// Used as counter of amount of blocks inside buffer. 50 | pub(crate) lru: LruMap, 51 | /// Various metrics for the block buffer. 
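    /// The gauges are refreshed on the main insertion and eviction paths, so they track
    /// the number of buffered blocks and witnesses.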
52 | pub(crate) metrics: BlockBufferMetrics, 53 | } 54 | 55 | impl BlockBuffer { 56 | /// Create new buffer with max limit of blocks 57 | pub fn new(limit: u32) -> Self { 58 | Self { 59 | blocks: Default::default(), 60 | witnesses: Default::default(), 61 | missing_bytecodes: Default::default(), 62 | parent_to_child: Default::default(), 63 | earliest_blocks: Default::default(), 64 | lru: LruMap::new(ByLength::new(limit)), 65 | metrics: Default::default(), 66 | } 67 | } 68 | 69 | /// Return reference to the requested witness. 70 | pub fn witness(&self, hash: &BlockHash) -> Option<&ExecutionWitness> { 71 | self.witnesses.get(hash) 72 | } 73 | 74 | /// Return a reference to the lowest ancestor of the given block in the buffer. 75 | pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { 76 | let mut current_block = self.blocks.get(hash)?; 77 | while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) { 78 | current_block = parent; 79 | } 80 | Some(current_block) 81 | } 82 | 83 | /// Insert a correct block inside the buffer. 84 | pub fn insert_block(&mut self, block: RecoveredBlock) { 85 | let hash = block.hash(); 86 | 87 | self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); 88 | self.earliest_blocks.entry(block.number()).or_default().insert(hash); 89 | self.blocks.insert(hash, block); 90 | 91 | if let Some(evicted_hash) = self.insert_hash_and_get_evicted(hash) { 92 | // evict the block if limit is hit 93 | if let Some(evicted_block) = self.evict_block(&evicted_hash) { 94 | // evict the block if limit is hit 95 | self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash); 96 | } 97 | } 98 | self.metrics.blocks.set(self.blocks.len() as f64); 99 | } 100 | 101 | /// Insert a witness in the buffer. 102 | pub fn insert_witness( 103 | &mut self, 104 | block_hash: BlockHash, 105 | witness: ExecutionWitness, 106 | missing_bytecodes: B256HashSet, 107 | ) { 108 | self.witnesses.insert(block_hash, witness); 109 | self.metrics.witnesses.set(self.witnesses.len() as f64); 110 | if !missing_bytecodes.is_empty() { 111 | self.missing_bytecodes.insert(block_hash, missing_bytecodes); 112 | } 113 | } 114 | 115 | /// Inserts the hash and returns the oldest evicted hash if any. 116 | fn insert_hash_and_get_evicted(&mut self, entry: BlockHash) -> Option { 117 | let new = self.lru.peek(&entry).is_none(); 118 | let evicted = if new && self.lru.limiter().max_length() as usize <= self.lru.len() { 119 | self.lru.pop_oldest().map(|(k, ())| k) 120 | } else { 121 | None 122 | }; 123 | self.lru.get_or_insert(entry, || ()); 124 | evicted 125 | } 126 | 127 | /// Removes the given block from the buffer and also all the children of the block. 128 | /// 129 | /// This is used to get all the blocks that are dependent on the block that is included. 130 | /// 131 | /// Note: that order of returned blocks is important and the blocks with lower block number 132 | /// in the chain will come first so that they can be executed in the correct order. 133 | pub fn remove_block_with_children( 134 | &mut self, 135 | parent_hash: BlockHash, 136 | ) -> Vec<(RecoveredBlock, ExecutionWitness)> { 137 | let removed = self 138 | .remove_block(&parent_hash) 139 | .into_iter() 140 | .chain(self.remove_children(Vec::from([parent_hash]))) 141 | .collect(); 142 | self.metrics.blocks.set(self.blocks.len() as f64); 143 | self.metrics.witnesses.set(self.witnesses.len() as f64); 144 | removed 145 | } 146 | 147 | /// Discard all blocks that precede block number from the buffer. 
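    /// Blocks at exactly `block_number` are evicted as well (the loop keeps only entries
    /// strictly greater than it), together with their buffered descendants.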
148 | pub fn evict_old_blocks(&mut self, block_number: BlockNumber) { 149 | let mut block_hashes_to_remove = Vec::new(); 150 | 151 | // discard all blocks that are before the finalized number. 152 | while let Some(entry) = self.earliest_blocks.first_entry() { 153 | if *entry.key() > block_number { 154 | break 155 | } 156 | let block_hashes = entry.remove(); 157 | block_hashes_to_remove.extend(block_hashes); 158 | } 159 | 160 | // remove from other collections. 161 | for block_hash in &block_hashes_to_remove { 162 | // It's fine to call 163 | self.evict_block(block_hash); 164 | } 165 | 166 | self.evict_children(block_hashes_to_remove); 167 | self.metrics.blocks.set(self.blocks.len() as f64); 168 | self.metrics.witnesses.set(self.witnesses.len() as f64); 169 | } 170 | 171 | /// Remove block entry 172 | fn remove_from_earliest_blocks(&mut self, number: BlockNumber, hash: &BlockHash) { 173 | if let Some(entry) = self.earliest_blocks.get_mut(&number) { 174 | entry.remove(hash); 175 | if entry.is_empty() { 176 | self.earliest_blocks.remove(&number); 177 | } 178 | } 179 | } 180 | 181 | /// Remove from parent child connection. This method does not remove children. 182 | fn remove_from_parent(&mut self, parent_hash: BlockHash, hash: &BlockHash) { 183 | // remove from parent to child connection, but only for this block parent. 184 | if let Some(entry) = self.parent_to_child.get_mut(&parent_hash) { 185 | entry.remove(hash); 186 | // if set is empty remove block entry. 187 | if entry.is_empty() { 188 | self.parent_to_child.remove(&parent_hash); 189 | } 190 | } 191 | } 192 | 193 | /// Removes block from inner collections. 194 | /// This method will only remove the block if it's present inside `self.blocks`. 195 | /// The block might be missing from other collections, the method will only ensure that it has 196 | /// been removed. 197 | pub fn remove_block( 198 | &mut self, 199 | hash: &BlockHash, 200 | ) -> Option<(RecoveredBlock, ExecutionWitness)> { 201 | if !self.blocks.contains_key(hash) { 202 | return None 203 | } 204 | let witness = self.remove_witness(hash)?; 205 | let block = self.blocks.remove(hash).unwrap(); 206 | self.remove_from_earliest_blocks(block.number(), hash); 207 | self.remove_from_parent(block.parent_hash(), hash); 208 | self.lru.remove(hash); 209 | Some((block, witness)) 210 | } 211 | 212 | /// Evicts the block from inner collections. 213 | /// This method will only remove the block if it's present inside `self.blocks`. 214 | fn evict_block(&mut self, hash: &BlockHash) -> Option> { 215 | let block = self.blocks.remove(hash)?; 216 | self.witnesses.remove(hash); 217 | self.missing_bytecodes.remove(hash); 218 | self.remove_from_earliest_blocks(block.number(), hash); 219 | self.remove_from_parent(block.parent_hash(), hash); 220 | self.lru.remove(hash); 221 | Some(block) 222 | } 223 | 224 | /// Remove all children and their descendants for the given blocks and return them. 225 | fn remove_children( 226 | &mut self, 227 | parent_hashes: Vec, 228 | ) -> Vec<(RecoveredBlock, ExecutionWitness)> { 229 | // remove all parent child connection and all the child children blocks that are connected 230 | // to the discarded parent blocks. 231 | let mut remove_parent_children = parent_hashes; 232 | let mut removed_blocks = Vec::new(); 233 | while let Some(parent_hash) = remove_parent_children.pop() { 234 | // get this child blocks children and add them to the remove list. 
235 |             if let Some(parent_children) = self.parent_to_child.remove(&parent_hash) {
236 |                 // remove child from buffer
237 |                 for child_hash in &parent_children {
238 |                     if let Some((block, witness)) = self.remove_block(child_hash) {
239 |                         removed_blocks.push((block, witness));
240 |                     }
241 |                 }
242 |                 remove_parent_children.extend(parent_children);
243 |             }
244 |         }
245 |         removed_blocks
246 |     }
247 | 
248 |     /// Evict all children and their descendants for the given blocks.
249 |     fn evict_children(&mut self, parent_hashes: Vec<BlockHash>) {
250 |         // remove all parent-child connections and all the child blocks that are connected
251 |         // to the discarded parent blocks.
252 |         let mut remove_parent_children = parent_hashes;
253 |         while let Some(parent_hash) = remove_parent_children.pop() {
254 |             // get this block's children and add them to the remove list.
255 |             if let Some(parent_children) = self.parent_to_child.remove(&parent_hash) {
256 |                 // remove child from buffer
257 |                 for child_hash in &parent_children {
258 |                     self.evict_block(child_hash);
259 |                 }
260 |                 remove_parent_children.extend(parent_children);
261 |             }
262 |         }
263 |     }
264 | 
265 |     /// Remove witness from the buffer.
266 |     pub fn remove_witness(&mut self, block_hash: &BlockHash) -> Option<ExecutionWitness> {
267 |         // Remove the witness only if there are no missing bytecodes for it.
268 |         if self.missing_bytecodes.get(block_hash).is_none_or(|b| b.is_empty()) {
269 |             self.missing_bytecodes.remove(block_hash);
270 |             return self.witnesses.remove(block_hash)
271 |         }
272 |         None
273 |     }
274 | 
275 |     /// Update missing bytecodes on bytecode received.
276 |     /// Returns block hashes that are ready for insertion.
277 |     pub fn on_bytecode_received(&mut self, code_hash: B256) -> B256HashSet {
278 |         let mut block_hashes = B256HashSet::default();
279 |         self.missing_bytecodes.retain(|block_hash, missing| {
280 |             missing.remove(&code_hash);
281 |             if missing.is_empty() {
282 |                 block_hashes.insert(*block_hash);
283 |                 false
284 |             } else {
285 |                 true
286 |             }
287 |         });
288 |         block_hashes
289 |     }
290 | }
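// Usage sketch, assuming `buffer` and a freshly downloaded `code_hash` are in
// scope: once the last missing bytecode for a block arrives, its hash is
// reported as ready and `remove_witness` will now yield the witness.
//
//     for block_hash in buffer.on_bytecode_received(code_hash) {
//         let witness = buffer.remove_witness(&block_hash);
//         debug_assert!(witness.is_some());
//     }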
291 | 
292 | #[cfg(test)]
293 | mod tests {
294 |     use super::*;
295 |     use alloy_eips::BlockNumHash;
296 |     use alloy_primitives::BlockHash;
297 |     use reth_primitives_traits::RecoveredBlock;
298 |     use reth_testing_utils::generators::{self, random_block, BlockParams, Rng};
299 |     use std::collections::HashMap;
300 | 
301 |     /// Create random block with specified number and parent hash.
302 |     fn create_block<R: Rng>(
303 |         rng: &mut R,
304 |         number: u64,
305 |         parent: BlockHash,
306 |     ) -> RecoveredBlock<Block> {
307 |         let block =
308 |             random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() });
309 |         block.try_recover().unwrap()
310 |     }
311 | 
312 |     /// Insert block with default witness.
313 |     fn insert_block_with_witness(buffer: &mut BlockBuffer, block: RecoveredBlock<Block>) {
314 |         buffer.insert_witness(block.hash(), Default::default(), Default::default());
315 |         buffer.insert_block(block);
316 |     }
317 | 
318 |     /// Assert that all buffer collections have the same data length.
319 |     fn assert_buffer_lengths(buffer: &BlockBuffer, expected: usize) {
320 |         assert_eq!(buffer.blocks.len(), expected);
321 |         assert_eq!(buffer.lru.len(), expected);
322 |         assert_eq!(
323 |             buffer.parent_to_child.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()),
324 |             expected
325 |         );
326 |         assert_eq!(
327 |             buffer.earliest_blocks.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()),
328 |             expected
329 |         );
330 |     }
331 | 
332 |     /// Assert that the block was removed from all buffer collections.
333 |     fn assert_block_removal(
334 |         buffer: &BlockBuffer,
335 |         block: &RecoveredBlock<Block>,
336 |     ) {
337 |         assert!(!buffer.blocks.contains_key(&block.hash()));
338 |         assert!(buffer
339 |             .parent_to_child
340 |             .get(&block.parent_hash)
341 |             .and_then(|p| p.get(&block.hash()))
342 |             .is_none());
343 |         assert!(buffer
344 |             .earliest_blocks
345 |             .get(&block.number)
346 |             .and_then(|hashes| hashes.get(&block.hash()))
347 |             .is_none());
348 |     }
349 | 
350 |     #[test]
351 |     fn simple_insertion() {
352 |         let mut rng = generators::rng();
353 |         let parent = rng.random();
354 |         let block1 = create_block(&mut rng, 10, parent);
355 |         let mut buffer = BlockBuffer::new(3);
356 | 
357 |         buffer.insert_block(block1.clone());
358 |         assert_buffer_lengths(&buffer, 1);
359 |         assert_eq!(buffer.blocks.get(&block1.hash()), Some(&block1));
360 |     }
361 | 
362 |     #[test]
363 |     fn take_entire_chain_of_children() {
364 |         let mut rng = generators::rng();
365 | 
366 |         let main_parent_hash = rng.random();
367 |         let block1 = create_block(&mut rng, 10, main_parent_hash);
368 |         let block2 = create_block(&mut rng, 11, block1.hash());
369 |         let block3 = create_block(&mut rng, 12, block2.hash());
370 |         let parent4 = rng.random();
371 |         let block4 = create_block(&mut rng, 14, parent4);
372 | 
373 |         let mut buffer = BlockBuffer::new(5);
374 | 
375 |         insert_block_with_witness(&mut buffer, block1.clone());
376 |         insert_block_with_witness(&mut buffer, block2.clone());
377 |         insert_block_with_witness(&mut buffer, block3.clone());
378 |         insert_block_with_witness(&mut buffer, block4.clone());
379 | 
380 |         assert_buffer_lengths(&buffer, 4);
381 |         assert_eq!(buffer.blocks.get(&block4.hash()), Some(&block4));
382 |         assert_eq!(buffer.blocks.get(&block2.hash()), Some(&block2));
383 |         assert_eq!(buffer.blocks.get(&main_parent_hash), None);
384 | 
385 |         assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4));
386 |         assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1));
387 |         assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));
388 |         assert_eq!(
389 |             buffer.remove_block_with_children(main_parent_hash),
390 |             Vec::from_iter([block1, block2, block3].map(|b| (b, Default::default())))
391 |         );
392 |         assert_buffer_lengths(&buffer, 1);
393 |     }
394 | 
395 |     #[test]
396 |     fn take_all_multi_level_children() {
397 |         let mut rng = generators::rng();
398 | 
399 |         let main_parent_hash = rng.random();
400 |         let block1 = create_block(&mut rng, 10, main_parent_hash);
401 |         let block2 = create_block(&mut rng, 11, block1.hash());
402 |         let block3 = create_block(&mut rng, 11, block1.hash());
403 |         let block4 = create_block(&mut rng, 12, block2.hash());
404 | 
405 |         let mut buffer = BlockBuffer::new(5);
406 | 
407 |         insert_block_with_witness(&mut buffer, block1.clone());
408 |         insert_block_with_witness(&mut buffer, block2.clone());
409 |         insert_block_with_witness(&mut buffer, block3.clone());
410 |         insert_block_with_witness(&mut buffer, block4.clone());
411 | 
412 |         assert_buffer_lengths(&buffer, 4);
413 |         assert_eq!(
414 |             buffer
415 |                 .remove_block_with_children(main_parent_hash)
416 |                 .into_iter()
417 |                 .map(|(b, _)| (b.hash(), b))
418 |                 .collect::<HashMap<_, _>>(),
419 |             HashMap::from([
420 |                 (block1.hash(), block1),
421 |                 (block2.hash(), block2),
422 |                 (block3.hash(), block3),
423 |                 (block4.hash(), block4)
424 |             ])
425 |         );
426 |         assert_buffer_lengths(&buffer, 0);
427 |     }
428 | 
429 |     #[test]
430 |     fn take_block_with_children() {
431 |         let mut rng = generators::rng();
432 | 
433 |         let main_parent = BlockNumHash::new(9, rng.random());
434 |         let block1 = create_block(&mut rng, 10, main_parent.hash);
435 |         let block2 = create_block(&mut rng, 11, block1.hash());
436 |         let block3 = create_block(&mut rng, 11, block1.hash());
437 |         let block4 = create_block(&mut rng, 12, block2.hash());
438 | 
439 |         let mut buffer = BlockBuffer::new(5);
440 | 
441 |         insert_block_with_witness(&mut buffer, block1.clone());
442 |         insert_block_with_witness(&mut buffer, block2.clone());
443 |         insert_block_with_witness(&mut buffer, block3.clone());
444 |         insert_block_with_witness(&mut buffer, block4.clone());
445 | 
446 |         assert_buffer_lengths(&buffer, 4);
447 |         assert_eq!(
448 |             buffer
449 |                 .remove_block_with_children(block1.hash())
450 |                 .into_iter()
451 |                 .map(|(b, _)| (b.hash(), b))
452 |                 .collect::<HashMap<_, _>>(),
453 |             HashMap::from([
454 |                 (block1.hash(), block1),
455 |                 (block2.hash(), block2),
456 |                 (block3.hash(), block3),
457 |                 (block4.hash(), block4)
458 |             ])
459 |         );
460 |         assert_buffer_lengths(&buffer, 0);
461 |     }
462 | 
463 |     #[test]
464 |     fn remove_chain_of_children() {
465 |         let mut rng = generators::rng();
466 | 
467 |         let main_parent = BlockNumHash::new(9, rng.random());
468 |         let block1 = create_block(&mut rng, 10, main_parent.hash);
469 |         let block2 = create_block(&mut rng, 11, block1.hash());
470 |         let block3 = create_block(&mut rng, 12, block2.hash());
471 |         let parent4 = rng.random();
472 |         let block4 = create_block(&mut rng, 14, parent4);
473 | 
474 |         let mut buffer = BlockBuffer::new(5);
475 | 
476 |         buffer.insert_block(block1.clone());
477 |         buffer.insert_block(block2);
478 |         buffer.insert_block(block3);
479 |         buffer.insert_block(block4);
480 | 
481 |         assert_buffer_lengths(&buffer, 4);
482 |         buffer.evict_old_blocks(block1.number);
483 |         assert_buffer_lengths(&buffer, 1);
484 |     }
485 | 
486 |     #[test]
487 |     fn remove_all_multi_level_children() {
488 |         let mut rng = generators::rng();
489 | 
490 |         let main_parent = BlockNumHash::new(9, rng.random());
491 |         let block1 = create_block(&mut rng, 10, main_parent.hash);
492 |         let block2 = create_block(&mut rng, 11, block1.hash());
493 |         let block3 = create_block(&mut rng, 11, block1.hash());
494 |         let block4 = create_block(&mut rng, 12, block2.hash());
495 | 
496 |         let mut buffer = BlockBuffer::new(5);
497 | 
498 |         buffer.insert_block(block1.clone());
499 |         buffer.insert_block(block2);
500 |         buffer.insert_block(block3);
501 |         buffer.insert_block(block4);
502 | 
503 |         assert_buffer_lengths(&buffer, 4);
504 |         buffer.evict_old_blocks(block1.number);
505 |         assert_buffer_lengths(&buffer, 0);
506 |     }
507 | 
508 |     #[test]
509 |     fn remove_multi_chains() {
510 |         let mut rng = generators::rng();
511 | 
512 |         let main_parent = BlockNumHash::new(9, rng.random());
513 |         let block1 = create_block(&mut rng, 10, main_parent.hash);
514 |         let block1a = create_block(&mut rng, 10, main_parent.hash);
515 |         let block2 = create_block(&mut rng, 11, block1.hash());
516 |         let block2a = create_block(&mut rng, 11, block1.hash());
517 |         let random_parent1 = rng.random();
518 |         let random_block1 = create_block(&mut rng, 10, random_parent1);
519 |         let random_parent2 = rng.random();
520 |         let random_block2 = create_block(&mut rng, 11, random_parent2);
521 |         let random_parent3 = rng.random();
522 |         let random_block3 = create_block(&mut rng, 12, random_parent3);
523 | 
524 |         let mut buffer = BlockBuffer::new(10);
525 | 
526 |         buffer.insert_block(block1.clone());
527 |         buffer.insert_block(block1a.clone());
528 |         buffer.insert_block(block2.clone());
529 |         buffer.insert_block(block2a.clone());
530 |         buffer.insert_block(random_block1.clone());
531 |         buffer.insert_block(random_block2.clone());
532 |         buffer.insert_block(random_block3.clone());
533 | 
534 |         // check that random blocks are their own ancestor, and that chains have proper ancestors
535 |         assert_eq!(buffer.lowest_ancestor(&random_block1.hash()), Some(&random_block1));
536 |         assert_eq!(buffer.lowest_ancestor(&random_block2.hash()), Some(&random_block2));
537 |         assert_eq!(buffer.lowest_ancestor(&random_block3.hash()), Some(&random_block3));
538 | 
539 |         // descendants have ancestors
540 |         assert_eq!(buffer.lowest_ancestor(&block2a.hash()), Some(&block1));
541 |         assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1));
542 | 
543 |         // roots are themselves
544 |         assert_eq!(buffer.lowest_ancestor(&block1a.hash()), Some(&block1a));
545 |         assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));
546 | 
547 |         assert_buffer_lengths(&buffer, 7);
548 |         buffer.evict_old_blocks(10);
549 |         assert_buffer_lengths(&buffer, 2);
550 |     }
551 | 
552 |     #[test]
553 |     fn evict_with_gap() {
554 |         let mut rng = generators::rng();
555 | 
556 |         let main_parent = BlockNumHash::new(9, rng.random());
557 |         let block1 = create_block(&mut rng, 10, main_parent.hash);
558 |         let block2 = create_block(&mut rng, 11, block1.hash());
559 |         let block3 = create_block(&mut rng, 12, block2.hash());
560 |         let parent4 = rng.random();
561 |         let block4 = create_block(&mut rng, 13, parent4);
562 | 
563 |         let mut buffer = BlockBuffer::new(3);
564 | 
565 |         buffer.insert_block(block1.clone());
566 |         buffer.insert_block(block2.clone());
567 |         buffer.insert_block(block3.clone());
568 | 
569 |         // pre-eviction block1 is the root
570 |         assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1));
571 |         assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1));
572 |         assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));
573 | 
574 |         buffer.insert_block(block4.clone());
575 | 
576 |         assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4));
577 | 
578 |         // block1 gets evicted
579 |         assert_block_removal(&buffer, &block1);
580 | 
581 |         // check lowest ancestor results post eviction
582 |         assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block2));
583 |         assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block2));
584 |         assert_eq!(buffer.lowest_ancestor(&block1.hash()), None);
585 | 
586 |         assert_buffer_lengths(&buffer, 3);
587 |     }
588 | 
589 |     #[test]
590 |     fn simple_eviction() {
591 |         let mut rng = generators::rng();
592 | 
593 |         let main_parent = BlockNumHash::new(9, rng.random());
594 |         let block1 = create_block(&mut rng, 10, main_parent.hash);
595 |         let block2 = create_block(&mut rng, 11, block1.hash());
596 |         let block3 = create_block(&mut rng, 12, block2.hash());
597 |         let parent4 = rng.random();
598 |         let block4 = create_block(&mut rng, 13, parent4);
599 | 
600 |         let mut buffer = BlockBuffer::new(3);
601 | 
602 |         buffer.insert_block(block1.clone());
603 |         buffer.insert_block(block2);
604 |         buffer.insert_block(block3);
605 |         buffer.insert_block(block4);
606 | 
607 |         // block1, the oldest entry, gets evicted
608 |         assert_block_removal(&buffer, &block1);
609 | 
610 |         assert_buffer_lengths(&buffer, 3);
611 |     }
612 | }
--------------------------------------------------------------------------------
/crates/engine/src/tree/outcome.rs:
--------------------------------------------------------------------------------
1 | use alloy_primitives::B256;
2 | 
3 | /// The outcome of a tree operation.
4 | #[derive(Debug)]
5 | pub struct TreeOutcome<T> {
6 |     /// The outcome of the operation.
7 |     pub outcome: T,
8 |     /// An optional event to tell the caller to do something.
9 |     pub event: Option<TreeEvent>,
10 | }
11 | 
12 | impl<T> TreeOutcome<T> {
13 |     /// Create new tree outcome.
14 |     pub const fn new(outcome: T) -> Self {
15 |         Self { outcome, event: None }
16 |     }
17 | 
18 |     /// Set event on the outcome.
19 |     pub fn with_event(mut self, event: TreeEvent) -> Self {
20 |         self.event = Some(event);
21 |         self
22 |     }
23 | }
24 | 
25 | /// Events that are triggered by the tree chain.
26 | #[derive(Clone, Debug)]
27 | pub enum TreeEvent {
28 |     /// Tree action is needed.
29 |     TreeAction(TreeAction),
30 |     /// Data needs to be downloaded.
31 |     Download(DownloadRequest),
32 | }
33 | 
34 | impl TreeEvent {
35 |     /// Create download witness tree event.
36 |     pub fn download_witness(block_hash: B256) -> Self {
37 |         Self::Download(DownloadRequest::Witness { block_hash })
38 |     }
39 | 
40 |     /// Create download block tree event.
41 |     pub fn download_block(block_hash: B256) -> Self {
42 |         Self::Download(DownloadRequest::Block { block_hash })
43 |     }
44 | 
45 |     /// Create download finalized tree event.
46 |     pub fn download_finalized(block_hash: B256) -> Self {
47 |         Self::Download(DownloadRequest::Finalized { block_hash })
48 |     }
49 | 
50 |     /// Create make canonical tree event.
51 |     pub fn make_canonical(sync_target_head: B256) -> Self {
52 |         Self::TreeAction(TreeAction::MakeCanonical { sync_target_head })
53 |     }
54 | 
55 |     /// Return witness download target hash if the event is the [`DownloadRequest::Witness`]
56 |     /// variant of [`TreeEvent::Download`].
57 |     pub fn as_witness_download(&self) -> Option<B256> {
58 |         if let Self::Download(DownloadRequest::Witness { block_hash }) = self {
59 |             Some(*block_hash)
60 |         } else {
61 |             None
62 |         }
63 |     }
64 | }
65 | 
66 | /// The actions that can be performed on the tree.
67 | #[derive(Clone, Debug)]
68 | pub enum TreeAction {
69 |     /// Make target canonical.
70 |     MakeCanonical {
71 |         /// The sync target head hash
72 |         sync_target_head: B256,
73 |     },
74 | }
75 | 
76 | /// The download request.
77 | #[derive(Clone, Debug)]
78 | pub enum DownloadRequest {
79 |     /// Download block.
80 |     Block {
81 |         /// Target block hash.
82 |         block_hash: B256,
83 |     },
84 |     /// Download witness.
85 |     Witness {
86 |         /// Target block hash.
87 |         block_hash: B256,
88 |     },
89 |     /// Download finalized block with ancestors.
90 |     Finalized {
91 |         /// Target block hash.
92 |         block_hash: B256,
93 |     },
94 | }
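// Usage sketch, assuming an `outcome: TreeOutcome<T>` returned by a tree
// operation is in scope: callers inspect the attached event to decide what to
// download or canonicalize next.
//
//     if let Some(event) = outcome.event {
//         if let Some(block_hash) = event.as_witness_download() {
//             // schedule a witness download for `block_hash`
//         }
//     }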
--------------------------------------------------------------------------------
/crates/engine/src/tree/root.rs:
--------------------------------------------------------------------------------
1 | use alloy_primitives::B256;
2 | use alloy_rlp::{Decodable, Encodable};
3 | use itertools::Itertools;
4 | use rayon::prelude::*;
5 | use reth_trie::{
6 |     HashedPostState, Nibbles, TrieAccount, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE,
7 | };
8 | use reth_trie_sparse::{errors::SparseStateTrieResult, SparseStateTrie, SparseTrie};
9 | use std::sync::mpsc;
10 | 
11 | /// Compute the state root given a revealed sparse trie and hashed state update.
12 | pub fn calculate_state_root(
13 |     trie: &mut SparseStateTrie,
14 |     state: HashedPostState,
15 | ) -> SparseStateTrieResult<B256> {
16 |     // Update storage slots with new values and calculate storage roots.
17 |     let (storage_tx, storage_rx) = mpsc::channel();
18 | 
19 |     state
20 |         .storages
21 |         .into_iter()
22 |         .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address)))
23 |         .par_bridge()
24 |         .map(|(address, storage, storage_trie)| {
25 |             let mut storage_trie = storage_trie.unwrap_or_else(SparseTrie::revealed_empty);
26 | 
27 |             if storage.wiped {
28 |                 storage_trie.wipe()?;
29 |             }
30 |             for (hashed_slot, value) in
31 |                 storage.storage.into_iter().sorted_unstable_by_key(|(hashed_slot, _)| *hashed_slot)
32 |             {
33 |                 let nibbles = Nibbles::unpack(hashed_slot);
34 |                 if value.is_zero() {
35 |                     storage_trie.remove_leaf(&nibbles)?;
36 |                 } else {
37 |                     storage_trie
38 |                         .update_leaf(nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?;
39 |                 }
40 |             }
41 | 
42 |             storage_trie.root();
43 | 
44 |             SparseStateTrieResult::Ok((address, storage_trie))
45 |         })
46 |         .for_each_init(
47 |             || storage_tx.clone(),
48 |             |storage_tx, result| storage_tx.send(result).unwrap(),
49 |         );
50 |     drop(storage_tx);
51 |     for result in storage_rx {
52 |         let (address, storage_trie) = result?;
53 |         trie.insert_storage_trie(address, storage_trie);
54 |     }
55 | 
56 |     // Update accounts with new values
57 |     // TODO: upstream changes into reth so that `SparseStateTrie::update_account` handles this
58 |     let mut account_rlp_buf = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE);
59 |     for (hashed_address, account) in
60 |         state.accounts.into_iter().sorted_unstable_by_key(|(hashed_address, _)| *hashed_address)
61 |     {
62 |         let nibbles = Nibbles::unpack(hashed_address);
63 |         let account = account.unwrap_or_default();
64 |         let storage_root = if let Some(storage_trie) = trie.storage_trie_mut(&hashed_address) {
65 |             storage_trie.root()
66 |         } else if let Some(value) = trie.get_account_value(&hashed_address) {
67 |             TrieAccount::decode(&mut &value[..])?.storage_root
68 |         } else {
69 |             EMPTY_ROOT_HASH
70 |         };
71 | 
72 |         if account.is_empty() && storage_root == EMPTY_ROOT_HASH {
73 |             trie.remove_account_leaf(&nibbles)?;
74 |         } else {
75 |             account_rlp_buf.clear();
76 |             account.into_trie_account(storage_root).encode(&mut account_rlp_buf);
77 |             trie.update_account_leaf(nibbles, account_rlp_buf.clone())?;
78 |         }
79 |     }
80 | 
81 |     trie.root()
82 | }
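// Usage sketch, assuming `trie` was revealed from a block's witness and
// `hashed_state` is the `HashedPostState` produced by executing that block:
// storage roots are computed in parallel first, then account leaves are
// updated, and the result is checked against the header's state root.
//
//     let state_root = calculate_state_root(&mut trie, hashed_state)?;
//     assert_eq!(state_root, expected_state_root); // from the block header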
--------------------------------------------------------------------------------
/crates/evm/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "ress-evm"
3 | version.workspace = true
4 | edition.workspace = true
5 | rust-version.workspace = true
6 | license.workspace = true
7 | homepage.workspace = true
8 | repository.workspace = true
9 | 
10 | [lints]
11 | workspace = true
12 | 
13 | [dependencies]
14 | # ress
15 | ress-provider.workspace = true
16 | 
17 | # alloy
18 | alloy-primitives.workspace = true
19 | alloy-rlp.workspace = true
20 | alloy-trie.workspace = true
21 | alloy-eips.workspace = true
22 | 
23 | # reth
24 | reth-primitives.workspace = true
25 | reth-provider.workspace = true
26 | reth-evm.workspace = true
27 | reth-evm-ethereum.workspace = true
28 | reth-revm.workspace = true
29 | reth-trie-sparse.workspace = true
30 | 
31 | # misc
32 | tracing.workspace = true
--------------------------------------------------------------------------------
/crates/evm/src/db.rs:
--------------------------------------------------------------------------------
1 | //! EVM database implementation.
2 | 
3 | use alloy_eips::BlockNumHash;
4 | use alloy_primitives::{keccak256, Address, B256, U256};
5 | use alloy_rlp::Decodable;
6 | use alloy_trie::TrieAccount;
7 | use ress_provider::RessProvider;
8 | use reth_provider::ProviderError;
9 | use reth_revm::{bytecode::Bytecode, state::AccountInfo, Database};
10 | use reth_trie_sparse::SparseStateTrie;
11 | use tracing::trace;
12 | 
13 | /// EVM database implementation that uses a [`SparseStateTrie`] for account and storage data
14 | /// retrieval. Block hashes and bytecodes are retrieved from the [`RessProvider`].
15 | #[derive(Debug)]
16 | pub struct WitnessDatabase<'a> {
17 |     provider: RessProvider,
18 |     parent: BlockNumHash,
19 |     trie: &'a SparseStateTrie,
20 | }
21 | 
22 | impl<'a> WitnessDatabase<'a> {
23 |     /// Create new witness database.
24 |     pub fn new(provider: RessProvider, parent: BlockNumHash, trie: &'a SparseStateTrie) -> Self {
25 |         Self { provider, parent, trie }
26 |     }
27 | }
28 | 
29 | impl Database for WitnessDatabase<'_> {
30 |     /// The database error type.
31 |     type Error = ProviderError;
32 | 
33 |     /// Get basic account information.
34 |     fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
35 |         let hashed_address = keccak256(address);
36 |         trace!(target: "ress::evm", %address, %hashed_address, "retrieving account");
37 |         let Some(bytes) = self.trie.get_account_value(&hashed_address) else {
38 |             trace!(target: "ress::evm", %address, %hashed_address, "no account found");
39 |             return Ok(None)
40 |         };
41 |         let account = TrieAccount::decode(&mut bytes.as_slice())?;
42 |         let account_info = AccountInfo {
43 |             balance: account.balance,
44 |             nonce: account.nonce,
45 |             code_hash: account.code_hash,
46 |             code: None,
47 |         };
48 |         trace!(target: "ress::evm", %address, %hashed_address, ?account_info, "account retrieved");
49 |         Ok(Some(account_info))
50 |     }
51 | 
52 |     /// Get storage value of address at slot.
53 |     fn storage(&mut self, address: Address, slot: U256) -> Result<U256, Self::Error> {
54 |         let slot = B256::from(slot);
55 |         let hashed_address = keccak256(address);
56 |         let hashed_slot = keccak256(slot);
57 |         trace!(target: "ress::evm", %address, %hashed_address, %slot, %hashed_slot, "retrieving storage slot");
58 |         let value = match self.trie.get_storage_slot_value(&hashed_address, &hashed_slot) {
59 |             Some(value) => U256::decode(&mut value.as_slice())?,
60 |             None => U256::ZERO,
61 |         };
62 |         trace!(target: "ress::evm", %address, %hashed_address, %slot, %hashed_slot, %value, "storage slot retrieved");
63 |         Ok(value)
64 |     }
65 | 
66 |     /// Get account code by its hash.
67 |     fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
68 |         trace!(target: "ress::evm", %code_hash, "retrieving bytecode");
69 |         let bytecode = self.provider.get_bytecode(code_hash)?.ok_or_else(|| {
70 |             ProviderError::TrieWitnessError(format!("bytecode for {code_hash} not found"))
71 |         })?;
72 |         Ok(bytecode.0)
73 |     }
74 | 
75 |     /// Get block hash by block number.
76 |     fn block_hash(&mut self, block_number: u64) -> Result<B256, Self::Error> {
77 |         trace!(target: "ress::evm", block_number, parent = ?self.parent, "retrieving block hash");
78 |         self.provider
79 |             .block_hash(self.parent, block_number)
80 |             .ok_or(ProviderError::StateForNumberNotFound(block_number))
81 |     }
82 | }
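// Usage sketch, assuming `provider`, the parent block's `BlockNumHash`, and a
// revealed `trie` are in scope (`address` is a placeholder): the database
// serves revm queries purely from the witness plus persisted bytecodes.
//
//     let mut db = WitnessDatabase::new(provider, parent, &trie);
//     let maybe_account = db.basic(address)?;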
--------------------------------------------------------------------------------
/crates/evm/src/executor.rs:
--------------------------------------------------------------------------------
1 | //! EVM block executor implementation.
2 | 
3 | use alloy_eips::BlockNumHash;
4 | use ress_provider::RessProvider;
5 | use reth_evm::{
6 |     execute::{BlockExecutionError, BlockExecutor as _},
7 |     ConfigureEvm,
8 | };
9 | use reth_evm_ethereum::EthEvmConfig;
10 | use reth_primitives::{Block, Receipt, RecoveredBlock};
11 | use reth_provider::BlockExecutionOutput;
12 | use reth_revm::db::{states::bundle_state::BundleRetention, State};
13 | use reth_trie_sparse::SparseStateTrie;
14 | 
15 | use crate::db::WitnessDatabase;
16 | 
17 | /// An EVM block executor that uses reth's block executor to execute blocks
18 | /// using state from a [`SparseStateTrie`].
19 | #[allow(missing_debug_implementations)]
20 | pub struct BlockExecutor<'a> {
21 |     evm_config: EthEvmConfig,
22 |     state: State<WitnessDatabase<'a>>,
23 | }
24 | 
25 | impl<'a> BlockExecutor<'a> {
26 |     /// Instantiate new block executor with chain spec and witness database.
27 |     pub fn new(provider: RessProvider, parent: BlockNumHash, trie: &'a SparseStateTrie) -> Self {
28 |         let evm_config = EthEvmConfig::new(provider.chain_spec());
29 |         let db = WitnessDatabase::new(provider, parent, trie);
30 |         let state =
31 |             State::builder().with_database(db).with_bundle_update().without_state_clear().build();
32 |         Self { evm_config, state }
33 |     }
34 | 
35 |     /// Execute a block.
36 |     pub fn execute(
37 |         mut self,
38 |         block: &RecoveredBlock<Block>,
39 |     ) -> Result<BlockExecutionOutput<Receipt>, BlockExecutionError> {
40 |         let mut strategy = self.evm_config.executor_for_block(&mut self.state, block);
41 |         strategy.apply_pre_execution_changes()?;
42 |         for tx in block.transactions_recovered() {
43 |             strategy.execute_transaction(tx)?;
44 |         }
45 |         let result = strategy.apply_post_execution_changes()?;
46 |         self.state.merge_transitions(BundleRetention::PlainState);
47 |         Ok(BlockExecutionOutput { state: self.state.take_bundle(), result })
48 |     }
49 | }
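// Usage sketch, assuming the same inputs as `WitnessDatabase` plus a recovered
// `block`: the executor is consumed by `execute`, and the returned output
// carries the bundle state (for state root computation) and the receipts.
//
//     let executor = BlockExecutor::new(provider, parent, &trie);
//     let output = executor.execute(&block)?;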
--------------------------------------------------------------------------------
/crates/evm/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Ress evm implementation.
2 | 
3 | #![cfg_attr(not(test), warn(unused_crate_dependencies))]
4 | 
5 | mod db;
6 | pub use db::WitnessDatabase;
7 | 
8 | mod executor;
9 | pub use executor::BlockExecutor;
--------------------------------------------------------------------------------
/crates/network/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "ress-network"
3 | version.workspace = true
4 | edition.workspace = true
5 | rust-version.workspace = true
6 | license.workspace = true
7 | homepage.workspace = true
8 | repository.workspace = true
9 | 
10 | [lints]
11 | workspace = true
12 | 
13 | [dependencies]
14 | # reth
15 | reth-ress-protocol.workspace = true
16 | reth-primitives.workspace = true
17 | reth-network-api.workspace = true
18 | reth-network.workspace = true
19 | 
20 | # alloy
21 | alloy-primitives.workspace = true
22 | 
23 | # misc
24 | tokio.workspace = true
25 | tokio-stream.workspace = true
26 | futures.workspace = true
27 | tracing.workspace = true
28 | thiserror.workspace = true
--------------------------------------------------------------------------------
/crates/network/src/handle.rs:
--------------------------------------------------------------------------------
1 | use alloy_primitives::{Bytes, B256};
2 | use reth_network::NetworkHandle;
3 | use reth_primitives::{BlockBody, Header};
4 | use reth_ress_protocol::{GetHeaders, RessPeerRequest};
5 | use thiserror::Error;
6 | use tokio::sync::{mpsc, oneshot};
7 | use tracing::trace;
8 | 
9 | /// Ress networking handle.
10 | #[derive(Clone, Debug)]
11 | pub struct RessNetworkHandle {
12 |     /// Handle for interacting with the network.
13 |     network_handle: NetworkHandle,
14 |     /// Sender for forwarding peer requests.
15 |     peer_requests_sender: mpsc::UnboundedSender<RessPeerRequest>,
16 | }
17 | 
18 | impl RessNetworkHandle {
19 |     /// Create new network handle from reth's handle and peer connection.
20 |     pub fn new(
21 |         network_handle: NetworkHandle,
22 |         peer_requests_sender: mpsc::UnboundedSender<RessPeerRequest>,
23 |     ) -> Self {
24 |         Self { network_handle, peer_requests_sender }
25 |     }
26 | 
27 |     /// Return reference to reth's network handle.
28 |     pub fn inner(&self) -> &NetworkHandle {
29 |         &self.network_handle
30 |     }
31 | 
32 |     fn send_request(&self, request: RessPeerRequest) -> Result<(), PeerRequestError> {
33 |         self.peer_requests_sender.send(request).map_err(|_| PeerRequestError::ConnectionClosed)
34 |     }
35 | }
36 | 
37 | impl RessNetworkHandle {
38 |     /// Get block headers.
39 |     pub async fn fetch_headers(
40 |         &self,
41 |         request: GetHeaders,
42 |     ) -> Result<Vec<Header>, PeerRequestError> {
43 |         trace!(target: "ress::net", ?request, "requesting headers");
44 |         let (tx, rx) = oneshot::channel();
45 |         self.send_request(RessPeerRequest::GetHeaders { request, tx })?;
46 |         let response = rx.await.map_err(|_| PeerRequestError::RequestDropped)?;
47 |         trace!(target: "ress::net", ?request, "headers received");
48 |         Ok(response)
49 |     }
50 | 
51 |     /// Get block bodies.
52 |     pub async fn fetch_block_bodies(
53 |         &self,
54 |         request: Vec<B256>,
55 |     ) -> Result<Vec<BlockBody>, PeerRequestError> {
56 |         trace!(target: "ress::net", ?request, "requesting block bodies");
57 |         let (tx, rx) = oneshot::channel();
58 |         self.send_request(RessPeerRequest::GetBlockBodies { request: request.clone(), tx })?;
59 |         let response = rx.await.map_err(|_| PeerRequestError::RequestDropped)?;
60 |         trace!(target: "ress::net", ?request, "block bodies received");
61 |         Ok(response)
62 |     }
63 | 
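    // Usage sketch, assuming a `handle: RessNetworkHandle` in an async context
    // (the `GetHeaders` field names are assumptions, not confirmed here): the
    // request is forwarded to the manager and the call resolves once a peer
    // responds, or errors if the request is dropped.
    //
    //     let headers = handle.fetch_headers(GetHeaders { start_hash, limit: 1 }).await?;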
64 |     /// Get contract bytecode by code hash.
65 |     pub async fn fetch_bytecode(&self, code_hash: B256) -> Result<Bytes, PeerRequestError> {
66 |         trace!(target: "ress::net", %code_hash, "requesting bytecode");
67 |         let (tx, rx) = oneshot::channel();
68 |         self.send_request(RessPeerRequest::GetBytecode { code_hash, tx })?;
69 |         let response = rx.await.map_err(|_| PeerRequestError::RequestDropped)?;
70 |         trace!(target: "ress::net", %code_hash, "bytecode received");
71 |         Ok(response)
72 |     }
73 | 
74 |     /// Get state witness by block hash.
75 |     pub async fn fetch_witness(&self, block_hash: B256) -> Result<Vec<Bytes>, PeerRequestError> {
76 |         trace!(target: "ress::net", %block_hash, "requesting witness");
77 |         let (tx, rx) = oneshot::channel();
78 |         self.send_request(RessPeerRequest::GetWitness { block_hash, tx })?;
79 |         let response = rx.await.map_err(|_| PeerRequestError::RequestDropped)?;
80 |         trace!(target: "ress::net", %block_hash, "witness received");
81 |         Ok(response)
82 |     }
83 | }
84 | 
85 | /// Peer request errors.
86 | #[derive(Debug, Error)]
87 | pub enum PeerRequestError {
88 |     /// Request dropped.
89 |     #[error("Peer request dropped")]
90 |     RequestDropped,
91 | 
92 |     /// Connection closed.
93 |     #[error("Peer connection was closed")]
94 |     ConnectionClosed,
95 | }
--------------------------------------------------------------------------------
/crates/network/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Ress networking implementation.
2 | 
3 | #![cfg_attr(not(test), warn(unused_crate_dependencies))]
4 | 
5 | mod handle;
6 | pub use handle::*;
7 | 
8 | mod manager;
9 | pub use manager::RessNetworkManager;
--------------------------------------------------------------------------------
/crates/network/src/manager.rs:
--------------------------------------------------------------------------------
1 | use futures::StreamExt;
2 | use reth_network_api::PeerId;
3 | use reth_ress_protocol::{ProtocolEvent, RessPeerRequest};
4 | use std::{
5 |     collections::VecDeque,
6 |     future::Future,
7 |     pin::Pin,
8 |     task::{Context, Poll},
9 | };
10 | use tokio::sync::mpsc;
11 | use tokio_stream::wrappers::UnboundedReceiverStream;
12 | use tracing::{debug, trace};
13 | 
14 | /// Peer connection handle.
15 | #[derive(Debug)]
16 | struct ConnectionHandle {
17 |     peer_id: PeerId,
18 |     to_connection: mpsc::UnboundedSender<RessPeerRequest>,
19 | }
20 | 
21 | /// Network manager for forwarding requests to peer connections.
22 | #[derive(Debug)]
23 | pub struct RessNetworkManager {
24 |     protocol_events: UnboundedReceiverStream<ProtocolEvent>,
25 |     peer_requests: UnboundedReceiverStream<RessPeerRequest>,
26 |     connections: VecDeque<ConnectionHandle>,
27 |     pending_requests: VecDeque<RessPeerRequest>,
28 | }
29 | 
30 | impl RessNetworkManager {
31 |     /// Create new network manager.
32 |     pub fn new(
33 |         protocol_events: UnboundedReceiverStream<ProtocolEvent>,
34 |         peer_requests: UnboundedReceiverStream<RessPeerRequest>,
35 |     ) -> Self {
36 |         Self {
37 |             protocol_events,
38 |             peer_requests,
39 |             connections: VecDeque::new(),
40 |             pending_requests: VecDeque::new(),
41 |         }
42 |     }
43 | 
44 |     fn on_peer_request(&mut self, mut request: RessPeerRequest) {
45 |         // Rotate connections for peer requests
46 |         while let Some(connection) = self.connections.pop_front() {
47 |             trace!(target: "ress::net", peer_id = %connection.peer_id, ?request, "Sending request to peer");
48 |             match connection.to_connection.send(request) {
49 |                 Ok(()) => {
50 |                     self.connections.push_back(connection);
51 |                     return
52 |                 }
53 |                 Err(mpsc::error::SendError(request_)) => {
54 |                     request = request_;
55 |                     trace!(target: "ress::net", peer_id = %connection.peer_id, ?request, "Failed to send request, connection closed");
56 |                 }
57 |             }
58 |         }
59 |         trace!(target: "ress::net", ?request, "No connections are available");
60 |         self.pending_requests.push_back(request);
61 |     }
62 | }
63 | 
64 | impl Future for RessNetworkManager {
65 |     type Output = ();
66 | 
67 |     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
68 |         let this = self.get_mut();
69 |         loop {
70 |             if !this.connections.is_empty() && !this.pending_requests.is_empty() {
71 |                 let request = this.pending_requests.pop_front().unwrap();
72 |                 this.on_peer_request(request);
73 |                 continue
74 |             }
75 | 
76 |             if let Poll::Ready(Some(ProtocolEvent::Established {
77 |                 direction,
78 |                 peer_id,
79 |                 to_connection,
80 |             })) = this.protocol_events.poll_next_unpin(cx)
81 |             {
82 |                 debug!(target: "ress::net", %peer_id, %direction, "Peer connection established");
83 |                 this.connections.push_back(ConnectionHandle { peer_id, to_connection });
84 |                 continue
85 |             }
86 | 
87 |             if let Poll::Ready(Some(request)) = this.peer_requests.poll_next_unpin(cx) {
88 |                 this.on_peer_request(request);
89 |                 continue
90 |             }
91 | 
92 |             return Poll::Pending
93 |         }
94 |     }
95 | }
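// Usage sketch, assuming `events_rx` and `requests_rx` are the receiving
// halves of unbounded channels wired to the protocol handler and the
// `RessNetworkHandle`: the manager implements `Future<Output = ()>`, so it is
// typically spawned on the runtime. `on_peer_request` then rotates
// connections round-robin (pop, send, push to the back), lazily discarding
// closed ones.
//
//     let manager = RessNetworkManager::new(
//         UnboundedReceiverStream::new(events_rx),
//         UnboundedReceiverStream::new(requests_rx),
//     );
//     tokio::spawn(manager);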
--------------------------------------------------------------------------------
/crates/primitives/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "ress-primitives"
3 | version.workspace = true
4 | edition.workspace = true
5 | rust-version.workspace = true
6 | license.workspace = true
7 | homepage.workspace = true
8 | repository.workspace = true
9 | 
10 | [lints]
11 | workspace = true
12 | 
13 | [dependencies]
14 | alloy-primitives.workspace = true
15 | alloy-rlp.workspace = true
16 | alloy-trie.workspace = true
17 | 
18 | # `serde` feature
19 | serde = { workspace = true, optional = true }
20 | 
21 | [features]
22 | serde = ["dep:serde"]
--------------------------------------------------------------------------------
/crates/primitives/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Ress primitive types.
2 | 
3 | #![cfg_attr(not(test), warn(unused_crate_dependencies))]
4 | 
5 | pub mod witness;
6 | pub mod witness_rpc;
--------------------------------------------------------------------------------
/crates/primitives/src/witness.rs:
--------------------------------------------------------------------------------
1 | //! Execution witness type.
2 | 
3 | use alloy_primitives::{map::B256HashSet, Bytes};
4 | use alloy_rlp::Decodable;
5 | use alloy_trie::{nodes::TrieNode, TrieAccount, KECCAK_EMPTY};
6 | use std::sync::OnceLock;
7 | 
8 | /// Alias type representing execution state witness.
9 | /// Execution state witness is a collection of trie node preimages.
10 | pub type StateWitness = Vec<Bytes>;
11 | 
12 | /// Execution witness contains all data necessary to execute the block (except for bytecodes).
13 | /// That includes:
14 | /// - state witness - collection of all touched trie nodes which is used for state retrieval and
15 | ///   state root computation.
16 | #[derive(PartialEq, Eq, Clone, Debug, Default)]
17 | pub struct ExecutionWitness {
18 |     /// The state witness with touched trie nodes.
19 |     state_witness: StateWitness,
20 |     /// Size of RLP encoded state witness.
21 |     rlp_size_bytes: usize,
22 |     /// Lazy-loaded bytecode hashes.
23 |     bytecode_hashes: OnceLock<B256HashSet>,
24 | }
25 | 
26 | impl ExecutionWitness {
27 |     /// Create new [`ExecutionWitness`].
28 |     pub fn new(state_witness: StateWitness, rlp_size_bytes: usize) -> Self {
29 |         Self { state_witness, rlp_size_bytes, bytecode_hashes: OnceLock::new() }
30 |     }
31 | 
32 |     /// Returns reference to the state witness.
33 |     pub fn state_witness(&self) -> &StateWitness {
34 |         &self.state_witness
35 |     }
36 | 
37 |     /// Converts execution witness into state witness.
38 |     pub fn into_state_witness(self) -> StateWitness {
39 |         self.state_witness
40 |     }
41 | 
42 |     /// Returns the size of RLP encoded state witness in bytes.
43 |     pub fn rlp_size_bytes(&self) -> usize {
44 |         self.rlp_size_bytes
45 |     }
46 | 
47 |     /// Returns all code hashes found in the witness.
48 |     pub fn bytecode_hashes(&self) -> &B256HashSet {
49 |         self.bytecode_hashes.get_or_init(|| {
50 |             let mut bytecode_hashes = B256HashSet::default();
51 |             for encoded in &self.state_witness {
52 |                 if let Ok(TrieNode::Leaf(leaf)) = TrieNode::decode(&mut &encoded[..]) {
53 |                     if let Ok(account) = TrieAccount::decode(&mut &leaf.value[..]) {
54 |                         if account.code_hash != KECCAK_EMPTY {
55 |                             bytecode_hashes.insert(account.code_hash);
56 |                         }
57 |                     }
58 |                 }
59 |             }
60 |             bytecode_hashes
61 |         })
62 |     }
63 | }
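// Usage sketch, assuming `state_witness` (RLP-encoded trie node preimages)
// and its encoded size are in scope: code hashes are extracted lazily on
// first access and memoized in the `OnceLock`, so repeated calls are cheap.
//
//     let witness = ExecutionWitness::new(state_witness, rlp_size_bytes);
//     let code_hashes = witness.bytecode_hashes(); // computed once, then cached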
--------------------------------------------------------------------------------
/crates/primitives/src/witness_rpc.rs:
--------------------------------------------------------------------------------
1 | //! Types for the `debug` API.
2 | 
3 | use alloy_primitives::{map::B256HashMap, Bytes};
4 | 
5 | /// Represents the execution witness of a block. Contains an optional map of state preimages.
6 | #[derive(Clone, Debug, Default, PartialEq, Eq)]
7 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
8 | pub struct RpcExecutionWitness {
9 |     /// Map of all hashed trie nodes to their preimages that were required during the execution of
10 |     /// the block, including during state root recomputation.
11 |     ///
12 |     /// `keccak(rlp(node)) => rlp(node)`
13 |     pub state: B256HashMap<Bytes>,
14 |     /// Map of all contract codes (created / accessed) to their preimages that were required during
15 |     /// the execution of the block, including during state root recomputation.
16 |     ///
17 |     /// `keccak(bytecodes) => bytecodes`
18 |     pub codes: B256HashMap<Bytes>,
19 | }
20 | 
21 | impl RpcExecutionWitness {
22 |     /// Create new RPC execution witness from state and bytecodes.
23 |     pub fn new(state: B256HashMap<Bytes>, codes: B256HashMap<Bytes>) -> Self {
24 |         Self { state, codes }
25 |     }
26 | }
--------------------------------------------------------------------------------
/crates/provider/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "ress-provider"
3 | version.workspace = true
4 | edition.workspace = true
5 | rust-version.workspace = true
6 | license.workspace = true
7 | homepage.workspace = true
8 | repository.workspace = true
9 | 
10 | [lints]
11 | workspace = true
12 | 
13 | [dependencies]
14 | # alloy
15 | alloy-primitives.workspace = true
16 | alloy-eips.workspace = true
17 | 
18 | # reth
19 | reth-ress-protocol.workspace = true
20 | reth-primitives.workspace = true
21 | reth-chainspec.workspace = true
22 | reth-storage-errors.workspace = true
23 | reth-db = { workspace = true, features = ["mdbx"] }
24 | reth-db-api.workspace = true
25 | 
26 | # misc
27 | parking_lot.workspace = true
28 | eyre.workspace = true
29 | itertools.workspace = true
30 | tracing.workspace = true
31 | metrics.workspace = true
32 | 
33 | [dev-dependencies]
34 | derive_more.workspace = true
35 | tempfile = "3.8"
--------------------------------------------------------------------------------
/crates/provider/src/chain_state.rs:
--------------------------------------------------------------------------------
1 | use alloy_eips::BlockNumHash;
2 | use alloy_primitives::{
3 |     map::{B256HashMap, B256HashSet},
4 |     BlockHash, BlockNumber, Bytes, B256,
5 | };
6 | use itertools::Itertools;
7 | use parking_lot::RwLock;
8 | use reth_primitives::{Block, BlockBody, Header, RecoveredBlock, SealedBlock, SealedHeader};
9 | use std::{
10 |     collections::{btree_map, BTreeMap},
11 |     sync::Arc,
12 | };
13 | 
14 | /// In-memory blockchain tree state.
15 | /// Stores all validated blocks as well as keeps track of the ones
16 | /// that form the canonical chain.
17 | #[derive(Clone, Default, Debug)]
18 | pub struct ChainState(Arc<RwLock<ChainStateInner>>);
19 | 
20 | #[derive(Default, Debug)]
21 | struct ChainStateInner {
22 |     /// Canonical block hashes stored by respective block number.
23 |     canonical_hashes_by_number: BTreeMap<BlockNumber, BlockHash>,
24 |     /// __All__ validated blocks by block hash that are connected to the canonical chain.
25 |     ///
26 |     /// This includes blocks for all forks.
27 |     blocks_by_hash: B256HashMap<RecoveredBlock<Block>>,
28 |     /// __All__ block hashes stored by their number.
29 |     block_hashes_by_number: BTreeMap<BlockNumber, B256HashSet>,
30 |     /// Valid block witnesses by block hash.
31 |     witnesses: B256HashMap<Arc<Vec<Bytes>>>,
32 | }
33 | 
34 | impl ChainState {
35 |     /// Returns `true` if block hash is canonical.
36 |     pub fn is_hash_canonical(&self, hash: &BlockHash) -> bool {
37 |         self.0.read().canonical_hashes_by_number.values().contains(hash)
38 |     }
39 | 
40 |     /// Returns block hash for a given block number.
41 |     /// If no canonical hash is found, traverses parent hashes from the given block hash
42 |     /// to find an ancestor at the specified block number.
43 |     pub fn block_hash(&self, parent: BlockNumHash, number: BlockNumber) -> Option<BlockHash> {
44 |         let inner = self.0.read();
45 | 
46 |         // First traverse the ancestors and attempt to find the block number in executed blocks.
47 |         let mut ancestor = parent;
48 |         while let Some(block) = inner.blocks_by_hash.get(&ancestor.hash) {
49 |             if block.number == number {
50 |                 return Some(block.hash())
51 |             }
52 |             ancestor = block.parent_num_hash();
53 |         }
54 | 
55 |         // We exhausted all executed blocks, the target block must be canonical.
56 |         if number <= ancestor.number &&
57 |             inner.canonical_hashes_by_number.get(&ancestor.number) == Some(&ancestor.hash)
58 |         {
59 |             return inner.canonical_hashes_by_number.get(&number).cloned()
60 |         }
61 | 
62 |         None
63 |     }
64 | 
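    // Lookup sketch, assuming `chain_state`, a `parent` num/hash, and a target
    // `number` are in scope: the traversal above serves `BLOCKHASH`-style
    // queries correctly across forks, since it walks the caller's ancestry
    // before consulting the canonical index.
    //
    //     let maybe_hash = chain_state.block_hash(parent, number);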
65 |     /// Inserts canonical hash for block number.
66 |     pub fn insert_canonical_hash(&self, number: BlockNumber, hash: BlockHash) {
67 |         self.0.write().canonical_hashes_by_number.insert(number, hash);
68 |     }
69 | 
70 |     /// Remove canonical hash for block number if it matches.
71 |     pub fn remove_canonical_hash(&self, number: BlockNumber, hash: BlockHash) {
72 |         let mut this = self.0.write();
73 |         if let btree_map::Entry::Occupied(entry) = this.canonical_hashes_by_number.entry(number) {
74 |             if entry.get() == &hash {
75 |                 entry.remove();
76 |             }
77 |         }
78 |     }
79 | 
80 |     /// Return block number by hash.
81 |     pub fn block_number(&self, hash: &B256) -> Option<BlockNumber> {
82 |         self.map_recovered_block(hash, |b| b.number)
83 |     }
84 | 
85 |     /// Returns header by hash.
86 |     pub fn header(&self, hash: &BlockHash) -> Option<Header> {
87 |         self.map_recovered_block(hash, RecoveredBlock::clone_header)
88 |     }
89 | 
90 |     /// Returns sealed header by hash.
91 |     pub fn sealed_header(&self, hash: &BlockHash) -> Option<SealedHeader> {
92 |         self.map_recovered_block(hash, RecoveredBlock::clone_sealed_header)
93 |     }
94 | 
95 |     /// Returns block body by hash.
96 |     pub fn block_body(&self, hash: &BlockHash) -> Option<BlockBody> {
97 |         self.map_recovered_block(hash, |b| b.body().clone())
98 |     }
99 | 
100 |     /// Returns sealed block by hash.
101 |     pub fn sealed_block(&self, hash: &BlockHash) -> Option<SealedBlock> {
102 |         self.map_recovered_block(hash, RecoveredBlock::clone_sealed_block)
103 |     }
104 | 
105 |     /// Returns recovered block by hash.
106 |     pub fn recovered_block(&self, hash: &BlockHash) -> Option<RecoveredBlock<Block>> {
107 |         self.map_recovered_block(hash, Clone::clone)
108 |     }
109 | 
110 |     /// Returns witness by block hash.
111 |     pub fn witness(&self, hash: &BlockHash) -> Option<Arc<Vec<Bytes>>> {
112 |         self.0.read().witnesses.get(hash).cloned()
113 |     }
114 | 
115 |     /// Insert recovered block.
116 |     pub fn insert_block(&self, block: RecoveredBlock<Block>, maybe_witness: Option<Arc<Vec<Bytes>>>) {
117 |         let mut this = self.0.write();
118 |         let block_hash = block.hash();
119 |         this.block_hashes_by_number.entry(block.number).or_default().insert(block_hash);
120 |         this.blocks_by_hash.insert(block_hash, block);
121 |         if let Some(witness) = maybe_witness {
122 |             this.witnesses.insert(block_hash, witness);
123 |         }
124 |     }
125 | 
126 |     /// Remove all blocks before finalized as well as
127 |     /// all canonical block hashes before `finalized.number - 256`.
128 |     pub fn remove_blocks_on_finalized(&self, finalized_hash: &B256) {
129 |         let mut this = self.0.write();
130 |         if let Some(finalized) = this.blocks_by_hash.get(finalized_hash) {
131 |             let finalized_number = finalized.number;
132 | 
133 |             // Remove blocks before finalized.
134 |             while this
135 |                 .block_hashes_by_number
136 |                 .first_key_value()
137 |                 .is_some_and(|(number, _)| number < &finalized_number)
138 |             {
139 |                 let (_, block_hashes) = this.block_hashes_by_number.pop_first().unwrap();
140 |                 for block_hash in block_hashes {
141 |                     this.blocks_by_hash.remove(&block_hash);
142 |                     this.witnesses.remove(&block_hash);
143 |                 }
144 |             }
145 | 
146 |             // Remove canonical hashes before `finalized.number - 256`.
147 |             let last_block_hash_number = finalized_number.saturating_sub(256);
148 |             while this
149 |                 .canonical_hashes_by_number
150 |                 .first_key_value()
151 |                 .is_some_and(|(number, _)| number < &last_block_hash_number)
152 |             {
153 |                 this.canonical_hashes_by_number.pop_first();
154 |             }
155 |         }
156 |     }
157 | 
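    // Usage sketch, assuming a validated `block`, its `witness`
    // (`Arc<Vec<Bytes>>`), and head/finalized identifiers are in scope:
    // blocks are inserted as they validate, marked canonical on forkchoice
    // updates, and pruned as finality advances.
    //
    //     chain_state.insert_block(block, Some(witness));
    //     chain_state.insert_canonical_hash(head_number, head_hash);
    //     chain_state.remove_blocks_on_finalized(&finalized_hash);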
158 |     /// Returns recovered block by hash mapped to desired type.
159 |     fn map_recovered_block<F, R>(&self, hash: &BlockHash, to_type: F) -> Option<R>
160 |     where
161 |         F: FnOnce(&RecoveredBlock<Block>) -> R,
162 |     {
163 |         self.0.read().blocks_by_hash.get(hash).map(to_type)
164 |     }
165 | }
--------------------------------------------------------------------------------
/crates/provider/src/database.rs:
--------------------------------------------------------------------------------
1 | use alloy_primitives::{map::B256HashSet, B256};
2 | use itertools::Itertools;
3 | use metrics::Label;
4 | use reth_db::{
5 |     create_db, database_metrics::DatabaseMetrics, mdbx::DatabaseArguments, Database, DatabaseEnv,
6 |     DatabaseError,
7 | };
8 | use reth_db_api::{
9 |     cursor::DbCursorRO,
10 |     transaction::{DbTx, DbTxMut},
11 | };
12 | use reth_primitives::Bytecode;
13 | use std::{path::Path, sync::Arc};
14 | use tracing::*;
15 | 
16 | mod tables {
17 |     use alloy_primitives::B256;
18 |     use reth_db::{tables, TableSet, TableType, TableViewer};
19 |     use reth_db_api::table::TableInfo;
20 |     use reth_primitives::Bytecode;
21 |     use std::fmt;
22 | 
23 |     tables! {
24 |         /// Stores all contract bytecodes.
25 |         table Bytecodes {
26 |             type Key = B256;
27 |             type Value = Bytecode;
28 |         }
29 |     }
30 | }
31 | use tables::{Bytecodes, Tables};
32 | 
33 | /// Ress persisted database for storing bytecodes.
34 | #[derive(Clone, Debug)]
35 | pub struct RessDatabase {
36 |     database: Arc<DatabaseEnv>,
37 | }
38 | 
39 | impl RessDatabase {
40 |     /// Create new database at path.
41 |     pub fn new<P: AsRef<Path>>(path: P) -> eyre::Result<Self> {
42 |         Self::new_with_args(
43 |             path,
44 |             DatabaseArguments::default()
45 |                 .with_growth_step(Some(1024 * 1024 * 1024))
46 |                 .with_geometry_max_size(Some(64 * 1024 * 1024 * 1024)),
47 |         )
48 |     }
49 | 
50 |     /// Create new database at path with arguments.
51 |     pub fn new_with_args<P: AsRef<Path>>(path: P, args: DatabaseArguments) -> eyre::Result<Self> {
52 |         let database = create_db(path, args)?;
53 |         database.create_tables_for::<Tables>()?;
54 |         Ok(Self { database: Arc::new(database) })
55 |     }
56 | 
57 |     /// Check if bytecode exists in the database.
58 |     /// NOTE: find a better way to check this.
59 |     pub fn bytecode_exists(&self, code_hash: B256) -> Result<bool, DatabaseError> {
60 |         Ok(self.get_bytecode(code_hash)?.is_some())
61 |     }
62 | 
63 |     /// Get bytecode by code hash.
64 |     pub fn get_bytecode(&self, code_hash: B256) -> Result<Option<Bytecode>, DatabaseError> {
65 |         self.database.tx()?.get::<Bytecodes>(code_hash)
66 |     }
67 | 
68 |     /// Insert bytecode into the database.
69 |     pub fn insert_bytecode(
70 |         &self,
71 |         code_hash: B256,
72 |         bytecode: Bytecode,
73 |     ) -> Result<(), DatabaseError> {
74 |         let tx_mut = self.database.tx_mut()?;
75 |         tx_mut.put::<Bytecodes>(code_hash, bytecode)?;
76 |         tx_mut.commit()?;
77 |         Ok(())
78 |     }
79 | 
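    // Usage sketch, assuming a filesystem `path` and a fetched `(code_hash,
    // bytecode)` pair: the MDBX environment is created once and shared behind
    // an `Arc`, so cloning the handle across threads is cheap.
    //
    //     let db = RessDatabase::new(path)?;
    //     db.insert_bytecode(code_hash, bytecode)?;
    //     assert!(db.bytecode_exists(code_hash)?);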
80 |     /// Filter the collection of code hashes for the ones that are missing from the database.
81 |     pub fn missing_code_hashes(
82 |         &self,
83 |         code_hashes: B256HashSet,
84 |     ) -> Result<B256HashSet, DatabaseError> {
85 |         let mut missing = B256HashSet::default();
86 |         let tx = self.database.tx()?;
87 |         let mut cursor = tx.cursor_read::<Bytecodes>()?;
88 |         for code_hash in code_hashes.into_iter().sorted_unstable() {
89 |             if cursor.seek_exact(code_hash)?.is_none() {
90 |                 missing.insert(code_hash);
91 |             }
92 |         }
93 |         Ok(missing)
94 |     }
95 | 
96 |     #[allow(clippy::type_complexity)]
97 |     fn collect_metrics(&self) -> Result<Vec<(&'static str, f64, Vec<Label>)>, DatabaseError> {
98 |         self.database
99 |             .view(|tx| -> reth_db::mdbx::Result<_> {
100 |                 let table = Tables::Bytecodes.name();
101 |                 let table_label = Label::new("table", table);
102 |                 let table_db = tx.inner.open_db(Some(table))?;
103 |                 let stats = tx.inner.db_stat(&table_db)?;
104 | 
105 |                 let page_size = stats.page_size() as usize;
106 |                 let leaf_pages = stats.leaf_pages();
107 |                 let branch_pages = stats.branch_pages();
108 |                 let overflow_pages = stats.overflow_pages();
109 |                 let num_pages = leaf_pages + branch_pages + overflow_pages;
110 |                 let table_size = page_size * num_pages;
111 |                 let entries = stats.entries();
112 | 
113 |                 let metrics = Vec::from([
114 |                     ("db.table_size", table_size as f64, Vec::from([table_label.clone()])),
115 |                     (
116 |                         "db.table_pages",
117 |                         leaf_pages as f64,
118 |                         Vec::from([table_label.clone(), Label::new("type", "leaf")]),
119 |                     ),
120 |                     (
121 |                         "db.table_pages",
122 |                         branch_pages as f64,
123 |                         Vec::from([table_label.clone(), Label::new("type", "branch")]),
124 |                     ),
125 |                     (
126 |                         "db.table_pages",
127 |                         overflow_pages as f64,
128 |                         Vec::from([table_label.clone(), Label::new("type", "overflow")]),
129 |                     ),
130 |                     ("db.table_entries", entries as f64, Vec::from([table_label])),
131 |                 ]);
132 |                 Ok(metrics)
133 |             })?
134 |             .map_err(|e| DatabaseError::Read(e.into()))
135 |     }
136 | }
137 | 
138 | impl DatabaseMetrics for RessDatabase {
139 |     fn report_metrics(&self) {
140 |         for (name, value, labels) in self.gauge_metrics() {
141 |             metrics::gauge!(name, labels).set(value);
142 |         }
143 |     }
144 | 
145 |     fn gauge_metrics(&self) -> Vec<(&'static str, f64, Vec