├── .env.example ├── .github └── workflows │ └── pr.yml ├── .gitignore ├── .vscode └── settings.json ├── Cargo.lock ├── Cargo.toml ├── Justfile ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── analyze_blocks.py ├── bin ├── client-eth-agg │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ └── main.rs ├── client-eth-subblock │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ └── main.rs ├── client-eth │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ └── main.rs └── host │ ├── Cargo.toml │ ├── build.rs │ └── src │ ├── cli.rs │ ├── main.rs │ └── subblock.rs ├── crates ├── executor │ ├── client │ │ ├── Cargo.toml │ │ └── src │ │ │ ├── custom.rs │ │ │ ├── error.rs │ │ │ ├── io.rs │ │ │ ├── lib.rs │ │ │ └── utils.rs │ └── host │ │ ├── Cargo.toml │ │ └── src │ │ ├── error.rs │ │ └── lib.rs ├── mpt │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── mpt.rs ├── primitives │ ├── Cargo.toml │ └── src │ │ ├── account_proof.rs │ │ ├── chain_spec.rs │ │ ├── lib.rs │ │ └── rkyv.rs └── storage │ └── rpc-db │ ├── Cargo.toml │ └── src │ └── lib.rs ├── rust-toolchain.toml └── rustfmt.toml /.env.example: -------------------------------------------------------------------------------- 1 | RPC_1= 2 | RPC_10= 3 | RPC_59144= 4 | RPC_11155111= 5 | # environment variables for prover network 6 | SP1_PROVER=network 7 | SP1_PRIVATE_KEY= 8 | -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | name: "PR" 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | pull_request: 8 | 9 | concurrency: 10 | group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | lock-files: 15 | name: "Check lock files" 16 | runs-on: ["runs-on", "runner=8cpu-linux-x64", "run-id=${{ github.run_id }}"] 17 | env: 18 | CARGO_NET_GIT_FETCH_WITH_CLI: "true" 19 | steps: 20 | - name: "Checkout sources" 21 | uses: 
"actions/checkout@v4" 22 | 23 | - name: "Update lock files" 24 | run: | 25 | cargo tree 26 | (cd ./bin/client-eth && cargo tree) 27 | (cd ./bin/client-op && cargo tree) 28 | (cd ./bin/client-linea && cargo tree) 29 | (cd ./bin/client-sepolia && cargo tree) 30 | 31 | - name: "Assert no changes" 32 | run: | 33 | if [ -n "$(git status --porcelain)" ]; then 34 | echo "Lock files not up to date" 35 | exit 1 36 | fi 37 | 38 | deps-semver: 39 | name: "Check upstream SemVer violations" 40 | runs-on: ["runs-on", "runner=8cpu-linux-x64", "run-id=${{ github.run_id }}"] 41 | steps: 42 | - name: "Checkout sources" 43 | uses: "actions/checkout@v4" 44 | 45 | - name: "Install sp1up" 46 | run: | 47 | curl -L https://sp1.succinct.xyz | bash 48 | echo "$HOME/.sp1/bin" >> $GITHUB_PATH 49 | 50 | - name: "Install SP1 toolchain" 51 | run: | 52 | sp1up 53 | 54 | - name: "Remove lock files" 55 | run: | 56 | find -name Cargo.lock -type f -exec rm {} \; 57 | 58 | - name: "Build without lock files" 59 | run: | 60 | cargo build --all --all-targets 61 | 62 | fmt: 63 | name: "Check code format" 64 | runs-on: ["runs-on", "runner=8cpu-linux-x64", "run-id=${{ github.run_id }}"] 65 | steps: 66 | - name: "Checkout sources" 67 | uses: "actions/checkout@v4" 68 | 69 | - name: "Setup stable toolchain" 70 | uses: "actions-rs/toolchain@v1" 71 | with: 72 | toolchain: "1.85.0" 73 | profile: "minimal" 74 | components: "rustfmt" 75 | override: true 76 | 77 | - name: "Check Rust format" 78 | run: | 79 | cargo fmt --all -- --check 80 | 81 | clippy: 82 | name: "Run clippy lints" 83 | runs-on: ["runs-on", "runner=8cpu-linux-x64", "run-id=${{ github.run_id }}"] 84 | steps: 85 | - name: "Checkout sources" 86 | uses: "actions/checkout@v4" 87 | 88 | - name: "Install sp1up" 89 | run: | 90 | curl -L https://sp1.succinct.xyz | bash 91 | echo "$HOME/.sp1/bin" >> $GITHUB_PATH 92 | 93 | - name: "Install SP1 toolchain" 94 | run: | 95 | sp1up 96 | 97 | # This step is necessary to generate the ELF files. 
98 | - name: "Build" 99 | run: | 100 | cargo build --all --all-targets 101 | 102 | - name: "Run clippy lints" 103 | run: | 104 | cargo clippy --all --all-targets -- -D warnings 105 | 106 | tests: 107 | name: "Run tests" 108 | runs-on: 109 | ["runs-on", "runner=64cpu-linux-x64", "run-id=${{ github.run_id }}"] 110 | env: 111 | CARGO_NET_GIT_FETCH_WITH_CLI: "true" 112 | steps: 113 | - name: "Checkout sources" 114 | uses: "actions/checkout@v4" 115 | 116 | - name: "Install sp1up" 117 | run: | 118 | curl -L https://sp1.succinct.xyz | bash 119 | echo "$HOME/.sp1/bin" >> $GITHUB_PATH 120 | 121 | - name: "Install SP1 toolchain" 122 | run: | 123 | sp1up 124 | 125 | - name: "Set up test fixture" 126 | run: | 127 | git clone https://github.com/succinctlabs/rsp-tests --branch 2025-01-18 --depth 1 ../rsp-tests 128 | cd ../rsp-tests/ 129 | docker compose up -d 130 | 131 | - name: "Use local test fixture" 132 | run: | 133 | echo "RPC_1=http://localhost:9545/main/evm/1" >> $GITHUB_ENV 134 | echo "RPC_10=http://localhost:9545/main/evm/10" >> $GITHUB_ENV 135 | echo "RPC_59144=http://localhost:9545/main/evm/59144" >> $GITHUB_ENV 136 | echo "RPC_11155111=http://localhost:9545/main/evm/11155111" >> $GITHUB_ENV 137 | 138 | - name: "Run tests" 139 | run: | 140 | export RUST_LOG=info 141 | cargo test --all -- --nocapture 142 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Cargo build 2 | **/target 3 | 4 | # Cargo config 5 | .cargo 6 | 7 | # Profile-guided optimization 8 | /tmp 9 | pgo-data.profdata 10 | 11 | # MacOS nuisances 12 | .DS_Store 13 | 14 | # Proofs 15 | **/proof-with-pis.json 16 | **/proof-with-io.json 17 | latest_proof.json 18 | 19 | # Env 20 | .env 21 | 22 | # SP1 ELF files 23 | elf 24 | 25 | **.csv 26 | 27 | input/ 28 | -------------------------------------------------------------------------------- /.vscode/settings.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "editor.inlineSuggest.enabled": true, 3 | "[rust]": { 4 | "editor.defaultFormatter": "rust-lang.rust-analyzer", 5 | "editor.formatOnSave": true, 6 | }, 7 | "[typescript][typescriptreact]": { 8 | "editor.defaultFormatter": "esbenp.prettier-vscode", 9 | "editor.formatOnSave": true, 10 | "editor.codeActionsOnSave": { 11 | "source.fixAll.eslint": "explicit" 12 | } 13 | }, 14 | "editor.rulers": [ 15 | 100 16 | ], 17 | "rust-analyzer.check.overrideCommand": [ 18 | "cargo", 19 | "clippy", 20 | // "--workspace", 21 | "--message-format=json", 22 | "--all-features", 23 | "--all-targets", 24 | "--", 25 | "-A", 26 | "incomplete-features" 27 | ], 28 | "rust-analyzer.linkedProjects": [ 29 | "${workspaceFolder}/Cargo.toml", 30 | "${workspaceFolder}/bin/client-eth-subblock/Cargo.toml", 31 | "${workspaceFolder}/bin/client-eth-agg/Cargo.toml", 32 | "${workspaceFolder}/bin/client-eth/Cargo.toml", 33 | ], 34 | "go.formatTool": "gofmt", 35 | "go.lintTool": "golangci-lint", 36 | "go.lintFlags": [ 37 | "--fast" 38 | ], 39 | "[go]": { 40 | "editor.formatOnSave": true, 41 | "editor.codeActionsOnSave": { 42 | "source.organizeImports": "explicit" 43 | } 44 | }, 45 | "go.vetFlags": [ 46 | "-all" 47 | ], 48 | "go.useLanguageServer": true, 49 | "go.lintOnSave": "package", 50 | } 51 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | # TODO: use public sp1 version! (and new patches?) 
2 | 3 | [workspace] 4 | members = [ 5 | "bin/host", 6 | "crates/executor/client", 7 | "crates/executor/host", 8 | "crates/mpt", 9 | "crates/primitives", 10 | "crates/storage/rpc-db", 11 | ] 12 | exclude = [] 13 | resolver = "2" 14 | 15 | [profile.release-with-debug] 16 | inherits = "release" 17 | debug-assertions = true 18 | debug = true 19 | 20 | 21 | [workspace.package] 22 | edition = "2021" 23 | license = "MIT OR Apache-2.0" 24 | authors = ["puma314", "jtguibas", "rkrasiuk"] 25 | repository = "https://github.com/moongate-forks/minimal-reth" 26 | homepage = "https://github.com/moongate-forks/minimal-reth" 27 | exclude = ["**/target"] 28 | version = "0.1.0" 29 | 30 | [workspace.dependencies] 31 | tracing = { version = "0.1.40", default-features = false } 32 | cfg-if = "1.0.0" 33 | spin = { version = "0.9.8", features = ["mutex"] } 34 | async-trait = "0.1.80" 35 | tokio = { version = "1.21", default-features = false, features = [ 36 | "rt", 37 | "rt-multi-thread", 38 | ] } 39 | serde_json = "1.0.94" 40 | serde = { version = "1.0", default-features = false, features = ["derive"] } 41 | futures = "0.3" 42 | sha2 = "0.10.8" 43 | url = "2.3" 44 | thiserror = "1.0.61" 45 | hex-literal = "0.4.1" 46 | rayon = "1.10.0" 47 | rlp = "0.5.2" 48 | bincode = "1.3.3" 49 | ruint = { git = "https://github.com/yuwen01/uint", branch = "yuwen/rkyv" } 50 | rkyv = "0.8.10" 51 | eyre = "0.6.12" 52 | reqwest = "0.12.9" 53 | base64 = "0.22.1" 54 | hex = "0.4.3" 55 | chrono = "0.4.39" 56 | tracing-subscriber = "0.3.18" 57 | dotenv = "0.15.0" 58 | clap = { version = "4.5.7", features = ["derive", "env"] } 59 | csv = "1.1" 60 | lazy_static = "1.5.0" 61 | itertools = "0.13.0" 62 | 63 | 64 | # workspace 65 | rsp-rpc-db = { path = "./crates/storage/rpc-db" } 66 | rsp-client-executor = { path = "./crates/executor/client" } 67 | rsp-host-executor = { path = "./crates/executor/host" } 68 | rsp-mpt = { path = "./crates/mpt" } 69 | rsp-primitives = { path = "./crates/primitives" } 70 | 71 | # reth 72 
| # reth-primitives = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false, features = [ 73 | # "alloy-compat", 74 | # "optimism", 75 | # "std", 76 | # ] } 77 | # reth-codecs = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 78 | # reth-consensus = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 79 | # reth-evm = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 80 | # reth-revm = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false, features = [ 81 | # "std", 82 | # ] } 83 | # reth-node-ethereum = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 84 | # reth-evm-ethereum = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false, features = [ 85 | # "std", 86 | # ] } 87 | # reth-evm-optimism = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false, features = [ 88 | # "optimism", 89 | # ] } 90 | # reth-storage-errors = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false, features = [ 91 | # "std", 92 | # ] } 93 | # reth-trie = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 94 | # reth-trie-common = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 95 | # reth-chainspec = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 96 | # reth-optimism-chainspec = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 97 | # reth-execution-errors = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 98 | # reth-execution-types = { git = 
"https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 99 | # reth-db = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 100 | # reth-errors = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 101 | # reth-ethereum-consensus = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false } 102 | # reth-optimism-consensus = { git = "https://github.com/sp1-patches/reth", tag = "rsp-20240830", default-features = false, features = [ 103 | # "optimism", 104 | # ] } 105 | 106 | reth-primitives = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = [ 107 | "alloy-compat", 108 | "optimism", 109 | "std", 110 | ] } 111 | reth-codecs = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 112 | reth-consensus = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 113 | reth-evm = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 114 | reth-revm = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = [ 115 | "std", 116 | ] } 117 | reth-node-ethereum = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 118 | reth-evm-ethereum = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = [ 119 | "std", 120 | ] } 121 | reth-evm-optimism = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = [ 122 | "optimism", 123 | ] } 124 | reth-storage-errors = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = [ 125 | "std", 126 | 
] } 127 | reth-trie = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = ["serde"] } 128 | reth-trie-common = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 129 | reth-chainspec = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock",default-features = false } 130 | reth-optimism-chainspec = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 131 | reth-optimism-forks = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 132 | reth-execution-errors = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 133 | reth-execution-types = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 134 | reth-db = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false } 135 | reth-errors = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock",default-features = false } 136 | reth-ethereum-consensus = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock",default-features = false } 137 | reth-optimism-consensus = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = [ 138 | "optimism", 139 | ] } 140 | 141 | 142 | # revm 143 | revm = { version = "14.0.0", features = [ 144 | "optimism", 145 | "std", 146 | "serde", 147 | "kzg-rs", 148 | ], default-features = false } 149 | revm-primitives = { version = "9.0.0", features = [ 150 | "std", 151 | "serde", 152 | ], default-features = false } 153 | revm-inspectors = "0.6" 154 | revm-interpreter = { version = "=10.0.1", default-features = false } 155 | revm-precompile = { version = "=11.0.1", default-features = false } 156 | 157 | # revm = { 
path = "../revm/crates/revm", features = [ 158 | # "optimism", 159 | # "std", 160 | # "serde", 161 | # "kzg-rs", 162 | # ], default-features = false } 163 | # revm-primitives = { path = "../revm/crates/primitives", features = [ 164 | # "std", 165 | # "serde", 166 | # ], default-features = false } 167 | # revm-interpreter = { path = "../revm/crates/interpreter", default-features = false } 168 | # revm-precompile = { path = "../revm/crates/precompile", default-features = false } 169 | 170 | # alloy 171 | # alloy-primitives = { version = "0.8.4", features = ["sha3-keccak"] } 172 | alloy-primitives = { version = "0.8.20", features = ["sha3-keccak"] } 173 | alloy-provider = { version = "0.3", default-features = false, features = [ 174 | "reqwest", 175 | "reqwest-rustls-tls", 176 | ] } 177 | alloy-rpc-types = { version = "0.3", default-features = false, features = [ 178 | "eth", 179 | ] } 180 | alloy-rlp = "0.3.4" 181 | alloy-consensus = { version = "0.3", default-features = false } 182 | alloy-transport = { version = "0.3" } 183 | alloy-transport-http = { version = "0.3", features = [ 184 | "reqwest-rustls-tls", 185 | ], default-features = false } 186 | alloy-eips = { version = "0.3", default-features = false } 187 | alloy-trie = "0.5.0" 188 | 189 | [workspace.lints] 190 | rust.missing_debug_implementations = "warn" 191 | rust.unreachable_pub = "warn" 192 | rust.unused_must_use = "deny" 193 | rust.rust_2018_idioms = { level = "deny", priority = -1 } 194 | rustdoc.all = "warn" 195 | -------------------------------------------------------------------------------- /Justfile: -------------------------------------------------------------------------------- 1 | # Justfile 2 | 3 | # Recipe to run the rsp CLI for a particular block and chain id. 
4 | run-block block_number chain_id: 5 | cargo run --release --bin rsp -- --block-number {{block_number}} --chain-id {{chain_id}} 6 | 7 | # Usage: 8 | # just run-block 9 | 10 | # Example: 11 | # just run-block 20526624 1 12 | 13 | # Recipe to run the rsp CLI for a range of blocks. 14 | run-blocks start_block end_block chain_id: 15 | #!/usr/bin/env bash 16 | echo "Running command for block numbers from {{start_block}} to {{end_block}} on chain ID: {{chain_id}}" 17 | for ((block_number={{start_block}}; block_number<={{end_block}}; block_number++)); do 18 | echo "Running for block number $block_number" 19 | cargo run --release --bin rsp -- --block-number "$block_number" --chain-id {{chain_id}} 20 | done 21 | 22 | # Usage: 23 | # just run-blocks 24 | 25 | # Example: 26 | # just run-blocks 20526624 20526630 1 27 | 28 | # Recipe to run the rsp CLI (with tracing) for a block and chain id. 29 | trace-block block chain_id: 30 | TRACE_FILE=trace_{{block}}_{{chain_id}}.log cargo run --release --bin rsp -- --block-number {{block}} --chain-id {{chain_id}} 31 | cargo prove --trace 32 | 33 | # Recipe to run the rsp CLI on the latest block in a loop at the given interval and submit proving times to ETH proofs. 34 | run-eth-proofs cluster_id="1" sleep_time="900": 35 | #!/usr/bin/env bash 36 | 37 | while true; do 38 | RESPONSE=$(curl -s \ 39 | -X POST \ 40 | -H "Content-Type: application/json" \ 41 | --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ 42 | "$RPC_URL") 43 | BLOCK_NUMBER=$((16#$(echo $RESPONSE | grep -o '"result":"[^"]*"' | cut -d'"' -f4 | sed 's/0x//'))) 44 | echo "Latest block number: $BLOCK_NUMBER" 45 | 46 | ROUNDED_BLOCK=$((BLOCK_NUMBER - (BLOCK_NUMBER % 100))) 47 | echo "Rounded block number: $ROUNDED_BLOCK" 48 | 49 | echo "Running rsp..."
50 | SP1_PROVER=cuda cargo run --bin rsp --release -F cuda -- --block-number $ROUNDED_BLOCK --eth-proofs-cluster-id {{cluster_id}} --rpc-url $RPC_URL --prove 51 | 52 | echo "Sleeping for $(({{sleep_time}} / 60)) minutes..." 53 | sleep {{sleep_time}} 54 | done 55 | 56 | # Usage: 57 | # just run-eth-proofs 58 | 59 | # Example: 60 | # just run-eth-proofs 5 600 61 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2023 Succinct Labs 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2023 Succinct Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Reth Succinct Processor (RSP): Subblock POC 2 | 3 | > [!CAUTION] 4 | > 5 | > This repository is still an active work-in-progress and is not formally audited or meant for production usage. 6 | 7 | ## Overview 8 | 9 | A proof of concept system for generating zero-knowledge proofs of EVM block execution using [Reth](https://github.com/paradigmxyz/reth) in real time (Sub 12 seconds). Intended for use with Succinct's Prover Network, for ultra low-latency proofs. 10 | 11 | In order to minimize latency, Ethereum blocks are split up by transaction into several subblocks. 12 | Each subblock proof can be generated in parallel, and then aggregated into a single proof. 13 | 14 | ## Getting Started 15 | 16 | To build and execute a monolithic SP1 program for a given block, run: 17 | 18 | ```bash 19 | cargo run --release --bin rsp -- --block-number <block_number> --chain-id 1 20 | ``` 21 | 22 | To build and execute the subblock and aggregation SP1 programs for a given block, run: 23 | 24 | ```bash 25 | cargo run --release --bin subblock -- --block-number <block_number> --chain-id 1 26 | ``` 27 | 28 | Note that neither of these commands will actually generate proofs. They will only build the executables 29 | and optionally execute them in the SP1 zkVM. 30 | 31 | Run the following command for more details on the CLI. 32 | 33 | ```bash 34 | cargo run --release --bin subblock -- --help 35 | ``` 36 | 37 | ## Subblock constraint overview 38 | 39 | Each subblock uses the SP1 program in [`bin/client-eth-subblock`](bin/client-eth-subblock).
The subblock program takes as input: 40 | 41 | 1. The subblock to execute. This takes the form of a normal block, except only the transactions contained in the subblock are included. 42 | 2. The parent state. This contains all the state that is needed to execute the subblock, including any state modified from previous subblocks. 43 | 3. Other metadata about the subblock. 44 | 45 | The subblock program then executes the subblock and returns the new state root, logs bloom, and transaction receipts. 46 | 47 | The aggregation program takes as input: 48 | 49 | 1. Proofs for all the subblocks. 50 | 2. The parent state root. 51 | 3. The current block header. 52 | 4. The current block body. 53 | 54 | The aggregation program then verifies the proofs and asserts that the public values of all the subblocks are consistent with the block passed into the aggregation program. It then commits the current block hash, the parent block hash, and the current block's body as public values. 55 | -------------------------------------------------------------------------------- /analyze_blocks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Analyzes the gas usage of transactions in specific blocks listed in a CSV file. 4 | 5 | Reads block numbers from 'evaluation_blocks.csv' and outputs analysis to 6 | 'evaluation_blocks_gas_analysis.csv'. 
7 | """ 8 | 9 | import sys 10 | import os 11 | import csv 12 | from web3 import Web3 13 | from dotenv import load_dotenv 14 | 15 | def main(): 16 | # Load environment variables from .env file 17 | load_dotenv() 18 | 19 | # Define the input CSV filename 20 | input_csv_filename = "evaluation_blocks.csv" 21 | blocks_to_process = [] 22 | 23 | # Read block numbers from the input CSV file 24 | try: 25 | with open(input_csv_filename, 'r', newline='') as infile: 26 | reader = csv.reader(infile) 27 | header = next(reader) # Skip header row 28 | print(f"Reading blocks from {input_csv_filename}, skipping header: {header}") 29 | for row in reader: 30 | if row: # Ensure row is not empty 31 | try: 32 | # Assuming block number is in the first column 33 | block_number = int(row[0]) 34 | blocks_to_process.append(block_number) 35 | except (ValueError, IndexError): 36 | print(f"Warning: Skipping invalid row in {input_csv_filename}: {row}", file=sys.stderr) 37 | if not blocks_to_process: 38 | print(f"Error: No valid block numbers found in {input_csv_filename}.") 39 | sys.exit(1) 40 | print(f"Found {len(blocks_to_process)} blocks to process.") 41 | 42 | except FileNotFoundError: 43 | print(f"Error: Input file '{input_csv_filename}' not found.") 44 | sys.exit(1) 45 | except Exception as e: 46 | print(f"Error reading {input_csv_filename}: {e}") 47 | sys.exit(1) 48 | 49 | # Get Ethereum RPC URL from .env file 50 | ethereum_rpc_url = os.environ.get('RPC_SLOW') 51 | if not ethereum_rpc_url: 52 | print("Error: RPC_SLOW variable not found in .env file.") 53 | print("Please create a .env file with RPC_SLOW=your_ethereum_node_url") 54 | print("Example: RPC_SLOW=https://mainnet.infura.io/v3/YOUR_INFURA_API_KEY") 55 | sys.exit(1) 56 | 57 | w3 = Web3(Web3.HTTPProvider(ethereum_rpc_url)) 58 | 59 | # Check if connected to Ethereum node 60 | if not w3.is_connected(): 61 | print("Error: Failed to connect to Ethereum node.") 62 | print("Please check that the RPC_SLOW in your .env file contains a valid 
Ethereum node URL.") 63 | sys.exit(1) 64 | 65 | # Create CSV file for output 66 | csv_filename = "evaluation_blocks_gas_analysis.csv" # Updated filename 67 | with open(csv_filename, 'w', newline='') as csvfile: 68 | csv_writer = csv.writer(csvfile) 69 | 70 | # Write CSV header 71 | csv_writer.writerow(['Block Number', 'Transaction Hash', 'Gas Limit', 'Gas Used']) 72 | 73 | # Process each block from the list 74 | process_blocks(w3, blocks_to_process, csv_writer) # Updated call 75 | 76 | print(f"Analysis complete. Results saved to {csv_filename}") # Updated message 77 | 78 | def get_transactions(block): 79 | """Extract transactions from a block, handling different block formats.""" 80 | if hasattr(block, 'transactions'): 81 | return block.transactions 82 | elif isinstance(block, dict) and 'transactions' in block: 83 | return block['transactions'] 84 | return [] 85 | 86 | def get_transaction_gas_limit(tx): 87 | """Extract the gas limit from a transaction, handling different transaction formats.""" 88 | if hasattr(tx, 'gas'): 89 | return tx.gas 90 | elif isinstance(tx, dict) and 'gas' in tx: 91 | return tx['gas'] 92 | return 0 93 | 94 | def get_transaction_hash(tx): 95 | """Extract the transaction hash, handling different transaction formats.""" 96 | if hasattr(tx, 'hash'): 97 | return tx.hash 98 | elif isinstance(tx, dict) and 'hash' in tx: 99 | return tx['hash'] 100 | return None 101 | 102 | def process_blocks(w3, block_numbers, csv_writer): # Updated signature 103 | """Process each block in the provided list and output the transaction that used the most gas to CSV.""" 104 | total_blocks = len(block_numbers) 105 | for i, block_number in enumerate(block_numbers): # Iterate over the list 106 | print(f"Processing block {block_number} ({i+1}/{total_blocks})") 107 | try: 108 | # Get the block with full transaction details 109 | block = w3.eth.get_block(block_number, full_transactions=True) 110 | 111 | # Get transactions from the block 112 | transactions = 
get_transactions(block) 113 | 114 | # Check if there are any transactions 115 | if not transactions: 116 | csv_writer.writerow([block_number, 'No transactions', 0, 0]) 117 | continue 118 | 119 | # Find the transaction that used the most gas 120 | max_gas_used = 0 121 | max_gas_limit = 0 122 | max_tx_hash = None 123 | 124 | for tx in transactions: 125 | # Get transaction hash 126 | tx_hash = get_transaction_hash(tx) 127 | if not tx_hash: 128 | continue 129 | 130 | # Get gas limit from transaction 131 | gas_limit = get_transaction_gas_limit(tx) 132 | 133 | # Get the receipt to find actual gas used 134 | try: 135 | receipt = w3.eth.get_transaction_receipt(tx_hash) 136 | gas_used = receipt.gasUsed 137 | 138 | if gas_used > max_gas_used: 139 | max_gas_used = gas_used 140 | max_gas_limit = gas_limit 141 | max_tx_hash = tx_hash 142 | except Exception as e: 143 | # Skip transactions with receipt issues 144 | continue 145 | 146 | # Write the result to CSV 147 | if max_tx_hash: 148 | # Convert hash to hex string if it's not already a string 149 | tx_hash_str = max_tx_hash.hex() if hasattr(max_tx_hash, 'hex') else str(max_tx_hash) 150 | csv_writer.writerow([block_number, tx_hash_str, max_gas_limit, max_gas_used]) 151 | else: 152 | csv_writer.writerow([block_number, 'No valid transactions', 0, 0]) 153 | 154 | except Exception as e: 155 | csv_writer.writerow([block_number, f'Error: {str(e)}', 0, 0]) 156 | print(f"Error processing block {block_number}: {str(e)}", file=sys.stderr) 157 | 158 | if __name__ == "__main__": 159 | main() -------------------------------------------------------------------------------- /bin/client-eth-agg/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace.package] 2 | [package] 3 | name = "rsp-client-eth-agg" 4 | description = "" 5 | edition = "2021" 6 | 7 | [profile.release] 8 | lto = "fat" 9 | 10 | [dependencies] 11 | # workspace 12 | rsp-client-executor = { path = "../../crates/executor/client" 
} 13 | reth-primitives = { git = "https://github.com/succinctlabs/reth-subblock", branch = "subblock", default-features = false, features = [ 14 | "alloy-compat", 15 | "optimism", 16 | "std", 17 | ] } 18 | 19 | # sp1 20 | sp1-zkvm = { version = "4.1.7" } 21 | 22 | # Statically turns off logging 23 | log = { version = "0.4", features = ["max_level_off", "release_max_level_off"] } 24 | tracing = { version = "0.1", features = ["max_level_off", "release_max_level_off"] } 25 | 26 | [patch.crates-io] 27 | # Precompile patches 28 | sha2 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", tag = "patch-sha2-0.10.8-sp1-4.0.0", package = "sha2" } 29 | bn = { git = "https://github.com/sp1-patches/bn", tag = "patch-0.6.0-sp1-4.0.0", package = "substrate-bn" } 30 | sha3 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", tag = "patch-sha3-0.10.8-sp1-4.0.0" } 31 | k256 = { git = "https://github.com/sp1-patches/elliptic-curves", tag = "patch-k256-13.4-sp1-4.1.0" } 32 | p256 = { git = "https://github.com/sp1-patches/elliptic-curves", tag = "patch-p256-13.2-sp1-4.1.0" } 33 | -------------------------------------------------------------------------------- /bin/client-eth-agg/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | sp1_zkvm::entrypoint!(main); 3 | 4 | use reth_primitives::B256; 5 | use rsp_client_executor::{io::AggregationInput, ClientExecutor, EthereumVariant}; 6 | 7 | pub fn main() { 8 | // Read the input. 9 | println!("cycle-tracker-start: deserialize"); 10 | // Read the public values, vkey, and aggregation input. 
11 | let public_values = sp1_zkvm::io::read::<Vec<Vec<u8>>>(); 12 | let vkey = sp1_zkvm::io::read::<[u32; 8]>(); 13 | println!("cycle-tracker-start: deserialize aggregation input"); 14 | let aggregation_input = sp1_zkvm::io::read::<AggregationInput>(); 15 | println!("cycle-tracker-end: deserialize aggregation input"); 16 | 17 | let parent_state_root = sp1_zkvm::io::read::<B256>(); 18 | sp1_zkvm::io::commit(&parent_state_root); 19 | sp1_zkvm::io::commit(&aggregation_input.current_block); 20 | println!("cycle-tracker-end: deserialize"); 21 | 22 | let client = ClientExecutor; 23 | 24 | let header = client 25 | .execute_aggregation::<EthereumVariant>( 26 | public_values, 27 | vkey, 28 | aggregation_input, 29 | parent_state_root, 30 | ) 31 | .expect("failed to execute aggregation"); 32 | 33 | let hash = header.hash_slow(); 34 | 35 | sp1_zkvm::io::commit(&hash); 36 | } 37 | -------------------------------------------------------------------------------- /bin/client-eth-subblock/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace.package] 2 | [package] 3 | name = "rsp-client-eth-subblock" 4 | description = "" 5 | edition = "2021" 6 | 7 | [profile.release] 8 | lto = "fat" 9 | 10 | [dependencies] 11 | # workspace 12 | rsp-client-executor = { path = "../../crates/executor/client" } 13 | rsp-mpt = { path = "../../crates/mpt" } 14 | 15 | # sp1 16 | sp1-zkvm = { version = "4.1.7" } 17 | 18 | # rkyv 19 | rkyv = "0.8.10" 20 | 21 | # Statically turns off logging 22 | log = { version = "0.4", features = ["max_level_off", "release_max_level_off"] } 23 | tracing = { version = "0.1", features = ["max_level_off", "release_max_level_off"] } 24 | 25 | [patch.crates-io] 26 | # Precompile patches 27 | sha2 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", tag = "patch-sha2-0.10.8-sp1-4.0.0", package = "sha2" } 28 | bn = { git = "https://github.com/sp1-patches/bn", tag = "patch-0.6.0-sp1-4.0.0", package = "substrate-bn" } 29 | sha3 = { git = 
"https://github.com/sp1-patches/RustCrypto-hashes", tag = "patch-sha3-0.10.8-sp1-4.0.0" } 30 | k256 = { git = "https://github.com/sp1-patches/elliptic-curves", tag = "patch-k256-13.4-sp1-4.1.0" } 31 | p256 = { git = "https://github.com/sp1-patches/elliptic-curves", tag = "patch-p256-13.2-sp1-4.1.0" } 32 | -------------------------------------------------------------------------------- /bin/client-eth-subblock/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | sp1_zkvm::entrypoint!(main); 3 | 4 | use rsp_client_executor::{ 5 | io::{read_aligned_vec, SubblockInput}, 6 | ClientExecutor, EthereumVariant, 7 | }; 8 | use rsp_mpt::EthereumState; 9 | 10 | pub fn main() { 11 | // Read the input. 12 | println!("cycle-tracker-start: deserialize input"); 13 | let input = sp1_zkvm::io::read::<SubblockInput>(); 14 | println!("cycle-tracker-end: deserialize input"); 15 | 16 | println!("cycle-tracker-start: commit input"); 17 | sp1_zkvm::io::commit(&input); 18 | println!("cycle-tracker-end: commit input"); 19 | 20 | println!("cycle-tracker-start: deserialize parent state"); 21 | 22 | let aligned = read_aligned_vec::<16>(); 23 | let mut parent_state = 24 | rkyv::from_bytes::<EthereumState, rkyv::rancor::Error>(&aligned).unwrap(); 25 | 26 | println!("cycle-tracker-end: deserialize parent state"); 27 | 28 | println!("cycle-tracker-start: execute subblock"); 29 | // Execute the block. 30 | let executor = ClientExecutor; 31 | let subblock_output = executor 32 | .execute_subblock::<EthereumVariant>(input, &mut parent_state) 33 | .expect("failed to execute client"); 34 | println!("cycle-tracker-end: execute subblock"); 35 | 36 | // Commit the state diff. 
37 | sp1_zkvm::io::commit(&subblock_output); 38 | } 39 | -------------------------------------------------------------------------------- /bin/client-eth/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace.package] 2 | [package] 3 | name = "rsp-client-eth" 4 | description = "" 5 | edition = "2021" 6 | 7 | [dependencies] 8 | bincode = "1.3.3" 9 | 10 | # workspace 11 | rsp-client-executor = { path = "../../crates/executor/client" } 12 | 13 | # sp1 14 | sp1-zkvm = { version = "4.1.7" } 15 | 16 | # Statically turns off logging 17 | log = { version = "0.4", features = ["max_level_off", "release_max_level_off"] } 18 | tracing = { version = "0.1", features = ["max_level_off", "release_max_level_off"] } 19 | 20 | [patch.crates-io] 21 | # Precompile patches 22 | sha2 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", tag = "patch-sha2-0.10.8-sp1-4.0.0", package = "sha2" } 23 | bn = { git = "https://github.com/sp1-patches/bn", tag = "patch-0.6.0-sp1-4.0.0", package = "substrate-bn" } 24 | sha3 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", tag = "patch-sha3-0.10.8-sp1-4.0.0" } 25 | k256 = { git = "https://github.com/sp1-patches/elliptic-curves", tag = "patch-k256-13.4-sp1-4.1.0" } 26 | p256 = { git = "https://github.com/sp1-patches/elliptic-curves", tag = "patch-p256-13.2-sp1-4.1.0" } 27 | -------------------------------------------------------------------------------- /bin/client-eth/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | sp1_zkvm::entrypoint!(main); 3 | 4 | use rsp_client_executor::{io::ClientExecutorInput, ClientExecutor, EthereumVariant}; 5 | 6 | pub fn main() { 7 | // Read the input. 8 | let input = sp1_zkvm::io::read_vec(); 9 | let input = bincode::deserialize::(&input).unwrap(); 10 | 11 | // Execute the block. 
12 | let executor = ClientExecutor; 13 | let header = executor.execute::(input).expect("failed to execute client"); 14 | let block_hash = header.hash_slow(); 15 | 16 | // Commit the block hash. 17 | sp1_zkvm::io::commit(&block_hash); 18 | } 19 | -------------------------------------------------------------------------------- /bin/host/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | version = "0.1.0" 3 | name = "rsp" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | dotenv.workspace = true 8 | tokio.workspace = true 9 | url.workspace = true 10 | tracing-subscriber.workspace = true 11 | tracing.workspace = true 12 | clap.workspace = true 13 | serde_json.workspace = true 14 | serde.workspace = true 15 | bincode.workspace = true 16 | eyre.workspace = true 17 | rkyv.workspace = true 18 | sha2.workspace = true 19 | 20 | # workspace 21 | rsp-host-executor.workspace = true 22 | rsp-client-executor.workspace = true 23 | rsp-mpt.workspace = true 24 | 25 | # alloy 26 | alloy-provider.workspace = true 27 | 28 | # sp1 29 | sp1-sdk = { version = "4.1.7" } 30 | 31 | [build-dependencies] 32 | sp1-build = { version = "4.1.7" } 33 | 34 | 35 | [[bin]] 36 | name = "subblock" 37 | path = "src/subblock.rs" 38 | 39 | [features] 40 | default = [] 41 | s3 = [] 42 | cuda = ["sp1-sdk/cuda"] 43 | -------------------------------------------------------------------------------- /bin/host/build.rs: -------------------------------------------------------------------------------- 1 | use sp1_build::{build_program_with_args, BuildArgs}; 2 | 3 | fn main() { 4 | build_program_with_args( 5 | "../client-eth", 6 | BuildArgs { ignore_rust_version: true, ..Default::default() }, 7 | ); 8 | build_program_with_args( 9 | "../client-eth-agg", 10 | BuildArgs { ignore_rust_version: true, ..Default::default() }, 11 | ); 12 | build_program_with_args( 13 | "../client-eth-subblock", 14 | BuildArgs { ignore_rust_version: true, ..Default::default() }, 15 | 
); 16 | } 17 | -------------------------------------------------------------------------------- /bin/host/src/cli.rs: -------------------------------------------------------------------------------- 1 | use alloy_provider::{network::AnyNetwork, Provider as _, ReqwestProvider}; 2 | use clap::Parser; 3 | use url::Url; 4 | 5 | /// The arguments for configuring the chain data provider. 6 | #[derive(Debug, Clone, Parser)] 7 | pub struct ProviderArgs { 8 | /// The rpc url used to fetch data about the block. If not provided, will use the 9 | /// RPC_{chain_id} env var. 10 | #[clap(long)] 11 | rpc_url: Option<Url>, 12 | /// The chain ID. If not provided, requires the rpc_url argument to be provided. 13 | #[clap(long)] 14 | chain_id: Option<u64>, 15 | } 16 | 17 | pub struct ProviderConfig { 18 | pub rpc_url: Option<Url>, 19 | pub chain_id: u64, 20 | } 21 | 22 | impl ProviderArgs { 23 | pub async fn into_provider(self) -> eyre::Result<ProviderConfig> { 24 | // We don't need RPC when using cache with known chain ID, so we leave it as `Option` 25 | // here and decide on whether to panic later. 26 | // 27 | // On the other hand chain ID is always needed. 28 | let (rpc_url, chain_id) = match (self.rpc_url, self.chain_id) { 29 | (Some(rpc_url), Some(chain_id)) => (Some(rpc_url), chain_id), 30 | (None, Some(chain_id)) => { 31 | match std::env::var(format!("RPC_{}", chain_id)) { 32 | Ok(rpc_env_var) => { 33 | // We don't always need it but if the value exists it has to be valid. 34 | (Some(Url::parse(rpc_env_var.as_str()).expect("invalid rpc url")), chain_id) 35 | } 36 | Err(_) => { 37 | // Not having RPC is okay because we know chain ID. 38 | (None, chain_id) 39 | } 40 | } 41 | } 42 | (Some(rpc_url), None) => { 43 | // We can find out about chain ID from RPC. 
44 | let provider: ReqwestProvider = 45 | ReqwestProvider::new_http(rpc_url.clone()); 46 | let chain_id = provider.get_chain_id().await?; 47 | 48 | (Some(rpc_url), chain_id) 49 | } 50 | (None, None) => { 51 | eyre::bail!("either --rpc-url or --chain-id must be used") 52 | } 53 | }; 54 | 55 | Ok(ProviderConfig { rpc_url, chain_id }) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /bin/host/src/main.rs: -------------------------------------------------------------------------------- 1 | use alloy_provider::ReqwestProvider; 2 | use clap::Parser; 3 | use rsp_client_executor::{io::ClientExecutorInput, ChainVariant, CHAIN_ID_ETH_MAINNET}; 4 | use rsp_host_executor::HostExecutor; 5 | use sp1_sdk::{include_elf, Prover, ProverClient, SP1Stdin}; 6 | use std::path::PathBuf; 7 | use tracing_subscriber::{ 8 | filter::EnvFilter, fmt, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, 9 | }; 10 | mod cli; 11 | use cli::ProviderArgs; 12 | 13 | /// The arguments for the host executable. 14 | #[derive(Debug, Clone, Parser)] 15 | struct HostArgs { 16 | /// The block number of the block to execute. 17 | #[clap(long)] 18 | block_number: u64, 19 | 20 | #[clap(flatten)] 21 | provider: ProviderArgs, 22 | 23 | /// Where to dump the elf and stdin for the monolithic SP1 program. 24 | #[clap(long)] 25 | dump_dir: Option, 26 | 27 | /// Optional path to the directory containing cached client input. A new cache file will be 28 | /// created from RPC data if it doesn't already exist. 29 | #[clap(long)] 30 | cache_dir: Option, 31 | } 32 | 33 | #[tokio::main(flavor = "multi_thread")] 34 | async fn main() -> eyre::Result<()> { 35 | // Intialize the environment variables. 36 | dotenv::dotenv().ok(); 37 | 38 | if std::env::var("RUST_LOG").is_err() { 39 | std::env::set_var("RUST_LOG", "info"); 40 | } 41 | 42 | // Initialize the logger. 
43 | tracing_subscriber::registry().with(fmt::layer()).with(EnvFilter::from_default_env()).init(); 44 | 45 | // Parse the command line arguments. 46 | let args = HostArgs::parse(); 47 | let provider_config = args.provider.clone().into_provider().await?; 48 | 49 | let variant = match provider_config.chain_id { 50 | CHAIN_ID_ETH_MAINNET => ChainVariant::Ethereum, 51 | _ => { 52 | eyre::bail!("unknown chain ID: {}", provider_config.chain_id); 53 | } 54 | }; 55 | 56 | let client_input_from_cache = try_load_input_from_cache( 57 | args.cache_dir.as_ref(), 58 | provider_config.chain_id, 59 | args.block_number, 60 | )?; 61 | 62 | let client_input = match (client_input_from_cache, provider_config.rpc_url) { 63 | (Some(client_input_from_cache), _) => client_input_from_cache, 64 | (None, Some(rpc_url)) => { 65 | // Cache not found, but RPC is set. 66 | // Setup the provider. 67 | let provider = ReqwestProvider::new_http(rpc_url); 68 | 69 | // Setup the host executor. 70 | let host_executor = HostExecutor::new(provider); 71 | 72 | // Execute the host. 73 | let client_input = host_executor 74 | .execute(args.block_number, variant) 75 | .await 76 | .expect("failed to execute host"); 77 | 78 | if let Some(ref cache_dir) = args.cache_dir { 79 | let input_folder = cache_dir.join(format!("input/{}", provider_config.chain_id)); 80 | if !input_folder.exists() { 81 | std::fs::create_dir_all(&input_folder)?; 82 | } 83 | 84 | let input_path = input_folder.join(format!("{}.bin", args.block_number)); 85 | let mut cache_file = std::fs::File::create(input_path)?; 86 | 87 | bincode::serialize_into(&mut cache_file, &client_input)?; 88 | } 89 | 90 | client_input 91 | } 92 | (None, None) => { 93 | eyre::bail!("cache not found and RPC URL not provided") 94 | } 95 | }; 96 | 97 | // Generate the proof. 98 | let client = 99 | tokio::task::spawn_blocking(|| ProverClient::builder().cpu().build()).await.unwrap(); 100 | 101 | // Setup the proving key and verification key. 
102 | let (pk, _vk) = client.setup(match variant { 103 | ChainVariant::Ethereum => include_elf!("rsp-client-eth"), 104 | }); 105 | 106 | // Execute the block inside the zkVM. 107 | let mut stdin = SP1Stdin::new(); 108 | let buffer = bincode::serialize(&client_input).unwrap(); 109 | stdin.write_vec(buffer); 110 | 111 | // Only execute the program. 112 | let (_public_values, execution_report) = client.execute(&pk.elf, &stdin).run().unwrap(); 113 | 114 | println!("execution_report: {}", execution_report); 115 | 116 | if let Some(dump_dir) = args.dump_dir { 117 | let dump_dir = dump_dir.join(format!("{}", args.block_number)); 118 | let elf_path = dump_dir.join("basic_elf.bin"); 119 | let stdin_path = dump_dir.join("basic_stdin.bin"); 120 | std::fs::write(elf_path, &pk.elf)?; 121 | std::fs::write(stdin_path, bincode::serialize(&stdin)?)?; 122 | } 123 | 124 | Ok(()) 125 | } 126 | 127 | fn try_load_input_from_cache( 128 | cache_dir: Option<&PathBuf>, 129 | chain_id: u64, 130 | block_number: u64, 131 | ) -> eyre::Result> { 132 | Ok(if let Some(cache_dir) = cache_dir { 133 | let cache_path = cache_dir.join(format!("input/{}/{}.bin", chain_id, block_number)); 134 | 135 | if cache_path.exists() { 136 | // Try to deserialize the cache file, but handle errors gracefully 137 | match std::fs::File::open(&cache_path) { 138 | Ok(mut cache_file) => match bincode::deserialize_from(&mut cache_file) { 139 | Ok(client_input) => Some(client_input), 140 | Err(err) => { 141 | tracing::warn!( 142 | "Failed to deserialize cache file at {}: {}", 143 | cache_path.display(), 144 | err 145 | ); 146 | None 147 | } 148 | }, 149 | Err(err) => { 150 | tracing::warn!( 151 | "Failed to open cache file at {}: {}", 152 | cache_path.display(), 153 | err 154 | ); 155 | None 156 | } 157 | } 158 | } else { 159 | None 160 | } 161 | } else { 162 | None 163 | }) 164 | } 165 | -------------------------------------------------------------------------------- /bin/host/src/subblock.rs: 
-------------------------------------------------------------------------------- 1 | //! Subblock executor. 2 | //! 3 | //! This is a standalone program that can be used to execute a subblock, and optionally dump the 4 | //! elf/stdin pairs to a directory. 5 | 6 | use alloy_provider::ReqwestProvider; 7 | use clap::Parser; 8 | use rsp_client_executor::{io::SubblockHostOutput, ChainVariant}; 9 | use rsp_host_executor::HostExecutor; 10 | use sp1_sdk::{ 11 | include_elf, HashableKey, Prover, ProverClient, SP1ProvingKey, SP1Stdin, SP1VerifyingKey, 12 | }; 13 | use std::path::PathBuf; 14 | use tracing_subscriber::{ 15 | filter::EnvFilter, fmt, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, 16 | }; 17 | 18 | mod cli; 19 | use cli::ProviderArgs; 20 | 21 | /// The arguments for the subblock executable. 22 | #[derive(Debug, Clone, Parser)] 23 | struct HostArgs { 24 | /// The block number of the block to execute. 25 | #[clap(long)] 26 | block_number: u64, 27 | #[clap(flatten)] 28 | provider: ProviderArgs, 29 | /// Whether to execute the subblock and aggregation programs in the SP1 zkVM. 30 | /// 31 | /// Note: does not generate a proof. 32 | #[clap(long)] 33 | execute: bool, 34 | /// Where to dump the elf and stdin for the subblock and aggregation programs. 35 | #[clap(long)] 36 | dump_dir: Option, 37 | /// Optional path to the directory containing cached client input. A new cache file will be 38 | /// created from RPC data if it doesn't already exist. 39 | #[clap(long)] 40 | cache_dir: Option, 41 | } 42 | 43 | #[tokio::main] 44 | async fn main() -> eyre::Result<()> { 45 | // Intialize the environment variables. 46 | dotenv::dotenv().ok(); 47 | 48 | // Initialize the logger. 49 | tracing_subscriber::registry().with(fmt::layer()).with(EnvFilter::from_default_env()).init(); 50 | 51 | // Parse the command line arguments. 
52 | let args = HostArgs::parse(); 53 | 54 | let provider_config = args.provider.clone().into_provider().await?; 55 | 56 | let cache_data = try_load_input_from_cache( 57 | args.cache_dir.as_ref(), 58 | provider_config.chain_id, 59 | args.block_number, 60 | )?; 61 | 62 | let client_input = match (cache_data, provider_config.rpc_url) { 63 | (Some(cache_data), _) => cache_data, 64 | (None, Some(rpc_url)) => { 65 | // Cache not found but we have RPC 66 | // Setup the provider. 67 | let provider = ReqwestProvider::new_http(rpc_url); 68 | 69 | // Setup the host executor. 70 | let host_executor = HostExecutor::new(provider); 71 | 72 | // Execute the host. 73 | let cache_data = host_executor 74 | .execute_subblock(args.block_number, ChainVariant::Ethereum) 75 | .await 76 | .expect("failed to execute host"); 77 | 78 | if let Some(ref cache_dir) = args.cache_dir { 79 | let input_folder = cache_dir.join(format!("input/{}", provider_config.chain_id)); 80 | if !input_folder.exists() { 81 | std::fs::create_dir_all(&input_folder)?; 82 | } 83 | 84 | let input_path = input_folder.join(format!("{}.bin", args.block_number)); 85 | let mut cache_file = std::fs::File::create(input_path)?; 86 | 87 | bincode::serialize_into(&mut cache_file, &cache_data)?; 88 | } 89 | 90 | cache_data 91 | } 92 | (None, None) => { 93 | eyre::bail!("cache not found and RPC URL not provided") 94 | } 95 | }; 96 | 97 | // Generate the proof. 98 | let client = 99 | tokio::task::spawn_blocking(|| ProverClient::builder().cpu().build()).await.unwrap(); 100 | 101 | // Setup the proving key and verification key. 
102 | let (subblock_pk, _subblock_vk) = client.setup(include_elf!("rsp-client-eth-subblock")); 103 | 104 | let (agg_pk, _agg_vk) = client.setup(include_elf!("rsp-client-eth-agg")); 105 | 106 | schedule_subblock_execution( 107 | subblock_pk, 108 | args.block_number, 109 | agg_pk, 110 | client_input, 111 | args.execute, 112 | args.dump_dir, 113 | ) 114 | .await?; 115 | 116 | Ok(()) 117 | } 118 | 119 | async fn schedule_subblock_execution( 120 | subblock_pk: SP1ProvingKey, 121 | block_number: u64, 122 | agg_pk: SP1ProvingKey, 123 | inputs: SubblockHostOutput, 124 | execute: bool, 125 | dump_dir: Option, 126 | ) -> eyre::Result<()> { 127 | let (subblock_elf, subblock_vk) = (subblock_pk.elf, subblock_pk.vk); 128 | let agg_elf = agg_pk.elf; 129 | 130 | let dump_dir = dump_dir.map(|d| d.join(format!("{}", block_number))); 131 | 132 | if let Some(dump_dir) = dump_dir.as_ref() { 133 | std::fs::create_dir_all(dump_dir)?; 134 | std::fs::write(dump_dir.join("subblock_elf.bin"), &subblock_elf)?; 135 | std::fs::write(dump_dir.join("subblock_vk.bin"), bincode::serialize(&subblock_vk)?)?; 136 | std::fs::write(dump_dir.join("agg_elf.bin"), &agg_elf)?; 137 | } 138 | 139 | let client = 140 | tokio::task::spawn_blocking(|| ProverClient::builder().cpu().build()).await.unwrap(); 141 | 142 | let aggregation_stdin = to_aggregation_stdin(inputs.clone(), &subblock_vk); 143 | if let Some(dump_dir) = dump_dir.as_ref() { 144 | let stdin_path = dump_dir.join("agg_stdin.bin"); 145 | std::fs::write(stdin_path, bincode::serialize(&aggregation_stdin)?)?; 146 | } 147 | 148 | for i in 0..inputs.subblock_inputs.len() { 149 | let input = &inputs.subblock_inputs[i]; 150 | let parent_state = &inputs.subblock_parent_states[i]; 151 | 152 | let mut stdin = SP1Stdin::new(); 153 | stdin.write(input); 154 | stdin.write_vec(parent_state.clone()); 155 | 156 | // Save the elf/stdin pair to the dump directory. 
157 | if let Some(dump_dir) = dump_dir.as_ref() { 158 | let stdin_dir_path = dump_dir.join("subblock_stdins"); 159 | std::fs::create_dir_all(&stdin_dir_path)?; 160 | let stdin_path = stdin_dir_path.join(format!("{}.bin", i)); 161 | std::fs::write(stdin_path, bincode::serialize(&stdin)?)?; 162 | } 163 | 164 | if execute { 165 | let (_public_values, report) = client.execute(&subblock_elf, &stdin).run().unwrap(); 166 | let subblock_instruction_count = report.total_instruction_count(); 167 | tracing::info!("Subblock {} instruction count: {}", i, subblock_instruction_count); 168 | } 169 | } 170 | 171 | if execute { 172 | // Execute the aggregation program with deferred proof verification off, since we don't have the proof yet. 173 | let (_public_values, report) = client 174 | .execute(&agg_elf, &aggregation_stdin) 175 | .deferred_proof_verification(false) 176 | .run() 177 | .unwrap(); 178 | let agg_instruction_count = report.total_instruction_count(); 179 | tracing::info!("Aggregation program instruction count: {}", agg_instruction_count); 180 | } 181 | 182 | Ok(()) 183 | } 184 | 185 | /// Constructs the aggregation stdin, minus the subblock proofs. 
186 | pub fn to_aggregation_stdin( 187 | subblock_host_output: SubblockHostOutput, 188 | subblock_vk: &SP1VerifyingKey, 189 | ) -> SP1Stdin { 190 | let mut stdin = SP1Stdin::new(); 191 | 192 | assert_eq!( 193 | subblock_host_output.subblock_inputs.len(), 194 | subblock_host_output.subblock_outputs.len() 195 | ); 196 | let mut public_values = Vec::new(); 197 | for i in 0..subblock_host_output.subblock_inputs.len() { 198 | let mut current_public_values = Vec::new(); 199 | let input = &subblock_host_output.subblock_inputs[i]; 200 | bincode::serialize_into(&mut current_public_values, input).unwrap(); 201 | bincode::serialize_into( 202 | &mut current_public_values, 203 | &subblock_host_output.subblock_outputs[i], 204 | ) 205 | .unwrap(); 206 | public_values.push(current_public_values); 207 | } 208 | 209 | tracing::info!( 210 | "Public values size in bytes: {}", 211 | public_values.iter().map(|v| v.len()).sum::() 212 | ); 213 | 214 | // // Deserialize the parent state and compute the root. 215 | // let mut aligned_vec = AlignedVec::<16>::new(); 216 | // let mut reader = Cursor::new(&subblock_host_output.agg_parent_state); 217 | // aligned_vec.extend_from_reader(&mut reader).unwrap(); 218 | // let parent_state = 219 | // rkyv::from_bytes::(&aligned_vec).unwrap(); 220 | // let parent_state_root = parent_state.state_root(); 221 | 222 | stdin.write::>>(&public_values); 223 | stdin.write::<[u32; 8]>(&subblock_vk.hash_u32()); 224 | stdin.write(&subblock_host_output.agg_input); 225 | stdin.write(&subblock_host_output.agg_input.parent_header().state_root); 226 | stdin 227 | } 228 | 229 | fn try_load_input_from_cache( 230 | cache_dir: Option<&PathBuf>, 231 | chain_id: u64, 232 | block_number: u64, 233 | ) -> eyre::Result> { 234 | Ok(if let Some(cache_dir) = cache_dir { 235 | let cache_path = 236 | cache_dir.join(format!("subblock-input/{}/{}.bin", chain_id, block_number)); 237 | 238 | if cache_path.exists() { 239 | // Try to open and deserialize the cache file, delete it if 
there's an error 240 | match (|| -> eyre::Result { 241 | let mut cache_file = std::fs::File::open(&cache_path)?; 242 | let cache_data: SubblockHostOutput = bincode::deserialize_from(&mut cache_file)?; 243 | Ok(cache_data) 244 | })() { 245 | Ok(cache_data) => Some(cache_data), 246 | Err(err) => { 247 | tracing::warn!("Failed to load cache file {}: {}", cache_path.display(), err); 248 | // Delete the invalid cache file 249 | if let Err(delete_err) = std::fs::remove_file(&cache_path) { 250 | tracing::warn!("Failed to delete invalid cache file: {}", delete_err); 251 | } else { 252 | tracing::info!("Deleted invalid cache file: {}", cache_path.display()); 253 | } 254 | None 255 | } 256 | } 257 | } else { 258 | None 259 | } 260 | } else { 261 | None 262 | }) 263 | } 264 | -------------------------------------------------------------------------------- /crates/executor/client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rsp-client-executor" 3 | description = "" 4 | version.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | [lints] 11 | workspace = true 12 | 13 | [dependencies] 14 | thiserror.workspace = true 15 | serde.workspace = true 16 | sha2.workspace = true 17 | itertools.workspace = true 18 | bincode.workspace = true 19 | rkyv.workspace = true 20 | cfg-if.workspace = true 21 | tracing.workspace = true 22 | 23 | # workspace 24 | rsp-primitives.workspace = true 25 | rsp-mpt.workspace = true 26 | 27 | # reth 28 | reth-consensus.workspace = true 29 | reth-ethereum-consensus.workspace = true 30 | reth-execution-types.workspace = true 31 | reth-primitives.workspace = true 32 | reth-trie.workspace = true 33 | reth-evm.workspace = true 34 | reth-evm-ethereum.workspace = true 35 | reth-errors.workspace = true 36 | reth-chainspec.workspace = true 37 | reth-revm.workspace = true 38 | 39 | # revm 40 | 
revm.workspace = true 41 | revm-primitives.workspace = true 42 | 43 | # alloy 44 | alloy-primitives.workspace = true 45 | alloy-rlp.workspace = true 46 | 47 | # sp1 48 | sp1-zkvm = { version = "4.1.7", features = ["verify"] } 49 | 50 | [dev-dependencies] 51 | -------------------------------------------------------------------------------- /crates/executor/client/src/custom.rs: -------------------------------------------------------------------------------- 1 | //! A cunstom EVM configuration for annotated precompiles. 2 | //! 3 | //! Originally from: https://github.com/paradigmxyz/alphanet/blob/main/crates/node/src/evm.rs. 4 | //! 5 | //! The [CustomEvmConfig] type implements the [ConfigureEvm] and [ConfigureEvmEnv] traits, 6 | //! configuring the custom CustomEvmConfig precompiles and instructions. 7 | 8 | use crate::ChainVariant; 9 | use reth_chainspec::ChainSpec; 10 | use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; 11 | use reth_evm_ethereum::EthEvmConfig; 12 | use reth_primitives::{ 13 | revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, 14 | Address, Bytes, Header, TransactionSigned, U256, 15 | }; 16 | use reth_revm::{ 17 | handler::register::EvmHandler, precompile::PrecompileSpecId, primitives::Env, 18 | ContextPrecompiles, Database, Evm, EvmBuilder, 19 | }; 20 | use revm::precompile::{ 21 | bn128, kzg_point_evaluation, secp256k1, Precompile, PrecompileResult, PrecompileWithAddress, 22 | }; 23 | use std::sync::Arc; 24 | 25 | /// Create an annotated precompile that tracks the cycle count of a precompile. 26 | /// This is useful for tracking how many cycles in total are consumed by calls to a given 27 | /// precompile. 28 | macro_rules! 
create_annotated_precompile { 29 | ($precompile:expr, $name:expr) => { 30 | PrecompileWithAddress( 31 | $precompile.0, 32 | Precompile::Standard(|input: &Bytes, gas_limit: u64| -> PrecompileResult { 33 | let precompile = $precompile.precompile(); 34 | match precompile { 35 | Precompile::Standard(precompile) => { 36 | println!(concat!("cycle-tracker-report-start: precompile-", $name)); 37 | let result = precompile(input, gas_limit); 38 | println!(concat!("cycle-tracker-report-end: precompile-", $name)); 39 | result 40 | } 41 | _ => panic!("Annotated precompile must be a standard precompile."), 42 | } 43 | }), 44 | ) 45 | }; 46 | } 47 | 48 | // An annotated version of the KZG point evaluation precompile. Because this is a stateful 49 | // precompile we cannot use the `create_annotated_precompile` macro 50 | pub(crate) const ANNOTATED_KZG_PROOF: PrecompileWithAddress = PrecompileWithAddress( 51 | kzg_point_evaluation::POINT_EVALUATION.0, 52 | Precompile::Env(|input: &Bytes, gas_limit: u64, env: &Env| -> PrecompileResult { 53 | let precompile = kzg_point_evaluation::POINT_EVALUATION.precompile(); 54 | match precompile { 55 | Precompile::Env(precompile) => { 56 | println!(concat!( 57 | "cycle-tracker-report-start: precompile-", 58 | "kzg-point-evaluation" 59 | )); 60 | let result = precompile(input, gas_limit, env); 61 | println!(concat!("cycle-tracker-report-end: precompile-", "kzg-point-evaluation")); 62 | result 63 | } 64 | _ => panic!("Annotated precompile must be a env precompile."), 65 | } 66 | }), 67 | ); 68 | 69 | pub(crate) const ANNOTATED_ECRECOVER: PrecompileWithAddress = 70 | create_annotated_precompile!(secp256k1::ECRECOVER, "ecrecover"); 71 | pub(crate) const ANNOTATED_BN_ADD: PrecompileWithAddress = 72 | create_annotated_precompile!(bn128::add::ISTANBUL, "bn-add"); 73 | pub(crate) const ANNOTATED_BN_MUL: PrecompileWithAddress = 74 | create_annotated_precompile!(bn128::mul::ISTANBUL, "bn-mul"); 75 | pub(crate) const ANNOTATED_BN_PAIR: PrecompileWithAddress 
= 76 | create_annotated_precompile!(bn128::pair::ISTANBUL, "bn-pair"); 77 | 78 | /// Custom EVM configuration 79 | #[derive(Debug, Clone, Copy)] 80 | #[non_exhaustive] 81 | pub struct CustomEvmConfig(pub ChainVariant); 82 | 83 | impl CustomEvmConfig { 84 | /// Sets the precompiles to the EVM handler 85 | /// 86 | /// This will be invoked when the EVM is created via [ConfigureEvm::evm] or 87 | /// [ConfigureEvm::evm_with_inspector] 88 | /// 89 | /// This will use the default mainnet precompiles and add additional precompiles. 90 | fn set_precompiles(handler: &mut EvmHandler<'_, EXT, DB>) 91 | where 92 | DB: Database, 93 | { 94 | // first we need the evm spec id, which determines the precompiles 95 | let spec_id = handler.cfg.spec_id; 96 | // install the precompiles 97 | handler.pre_execution.load_precompiles = Arc::new(move || { 98 | let mut loaded_precompiles: ContextPrecompiles = 99 | ContextPrecompiles::new(PrecompileSpecId::from_spec_id(spec_id)); 100 | loaded_precompiles.extend(vec![ 101 | ANNOTATED_ECRECOVER, 102 | ANNOTATED_BN_ADD, 103 | ANNOTATED_BN_MUL, 104 | ANNOTATED_BN_PAIR, 105 | ANNOTATED_KZG_PROOF, 106 | ]); 107 | 108 | loaded_precompiles 109 | }); 110 | } 111 | 112 | pub fn from_variant(variant: ChainVariant) -> Self { 113 | Self(variant) 114 | } 115 | } 116 | 117 | impl ConfigureEvm for CustomEvmConfig { 118 | type DefaultExternalContext<'a> = (); 119 | 120 | fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { 121 | match self.0 { 122 | ChainVariant::Ethereum => { 123 | EvmBuilder::default() 124 | .with_db(db) 125 | // add additional precompiles 126 | .append_handler_register(Self::set_precompiles) 127 | .build() 128 | } 129 | } 130 | } 131 | 132 | fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> {} 133 | } 134 | 135 | impl ConfigureEvmEnv for CustomEvmConfig { 136 | fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { 137 | match self.0 { 138 | 
ChainVariant::Ethereum => { 139 | EthEvmConfig::default().fill_tx_env(tx_env, transaction, sender) 140 | } 141 | } 142 | } 143 | 144 | fn fill_cfg_env( 145 | &self, 146 | cfg_env: &mut CfgEnvWithHandlerCfg, 147 | chain_spec: &ChainSpec, 148 | header: &Header, 149 | total_difficulty: U256, 150 | ) { 151 | match self.0 { 152 | ChainVariant::Ethereum => { 153 | EthEvmConfig::default().fill_cfg_env(cfg_env, chain_spec, header, total_difficulty) 154 | } 155 | } 156 | } 157 | 158 | fn fill_tx_env_system_contract_call( 159 | &self, 160 | env: &mut Env, 161 | caller: Address, 162 | contract: Address, 163 | data: Bytes, 164 | ) { 165 | match self.0 { 166 | ChainVariant::Ethereum => EthEvmConfig::default() 167 | .fill_tx_env_system_contract_call(env, caller, contract, data), 168 | } 169 | } 170 | } 171 | 172 | #[cfg(test)] 173 | mod tests { 174 | use super::*; 175 | use reth_chainspec::{Chain, ChainSpecBuilder, EthereumHardfork}; 176 | use reth_primitives::{ 177 | revm_primitives::{BlockEnv, CfgEnv, SpecId}, 178 | ForkCondition, Genesis, 179 | }; 180 | 181 | #[test] 182 | fn test_fill_cfg_and_block_env() { 183 | let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); 184 | let mut block_env = BlockEnv::default(); 185 | let header = Header::default(); 186 | let chain_spec = ChainSpecBuilder::default() 187 | .chain(Chain::optimism_mainnet()) 188 | .genesis(Genesis::default()) 189 | .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) 190 | .build(); 191 | let total_difficulty = U256::ZERO; 192 | 193 | CustomEvmConfig::from_variant(ChainVariant::Ethereum).fill_cfg_and_block_env( 194 | &mut cfg_env, 195 | &mut block_env, 196 | &chain_spec, 197 | &header, 198 | total_difficulty, 199 | ); 200 | 201 | assert_eq!(cfg_env.chain_id, chain_spec.chain().id()); 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /crates/executor/client/src/error.rs: 
-------------------------------------------------------------------------------- 1 | use alloy_primitives::{Address, FixedBytes}; 2 | use reth_consensus::ConsensusError; 3 | use reth_evm::execute::BlockExecutionError; 4 | use rsp_mpt::Error as MptError; 5 | 6 | #[derive(Debug, thiserror::Error)] 7 | pub enum ClientError { 8 | #[error("Failed to recover senders from signatures")] 9 | SignatureRecoveryFailed, 10 | #[error("Mismatched state root after executing the block")] 11 | MismatchedStateRoot, 12 | #[error("Mismatched storage root after executing the block")] 13 | MismatchedStorageRoot, 14 | #[error("Missing bytecode for account {}", .0)] 15 | MissingBytecode(Address), 16 | #[error("Missing trie for address {}", .0)] 17 | MissingTrie(Address), 18 | #[error("Invalid block number found in headers \n expected: {} found: {}", .0, .1)] 19 | InvalidHeaderBlockNumber(u64, u64), 20 | #[error("Invalid parent header found for block \n expected: {}, found: {}", .0, .1)] 21 | InvalidHeaderParentHash(FixedBytes<32>, FixedBytes<32>), 22 | #[error("Failed to validate post exectution state {}", 0)] 23 | PostExecutionError(#[from] ConsensusError), 24 | #[error("Block Execution Failed: {}", .0)] 25 | BlockExecutionError(#[from] BlockExecutionError), 26 | #[error("Mpt Error: {}", .0)] 27 | MptError(#[from] MptError), 28 | #[error("Deserialization Error")] 29 | DeserializationError, 30 | #[error("Invalid subblock output")] 31 | InvalidSubblockOutput, 32 | #[error("Invalid state diff")] 33 | InvalidStateDiff, 34 | } 35 | -------------------------------------------------------------------------------- /crates/executor/client/src/io.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{BTreeMap, HashMap}, 3 | iter::once, 4 | }; 5 | 6 | use itertools::Itertools; 7 | use reth_errors::ProviderError; 8 | use reth_primitives::{ 9 | revm_primitives::AccountInfo, Address, Block, Bloom, Header, Receipt, Request, B256, U256, 10 | 
}; 11 | use reth_trie::{TrieAccount, EMPTY_ROOT_HASH}; 12 | use revm::DatabaseRef; 13 | use revm_primitives::{keccak256, Bytecode}; 14 | use rsp_mpt::EthereumState; 15 | use serde::{Deserialize, Serialize}; 16 | 17 | use rkyv::util::AlignedVec; 18 | 19 | use crate::{error::ClientError, EthereumVariant}; 20 | 21 | /// The input for the client to execute a block and fully verify the STF (state transition 22 | /// function). 23 | /// 24 | /// Instead of passing in the entire state, we only pass in the state roots along with merkle proofs 25 | /// for the storage slots that were modified and accessed. 26 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 27 | pub struct ClientExecutorInput { 28 | /// The current block (which will be executed inside the client). 29 | pub current_block: Block, 30 | /// The previous block headers starting from the most recent. There must be at least one header 31 | /// to provide the parent state root. 32 | pub ancestor_headers: Vec
, 33 | /// Network state as of the parent block. 34 | pub parent_state: EthereumState, 35 | /// Requests to account state and storage slots. 36 | pub state_requests: HashMap>, 37 | /// Account bytecodes. 38 | pub bytecodes: Vec, 39 | } 40 | 41 | impl ClientExecutorInput { 42 | /// Gets the immediate parent block's header. 43 | #[inline(always)] 44 | pub fn parent_header(&self) -> &Header { 45 | &self.ancestor_headers[0] 46 | } 47 | 48 | /// Creates a [`WitnessDb`]. 49 | pub fn witness_db(&self) -> Result, ClientError> { 50 | ::witness_db(self) 51 | } 52 | } 53 | 54 | impl WitnessInput for ClientExecutorInput { 55 | #[inline(always)] 56 | fn state(&self) -> &EthereumState { 57 | &self.parent_state 58 | } 59 | 60 | #[inline(always)] 61 | fn state_anchor(&self) -> B256 { 62 | self.parent_header().state_root 63 | } 64 | 65 | #[inline(always)] 66 | fn state_requests(&self) -> impl Iterator)> { 67 | self.state_requests.iter() 68 | } 69 | 70 | #[inline(always)] 71 | fn bytecodes(&self) -> impl Iterator { 72 | self.bytecodes.iter() 73 | } 74 | 75 | #[inline(always)] 76 | fn headers(&self) -> impl Iterator { 77 | once(&self.current_block.header).chain(self.ancestor_headers.iter()) 78 | } 79 | } 80 | 81 | /// Input to the subblock program. 82 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 83 | pub struct SubblockInput { 84 | /// The current block (which will be executed inside the client). 85 | pub current_block: Block, 86 | /// The blockhashes used by the subblock. 87 | /// 88 | /// Right now, this is just the blockhashes used by every subblock. In the future, we can 89 | /// probably shrink this down to just the blockhashes used by the current subblock. 90 | pub block_hashes: BTreeMap, 91 | /// The bytecodes used by the subblock 92 | pub bytecodes: Vec, 93 | /// Whether this is the first subblock (do we need to do pre-execution transactions?) 
94 | pub is_first_subblock: bool, 95 | /// Whether this is the last subblock (do we need to do post-execution transactions?) 96 | pub is_last_subblock: bool, 97 | /// The starting gas used for the subblock. 98 | pub starting_gas_used: u64, 99 | } 100 | 101 | /// The committed execution output of the subblock program. 102 | /// 103 | /// The subblock program also commits its `[SubblockInput]`. 104 | #[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)] 105 | pub struct SubblockOutput { 106 | /// The new state root after executing this subblock. 107 | pub output_state_root: B256, 108 | /// The logs bloom. 109 | pub logs_bloom: Bloom, 110 | /// The transaction receipts. 111 | pub receipts: Vec, 112 | /// The state root before executing this subblock. 113 | pub input_state_root: B256, 114 | /// EIP 7685 Requests. 115 | pub requests: Vec, 116 | } 117 | 118 | impl SubblockOutput { 119 | /// This is intended to ONLY be called by consecutive subblocks of the same block. 120 | /// `self` is the current cumulative subblock output, and `other` is the new subblock output. 121 | #[inline] 122 | pub fn extend(&mut self, other: Self) { 123 | // Make sure that the current output state root lines up with the next input state root. 124 | assert_eq!(self.output_state_root, other.input_state_root); 125 | self.output_state_root = other.output_state_root; 126 | self.logs_bloom.accrue_bloom(&other.logs_bloom); 127 | 128 | // Add other receipts to the current receipts. 129 | self.receipts.extend(other.receipts); 130 | 131 | // Add other requests to the current requests. 132 | self.requests.extend(other.requests); 133 | } 134 | } 135 | 136 | /// Everything needed to run the subblock task e2e. 137 | /// 138 | /// Necessary data for subblock stdin and agg stdin. Note that the subblock parent states and 139 | /// agg parent state are serialized with rkyv as bytes here. 
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SubblockHostOutput {
    /// One input per subblock, in execution order.
    pub subblock_inputs: Vec<SubblockInput>,
    /// rkyv-serialized `EthereumState` snapshot preceding each subblock.
    pub subblock_parent_states: Vec<Vec<u8>>,
    /// The expected output of each subblock, parallel to `subblock_inputs`.
    pub subblock_outputs: Vec<SubblockOutput>,
    /// Input for the aggregation program.
    pub agg_input: AggregationInput,
}

impl SubblockHostOutput {
    /// Validates the output of the host executor, by running all of the subblocks natively and
    /// checking their consistency.
    pub fn validate(&self) -> Result<(), ClientError> {
        let executor = crate::ClientExecutor;

        for (i, subblock_input) in self.subblock_inputs.iter().enumerate() {
            let mut subblock_parent_state = rkyv::from_bytes::<EthereumState, rkyv::rancor::Error>(
                &self.subblock_parent_states[i],
            )
            .unwrap();

            let subblock_output = executor.execute_subblock::<EthereumVariant>(
                subblock_input.clone(),
                &mut subblock_parent_state,
            )?;

            if subblock_output != self.subblock_outputs[i] {
                eprintln!(
                    "executed output state root {:?}\n pre-generated output state root {:?}",
                    subblock_output.output_state_root, self.subblock_outputs[i].output_state_root
                );
                eprintln!(
                    "executed input state root {:?}\n pre-generated input state root {:?}",
                    subblock_output.input_state_root, self.subblock_outputs[i].input_state_root
                );
                return Err(ClientError::InvalidSubblockOutput);
            }
        }
        Ok(())
    }
}

/// The input for the client to aggregate multiple subblocks and prove their consistency.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct AggregationInput {
    /// The current block (which will be executed inside the client).
    pub current_block: Block,
    /// The previous block headers starting from the most recent. There must be at least one header
    /// to provide the parent state root.
    pub ancestor_headers: Vec<Header>,
    /// Account bytecodes.
    pub bytecodes: Vec<Bytecode>,
}

impl AggregationInput {
    /// Gets the immediate parent block's header.
    pub fn parent_header(&self) -> &Header {
        &self.ancestor_headers[0]
    }
}

/// A read-only database backed by verified Merkle-Patricia tries.
#[derive(Debug, Clone)]
pub struct TrieDB<'a> {
    inner: &'a EthereumState,
    block_hashes: BTreeMap<u64, B256>,
    bytecode_by_hash: HashMap<B256, &'a Bytecode>,
}

impl<'a> TrieDB<'a> {
    pub fn new(
        inner: &'a EthereumState,
        block_hashes: BTreeMap<u64, B256>,
        bytecode_by_hash: HashMap<B256, &'a Bytecode>,
    ) -> Self {
        Self { inner, block_hashes, bytecode_by_hash }
    }

    /// Looks up an account in the state trie by its hashed (keccak256) address.
    pub fn get_account_from_hashed_address(
        &self,
        hashed_address: &[u8],
    ) -> Result<Option<AccountInfo>, <Self as DatabaseRef>::Error> {
        let account_in_trie = self.inner.state_trie.get_rlp::<TrieAccount>(hashed_address).unwrap();

        // Code is resolved lazily via `code_by_hash_ref`, so it is left as `None` here.
        let account = account_in_trie.map(|account_in_trie| AccountInfo {
            balance: account_in_trie.balance,
            nonce: account_in_trie.nonce,
            code_hash: account_in_trie.code_hash,
            code: None,
        });

        Ok(account)
    }

    /// Reads a storage slot from the account's storage trie by hashed address.
    pub fn get_storage_from_hashed_address(
        &self,
        hashed_address: &[u8],
        index: U256,
    ) -> Result<U256, <Self as DatabaseRef>::Error> {
        let storage_trie = self
            .inner
            .storage_tries
            .get(hashed_address)
            .expect("A storage trie must be provided for each account");

        Ok(storage_trie
            .get_rlp::<U256>(keccak256(index.to_be_bytes::<32>()).as_slice())
            .expect("Can get from MPT")
            .unwrap_or_default())
    }
}

impl DatabaseRef for TrieDB<'_> {
    /// The database error type.
    type Error = ProviderError;

    /// Get basic account information.
    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        let hashed_address = keccak256(address);

        self.get_account_from_hashed_address(hashed_address.as_slice())
    }

    /// Get account code by its hash.
261 | fn code_by_hash_ref(&self, hash: B256) -> Result { 262 | Ok(self.bytecode_by_hash.get(&hash).map(|code| (*code).clone()).unwrap()) 263 | } 264 | 265 | /// Get storage value of address at index. 266 | fn storage_ref(&self, address: Address, index: U256) -> Result { 267 | let hashed_address = keccak256(address); 268 | let hashed_address = hashed_address.as_slice(); 269 | 270 | self.get_storage_from_hashed_address(hashed_address, index) 271 | } 272 | 273 | /// Get block hash by block number. 274 | fn block_hash_ref(&self, number: u64) -> Result { 275 | Ok(*self 276 | .block_hashes 277 | .get(&number) 278 | .expect("A block hash must be provided for each block number")) 279 | } 280 | } 281 | 282 | /// A trait for constructing [`TrieDB`]. 283 | pub trait WitnessInput { 284 | /// Gets a reference to the state from which account info and storage slots are loaded. 285 | fn state(&self) -> &EthereumState; 286 | 287 | /// Gets the state trie root hash that the state referenced by 288 | /// [state()](trait.WitnessInput#tymethod.state) must conform to. 289 | fn state_anchor(&self) -> B256; 290 | 291 | /// Gets an iterator over address state requests. For each request, the account info and storage 292 | /// slots are loaded from the relevant tries in the state returned by 293 | /// [state()](trait.WitnessInput#tymethod.state). 294 | fn state_requests(&self) -> impl Iterator)>; 295 | 296 | /// Gets an iterator over account bytecodes. 297 | fn bytecodes(&self) -> impl Iterator; 298 | 299 | /// Gets an iterator over references to a consecutive, reverse-chronological block headers 300 | /// starting from the current block header. 301 | fn headers(&self) -> impl Iterator; 302 | 303 | /// Creates a [`WitnessDb`] from a [`WitnessInput`] implementation. To do so, it verifies the 304 | /// state root, ancestor headers and account bytecodes, and constructs the account and 305 | /// storage values by reading against state tries. 
306 | /// 307 | /// NOTE: For some unknown reasons, calling this trait method directly from outside of the type 308 | /// implementing this trait causes a zkVM run to cost over 5M cycles more. To avoid this, define 309 | /// a method inside the type that calls this trait method instead. 310 | #[inline(always)] 311 | fn witness_db(&self) -> Result, ClientError> { 312 | let state = self.state(); 313 | 314 | if self.state_anchor() != state.state_root() { 315 | return Err(ClientError::MismatchedStateRoot); 316 | } 317 | 318 | // Verify the storage tries. 319 | for (hashed_address, storage_trie) in state.storage_tries.iter() { 320 | let account = 321 | state.state_trie.get_rlp::(hashed_address.as_slice()).unwrap(); 322 | let storage_root = account.map_or(EMPTY_ROOT_HASH, |a| a.storage_root); 323 | if storage_root != storage_trie.hash() { 324 | return Err(ClientError::MismatchedStorageRoot); 325 | } 326 | } 327 | 328 | let bytecodes_by_hash = 329 | self.bytecodes().map(|code| (code.hash_slow(), code)).collect::>(); 330 | 331 | // Verify and build block hashes 332 | let mut block_hashes: BTreeMap = BTreeMap::new(); 333 | for (child_header, parent_header) in self.headers().tuple_windows() { 334 | if parent_header.number != child_header.number - 1 { 335 | return Err(ClientError::InvalidHeaderBlockNumber( 336 | parent_header.number + 1, 337 | child_header.number, 338 | )); 339 | } 340 | 341 | let parent_header_hash = parent_header.hash_slow(); 342 | if parent_header_hash != child_header.parent_hash { 343 | return Err(ClientError::InvalidHeaderParentHash( 344 | parent_header_hash, 345 | child_header.parent_hash, 346 | )); 347 | } 348 | 349 | block_hashes.insert(parent_header.number, child_header.parent_hash); 350 | } 351 | 352 | Ok(TrieDB::new(state, block_hashes, bytecodes_by_hash)) 353 | } 354 | } 355 | 356 | /// Read a buffer of bytes aligned to N from the SP1 zkVM input stream. 
357 | /// 358 | /// Note: Since `u8` is the smallest alignment, any alignment with N % 4 == 0 is a valid alignment. 359 | /// 360 | /// # Panics 361 | /// - If N is not a multiple of 4. 362 | /// - If the size hinted is 0. 363 | pub fn read_aligned_vec() -> AlignedVec { 364 | cfg_if::cfg_if! { 365 | if #[cfg(target_os = "zkvm")] { 366 | use sp1_zkvm::syscalls::{syscall_hint_len, syscall_hint_read}; 367 | assert!(N % align_of::() == 0, "SP1 zkVM alignment must be a multiple of 4"); 368 | 369 | // Round up to the nearest multiple of 4 so that the memory allocated is in whole words 370 | let len = syscall_hint_len(); 371 | let capacity = (len + 3) / 4 * 4; 372 | 373 | // Allocate a buffer of the required length that is 4 byte aligned 374 | let mut vec = AlignedVec::::with_capacity(capacity); 375 | 376 | // Read the vec into uninitialized memory. The syscall assumes the memory is uninitialized, 377 | // which should be true because the allocator does not dealloc, so a new alloc should be fresh. 378 | unsafe { 379 | syscall_hint_read(vec.as_mut_ptr(), len); 380 | vec.set_len(len); 381 | } 382 | vec 383 | } else { 384 | unimplemented!() 385 | } 386 | } 387 | } 388 | -------------------------------------------------------------------------------- /crates/executor/client/src/lib.rs: -------------------------------------------------------------------------------- 1 | /// Client program input data types. 
2 | pub mod io; 3 | #[macro_use] 4 | mod utils; 5 | pub mod custom; 6 | pub mod error; 7 | 8 | use std::{collections::BTreeMap, fmt::Display, io::Cursor, iter::once}; 9 | 10 | use cfg_if::cfg_if; 11 | use custom::CustomEvmConfig; 12 | use error::ClientError; 13 | use io::{AggregationInput, ClientExecutorInput, SubblockInput, SubblockOutput, TrieDB}; 14 | use itertools::Itertools; 15 | use reth_chainspec::ChainSpec; 16 | use reth_errors::{ConsensusError, ProviderError}; 17 | use reth_ethereum_consensus::{ 18 | validate_block_post_execution as validate_block_post_execution_ethereum, 19 | validate_subblock_post_execution as validate_subblock_post_execution_ethereum, 20 | }; 21 | use reth_evm::execute::{ 22 | BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, 23 | }; 24 | use reth_evm_ethereum::execute::EthExecutorProvider; 25 | use reth_execution_types::ExecutionOutcome; 26 | use reth_primitives::{ 27 | proofs, Block, BlockWithSenders, Bloom, Header, Receipt, Receipts, Request, TransactionSigned, 28 | }; 29 | use revm::{db::WrapDatabaseRef, Database}; 30 | use revm_primitives::{B256, U256}; 31 | use rsp_mpt::EthereumState; 32 | use sha2::{Digest, Sha256}; 33 | 34 | /// Chain ID for Ethereum Mainnet. 35 | pub const CHAIN_ID_ETH_MAINNET: u64 = 0x1; 36 | 37 | /// Chain ID for OP Mainnnet. 38 | pub const CHAIN_ID_OP_MAINNET: u64 = 0xa; 39 | 40 | /// Chain ID for Linea Mainnet. 41 | pub const CHAIN_ID_LINEA_MAINNET: u64 = 0xe708; 42 | 43 | /// Chain ID for Sepolia. 44 | pub const CHAIN_ID_SEPOLIA: u64 = 0xaa36a7; 45 | 46 | /// An executor that executes a block inside a zkVM. 47 | #[derive(Debug, Clone, Default)] 48 | pub struct ClientExecutor; 49 | 50 | /// Trait for representing different execution/validation rules of different chain variants. This 51 | /// allows for dead code elimination to minimize the ELF size for each variant. 
52 | pub trait Variant { 53 | fn spec() -> ChainSpec; 54 | 55 | fn execute( 56 | executor_block_input: &BlockWithSenders, 57 | executor_difficulty: U256, 58 | cache_db: DB, 59 | ) -> Result, BlockExecutionError> 60 | where 61 | DB: Database + Display>; 62 | 63 | fn validate_block_post_execution( 64 | block: &BlockWithSenders, 65 | chain_spec: &ChainSpec, 66 | receipts: &[Receipt], 67 | requests: &[Request], 68 | ) -> Result<(), ConsensusError>; 69 | 70 | fn validate_subblock_aggregation( 71 | _header: &Header, 72 | _chain_spec: &ChainSpec, 73 | _receipts: &[Receipt], 74 | _requests: &[Request], 75 | ) -> Result<(), ConsensusError> { 76 | unimplemented!() 77 | } 78 | 79 | fn pre_process_block(block: &Block) -> Block { 80 | block.clone() 81 | } 82 | } 83 | 84 | /// Implementation for Ethereum-specific execution/validation logic. 85 | #[derive(Debug)] 86 | pub struct EthereumVariant; 87 | 88 | /// EVM chain variants that implement different execution/validation rules. 89 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 90 | pub enum ChainVariant { 91 | /// Ethereum networks. 92 | Ethereum, 93 | } 94 | 95 | impl ChainVariant { 96 | /// Returns the chain ID for the given variant. 97 | pub fn chain_id(&self) -> u64 { 98 | match self { 99 | ChainVariant::Ethereum => CHAIN_ID_ETH_MAINNET, 100 | } 101 | } 102 | } 103 | 104 | impl ClientExecutor { 105 | pub fn execute(&self, mut input: ClientExecutorInput) -> Result 106 | where 107 | V: Variant, 108 | { 109 | // Initialize the witnessed database with verified storage proofs. 110 | let wrap_ref = profile!("initialize witness db", { 111 | let trie_db = input.witness_db().unwrap(); 112 | WrapDatabaseRef(trie_db) 113 | }); 114 | 115 | // Execute the block. 
116 | let spec = V::spec(); 117 | let executor_block_input = profile!("recover senders", { 118 | input 119 | .current_block 120 | .clone() 121 | .with_recovered_senders() 122 | .ok_or(ClientError::SignatureRecoveryFailed) 123 | })?; 124 | let executor_difficulty = input.current_block.header.difficulty; 125 | let executor_output = profile!("execute", { 126 | V::execute(&executor_block_input, executor_difficulty, wrap_ref) 127 | })?; 128 | 129 | // Validate the block post execution. 130 | profile!("validate block post-execution", { 131 | V::validate_block_post_execution( 132 | &executor_block_input, 133 | &spec, 134 | &executor_output.receipts, 135 | &executor_output.requests, 136 | ) 137 | })?; 138 | 139 | // Accumulate the logs bloom. 140 | let mut logs_bloom = Bloom::default(); 141 | profile!("accrue logs bloom", { 142 | executor_output.receipts.iter().for_each(|r| { 143 | logs_bloom.accrue_bloom(&r.bloom_slow()); 144 | }) 145 | }); 146 | 147 | // Convert the output to an execution outcome. 148 | let executor_outcome = ExecutionOutcome::new( 149 | executor_output.state, 150 | Receipts::from(executor_output.receipts), 151 | input.current_block.header.number, 152 | vec![executor_output.requests.into()], 153 | ); 154 | 155 | // Verify the state root. 156 | let state_root = profile!("compute state root", { 157 | input.parent_state.update(&executor_outcome.hash_state_slow()); 158 | input.parent_state.state_root() 159 | }); 160 | 161 | if state_root != input.current_block.state_root { 162 | return Err(ClientError::MismatchedStateRoot); 163 | } 164 | 165 | // Derive the block header. 166 | // 167 | // Note: the receipts root and gas used are verified by `validate_block_post_execution`. 
168 | let mut header = input.current_block.header.clone(); 169 | header.parent_hash = input.parent_header().hash_slow(); 170 | header.ommers_hash = proofs::calculate_ommers_root(&input.current_block.ommers); 171 | header.state_root = input.current_block.state_root; 172 | header.transactions_root = proofs::calculate_transaction_root(&input.current_block.body); 173 | header.receipts_root = input.current_block.header.receipts_root; 174 | header.withdrawals_root = input 175 | .current_block 176 | .withdrawals 177 | .take() 178 | .map(|w| proofs::calculate_withdrawals_root(w.into_inner().as_slice())); 179 | header.logs_bloom = logs_bloom; 180 | header.requests_root = 181 | input.current_block.requests.as_ref().map(|r| proofs::calculate_requests_root(&r.0)); 182 | 183 | Ok(header) 184 | } 185 | 186 | /// Executes a SubblockInput, and returns a SubblockOutput. 187 | pub fn execute_subblock( 188 | &self, 189 | input: SubblockInput, 190 | input_state: &mut EthereumState, 191 | ) -> Result 192 | where 193 | V: Variant, 194 | { 195 | let input_state_root = profile!("compute input state root", { input_state.state_root() }); 196 | 197 | let wrap_ref = profile!("construct trie db", { 198 | // Finally, construct the database. 199 | let bytecode_by_hash = input.bytecodes.iter().map(|b| (b.hash_slow(), b)).collect(); 200 | let trie_db = TrieDB::new(input_state, input.block_hashes, bytecode_by_hash); 201 | WrapDatabaseRef(trie_db) 202 | }); 203 | 204 | // Execute the block. 
205 | let mut executor_block_input = profile!("recover senders", { 206 | input 207 | .current_block 208 | .clone() 209 | .with_recovered_senders() 210 | .ok_or(ClientError::SignatureRecoveryFailed) 211 | })?; 212 | executor_block_input.is_first_subblock = input.is_first_subblock; 213 | executor_block_input.is_last_subblock = input.is_last_subblock; 214 | executor_block_input.starting_gas_used = input.starting_gas_used; 215 | 216 | let executor_difficulty = input.current_block.header.difficulty; 217 | let executor_output = profile!("execute", { 218 | V::execute(&executor_block_input, executor_difficulty, wrap_ref) 219 | })?; 220 | 221 | let requests = executor_output.requests.clone(); 222 | let receipts = executor_output.receipts.clone(); 223 | 224 | let mut logs_bloom = Bloom::default(); 225 | profile!("accrue logs bloom", { 226 | executor_output.receipts.iter().for_each(|r| { 227 | logs_bloom.accrue_bloom(&r.bloom_slow()); 228 | }) 229 | }); 230 | 231 | let subblock_output = profile!("finalize output", { 232 | // Convert the output to an execution outcome. 233 | let executor_outcome = ExecutionOutcome::new( 234 | executor_output.state, 235 | Receipts::from(executor_output.receipts), 236 | input.current_block.header.number, 237 | vec![executor_output.requests.into()], 238 | ); 239 | 240 | let hash_state = executor_outcome.hash_state_slow(); 241 | 242 | // Get the output state root by applying the diff to the input state. 243 | input_state.update(&hash_state); 244 | let output_state_root = input_state.state_root(); 245 | 246 | SubblockOutput { output_state_root, logs_bloom, receipts, input_state_root, requests } 247 | }); 248 | 249 | Ok(subblock_output) 250 | } 251 | 252 | /// Executes the aggregation of multiple subblocks. 253 | /// 254 | /// When executed in the zkvm, this will verify all of the subblock proofs and perform 255 | /// consistency checks between them. The aggregation input is committed as a public value, so 256 | /// it is taken as a trusted input. 
257 | #[allow(unused)] 258 | pub fn execute_aggregation( 259 | &self, 260 | public_values: Vec>, 261 | vkey: [u32; 8], 262 | mut aggregation_input: AggregationInput, 263 | parent_state_root: B256, 264 | ) -> Result { 265 | let mut cumulative_state_diff = 266 | SubblockOutput { output_state_root: parent_state_root, ..Default::default() }; 267 | let mut transaction_body: Vec = Vec::new(); 268 | let mut block_hashes = None; 269 | profile!("aggregate", { 270 | for (i, public_value) in public_values.iter().enumerate() { 271 | let public_values_digest = Sha256::digest(public_value); 272 | cfg_if! { 273 | if #[cfg(target_os = "zkvm")] { 274 | sp1_zkvm::lib::verify::verify_sp1_proof(&vkey, &public_values_digest.into()); 275 | } 276 | } 277 | println!("cycle-tracker-start: deserialize subblock input"); 278 | let mut reader = Cursor::new(&public_value); 279 | let subblock_input: SubblockInput = bincode::deserialize_from(&mut reader).unwrap(); 280 | println!("cycle-tracker-end: deserialize subblock input"); 281 | 282 | // Every subblock should have at least one block hash: the immediate parent block hash. 283 | // So an empty block_hashes indicates that this is the first subblock. 284 | if i == 0 && block_hashes.is_none() { 285 | block_hashes = Some(subblock_input.block_hashes); 286 | } else { 287 | assert_eq!(block_hashes, Some(subblock_input.block_hashes)); 288 | } 289 | 290 | // Check that the starting gas used is the same as the last cumulative gas used. 291 | assert_eq!( 292 | subblock_input.starting_gas_used, 293 | cumulative_state_diff 294 | .receipts 295 | .last() 296 | .map(|r| r.cumulative_gas_used) 297 | .unwrap_or(0) 298 | ); 299 | 300 | // Consistency checks on the subblock input's first/last subblock flags. 
301 | if i == 0 { 302 | assert!(subblock_input.is_first_subblock); 303 | } 304 | if i == public_values.len() - 1 { 305 | assert!(subblock_input.is_last_subblock); 306 | } 307 | if i > 0 && i < public_values.len() - 1 { 308 | assert!(!subblock_input.is_first_subblock); 309 | assert!(!subblock_input.is_last_subblock); 310 | } 311 | 312 | // Check that the subblock header, ommers, withdrawals, and requests are the same as 313 | // the main block. 314 | assert_eq!( 315 | subblock_input.current_block.header, 316 | aggregation_input.current_block.header 317 | ); 318 | assert_eq!( 319 | subblock_input.current_block.ommers, 320 | aggregation_input.current_block.ommers 321 | ); 322 | assert_eq!( 323 | subblock_input.current_block.withdrawals, 324 | aggregation_input.current_block.withdrawals 325 | ); 326 | assert_eq!( 327 | subblock_input.current_block.requests, 328 | aggregation_input.current_block.requests 329 | ); 330 | println!("cycle-tracker-start: deserialize subblock output"); 331 | 332 | let subblock_output: SubblockOutput = 333 | bincode::deserialize_from(&mut reader).unwrap(); 334 | println!("cycle-tracker-end: deserialize subblock output"); 335 | 336 | println!("cycle-tracker-start: extend state"); 337 | 338 | // Accumulate subblock's output into the cumulative state diff. 339 | // This function also contains consistency checks between the cumulative state diff 340 | // and the subblock output. 341 | cumulative_state_diff.extend(subblock_output); 342 | 343 | // Also add this subblock's transaction body to the transaction body. 
344 | transaction_body.extend(subblock_input.current_block.body); 345 | println!("cycle-tracker-end: extend state"); 346 | } 347 | }); 348 | 349 | profile!("verify block hashes", { 350 | let mut reconstructed_block_hashes: BTreeMap = BTreeMap::new(); 351 | for (child_header, parent_header) in once(&aggregation_input.current_block.header) 352 | .chain(aggregation_input.ancestor_headers.iter()) 353 | .tuple_windows() 354 | { 355 | assert!(parent_header.number == child_header.number - 1); 356 | 357 | let parent_header_hash = parent_header.hash_slow(); 358 | assert_eq!(parent_header_hash, child_header.parent_hash); 359 | 360 | reconstructed_block_hashes.insert(parent_header.number, parent_header_hash); 361 | } 362 | 363 | assert_eq!(reconstructed_block_hashes, block_hashes.unwrap()); 364 | }); 365 | 366 | // Check that the subblock transactions match the main block transactions. 367 | assert_eq!( 368 | transaction_body, aggregation_input.current_block.body, 369 | "subblock transactions do not match main block transactions" 370 | ); 371 | 372 | profile!("validate subblock aggregation", { 373 | // Check that the accumulated logs bloom is the same as the main block logs bloom. 374 | assert_eq!( 375 | cumulative_state_diff.logs_bloom, 376 | aggregation_input.current_block.header.logs_bloom 377 | ); 378 | V::validate_subblock_aggregation( 379 | &aggregation_input.current_block.header, 380 | &V::spec(), 381 | &cumulative_state_diff.receipts, 382 | &cumulative_state_diff.requests, 383 | ) 384 | .expect("failed to validate subblock aggregation") 385 | }); 386 | 387 | // The final state root of the entire block is the cumulative output state root. 388 | let state_root = cumulative_state_diff.output_state_root; 389 | if state_root != aggregation_input.current_block.state_root { 390 | panic!( 391 | "mismatched state root: {state_root} != {:?}", 392 | aggregation_input.current_block.state_root 393 | ); 394 | } 395 | 396 | // Derive the block header. 
397 | // 398 | // Note: the receipts root and gas used are verified by `validate_subblock_aggregation`. 399 | let mut header = aggregation_input.current_block.header.clone(); 400 | header.parent_hash = aggregation_input.parent_header().hash_slow(); 401 | header.ommers_hash = proofs::calculate_ommers_root(&aggregation_input.current_block.ommers); 402 | header.state_root = aggregation_input.current_block.state_root; 403 | header.transactions_root = 404 | proofs::calculate_transaction_root(&aggregation_input.current_block.body); 405 | header.receipts_root = aggregation_input.current_block.header.receipts_root; 406 | header.withdrawals_root = aggregation_input 407 | .current_block 408 | .withdrawals 409 | .take() 410 | .map(|w| proofs::calculate_withdrawals_root(w.into_inner().as_slice())); 411 | header.logs_bloom = cumulative_state_diff.logs_bloom; 412 | header.requests_root = aggregation_input 413 | .current_block 414 | .requests 415 | .as_ref() 416 | .map(|r| proofs::calculate_requests_root(&r.0)); 417 | 418 | Ok(header) 419 | } 420 | } 421 | 422 | impl Variant for EthereumVariant { 423 | fn spec() -> ChainSpec { 424 | rsp_primitives::chain_spec::mainnet() 425 | } 426 | 427 | fn execute( 428 | executor_block_input: &BlockWithSenders, 429 | executor_difficulty: U256, 430 | cache_db: DB, 431 | ) -> Result, BlockExecutionError> 432 | where 433 | DB: Database + Display>, 434 | { 435 | EthExecutorProvider::new( 436 | Self::spec().into(), 437 | CustomEvmConfig::from_variant(ChainVariant::Ethereum), 438 | ) 439 | .executor(cache_db) 440 | .execute((executor_block_input, executor_difficulty).into()) 441 | } 442 | 443 | fn validate_block_post_execution( 444 | block: &BlockWithSenders, 445 | chain_spec: &ChainSpec, 446 | receipts: &[Receipt], 447 | requests: &[Request], 448 | ) -> Result<(), ConsensusError> { 449 | validate_block_post_execution_ethereum(block, chain_spec, receipts, requests) 450 | } 451 | 452 | fn validate_subblock_aggregation( 453 | header: &Header, 454 | 
chain_spec: &ChainSpec, 455 | receipts: &[Receipt], 456 | requests: &[Request], 457 | ) -> Result<(), ConsensusError> { 458 | validate_subblock_post_execution_ethereum(header, chain_spec, receipts, requests) 459 | } 460 | } 461 | -------------------------------------------------------------------------------- /crates/executor/client/src/utils.rs: -------------------------------------------------------------------------------- 1 | macro_rules! profile { 2 | ($name:expr, $block:block) => {{ 3 | #[cfg(target_os = "zkvm")] 4 | { 5 | println!("cycle-tracker-start: {}", $name); 6 | let result = (|| $block)(); 7 | println!("cycle-tracker-end: {}", $name); 8 | result 9 | } 10 | 11 | #[cfg(not(target_os = "zkvm"))] 12 | { 13 | $block 14 | } 15 | }}; 16 | } 17 | -------------------------------------------------------------------------------- /crates/executor/host/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rsp-host-executor" 3 | description = "" 4 | version.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | [lints] 11 | workspace = true 12 | 13 | [dependencies] 14 | # workspace 15 | rsp-rpc-db.workspace = true 16 | rsp-client-executor.workspace = true 17 | rsp-mpt = { workspace = true, features = ["preimage_context"] } 18 | rsp-primitives.workspace = true 19 | 20 | # reth 21 | reth-codecs.workspace = true 22 | reth-primitives = { workspace = true, features = ["secp256k1"] } 23 | reth-storage-errors.workspace = true 24 | reth-trie.workspace = true 25 | reth-execution-types.workspace = true 26 | reth-errors.workspace = true 27 | reth-chainspec.workspace = true 28 | 29 | # revm 30 | revm.workspace = true 31 | revm-primitives.workspace = true 32 | 33 | # alloy 34 | alloy-provider.workspace = true 35 | alloy-transport.workspace = true 36 | alloy-rpc-types.workspace = true 37 | 38 | # rkyv 39 | rkyv.workspace = 
true

# misc
itertools.workspace = true
lazy_static.workspace = true
thiserror.workspace = true
tokio.workspace = true
tracing.workspace = true

[dev-dependencies]
alloy-primitives.workspace = true
tracing-subscriber = "0.3.18"
bincode = "1.3.3"
dotenv = "0.15.0"
--------------------------------------------------------------------------------
/crates/executor/host/src/error.rs:
--------------------------------------------------------------------------------
use alloy_rpc_types::ConversionError;
use alloy_transport::TransportError;
use reth_errors::BlockExecutionError;
use revm_primitives::B256;
use rsp_client_executor::error::ClientError;
use rsp_mpt::FromProofError;

/// Errors surfaced by the host-side executor while fetching RPC data and
/// re-executing blocks locally.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("Failed to parse blocks into executor friendly format {}", .0)]
    ParseError(#[from] ConversionError),
    #[error("Transport Error: {}", .0)]
    Transport(#[from] TransportError),
    #[error("Failed to recover senders from RPC block data")]
    FailedToRecoverSenders,
    #[error("Failed to validate post execution state")]
    PostExecutionCheck(#[from] reth_errors::ConsensusError),
    #[error("Local Execution Failed {}", .0)]
    ExecutionFailed(#[from] BlockExecutionError),
    #[error("Failed to construct a valid state trie from RPC data {}", .0)]
    FromProof(#[from] FromProofError),
    // Typo fixed in the message below: "didnt" -> "didn't".
    #[error("RPC didn't have expected block height {}", .0)]
    ExpectedBlock(u64),
    #[error("Header Mismatch \n found {} expected {}", .0, .1)]
    HeaderMismatch(B256, B256),
    #[error("State root mismatch after local execution \n found {} expected {}", .0, .1)]
    StateRootMismatch(B256, B256),
    #[error("Client validation error: {}", .0)]
    ClientValidation(#[from] ClientError),
}
--------------------------------------------------------------------------------
/crates/executor/host/src/lib.rs:
-------------------------------------------------------------------------------- 1 | mod error; 2 | pub use error::Error as HostError; 3 | use reth_trie::AccountProof; 4 | use std::{ 5 | collections::{BTreeMap, BTreeSet, HashMap}, 6 | marker::PhantomData, 7 | sync::Arc, 8 | time::Duration, 9 | }; 10 | 11 | use alloy_provider::{network::AnyNetwork, Provider}; 12 | use alloy_transport::Transport; 13 | use itertools::Itertools; 14 | use reth_execution_types::ExecutionOutcome; 15 | use reth_primitives::{proofs, Block, Bloom, Receipts, B256, U256}; 16 | use revm::db::CacheDB; 17 | use revm_primitives::{keccak256, Address}; 18 | use rsp_client_executor::{ 19 | io::{ 20 | AggregationInput, ClientExecutorInput, SubblockHostOutput, SubblockInput, SubblockOutput, 21 | }, 22 | ChainVariant, EthereumVariant, Variant, 23 | }; 24 | use rsp_mpt::EthereumState; 25 | use rsp_primitives::account_proof::eip1186_proof_to_account_proof; 26 | use rsp_rpc_db::RpcDb; 27 | use tokio::{task::JoinSet, time::sleep}; 28 | 29 | /// The maximum number of times to retry fetching a proof. 30 | const MAX_PROOF_RETRIES: u32 = 3; 31 | /// The initial backoff duration for proof fetching retries. 32 | const INITIAL_RETRY_BACKOFF: Duration = Duration::from_millis(100); 33 | 34 | /// An executor that fetches data from a [Provider] to execute blocks in the [ClientExecutor]. 35 | #[derive(Debug, Clone)] 36 | pub struct HostExecutor + Clone> { 37 | /// The provider which fetches data. 38 | pub provider: Arc

, 39 | /// A phantom type to make the struct generic over the transport. 40 | pub phantom: PhantomData, 41 | } 42 | lazy_static::lazy_static! { 43 | /// Amount of gas used per subblock. 44 | pub static ref SUBBLOCK_GAS_LIMIT: u64 = std::env::var("SUBBLOCK_GAS_LIMIT") 45 | .map(|s| s.parse().unwrap()) 46 | .unwrap_or(1_000_000); 47 | } 48 | 49 | fn merge_state_requests( 50 | state_requests: &mut HashMap>, 51 | subblock_state_requests: &HashMap>, 52 | ) { 53 | for (address, keys) in subblock_state_requests.iter() { 54 | state_requests.entry(*address).or_default().extend(keys.iter().cloned()); 55 | } 56 | } 57 | 58 | impl + Clone + 'static> HostExecutor { 59 | /// Create a new [`HostExecutor`] with a specific [Provider] and [Transport]. 60 | pub fn new(provider: P) -> Self { 61 | Self { provider: Arc::new(provider), phantom: PhantomData } 62 | } 63 | 64 | async fn get_proof( 65 | provider: Arc

, 66 | address: Address, 67 | keys: Vec, 68 | block_number: u64, 69 | ) -> Result { 70 | let mut attempts = 0; 71 | let mut backoff = INITIAL_RETRY_BACKOFF; 72 | 73 | loop { 74 | match provider.get_proof(address, keys.clone()).block_id((block_number).into()).await { 75 | Ok(proof) => return Ok(eip1186_proof_to_account_proof(proof)), 76 | Err(e) => { 77 | attempts += 1; 78 | if attempts >= MAX_PROOF_RETRIES { 79 | tracing::error!( 80 | "Failed to get proof for address {} at block {} after {} attempts: {:?}", 81 | address, 82 | block_number, 83 | attempts, 84 | e 85 | ); 86 | // Consider returning a more specific error if needed 87 | return Err(HostError::Transport(e)); 88 | } 89 | tracing::warn!( 90 | "Attempt {} failed to get proof for address {} at block {}. Retrying in {:?}...", 91 | attempts, 92 | address, 93 | block_number, 94 | backoff 95 | ); 96 | sleep(backoff).await; 97 | // Exponential backoff 98 | backoff *= 2; 99 | } 100 | } 101 | } 102 | } 103 | 104 | /// Executes the block with the given block number. 105 | pub async fn execute( 106 | &self, 107 | block_number: u64, 108 | variant: ChainVariant, 109 | ) -> Result { 110 | match variant { 111 | ChainVariant::Ethereum => self.execute_variant::(block_number).await, 112 | } 113 | } 114 | 115 | /// Executes the block with the given block number and returns the client input for each 116 | /// subblock. 117 | pub async fn execute_subblock( 118 | &self, 119 | block_number: u64, 120 | variant: ChainVariant, 121 | ) -> Result { 122 | match variant { 123 | ChainVariant::Ethereum => { 124 | self.execute_variant_subblocks::(block_number).await 125 | } 126 | } 127 | } 128 | 129 | async fn execute_variant(&self, block_number: u64) -> Result 130 | where 131 | V: Variant, 132 | { 133 | // Fetch the current block and the previous block from the provider. 
134 | tracing::info!("fetching the current block and the previous block"); 135 | 136 | let current_block = self 137 | .provider 138 | .get_block_by_number(block_number.into(), true) 139 | .await? 140 | .ok_or(HostError::ExpectedBlock(block_number)) 141 | .map(|block| Block::try_from(block.inner))??; 142 | 143 | let previous_block = self 144 | .provider 145 | .get_block_by_number((block_number - 1).into(), true) 146 | .await? 147 | .ok_or(HostError::ExpectedBlock(block_number)) 148 | .map(|block| Block::try_from(block.inner))??; 149 | 150 | // Setup the spec for the block executor. 151 | tracing::info!("setting up the spec for the block executor"); 152 | let spec = V::spec(); 153 | 154 | // Setup the database for the block executor. 155 | tracing::info!("setting up the database for the block executor"); 156 | let rpc_db = RpcDb::new(self.provider.clone(), block_number - 1); 157 | let cache_db = CacheDB::new(&rpc_db); 158 | 159 | // Execute the block and fetch all the necessary data along the way. 160 | tracing::info!( 161 | "executing the block and with rpc db: block_number={}, transaction_count={}", 162 | block_number, 163 | current_block.body.len() 164 | ); 165 | 166 | let executor_block_input = V::pre_process_block(¤t_block) 167 | .with_recovered_senders() 168 | .ok_or(HostError::FailedToRecoverSenders)?; 169 | 170 | let executor_difficulty = current_block.header.difficulty; 171 | let executor_output = V::execute(&executor_block_input, executor_difficulty, cache_db)?; 172 | 173 | // Validate the block post execution. 174 | tracing::info!("validating the block post execution"); 175 | V::validate_block_post_execution( 176 | &executor_block_input, 177 | &spec, 178 | &executor_output.receipts, 179 | &executor_output.requests, 180 | )?; 181 | 182 | // Accumulate the logs bloom. 
183 | tracing::info!("accumulating the logs bloom"); 184 | let mut logs_bloom = Bloom::default(); 185 | executor_output.receipts.iter().for_each(|r| { 186 | logs_bloom.accrue_bloom(&r.bloom_slow()); 187 | }); 188 | 189 | // Convert the output to an execution outcome. 190 | let executor_outcome = ExecutionOutcome::new( 191 | executor_output.state, 192 | Receipts::from(executor_output.receipts), 193 | current_block.header.number, 194 | vec![executor_output.requests.into()], 195 | ); 196 | 197 | let state_requests = rpc_db.get_state_requests(); 198 | 199 | // For every account we touched, fetch the storage proofs for all the slots we touched. 200 | tracing::info!("fetching storage proofs"); 201 | let mut before_storage_proofs = Vec::new(); 202 | let mut after_storage_proofs = Vec::new(); 203 | 204 | for (address, used_keys) in state_requests.iter() { 205 | let modified_keys = executor_outcome 206 | .state() 207 | .state 208 | .get(address) 209 | .map(|account| { 210 | account.storage.keys().map(|key| B256::from(*key)).collect::>() 211 | }) 212 | .unwrap_or_default() 213 | .into_iter() 214 | .collect::>(); 215 | 216 | let keys = used_keys 217 | .iter() 218 | .map(|key| B256::from(*key)) 219 | .chain(modified_keys.clone().into_iter()) 220 | .collect::>() 221 | .into_iter() 222 | .collect::>(); 223 | 224 | let storage_proof = self 225 | .provider 226 | .get_proof(*address, keys.clone()) 227 | .block_id((block_number - 1).into()) 228 | .await?; 229 | before_storage_proofs.push(eip1186_proof_to_account_proof(storage_proof)); 230 | 231 | let storage_proof = self 232 | .provider 233 | .get_proof(*address, modified_keys) 234 | .block_id((block_number).into()) 235 | .await?; 236 | after_storage_proofs.push(eip1186_proof_to_account_proof(storage_proof)); 237 | } 238 | 239 | let state = EthereumState::from_transition_proofs( 240 | previous_block.state_root, 241 | &before_storage_proofs.iter().map(|item| (item.address, item.clone())).collect(), 242 | 
&after_storage_proofs.iter().map(|item| (item.address, item.clone())).collect(), 243 | )?; 244 | 245 | // Verify the state root. 246 | tracing::info!("verifying the state root"); 247 | let state_root = { 248 | let mut mutated_state = state.clone(); 249 | mutated_state.update(&executor_outcome.hash_state_slow()); 250 | mutated_state.state_root() 251 | }; 252 | if state_root != current_block.state_root { 253 | return Err(HostError::StateRootMismatch(state_root, current_block.state_root)); 254 | } 255 | 256 | // Derive the block header. 257 | // 258 | // Note: the receipts root and gas used are verified by `validate_block_post_execution`. 259 | let mut header = current_block.header.clone(); 260 | header.parent_hash = previous_block.hash_slow(); 261 | header.ommers_hash = proofs::calculate_ommers_root(¤t_block.ommers); 262 | header.state_root = current_block.state_root; 263 | header.transactions_root = proofs::calculate_transaction_root(¤t_block.body); 264 | header.receipts_root = current_block.header.receipts_root; 265 | header.withdrawals_root = current_block 266 | .withdrawals 267 | .clone() 268 | .map(|w| proofs::calculate_withdrawals_root(w.into_inner().as_slice())); 269 | header.logs_bloom = logs_bloom; 270 | header.requests_root = 271 | current_block.requests.as_ref().map(|r| proofs::calculate_requests_root(&r.0)); 272 | 273 | // Assert the derived header is correct. 274 | let constructed_header_hash = header.hash_slow(); 275 | let target_hash = current_block.header.hash_slow(); 276 | if constructed_header_hash != target_hash { 277 | return Err(HostError::HeaderMismatch(constructed_header_hash, target_hash)); 278 | } 279 | 280 | // Log the result. 281 | tracing::info!( 282 | "successfully executed block: block_number={}, block_hash={}, state_root={}", 283 | current_block.header.number, 284 | header.hash_slow(), 285 | state_root 286 | ); 287 | 288 | // Fetch the parent headers needed to constrain the BLOCKHASH opcode. 
289 | let oldest_ancestor = *rpc_db.oldest_ancestor.borrow(); 290 | let mut ancestor_headers = vec![]; 291 | tracing::info!("fetching {} ancestor headers", block_number - oldest_ancestor); 292 | for height in (oldest_ancestor..=(block_number - 1)).rev() { 293 | let block = self 294 | .provider 295 | .get_block_by_number(height.into(), false) 296 | .await? 297 | .ok_or(HostError::ExpectedBlock(height))?; 298 | 299 | ancestor_headers.push(block.inner.header.try_into()?); 300 | } 301 | 302 | // Create the client input. 303 | let client_input = ClientExecutorInput { 304 | current_block: V::pre_process_block(¤t_block), 305 | ancestor_headers, 306 | parent_state: state, 307 | state_requests, 308 | bytecodes: rpc_db.get_bytecodes(), 309 | }; 310 | tracing::info!("successfully generated client input"); 311 | 312 | Ok(client_input) 313 | } 314 | 315 | async fn execute_variant_subblocks( 316 | &self, 317 | block_number: u64, 318 | ) -> Result 319 | where 320 | V: Variant, 321 | { 322 | // Fetch the current block and the previous block from the provider. 323 | tracing::info!("fetching the current block and the previous block"); 324 | let current_block = self 325 | .provider 326 | .get_block_by_number(block_number.into(), true) 327 | .await? 328 | .ok_or(HostError::ExpectedBlock(block_number)) 329 | .map(|block| Block::try_from(block.inner))??; 330 | 331 | let previous_block = self 332 | .provider 333 | .get_block_by_number((block_number - 1).into(), true) 334 | .await? 335 | .ok_or(HostError::ExpectedBlock(block_number)) 336 | .map(|block| Block::try_from(block.inner))??; 337 | 338 | let total_transactions = current_block.body.len() as u64; 339 | 340 | let previous_block_hash = previous_block.hash_slow(); 341 | 342 | // Setup the spec for the block executor. 343 | tracing::info!("setting up the spec for the block executor"); 344 | 345 | // Setup the database for the block executor. 
346 | tracing::info!("setting up the database for the block executor"); 347 | let mut rpc_db = RpcDb::new(self.provider.clone(), block_number - 1); 348 | 349 | // Execute the block and fetch all the necessary data along the way. 350 | tracing::info!( 351 | "executing the block and with rpc db: block_number={}, transaction_count={}", 352 | block_number, 353 | total_transactions 354 | ); 355 | 356 | let executor_block_input = V::pre_process_block(¤t_block) 357 | .with_recovered_senders() 358 | .ok_or(HostError::FailedToRecoverSenders)?; 359 | 360 | let executor_difficulty = current_block.header.difficulty; 361 | 362 | // These accumulate across multiple subblocks. 363 | let mut cumulative_executor_outcomes = ExecutionOutcome::default(); 364 | let mut cumulative_state_requests = HashMap::new(); 365 | 366 | // These store individual state requests, executor outcomes, and state diffs for each subblock. 367 | let mut all_state_requests = Vec::new(); 368 | let mut all_executor_outcomes = Vec::new(); 369 | let mut state_diffs = Vec::new(); 370 | 371 | // The number of transactions completed so far. 372 | let mut num_transactions_completed: u64 = 0; 373 | 374 | // The amount of gas used so far. 375 | let mut cumulative_gas_used = 0; 376 | 377 | // The accumulated logs bloom, across subblocks. 378 | let mut global_logs_bloom = Bloom::default(); 379 | 380 | // These store the inputs, outputs, and parent states for each subblock. 381 | // These are eventually fed into the zkvm. 382 | let mut subblock_inputs = Vec::new(); 383 | let mut subblock_outputs = Vec::new(); 384 | let mut subblock_parent_states = Vec::new(); 385 | 386 | loop { 387 | tracing::info!("executing subblock"); 388 | let cache_db = CacheDB::new(&rpc_db); 389 | 390 | // Slice the block to only include the transactions that have not been executed yet. 
391 | let mut subblock_input = executor_block_input.clone(); 392 | subblock_input.body = 393 | subblock_input.body[num_transactions_completed as usize..].to_vec(); 394 | subblock_input.senders = 395 | subblock_input.senders[num_transactions_completed as usize..].to_vec(); 396 | 397 | // Set the subblock configuration. In the host, we set `subblock_gas_limit` to the 398 | // `SUBBLOCK_GAS_LIMIT` environment variable. Then, even though many transactions may 399 | // be included in the executor input, the subblock will only execute up to the 400 | // `subblock_gas_limit`. 401 | let is_first_subblock = num_transactions_completed == 0; 402 | subblock_input.is_first_subblock = is_first_subblock; 403 | subblock_input.is_last_subblock = false; 404 | subblock_input.subblock_gas_limit = *SUBBLOCK_GAS_LIMIT + cumulative_gas_used; 405 | subblock_input.starting_gas_used = cumulative_gas_used; 406 | let starting_gas_used = cumulative_gas_used; 407 | 408 | tracing::info!("num transactions left: {}", subblock_input.body.len()); 409 | 410 | // Execute the subblock. 411 | let subblock_output = V::execute(&subblock_input, executor_difficulty, cache_db)?; 412 | 413 | let num_executed_transactions = subblock_output.receipts.len(); 414 | let upper = num_transactions_completed + num_executed_transactions as u64; 415 | let is_last_subblock = upper == current_block.body.len() as u64; 416 | 417 | tracing::info!( 418 | "successfully executed subblock: num_transactions_completed={}, upper={}", 419 | num_transactions_completed, 420 | upper 421 | ); 422 | 423 | // Accumulate the logs bloom. 424 | tracing::info!("accumulating the logs bloom"); 425 | let mut logs_bloom = Bloom::default(); 426 | subblock_output.receipts.iter().for_each(|r| { 427 | logs_bloom.accrue_bloom(&r.bloom_slow()); 428 | }); 429 | global_logs_bloom.accrue_bloom(&logs_bloom); 430 | 431 | // Using the diffs from the bundle, update the RPC DB. 
432 | // This way, the next subblock will see the state changes from the current subblock. 433 | rpc_db.update_state_diffs(&subblock_output.state); 434 | 435 | // Update the cumulative gas used. 436 | let receipts = subblock_output.receipts.clone(); 437 | cumulative_gas_used += 438 | receipts.last().map(|r| r.cumulative_gas_used - starting_gas_used).unwrap_or(0); 439 | 440 | // Convert the output to an execution outcome. 441 | let executor_outcome = ExecutionOutcome::new( 442 | subblock_output.state, 443 | Receipts::from(subblock_output.receipts), 444 | current_block.header.number, 445 | vec![subblock_output.requests.into()], 446 | ); 447 | all_executor_outcomes.push(executor_outcome.clone()); 448 | 449 | // Save the subblock's `HashedPostState` for debugging. 450 | let target_post_state = executor_outcome.hash_state_slow(); 451 | state_diffs.push(target_post_state); 452 | 453 | // Initialize and set part of the subblock output. The rest will be set later. 454 | let subblock_output = SubblockOutput { 455 | receipts, 456 | logs_bloom, 457 | output_state_root: B256::default(), 458 | input_state_root: B256::default(), 459 | requests: vec![], 460 | }; 461 | subblock_outputs.push(subblock_output); 462 | 463 | // Accumulate this subblock's `ExecutionOutcome` into `cumulative_executor_outcomes`. 464 | cumulative_executor_outcomes.extend(executor_outcome); 465 | 466 | // Record the state requests for this subblock. 467 | let subblock_state_requests = rpc_db.get_state_requests(); 468 | 469 | // Merge the state requests from the subblock into `cumulative_state_requests`. 
470 | merge_state_requests(&mut cumulative_state_requests, &subblock_state_requests); 471 | all_state_requests.push(subblock_state_requests); 472 | 473 | let mut subblock_input = SubblockInput { 474 | current_block: V::pre_process_block(¤t_block), 475 | block_hashes: BTreeMap::new(), 476 | bytecodes: rpc_db.get_bytecodes(), 477 | is_first_subblock, 478 | is_last_subblock, 479 | starting_gas_used, 480 | }; 481 | 482 | // Slice the correct transactions for this subblock 483 | subblock_input.current_block.body = subblock_input.current_block.body 484 | [num_transactions_completed as usize..upper as usize] 485 | .to_vec(); 486 | 487 | // Advance subblock. 488 | num_transactions_completed = upper; 489 | rpc_db.advance_subblock(); 490 | 491 | subblock_inputs.push(subblock_input); 492 | 493 | if num_transactions_completed >= current_block.body.len() as u64 { 494 | break; 495 | } 496 | } 497 | 498 | // Build parent state from modified keys and used keys from this subblock 499 | let mut before_storage_proofs = Vec::new(); 500 | let mut after_storage_proofs = Vec::new(); 501 | 502 | for chunk in cumulative_state_requests.into_iter().chunks(10).into_iter() { 503 | let mut before_handles = JoinSet::new(); 504 | let mut after_handles = JoinSet::new(); 505 | for (address, used_keys) in chunk { 506 | let modified_keys = cumulative_executor_outcomes 507 | .state() 508 | .state 509 | .get(&address) 510 | .map(|account| { 511 | account.storage.keys().map(|key| B256::from(*key)).collect::>() 512 | }) 513 | .unwrap_or_default() 514 | .into_iter() 515 | .collect::>(); 516 | 517 | let keys = used_keys 518 | .iter() 519 | .map(|key| B256::from(*key)) 520 | .chain(modified_keys.clone().into_iter()) 521 | .collect::>() 522 | .into_iter() 523 | .collect::>(); 524 | 525 | let provider_clone = self.provider.clone(); 526 | 527 | before_handles.spawn(async move { 528 | Self::get_proof(provider_clone, address, keys, block_number - 1).await.unwrap() 529 | }); 530 | 531 | let provider_clone = 
self.provider.clone(); 532 | after_handles.spawn(async move { 533 | Self::get_proof(provider_clone, address, modified_keys, block_number) 534 | .await 535 | .unwrap() 536 | }); 537 | } 538 | before_storage_proofs.extend(before_handles.join_all().await); 539 | after_storage_proofs.extend(after_handles.join_all().await); 540 | } 541 | 542 | let parent_state = EthereumState::from_transition_proofs( 543 | previous_block.state_root, 544 | &before_storage_proofs.iter().map(|item| (item.address, item.clone())).collect(), 545 | &after_storage_proofs.iter().map(|item| (item.address, item.clone())).collect(), 546 | )?; 547 | 548 | let cumulative_state_diffs = cumulative_executor_outcomes.hash_state_slow(); 549 | 550 | // Update the parent state with the cumulative state diffs from all subblocks. 551 | let mut mutated_state = parent_state.clone(); 552 | mutated_state.update(&cumulative_state_diffs); 553 | 554 | // Verify the state root. 555 | let state_root = mutated_state.state_root(); 556 | if state_root != current_block.state_root { 557 | return Err(HostError::StateRootMismatch(state_root, current_block.state_root)); 558 | } 559 | 560 | // Derive the block header. 561 | // 562 | // Note: the receipts root and gas used are verified by `validate_block_post_execution`. 
563 | let mut header = current_block.header.clone(); 564 | header.parent_hash = previous_block_hash; 565 | header.ommers_hash = proofs::calculate_ommers_root(¤t_block.ommers); 566 | header.state_root = current_block.state_root; 567 | header.transactions_root = proofs::calculate_transaction_root(¤t_block.body); 568 | header.receipts_root = current_block.header.receipts_root; 569 | header.withdrawals_root = current_block 570 | .withdrawals 571 | .clone() 572 | .map(|w| proofs::calculate_withdrawals_root(w.into_inner().as_slice())); 573 | header.logs_bloom = global_logs_bloom; 574 | header.requests_root = 575 | current_block.requests.as_ref().map(|r| proofs::calculate_requests_root(&r.0)); 576 | 577 | // Assert the derived header is correct. 578 | let constructed_header_hash = header.hash_slow(); 579 | let target_hash = current_block.header.hash_slow(); 580 | if constructed_header_hash != target_hash { 581 | return Err(HostError::HeaderMismatch(constructed_header_hash, target_hash)); 582 | } 583 | 584 | // Log the result. 585 | tracing::info!( 586 | "successfully executed block: block_number={}, block_hash={}, state_root={}", 587 | current_block.header.number, 588 | header.hash_slow(), 589 | current_block.state_root 590 | ); 591 | 592 | // Fetch the parent headers needed to constrain the BLOCKHASH opcode. 593 | let oldest_ancestor = *rpc_db.oldest_ancestor.borrow(); 594 | let mut ancestor_headers = vec![]; 595 | let mut block_hashes = BTreeMap::new(); 596 | tracing::info!("fetching {} ancestor headers", block_number - oldest_ancestor); 597 | for height in (oldest_ancestor..=(block_number - 1)).rev() { 598 | let block = self 599 | .provider 600 | .get_block_by_number(height.into(), false) 601 | .await? 
602 | .ok_or(HostError::ExpectedBlock(height))?; 603 | 604 | block_hashes.insert(height, block.inner.header.hash); 605 | ancestor_headers.push(block.inner.header.try_into()?); 606 | } 607 | 608 | let aggregation_input = AggregationInput { 609 | current_block: V::pre_process_block(¤t_block), 610 | ancestor_headers, 611 | bytecodes: rpc_db.get_bytecodes(), 612 | }; 613 | 614 | let mut big_state = parent_state.clone(); 615 | for i in 0..subblock_inputs.len() { 616 | let input_root = big_state.state_root(); 617 | // Get the touched addresses / storage slots in this subblock. 618 | let mut touched_state = HashMap::new(); 619 | for (address, used_keys) in all_state_requests[i].iter() { 620 | let modified_keys = all_executor_outcomes[i] 621 | .state() 622 | .state 623 | .get(address) 624 | .map(|account| { 625 | account.storage.keys().map(|key| B256::from(*key)).collect::>() 626 | }) 627 | .unwrap_or_default() 628 | .into_iter() 629 | .collect::>(); 630 | 631 | let keys = used_keys 632 | .iter() 633 | .map(|key| B256::from(*key)) 634 | .chain(modified_keys.clone().into_iter()) 635 | .collect::>() 636 | .into_iter() 637 | .map(keccak256) 638 | .collect::>(); 639 | 640 | touched_state.insert(keccak256(address), keys); 641 | } 642 | 643 | // Generate the subblock parent state by taking the state diff of the subblock and all 644 | // touched addresses, and then pruning the big state. 645 | let mut subblock_parent_state = big_state.clone(); 646 | 647 | let serialized_size = 648 | rkyv::to_bytes::(&subblock_parent_state).unwrap().len(); 649 | let prev_root = subblock_parent_state.state_root(); 650 | 651 | subblock_parent_state.prune(&state_diffs[i], &touched_state); 652 | 653 | // Assert that pruning did not change the state root. 
654 | let new_root = subblock_parent_state.state_root(); 655 | assert_eq!(prev_root, new_root); 656 | 657 | let new_serialized_size = 658 | rkyv::to_bytes::(&subblock_parent_state).unwrap().len(); 659 | tracing::info!( 660 | "Pruned state compression ratio: {}", 661 | new_serialized_size as f64 / serialized_size as f64 662 | ); 663 | subblock_parent_states.push( 664 | rkyv::to_bytes::(&subblock_parent_state).unwrap().to_vec(), 665 | ); 666 | 667 | // Update the big state with the state diff of this subblock, and set the fields of this 668 | // subblock's input/output accordingly. 669 | big_state.update(&state_diffs[i]); 670 | let output_root = big_state.state_root(); 671 | 672 | subblock_outputs[i].input_state_root = input_root; 673 | subblock_outputs[i].output_state_root = output_root; 674 | 675 | let subblock_input = &mut subblock_inputs[i]; 676 | subblock_input.block_hashes = block_hashes.clone(); 677 | } 678 | 679 | let all_subblock_outputs = SubblockHostOutput { 680 | subblock_inputs, 681 | subblock_parent_states, 682 | subblock_outputs, 683 | agg_input: aggregation_input, 684 | }; 685 | 686 | #[cfg(debug_assertions)] 687 | { 688 | all_subblock_outputs.validate().map_err(HostError::ClientValidation)?; 689 | } 690 | 691 | Ok(all_subblock_outputs) 692 | } 693 | } 694 | -------------------------------------------------------------------------------- /crates/mpt/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rsp-mpt" 3 | description = "" 4 | version.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | [lints] 11 | workspace = true 12 | 13 | [dependencies] 14 | rlp.workspace = true 15 | serde.workspace = true 16 | thiserror.workspace = true 17 | rkyv.workspace = true 18 | itertools = "0.13.0" 19 | 20 | # workspace 21 | rsp-primitives.workspace = true 22 | 23 | # reth 24 | reth-primitives.workspace = 
true 25 | reth-trie.workspace = true 26 | 27 | # revm 28 | revm.workspace = true 29 | 30 | # alloy 31 | alloy-primitives.workspace = true 32 | alloy-rlp.workspace = true 33 | 34 | [dev-dependencies] 35 | alloy-trie.workspace = true 36 | hex-literal.workspace = true 37 | tracing-subscriber = "0.3.18" 38 | rand = "0.8" 39 | 40 | rsp-mpt = { path = ".", features = ["preimage_context"] } 41 | 42 | [features] 43 | default = [] 44 | preimage_context = [] 45 | -------------------------------------------------------------------------------- /crates/mpt/src/lib.rs: -------------------------------------------------------------------------------- 1 | use itertools::Itertools; 2 | use reth_trie::{AccountProof, HashedPostState, TrieAccount}; 3 | use revm::primitives::{Address, HashMap, B256}; 4 | use rkyv::with::{Identity, MapKV}; 5 | use rsp_primitives::rkyv::B256Def; 6 | use serde::{Deserialize, Serialize}; 7 | use std::collections::HashSet; 8 | 9 | /// Module containing MPT code adapted from `zeth`. 10 | mod mpt; 11 | pub use mpt::Error; 12 | use mpt::{proofs_to_tries, transition_proofs_to_tries, MptNode, MptNodeReference}; 13 | 14 | /// Ethereum state trie and account storage tries. 15 | #[derive( 16 | Debug, 17 | Clone, 18 | PartialEq, 19 | Eq, 20 | Serialize, 21 | Deserialize, 22 | Default, 23 | rkyv::Archive, 24 | rkyv::Serialize, 25 | rkyv::Deserialize, 26 | )] 27 | pub struct EthereumState { 28 | pub state_trie: MptNode, 29 | #[rkyv(with = MapKV)] 30 | pub storage_tries: HashMap, 31 | } 32 | 33 | impl EthereumState { 34 | /// Builds Ethereum state tries from relevant proofs before and after a state transition. 35 | pub fn from_transition_proofs( 36 | state_root: B256, 37 | parent_proofs: &HashMap, 38 | proofs: &HashMap, 39 | ) -> Result { 40 | transition_proofs_to_tries(state_root, parent_proofs, proofs) 41 | } 42 | 43 | /// Builds Ethereum state tries from relevant proofs from a given state. 
44 | pub fn from_proofs( 45 | state_root: B256, 46 | proofs: &HashMap, 47 | ) -> Result { 48 | proofs_to_tries(state_root, proofs) 49 | } 50 | 51 | /// Mutates state based on diffs provided in [`HashedPostState`]. 52 | pub fn update(&mut self, post_state: &HashedPostState) { 53 | for (hashed_address, account) in post_state.accounts.iter() { 54 | let hashed_address = hashed_address.as_slice(); 55 | 56 | match account { 57 | Some(account) => { 58 | let state_storage = &post_state.storages.get(hashed_address).unwrap(); 59 | let storage_root = { 60 | let storage_trie = self.storage_tries.get_mut(hashed_address).unwrap(); 61 | 62 | if state_storage.wiped { 63 | storage_trie.clear(); 64 | } 65 | 66 | for (key, value) in state_storage.storage.iter() { 67 | let key = key.as_slice(); 68 | if value.is_zero() { 69 | storage_trie.delete(key).unwrap(); 70 | } else { 71 | storage_trie.insert_rlp(key, *value).unwrap(); 72 | } 73 | } 74 | 75 | storage_trie.hash() 76 | }; 77 | 78 | let state_account = TrieAccount { 79 | nonce: account.nonce, 80 | balance: account.balance, 81 | storage_root, 82 | code_hash: account.get_bytecode_hash(), 83 | }; 84 | self.state_trie.insert_rlp(hashed_address, state_account).unwrap(); 85 | } 86 | None => { 87 | self.state_trie.delete(hashed_address).unwrap(); 88 | } 89 | } 90 | } 91 | } 92 | 93 | /// Computes the state root. 94 | pub fn state_root(&self) -> B256 { 95 | self.state_trie.hash() 96 | } 97 | 98 | /// Given a state trie constructed with some storage proofs, prunes it to only include the 99 | /// necessary hashes for certain addresses / storage slots touched. 100 | /// 101 | /// Note: never called in the zkvm, so it's pretty fine that this is not optimized. 102 | pub fn prune( 103 | &mut self, 104 | state_diff: &HashedPostState, 105 | touched_state: &HashMap>, 106 | ) { 107 | // Iterate over all of the touched state, marking nodes touched along the way. 
108 | let mut self_clone = self.clone(); 109 | let (touched_account_refs, touched_storage_refs) = 110 | self_clone.get_touched_nodes(state_diff, touched_state); 111 | 112 | // Now, traverse the entire trie, replacing any nodes that are not touched with their 113 | // digest. 114 | let prev_state_root = self.state_root(); 115 | self.state_trie.prune_unmarked_nodes(&touched_account_refs); 116 | let new_state_root = self.state_root(); 117 | assert_eq!(prev_state_root, new_state_root); 118 | 119 | for (hashed_address, storage_refs) in touched_storage_refs { 120 | let storage_trie = self.storage_tries.get_mut(&hashed_address).unwrap(); 121 | let prev_storage_root = storage_trie.hash(); 122 | storage_trie.prune_unmarked_nodes(&storage_refs); 123 | let new_storage_root = storage_trie.hash(); 124 | assert_eq!(prev_storage_root, new_storage_root); 125 | } 126 | } 127 | 128 | fn get_touched_nodes( 129 | &mut self, 130 | post_state: &HashedPostState, 131 | touched_state: &HashMap>, 132 | ) -> (HashSet, HashMap>) { 133 | let mut touched_account_refs = HashSet::new(); 134 | let mut touched_storage_refs = HashMap::>::new(); 135 | for (hashed_address_b256, account) in 136 | post_state.accounts.iter().sorted_by(|a, b| a.0.cmp(b.0)) 137 | { 138 | let hashed_address = hashed_address_b256.as_slice(); 139 | 140 | match account { 141 | Some(_account) => { 142 | let state_storage = &post_state.storages.get(hashed_address).unwrap(); 143 | 144 | let storage_trie = self.storage_tries.get_mut(hashed_address).unwrap(); 145 | let account_touched = 146 | touched_storage_refs.entry(*hashed_address_b256).or_default(); 147 | 148 | if state_storage.wiped { 149 | println!("clearing storage"); 150 | // storage_trie.clear(); 151 | } 152 | 153 | for (key, value) in state_storage.storage.iter() { 154 | let key = key.as_slice(); 155 | let mut deferred_deletes = Vec::new(); 156 | if value.is_zero() { 157 | deferred_deletes.push(key); 158 | } else { 159 | let (_gotten, touched) = 
storage_trie.get_with_touched(key).unwrap(); 160 | account_touched.extend(touched); 161 | } 162 | for key in deferred_deletes { 163 | let (_gotten, touched) = storage_trie.delete_with_touched(key).unwrap(); 164 | account_touched.extend(touched); 165 | } 166 | } 167 | 168 | let (_gotten, touched) = 169 | self.state_trie.get_with_touched(hashed_address).unwrap(); 170 | touched_account_refs.extend(touched); 171 | } 172 | None => { 173 | let (_gotten, touched) = 174 | self.state_trie.delete_with_touched(hashed_address).unwrap(); 175 | touched_account_refs.extend(touched); 176 | } 177 | } 178 | } 179 | 180 | for (hashed_address, keys) in touched_state.iter() { 181 | let hashed_address_bytes = hashed_address.as_slice(); 182 | match self.storage_tries.get(hashed_address) { 183 | Some(storage_trie) => { 184 | for key in keys { 185 | let key = key.as_slice(); 186 | let (_, touched) = storage_trie.get_with_touched(key).unwrap(); 187 | touched_storage_refs.entry(*hashed_address).or_default().extend(touched); 188 | } 189 | let (_account_ref, touched) = 190 | self.state_trie.get_with_touched(hashed_address_bytes).unwrap(); 191 | touched_account_refs.extend(touched); 192 | } 193 | None => { 194 | let (_account_ref, touched) = 195 | self.state_trie.get_with_touched(hashed_address_bytes).unwrap(); 196 | touched_account_refs.extend(touched); 197 | } 198 | } 199 | } 200 | (touched_account_refs, touched_storage_refs) 201 | } 202 | } 203 | 204 | #[derive(Debug, thiserror::Error)] 205 | pub enum FromProofError { 206 | #[error("Node {} is not found by hash", .0)] 207 | NodeNotFoundByHash(usize), 208 | #[error("Node {} refrences invalid successor", .0)] 209 | NodeHasInvalidSuccessor(usize), 210 | #[error("Node {} cannot have children and is invalid", .0)] 211 | NodeCannotHaveChildren(usize), 212 | #[error("Found mismatched storage root after reconstruction \n account {}, found {}, expected {}", .0, .1, .2)] 213 | MismatchedStorageRoot(Address, B256, B256), 214 | #[error("Found mismatched 
state root after reconstruction \n found {}, expected {}", .0, .1)] 215 | MismatchedStateRoot(B256, B256), 216 | #[error("Error decoding proofs from bytes, {}", .0)] 217 | DecodingError(#[from] Error), 218 | } 219 | -------------------------------------------------------------------------------- /crates/primitives/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rsp-primitives" 3 | description = "" 4 | version.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | rkyv.workspace = true 12 | 13 | # reth 14 | reth-primitives.workspace = true 15 | reth-trie.workspace = true 16 | reth-chainspec.workspace = true 17 | 18 | # revm 19 | revm-primitives.workspace = true 20 | 21 | # alloy 22 | alloy-rpc-types.workspace = true 23 | -------------------------------------------------------------------------------- /crates/primitives/src/account_proof.rs: -------------------------------------------------------------------------------- 1 | use alloy_rpc_types::EIP1186AccountProofResponse; 2 | use reth_primitives::Account; 3 | use reth_trie::{AccountProof, StorageProof, EMPTY_ROOT_HASH}; 4 | 5 | /// Converts an [EIP1186AccountProofResponse] to an [AccountProof]. 
6 | pub fn eip1186_proof_to_account_proof(proof: EIP1186AccountProofResponse) -> AccountProof { 7 | let address = proof.address; 8 | let balance = proof.balance; 9 | let code_hash = proof.code_hash; 10 | let storage_root = proof.storage_hash; 11 | let account_proof = proof.account_proof; 12 | let storage_proofs = proof 13 | .storage_proof 14 | .into_iter() 15 | .map(|storage_proof| { 16 | let key = storage_proof.key; 17 | let value = storage_proof.value; 18 | let proof = storage_proof.proof; 19 | let mut sp = StorageProof::new(key.0); 20 | sp.value = value; 21 | sp.proof = proof; 22 | sp 23 | }) 24 | .collect(); 25 | 26 | let (storage_root, info) = 27 | if proof.nonce == 0 && balance.is_zero() && storage_root.is_zero() && code_hash.is_zero() { 28 | // Account does not exist in state. Return `None` here to prevent proof verification. 29 | (EMPTY_ROOT_HASH, None) 30 | } else { 31 | ( 32 | storage_root, 33 | Some(Account { nonce: proof.nonce, balance, bytecode_hash: code_hash.into() }), 34 | ) 35 | }; 36 | 37 | AccountProof { address, info, proof: account_proof, storage_root, storage_proofs } 38 | } 39 | -------------------------------------------------------------------------------- /crates/primitives/src/chain_spec.rs: -------------------------------------------------------------------------------- 1 | use reth_chainspec::{ 2 | BaseFeeParams, BaseFeeParamsKind, Chain, ChainHardforks, ChainSpec, DepositContract, 3 | EthereumHardfork, ForkCondition, OptimismHardfork, 4 | }; 5 | use reth_primitives::{ 6 | constants::ETHEREUM_BLOCK_GAS_LIMIT, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, 7 | }; 8 | use revm_primitives::{address, b256, U256}; 9 | 10 | /// Returns the [ChainSpec] for Ethereum mainnet. 
11 | pub fn mainnet() -> ChainSpec { 12 | // Spec extracted from: 13 | // 14 | // https://github.com/paradigmxyz/reth/blob/c228fe15808c3acbf18dc3af1a03ef5cbdcda07a/crates/chainspec/src/spec.rs#L35-L60 15 | let mut spec = ChainSpec { 16 | chain: Chain::mainnet(), 17 | // We don't need the genesis state. Using default to save cycles. 18 | genesis: Default::default(), 19 | genesis_hash: Some(MAINNET_GENESIS_HASH), 20 | paris_block_and_final_difficulty: Some((0, U256::ZERO)), 21 | // For some reasons a state root mismatch error arises if we don't force activate everything 22 | // before and including Shanghai. 23 | hardforks: ChainHardforks::new(vec![ 24 | (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), 25 | (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), 26 | (EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)), 27 | (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), 28 | (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), 29 | (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), 30 | (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), 31 | (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), 32 | (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), 33 | (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), 34 | (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), 35 | (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), 36 | (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)), 37 | (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)), 38 | ( 39 | EthereumHardfork::Paris.boxed(), 40 | ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, 41 | ), 42 | (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)), 43 | (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1710338135)), 44 | ]), 45 | deposit_contract: Some(DepositContract::new( 46 | 
address!("00000000219ab540356cbb839cbe05303d7705fa"), 47 | 11052984, 48 | b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), 49 | )), 50 | base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), 51 | max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, 52 | prune_delete_limit: 20000, 53 | }; 54 | spec.genesis.config.dao_fork_support = true; 55 | spec 56 | } 57 | 58 | /// Returns the [ChainSpec] for OP Mainnet. 59 | pub fn op_mainnet() -> ChainSpec { 60 | // Spec extracted from: 61 | // 62 | // https://github.com/paradigmxyz/reth/blob/c228fe15808c3acbf18dc3af1a03ef5cbdcda07a/crates/optimism/chainspec/src/op.rs#L18-L44 63 | ChainSpec { 64 | chain: Chain::optimism_mainnet(), 65 | // We don't need the genesis state. Using default to save cycles. 66 | genesis: Default::default(), 67 | genesis_hash: Some(b256!( 68 | "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" 69 | )), 70 | paris_block_and_final_difficulty: Some((0, U256::ZERO)), 71 | hardforks: OptimismHardfork::op_mainnet(), 72 | base_fee_params: BaseFeeParamsKind::Variable( 73 | vec![ 74 | (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()), 75 | (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), 76 | ] 77 | .into(), 78 | ), 79 | max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, 80 | prune_delete_limit: 10000, 81 | ..Default::default() 82 | } 83 | } 84 | 85 | /// Returns the [ChainSpec] for Linea Mainnet. 86 | pub fn linea_mainnet() -> ChainSpec { 87 | // NOTE: Linea has London activated; but setting Paris tricks reth into disabling 88 | // block rewards, which we need for Linea (clique consensus) to work. 89 | ChainSpec { 90 | chain: Chain::linea(), 91 | // We don't need the genesis state. Using default to save cycles. 92 | genesis: Default::default(), 93 | paris_block_and_final_difficulty: Some((0, U256::ZERO)), 94 | // For some reasons a state root mismatch error arises if we don't force activate everything 95 | // before and including Shanghai. 
96 | hardforks: ChainHardforks::new(vec![ 97 | (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), 98 | (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), 99 | (EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)), 100 | (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), 101 | (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), 102 | (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), 103 | (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), 104 | (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), 105 | (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), 106 | (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), 107 | (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), 108 | (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), 109 | (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)), 110 | (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)), 111 | ( 112 | EthereumHardfork::Paris.boxed(), 113 | ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, 114 | ), 115 | ]), 116 | ..Default::default() 117 | } 118 | } 119 | 120 | /// Returns the [ChainSpec] for Sepolia testnet. 121 | pub fn sepolia() -> ChainSpec { 122 | // Spec extracted from: 123 | // 124 | // https://github.com/paradigmxyz/reth/blob/c228fe15808c3acbf18dc3af1a03ef5cbdcda07a/crates/chainspec/src/spec.rs#L35-L60 125 | let mut spec = ChainSpec { 126 | chain: Chain::sepolia(), 127 | // We don't need the genesis state. Using default to save cycles. 128 | genesis: Default::default(), 129 | genesis_hash: Some(SEPOLIA_GENESIS_HASH), 130 | paris_block_and_final_difficulty: Some((0, U256::ZERO)), 131 | // For some reasons a state root mismatch error arises if we don't force activate everything 132 | // before and including Shanghai. 
133 | hardforks: ChainHardforks::new(vec![ 134 | (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), 135 | (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), 136 | (EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)), 137 | (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), 138 | (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), 139 | (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), 140 | (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), 141 | (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), 142 | (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), 143 | (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), 144 | (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), 145 | (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), 146 | ( 147 | EthereumHardfork::Paris.boxed(), 148 | ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, 149 | ), 150 | (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)), 151 | (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1706655072)), 152 | ]), 153 | deposit_contract: Some(DepositContract::new( 154 | address!("7f02c3e3c98b133055b8b348b2ac625669ed295d"), 155 | 1273020, 156 | b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), 157 | )), 158 | base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), 159 | max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, 160 | prune_delete_limit: 10000, 161 | }; 162 | spec.genesis.config.dao_fork_support = true; 163 | spec 164 | } 165 | -------------------------------------------------------------------------------- /crates/primitives/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod account_proof; 2 | pub mod chain_spec; 3 | pub mod rkyv; 4 | -------------------------------------------------------------------------------- 
/crates/primitives/src/rkyv.rs:
--------------------------------------------------------------------------------
use reth_primitives::Bloom;
use revm_primitives::B256;

/// `rkyv` remote-derive mirror of [`B256`], enabling zero-copy archiving of `B256` values (and of
/// maps keyed by them) without a local `rkyv` impl on the foreign type.
#[derive(
    Clone,
    Debug,
    PartialEq,
    Eq,
    Hash,
    PartialOrd,
    Ord,
    rkyv::Archive,
    rkyv::Serialize,
    rkyv::Deserialize,
)]
#[rkyv(remote = B256)]
#[rkyv(archived = ArchivedB256)]
#[rkyv(attr(derive(Eq, PartialEq, Hash)))]
pub struct B256Def(pub [u8; 32]);

// Extraction stripped the source type of this `From` impl; restored to convert the
// remote-derive wrapper back into the real `B256`.
impl From<B256Def> for B256 {
    fn from(value: B256Def) -> Self {
        B256::new(value.0)
    }
}

/// Plain wrapper over the 256-byte logs-bloom array, convertible into [`Bloom`].
pub struct BloomDef(pub [u8; 256]);

impl From<BloomDef> for Bloom {
    fn from(value: BloomDef) -> Self {
        Bloom::new(value.0)
    }
}
--------------------------------------------------------------------------------
/crates/storage/rpc-db/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "rsp-rpc-db"
description = ""
version.workspace = true
edition.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true

[dependencies]
tokio.workspace = true
thiserror.workspace = true
tracing.workspace = true

# reth
reth-primitives.workspace = true
reth-storage-errors.workspace = true
reth-revm.workspace = true

# revm
revm-primitives.workspace = true

# alloy
alloy-provider.workspace = true
alloy-rpc-types.workspace = true
alloy-transport.workspace = true
--------------------------------------------------------------------------------
/crates/storage/rpc-db/src/lib.rs:
--------------------------------------------------------------------------------
use std::{
    cell::RefCell,
    collections::{BTreeMap, BTreeSet},
    marker::PhantomData,
};

use alloy_provider::{network::AnyNetwork, Provider};
use
alloy_rpc_types::BlockId; 9 | use alloy_transport::Transport; 10 | use reth_primitives::{ 11 | revm_primitives::{AccountInfo, Bytecode}, 12 | Address, B256, U256, 13 | }; 14 | use reth_revm::{db::BundleState, DatabaseRef}; 15 | use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; 16 | use revm_primitives::HashMap; 17 | 18 | /// A database that fetches data from a [Provider] over a [Transport]. 19 | #[derive(Debug, Clone)] 20 | pub struct RpcDb { 21 | /// The provider which fetches data. 22 | pub provider: P, 23 | /// The block to fetch data from. 24 | pub block: BlockId, 25 | /// The subblock's accounts. 26 | pub subblock_accounts: RefCell>, 27 | /// The subblock's storage. 28 | pub subblock_storage: RefCell>>, 29 | /// The block hashes. 30 | pub block_hashes: RefCell>, 31 | /// The persistent accounts, used across multiple subblocks. 32 | pub persistent_accounts: RefCell>, 33 | /// The persistent storage, used across multiple subblocks. 34 | pub persistent_storage: RefCell>>, 35 | /// The oldest block whose header/hash has been requested. 36 | pub oldest_ancestor: RefCell, 37 | /// A phantom type to make the struct generic over the transport. 38 | pub _phantom: PhantomData, 39 | } 40 | 41 | /// Errors that can occur when interacting with the [RpcDb]. 42 | #[derive(Debug, Clone, thiserror::Error)] 43 | pub enum RpcDbError { 44 | #[error("failed to fetch data: {0}")] 45 | RpcError(String), 46 | #[error("failed to find block")] 47 | BlockNotFound, 48 | #[error("failed to find trie node preimage")] 49 | PreimageNotFound, 50 | } 51 | 52 | impl + Clone> RpcDb { 53 | /// Create a new [`RpcDb`]. 
54 | pub fn new(provider: P, block: u64) -> Self { 55 | RpcDb { 56 | provider, 57 | block: block.into(), 58 | subblock_accounts: RefCell::new(HashMap::new()), 59 | subblock_storage: RefCell::new(HashMap::new()), 60 | block_hashes: RefCell::new(HashMap::new()), 61 | persistent_accounts: RefCell::new(HashMap::new()), 62 | persistent_storage: RefCell::new(HashMap::new()), 63 | oldest_ancestor: RefCell::new(block), 64 | _phantom: PhantomData, 65 | } 66 | } 67 | 68 | /// Fetch the [AccountInfo] for an [Address]. 69 | pub async fn fetch_account_info(&self, address: Address) -> Result { 70 | tracing::debug!("fetching account info for address: {}", address); 71 | 72 | // Prioritize fetching from the cache. 73 | if self.persistent_accounts.borrow().contains_key(&address) { 74 | // Record the account info to the subblock state. 75 | let account_info = self.persistent_accounts.borrow().get(&address).unwrap().clone(); 76 | self.subblock_accounts.borrow_mut().insert(address, account_info.clone()); 77 | 78 | return Ok(account_info); 79 | } 80 | 81 | // Fetch the proof for the account. 82 | let proof = self 83 | .provider 84 | .get_proof(address, vec![]) 85 | .block_id(self.block) 86 | .await 87 | .map_err(|e| RpcDbError::RpcError(e.to_string()))?; 88 | 89 | // Fetch the code of the account. 90 | let code = self 91 | .provider 92 | .get_code_at(address) 93 | .block_id(self.block) 94 | .await 95 | .map_err(|e| RpcDbError::RpcError(e.to_string()))?; 96 | 97 | // Construct the account info & write it to the log. 98 | let bytecode = Bytecode::new_raw(code); 99 | let account_info = AccountInfo { 100 | nonce: proof.nonce, 101 | balance: proof.balance, 102 | code_hash: proof.code_hash, 103 | code: Some(bytecode.clone()), 104 | }; 105 | 106 | // Record the account info to the state. 107 | self.subblock_accounts.borrow_mut().insert(address, account_info.clone()); 108 | 109 | Ok(account_info) 110 | } 111 | 112 | /// Fetch the storage value at an [Address] and [U256] index. 
113 | pub async fn fetch_storage_at( 114 | &self, 115 | address: Address, 116 | index: U256, 117 | ) -> Result { 118 | tracing::debug!("fetching storage value at address: {}, index: {}", address, index); 119 | 120 | // Prioritize fetching from the cache. 121 | if let Some(storage_map) = self.persistent_storage.borrow().get(&address) { 122 | if let Some(value) = storage_map.get(&index) { 123 | // Record the storage value to the subblock state. 124 | let mut storage_values = self.subblock_storage.borrow_mut(); 125 | let entry = storage_values.entry(address).or_default(); 126 | entry.insert(index, *value); 127 | return Ok(*value); 128 | } 129 | } 130 | 131 | // Fetch the storage value. 132 | let value = self 133 | .provider 134 | .get_storage_at(address, index) 135 | .block_id(self.block) 136 | .await 137 | .map_err(|e| RpcDbError::RpcError(e.to_string()))?; 138 | 139 | // Record the storage value to the state. 140 | let mut storage_values = self.subblock_storage.borrow_mut(); 141 | let entry = storage_values.entry(address).or_default(); 142 | entry.insert(index, value); 143 | 144 | Ok(value) 145 | } 146 | 147 | /// Fetch the block hash for a block number. 148 | pub async fn fetch_block_hash(&self, number: u64) -> Result { 149 | tracing::info!("fetching block hash for block number: {}", number); 150 | 151 | // Fetch the block. 152 | let block = self 153 | .provider 154 | .get_block_by_number(number.into(), false) 155 | .await 156 | .map_err(|e| RpcDbError::RpcError(e.to_string()))?; 157 | 158 | // Record the block hash to the state. 159 | let block = block.ok_or(RpcDbError::BlockNotFound)?; 160 | let hash = block.header.hash; 161 | 162 | let mut oldest_ancestor = self.oldest_ancestor.borrow_mut(); 163 | *oldest_ancestor = number.min(*oldest_ancestor); 164 | 165 | // Record the block hash to the state. 166 | self.block_hashes.borrow_mut().insert(number, hash); 167 | 168 | Ok(hash) 169 | } 170 | 171 | /// Gets all the state keys used. 
The client uses this to read the actual state data from tries. 172 | pub fn get_state_requests(&self) -> HashMap> { 173 | let accounts = self.subblock_accounts.borrow(); 174 | let storage = self.subblock_storage.borrow(); 175 | 176 | accounts 177 | .keys() 178 | .chain(storage.keys()) 179 | .map(|&address| { 180 | let storage_keys_for_address: BTreeSet = storage 181 | .get(&address) 182 | .map(|storage_map| storage_map.keys().cloned().collect()) 183 | .unwrap_or_default(); 184 | 185 | (address, storage_keys_for_address.into_iter().collect()) 186 | }) 187 | .collect() 188 | } 189 | 190 | /// Resets the subblock state, to get ready for the next subblock. 191 | pub fn advance_subblock(&self) { 192 | self.subblock_accounts.borrow_mut().clear(); 193 | self.subblock_storage.borrow_mut().clear(); 194 | } 195 | 196 | /// Accumulates the subblock's state diffs into the persistent state. 197 | pub fn update_state_diffs(&mut self, state_diffs: &BundleState) { 198 | for (address, account) in state_diffs.state.iter() { 199 | match &account.info { 200 | Some(info) => self.persistent_accounts.borrow_mut().insert(*address, info.clone()), 201 | None => { 202 | // This indicates a destroyed account. 203 | self.persistent_accounts.borrow_mut().insert(*address, AccountInfo::default()) 204 | } 205 | }; 206 | account.storage.iter().for_each(|(k, v)| { 207 | self.persistent_storage 208 | .borrow_mut() 209 | .entry(*address) 210 | .or_default() 211 | .insert(*k, v.present_value()); 212 | }); 213 | } 214 | } 215 | 216 | /// Gets all account bytecodes. 
pub fn get_bytecodes(&self) -> Vec<Bytecode> {
    let accounts = self.subblock_accounts.borrow();

    // Keying by code hash deduplicates identical bytecode shared by multiple accounts.
    accounts
        .values()
        .flat_map(|account| account.code.clone())
        .map(|code| (code.hash_slow(), code))
        .collect::<HashMap<_, _>>()
        .into_values()
        .collect::<Vec<_>>()
}
}

// NOTE(review): the generic parameters below were reconstructed from a copy whose `<...>`
// lists were stripped by the extraction — confirm against the original source.
impl<T: Transport + Clone, P: Provider<T> + Clone> DatabaseRef for RpcDb<T, P> {
    type Error = ProviderError;

    /// Synchronous adapter over [`Self::fetch_account_info`].
    ///
    /// Bridges revm's sync `DatabaseRef` API onto the async provider: it requires an
    /// ambient tokio runtime, and `block_in_place` additionally requires the
    /// multi-threaded runtime flavor.
    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        let handle = tokio::runtime::Handle::try_current().map_err(|_| {
            ProviderError::Database(DatabaseError::Other("no tokio runtime found".to_string()))
        })?;
        let result =
            tokio::task::block_in_place(|| handle.block_on(self.fetch_account_info(address)));
        let account_info =
            result.map_err(|e| ProviderError::Database(DatabaseError::Other(e.to_string())))?;
        Ok(Some(account_info))
    }

    /// Never expected to be called: `basic_ref` always returns the code inline
    /// (`code: Some(..)`), so revm should not need to look bytecode up by hash.
    fn code_by_hash_ref(&self, _code_hash: B256) -> Result<Bytecode, Self::Error> {
        unimplemented!()
    }

    /// Synchronous adapter over [`Self::fetch_storage_at`]; same runtime requirements as
    /// `basic_ref`.
    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
        let handle = tokio::runtime::Handle::try_current().map_err(|_| {
            ProviderError::Database(DatabaseError::Other("no tokio runtime found".to_string()))
        })?;
        let result =
            tokio::task::block_in_place(|| handle.block_on(self.fetch_storage_at(address, index)));
        let value =
            result.map_err(|e| ProviderError::Database(DatabaseError::Other(e.to_string())))?;
        Ok(value)
    }

    /// Synchronous adapter over [`Self::fetch_block_hash`]; same runtime requirements as
    /// `basic_ref`.
    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
        let handle = tokio::runtime::Handle::try_current().map_err(|_| {
            ProviderError::Database(DatabaseError::Other("no tokio runtime found".to_string()))
        })?;
        let result = tokio::task::block_in_place(|| handle.block_on(self.fetch_block_hash(number)));
        let value =
            result.map_err(|e| ProviderError::Database(DatabaseError::Other(e.to_string())))?;
        Ok(value)
    }
}
-------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.85.0" 3 | components = ["llvm-tools", "rustc-dev"] 4 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | imports_granularity = "Crate" 3 | use_small_heuristics = "Max" 4 | comment_width = 100 5 | wrap_comments = true 6 | binop_separator = "Back" 7 | trailing_comma = "Vertical" 8 | trailing_semicolon = false 9 | use_field_init_shorthand = true 10 | format_code_in_doc_comments = true 11 | doc_comment_code_block_width = 100 --------------------------------------------------------------------------------