├── .cargo └── config.toml ├── .github └── workflows │ └── main.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── benches └── bench.rs ├── bin ├── akula-rpc.rs ├── akula-sentry.rs ├── akula-toolbox.rs ├── akula.rs └── consensus-tests.rs ├── build.rs ├── rust-toolchain └── src ├── accessors ├── chain.rs ├── mod.rs └── state.rs ├── binutil.rs ├── bitmapdb └── mod.rs ├── chain ├── intrinsic_gas.rs ├── mod.rs └── protocol_param.rs ├── consensus ├── base.rs ├── beacon.rs ├── blockchain.rs ├── clique │ ├── mod.rs │ └── state.rs ├── fork_choice_graph.rs └── mod.rs ├── crypto ├── blake2.rs └── mod.rs ├── etl ├── collector.rs ├── data_provider.rs └── mod.rs ├── execution ├── address.rs ├── analysis_cache.rs ├── evm │ ├── benches │ │ ├── bench.rs │ │ └── inputs │ │ │ └── benchmarks │ │ │ └── main │ │ │ ├── blake2b_huff.evm │ │ │ ├── blake2b_huff.toml │ │ │ ├── blake2b_shifts.evm │ │ │ ├── blake2b_shifts.toml │ │ │ ├── sha1_divs.evm │ │ │ ├── sha1_divs.toml │ │ │ ├── sha1_shifts.evm │ │ │ ├── sha1_shifts.toml │ │ │ ├── snailtracer.evm │ │ │ ├── snailtracer.toml │ │ │ ├── weierstrudel.evm │ │ │ └── weierstrudel.toml │ ├── common.rs │ ├── host.rs │ ├── instructions │ │ ├── arithmetic.rs │ │ ├── bitwise.rs │ │ ├── boolean.rs │ │ ├── call.rs │ │ ├── control.rs │ │ ├── external.rs │ │ ├── instruction_table.rs │ │ ├── memory.rs │ │ ├── mod.rs │ │ ├── properties.rs │ │ └── stack_manip.rs │ ├── interpreter.rs │ ├── mod.rs │ ├── opcode.rs │ ├── state.rs │ ├── tests │ │ ├── basefee.rs │ │ ├── call.rs │ │ ├── eip2929.rs │ │ ├── execute.rs │ │ ├── mod.rs │ │ ├── other.rs │ │ └── state.rs │ └── util │ │ ├── bytecode.rs │ │ ├── mocked_host.rs │ │ ├── mod.rs │ │ └── tester.rs ├── evmglue.rs ├── mod.rs ├── precompiled.rs ├── processor.rs └── tracer │ ├── adhoc.rs │ ├── eip3155_tracer.rs │ └── mod.rs ├── kv ├── mdbx.rs ├── mod.rs ├── tables.rs └── traits.rs ├── lib.rs ├── models ├── account.rs ├── block.rs ├── bloom.rs ├── chainspec.rs ├── config.rs ├── header.rs ├── log.rs ├── mod.rs ├── receipt.rs ├── revision.rs ├── transaction.rs └── util.rs ├── p2p ├── mod.rs ├── node │ ├── builder.rs │ ├── mod.rs │ ├── node.rs │ ├── stash.rs │ └── stream.rs └── types │ ├── block.rs │ ├── event.rs │ ├── header.rs │ ├── message.rs │ ├── mod.rs │ ├── penalty.rs │ ├── rlp.rs │ └── status.rs ├── res ├── chainspec │ ├── ethereum.ron │ ├── goerli.ron │ ├── mod.rs │ ├── rinkeby.ron │ ├── ropsten.ron │ └── sepolia.ron ├── mod.rs └── readme-screenshot.png ├── rpc ├── debug.rs ├── erigon.rs ├── eth.rs ├── mod.rs ├── net.rs ├── otterscan.rs ├── parity.rs ├── trace.rs └── web3.rs ├── sentry ├── devp2p │ ├── disc.rs │ ├── disc │ │ ├── dns │ │ │ ├── backend │ │ │ │ ├── memory.rs │ │ │ │ ├── mod.rs │ │ │ │ └── trust_dns.rs │ │ │ └── mod.rs │ │ └── v4 │ │ │ ├── kad.rs │ │ │ ├── message.rs │ │ │ ├── mod.rs │ │ │ ├── node.rs │ │ │ ├── proto.rs │ │ │ └── util.rs │ ├── ecies.rs │ ├── ecies │ │ ├── algorithm.rs │ │ └── proto.rs │ ├── errors.rs │ ├── mac.rs │ ├── mod.rs │ ├── node_filter.rs │ ├── peer.rs │ ├── rlpx.rs │ ├── transport.rs │ ├── types.rs │ └── util.rs ├── eth.rs ├── grpc.rs ├── mod.rs └── services │ ├── mod.rs │ └── sentry.rs ├── stagedsync ├── mod.rs ├── stage.rs └── util.rs ├── stages ├── block_hashes.rs ├── bodies.rs ├── call_trace_index.rs ├── execution.rs ├── finish.rs ├── hashstate.rs ├── headers.rs ├── history_index.rs ├── interhashes.rs ├── log_address_index.rs ├── log_topic_index.rs ├── mod.rs ├── sender_recovery.rs ├── stage_util.rs ├── total_gas_index.rs ├── total_tx_index.rs └── 
tx_lookup.rs ├── state ├── buffer.rs ├── database.rs ├── database_version.rs ├── delta.rs ├── genesis.rs ├── in_memory_state.rs ├── interface.rs ├── intra_block_state.rs ├── mod.rs └── object.rs ├── trie ├── hash_builder.rs ├── intermediate_hashes.rs ├── mod.rs ├── node.rs ├── prefix_set.rs ├── util.rs └── vector_root.rs └── util.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [env] 2 | RUSTC_BOOTSTRAP = "1" 3 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | push: 4 | branches: 5 | - master 6 | 7 | name: CI 8 | 9 | jobs: 10 | ci: 11 | strategy: 12 | matrix: 13 | os: [ubuntu-latest, macos-latest] 14 | runs-on: ${{ matrix.os }} 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | 19 | - uses: actions-rs/toolchain@v1 20 | with: 21 | profile: minimal 22 | components: rustfmt, clippy 23 | 24 | - uses: actions-rs/cargo@v1 25 | with: 26 | command: fmt 27 | args: --all -- --check --config=imports_granularity=Crate 28 | 29 | - uses: actions-rs/install@v0.1 30 | with: 31 | crate: cargo-hack 32 | version: latest 33 | use-tool-cache: true 34 | 35 | - uses: actions-rs/cargo@v1 36 | with: 37 | command: hack 38 | args: check --workspace --ignore-private --each-feature --no-dev-deps 39 | 40 | - uses: actions-rs/cargo@v1 41 | with: 42 | command: check 43 | args: --workspace --all-targets --all-features 44 | 45 | - uses: actions-rs/cargo@v1 46 | with: 47 | command: test 48 | 49 | - uses: actions-rs/clippy-check@v1 50 | with: 51 | token: ${{ secrets.GITHUB_TOKEN }} 52 | args: --all-features 53 | 54 | - uses: actions/checkout@v3 55 | with: 56 | repository: ethereum/tests 57 | submodules: recursive 58 | path: ethereum-tests 59 | ref: 'v11.1' 60 | 61 | - run: | 62 | env RUST_LOG=error cargo run --release --bin consensus-tests -- --tests="./ethereum-tests" 63 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .idea 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "akula" 3 | version = "0.1.0" 4 | authors = ["Artem Vorotnikov "] 5 | edition = "2021" 6 | description = "Next-generation Ethereum implementation" 7 | license = "AGPL-3.0-only" 8 | default-run = "akula" 9 | 10 | [dependencies] 11 | aes = "0.8" 12 | anyhow = "1" 13 | arrayref = "0.3" 14 | arrayvec = { version = "0.7", features = ["serde"] } 15 | array-init = "2" 16 | async-recursion = "1" 17 | async-stream = "0.3" 18 | async-trait = "0.1" 19 | auto_impl = "1" 20 | bitvec = "1" 21 | block-padding = "0.3" 22 | byte-unit = "4" 23 | byteorder = "1" 24 | bytes = { version = "1", features = ["serde"] } 25 | bytes-literal = { git = "https://github.com/vorot93/bytes-literal" } 26 | bytesize = "1" 27 | cidr = "0.2" 28 | cipher = { version = "0.4", features = ["block-padding"] } 29 | clap = { version = "4", features = ["derive"] } 30 | croaring = { git = "https://github.com/saulius/croaring-rs" } 31 | ctr = "0.9" 32 | data-encoding = "2" 33 | derive_more = "0.99" 34 | digest = "0.10" 35 | directories = "4.0" 36 | educe = { version = "0.4", features = ["Debug", "Default"] } 37 | eio = "0.1" 38 | enr = { version = "0.7", 
default-features = false, features = [ 39 | "rust-secp256k1", 40 | ] } 41 | enum-primitive-derive = "0.2" 42 | expanded-pathbuf = "0.1" 43 | ethereum-forkid = "0.11" 44 | ethereum-interfaces = { git = "https://github.com/ledgerwatch/interfaces", features = [ 45 | "remotekv", 46 | "sentry", 47 | "web3", 48 | ] } 49 | ethereum-types = { version = "0.14", features = ["codec"] } 50 | ethereum-jsonrpc = { git = "https://github.com/vorot93/ethereum-jsonrpc", features = [ 51 | "server", 52 | ] } 53 | ethnum = { version = "1", features = ["eio", "rlp"] } 54 | fastrlp = { version = "0.2", features = [ 55 | "derive", 56 | "ethbloom", 57 | "ethereum-types", 58 | "ethnum", 59 | "std", 60 | ] } 61 | fdlimit = "0.2" 62 | futures = { version = "0.3", features = ["unstable"] } 63 | gen-iter = "0.2" 64 | generic-array = "0.14" 65 | getset = "0.1" 66 | hash-db = "0.15" 67 | hash256-std-hasher = "0.15" 68 | hex = "0.4" 69 | hex-literal = "0.3" 70 | hmac = "0.12" 71 | http = "0.2" 72 | i256 = { git = "https://github.com/vorot93/rust-i256" } 73 | igd = { git = "https://github.com/stevefan1999-personal/rust-igd", features = [ 74 | "aio", 75 | "tokio1", 76 | ] } 77 | impls = "1" 78 | itertools = "0.10" 79 | jsonrpsee = { version = "0.16", features = ["client", "server"] } 80 | lru = "0.8" 81 | maplit = "1" 82 | mdbx = { package = "libmdbx", version = "0.1" } 83 | modular-bitfield = "0.11" 84 | num-bigint = "0.4" 85 | num_cpus = "1.13" 86 | num_enum = "0.5" 87 | num-traits = "0.2" 88 | once_cell = "1" 89 | parking_lot = "0.12" 90 | primitive-types = { version = "0.12", default-features = false, features = [ 91 | "rlp", 92 | "rustc-hex", 93 | ] } 94 | rand = "0.8" 95 | rayon = "1" 96 | ripemd = "0.1" 97 | ron = "0.8" 98 | secp256k1 = { version = "0.24", features = [ 99 | "global-context", 100 | "rand-std", 101 | "recovery", 102 | ] } 103 | serde = "1" 104 | serde_json = "1" 105 | serde_with = "2" 106 | sha2 = "0.10" 107 | sha3 = "0.10" 108 | shellexpand = "2" 109 | snap = "1" 110 | string = "0.3" 111 | strum = { version = "0.24", features = ["derive"] } 112 | strum_macros = "0.24" 113 | stubborn-io = "0.3" 114 | substrate-bn = "0.6" 115 | tempfile = "3" 116 | task-group = { git = "https://github.com/vorot93/task-group" } 117 | thiserror = "1" 118 | tokio = { version = "1.18", features = ["full"] } 119 | console-subscriber = { version = "0.1.5", optional = true } 120 | tokio-stream = { version = "0.1", features = ["sync"] } 121 | tokio-util = { version = "0.7", features = ["codec"] } 122 | toml = { version = "0.5", features = ["preserve_order"] } 123 | tonic = { version = "0.8", default-features = false, features = [ 124 | "codegen", 125 | "prost", 126 | "transport", 127 | ] } 128 | tonic-health = "0.7" 129 | tonic-reflection = "0.5" 130 | tracing = "0.1" 131 | tracing-futures = "0.2" 132 | tracing-subscriber = { version = "0.3", features = ["env-filter"] } 133 | trust-dns-resolver = "0.22" 134 | unsigned-varint = { version = "0.7", features = ["std"] } 135 | url = "2" 136 | walkdir = "2" 137 | hashlink = "0.8.0" 138 | hashbrown = { version = "0.13", features = ["inline-more", "nightly"] } 139 | dashmap = "5.3" 140 | public-ip = "0.2.2" 141 | 142 | [features] 143 | default = [] 144 | console = ["tokio/tracing", "dep:console-subscriber"] 145 | 146 | [build-dependencies] 147 | anyhow = "1" 148 | vergen = "7" 149 | 150 | [dev-dependencies] 151 | bytes-literal = { git = "https://github.com/vorot93/bytes-literal" } 152 | criterion = { version = "0.4", default-features = false } 153 | include_dir = "0.7" 154 | pprof = { 
version = "0.11", features = ["flamegraph"] } 155 | proptest = "1.0.0" 156 | rand = { version = "0.8", features = ["std"] } 157 | tokio = { version = "1.18", features = ["full"] } 158 | tokio-test = "0.4.2" 159 | triehash = "0.8" 160 | 161 | [patch.crates-io] 162 | arrayvec = { git = "https://github.com/vorot93/arrayvec", branch = "pop-unchecked" } 163 | ethnum = { git = "https://github.com/vorot93/ethnum-rs", branch = "impls-3" } 164 | tonic = { git = "https://github.com/hyperium/tonic" } 165 | tonic-health = { git = "https://github.com/hyperium/tonic" } 166 | tonic-reflection = { git = "https://github.com/hyperium/tonic" } 167 | 168 | [[bin]] 169 | path = "bin/akula.rs" 170 | name = "akula" 171 | 172 | [[bin]] 173 | path = "bin/akula-rpc.rs" 174 | name = "akula-rpc" 175 | 176 | [[bin]] 177 | path = "bin/akula-sentry.rs" 178 | name = "akula-sentry" 179 | 180 | [[bin]] 181 | path = "bin/akula-toolbox.rs" 182 | name = "akula-toolbox" 183 | 184 | [[bin]] 185 | path = "bin/consensus-tests.rs" 186 | name = "consensus-tests" 187 | 188 | [[bench]] 189 | name = "bench" 190 | path = "./src/execution/evm/benches/bench.rs" 191 | harness = false 192 | 193 | [[bench]] 194 | name = "trie-bench" 195 | path = "./benches/bench.rs" 196 | harness = false 197 | 198 | [profile.production] 199 | inherits = "release" 200 | panic = "abort" 201 | codegen-units = 1 202 | lto = true 203 | 204 | [profile.bench] 205 | codegen-units = 1 206 | lto = true 207 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fedora:36 as builder 2 | 3 | # base requirements 4 | RUN dnf install -y git clang \ 5 | cmake e2fsprogs e2fsprogs-devel \ 6 | protobuf-compiler protobuf-devel 7 | 8 | RUN mkdir /rust 9 | WORKDIR /rust 10 | 11 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > install_rust.sh 12 | RUN chmod +x ./install_rust.sh 13 | RUN ./install_rust.sh -y 14 | ENV PATH="${PATH}:/root/.cargo/bin" 15 | 16 | WORKDIR /akula 17 | ADD . . 18 | 19 | RUN cargo build --workspace --profile=production 20 | 21 | FROM fedora:36 22 | RUN dnf install -y e2fsprogs 23 | 24 | # Avoid copying over a bunch of junk 25 | COPY --from=builder /akula/target/production/akula /usr/local/bin/akula 26 | COPY --from=builder /akula/target/production/akula-rpc /usr/local/bin/akula-rpc 27 | COPY --from=builder /akula/target/production/akula-sentry /usr/local/bin/akula-sentry 28 | COPY --from=builder /akula/target/production/akula-toolbox /usr/local/bin/akula-toolbox 29 | COPY --from=builder /akula/target/production/consensus-tests /usr/local/bin/consensus-tests 30 | 31 | ARG UID=1000 32 | ARG GID=1000 33 | 34 | RUN groupadd -g $GID akula 35 | RUN adduser --uid $UID --gid $GID akula 36 | USER akula 37 | RUN mkdir -p ~/.local/share/akula 38 | 39 | EXPOSE 8545 \ 40 | 8551 \ 41 | 30303 \ 42 | 30303/udp \ 43 | 7545 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NOTE: Akula has been deprecated, see [this Medium post](https://erigon.substack.com/p/winding-down-support-for-akula-project) for details. 2 | 3 | #
🦈 Akula 🦈
4 | 5 | Next-generation implementation of Ethereum protocol ("client") written in Rust, based on [Erigon architecture](https://github.com/ledgerwatch/interfaces). 6 | 7 | ## Why run Akula? 8 | 9 | Look at Mgas/s. 10 | 11 | ![](./src/res/readme-screenshot.png) 12 | 13 | 14 | ## Installation 15 | 16 | Installation instructions available on our [website](https://docs.akula.app/installation.html). 17 | 18 | --- 19 | Akula (_Акула_) is pronounced as `ah-koo-lah` and stands for _shark_ in Russian. 20 | 21 | ## License 22 | The entire code within this repository is licensed under the [GNU Affero General Public License v3](./LICENSE) 23 | 24 | Commercial licensing is available. Please contact info@akula.app for details. 25 | -------------------------------------------------------------------------------- /benches/bench.rs: -------------------------------------------------------------------------------- 1 | use akula::{ 2 | crypto::keccak256, 3 | etl::collector::{TableCollector, OPTIMAL_BUFFER_CAPACITY}, 4 | kv::{new_mem_chaindata, tables}, 5 | trie::{do_increment_intermediate_hashes, unpack_nibbles, DbTrieLoader, PrefixSet}, 6 | }; 7 | use criterion::*; 8 | use ethereum_types::Address; 9 | use primitive_types::H256; 10 | use tempfile::tempdir; 11 | 12 | fn generate_prefix_sets(address_from: u64, n_addresses: u64) -> (PrefixSet, PrefixSet) { 13 | let mut account_changes = PrefixSet::new(); 14 | let mut storage_changes = PrefixSet::new(); 15 | 16 | for i in 0..n_addresses { 17 | let hashed_address = keccak256(Address::from_low_u64_be(address_from + i)); 18 | account_changes.insert(hashed_address.as_ref()); 19 | if i % 2 == 0 { 20 | for j in 0..(i % 100) { 21 | let hashed_location = keccak256(H256::from_low_u64_be(j)); 22 | let key = [ 23 | hashed_address.as_bytes(), 24 | unpack_nibbles(hashed_location.as_bytes()).as_slice(), 25 | ] 26 | .concat(); 27 | storage_changes.insert(key.as_slice()); 28 | } 29 | } 30 | } 31 | 32 | (account_changes, storage_changes) 33 | } 34 | 35 | pub fn benchmark_trie(c: &mut Criterion) { 36 | let db = new_mem_chaindata().unwrap(); 37 | let tx = db.begin_mutable().unwrap(); 38 | 39 | let temp_dir = tempdir().unwrap(); 40 | let mut account_collector = 41 | TableCollector::::new(&temp_dir, OPTIMAL_BUFFER_CAPACITY); 42 | let mut storage_collector = 43 | TableCollector::::new(&temp_dir, OPTIMAL_BUFFER_CAPACITY); 44 | 45 | { 46 | let mut loader = DbTrieLoader::new(&tx, &mut account_collector, &mut storage_collector); 47 | 48 | let (account_changes, storage_changes) = generate_prefix_sets(0, 20_000); 49 | 50 | c.bench_function("trie-loader-generate", |b| { 51 | b.iter_with_setup( 52 | || (account_changes.clone(), storage_changes.clone()), 53 | |(mut ac, mut sc)| loader.calculate_root(&mut ac, &mut sc).unwrap(), 54 | ) 55 | }); 56 | 57 | for i in 0..10 { 58 | let (mut account_changes, mut storage_changes) = 59 | generate_prefix_sets(10_000 + i * 100_000, 100_000); 60 | do_increment_intermediate_hashes( 61 | &tx, 62 | &temp_dir, 63 | None, 64 | &mut account_changes, 65 | &mut storage_changes, 66 | ) 67 | .unwrap(); 68 | } 69 | 70 | tx.commit().unwrap(); 71 | } 72 | 73 | let tx = db.begin_mutable().unwrap(); 74 | let mut loader = DbTrieLoader::new(&tx, &mut account_collector, &mut storage_collector); 75 | 76 | let (account_changes, storage_changes) = generate_prefix_sets(0, 20_000); 77 | 78 | c.bench_function("trie-loader-increment", |b| { 79 | b.iter_with_setup( 80 | || (account_changes.clone(), storage_changes.clone()), 81 | |(mut ac, mut sc)| loader.calculate_root(&mut ac, &mut 
sc).unwrap(), 82 | ) 83 | }); 84 | } 85 | 86 | criterion_group!(benches, benchmark_trie); 87 | criterion_main!(benches); 88 | -------------------------------------------------------------------------------- /bin/akula-rpc.rs: -------------------------------------------------------------------------------- 1 | use akula::{ 2 | akula_tracing::{self, Component}, 3 | binutil::AkulaDataDir, 4 | kv::{mdbx::*, MdbxWithDirHandle}, 5 | rpc::{ 6 | debug::DebugApiServerImpl, erigon::ErigonApiServerImpl, eth::EthApiServerImpl, 7 | net::NetApiServerImpl, otterscan::OtterscanApiServerImpl, parity::ParityApiServerImpl, 8 | trace::TraceApiServerImpl, web3::Web3ApiServerImpl, 9 | }, 10 | }; 11 | use anyhow::format_err; 12 | use clap::Parser; 13 | use ethereum_jsonrpc::{ 14 | ErigonApiServer, EthApiServer, NetApiServer, OtterscanApiServer, ParityApiServer, 15 | TraceApiServer, Web3ApiServer, 16 | }; 17 | use jsonrpsee::{core::server::rpc_module::Methods, server::ServerBuilder}; 18 | use std::{collections::HashSet, future::pending, net::SocketAddr, sync::Arc}; 19 | use tracing::*; 20 | use tracing_subscriber::prelude::*; 21 | 22 | #[derive(Parser)] 23 | #[clap(name = "Akula RPC", about = "RPC server for Akula")] 24 | pub struct Opt { 25 | #[clap(long, default_value_t)] 26 | pub datadir: AkulaDataDir, 27 | 28 | #[clap(long, default_value = "127.0.0.1:8545")] 29 | pub rpc_listen_address: String, 30 | 31 | #[clap(long, default_value = "127.0.0.1:7545")] 32 | pub grpc_listen_address: SocketAddr, 33 | 34 | /// Enable API options 35 | #[clap(long)] 36 | pub enable_api: Option, 37 | } 38 | 39 | #[tokio::main] 40 | async fn main() -> anyhow::Result<()> { 41 | let opt = Opt::parse(); 42 | 43 | akula_tracing::build_subscriber(Component::RPCDaemon).init(); 44 | 45 | let akula_chain_data_dir = opt.datadir.chain_data_dir(); 46 | let db: Arc> = Arc::new( 47 | MdbxEnvironment::::open_ro( 48 | mdbx::Environment::new(), 49 | &akula_chain_data_dir, 50 | &akula::kv::tables::CHAINDATA_TABLES, 51 | )? 52 | .into(), 53 | ); 54 | 55 | let network_id = akula::accessors::chain::chain_config::read(&db.begin()?)? 56 | .ok_or_else(|| format_err!("no chainspec found"))? 
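// The network id is taken from the chainspec persisted in chaindata and is handed to the
// `net` API server constructed below.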
57 | .params 58 | .network_id; 59 | 60 | let jsonrpc_server = ServerBuilder::default() 61 | .build(&opt.rpc_listen_address) 62 | .await?; 63 | 64 | let mut api = Methods::new(); 65 | 66 | let api_options = opt 67 | .enable_api 68 | .map(|v| { 69 | v.split(',') 70 | .into_iter() 71 | .map(|s| s.to_lowercase()) 72 | .collect::>() 73 | }) 74 | .unwrap_or_default(); 75 | 76 | if api_options.is_empty() || api_options.contains("eth") { 77 | api.merge( 78 | EthApiServerImpl { 79 | db: db.clone(), 80 | call_gas_limit: 100_000_000, 81 | } 82 | .into_rpc(), 83 | ) 84 | .unwrap(); 85 | } 86 | 87 | if api_options.is_empty() || api_options.contains("net") { 88 | api.merge(NetApiServerImpl { network_id }.into_rpc()) 89 | .unwrap(); 90 | } 91 | 92 | if api_options.is_empty() || api_options.contains("erigon") { 93 | api.merge(ErigonApiServerImpl { db: db.clone() }.into_rpc()) 94 | .unwrap(); 95 | } 96 | 97 | if api_options.is_empty() || api_options.contains("otterscan") { 98 | api.merge(OtterscanApiServerImpl { db: db.clone() }.into_rpc()) 99 | .unwrap(); 100 | } 101 | 102 | if api_options.is_empty() || api_options.contains("parity") { 103 | api.merge(ParityApiServerImpl { db: db.clone() }.into_rpc()) 104 | .unwrap(); 105 | } 106 | 107 | if api_options.is_empty() || api_options.contains("trace") { 108 | api.merge( 109 | TraceApiServerImpl { 110 | db: db.clone(), 111 | call_gas_limit: 100_000_000, 112 | } 113 | .into_rpc(), 114 | ) 115 | .unwrap(); 116 | } 117 | 118 | if api_options.is_empty() || api_options.contains("web3") { 119 | api.merge(Web3ApiServerImpl.into_rpc()).unwrap(); 120 | } 121 | 122 | let _jsonrpc_server_handle = jsonrpc_server.start(api.clone())?; 123 | info!("JSONRPC server listening on {}", opt.rpc_listen_address); 124 | 125 | tokio::spawn({ 126 | let db = db.clone(); 127 | async move { 128 | info!("Starting gRPC server on {}", opt.grpc_listen_address); 129 | let mut builder = tonic::transport::Server::builder(); 130 | 131 | builder.add_service( 132 | tonic_reflection::server::Builder::configure() 133 | .register_encoded_file_descriptor_set(ethereum_interfaces::FILE_DESCRIPTOR_SET) 134 | .build() 135 | .unwrap(), 136 | ); 137 | 138 | builder 139 | .add_service( 140 | ethereum_interfaces::web3::debug_api_server::DebugApiServer::new( 141 | DebugApiServerImpl { db: db.clone() }, 142 | ), 143 | ) 144 | .add_service( 145 | ethereum_interfaces::web3::trace_api_server::TraceApiServer::new( 146 | TraceApiServerImpl { 147 | db, 148 | call_gas_limit: 100_000_000, 149 | }, 150 | ), 151 | ) 152 | .serve(opt.grpc_listen_address) 153 | .await 154 | .unwrap(); 155 | } 156 | }); 157 | 158 | pending().await 159 | } 160 | -------------------------------------------------------------------------------- /bin/akula-sentry.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code, clippy::upper_case_acronyms)] 2 | use akula::{ 3 | akula_tracing::{self, Component}, 4 | binutil::AkulaDataDir, 5 | models::ChainSpec, 6 | }; 7 | use clap::Parser; 8 | use educe::Educe; 9 | use expanded_pathbuf::ExpandedPathBuf; 10 | use std::time::Duration; 11 | use tokio::time::sleep; 12 | use tracing::*; 13 | use tracing_subscriber::prelude::*; 14 | 15 | #[derive(Educe, Parser)] 16 | #[clap( 17 | name = "ethereum-sentry", 18 | about = "Service that listens to Ethereum's P2P network, serves information to other nodes, and provides gRPC interface to clients to interact with the network." 
19 | )] 20 | #[educe(Debug)] 21 | pub struct Opts { 22 | #[clap(flatten)] 23 | pub sentry_opts: akula::sentry::Opts, 24 | /// Path to database directory. 25 | #[clap(long = "datadir", help = "Database directory path", default_value_t)] 26 | pub data_dir: AkulaDataDir, 27 | /// Name of the network to join 28 | #[clap(long, default_value = "mainnet")] 29 | pub chain: String, 30 | /// Chain specification file to use 31 | #[clap(long)] 32 | pub chain_spec_file: Option, 33 | } 34 | 35 | #[tokio::main] 36 | async fn main() -> anyhow::Result<()> { 37 | let opts: Opts = Opts::parse(); 38 | fdlimit::raise_fd_limit(); 39 | 40 | akula_tracing::build_subscriber(Component::Sentry).init(); 41 | 42 | let max_peers = opts.sentry_opts.max_peers; 43 | std::fs::create_dir_all(&opts.data_dir.0)?; 44 | 45 | let network_params = if let Some(path) = opts.chain_spec_file { 46 | ChainSpec::load_from_file(path)?.p2p 47 | } else { 48 | ChainSpec::load_builtin(&opts.chain)?.p2p 49 | }; 50 | 51 | let swarm = akula::sentry::run(opts.sentry_opts, opts.data_dir, network_params).await?; 52 | 53 | loop { 54 | info!( 55 | "Peer info: {} active (+{} dialing) / {} max.", 56 | swarm.connected_peers(), 57 | swarm.dialing(), 58 | max_peers 59 | ); 60 | 61 | sleep(Duration::from_secs(5)).await; 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use vergen::*; 3 | 4 | fn main() -> Result<()> { 5 | let mut config = Config::default(); 6 | *config.git_mut().commit_timestamp_kind_mut() = TimestampKind::DateOnly; 7 | *config.git_mut().sha_kind_mut() = ShaKind::Short; 8 | vergen(config) 9 | } 10 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | stable 2 | -------------------------------------------------------------------------------- /src/accessors/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod chain; 2 | pub mod state; 3 | -------------------------------------------------------------------------------- /src/binutil.rs: -------------------------------------------------------------------------------- 1 | use crate::{models::ChainSpec, res::chainspec}; 2 | use anyhow::format_err; 3 | use derive_more::*; 4 | use directories::ProjectDirs; 5 | use expanded_pathbuf::ExpandedPathBuf; 6 | use std::{ 7 | fmt::Display, 8 | fs::File, 9 | path::{Path, PathBuf}, 10 | }; 11 | 12 | #[derive(Clone, Debug, Deref, DerefMut, FromStr)] 13 | 14 | pub struct AkulaDataDir(pub ExpandedPathBuf); 15 | 16 | impl AkulaDataDir { 17 | pub fn chain_data_dir(&self) -> PathBuf { 18 | self.0.join("chaindata") 19 | } 20 | 21 | pub fn etl_temp_dir(&self) -> PathBuf { 22 | self.0.join("etl-temp") 23 | } 24 | 25 | pub fn sentry_db(&self) -> PathBuf { 26 | self.0.join("sentrydb") 27 | } 28 | 29 | pub fn nodekey(&self) -> PathBuf { 30 | self.0.join("nodekey") 31 | } 32 | } 33 | 34 | impl Default for AkulaDataDir { 35 | fn default() -> Self { 36 | Self(ExpandedPathBuf( 37 | ProjectDirs::from("", "", "Akula") 38 | .map(|pd| pd.data_dir().to_path_buf()) 39 | .unwrap_or_else(|| "data".into()), 40 | )) 41 | } 42 | } 43 | 44 | impl Display for AkulaDataDir { 45 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 46 | write!(f, "{}", self.0.as_os_str().to_str().unwrap()) 47 | } 48 | } 49 | 50 | impl ChainSpec { 
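// Chain specifications are RON documents (see `src/res/chainspec/*.ron`): `load_from_file`
// parses a user-supplied spec, while `load_builtin` resolves a well-known network name to a
// bundled one.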
51 | pub fn load_from_file(path: impl AsRef) -> anyhow::Result { 52 | Ok(ron::de::from_reader(File::open(path)?)?) 53 | } 54 | 55 | pub fn load_builtin(name: impl AsRef) -> anyhow::Result { 56 | let name = name.as_ref(); 57 | Ok(match name.to_lowercase().as_str() { 58 | "mainnet" | "ethereum" => chainspec::MAINNET.clone(), 59 | "ropsten" => chainspec::ROPSTEN.clone(), 60 | "rinkeby" => chainspec::RINKEBY.clone(), 61 | "goerli" => chainspec::GOERLI.clone(), 62 | "sepolia" => chainspec::SEPOLIA.clone(), 63 | _ => return Err(format_err!("Network {name} is unknown")), 64 | }) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/chain/intrinsic_gas.rs: -------------------------------------------------------------------------------- 1 | use super::protocol_param::fee; 2 | use crate::models::*; 3 | 4 | pub fn intrinsic_gas(txn: &Message, homestead: bool, istanbul: bool) -> u128 { 5 | let mut gas = fee::G_TRANSACTION as u128; 6 | 7 | if matches!(txn.action(), TransactionAction::Create) && homestead { 8 | gas += u128::from(fee::G_TX_CREATE); 9 | } 10 | 11 | // https://eips.ethereum.org/EIPS/eip-2930 12 | gas += txn.access_list().len() as u128 * u128::from(fee::ACCESS_LIST_ADDRESS_COST); 13 | for e in &*txn.access_list() { 14 | gas += e.slots.len() as u128 * u128::from(fee::ACCESS_LIST_STORAGE_KEY_COST); 15 | } 16 | 17 | if txn.input().is_empty() { 18 | return gas; 19 | } 20 | 21 | let non_zero_bytes = txn.input().iter().filter(|&&c| c != 0).count() as u128; 22 | 23 | let non_zero_gas = u128::from(if istanbul { 24 | fee::G_TX_DATA_NON_ZERO_ISTANBUL 25 | } else { 26 | fee::G_TX_DATA_NON_ZERO_FRONTIER 27 | }); 28 | gas += non_zero_bytes * non_zero_gas; 29 | 30 | let zero_bytes = txn.input().len() as u128 - non_zero_bytes; 31 | gas += zero_bytes * u128::from(fee::G_TX_DATA_ZERO); 32 | 33 | gas 34 | } 35 | -------------------------------------------------------------------------------- /src/chain/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod intrinsic_gas; 2 | pub mod protocol_param; 3 | -------------------------------------------------------------------------------- /src/chain/protocol_param.rs: -------------------------------------------------------------------------------- 1 | // Gas & refund fee schedule—see Appendix G of the Yellow Paper 2 | // https://ethereum.github.io/yellowpaper/paper.pdf 3 | pub mod fee { 4 | 5 | pub const G_SLOAD_TANGERINE_WHISTLE: u64 = 200; 6 | pub const G_SLOAD_ISTANBUL: u64 = 800; 7 | 8 | pub const WARM_STORAGE_READ_COST: u64 = 100; 9 | pub const COLD_SLOAD_COST: u64 = 2100; 10 | pub const ACCESS_LIST_STORAGE_KEY_COST: u64 = 1900; 11 | pub const ACCESS_LIST_ADDRESS_COST: u64 = 2400; 12 | 13 | pub const G_SSET: u64 = 20_000; 14 | pub const G_SRESET: u64 = 5_000; 15 | 16 | pub const R_SCLEAR: u64 = 15_000; 17 | pub const R_SELF_DESTRUCT: u64 = 24_000; 18 | 19 | pub const G_CODE_DEPOSIT: u64 = 200; 20 | 21 | pub const G_TX_CREATE: u64 = 32_000; 22 | pub const G_TX_DATA_ZERO: u64 = 4; 23 | pub const G_TX_DATA_NON_ZERO_FRONTIER: u64 = 68; 24 | pub const G_TX_DATA_NON_ZERO_ISTANBUL: u64 = 16; 25 | pub const G_TRANSACTION: u64 = 21_000; 26 | } // namespace fee 27 | 28 | pub mod param { 29 | // https://eips.ethereum.org/EIPS/eip-170 30 | pub const MAX_CODE_SIZE: usize = 0x6000; 31 | 32 | pub const G_QUAD_DIVISOR_BYZANTIUM: u64 = 20; // EIP-198 33 | pub const G_QUAD_DIVISOR_BERLIN: u64 = 3; // EIP-2565 34 | 35 | // https://eips.ethereum.org/EIPS/eip-3529 36 | pub const 
MAX_REFUND_QUOTIENT_FRONTIER: u64 = 2; 37 | pub const MAX_REFUND_QUOTIENT_LONDON: u64 = 5; 38 | 39 | // https://eips.ethereum.org/EIPS/eip-1559 40 | pub const INITIAL_BASE_FEE: u64 = 1_000_000_000; 41 | pub const BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8; 42 | pub const ELASTICITY_MULTIPLIER: u64 = 2; 43 | } 44 | -------------------------------------------------------------------------------- /src/crypto/blake2.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::many_single_char_names)] 2 | 3 | /// The precomputed values for BLAKE2b [from the spec](https://tools.ietf.org/html/rfc7693#section-2.7) 4 | /// There are 10 16-byte arrays - one for each round 5 | /// the entries are calculated from the sigma constants. 6 | const SIGMA: [[usize; 16]; 10] = [ 7 | [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 8 | [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3], 9 | [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4], 10 | [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8], 11 | [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13], 12 | [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9], 13 | [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11], 14 | [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10], 15 | [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5], 16 | [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0], 17 | ]; 18 | 19 | /// IV is the initialization vector for BLAKE2b. See https://tools.ietf.org/html/rfc7693#section-2.6 20 | /// for details. 21 | const IV: [u64; 8] = [ 22 | 0x6a09e667f3bcc908, 23 | 0xbb67ae8584caa73b, 24 | 0x3c6ef372fe94f82b, 25 | 0xa54ff53a5f1d36f1, 26 | 0x510e527fade682d1, 27 | 0x9b05688c2b3e6c1f, 28 | 0x1f83d9abfb41bd6b, 29 | 0x5be0cd19137e2179, 30 | ]; 31 | 32 | #[inline] 33 | /// The G mixing function. See https://tools.ietf.org/html/rfc7693#section-3.1 34 | fn g(v: &mut [u64], a: usize, b: usize, c: usize, d: usize, x: u64, y: u64) { 35 | v[a] = v[a].wrapping_add(v[b]).wrapping_add(x); 36 | v[d] = (v[d] ^ v[a]).rotate_right(32); 37 | v[c] = v[c].wrapping_add(v[d]); 38 | v[b] = (v[b] ^ v[c]).rotate_right(24); 39 | v[a] = v[a].wrapping_add(v[b]).wrapping_add(y); 40 | v[d] = (v[d] ^ v[a]).rotate_right(16); 41 | v[c] = v[c].wrapping_add(v[d]); 42 | v[b] = (v[b] ^ v[c]).rotate_right(63); 43 | } 44 | 45 | /// The Blake2 compression function F. See https://tools.ietf.org/html/rfc7693#section-3.2 46 | /// Takes as an argument the state vector `h`, message block vector `m`, offset counter `t`, final 47 | /// block indicator flag `f`, and number of rounds `rounds`. The state vector provided as the first 48 | /// parameter is modified by the function. 49 | pub fn compress(h: &mut [u64; 8], m: [u64; 16], t: [u64; 2], f: bool, rounds: usize) { 50 | let mut v = [0u64; 16]; 51 | v[..h.len()].copy_from_slice(h); // First half from state. 52 | v[h.len()..].copy_from_slice(&IV); // Second half from IV. 53 | 54 | v[12] ^= t[0]; 55 | v[13] ^= t[1]; 56 | 57 | if f { 58 | v[14] = !v[14] // Invert all bits if the last-block-flag is set. 59 | } 60 | for i in 0..rounds { 61 | // Message word selection permutation for this round. 
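// Each round first mixes the four columns of the 4x4 working vector `v`, then its four
// diagonals, feeding every G call the pair of message words selected by SIGMA.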
62 | let s = &SIGMA[i % 10]; 63 | g(&mut v, 0, 4, 8, 12, m[s[0]], m[s[1]]); 64 | g(&mut v, 1, 5, 9, 13, m[s[2]], m[s[3]]); 65 | g(&mut v, 2, 6, 10, 14, m[s[4]], m[s[5]]); 66 | g(&mut v, 3, 7, 11, 15, m[s[6]], m[s[7]]); 67 | 68 | g(&mut v, 0, 5, 10, 15, m[s[8]], m[s[9]]); 69 | g(&mut v, 1, 6, 11, 12, m[s[10]], m[s[11]]); 70 | g(&mut v, 2, 7, 8, 13, m[s[12]], m[s[13]]); 71 | g(&mut v, 3, 4, 9, 14, m[s[14]], m[s[15]]); 72 | } 73 | 74 | for i in 0..8 { 75 | h[i] ^= v[i] ^ v[i + 8]; 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | use ethereum_types::H256; 2 | use hash256_std_hasher::Hash256StdHasher; 3 | use hash_db::Hasher; 4 | use hex_literal::hex; 5 | use sha3::{Digest, Keccak256}; 6 | 7 | pub mod blake2; 8 | 9 | /// Concrete `Hasher` impl for the Keccak-256 hash 10 | #[derive(Default, Debug, Clone, PartialEq, Eq)] 11 | pub struct KeccakHasher; 12 | impl Hasher for KeccakHasher { 13 | type Out = H256; 14 | 15 | type StdHasher = Hash256StdHasher; 16 | 17 | const LENGTH: usize = 32; 18 | 19 | fn hash(x: &[u8]) -> Self::Out { 20 | keccak256(x) 21 | } 22 | } 23 | 24 | #[cfg(test)] 25 | /// Generates a trie root hash for a vector of key-value tuples 26 | pub fn trie_root(input: I) -> H256 27 | where 28 | I: IntoIterator, 29 | K: AsRef<[u8]> + Ord, 30 | V: AsRef<[u8]>, 31 | { 32 | triehash::trie_root::(input) 33 | } 34 | 35 | pub fn is_valid_signature(r: H256, s: H256) -> bool { 36 | const UPPER: H256 = H256(hex!( 37 | "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" 38 | )); 39 | 40 | if r.is_zero() || s.is_zero() { 41 | return false; 42 | } 43 | 44 | if r >= UPPER && s >= UPPER { 45 | return false; 46 | } 47 | 48 | true 49 | } 50 | 51 | pub fn keccak256(data: impl AsRef<[u8]>) -> H256 { 52 | H256::from_slice(&Keccak256::digest(data.as_ref())) 53 | } 54 | -------------------------------------------------------------------------------- /src/etl/data_provider.rs: -------------------------------------------------------------------------------- 1 | use anyhow; 2 | use std::{ 3 | cmp::Ord, 4 | io::{prelude::*, BufReader, BufWriter, SeekFrom}, 5 | path::Path, 6 | }; 7 | use tempfile::NamedTempFile; 8 | use tracing::*; 9 | 10 | #[derive(Eq, Clone, PartialEq, PartialOrd, Ord)] 11 | pub struct Entry { 12 | pub key: Key, 13 | pub value: Value, 14 | } 15 | 16 | impl Entry { 17 | pub fn new(key: Key, value: Value) -> Self { 18 | Self { key, value } 19 | } 20 | } 21 | 22 | pub struct DataProvider { 23 | file: BufReader, 24 | len: usize, 25 | } 26 | 27 | impl DataProvider { 28 | pub fn new( 29 | dir: &Path, 30 | buffer: Vec>, 31 | ) -> anyhow::Result 32 | where 33 | Self: Sized, 34 | Key: AsRef<[u8]>, 35 | Value: AsRef<[u8]>, 36 | { 37 | let file = NamedTempFile::new_in(dir)?; 38 | let path = file.path().to_string_lossy().into_owned(); 39 | let mut w = BufWriter::new(file); 40 | for entry in &buffer { 41 | let k = entry.key.as_ref(); 42 | let v = entry.value.as_ref(); 43 | 44 | w.write_all(&k.len().to_be_bytes())?; 45 | w.write_all(&v.len().to_be_bytes())?; 46 | w.write_all(k)?; 47 | w.write_all(v)?; 48 | } 49 | 50 | let mut file = BufReader::new(w.into_inner()?); 51 | file.seek(SeekFrom::Start(0))?; 52 | let len = buffer.len(); 53 | info!("Flushed out {len} bytes into temporary file at {path}"); 54 | Ok(DataProvider { file, len }) 55 | } 56 | 57 | #[allow(clippy::wrong_self_convention)] 58 | pub fn to_next(&mut self) -> anyhow::Result, 
Vec)>> { 59 | if self.len == 0 { 60 | return Ok(None); 61 | } 62 | 63 | let mut buffer_key_length = [0; 8]; 64 | let mut buffer_value_length = [0; 8]; 65 | 66 | self.file.read_exact(&mut buffer_key_length)?; 67 | self.file.read_exact(&mut buffer_value_length)?; 68 | 69 | let key_length = usize::from_be_bytes(buffer_key_length); 70 | let value_length = usize::from_be_bytes(buffer_value_length); 71 | let mut key = vec![0; key_length]; 72 | let mut value = vec![0; value_length]; 73 | 74 | self.file.read_exact(&mut key)?; 75 | self.file.read_exact(&mut value)?; 76 | 77 | self.len -= 1; 78 | 79 | Ok(Some((key, value))) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/etl/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod collector; 2 | pub mod data_provider; 3 | -------------------------------------------------------------------------------- /src/execution/address.rs: -------------------------------------------------------------------------------- 1 | use crate::{crypto::*, models::*, u256_to_h256}; 2 | 3 | pub fn create_address(caller: Address, nonce: u64) -> Address { 4 | #[derive(fastrlp::RlpEncodable, fastrlp::RlpMaxEncodedLen)] 5 | struct V { 6 | caller: Address, 7 | nonce: u64, 8 | } 9 | 10 | Address::from_slice(&keccak256(fastrlp::encode_fixed_size(&V { caller, nonce })).0[12..]) 11 | } 12 | 13 | pub fn create2_address(caller: Address, salt: U256, code_hash: H256) -> Address { 14 | let mut buf = [0_u8; 1 + ADDRESS_LENGTH + KECCAK_LENGTH + KECCAK_LENGTH]; 15 | buf[0] = 0xff; 16 | buf[1..1 + ADDRESS_LENGTH].copy_from_slice(&caller.0); 17 | buf[1 + ADDRESS_LENGTH..1 + ADDRESS_LENGTH + KECCAK_LENGTH] 18 | .copy_from_slice(&u256_to_h256(salt).0); 19 | buf[1 + ADDRESS_LENGTH + KECCAK_LENGTH..].copy_from_slice(&code_hash.0); 20 | 21 | Address::from_slice(&keccak256(buf).0[12..]) 22 | } 23 | 24 | #[cfg(test)] 25 | mod tests { 26 | use super::*; 27 | use hex_literal::hex; 28 | 29 | #[test] 30 | fn create() { 31 | assert_eq!( 32 | create_address(hex!("fbe0afcd7658ba86be41922059dd879c192d4c73").into(), 0), 33 | hex!("c669eaad75042be84daaf9b461b0e868b9ac1871").into() 34 | ); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/execution/analysis_cache.rs: -------------------------------------------------------------------------------- 1 | use super::evm::AnalyzedCode; 2 | use ethereum_types::H256; 3 | use lru::LruCache; 4 | use std::num::NonZeroUsize; 5 | 6 | #[derive(Debug)] 7 | pub struct AnalysisCache { 8 | inner: LruCache, 9 | } 10 | 11 | impl Default for AnalysisCache { 12 | fn default() -> Self { 13 | Self::new(NonZeroUsize::new(5000).unwrap()) 14 | } 15 | } 16 | 17 | impl AnalysisCache { 18 | pub fn new(cap: NonZeroUsize) -> Self { 19 | Self { 20 | inner: LruCache::new(cap), 21 | } 22 | } 23 | 24 | pub fn get(&mut self, code_hash: &H256) -> Option<&AnalyzedCode> { 25 | self.inner.get(code_hash) 26 | } 27 | 28 | pub fn put(&mut self, code_hash: H256, code: AnalyzedCode) { 29 | self.inner.put(code_hash, code); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/blake2b_huff.toml: -------------------------------------------------------------------------------- 1 | [empty] 2 | args = [ 3 | { c = "00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000" }, 4 | ] 5 | out = 
"9ab7a73a97a1a3031406b6c169634a9c06cfb81dec3323bb4de5ce6f4b7ca107de534442a7eaeafbaf366ccfdde1cb97d7c884e4344cd0a23039de71a56d630a" 6 | 7 | [2805nulls] 8 | args = [ 9 | { c = "00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000af5" }, 10 | { c = "00", x = 2805 }, 11 | ] 12 | out = "2a715625dd8e3b86dc5c3f88d229968e8db4ce2e2c257cf40c6cabbc9aa3e17dddf22739fd79e4bef38c8c2166ff374e407ab25000cba307dc160b6cbd49f1c5" 13 | 14 | [5610nulls] 15 | args = [ 16 | { c = "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000015ea" }, 17 | { c = "00", x = 5610 }, 18 | ] 19 | out = "f6f72a7f2dfc944429a4a6ec4d1a1c7998c5dd1de8a0c6d21918787ff6651c979bc0d35e4b66d7005f0417b37a13f8669ecdd2897b2f30d85cd91a307e247825" 20 | 21 | [8415nulls] 22 | args = [ 23 | { c = "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000020df" }, 24 | { c = "00", x = 8415 }, 25 | ] 26 | out = "ee058d30512191b6ddbf27cef0b8e582fb25c4f9cb0f5b87ae321e0487566d9a32f7e4a94af0578cecf7d5b976dccc7fa74214262aae14094216158957b81002" 27 | 28 | [65536nulls] 29 | args = [ 30 | { c = "00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000" }, 31 | { c = "00", x = 65536 }, 32 | ] 33 | out = "528b404cf7d409a0656aba45a037eb685a5e1c58c28dae8b15d8f29edea2d896928a5b0df2e72b9a8e6133f72002cb9e0a79407d0a28b857b1b0ad8e68774b14" 34 | -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/blake2b_shifts.toml: -------------------------------------------------------------------------------- 1 | [2805nulls] 2 | args = [ 3 | { c = "d299dac00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000af5" }, 4 | { c = "00", x = 2805 }, 5 | ] 6 | out = "000000000000000000000000000000000000000000000000610f78eb767b35d60000000000000000000000000000000000000000000000008acb88982dd67191000000000000000000000000000000000000000000000000d96d8b16cbf058cb00000000000000000000000000000000000000000000000075e58196f211891e000000000000000000000000000000000000000000000000b86aeafcaeb2344600000000000000000000000000000000000000000000000088074269d31f0c89000000000000000000000000000000000000000000000000b8d0461382136a330000000000000000000000000000000000000000000000005246a9d5610d58c2" 7 | 8 | [5610nulls] 9 | args = [ 10 | { c = "d299dac000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000015ea" }, 11 | { c = "00", x = 5610 }, 12 | ] 13 | out = "000000000000000000000000000000000000000000000000e24b661901100a9f000000000000000000000000000000000000000000000000e86c19106fc93aa3000000000000000000000000000000000000000000000000f3f772ede793c73c000000000000000000000000000000000000000000000000e2a620fb9c2542ce00000000000000000000000000000000000000000000000027dd4f1b786e9b6a00000000000000000000000000000000000000000000000072ee3bd30df788c4000000000000000000000000000000000000000000000000bbfdee69798ce72e0000000000000000000000000000000000000000000000001d2c903a1d9e92c7" 14 | 15 
| [8415nulls] 16 | args = [ 17 | { c = "d299dac000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000020df" }, 18 | { c = "00", x = 8415 }, 19 | ] 20 | out = "000000000000000000000000000000000000000000000000ff7720faeac09953000000000000000000000000000000000000000000000000a5ebd368781c3ddb000000000000000000000000000000000000000000000000626c172d3892a6dc000000000000000000000000000000000000000000000000768456d18531f95e000000000000000000000000000000000000000000000000e5504c9f0fa9a153000000000000000000000000000000000000000000000000a5f665b4bee1301f0000000000000000000000000000000000000000000000009358198b0f89274200000000000000000000000000000000000000000000000074b191b954d547e1" 21 | 22 | [65536nulls] 23 | args = [ 24 | { c = "d299dac00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000" }, 25 | { c = "00", x = 65536 }, 26 | ] 27 | out = "000000000000000000000000000000000000000000000000224bf59a700688ba000000000000000000000000000000000000000000000000a36d6ae418d9314d000000000000000000000000000000000000000000000000880adeb791ab854e000000000000000000000000000000000000000000000000bb697bc853d6f4c2000000000000000000000000000000000000000000000000faf9797b9902103f000000000000000000000000000000000000000000000000f8aa7b92b7340ec900000000000000000000000000000000000000000000000095e64259dcdb2be7000000000000000000000000000000000000000000000000ab45d93d04b14cd5" 28 | -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/sha1_divs.evm: -------------------------------------------------------------------------------- 1 | 
608060405234801561001057600080fd5b5060043610610047577c010000000000000000000000000000000000000000000000000000000060003504631605782b811461004c575b600080fd5b6100f26004803603602081101561006257600080fd5b81019060208101813564010000000081111561007d57600080fd5b82018360208201111561008f57600080fd5b803590602001918460018302840111640100000000831117156100b157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610127945050505050565b604080517fffffffffffffffffffffffffffffffffffffffff0000000000000000000000009092168252519081900360200190f35b60006040518251602084019350604067ffffffffffffffc0600183011601600982820310600181146101585761015f565b6040820191505b50776745230100efcdab890098badcfe001032547600c3d2e1f06101d0565b6000838310156101c9575080820151928290039260208410156101c9577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208590036101000a0119165b9392505050565b60005b82811015610686576101e684828961017e565b85526101f684602083018961017e565b60208601526040818503106001811461020e57610217565b60808286038701535b506040830381146001811461022b57610239565b602086018051600887021790525b5060405b6080811015610339578581017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc08101517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc88201517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08301517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff48401516002911891909218189081027ffffffffefffffffefffffffefffffffefffffffefffffffefffffffefffffffe1663800000009091047c010000000100000001000000010000000100000001000000010000000116179052600c0161023d565b5060805b61014081101561043a578581017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff808101517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff908201517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc08301517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe88401516004911891909218189081027ffffffffcfffffffcfffffffcfffffffcfffffffcfffffffcfffffffcfffffffc1663400000009091047c03000000030000000300000003000000030000000300000003000000031617905260180161033d565b508160008060005b605081101561065c5760148104801561047257600181146104ae57600281146104e857600381146105275761055d565b6501000000000085046a0100000000000000000000860481186f01000000000000000000000000000000870416189350635a827999925061055d565b6501000000000085046f0100000000000000000000000000000086046a0100000000000000000000870418189350636ed9eba1925061055d565b6a010000000000000000000085046f010000000000000000000000000000008604818117650100000000008804169116179350638f1bbcdc925061055d565b6501000000000085046f0100000000000000000000000000000086046a010000000000000000000087041818935063ca62c1d692505b50601f770800000000000000000000000000000000000000000000008504168063ffffffe073080000000000000000000000000000000000000087041617905080840190508063ffffffff86160190508083019050807c0100000000000000000000000000000000000000000000000000000000600484028c0151040190507401000000000000000000000000000000000000000081026501000000000086041794506a0100000000000000000000633fffffff6a040000000000000000000087041663c00000006604000000000000880416170277ffffffff00ffffffff000000000000ffffffff00ffffffff861617945050600181019050610442565b5050509190910177ffffffff00ffffffff00ffffffff00ffffffff00ffffffff16906040016101d3565b506c0100000000000000000000000063ffffffff821667ffffffff000000006101008404166bffffffff0000000000000000620100008504166fffffffff000000000000000000000000630100000086041673ffffffff000000000000000000000000000000006401000
00000870416171717170294505050505091905056fea165627a7a7230582083396642a98f6018c81ca24dc0c2af8e842bd33a6b8d7f08632dc1bc372e466a0029 -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/sha1_divs.toml: -------------------------------------------------------------------------------- 1 | [empty] 2 | args = [ 3 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000" }, 4 | ] 5 | out = "da39a3ee5e6b4b0d3255bfef95601890afd80709000000000000000000000000" 6 | 7 | [1351] 8 | args = [ 9 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000547" }, 10 | { c = "00", x = 1351 }, 11 | ] 12 | out = "c150bb7410841ea9231d01f36b504c7484c46317000000000000000000000000" 13 | 14 | [2737] 15 | args = [ 16 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000ab1" }, 17 | { c = "00", x = 2737 }, 18 | ] 19 | out = "22170b5185363658b8f74d5f4409064df25c2efa000000000000000000000000" 20 | 21 | [5311] 22 | args = [ 23 | { c = "1605782b000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000014bf" }, 24 | { c = "00", x = 5311 }, 25 | ] 26 | out = "86c9614de84df44ba01bfd1dd68e869dc730af09000000000000000000000000" 27 | 28 | [65536] 29 | args = [ 30 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000010000" }, 31 | { c = "00", x = 65536 }, 32 | ] 33 | out = "1adc95bebe9eea8c112d40cd04ab7a8d75c4f961000000000000000000000000" 34 | -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/sha1_shifts.evm: -------------------------------------------------------------------------------- 1 | 
608060405234801561001057600080fd5b506004361061002b5760003560e01c80631605782b14610030575b600080fd5b6100d66004803603602081101561004657600080fd5b81019060208101813564010000000081111561006157600080fd5b82018360208201111561007357600080fd5b8035906020019184600183028401116401000000008311171561009557600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295506100f8945050505050565b604080516bffffffffffffffffffffffff199092168252519081900360200190f35b60006040518251602084019350604067ffffffffffffffc06001830116016009828203106001811461012957610130565b6040820191505b50776745230100efcdab890098badcfe001032547600c3d2e1f0610183565b60008383101561017c5750808201519282900392602084101561017c5760001960208590036101000a0119165b9392505050565b60005b8281101561045c5761019984828961014f565b85526101a984602083018961014f565b6020860152604081850310600181146101c1576101ca565b60808286038701535b50604083038114600181146101de576101ee565b8460031b60208701511760208701525b5060405b608081101561027157858101603f19810151603719820151601f19830151600b1984015118911818600181901b7ffffffffefffffffefffffffefffffffefffffffefffffffefffffffefffffffe16601f9190911c7c010000000100000001000000010000000100000001000000010000000116179052600c016101f2565b5060805b6101408110156102f557858101607f19810151606f19820151603f1983015160171984015118911818600281901b7ffffffffcfffffffcfffffffcfffffffcfffffffcfffffffcfffffffcfffffffc16601e9190911c7c030000000300000003000000030000000300000003000000030000000316179052601801610275565b508160008060005b60508110156104325760148104801561032d576001811461034e576002811461036d5760038114610391576103ac565b602885901c605086901c8118607887901c16189350635a82799992506103ac565b8460501c8560781c189350838560281c189350636ed9eba192506103ac565b605085901c607886901c818117602888901c169116179350638f1bbcdc92506103ac565b8460501c8560781c189350838560281c18935063ca62c1d692505b50601f8460bb1c168063ffffffe086609b1c1617905080840190508063ffffffff86160190508083019050808260021b8b015160e01c0190508060a01b8560281c179450633fffffff8560521c1663c00000008660321c161760501b77ffffffff00ffffffff000000000000ffffffff00ffffffff8616179450506001810190506102fd565b5050509190910177ffffffff00ffffffff00ffffffff00ffffffff00ffffffff1690604001610186565b5063ffffffff811667ffffffff000000008260081c166bffffffff00000000000000008360101c166fffffffff0000000000000000000000008460181c1673ffffffff000000000000000000000000000000008560201c161717171760601b94505050505091905056fea165627a7a72305820227af8b272b9b0e3d345f580ebcde55f50e3e8b7ecafabffcadb92e55e4de68e0029 -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/sha1_shifts.toml: -------------------------------------------------------------------------------- 1 | [empty] 2 | args = [ 3 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000" }, 4 | ] 5 | out = "da39a3ee5e6b4b0d3255bfef95601890afd80709000000000000000000000000" 6 | 7 | [1351] 8 | args = [ 9 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000547" }, 10 | { c = "00", x = 1351 }, 11 | ] 12 | out = "c150bb7410841ea9231d01f36b504c7484c46317000000000000000000000000" 13 | 14 | [2737] 15 | args = [ 16 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000ab1" }, 17 | { c = "00", x = 2737 }, 18 | ] 19 | out = 
"22170b5185363658b8f74d5f4409064df25c2efa000000000000000000000000" 20 | 21 | [5311] 22 | args = [ 23 | { c = "1605782b000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000014bf" }, 24 | { c = "00", x = 5311 }, 25 | ] 26 | out = "86c9614de84df44ba01bfd1dd68e869dc730af09000000000000000000000000" 27 | 28 | [65536] 29 | args = [ 30 | { c = "1605782b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000010000" }, 31 | { c = "00", x = 65536 }, 32 | ] 33 | out = "1adc95bebe9eea8c112d40cd04ab7a8d75c4f961000000000000000000000000" 34 | -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/snailtracer.toml: -------------------------------------------------------------------------------- 1 | [benchmark] 2 | args = [{ c = "30627b7c" }] 3 | out = "190000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000006300000000000000000000000000000000000000000000000000000000000000" 4 | -------------------------------------------------------------------------------- /src/execution/evm/benches/inputs/benchmarks/main/weierstrudel.toml: -------------------------------------------------------------------------------- 1 | [0] 2 | args = [ 3 | { c = "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002" }, 4 | { c = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" }, 5 | ] 6 | out = "0267044364fafcc4179b3d9841976c5f9751085391a8d62ea2d85f3add49df212fd05e78e4d6069841dca813b82c477c70ff014564c72add2f3258355ae3900e1b237dbf02109b56a5d2e9bd7ac6d5613b4b685f39689ba531cc4383128a151f" 7 | 8 | [1] 9 | args = [ 10 | { c = "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", x = 2 }, 11 | { c = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", x = 2 }, 12 | ] 13 | out = "23386b694c363dc9e2e2eee49aa5bf940e847fc66a54058e42b780c97d7e53630dbf2d60869bd617de2578e9ccd52c903ec669efd4219228e91c72407568ae2618b263f94f331dfebd495772e61d3517b404ce327c504b23f3b15af8f44bdfad" 14 | 15 | [3] 16 | args = [ 17 | { c = "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", x = 4 }, 18 | { c = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", x = 4 }, 19 | ] 20 | out = "124f4bf2511c4ae2770b50b1ab956174bf87260d6ce8255c36ac44d8c354ceb3251e057af103a5b54eef7986550787979eb30384b77dffd8e57891c771553f4400c87e7d58a5ffc110788895ed1569b8b21bfac97502da2f38bd273c5046250a" 21 | 22 | [9] 23 | args = [ 24 | { c = "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", x = 10 }, 25 | { c = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", x = 10 }, 26 | ] 27 | out = "1c895346881918d3b79f69a13cf38ba4f4377b3225edb88a93135f3b0ec986c20dc6f1ddba9b2573209c7f1165bf5eb03786ecadc30e130ff67c17fda6c4fb700fe7b15aad333f3b985a54c2aa523c9e9c4a2e4f53eb8abaa49668289b43147b" 28 | 29 | [14] 30 | args = [ 31 | { c = "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", x = 15 }, 32 | { c = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", x = 15 
}, 33 | ] 34 | out = "0f7f8f15d02f4e47ac51fe38826c20f511253fe504557da98f0aa915cb21db9b1fbfa0a8ccd10005eba9e5f7d575e530baa14c9711e9dbedca27a79f01c0ddac16989a70b325764d40bb22072b810f8606c35e6af9116fbdac4bc53c081109d3" 35 | -------------------------------------------------------------------------------- /src/execution/evm/host.rs: -------------------------------------------------------------------------------- 1 | use crate::execution::tracer::{NoopTracer, Tracer}; 2 | 3 | use super::{ 4 | common::{InterpreterMessage, Output}, 5 | CreateMessage, StatusCode, 6 | }; 7 | use bytes::Bytes; 8 | use ethereum_types::Address; 9 | use ethnum::U256; 10 | 11 | /// State access status (EIP-2929). 12 | #[derive(Clone, Copy, Debug, PartialEq, Eq)] 13 | pub enum AccessStatus { 14 | Cold, 15 | Warm, 16 | } 17 | 18 | impl Default for AccessStatus { 19 | fn default() -> Self { 20 | Self::Cold 21 | } 22 | } 23 | 24 | #[derive(Clone, Copy, Debug)] 25 | pub enum StorageStatus { 26 | /// The value of a storage item has been left unchanged: 0 -> 0 and X -> X. 27 | Unchanged, 28 | /// The value of a storage item has been modified: X -> Y. 29 | Modified, 30 | /// A storage item has been modified after being modified before: X -> Y -> Z. 31 | ModifiedAgain, 32 | /// A new storage item has been added: 0 -> X. 33 | Added, 34 | /// A storage item has been deleted: X -> 0. 35 | Deleted, 36 | } 37 | 38 | /// The transaction and block data for execution. 39 | #[derive(Clone, Debug)] 40 | pub struct TxContext { 41 | /// The transaction gas price. 42 | pub tx_gas_price: U256, 43 | /// The transaction origin account. 44 | pub tx_origin: Address, 45 | /// The miner of the block. 46 | pub block_coinbase: Address, 47 | /// The block number. 48 | pub block_number: u64, 49 | /// The block timestamp. 50 | pub block_timestamp: u64, 51 | /// The block gas limit. 52 | pub block_gas_limit: u64, 53 | /// The block difficulty. 54 | pub block_difficulty: U256, 55 | /// The blockchain's ChainID. 56 | pub chain_id: U256, 57 | /// The block base fee per gas (EIP-1559, EIP-3198). 58 | pub block_base_fee: U256, 59 | } 60 | 61 | #[derive(Clone, Debug, PartialEq, Eq)] 62 | pub enum Call<'a> { 63 | Call(&'a InterpreterMessage), 64 | Create(&'a CreateMessage), 65 | } 66 | 67 | /// Abstraction that exposes host context to EVM. 68 | pub trait Host { 69 | fn trace_instructions(&self) -> bool { 70 | false 71 | } 72 | fn tracer(&mut self, mut f: impl FnMut(&mut dyn Tracer)) { 73 | (f)(&mut NoopTracer) 74 | } 75 | /// Check if an account exists. 76 | fn account_exists(&mut self, address: Address) -> bool; 77 | /// Get value of a storage key. 78 | /// 79 | /// Returns `Ok(U256::zero())` if does not exist. 80 | fn get_storage(&mut self, address: Address, key: U256) -> U256; 81 | /// Set value of a storage key. 82 | fn set_storage(&mut self, address: Address, key: U256, value: U256) -> StorageStatus; 83 | /// Get balance of an account. 84 | /// 85 | /// Returns `Ok(0)` if account does not exist. 86 | fn get_balance(&mut self, address: Address) -> U256; 87 | /// Get code size of an account. 88 | /// 89 | /// Returns `Ok(0)` if account does not exist. 90 | fn get_code_size(&mut self, address: Address) -> U256; 91 | /// Get code hash of an account. 92 | /// 93 | /// Returns `Ok(0)` if account does not exist. 94 | fn get_code_hash(&mut self, address: Address) -> U256; 95 | /// Copy code of an account. 96 | /// 97 | /// Returns `Ok(0)` if offset is invalid. 
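/// (As the signature below shows, this method returns a plain `usize` —
/// the number of bytes actually copied into `buffer` — rather than a
/// `Result`, so an invalid offset surfaces as a zero count, not an error.)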
98 | fn copy_code(&mut self, address: Address, offset: usize, buffer: &mut [u8]) -> usize; 99 | /// Self-destruct account. 100 | fn selfdestruct(&mut self, address: Address, beneficiary: Address); 101 | /// Call to another account. 102 | fn call(&mut self, msg: Call) -> Output; 103 | /// Retrieve transaction context. 104 | fn get_tx_context(&mut self) -> Result; 105 | /// Get block hash. 106 | /// 107 | /// Returns `Ok(U256::zero())` if block does not exist. 108 | fn get_block_hash(&mut self, block_number: u64) -> U256; 109 | /// Emit a log. 110 | fn emit_log(&mut self, address: Address, data: Bytes, topics: &[U256]); 111 | /// Mark account as warm, return previous access status. 112 | /// 113 | /// Returns `Ok(AccessStatus::Cold)` if account does not exist. 114 | fn access_account(&mut self, address: Address) -> AccessStatus; 115 | /// Mark storage key as warm, return previous access status. 116 | /// 117 | /// Returns `Ok(AccessStatus::Cold)` if account does not exist. 118 | fn access_storage(&mut self, address: Address, key: U256) -> AccessStatus; 119 | } 120 | -------------------------------------------------------------------------------- /src/execution/evm/instructions/arithmetic.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | execution::evm::{state::*, StatusCode}, 3 | models::*, 4 | }; 5 | use ethereum_types::U512; 6 | use ethnum::U256; 7 | use i256::{i256_div, i256_mod}; 8 | 9 | #[inline] 10 | pub(crate) fn add(stack: &mut Stack) { 11 | let a = stack.pop(); 12 | let b = stack.pop(); 13 | stack.push(a.overflowing_add(b).0); 14 | } 15 | 16 | #[inline] 17 | pub(crate) fn mul(stack: &mut Stack) { 18 | let a = stack.pop(); 19 | let b = stack.pop(); 20 | stack.push(a.overflowing_mul(b).0); 21 | } 22 | 23 | #[inline] 24 | pub(crate) fn sub(stack: &mut Stack) { 25 | let a = stack.pop(); 26 | let b = stack.pop(); 27 | stack.push(a.overflowing_sub(b).0); 28 | } 29 | 30 | #[inline] 31 | pub(crate) fn div(stack: &mut Stack) { 32 | let a = stack.pop(); 33 | let b = stack.get_mut(0); 34 | *b = if *b == 0 { U256::ZERO } else { a / *b }; 35 | } 36 | 37 | #[inline] 38 | pub(crate) fn sdiv(stack: &mut Stack) { 39 | let a = stack.pop(); 40 | let b = stack.pop(); 41 | let v = i256_div(a, b); 42 | stack.push(v); 43 | } 44 | 45 | #[inline] 46 | pub(crate) fn modulo(stack: &mut Stack) { 47 | let a = stack.pop(); 48 | let b = stack.get_mut(0); 49 | *b = if *b == 0 { U256::ZERO } else { a % *b }; 50 | } 51 | 52 | #[inline] 53 | pub(crate) fn smod(stack: &mut Stack) { 54 | let a = stack.pop(); 55 | let b = stack.get_mut(0); 56 | 57 | if *b == 0 { 58 | *b = U256::ZERO 59 | } else { 60 | *b = i256_mod(a, *b); 61 | }; 62 | } 63 | 64 | #[inline] 65 | pub(crate) fn addmod(stack: &mut Stack) { 66 | let a = stack.pop(); 67 | let b = stack.pop(); 68 | let c = stack.pop(); 69 | 70 | let v = if c == 0 { 71 | U256::ZERO 72 | } else { 73 | let v = ethereum_types::U256::try_from( 74 | (U512::from_big_endian(&a.to_be_bytes()) + U512::from_big_endian(&b.to_be_bytes())) 75 | % U512::from_big_endian(&c.to_be_bytes()), 76 | ) 77 | .unwrap(); 78 | let mut arr = [0; 32]; 79 | v.to_big_endian(&mut arr); 80 | U256::from_be_bytes(arr) 81 | }; 82 | 83 | stack.push(v); 84 | } 85 | 86 | #[inline] 87 | pub(crate) fn mulmod(stack: &mut Stack) { 88 | let a = stack.pop(); 89 | let b = stack.pop(); 90 | let c = stack.pop(); 91 | 92 | let v = if c == 0 { 93 | U256::ZERO 94 | } else { 95 | let v = ethereum_types::U256::try_from( 96 | (U512::from_big_endian(&a.to_be_bytes()) * 
U512::from_big_endian(&b.to_be_bytes())) 97 | % U512::from_big_endian(&c.to_be_bytes()), 98 | ) 99 | .unwrap(); 100 | let mut arr = [0; 32]; 101 | v.to_big_endian(&mut arr); 102 | U256::from_be_bytes(arr) 103 | }; 104 | 105 | stack.push(v); 106 | } 107 | 108 | #[inline] 109 | fn log2floor(value: U256) -> u64 { 110 | debug_assert!(value != 0); 111 | let mut l: u64 = 256; 112 | for v in [value.high(), value.low()] { 113 | if *v == 0 { 114 | l -= 128; 115 | } else { 116 | l -= v.leading_zeros() as u64; 117 | if l == 0 { 118 | return l; 119 | } else { 120 | return l - 1; 121 | } 122 | } 123 | } 124 | l 125 | } 126 | 127 | #[inline] 128 | pub(crate) fn exp(state: &mut ExecutionState) -> Result<(), StatusCode> { 129 | let mut base = state.stack.pop(); 130 | let mut power = state.stack.pop(); 131 | 132 | if power > 0 { 133 | let factor = if REVISION >= Revision::Spurious { 134 | 50 135 | } else { 136 | 10 137 | }; 138 | let additional_gas = factor * (log2floor(power) / 8 + 1); 139 | 140 | state.gas_left -= additional_gas as i64; 141 | 142 | if state.gas_left < 0 { 143 | return Err(StatusCode::OutOfGas); 144 | } 145 | } 146 | 147 | let mut v = U256::ONE; 148 | 149 | while power > 0 { 150 | if (power & 1) != 0 { 151 | v = v.overflowing_mul(base).0; 152 | } 153 | power >>= 1; 154 | base = base.overflowing_mul(base).0; 155 | } 156 | 157 | state.stack.push(v); 158 | 159 | Ok(()) 160 | } 161 | 162 | #[inline] 163 | pub(crate) fn signextend(stack: &mut Stack) { 164 | let a = stack.pop(); 165 | let b = stack.get_mut(0); 166 | 167 | if a < 32 { 168 | let bit_index = (8 * a.as_u8() + 7) as u16; 169 | let (hi, lo) = b.into_words(); 170 | let bit = if bit_index > 0x7f { hi } else { lo } & (1 << (bit_index % 128)) != 0; 171 | let mask = (U256::ONE << bit_index) - U256::ONE; 172 | *b = if bit { *b | !mask } else { *b & mask } 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /src/execution/evm/instructions/bitwise.rs: -------------------------------------------------------------------------------- 1 | use crate::execution::evm::state::Stack; 2 | use ethnum::U256; 3 | use i256::{i256_sign, two_compl, Sign}; 4 | 5 | #[inline] 6 | pub(crate) fn byte(stack: &mut Stack) { 7 | let i = stack.pop(); 8 | let x = stack.get_mut(0); 9 | 10 | if i >= 32 { 11 | *x = U256::ZERO; 12 | return; 13 | } 14 | 15 | let mut i = *i.low(); 16 | 17 | let x_word = if i >= 16 { 18 | i -= 16; 19 | x.low() 20 | } else { 21 | x.high() 22 | }; 23 | 24 | *x = U256::from((x_word >> (120 - i * 8)) & 0xFF); 25 | } 26 | 27 | #[inline] 28 | pub(crate) fn shl(stack: &mut Stack) { 29 | let shift = stack.pop(); 30 | let value = stack.get_mut(0); 31 | 32 | if *value == 0 || shift >= 256 { 33 | *value = U256::ZERO; 34 | } else { 35 | *value <<= shift.as_u8() 36 | }; 37 | } 38 | 39 | #[inline] 40 | pub(crate) fn shr(stack: &mut Stack) { 41 | let shift = stack.pop(); 42 | let value = stack.get_mut(0); 43 | 44 | if *value == 0 || shift >= 256 { 45 | *value = U256::ZERO 46 | } else { 47 | *value >>= shift.as_u8() 48 | }; 49 | } 50 | 51 | #[inline] 52 | pub(crate) fn sar(stack: &mut Stack) { 53 | let shift = stack.pop(); 54 | let mut value = stack.pop(); 55 | 56 | let value_sign = i256_sign::(&mut value); 57 | 58 | stack.push(if value == U256::ZERO || shift >= 256 { 59 | match value_sign { 60 | // value is 0 or >=1, pushing 0 61 | Sign::Plus | Sign::Zero => U256::ZERO, 62 | // value is <0, pushing -1 63 | Sign::Minus => two_compl(U256::ONE), 64 | } 65 | } else { 66 | let shift = shift.as_u128(); 67 | 68 | 
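// Negative case below: `i256_sign` (called above with the two's-complement
// flag) has already replaced `value` with its magnitude, so
// `((magnitude - 1) >> shift) + 1` is the magnitude of the arithmetic shift
// rounded toward negative infinity, and `two_compl` turns it back into the
// raw word that SAR pushes. Example: SAR of -7 by 1 yields -4.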
match value_sign { 69 | Sign::Plus | Sign::Zero => value >> shift, 70 | Sign::Minus => { 71 | let shifted = ((value.overflowing_sub(U256::ONE).0) >> shift) 72 | .overflowing_add(U256::ONE) 73 | .0; 74 | two_compl(shifted) 75 | } 76 | } 77 | }); 78 | } 79 | 80 | #[cfg(test)] 81 | mod tests { 82 | use super::*; 83 | 84 | #[test] 85 | fn test_instruction_byte() { 86 | let value = U256::from_be_bytes( 87 | (1u8..=32u8) 88 | .map(|x| 5 * x) 89 | .collect::>() 90 | .try_into() 91 | .unwrap(), 92 | ); 93 | 94 | for i in 0u16..32 { 95 | let mut stack = Stack::new(); 96 | stack.push(value); 97 | stack.push(U256::from(i)); 98 | 99 | byte(&mut stack); 100 | let result = stack.pop(); 101 | 102 | assert_eq!(result, U256::from(5 * (i + 1))); 103 | } 104 | 105 | let mut stack = Stack::new(); 106 | stack.push(value); 107 | stack.push(U256::from(100u128)); 108 | 109 | byte(&mut stack); 110 | let result = stack.pop(); 111 | assert_eq!(result, U256::ZERO); 112 | 113 | let mut stack = Stack::new(); 114 | stack.push(value); 115 | stack.push(U256::from_words(1, 0)); 116 | 117 | byte(&mut stack); 118 | let result = stack.pop(); 119 | assert_eq!(result, U256::ZERO); 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/execution/evm/instructions/boolean.rs: -------------------------------------------------------------------------------- 1 | use crate::execution::evm::state::*; 2 | use ethnum::U256; 3 | use i256::i256_cmp; 4 | use std::cmp::Ordering; 5 | 6 | #[inline] 7 | pub(crate) fn lt(stack: &mut Stack) { 8 | let a = stack.pop(); 9 | let b = stack.get_mut(0); 10 | 11 | *b = if a.lt(b) { U256::ONE } else { U256::ZERO } 12 | } 13 | 14 | #[inline] 15 | pub(crate) fn gt(stack: &mut Stack) { 16 | let a = stack.pop(); 17 | let b = stack.get_mut(0); 18 | 19 | *b = if a.gt(b) { U256::ONE } else { U256::ZERO } 20 | } 21 | 22 | #[inline] 23 | pub(crate) fn slt(stack: &mut Stack) { 24 | let a = stack.pop(); 25 | let b = stack.get_mut(0); 26 | 27 | *b = if i256_cmp(a, *b) == Ordering::Less { 28 | U256::ONE 29 | } else { 30 | U256::ZERO 31 | } 32 | } 33 | 34 | #[inline] 35 | pub(crate) fn sgt(stack: &mut Stack) { 36 | let a = stack.pop(); 37 | let b = stack.get_mut(0); 38 | 39 | *b = if i256_cmp(a, *b) == Ordering::Greater { 40 | U256::ONE 41 | } else { 42 | U256::ZERO 43 | } 44 | } 45 | 46 | #[inline] 47 | pub(crate) fn eq(stack: &mut Stack) { 48 | let a = stack.pop(); 49 | let b = stack.get_mut(0); 50 | 51 | *b = if a.eq(b) { U256::ONE } else { U256::ZERO } 52 | } 53 | 54 | #[inline] 55 | pub(crate) fn iszero(stack: &mut Stack) { 56 | let a = stack.get_mut(0); 57 | *a = if *a == 0 { U256::ONE } else { U256::ZERO } 58 | } 59 | 60 | #[inline] 61 | pub(crate) fn and(stack: &mut Stack) { 62 | let a = stack.pop(); 63 | let b = stack.get_mut(0); 64 | *b = a & *b; 65 | } 66 | 67 | #[inline] 68 | pub(crate) fn or(stack: &mut Stack) { 69 | let a = stack.pop(); 70 | let b = stack.get_mut(0); 71 | *b = a | *b; 72 | } 73 | 74 | #[inline] 75 | pub(crate) fn xor(stack: &mut Stack) { 76 | let a = stack.pop(); 77 | let b = stack.get_mut(0); 78 | *b = a ^ *b; 79 | } 80 | 81 | #[inline] 82 | pub(crate) fn not(stack: &mut Stack) { 83 | let v = stack.get_mut(0); 84 | *v = !*v; 85 | } 86 | -------------------------------------------------------------------------------- /src/execution/evm/instructions/control.rs: -------------------------------------------------------------------------------- 1 | use crate::execution::evm::{interpreter::JumpdestMap, state::ExecutionState, StatusCode}; 2 | 
use ethnum::U256; 3 | 4 | #[inline] 5 | pub(crate) fn ret(state: &mut ExecutionState) -> Result<(), StatusCode> { 6 | let offset = *state.stack.get(0); 7 | let size = *state.stack.get(1); 8 | 9 | if let Some(region) = super::memory::get_memory_region(state, offset, size)? { 10 | let offset = region.offset; 11 | let size = region.size.get(); 12 | state.output_data = state.memory[offset..][..size].to_vec().into(); 13 | } 14 | 15 | Ok(()) 16 | } 17 | 18 | #[inline] 19 | pub(crate) fn op_jump(dst: U256, jumpdest_map: &JumpdestMap) -> Result { 20 | if !jumpdest_map.contains(dst) { 21 | return Err(StatusCode::BadJumpDestination); 22 | } 23 | Ok(dst.as_usize()) 24 | } 25 | 26 | #[inline] 27 | pub(crate) fn calldataload(state: &mut ExecutionState) { 28 | let index = state.stack.pop(); 29 | 30 | let input_len = state.message.input_data.len(); 31 | 32 | let res = if index > u128::try_from(input_len).unwrap() { 33 | U256::ZERO 34 | } else { 35 | let index_usize = index.as_usize(); 36 | let end = core::cmp::min(index_usize + 32, input_len); 37 | 38 | let mut data = [0; 32]; 39 | data[..end - index_usize].copy_from_slice(&state.message.input_data[index_usize..end]); 40 | 41 | U256::from_be_bytes(data) 42 | }; 43 | state.stack.push(res); 44 | } 45 | 46 | #[inline] 47 | pub(crate) fn calldatasize(state: &mut ExecutionState) { 48 | let res = u128::try_from(state.message.input_data.len()) 49 | .unwrap() 50 | .into(); 51 | state.stack.push(res); 52 | } 53 | -------------------------------------------------------------------------------- /src/execution/evm/instructions/instruction_table.rs: -------------------------------------------------------------------------------- 1 | use super::properties::GAS_COSTS; 2 | use crate::{execution::evm::instructions::properties, models::*}; 3 | 4 | #[derive(Clone, Copy, Debug)] 5 | pub struct InstructionTableEntry { 6 | pub gas_cost: i16, 7 | pub stack_height_required: u8, 8 | pub can_overflow_stack: bool, 9 | } 10 | 11 | pub type InstructionTable = [InstructionTableEntry; 256]; 12 | pub type InstructionTables = [InstructionTable; Revision::len()]; 13 | 14 | const fn instruction_tables() -> InstructionTables { 15 | let mut table = [[InstructionTableEntry { 16 | gas_cost: -1, 17 | stack_height_required: 0, 18 | can_overflow_stack: false, 19 | }; 256]; Revision::len()]; 20 | 21 | const LATEST: Revision = Revision::latest(); 22 | 23 | let revtable = Revision::iter(); 24 | let mut reviter = 0_usize; 25 | loop { 26 | let revision = revtable[reviter]; 27 | 28 | let mut opcode = 0; 29 | loop { 30 | let (stack_height_required, can_overflow_stack) = 31 | if let Some(p) = &properties::PROPERTIES[opcode] { 32 | (p.stack_height_required, p.stack_height_change > 0) 33 | } else { 34 | (0, false) 35 | }; 36 | 37 | table[revision as usize][opcode] = InstructionTableEntry { 38 | gas_cost: GAS_COSTS[revision as usize][opcode], 39 | stack_height_required, 40 | can_overflow_stack, 41 | }; 42 | 43 | if opcode == u8::MAX as usize { 44 | break; 45 | } else { 46 | opcode += 1; 47 | } 48 | } 49 | 50 | if matches!(revision, LATEST) { 51 | break; 52 | } else { 53 | reviter += 1; 54 | } 55 | } 56 | table 57 | } 58 | 59 | pub const INSTRUCTION_TABLES: InstructionTables = instruction_tables(); 60 | 61 | #[inline] 62 | pub fn get_instruction_table(revision: Revision) -> &'static InstructionTable { 63 | &INSTRUCTION_TABLES[revision as usize] 64 | } 65 | -------------------------------------------------------------------------------- /src/execution/evm/instructions/mod.rs: 
-------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub(crate) mod arithmetic; 3 | #[macro_use] 4 | pub(crate) mod bitwise; 5 | #[macro_use] 6 | pub(crate) mod boolean; 7 | #[macro_use] 8 | pub(crate) mod call; 9 | #[macro_use] 10 | pub(crate) mod control; 11 | #[macro_use] 12 | pub(crate) mod external; 13 | #[macro_use] 14 | pub mod instruction_table; 15 | #[macro_use] 16 | pub(crate) mod memory; 17 | #[macro_use] 18 | pub(crate) mod properties; 19 | #[macro_use] 20 | pub(crate) mod stack_manip; 21 | 22 | pub use properties::PROPERTIES; 23 | -------------------------------------------------------------------------------- /src/execution/evm/instructions/stack_manip.rs: -------------------------------------------------------------------------------- 1 | use crate::execution::evm::{common::*, state::Stack, AnalyzedCode}; 2 | use arrayref::array_ref; 3 | use ethnum::U256; 4 | 5 | #[inline(always)] 6 | pub(crate) fn push(stack: &mut Stack, s: &AnalyzedCode, pc: usize) -> usize { 7 | let code = &s.padded_code()[pc + 1..]; 8 | match LEN { 9 | 1 => stack.push(code[0].into()), 10 | 2..=31 => stack.push(u256_from_slice(&code[..LEN])), 11 | 32 => stack.push(U256::from_be_bytes(*array_ref!(code, 0, 32))), 12 | _ => unreachable!(), 13 | } 14 | LEN 15 | } 16 | 17 | #[inline] 18 | pub(crate) fn dup(stack: &mut Stack) { 19 | stack.push(*stack.get(HEIGHT - 1)); 20 | } 21 | 22 | #[inline] 23 | pub(crate) fn swap(stack: &mut Stack) { 24 | stack.swap_top(HEIGHT); 25 | } 26 | 27 | #[inline] 28 | pub(crate) fn pop(stack: &mut Stack) { 29 | stack.pop(); 30 | } 31 | -------------------------------------------------------------------------------- /src/execution/evm/mod.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | pub use common::{ 3 | CallKind, CreateMessage, InterpreterMessage, Output, StatusCode, SuccessfulOutput, 4 | }; 5 | pub use host::Host; 6 | pub use interpreter::AnalyzedCode; 7 | pub use opcode::OpCode; 8 | pub use state::{ExecutionState, Stack}; 9 | 10 | /// Maximum allowed EVM bytecode size. 11 | pub const MAX_CODE_SIZE: usize = 0x6000; 12 | 13 | mod common; 14 | pub mod host; 15 | #[macro_use] 16 | pub mod instructions; 17 | mod interpreter; 18 | pub mod opcode; 19 | mod state; 20 | pub mod util; 21 | 22 | #[cfg(test)] 23 | mod tests; 24 | -------------------------------------------------------------------------------- /src/execution/evm/state.rs: -------------------------------------------------------------------------------- 1 | use super::common::InterpreterMessage; 2 | use arrayvec::ArrayVec; 3 | use bytes::{Bytes, BytesMut}; 4 | use derive_more::{Deref, DerefMut}; 5 | use ethnum::U256; 6 | use getset::{Getters, MutGetters}; 7 | use serde::Serialize; 8 | 9 | pub const STACK_SIZE: usize = 1024; 10 | pub const MAX_CONTEXT_DEPTH: usize = 1024; 11 | 12 | /// EVM stack. 
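///
/// The top of the stack is the last element of the backing array, so
/// `get(0)`/`get_mut(0)` address the top item. Accessors use unchecked
/// indexing: the interpreter is expected to have validated
/// `stack_height_required`/`can_overflow_stack` from the instruction table
/// before an opcode handler touches the stack.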
13 | #[derive(Clone, Debug, Default, Serialize)] 14 | pub struct Stack(pub ArrayVec); 15 | 16 | impl Stack { 17 | #[inline] 18 | pub const fn new() -> Self { 19 | Self(ArrayVec::new_const()) 20 | } 21 | 22 | #[inline] 23 | const fn get_pos(&self, pos: usize) -> usize { 24 | self.len() - 1 - pos 25 | } 26 | 27 | #[inline] 28 | pub fn get(&self, pos: usize) -> &U256 { 29 | let pos = self.get_pos(pos); 30 | unsafe { self.0.get_unchecked(pos) } 31 | } 32 | 33 | #[inline] 34 | pub fn get_mut(&mut self, pos: usize) -> &mut U256 { 35 | let pos = self.get_pos(pos); 36 | unsafe { self.0.get_unchecked_mut(pos) } 37 | } 38 | 39 | #[inline(always)] 40 | pub const fn len(&self) -> usize { 41 | self.0.len() 42 | } 43 | 44 | #[inline(always)] 45 | pub fn is_empty(&self) -> bool { 46 | self.len() == 0 47 | } 48 | 49 | #[inline] 50 | pub fn push(&mut self, v: U256) { 51 | unsafe { self.0.push_unchecked(v) } 52 | } 53 | 54 | #[inline] 55 | pub fn pop(&mut self) -> U256 { 56 | unsafe { self.0.pop_unchecked() } 57 | } 58 | 59 | #[inline] 60 | pub fn swap_top(&mut self, pos: usize) { 61 | let top = self.0.len() - 1; 62 | let pos = self.get_pos(pos); 63 | unsafe { 64 | self.0.swap_unchecked(top, pos); 65 | } 66 | } 67 | } 68 | 69 | const PAGE_SIZE: usize = 4 * 1024; 70 | 71 | #[derive(Clone, Debug, Deref, DerefMut)] 72 | pub struct Memory(BytesMut); 73 | 74 | impl Memory { 75 | #[inline] 76 | pub fn new() -> Self { 77 | Self(BytesMut::with_capacity(PAGE_SIZE)) 78 | } 79 | 80 | #[inline] 81 | pub fn grow(&mut self, size: usize) { 82 | let cap = self.0.capacity(); 83 | if size > cap { 84 | let required_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 85 | self.0.reserve((PAGE_SIZE * required_pages) - self.0.len()); 86 | } 87 | self.0.resize(size, 0); 88 | } 89 | } 90 | 91 | impl Default for Memory { 92 | fn default() -> Self { 93 | Self::new() 94 | } 95 | } 96 | 97 | /// EVM execution state. 
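///
/// Created once per call frame via `ExecutionState::new(&message)`:
/// `gas_left` starts at `message.gas`, the stack is empty, memory starts
/// with a single 4 KiB page of capacity, and the return/output buffers are
/// empty.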
98 | #[derive(Clone, Debug, Getters, MutGetters)] 99 | pub struct ExecutionState<'m> { 100 | #[getset(get = "pub", get_mut = "pub")] 101 | pub(crate) gas_left: i64, 102 | #[getset(get = "pub", get_mut = "pub")] 103 | pub(crate) stack: Stack, 104 | #[getset(get = "pub", get_mut = "pub")] 105 | pub(crate) memory: Memory, 106 | pub(crate) message: &'m InterpreterMessage, 107 | #[getset(get = "pub", get_mut = "pub")] 108 | pub(crate) return_data: Bytes, 109 | pub(crate) output_data: Bytes, 110 | } 111 | 112 | impl<'m> ExecutionState<'m> { 113 | pub fn new(message: &'m InterpreterMessage) -> Self { 114 | Self { 115 | gas_left: message.gas, 116 | stack: Stack::default(), 117 | memory: Memory::new(), 118 | message, 119 | return_data: Default::default(), 120 | output_data: Bytes::new(), 121 | } 122 | } 123 | } 124 | 125 | #[cfg(test)] 126 | mod tests { 127 | use super::*; 128 | 129 | #[test] 130 | fn stack() { 131 | let mut stack = Stack::default(); 132 | 133 | let items: [u128; 4] = [0xde, 0xad, 0xbe, 0xef]; 134 | 135 | for (i, item) in items.iter().copied().enumerate() { 136 | stack.push(item.into()); 137 | assert_eq!(stack.len(), i + 1); 138 | } 139 | 140 | assert_eq!(*stack.get(2), 0xad); 141 | 142 | assert_eq!(stack.pop(), 0xef); 143 | 144 | assert_eq!(*stack.get(2), 0xde); 145 | } 146 | 147 | #[test] 148 | fn grow() { 149 | let mut mem = Memory::new(); 150 | mem.grow(PAGE_SIZE * 2 + 1); 151 | assert_eq!(mem.len(), PAGE_SIZE * 2 + 1); 152 | assert_eq!(mem.capacity(), PAGE_SIZE * 3); 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/execution/evm/tests/basefee.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | execution::evm::{opcode::*, util::*, *}, 3 | models::*, 4 | }; 5 | 6 | #[test] 7 | fn basefee_pre_london() { 8 | EvmTester::new() 9 | .revision(Revision::Berlin) 10 | .code(Bytecode::new().opcode(OpCode::BASEFEE)) 11 | .status(StatusCode::UndefinedInstruction) 12 | .check() 13 | } 14 | 15 | #[test] 16 | fn basefee_nominal_case() { 17 | // https://eips.ethereum.org/EIPS/eip-3198#nominal-case 18 | let t = EvmTester::new() 19 | .revision(Revision::London) 20 | .apply_host_fn(|host, _| { 21 | host.tx_context.block_base_fee = 7_u128.into(); 22 | }); 23 | t.clone() 24 | .code(Bytecode::new().opcode(OpCode::BASEFEE).opcode(OpCode::STOP)) 25 | .status(StatusCode::Success) 26 | .gas_used(2) 27 | .check(); 28 | 29 | t.code(Bytecode::new().opcode(OpCode::BASEFEE).ret_top()) 30 | .status(StatusCode::Success) 31 | .gas_used(17) 32 | .output_value(7_u128) 33 | .check() 34 | } 35 | -------------------------------------------------------------------------------- /src/execution/evm/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod basefee; 2 | mod call; 3 | mod eip2929; 4 | mod execute; 5 | mod other; 6 | mod state; 7 | -------------------------------------------------------------------------------- /src/execution/evm/tests/other.rs: -------------------------------------------------------------------------------- 1 | use crate::execution::evm::{opcode::*, util::*, *}; 2 | 3 | #[test] 4 | fn loop_full_of_jumpdests() { 5 | // The code is a simple loop with a counter taken from the input or a constant (325) if the 6 | // input is zero. The loop body contains of only JUMPDESTs, as much as the code size limit 7 | // allows. 
8 | 9 | // The `mul(325, iszero(dup1(calldataload(0)))) + OP_OR` is equivalent of 10 | // `((x == 0) * 325) | x` 11 | // what is 12 | // `x == 0 ? 325 : x`. 13 | 14 | // The `not_(0)` is -1 so we can do `loop_counter + (-1)` to decrease the loop counter. 15 | 16 | let code = Bytecode::new() 17 | .pushv(15) 18 | .pushv(0_u128) 19 | .opcode(OpCode::NOT) 20 | .pushv(0_u128) 21 | .opcode(OpCode::CALLDATALOAD) 22 | .opcode(OpCode::DUP1) 23 | .opcode(OpCode::ISZERO) 24 | .pushv(325) 25 | .opcode(OpCode::MUL) 26 | + OpCode::OR 27 | + (MAX_CODE_SIZE - 20) * OpCode::JUMPDEST 28 | + OpCode::DUP2 29 | + OpCode::ADD 30 | + OpCode::DUP1 31 | + OpCode::DUP4 32 | + OpCode::JUMPI; 33 | 34 | assert_eq!(code.clone().build().len(), MAX_CODE_SIZE); 35 | 36 | EvmTester::new() 37 | .code(code) 38 | .status(StatusCode::Success) 39 | .gas_used(7987882) 40 | .check() 41 | } 42 | 43 | #[test] 44 | fn jumpdest_with_high_offset() { 45 | for offset in [3, 16383, 16384, 32767, 32768, 65535, 65536] { 46 | let mut code = Bytecode::new().pushv(offset).opcode(OpCode::JUMP).build(); 47 | code.resize(offset, OpCode::INVALID.to_u8()); 48 | code.push(OpCode::JUMPDEST.to_u8()); 49 | EvmTester::new() 50 | .code(code) 51 | .status(StatusCode::Success) 52 | .check() 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/execution/evm/util/mod.rs: -------------------------------------------------------------------------------- 1 | mod bytecode; 2 | pub mod mocked_host; 3 | mod tester; 4 | 5 | pub use bytecode::*; 6 | pub use tester::*; 7 | -------------------------------------------------------------------------------- /src/execution/tracer/eip3155_tracer.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use crate::{ 3 | execution::evm::{ExecutionState, OpCode, Output, Stack, StatusCode}, 4 | models::*, 5 | }; 6 | use bytes::Bytes; 7 | use serde::Serialize; 8 | 9 | #[derive(Serialize)] 10 | struct ExecutionStart { 11 | pub depth: u16, 12 | pub rev: Revision, 13 | #[serde(rename = "static")] 14 | pub is_static: bool, 15 | } 16 | 17 | #[derive(Serialize)] 18 | #[serde(rename_all = "camelCase")] 19 | pub(crate) struct InstructionStart { 20 | pub pc: usize, 21 | pub op: u8, 22 | pub op_name: &'static str, 23 | pub gas: u64, 24 | pub stack: Stack, 25 | pub memory_size: usize, 26 | } 27 | 28 | #[derive(Serialize)] 29 | #[serde(rename_all = "camelCase")] 30 | pub(crate) struct ExecutionEnd { 31 | pub error: Option, 32 | pub gas: u64, 33 | pub gas_used: u64, 34 | pub output: String, 35 | } 36 | 37 | #[derive(Debug)] 38 | struct TracerContext { 39 | message_gas: u64, 40 | } 41 | 42 | /// Tracer which prints to stdout. 
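///
/// Emits EIP-3155-style JSON: one line per executed instruction (serialized
/// from `InstructionStart`) and one summary line per call frame (from
/// `ExecutionEnd`). An instruction line looks roughly like the following
/// (illustrative values only, not captured output):
///
/// `{"pc":0,"op":96,"opName":"PUSH1","gas":100000,"stack":[],"memorySize":0}`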
43 | #[derive(Debug, Default)] 44 | pub struct StdoutTracer { 45 | execution_stack: Vec, 46 | } 47 | 48 | impl Tracer for StdoutTracer { 49 | fn capture_start( 50 | &mut self, 51 | _: u16, 52 | _: Address, 53 | _: Address, 54 | _: Address, 55 | _: Address, 56 | _: MessageKind, 57 | _: Bytes, 58 | gas: u64, 59 | _: U256, 60 | ) { 61 | self.execution_stack 62 | .push(TracerContext { message_gas: gas }); 63 | } 64 | 65 | fn capture_state(&mut self, env: &ExecutionState, pc: usize, op: OpCode, _: u64, _: u16) { 66 | println!( 67 | "{}", 68 | serde_json::to_string(&InstructionStart { 69 | pc, 70 | op: op.0, 71 | op_name: op.name(), 72 | gas: env.gas_left as u64, 73 | stack: env.stack.clone(), 74 | memory_size: env.memory.len() 75 | }) 76 | .unwrap() 77 | ) 78 | } 79 | 80 | fn capture_end(&mut self, _: usize, _: u64, output: &Output) { 81 | let context = self.execution_stack.pop().unwrap(); 82 | let error = match output.status_code { 83 | StatusCode::Success => None, 84 | other => Some(other.to_string()), 85 | }; 86 | let (gas_left, gas_used) = match output.status_code { 87 | StatusCode::Success | StatusCode::Revert => ( 88 | output.gas_left as u64, 89 | context.message_gas - output.gas_left as u64, 90 | ), 91 | _ => (0, context.message_gas), 92 | }; 93 | 94 | println!( 95 | "{}", 96 | serde_json::to_string(&ExecutionEnd { 97 | error, 98 | gas: gas_left, 99 | gas_used, 100 | output: hex::encode(&output.output_data), 101 | }) 102 | .unwrap() 103 | ) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/execution/tracer/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod adhoc; 2 | pub mod eip3155_tracer; 3 | 4 | use auto_impl::auto_impl; 5 | pub use eip3155_tracer::StdoutTracer; 6 | 7 | use crate::{ 8 | execution::evm::{ExecutionState, OpCode}, 9 | models::*, 10 | }; 11 | use bytes::Bytes; 12 | use std::{ 13 | collections::{BTreeMap, HashMap}, 14 | fmt::Debug, 15 | }; 16 | 17 | use super::evm::Output; 18 | 19 | #[derive(Clone, Debug, PartialEq, Eq)] 20 | pub enum CodeKind { 21 | Precompile, 22 | Bytecode(Option), 23 | } 24 | 25 | #[derive(Clone, Debug, PartialEq, Eq)] 26 | pub enum CallKind { 27 | Call, 28 | CallCode, 29 | DelegateCall, 30 | StaticCall, 31 | } 32 | 33 | #[derive(Clone, Debug, PartialEq, Eq)] 34 | pub enum MessageKind { 35 | Create { 36 | salt: Option, 37 | }, 38 | Call { 39 | call_kind: CallKind, 40 | code_kind: CodeKind, 41 | }, 42 | } 43 | 44 | #[allow(unused, clippy::too_many_arguments)] 45 | #[auto_impl(&mut)] 46 | pub trait Tracer: Debug + Send { 47 | fn trace_instructions(&self) -> bool { 48 | false 49 | } 50 | fn capture_start( 51 | &mut self, 52 | depth: u16, 53 | sender: Address, 54 | recipient: Address, 55 | real_sender: Address, 56 | code_address: Address, 57 | call_type: MessageKind, 58 | input: Bytes, 59 | gas: u64, 60 | value: U256, 61 | ) { 62 | } 63 | fn capture_state( 64 | &mut self, 65 | env: &ExecutionState, 66 | pc: usize, 67 | op: OpCode, 68 | cost: u64, 69 | depth: u16, 70 | ) { 71 | } 72 | fn capture_end(&mut self, depth: usize, start_gas: u64, output: &Output) {} 73 | fn capture_self_destruct(&mut self, caller: Address, beneficiary: Address, balance: U256) {} 74 | fn capture_account_read(&mut self, account: Address) {} 75 | fn capture_account_write(&mut self, account: Address) {} 76 | } 77 | 78 | /// Tracer which does nothing. 
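///
/// Every `Tracer` method has a default no-op body, so this type only needs
/// the empty `impl` below; custom tracers such as `CallTracer` override just
/// the hooks they care about.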
79 | #[derive(Debug)] 80 | pub struct NoopTracer; 81 | 82 | impl Tracer for NoopTracer {} 83 | 84 | #[derive(Clone, Copy, Debug, Default)] 85 | pub struct CallTracerFlags { 86 | pub from: bool, 87 | pub to: bool, 88 | } 89 | 90 | #[derive(Debug, Default)] 91 | pub struct CallTracer { 92 | addresses: HashMap, 93 | } 94 | 95 | impl Tracer for CallTracer { 96 | fn capture_start( 97 | &mut self, 98 | _: u16, 99 | _: Address, 100 | _: Address, 101 | real_sender: Address, 102 | code_address: Address, 103 | _: MessageKind, 104 | _: Bytes, 105 | _: u64, 106 | _: U256, 107 | ) { 108 | self.addresses.entry(real_sender).or_default().from = true; 109 | self.addresses.entry(code_address).or_default().to = true; 110 | } 111 | 112 | fn capture_self_destruct(&mut self, caller: Address, beneficiary: Address, _: U256) { 113 | self.addresses.entry(caller).or_default().from = true; 114 | self.addresses.entry(beneficiary).or_default().to = true; 115 | } 116 | } 117 | 118 | impl CallTracer { 119 | pub fn into_sorted_iter(&self) -> impl Iterator { 120 | self.addresses 121 | .iter() 122 | .map(|(&k, &v)| (k, v)) 123 | .collect::>() 124 | .into_iter() 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/kv/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod mdbx; 2 | pub mod tables; 3 | pub mod traits; 4 | 5 | use self::{tables::DatabaseChart, traits::*}; 6 | use crate::kv::{mdbx::*, tables::CHAINDATA_TABLES}; 7 | use byte_unit::*; 8 | use bytes::Bytes; 9 | use derive_more::Deref; 10 | use std::fmt::Debug; 11 | 12 | #[derive(Debug)] 13 | pub struct CustomTable(pub string::String); 14 | 15 | impl Table for CustomTable { 16 | type Key = Vec; 17 | type Value = Vec; 18 | type SeekKey = Vec; 19 | 20 | fn db_name(&self) -> string::String { 21 | self.0.clone() 22 | } 23 | } 24 | 25 | impl From for CustomTable { 26 | fn from(s: String) -> Self { 27 | Self(unsafe { string::String::from_utf8_unchecked(Bytes::from(s.into_bytes())) }) 28 | } 29 | } 30 | 31 | impl DupSort for CustomTable { 32 | type SeekBothKey = Vec; 33 | } 34 | 35 | #[derive(Debug, Deref)] 36 | pub struct MdbxWithDirHandle 37 | where 38 | E: EnvironmentKind, 39 | { 40 | #[deref] 41 | inner: mdbx::MdbxEnvironment, 42 | _tmpdir: Option, 43 | } 44 | 45 | impl From> for MdbxWithDirHandle { 46 | fn from(inner: mdbx::MdbxEnvironment) -> Self { 47 | Self { 48 | inner, 49 | _tmpdir: None, 50 | } 51 | } 52 | } 53 | 54 | pub fn new_mem_chaindata() -> anyhow::Result> { 55 | let tmpdir = tempfile::tempdir()?; 56 | Ok(MdbxWithDirHandle { 57 | inner: new_environment(&CHAINDATA_TABLES, tmpdir.path(), n_mib_bytes!(64), None)?, 58 | _tmpdir: Some(tmpdir), 59 | }) 60 | } 61 | 62 | pub fn new_database( 63 | chart: &DatabaseChart, 64 | path: &std::path::Path, 65 | ) -> anyhow::Result> { 66 | Ok(MdbxWithDirHandle { 67 | inner: new_environment(chart, path, n_tib_bytes!(4), Some(n_gib_bytes!(4) as usize))?, 68 | _tmpdir: None, 69 | }) 70 | } 71 | 72 | fn new_environment( 73 | chart: &DatabaseChart, 74 | path: &std::path::Path, 75 | size_upper_limit: u128, 76 | growth_step: Option, 77 | ) -> anyhow::Result> { 78 | let mut builder = ::mdbx::Environment::::new(); 79 | builder.set_max_dbs(CHAINDATA_TABLES.len()); 80 | builder.set_geometry(Geometry { 81 | size: Some(0..size_upper_limit.try_into().unwrap_or(usize::MAX)), 82 | growth_step: growth_step.map(|s| s.try_into().unwrap_or(isize::MAX)), 83 | shrink_threshold: None, 84 | page_size: None, 85 | }); 86 | 
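    // Geometry recap: the caller picks the size envelope. `new_mem_chaindata`
    // caps its temporary environment at 64 MiB, while `new_database` lets the
    // map grow to 4 TiB in 4 GiB steps; values beyond the platform's
    // usize/isize range are clamped by the `try_into` fallbacks above.
    //
    // Minimal usage sketch (hypothetical snippet, in-memory database as used
    // in tests):
    //
    //     let db = crate::kv::new_mem_chaindata()?;
    //     let txn = db.begin()?;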
builder.set_rp_augment_limit(16 * 256 * 1024); 87 | mdbx::MdbxEnvironment::open_rw(builder, path, chart) 88 | } 89 | -------------------------------------------------------------------------------- /src/kv/traits.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use std::{ 3 | fmt::Debug, 4 | marker::PhantomData, 5 | ops::{Generator, GeneratorState}, 6 | pin::Pin, 7 | }; 8 | 9 | pub trait TableEncode: Send + Sync + Sized { 10 | type Encoded: AsRef<[u8]> + Send + Sync; 11 | 12 | fn encode(self) -> Self::Encoded; 13 | } 14 | 15 | pub trait TableDecode: Send + Sync + Sized { 16 | fn decode(b: &[u8]) -> anyhow::Result; 17 | } 18 | 19 | pub trait TableObject: TableEncode + TableDecode {} 20 | 21 | impl TableObject for T where T: TableEncode + TableDecode {} 22 | 23 | pub trait Table: Send + Sync + Debug + 'static { 24 | type Key: TableEncode; 25 | type Value: TableObject; 26 | type SeekKey: TableEncode; 27 | 28 | fn db_name(&self) -> string::String; 29 | } 30 | pub trait DupSort: Table { 31 | type SeekBothKey: TableObject; 32 | } 33 | 34 | #[derive(Copy, Clone, Debug)] 35 | pub struct TryGenIter<'a, G, E> 36 | where 37 | G: Generator> + Unpin + 'a, 38 | { 39 | done: bool, 40 | inner: G, 41 | _marker: PhantomData<&'a ()>, 42 | } 43 | 44 | impl<'a, G, E> Iterator for TryGenIter<'a, G, E> 45 | where 46 | G: Generator> + Unpin + 'a, 47 | { 48 | type Item = Result; 49 | 50 | fn next(&mut self) -> Option { 51 | if self.done { 52 | return None; 53 | } 54 | 55 | match Pin::new(&mut self.inner).resume(()) { 56 | GeneratorState::Yielded(n) => Some(Ok(n)), 57 | GeneratorState::Complete(res) => { 58 | self.done = true; 59 | if let Err(e) = res { 60 | Some(Err(e)) 61 | } else { 62 | None 63 | } 64 | } 65 | } 66 | } 67 | } 68 | 69 | impl<'a, G, E> From for TryGenIter<'a, G, E> 70 | where 71 | G: Generator> + Unpin, 72 | { 73 | fn from(gen: G) -> Self { 74 | Self { 75 | done: false, 76 | inner: gen, 77 | _marker: PhantomData, 78 | } 79 | } 80 | } 81 | 82 | pub fn ttw<'a, T, E>(f: impl Fn(&T) -> bool + 'a) -> impl Fn(&Result) -> bool + 'a { 83 | move |res| match res { 84 | Ok(v) => (f)(v), 85 | Err(_) => true, 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature( 2 | adt_const_params, 3 | assert_matches, 4 | const_for, 5 | const_mut_refs, 6 | entry_insert, 7 | generator_trait, 8 | generators, 9 | iter_collect_into, 10 | map_first_last, 11 | never_type, 12 | poll_ready, 13 | slice_swap_unchecked, 14 | step_trait 15 | )] 16 | #![recursion_limit = "256"] 17 | #![allow( 18 | dead_code, 19 | incomplete_features, 20 | clippy::mutable_key_type, 21 | clippy::too_many_arguments, 22 | clippy::type_complexity, 23 | clippy::unused_io_amount 24 | )] 25 | #![doc = include_str!("../README.md")] 26 | 27 | pub mod accessors; 28 | #[doc(hidden)] 29 | pub mod binutil; 30 | mod bitmapdb; 31 | pub mod chain; 32 | pub mod consensus; 33 | pub mod crypto; 34 | pub mod etl; 35 | pub mod execution; 36 | pub mod kv; 37 | pub mod models; 38 | pub mod p2p; 39 | pub mod res; 40 | pub mod rpc; 41 | pub mod sentry; 42 | pub mod stagedsync; 43 | pub mod stages; 44 | mod state; 45 | pub mod trie; 46 | pub(crate) mod util; 47 | 48 | pub use stagedsync::stage::StageId; 49 | pub use state::*; 50 | pub use util::*; 51 | -------------------------------------------------------------------------------- /src/models/account.rs: 
-------------------------------------------------------------------------------- 1 | use crate::{kv::tables::VariableVec, models::*, util::*}; 2 | use anyhow::bail; 3 | use arrayvec::ArrayVec; 4 | use bytes::{Buf, Bytes}; 5 | use educe::*; 6 | use fastrlp::*; 7 | use modular_bitfield::prelude::*; 8 | use serde::*; 9 | use std::collections::HashMap; 10 | 11 | #[derive(Clone, Copy, Debug, PartialEq, Eq)] 12 | pub struct Account { 13 | pub nonce: u64, 14 | pub balance: U256, 15 | pub code_hash: H256, // hash of the bytecode 16 | } 17 | 18 | #[derive(Clone, Copy, Debug, RlpEncodable, RlpDecodable, RlpMaxEncodedLen)] 19 | pub struct RlpAccount { 20 | pub nonce: u64, 21 | pub balance: U256, 22 | pub storage_root: H256, 23 | pub code_hash: H256, 24 | } 25 | 26 | impl Default for Account { 27 | fn default() -> Self { 28 | Self { 29 | nonce: 0, 30 | balance: U256::ZERO, 31 | code_hash: EMPTY_HASH, 32 | } 33 | } 34 | } 35 | 36 | #[derive(Deserialize, Educe)] 37 | #[educe(Debug)] 38 | pub struct SerializedAccount { 39 | pub balance: U256, 40 | #[serde(with = "hexbytes")] 41 | #[educe(Debug(method = "write_hex_string"))] 42 | pub code: Bytes, 43 | pub nonce: U64, 44 | pub storage: HashMap, 45 | } 46 | 47 | fn bytes_to_u64(buf: &[u8]) -> u64 { 48 | let mut decoded = [0u8; 8]; 49 | for (i, b) in buf.iter().rev().enumerate() { 50 | decoded[i] = *b; 51 | } 52 | 53 | u64::from_le_bytes(decoded) 54 | } 55 | 56 | #[allow(dead_code)] 57 | #[bitfield] 58 | #[derive(Clone, Copy, Debug, Default)] 59 | struct AccountStorageFlags { 60 | nonce_len: B4, 61 | code_hash: bool, 62 | #[skip] 63 | unused: B3, 64 | } 65 | 66 | pub const MAX_ACCOUNT_LEN: usize = 1 + (1 + 32) + (1 + 32) + (1 + 8); 67 | 68 | pub type EncodedAccount = VariableVec; 69 | 70 | impl Account { 71 | fn write_compact(input: &[u8; LEN]) -> ArrayVec { 72 | input.iter().copied().skip_while(|v| *v == 0).collect() 73 | } 74 | 75 | pub fn encode_for_storage(&self) -> EncodedAccount { 76 | let mut buffer = EncodedAccount::default(); 77 | 78 | let mut field_set = AccountStorageFlags::default(); // start with first bit set to 0 79 | buffer.push(0); 80 | if self.nonce != 0 { 81 | let b = Self::write_compact(&self.nonce.to_be_bytes()); 82 | field_set.set_nonce_len(b.len().try_into().unwrap()); 83 | buffer.try_extend_from_slice(&b[..]).unwrap(); 84 | } 85 | 86 | // Encoding code hash 87 | if self.code_hash != EMPTY_HASH { 88 | field_set.set_code_hash(true); 89 | buffer 90 | .try_extend_from_slice(self.code_hash.as_fixed_bytes()) 91 | .unwrap(); 92 | } 93 | 94 | // Encoding balance 95 | if self.balance != 0 { 96 | let b = Self::write_compact(&self.balance.to_be_bytes()); 97 | buffer.try_extend_from_slice(&b[..]).unwrap(); 98 | } 99 | 100 | let fs = field_set.into_bytes()[0]; 101 | buffer[0] = fs; 102 | 103 | buffer 104 | } 105 | 106 | pub fn decode_for_storage(mut enc: &[u8]) -> anyhow::Result> { 107 | if enc.is_empty() { 108 | return Ok(None); 109 | } 110 | 111 | let mut a = Self::default(); 112 | 113 | let field_set = AccountStorageFlags::from_bytes([enc.get_u8()]); 114 | 115 | let decode_length = field_set.nonce_len(); 116 | if decode_length > 0 { 117 | a.nonce = bytes_to_u64(&enc[..decode_length.into()]); 118 | enc.advance(decode_length.into()); 119 | } 120 | 121 | if field_set.code_hash() { 122 | a.code_hash = H256::from_slice(&enc[..KECCAK_LENGTH]); 123 | enc.advance(KECCAK_LENGTH); 124 | } 125 | 126 | if enc.len() > 32 { 127 | bail!("balance cannot be longer than 32 bytes") 128 | } 129 | a.balance = U256::from_be_bytes(static_left_pad(enc)); 130 | 131 | 
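        // Storage layout decoded above: one flags byte (low nibble = nonce
        // length in bytes, bit 4 = code hash present), then the compact
        // big-endian nonce, an optional 32-byte code hash, and whatever
        // remains is the compact balance. E.g. nonce 2, balance 1000, no code
        // encodes as 0x01 0x02 0x03e8 (see the `without_code` test below).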
Ok(Some(a)) 132 | } 133 | 134 | pub fn to_rlp(&self, storage_root: H256) -> RlpAccount { 135 | RlpAccount { 136 | nonce: self.nonce, 137 | balance: self.balance, 138 | storage_root, 139 | code_hash: self.code_hash, 140 | } 141 | } 142 | } 143 | 144 | #[cfg(test)] 145 | mod tests { 146 | use super::*; 147 | use crate::crypto::*; 148 | use hex_literal::hex; 149 | 150 | fn run_test_storage( 151 | original: Account, 152 | expected_encoded: [u8; EXPECTED_LEN], 153 | ) { 154 | let encoded_account = original.encode_for_storage(); 155 | 156 | assert_eq!(&encoded_account[..], &expected_encoded[..]); 157 | 158 | let decoded = Account::decode_for_storage(&encoded_account) 159 | .unwrap() 160 | .unwrap(); 161 | 162 | assert_eq!(original, decoded); 163 | } 164 | 165 | #[test] 166 | fn empty() { 167 | run_test_storage( 168 | Account { 169 | nonce: 100, 170 | balance: 0.as_u256(), 171 | code_hash: EMPTY_HASH, 172 | }, 173 | hex!("0164"), 174 | ) 175 | } 176 | 177 | #[test] 178 | fn with_code() { 179 | run_test_storage( 180 | Account { 181 | nonce: 2, 182 | balance: 1000.as_u256(), 183 | code_hash: keccak256([1, 2, 3]), 184 | }, 185 | hex!("1102f1885eda54b7a053318cd41e2093220dab15d65381b1157a3633a83bfd5c923903e8"), 186 | ) 187 | } 188 | 189 | #[test] 190 | fn without_code() { 191 | run_test_storage( 192 | Account { 193 | nonce: 2, 194 | balance: 1000.as_u256(), 195 | code_hash: EMPTY_HASH, 196 | }, 197 | hex!("010203e8"), 198 | ) 199 | } 200 | 201 | #[test] 202 | fn with_empty_balance_non_nil_contract() { 203 | run_test_storage( 204 | Account { 205 | nonce: 0, 206 | balance: 0.as_u256(), 207 | code_hash: H256(hex!( 208 | "0000000000000000000000000000000000000000000000000000000000000123" 209 | )), 210 | }, 211 | hex!("100000000000000000000000000000000000000000000000000000000000000123"), 212 | ) 213 | } 214 | 215 | #[test] 216 | fn with_empty_balance() { 217 | run_test_storage( 218 | Account { 219 | nonce: 0, 220 | balance: 0.as_u256(), 221 | code_hash: EMPTY_HASH, 222 | }, 223 | hex!("00"), 224 | ) 225 | } 226 | } 227 | -------------------------------------------------------------------------------- /src/models/bloom.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use sha3::{Digest, Keccak256}; 3 | 4 | pub const BLOOM_BYTE_LENGTH: usize = 256; 5 | 6 | // See Section 4.3.1 "Transaction Receipt" of the Yellow Paper 7 | fn m3_2048(bloom: &mut Bloom, x: &[u8]) { 8 | let hash = Keccak256::digest(x); 9 | let h = hash.as_slice(); 10 | for i in [0, 2, 4] { 11 | let bit = (h[i + 1] as usize + ((h[i] as usize) << 8)) & 0x7FF; 12 | bloom.0[BLOOM_BYTE_LENGTH - 1 - bit / 8] |= 1 << (bit % 8); 13 | } 14 | } 15 | 16 | pub fn logs_bloom<'a, It>(logs: It) -> Bloom 17 | where 18 | It: IntoIterator, 19 | { 20 | let mut bloom = Bloom::zero(); 21 | for log in logs { 22 | m3_2048(&mut bloom, log.address.as_bytes()); 23 | for topic in &log.topics { 24 | m3_2048(&mut bloom, topic.as_bytes()); 25 | } 26 | } 27 | bloom 28 | } 29 | 30 | #[cfg(test)] 31 | mod tests { 32 | use super::*; 33 | use hex_literal::hex; 34 | 35 | #[test] 36 | fn hardcoded_bloom() { 37 | let logs = vec![ 38 | Log { 39 | address: hex!("22341ae42d6dd7384bc8584e50419ea3ac75b83f").into(), 40 | topics: vec![hex!( 41 | "04491edcd115127caedbd478e2e7895ed80c7847e903431f94f9cfa579cad47f" 42 | ) 43 | .into()], 44 | data: vec![].into(), 45 | }, 46 | Log { 47 | address: hex!("e7fb22dfef11920312e4989a3a2b81e2ebf05986").into(), 48 | topics: vec![ 49 | 
hex!("7f1fef85c4b037150d3675218e0cdb7cf38fea354759471e309f3354918a442f").into(), 50 | hex!("d85629c7eaae9ea4a10234fed31bc0aeda29b2683ebe0c1882499d272621f6b6").into(), 51 | ], 52 | data: hex::decode("2d690516512020171c1ec870f6ff45398cc8609250326be89915fb538e7b") 53 | .unwrap() 54 | .into(), 55 | }, 56 | ]; 57 | assert_eq!( 58 | logs_bloom(&logs), 59 | Bloom::from(hex!( 60 | "000000000000000000810000000000000000000000000000000000020000000000000000000000000000008000" 61 | "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" 62 | "000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000" 63 | "000000000000000000000000000000000000000000000000000000280000000000400000800000004000000000" 64 | "000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000" 65 | "00000000001400000000000000008000000000000000000000000000000000" 66 | )) 67 | ); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/models/config.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | genesis::GenesisState, 3 | models::{BlockNumber, ChainSpec, NetworkId, H256}, 4 | }; 5 | 6 | const REPOSITORY_URL: &str = "https://github.com/akula-bft/akula"; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct ChainConfig { 10 | pub chain_spec: ChainSpec, 11 | pub genesis_hash: H256, 12 | } 13 | 14 | impl From for ChainConfig { 15 | fn from(chain_spec: ChainSpec) -> Self { 16 | let genesis = GenesisState::new(chain_spec.clone()); 17 | Self { 18 | chain_spec, 19 | genesis_hash: genesis.header(&genesis.initial_state()).hash(), 20 | } 21 | } 22 | } 23 | 24 | impl ChainConfig { 25 | pub fn new(name: &str) -> anyhow::Result { 26 | ChainSpec::load_builtin(name).map(From::from) 27 | } 28 | 29 | pub const fn network_id(&self) -> NetworkId { 30 | self.chain_spec.params.network_id 31 | } 32 | 33 | pub fn chain_name(&self) -> &str { 34 | &self.chain_spec.name 35 | } 36 | 37 | pub fn forks(&self) -> Vec { 38 | self.chain_spec 39 | .gather_forks() 40 | .into_iter() 41 | .collect::>() 42 | } 43 | 44 | pub fn bootnodes(&self) -> Vec { 45 | self.chain_spec.p2p.bootnodes.clone() 46 | } 47 | 48 | pub fn dns(&self) -> Option { 49 | self.chain_spec.p2p.dns.clone() 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/models/log.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use bytes::Bytes; 3 | use fastrlp::*; 4 | 5 | #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)] 6 | pub struct Log { 7 | pub address: Address, 8 | pub topics: Vec, 9 | pub data: Bytes, 10 | } 11 | -------------------------------------------------------------------------------- /src/models/receipt.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use crate::trie::*; 3 | use bytes::{Buf, BufMut}; 4 | use fastrlp::*; 5 | 6 | #[derive(Clone, Debug, PartialEq, Eq)] 7 | pub struct Receipt { 8 | pub tx_type: TxType, 9 | pub success: bool, 10 | pub cumulative_gas_used: u64, 11 | pub bloom: Bloom, 12 | pub logs: Vec, 13 | } 14 | 15 | impl Receipt { 16 | pub fn new(tx_type: TxType, success: bool, cumulative_gas_used: u64, logs: Vec) -> Self { 17 | let bloom = logs_bloom(&logs); 18 | Self { 19 | tx_type, 20 | success, 21 | cumulative_gas_used, 22 | bloom, 23 | logs, 24 | } 25 | } 26 | 27 | fn rlp_header(&self) -> 
fastrlp::Header { 28 | let mut h = fastrlp::Header { 29 | list: true, 30 | payload_length: 0, 31 | }; 32 | 33 | h.payload_length += Encodable::length(&self.success); 34 | h.payload_length += Encodable::length(&self.cumulative_gas_used); 35 | h.payload_length += Encodable::length(&self.bloom); 36 | h.payload_length += Encodable::length(&self.logs); 37 | 38 | h 39 | } 40 | 41 | fn encode_inner(&self, out: &mut dyn BufMut, rlp_head: Header) { 42 | if !matches!(self.tx_type, TxType::Legacy) { 43 | out.put_u8(self.tx_type as u8); 44 | } 45 | 46 | rlp_head.encode(out); 47 | Encodable::encode(&self.success, out); 48 | Encodable::encode(&self.cumulative_gas_used, out); 49 | Encodable::encode(&self.bloom, out); 50 | Encodable::encode(&self.logs, out); 51 | } 52 | } 53 | 54 | impl Encodable for Receipt { 55 | fn length(&self) -> usize { 56 | let rlp_head = self.rlp_header(); 57 | let rlp_len = length_of_length(rlp_head.payload_length) + rlp_head.payload_length; 58 | if matches!(self.tx_type, TxType::Legacy) { 59 | rlp_len 60 | } else { 61 | // EIP-2718 objects are wrapped into byte array in containing RLP 62 | length_of_length(rlp_len + 1) + rlp_len + 1 63 | } 64 | } 65 | 66 | fn encode(&self, out: &mut dyn BufMut) { 67 | let rlp_head = self.rlp_header(); 68 | 69 | if !matches!(self.tx_type, TxType::Legacy) { 70 | let rlp_len = length_of_length(rlp_head.payload_length) + rlp_head.payload_length; 71 | Header { 72 | list: false, 73 | payload_length: rlp_len + 1, 74 | } 75 | .encode(out); 76 | 77 | out.put_u8(self.tx_type as u8); 78 | } 79 | 80 | self.encode_inner(out, rlp_head); 81 | } 82 | } 83 | 84 | impl TrieEncode for Receipt { 85 | fn trie_encode(&self, buf: &mut dyn BufMut) { 86 | self.encode_inner(buf, self.rlp_header()) 87 | } 88 | } 89 | 90 | impl Decodable for Receipt { 91 | fn decode(buf: &mut &[u8]) -> Result { 92 | fn base_decode(buf: &mut &[u8], tx_type: TxType) -> Result { 93 | let receipt = Receipt { 94 | tx_type, 95 | success: Decodable::decode(buf)?, 96 | cumulative_gas_used: Decodable::decode(buf)?, 97 | bloom: Decodable::decode(buf)?, 98 | logs: Decodable::decode(buf)?, 99 | }; 100 | 101 | Ok(receipt) 102 | } 103 | 104 | fn eip2718_decode(buf: &mut &[u8], tx_type: TxType) -> Result { 105 | let h = Header::decode(buf)?; 106 | 107 | if !h.list { 108 | return Err(DecodeError::UnexpectedString); 109 | } 110 | 111 | base_decode(buf, tx_type) 112 | } 113 | 114 | let rlp_head = Header::decode(buf)?; 115 | 116 | Ok(if rlp_head.list { 117 | let started_len = buf.len(); 118 | let this = base_decode(buf, TxType::Legacy)?; 119 | 120 | let consumed = started_len - buf.len(); 121 | if consumed != rlp_head.payload_length { 122 | return Err(fastrlp::DecodeError::ListLengthMismatch { 123 | expected: rlp_head.payload_length, 124 | got: consumed, 125 | }); 126 | } 127 | 128 | this 129 | } else if rlp_head.payload_length == 0 { 130 | return Err(DecodeError::InputTooShort); 131 | } else { 132 | if buf.is_empty() { 133 | return Err(DecodeError::InputTooShort); 134 | } 135 | 136 | let tx_type = TxType::try_from(buf.get_u8())?; 137 | 138 | if tx_type != TxType::EIP2930 && tx_type != TxType::EIP1559 { 139 | return Err(DecodeError::Custom("Unsupported transaction type")); 140 | } 141 | 142 | let this = eip2718_decode(buf, tx_type)?; 143 | 144 | if !buf.is_empty() { 145 | return Err(DecodeError::ListLengthMismatch { 146 | expected: 0, 147 | got: buf.len(), 148 | }); 149 | } 150 | 151 | this 152 | }) 153 | } 154 | } 155 | 156 | #[derive(RlpDecodable)] 157 | struct UntypedReceipt { 158 | pub success: bool, 159 
| pub cumulative_gas_used: u64, 160 | pub bloom: Bloom, 161 | pub logs: Vec, 162 | } 163 | 164 | impl UntypedReceipt { 165 | fn into_receipt(self, tx_type: TxType) -> Receipt { 166 | Receipt { 167 | tx_type, 168 | success: self.success, 169 | cumulative_gas_used: self.cumulative_gas_used, 170 | bloom: self.bloom, 171 | logs: self.logs, 172 | } 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /src/models/revision.rs: -------------------------------------------------------------------------------- 1 | use derive_more::Display; 2 | use serde::Serialize; 3 | 4 | /// EVM revision. 5 | #[derive(Clone, Copy, Debug, Display, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize)] 6 | pub enum Revision { 7 | /// The Frontier revision. 8 | /// The one Ethereum launched with. 9 | Frontier = 0, 10 | 11 | /// [The Homestead revision.](https://eips.ethereum.org/EIPS/eip-606) 12 | Homestead = 1, 13 | 14 | /// [The Tangerine Whistle revision.](https://eips.ethereum.org/EIPS/eip-608) 15 | Tangerine = 2, 16 | 17 | /// [The Spurious Dragon revision.](https://eips.ethereum.org/EIPS/eip-607) 18 | Spurious = 3, 19 | 20 | /// [The Byzantium revision.](https://eips.ethereum.org/EIPS/eip-609) 21 | Byzantium = 4, 22 | 23 | /// [The Constantinople revision.](https://eips.ethereum.org/EIPS/eip-1013) 24 | Constantinople = 5, 25 | 26 | /// [The Petersburg revision.](https://eips.ethereum.org/EIPS/eip-1716) 27 | Petersburg = 6, 28 | 29 | /// [The Istanbul revision.](https://eips.ethereum.org/EIPS/eip-1679) 30 | Istanbul = 7, 31 | 32 | /// [The Berlin revision.](https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/berlin.md) 33 | Berlin = 8, 34 | 35 | /// [The London revision.](https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/london.md) 36 | London = 9, 37 | 38 | /// [The Paris revision.](https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/paris.md) 39 | Paris = 10, 40 | } 41 | 42 | impl Revision { 43 | pub const fn iter() -> [Revision; Revision::len()] { 44 | [ 45 | Self::Frontier, 46 | Self::Homestead, 47 | Self::Tangerine, 48 | Self::Spurious, 49 | Self::Byzantium, 50 | Self::Constantinople, 51 | Self::Petersburg, 52 | Self::Istanbul, 53 | Self::Berlin, 54 | Self::London, 55 | Self::Paris, 56 | ] 57 | } 58 | 59 | pub const fn latest() -> Self { 60 | Self::Paris 61 | } 62 | 63 | pub const fn len() -> usize { 64 | Self::latest() as usize + 1 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/models/util.rs: -------------------------------------------------------------------------------- 1 | use crate::models::*; 2 | use anyhow::{bail, format_err}; 3 | use arrayvec::ArrayVec; 4 | use bytes::*; 5 | use eio::{FromBytes, ToBytes}; 6 | 7 | pub fn variable_to_compact>(v: T) -> ArrayVec { 8 | v.to_be_bytes() 9 | .into_iter() 10 | .skip_while(|&v| v == 0) 11 | .collect() 12 | } 13 | 14 | pub fn variable_from_compact>( 15 | mut buf: &[u8], 16 | len: u8, 17 | ) -> anyhow::Result<(T, &[u8])> { 18 | let len = len as usize; 19 | if len > N { 20 | bail!("len too big"); 21 | } 22 | if buf.len() < len { 23 | bail!("input too short"); 24 | } 25 | 26 | let mut value = T::default(); 27 | if len > 0 { 28 | let mut arr = [0; N]; 29 | arr[N - len..].copy_from_slice(&buf[..len]); 30 | value = T::from_be_bytes(arr); 31 | buf.advance(len); 32 | } 33 | 34 | Ok((value, buf)) 35 | } 36 | 37 | pub fn h160_from_compact(mut buf: &[u8]) -> 
anyhow::Result<(H160, &[u8])> { 38 | let v = H160::from_slice( 39 | buf.get(..20) 40 | .ok_or_else(|| format_err!("input too short"))?, 41 | ); 42 | buf.advance(20); 43 | Ok((v, buf)) 44 | } 45 | 46 | pub fn h256_from_compact(mut buf: &[u8]) -> anyhow::Result<(H256, &[u8])> { 47 | let v = H256::from_slice( 48 | buf.get(..32) 49 | .ok_or_else(|| format_err!("input too short"))?, 50 | ); 51 | buf.advance(32); 52 | Ok((v, buf)) 53 | } 54 | -------------------------------------------------------------------------------- /src/p2p/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod node; 2 | pub mod types; 3 | -------------------------------------------------------------------------------- /src/p2p/node/builder.rs: -------------------------------------------------------------------------------- 1 | use super::{stash::Stash, Node, Sentry}; 2 | use crate::{ 3 | models::{BlockNumber, ChainConfig, H256, U256}, 4 | p2p::types::Status, 5 | }; 6 | use hashlink::LruCache; 7 | use http::Uri; 8 | use parking_lot::{Mutex, RwLock}; 9 | use std::sync::Arc; 10 | use tokio::sync::{watch, Notify}; 11 | use tonic::transport::Channel; 12 | 13 | #[derive(Debug)] 14 | pub struct NodeBuilder { 15 | sentries: Vec, 16 | stash: Option>, 17 | config: ChainConfig, 18 | status: Option, 19 | } 20 | 21 | impl NodeBuilder { 22 | pub fn new(config: ChainConfig) -> Self { 23 | Self { 24 | config, 25 | 26 | sentries: Default::default(), 27 | stash: Default::default(), 28 | status: Default::default(), 29 | } 30 | } 31 | 32 | pub fn add_sentry(mut self, endpoint: impl Into) -> Self { 33 | self.sentries.push(Sentry::new( 34 | Channel::builder(endpoint.into()).connect_lazy(), 35 | )); 36 | self 37 | } 38 | 39 | pub fn set_chain_head(mut self, height: BlockNumber, hash: H256, td: U256) -> Self { 40 | let status = Status { 41 | height, 42 | hash, 43 | total_difficulty: H256::from(td.to_be_bytes()), 44 | }; 45 | self.status = Some(status); 46 | self 47 | } 48 | 49 | pub fn set_stash(mut self, stash: Arc) -> Self { 50 | self.stash = Some(stash); 51 | self 52 | } 53 | 54 | pub fn build(self) -> anyhow::Result { 55 | let stash = self.stash.unwrap_or_else(|| Arc::new(())); 56 | let sentries = self.sentries; 57 | if sentries.is_empty() { 58 | anyhow::bail!("No sentries"); 59 | } 60 | 61 | let config = self.config; 62 | let status = RwLock::new(self.status.unwrap_or_else(|| Status::from(&config))); 63 | let forks = config.forks().into_iter().map(|f| *f).collect::>(); 64 | 65 | let (chain_tip_sender, chain_tip) = watch::channel(Default::default()); 66 | 67 | Ok(Node { 68 | stash, 69 | sentries, 70 | status, 71 | config, 72 | chain_tip, 73 | chain_tip_sender, 74 | bad_blocks: Default::default(), 75 | block_cache: Mutex::new(LruCache::new(64)), 76 | block_cache_notify: Notify::new(), 77 | forks, 78 | }) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/p2p/node/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::module_inception)] 2 | 3 | mod builder; 4 | mod node; 5 | mod stash; 6 | mod stream; 7 | 8 | pub use self::{builder::*, node::*, stream::NodeStream}; 9 | -------------------------------------------------------------------------------- /src/p2p/node/stash.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | accessors::chain, 3 | kv::{tables, MdbxWithDirHandle}, 4 | models::{BlockBody, BlockHeader, BlockNumber, H256}, 5 | 
p2p::types::{BlockId, GetBlockHeadersParams}, 6 | }; 7 | use mdbx::EnvironmentKind; 8 | use std::fmt::Debug; 9 | 10 | pub trait Stash: Send + Sync + Debug { 11 | fn get_headers(&self, _: GetBlockHeadersParams) -> anyhow::Result>; 12 | fn get_bodies(&self, _: Vec) -> anyhow::Result>; 13 | } 14 | 15 | impl Stash for () { 16 | fn get_headers(&self, _: GetBlockHeadersParams) -> anyhow::Result> { 17 | Ok(vec![]) 18 | } 19 | fn get_bodies(&self, _: Vec) -> anyhow::Result> { 20 | Ok(vec![]) 21 | } 22 | } 23 | 24 | impl Stash for MdbxWithDirHandle 25 | where 26 | E: EnvironmentKind, 27 | { 28 | fn get_headers(&self, params: GetBlockHeadersParams) -> anyhow::Result> { 29 | let txn = self.begin()?; 30 | 31 | let limit = std::cmp::min(params.limit, 1024); 32 | let reverse = params.reverse == 1; 33 | 34 | let mut add_op = if params.skip == 0 { 35 | 1 36 | } else { 37 | params.skip as i64 + 1 38 | }; 39 | if reverse { 40 | add_op = -add_op; 41 | } 42 | 43 | let mut headers = Vec::with_capacity(limit as usize); 44 | let mut number_cursor = txn.cursor(tables::HeaderNumber)?; 45 | let mut header_cursor = txn.cursor(tables::Header)?; 46 | 47 | let mut next_number = match params.start { 48 | BlockId::Hash(hash) => number_cursor.seek_exact(hash)?.map(|(_, k)| k), 49 | BlockId::Number(number) => Some(number), 50 | }; 51 | 52 | for _ in 0..limit { 53 | match next_number { 54 | Some(block_number) => { 55 | if let Some((_, header)) = header_cursor.seek_exact(block_number)? { 56 | headers.push(header); 57 | } 58 | next_number = u64::try_from(block_number.0 as i64 + add_op) 59 | .ok() 60 | .map(BlockNumber); 61 | } 62 | None => break, 63 | }; 64 | } 65 | 66 | Ok::<_, anyhow::Error>(headers) 67 | } 68 | 69 | fn get_bodies(&self, hashes: Vec) -> anyhow::Result> { 70 | let txn = self.begin().expect("Failed to begin transaction"); 71 | 72 | Ok(hashes 73 | .into_iter() 74 | .filter_map(|hash| txn.get(tables::HeaderNumber, hash).unwrap_or(None)) 75 | .filter_map(|number| { 76 | chain::block_body::read_without_senders(&txn, number).unwrap_or(None) 77 | }) 78 | .collect::>()) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/p2p/node/stream.rs: -------------------------------------------------------------------------------- 1 | use super::Sentry; 2 | use crate::p2p::types::InboundMessage; 3 | use ethereum_interfaces::sentry::{self as grpc_sentry, PenalizePeerRequest}; 4 | use futures::Stream; 5 | use std::{pin::Pin, time::Duration}; 6 | use tokio::sync::mpsc; 7 | use tokio_stream::StreamExt; 8 | 9 | pub struct SentryStream; 10 | 11 | pub type NodeStream = Pin + Send>>; 12 | 13 | impl SentryStream { 14 | const BACKOFF: Duration = Duration::from_millis(100); 15 | 16 | #[allow(clippy::new_ret_no_self)] 17 | pub async fn new( 18 | sentry: &Sentry, 19 | sentry_id: usize, 20 | pred: Vec, 21 | ) -> anyhow::Result { 22 | let (penalize_tx, mut penalize_rx) = mpsc::channel(4); 23 | tokio::task::spawn({ 24 | let mut sentry = sentry.clone(); 25 | async move { 26 | while let Some(peer_id) = penalize_rx.recv().await { 27 | let _ = sentry 28 | .penalize_peer(PenalizePeerRequest { 29 | peer_id, 30 | penalty: 0i32, 31 | }) 32 | .await; 33 | } 34 | } 35 | }); 36 | 37 | let stream = { 38 | let mut sentry = sentry.clone(); 39 | let mut inner_stream = sentry 40 | .messages(grpc_sentry::MessagesRequest { ids: pred }) 41 | .await? 42 | .into_inner(); 43 | 44 | Box::pin(async_stream::stream! 
{ 45 | loop { 46 | if let Some(Ok(msg)) = inner_stream.next().await { 47 | let peer_id = msg.peer_id.clone(); 48 | 49 | if let Ok(msg) = InboundMessage::new(msg, sentry_id) { 50 | yield msg; 51 | } else { 52 | let _ = penalize_tx.send(peer_id).await; 53 | } 54 | } 55 | } 56 | }) 57 | }; 58 | 59 | Ok::<_, anyhow::Error>(stream) 60 | } 61 | 62 | pub async fn join_all<'sentry, T, P>(iter: T, pred: P) -> NodeStream 63 | where 64 | T: IntoIterator, 65 | P: IntoIterator, 66 | { 67 | let pred = pred.into_iter().collect::>(); 68 | 69 | Box::pin(futures::stream::select_all( 70 | futures::future::join_all( 71 | iter.into_iter() 72 | .enumerate() 73 | .map(|(sentry_id, sentry)| Self::new(sentry, sentry_id, pred.clone())) 74 | .collect::>(), 75 | ) 76 | .await 77 | .into_iter() 78 | .filter_map(Result::ok), 79 | )) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/p2p/types/block.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{Block, BlockNumber, H256}; 2 | use fastrlp::*; 3 | 4 | #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] 5 | pub struct GetBlockBodies { 6 | pub request_id: u64, 7 | pub hashes: Vec, 8 | } 9 | 10 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] 11 | pub enum BlockId { 12 | Hash(H256), 13 | Number(BlockNumber), 14 | } 15 | 16 | impl From for BlockId { 17 | #[inline(always)] 18 | fn from(number: BlockNumber) -> Self { 19 | BlockId::Number(number) 20 | } 21 | } 22 | 23 | impl From for BlockId { 24 | #[inline(always)] 25 | fn from(hash: H256) -> Self { 26 | BlockId::Hash(hash) 27 | } 28 | } 29 | 30 | #[derive(Debug, Clone, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] 31 | pub struct NewBlockHashes(pub Vec); 32 | 33 | impl NewBlockHashes { 34 | #[inline(always)] 35 | pub fn new(block_hashes: Vec<(H256, BlockNumber)>) -> Self { 36 | Self( 37 | block_hashes 38 | .into_iter() 39 | .map(|(hash, number)| BlockHashAndNumber { hash, number }) 40 | .collect::>(), 41 | ) 42 | } 43 | } 44 | 45 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable)] 46 | pub struct BlockHashAndNumber { 47 | pub hash: H256, 48 | pub number: BlockNumber, 49 | } 50 | 51 | #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] 52 | pub struct NewBlock { 53 | pub block: Block, 54 | pub total_difficulty: u128, 55 | } 56 | -------------------------------------------------------------------------------- /src/p2p/types/event.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akula-bft/akula/b8dd0bc280dd1a8b08bdd3a93c3c2215b7218db8/src/p2p/types/event.rs -------------------------------------------------------------------------------- /src/p2p/types/header.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | models::{BlockHeader, BlockNumber, H256}, 3 | p2p::types::BlockId, 4 | }; 5 | use fastrlp::*; 6 | 7 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 8 | pub struct HeaderRequest { 9 | pub start: BlockId, 10 | pub limit: u64, 11 | pub skip: u64, 12 | pub reverse: bool, 13 | } 14 | 15 | impl Default for HeaderRequest { 16 | #[inline(always)] 17 | fn default() -> Self { 18 | HeaderRequest { 19 | start: BlockId::Number(BlockNumber(0)), 20 | limit: 1024, 21 | skip: 0, 22 | reverse: false, 23 | } 24 | } 25 | } 26 | 27 | pub struct Announce { 28 | pub hash: H256, 29 | pub number: BlockNumber, 30 | } 31 | 32 | 
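// Editor's illustrative sketch (not part of the original file; literal values are
// hypothetical): a typed `HeaderRequest` is what the rest of the node works with, and
// it is converted into the wire-level eth/66 `GetBlockHeadersParams` defined just
// below, with the `reverse` flag widened from `bool` to `u8` for RLP encoding.
//
//     let req = HeaderRequest {
//         start: BlockId::Number(BlockNumber(14_000_000)),
//         limit: 192,
//         ..Default::default()
//     };
//     let msg = GetBlockHeaders {
//         request_id: 1,
//         params: req.into(),
//     };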
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] 33 | pub struct GetBlockHeaders { 34 | pub request_id: u64, 35 | pub params: GetBlockHeadersParams, 36 | } 37 | 38 | #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] 39 | pub struct GetBlockHeadersParams { 40 | pub start: BlockId, 41 | pub limit: u64, 42 | pub skip: u64, 43 | pub reverse: u8, 44 | } 45 | 46 | impl From for GetBlockHeadersParams { 47 | fn from(req: HeaderRequest) -> Self { 48 | Self { 49 | start: req.start, 50 | limit: req.limit, 51 | skip: req.skip, 52 | reverse: req.reverse.into(), 53 | } 54 | } 55 | } 56 | 57 | #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] 58 | pub struct BlockHeaders { 59 | pub request_id: u64, 60 | pub headers: Vec, 61 | } 62 | -------------------------------------------------------------------------------- /src/p2p/types/mod.rs: -------------------------------------------------------------------------------- 1 | mod block; 2 | mod header; 3 | mod message; 4 | mod penalty; 5 | mod rlp; 6 | mod status; 7 | 8 | pub use self::{block::*, header::*, message::*, penalty::*, rlp::*, status::*}; 9 | 10 | use super::node::SentryId; 11 | use crate::sentry::devp2p::PeerId; 12 | 13 | #[derive(Clone, Debug, PartialEq, Eq)] 14 | pub enum PeerFilter { 15 | All, 16 | Random(u64), 17 | Peer(PeerId, SentryId), 18 | MinBlock(u64), 19 | } 20 | -------------------------------------------------------------------------------- /src/p2p/types/penalty.rs: -------------------------------------------------------------------------------- 1 | use crate::sentry::devp2p::PeerId; 2 | use ethereum_interfaces::sentry as grpc_sentry; 3 | 4 | #[derive(Debug, Clone, Default)] 5 | pub enum PenaltyKind { 6 | #[default] 7 | BadBlock, 8 | DuplicateHeader, 9 | WrongChildBlockHeight, 10 | WrongChildDifficulty, 11 | InvalidSeal, 12 | TooFarFuture, 13 | TooFarPast, 14 | } 15 | 16 | #[derive(Debug, Clone)] 17 | pub struct Penalty { 18 | pub peer_id: PeerId, 19 | pub kind: PenaltyKind, 20 | } 21 | 22 | impl From for grpc_sentry::PenalizePeerRequest { 23 | #[inline(always)] 24 | fn from(penalty: Penalty) -> Self { 25 | grpc_sentry::PenalizePeerRequest { 26 | peer_id: Some(penalty.peer_id.into()), 27 | penalty: 0, 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/p2p/types/rlp.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | models::{BlockNumber, H256}, 3 | p2p::types::{BlockId, Message}, 4 | }; 5 | use fastrlp::*; 6 | 7 | impl Decodable for BlockId { 8 | fn decode(buf: &mut &[u8]) -> Result { 9 | if buf.len() == 32 { 10 | Ok(BlockId::Hash(::decode(buf)?)) 11 | } else { 12 | Ok(BlockId::Number(::decode(buf)?)) 13 | } 14 | } 15 | } 16 | 17 | impl Encodable for BlockId { 18 | fn encode(&self, out: &mut dyn BufMut) { 19 | match *self { 20 | BlockId::Hash(ref hash) => hash.encode(out), 21 | BlockId::Number(ref number) => number.encode(out), 22 | } 23 | } 24 | } 25 | 26 | impl Encodable for Message { 27 | fn encode(&self, out: &mut dyn BufMut) { 28 | match *self { 29 | Message::NewBlockHashes(ref value) => value.encode(out), 30 | Message::GetBlockHeaders(ref value) => value.encode(out), 31 | Message::GetBlockBodies(ref value) => value.encode(out), 32 | Message::BlockBodies(ref value) => value.encode(out), 33 | Message::BlockHeaders(ref value) => value.encode(out), 34 | Message::NewBlock(ref value) => value.encode(out), 35 | Message::NewPooledTransactionHashes(ref value) => 
value.encode(out), 36 | Message::Transactions(ref value) => value.encode(out), 37 | Message::GetPooledTransactions(ref value) => value.encode(out), 38 | Message::PooledTransactions(ref value) => value.encode(out), 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/p2p/types/status.rs: -------------------------------------------------------------------------------- 1 | use crate::models::*; 2 | 3 | #[derive(Debug, Clone, Copy, Default)] 4 | pub struct Status { 5 | pub height: BlockNumber, 6 | pub hash: H256, 7 | pub total_difficulty: H256, 8 | } 9 | 10 | impl Status { 11 | pub fn new(height: BlockNumber, hash: H256, td: U256) -> Self { 12 | Self { 13 | height, 14 | hash, 15 | total_difficulty: H256::from(td.to_be_bytes()), 16 | } 17 | } 18 | } 19 | 20 | impl<'a> From<&'a ChainConfig> for Status { 21 | fn from(config: &'a ChainConfig) -> Self { 22 | let height = config.chain_spec.genesis.number; 23 | let hash = config.genesis_hash; 24 | let total_difficulty = 25 | H256::from(config.chain_spec.genesis.seal.difficulty().to_be_bytes()); 26 | Self { 27 | height, 28 | hash, 29 | total_difficulty, 30 | } 31 | } 32 | } 33 | 34 | impl PartialEq for Status { 35 | #[inline(always)] 36 | fn eq(&self, other: &Status) -> bool { 37 | self.height == other.height 38 | && self.hash == other.hash 39 | && self.total_difficulty == other.total_difficulty 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/res/chainspec/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::models::ChainSpec; 2 | use once_cell::sync::Lazy; 3 | 4 | pub static MAINNET: Lazy = 5 | Lazy::new(|| ron::from_str(include_str!("ethereum.ron")).unwrap()); 6 | pub static ROPSTEN: Lazy = 7 | Lazy::new(|| ron::from_str(include_str!("ropsten.ron")).unwrap()); 8 | pub static RINKEBY: Lazy = 9 | Lazy::new(|| ron::from_str(include_str!("rinkeby.ron")).unwrap()); 10 | pub static GOERLI: Lazy = 11 | Lazy::new(|| ron::from_str(include_str!("goerli.ron")).unwrap()); 12 | pub static SEPOLIA: Lazy = 13 | Lazy::new(|| ron::from_str(include_str!("sepolia.ron")).unwrap()); 14 | -------------------------------------------------------------------------------- /src/res/chainspec/sepolia.ron: -------------------------------------------------------------------------------- 1 | ( 2 | name: "Sepolia", 3 | consensus: ( 4 | seal_verification: Beacon( 5 | terminal_total_difficulty: "0x3c6568f12e8000", 6 | since: 1450409, 7 | block_reward: { 8 | 0: "0x1bc16d674ec80000", 9 | 1450409: "0x0", 10 | }, 11 | ), 12 | eip1559_block: 0, 13 | ), 14 | upgrades: ( 15 | homestead: 0, 16 | tangerine: 0, 17 | spurious: 0, 18 | byzantium: 0, 19 | constantinople: 0, 20 | petersburg: 0, 21 | istanbul: 0, 22 | berlin: 0, 23 | london: 0, 24 | paris: 1450409, 25 | ), 26 | params: ( 27 | chain_id: 11155111, 28 | network_id: 11155111, 29 | ), 30 | genesis: ( 31 | number: 0, 32 | author: "0x0000000000000000000000000000000000000000", 33 | timestamp: 1633267481, 34 | gas_limit: 30000000, 35 | base_fee_per_gas: "0x3b9aca00", 36 | seal: Ethash( 37 | vanity: "0x5365706f6c69612c20417468656e732c204174746963612c2047726565636521", 38 | difficulty: "0x20000", 39 | nonce: "0x0000000000000000", 40 | mix_hash: "0x0000000000000000000000000000000000000000000000000000000000000000", 41 | ), 42 | ), 43 | balances: { 44 | 0: { 45 | "0xa2a6d93439144ffe4d27c9e088dcd8b783946263": "0xd3c21bcecceda1000000", 46 | 
"0xbc11295936aa79d594139de1b2e12629414f3bdb": "0xd3c21bcecceda1000000", 47 | "0x7cf5b79bfe291a67ab02b393e456ccc4c266f753": "0xd3c21bcecceda1000000", 48 | "0xaaec86394441f915bce3e6ab399977e9906f3b69": "0xd3c21bcecceda1000000", 49 | "0xf47cae1cf79ca6758bfc787dbd21e6bdbe7112b8": "0xd3c21bcecceda1000000", 50 | "0xd7eddb78ed295b3c9629240e8924fb8d8874ddd8": "0xd3c21bcecceda1000000", 51 | "0x8b7f0977bb4f0fbe7076fa22bc24aca043583f5e": "0xd3c21bcecceda1000000", 52 | "0xe2e2659028143784d557bcec6ff3a0721048880a": "0xd3c21bcecceda1000000", 53 | "0xd9a5179f091d85051d3c982785efd1455cec8699": "0xd3c21bcecceda1000000", 54 | "0xbeef32ca5b9a198d27b4e02f4c70439fe60356cf": "0xd3c21bcecceda1000000", 55 | "0x0000006916a87b82333f4245046623b23794c65c": "0x84595161401484a000000", 56 | "0xb21c33de1fab3fa15499c62b59fe0cc3250020d1": "0x52b7d2dcc80cd2e4000000", 57 | "0x10f5d45854e038071485ac9e402308cf80d2d2fe": "0x52b7d2dcc80cd2e4000000", 58 | "0xd7d76c58b3a519e9fa6cc4d22dc017259bc49f1e": "0x52b7d2dcc80cd2e4000000", 59 | "0x799d329e5f583419167cd722962485926e338f4a": "0xde0b6b3a7640000" 60 | }, 61 | }, 62 | p2p: ( 63 | bootnodes: [ 64 | "enode://9246d00bc8fd1742e5ad2428b80fc4dc45d786283e05ef6edbd9002cbc335d40998444732fbe921cb88e1d2c73d1b1de53bae6a2237996e9bfe14f871baf7066@18.168.182.86:30303", 65 | "enode://ec66ddcf1a974950bd4c782789a7e04f8aa7110a72569b6e65fcd51e937e74eed303b1ea734e4d19cfaec9fbff9b6ee65bf31dcb50ba79acce9dd63a6aca61c7@52.14.151.177:30303" 66 | ], 67 | dns: "all.sepolia.ethdisco.net", 68 | ), 69 | ) 70 | -------------------------------------------------------------------------------- /src/res/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod chainspec; 2 | -------------------------------------------------------------------------------- /src/res/readme-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akula-bft/akula/b8dd0bc280dd1a8b08bdd3a93c3c2215b7218db8/src/res/readme-screenshot.png -------------------------------------------------------------------------------- /src/rpc/erigon.rs: -------------------------------------------------------------------------------- 1 | use super::helpers; 2 | use crate::{ 3 | kv::{mdbx::*, MdbxWithDirHandle}, 4 | models::*, 5 | }; 6 | use async_trait::async_trait; 7 | use ethereum_jsonrpc::{types, ErigonApiServer}; 8 | use jsonrpsee::core::RpcResult; 9 | use std::sync::Arc; 10 | 11 | pub struct ErigonApiServerImpl 12 | where 13 | SE: EnvironmentKind, 14 | { 15 | pub db: Arc>, 16 | } 17 | 18 | #[async_trait] 19 | impl ErigonApiServer for ErigonApiServerImpl 20 | where 21 | DB: EnvironmentKind, 22 | { 23 | async fn get_header_by_number(&self, block_number: u64) -> RpcResult> { 24 | let db = self.db.clone(); 25 | 26 | tokio::task::spawn_blocking(move || { 27 | let tx = db.begin()?; 28 | 29 | if let Some((block_number, _)) = 30 | helpers::resolve_block_id(&tx, types::BlockNumber::Number(block_number.into()))? 31 | { 32 | if let Some(BlockHeader { 33 | parent_hash, 34 | ommers_hash, 35 | beneficiary, 36 | state_root, 37 | transactions_root, 38 | receipts_root, 39 | logs_bloom, 40 | difficulty, 41 | number, 42 | gas_limit, 43 | gas_used, 44 | timestamp, 45 | extra_data, 46 | mix_hash, 47 | nonce, 48 | base_fee_per_gas, 49 | }) = crate::accessors::chain::header::read(&tx, block_number)? 
50 | { 51 | return Ok(Some(types::Header { 52 | parent_hash, 53 | sha3_uncles: ommers_hash, 54 | miner: beneficiary, 55 | state_root, 56 | transactions_root, 57 | receipts_root, 58 | logs_bloom, 59 | difficulty, 60 | number: number.0.into(), 61 | gas_limit: gas_limit.into(), 62 | gas_used: gas_used.into(), 63 | timestamp: timestamp.into(), 64 | extra_data: extra_data.into(), 65 | mix_hash, 66 | nonce, 67 | base_fee_per_gas, 68 | })); 69 | } 70 | } 71 | 72 | Ok(None) 73 | }) 74 | .await 75 | .unwrap_or_else(helpers::joinerror_to_result) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/rpc/net.rs: -------------------------------------------------------------------------------- 1 | use crate::models::*; 2 | use async_trait::async_trait; 3 | use ethereum_jsonrpc::{types::StringU64, NetApiServer}; 4 | use jsonrpsee::core::RpcResult; 5 | 6 | pub struct NetApiServerImpl { 7 | pub network_id: NetworkId, 8 | } 9 | 10 | #[async_trait] 11 | impl NetApiServer for NetApiServerImpl { 12 | async fn listening(&self) -> RpcResult { 13 | Ok(true) 14 | } 15 | async fn peer_count(&self) -> RpcResult { 16 | Ok(U64::zero()) 17 | } 18 | async fn version(&self) -> RpcResult { 19 | Ok(self.network_id.0.into()) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/rpc/parity.rs: -------------------------------------------------------------------------------- 1 | use super::helpers; 2 | use crate::{ 3 | kv::{mdbx::*, MdbxWithDirHandle}, 4 | models::*, 5 | }; 6 | use async_trait::async_trait; 7 | use ethereum_jsonrpc::{types, ParityApiServer}; 8 | use jsonrpsee::core::RpcResult; 9 | use std::{collections::BTreeSet, num::NonZeroUsize, sync::Arc}; 10 | 11 | pub struct ParityApiServerImpl 12 | where 13 | SE: EnvironmentKind, 14 | { 15 | pub db: Arc>, 16 | } 17 | 18 | #[async_trait] 19 | impl ParityApiServer for ParityApiServerImpl 20 | where 21 | DB: EnvironmentKind, 22 | { 23 | async fn list_storage_keys( 24 | &self, 25 | address: Address, 26 | number_of_slots: NonZeroUsize, 27 | offset: Option, 28 | block_id: Option, 29 | ) -> RpcResult>> { 30 | let tx = self.db.begin()?; 31 | 32 | let block = match block_id { 33 | None 34 | | Some(types::BlockId::Number(types::BlockNumber::Latest)) 35 | | Some(types::BlockId::Number(types::BlockNumber::Pending)) => None, 36 | Some(block_id) => { 37 | if let Some((block_number, _)) = helpers::resolve_block_id(&tx, block_id)? 
{ 38 | Some(block_number) 39 | } else { 40 | return Ok(None); 41 | } 42 | } 43 | }; 44 | 45 | Ok(Some( 46 | crate::accessors::state::storage::walk(&tx, address, offset, block) 47 | .take(number_of_slots.get()) 48 | .map(|res| res.map(|(slot, _)| slot)) 49 | .collect::>()?, 50 | )) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/rpc/web3.rs: -------------------------------------------------------------------------------- 1 | use crate::version_string; 2 | use async_trait::async_trait; 3 | use ethereum_jsonrpc::Web3ApiServer; 4 | use jsonrpsee::core::RpcResult; 5 | 6 | pub struct Web3ApiServerImpl; 7 | 8 | #[async_trait] 9 | impl Web3ApiServer for Web3ApiServerImpl { 10 | async fn client_version(&self) -> RpcResult { 11 | Ok(version_string()) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc.rs: -------------------------------------------------------------------------------- 1 | use crate::sentry::devp2p::types::*; 2 | use async_stream::stream; 3 | use futures::{stream::BoxStream, StreamExt}; 4 | use std::{collections::HashMap, net::SocketAddr, pin::Pin, task::Poll, time::Duration}; 5 | use tokio::time::sleep; 6 | use tokio_stream::Stream; 7 | 8 | pub mod v4; 9 | pub use self::v4::{Discv4, Discv4Builder}; 10 | 11 | pub mod dns; 12 | 13 | pub use self::dns::DnsDiscovery; 14 | 15 | pub type Discovery = BoxStream<'static, anyhow::Result>; 16 | 17 | pub struct StaticNodes(Pin> + Send + 'static>>); 18 | 19 | impl StaticNodes { 20 | pub fn new(nodes: HashMap, delay: Duration) -> Self { 21 | Self(Box::pin(stream! { 22 | loop { 23 | for (&addr, &id) in &nodes { 24 | yield Ok(NodeRecord { id, addr }); 25 | sleep(delay).await; 26 | } 27 | } 28 | })) 29 | } 30 | } 31 | 32 | impl Stream for StaticNodes { 33 | type Item = anyhow::Result; 34 | 35 | fn poll_next( 36 | mut self: Pin<&mut Self>, 37 | cx: &mut std::task::Context<'_>, 38 | ) -> Poll> { 39 | self.0.poll_next_unpin(cx) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc/dns/backend/memory.rs: -------------------------------------------------------------------------------- 1 | use super::Backend; 2 | use async_trait::async_trait; 3 | use std::collections::HashMap; 4 | use tracing::*; 5 | 6 | #[async_trait] 7 | impl Backend for HashMap { 8 | async fn get_record(&self, fqdn: String) -> anyhow::Result> { 9 | debug!("resolving {}", fqdn); 10 | if let Some(v) = self.get(&fqdn) { 11 | debug!("resolved {} to {}", fqdn, v); 12 | return Ok(Some(v.clone())); 13 | } 14 | 15 | Ok(None) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc/dns/backend/mod.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use auto_impl::auto_impl; 3 | 4 | pub mod memory; 5 | pub mod trust_dns; 6 | 7 | #[async_trait] 8 | #[auto_impl(&, Box, Arc)] 9 | pub trait Backend: Send + Sync + 'static { 10 | async fn get_record(&self, fqdn: String) -> anyhow::Result>; 11 | } 12 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc/dns/backend/trust_dns.rs: -------------------------------------------------------------------------------- 1 | use super::Backend; 2 | use async_trait::async_trait; 3 | use tracing::*; 4 | use trust_dns_resolver::{ 5 | error::{ResolveError, ResolveErrorKind}, 6 | 
proto::DnsHandle, 7 | AsyncResolver, ConnectionProvider, 8 | }; 9 | 10 | #[async_trait] 11 | impl Backend for AsyncResolver 12 | where 13 | C: DnsHandle, 14 | P: ConnectionProvider, 15 | { 16 | async fn get_record(&self, fqdn: String) -> anyhow::Result> { 17 | trace!("Resolving FQDN {}", fqdn); 18 | match self.txt_lookup(format!("{}.", fqdn)).await { 19 | Err(e) => { 20 | if !matches!(e.kind(), ResolveErrorKind::NoRecordsFound { .. }) { 21 | return Err(e.into()); 22 | } 23 | } 24 | Ok(v) => { 25 | if let Some(txt) = v.into_iter().next() { 26 | if let Some(txt_entry) = txt.iter().next() { 27 | return Ok(Some(String::from_utf8(txt_entry.to_vec())?)); 28 | } 29 | } 30 | } 31 | } 32 | 33 | Ok(None) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc/v4/message.rs: -------------------------------------------------------------------------------- 1 | use super::{NodeId, NodeRecord}; 2 | use derive_more::*; 3 | use fastrlp::*; 4 | use primitive_types::H256; 5 | use std::net::IpAddr; 6 | 7 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Deref, DerefMut, From)] 8 | pub struct Ip(pub IpAddr); 9 | 10 | impl Encodable for Ip { 11 | fn encode(&self, out: &mut dyn BufMut) { 12 | match self.0 { 13 | IpAddr::V4(addr) => addr.octets().encode(out), 14 | IpAddr::V6(addr) => addr.octets().encode(out), 15 | } 16 | } 17 | 18 | fn length(&self) -> usize { 19 | match self.0 { 20 | IpAddr::V4(addr) => addr.octets().length(), 21 | IpAddr::V6(addr) => addr.octets().length(), 22 | } 23 | } 24 | } 25 | 26 | impl Decodable for Ip { 27 | fn decode(buf: &mut &[u8]) -> Result { 28 | match Header::decode(&mut &**buf)?.payload_length { 29 | 0 => Err(DecodeError::Custom("empty")), 30 | 4 => Ok(Self(IpAddr::from(<[u8; 4]>::decode(buf)?))), 31 | 16 => Ok(Self(IpAddr::from(<[u8; 16]>::decode(buf)?))), 32 | other => { 33 | tracing::debug!("ip_addr_rlp_decode: wrong address length {other}"); 34 | Err(DecodeError::Custom("wrong IP address length")) 35 | } 36 | } 37 | } 38 | } 39 | 40 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable)] 41 | pub struct Endpoint { 42 | pub address: Ip, 43 | pub udp_port: u16, 44 | pub tcp_port: u16, 45 | } 46 | 47 | impl From for Endpoint { 48 | fn from( 49 | NodeRecord { 50 | address, 51 | tcp_port, 52 | udp_port, 53 | .. 
54 | }: NodeRecord, 55 | ) -> Self { 56 | Self { 57 | address, 58 | udp_port, 59 | tcp_port, 60 | } 61 | } 62 | } 63 | 64 | #[derive(Clone, Copy, Debug, RlpEncodable, RlpDecodable)] 65 | pub struct FindNodeMessage { 66 | pub id: NodeId, 67 | pub expire: u64, 68 | } 69 | 70 | #[derive(Clone, Debug, RlpEncodable, RlpDecodable)] 71 | pub struct NeighboursMessage { 72 | pub nodes: Vec, 73 | pub expire: u64, 74 | } 75 | 76 | #[derive(Debug, Clone)] 77 | pub struct PingMessage { 78 | pub from: Endpoint, 79 | pub to: Endpoint, 80 | pub expire: u64, 81 | } 82 | 83 | #[derive(RlpEncodable)] 84 | struct PingMessageE<'s> { 85 | version: u64, 86 | from: &'s Endpoint, 87 | to: &'s Endpoint, 88 | expire: &'s u64, 89 | } 90 | 91 | impl Encodable for PingMessage { 92 | fn encode(&self, out: &mut dyn BufMut) { 93 | let Self { from, to, expire } = self; 94 | 95 | PingMessageE { 96 | version: 4, 97 | from, 98 | to, 99 | expire, 100 | } 101 | .encode(out) 102 | } 103 | fn length(&self) -> usize { 104 | let Self { from, to, expire } = self; 105 | 106 | PingMessageE { 107 | version: 4, 108 | from, 109 | to, 110 | expire, 111 | } 112 | .length() 113 | } 114 | } 115 | 116 | #[derive(RlpDecodable)] 117 | struct PingMessageD { 118 | version: u64, 119 | from: Endpoint, 120 | to: Endpoint, 121 | expire: u64, 122 | } 123 | 124 | #[derive(RlpDecodable)] 125 | struct PingMessageDEnr { 126 | version: u64, 127 | from: Endpoint, 128 | to: Endpoint, 129 | expire: u64, 130 | enr_seq: u64, 131 | } 132 | 133 | impl Decodable for PingMessage { 134 | fn decode(buf: &mut &[u8]) -> Result { 135 | let (from, to, expire) = { 136 | PingMessageD::decode(buf) 137 | .map( 138 | |PingMessageD { 139 | from, to, expire, .. 140 | }| (from, to, expire), 141 | ) 142 | .or_else(|e| { 143 | if let DecodeError::ListLengthMismatch { .. } = e { 144 | PingMessageDEnr::decode(buf).map( 145 | |PingMessageDEnr { 146 | from, to, expire, .. 147 | }| (from, to, expire), 148 | ) 149 | } else { 150 | Err(e) 151 | } 152 | })? 153 | }; 154 | 155 | Ok(Self { from, to, expire }) 156 | } 157 | } 158 | 159 | #[derive(Debug, Clone, RlpEncodable)] 160 | pub struct PongMessage { 161 | pub to: Endpoint, 162 | pub echo: H256, 163 | pub expire: u64, 164 | } 165 | 166 | #[derive(RlpDecodable)] 167 | struct PongMessageD { 168 | to: Endpoint, 169 | echo: H256, 170 | expire: u64, 171 | } 172 | 173 | #[derive(RlpDecodable)] 174 | struct PongMessageDEnr { 175 | to: Endpoint, 176 | echo: H256, 177 | expire: u64, 178 | enr_seq: u64, 179 | } 180 | 181 | impl Decodable for PongMessage { 182 | fn decode(buf: &mut &[u8]) -> Result { 183 | let (to, echo, expire) = { 184 | PongMessageD::decode(buf) 185 | .map( 186 | |PongMessageD { 187 | to, echo, expire, .. 188 | }| (to, echo, expire), 189 | ) 190 | .or_else(|e| { 191 | if let DecodeError::ListLengthMismatch { .. } = e { 192 | PongMessageDEnr::decode(buf).map( 193 | |PongMessageDEnr { 194 | to, echo, expire, .. 195 | }| (to, echo, expire), 196 | ) 197 | } else { 198 | Err(e) 199 | } 200 | })? 201 | }; 202 | 203 | Ok(Self { to, echo, expire }) 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc/v4/mod.rs: -------------------------------------------------------------------------------- 1 | //! Ethereum Node Discovery v4 implementation. 
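//!
//! Usage sketch (editor's addition, not from the original module docs; assumes a
//! bootstrapped discovery `node: Arc<Node>` is already available):
//!
//!     let discv4 = Discv4Builder::default()
//!         .with_concurrent_lookups(2)
//!         .with_cache(50)
//!         .build(node);
//!
//! The resulting `Discv4` is a `Stream` of discovered node records and can be plugged
//! into the RLPx swarm as a discovery source.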
2 | 3 | #![allow(clippy::type_complexity)] 4 | 5 | mod kad; 6 | mod message; 7 | mod node; 8 | mod proto; 9 | mod util; 10 | 11 | use educe::Educe; 12 | use ethereum_types::H512; 13 | use std::{ 14 | pin::Pin, 15 | sync::{ 16 | atomic::{AtomicBool, Ordering}, 17 | Arc, 18 | }, 19 | }; 20 | use task_group::TaskGroup; 21 | use tokio::sync::mpsc::{channel, Receiver}; 22 | use tokio_stream::Stream; 23 | use tracing::*; 24 | 25 | pub type NodeId = H512; 26 | use crate::sentry::THROTTLE_INTERVAL; 27 | 28 | pub use self::node::{Node, NodeRecord}; 29 | 30 | #[derive(Educe)] 31 | #[educe(Default)] 32 | pub struct Discv4Builder { 33 | #[educe(Default(1))] 34 | concurrent_lookups: usize, 35 | #[educe(Default(20))] 36 | cache: usize, 37 | throttle: Arc, 38 | } 39 | 40 | impl Discv4Builder { 41 | pub fn with_concurrent_lookups(mut self, concurrent_lookups: usize) -> Self { 42 | self.concurrent_lookups = concurrent_lookups; 43 | self 44 | } 45 | 46 | pub fn with_cache(mut self, cache: usize) -> Self { 47 | self.cache = cache; 48 | self 49 | } 50 | 51 | pub fn with_throttle(mut self, throttle: Arc) -> Self { 52 | self.throttle = throttle; 53 | self 54 | } 55 | 56 | pub fn build(self, node: Arc) -> Discv4 { 57 | Discv4::new(node, self.concurrent_lookups, self.throttle, self.cache) 58 | } 59 | } 60 | 61 | pub struct Discv4 { 62 | #[allow(unused)] 63 | tasks: TaskGroup, 64 | receiver: Receiver, 65 | } 66 | 67 | impl Discv4 { 68 | #[must_use] 69 | fn new( 70 | node: Arc, 71 | concurrent_lookups: usize, 72 | throttled: Arc, 73 | cache: usize, 74 | ) -> Self { 75 | let tasks = TaskGroup::default(); 76 | 77 | let (tx, receiver) = channel(cache); 78 | 79 | for i in 0..concurrent_lookups { 80 | let node = node.clone(); 81 | let tx = tx.clone(); 82 | let throttled = throttled.clone(); 83 | tasks.spawn_with_name(format!("discv4 lookup #{}", i), { 84 | async move { 85 | loop { 86 | if i > 0 && throttled.load(Ordering::SeqCst) { 87 | trace!("Throttling requested, delaying lookup"); 88 | tokio::time::sleep(THROTTLE_INTERVAL).await; 89 | } else { 90 | for record in node.lookup(rand::random()).await { 91 | let _ = tx 92 | .send(crate::sentry::devp2p::types::NodeRecord { 93 | addr: record.tcp_addr(), 94 | id: record.id, 95 | }) 96 | .await; 97 | } 98 | } 99 | } 100 | } 101 | }); 102 | } 103 | 104 | Self { tasks, receiver } 105 | } 106 | } 107 | 108 | impl Stream for Discv4 { 109 | type Item = anyhow::Result; 110 | 111 | fn poll_next( 112 | mut self: std::pin::Pin<&mut Self>, 113 | cx: &mut std::task::Context<'_>, 114 | ) -> std::task::Poll> { 115 | Pin::new(&mut self.receiver) 116 | .poll_recv(cx) 117 | .map(|opt| opt.map(Ok)) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc/v4/proto.rs: -------------------------------------------------------------------------------- 1 | use super::message::*; 2 | use enum_primitive_derive::Primitive; 3 | use tokio::sync::oneshot::Sender as OneshotSender; 4 | 5 | #[derive(Primitive)] 6 | pub enum MessageId { 7 | Ping = 1, 8 | Pong = 2, 9 | FindNode = 3, 10 | Neighbours = 4, 11 | } 12 | 13 | #[derive(Debug)] 14 | pub enum EgressMessage { 15 | Ping(PingMessage, Option>), 16 | Pong(PongMessage), 17 | FindNode(FindNodeMessage), 18 | Neighbours(NeighboursMessage), 19 | } 20 | -------------------------------------------------------------------------------- /src/sentry/devp2p/disc/v4/util.rs: -------------------------------------------------------------------------------- 1 | use super::NodeId; 2 | use 
primitive_types::H256; 3 | use secp256k1::{Message, PublicKey}; 4 | use sha3::{Digest, Keccak256}; 5 | 6 | pub fn keccak256<T: AsRef<[u8]>>(data: T) -> H256 { 7 | H256::from_slice(Keccak256::digest(data.as_ref()).as_slice()) 8 | } 9 | 10 | pub fn keccak256_message<T: AsRef<[u8]>>(data: T) -> Message { 11 | Message::from_slice(Keccak256::digest(data.as_ref()).as_slice()).unwrap() 12 | } 13 | 14 | pub fn pk2id(pk: &PublicKey) -> NodeId { 15 | NodeId::from_slice(&pk.serialize_uncompressed()[1..]) 16 | } 17 | -------------------------------------------------------------------------------- /src/sentry/devp2p/ecies.rs: -------------------------------------------------------------------------------- 1 | //! ECIES protocol implementation 2 | 3 | mod algorithm; 4 | mod proto; 5 | 6 | pub use self::proto::{ECIESCodec, ECIESState, ECIESStream, EgressECIESValue, IngressECIESValue}; 7 | -------------------------------------------------------------------------------- /src/sentry/devp2p/errors.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use thiserror::Error; 3 | 4 | #[derive(Debug, Error)] 5 | pub enum ECIESError { 6 | #[error("IO error")] 7 | IO(#[from] io::Error), 8 | #[error("tag check failure")] 9 | TagCheckFailed, 10 | #[error("invalid auth data")] 11 | InvalidAuthData, 12 | #[error("invalid ack data")] 13 | InvalidAckData, 14 | #[error("invalid body data")] 15 | InvalidHeader, 16 | #[error("other")] 17 | Other(#[from] anyhow::Error), 18 | } 19 | 20 | impl From<ECIESError> for io::Error { 21 | fn from(error: ECIESError) -> Self { 22 | Self::new(io::ErrorKind::Other, format!("ECIES error: {:?}", error)) 23 | } 24 | } 25 | 26 | impl From<secp256k1::Error> for ECIESError { 27 | fn from(error: secp256k1::Error) -> Self { 28 | Self::Other(error.into()) 29 | } 30 | } 31 | 32 | impl From<fastrlp::DecodeError> for ECIESError { 33 | fn from(error: fastrlp::DecodeError) -> Self { 34 | Self::Other(error.into()) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/sentry/devp2p/mac.rs: -------------------------------------------------------------------------------- 1 | use aes::*; 2 | use block_padding::NoPadding; 3 | use cipher::BlockEncrypt; 4 | use digest::KeyInit; 5 | use ethereum_types::{H128, H256}; 6 | use generic_array::{typenum::U16, GenericArray}; 7 | use sha3::{Digest, Keccak256}; 8 | 9 | pub type HeaderBytes = GenericArray<u8, U16>; 10 | 11 | #[derive(Debug)] 12 | pub struct MAC { 13 | secret: H256, 14 | hasher: Keccak256, 15 | } 16 | 17 | impl MAC { 18 | pub fn new(secret: H256) -> Self { 19 | Self { 20 | secret, 21 | hasher: Keccak256::new(), 22 | } 23 | } 24 | 25 | pub fn update(&mut self, data: &[u8]) { 26 | self.hasher.update(data) 27 | } 28 | 29 | pub fn update_header(&mut self, data: &HeaderBytes) { 30 | let aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap(); 31 | let mut encrypted = self.digest().to_fixed_bytes(); 32 | aes.encrypt_padded::<NoPadding>(&mut encrypted, H128::len_bytes()) 33 | .unwrap(); 34 | for i in 0..data.len() { 35 | encrypted[i] ^= data[i]; 36 | } 37 | self.hasher.update(encrypted); 38 | } 39 | 40 | pub fn update_body(&mut self, data: &[u8]) { 41 | self.hasher.update(data); 42 | let prev = self.digest(); 43 | let aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap(); 44 | let mut encrypted = self.digest().to_fixed_bytes(); 45 | aes.encrypt_padded::<NoPadding>(&mut encrypted, H128::len_bytes()) 46 | .unwrap(); 47 | for i in 0..16 { 48 | encrypted[i] ^= prev[i]; 49 | } 50 | self.hasher.update(encrypted); 51 | } 52 | 53 | pub fn digest(&self) -> H128 {
54 | H128::from_slice(&self.hasher.clone().finalize()[0..16]) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/sentry/devp2p/mod.rs: -------------------------------------------------------------------------------- 1 | //! Ethereum devp2p protocol implementation 2 | //! 3 | //! It is layered in the following way: 4 | //! * `RLPxNode` which represents the whole pool of connected peers. It handles message routing and peer management. 5 | //! * `MuxServer` which provides a request-response API to the otherwise stateless P2P protocol. 6 | //! * `EthIngressServer` which `MuxServer` calls into when new requests and gossip messages arrive. 7 | //! * `MuxServer` itself implements `EthProtocol` which is a simple gateway to the abstract Ethereum network. 8 | 9 | #![allow(clippy::large_enum_variant, clippy::upper_case_acronyms)] 10 | 11 | pub mod disc; 12 | pub mod ecies; 13 | mod errors; 14 | mod mac; 15 | mod node_filter; 16 | mod peer; 17 | mod rlpx; 18 | pub mod transport; 19 | mod types; 20 | pub mod util; 21 | 22 | pub use disc::*; 23 | pub use peer::{DisconnectReason, PeerStream}; 24 | pub use rlpx::{ListenOptions, Swarm, SwarmBuilder}; 25 | pub use types::{ 26 | CapabilityId, CapabilityInfo, CapabilityName, CapabilityServer, CapabilityVersion, 27 | InboundEvent, Message, NodeRecord, OutboundEvent, PeerId, 28 | }; 29 | -------------------------------------------------------------------------------- /src/sentry/devp2p/node_filter.rs: -------------------------------------------------------------------------------- 1 | use super::types::PeerId; 2 | use std::{ 3 | collections::HashSet, 4 | fmt::Debug, 5 | sync::{ 6 | atomic::{AtomicUsize, Ordering}, 7 | Arc, 8 | }, 9 | }; 10 | 11 | pub trait NodeFilter: Debug + Send + 'static { 12 | fn max_peers(&self) -> usize; 13 | fn is_banned(&self, id: PeerId) -> bool; 14 | fn is_allowed(&self, pool_size: usize, id: PeerId) -> bool { 15 | pool_size < self.max_peers() && !self.is_banned(id) 16 | } 17 | fn ban(&mut self, id: PeerId); 18 | } 19 | 20 | #[derive(Debug)] 21 | pub struct MemoryNodeFilter { 22 | peer_limiter: Arc<AtomicUsize>, 23 | ban_list: HashSet<PeerId>, 24 | } 25 | 26 | impl MemoryNodeFilter { 27 | pub fn new(peer_limiter: Arc<AtomicUsize>) -> Self { 28 | Self { 29 | peer_limiter, 30 | ban_list: Default::default(), 31 | } 32 | } 33 | } 34 | 35 | impl NodeFilter for MemoryNodeFilter { 36 | fn max_peers(&self) -> usize { 37 | self.peer_limiter.load(Ordering::Relaxed) 38 | } 39 | 40 | fn is_banned(&self, id: PeerId) -> bool { 41 | self.ban_list.contains(&id) 42 | } 43 | 44 | fn ban(&mut self, id: PeerId) { 45 | self.ban_list.insert(id); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/sentry/devp2p/transport.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use cidr::IpCidr; 3 | use std::{fmt::Debug, net::SocketAddr}; 4 | use tokio::{ 5 | io::{AsyncRead, AsyncWrite}, 6 | net::TcpStream, 7 | }; 8 | use tracing::*; 9 | 10 | pub trait Transport: AsyncRead + AsyncWrite + Debug + Send + Unpin + 'static { 11 | fn remote_addr(&self) -> Option<SocketAddr>; 12 | } 13 | 14 | impl Transport for TcpStream { 15 | fn remote_addr(&self) -> Option<SocketAddr> { 16 | self.peer_addr().ok() 17 | } 18 | } 19 | 20 | #[async_trait] 21 | pub trait TcpServer { 22 | type Conn: Transport; 23 | 24 | async fn accept(&self) -> anyhow::Result<Self::Conn>; 25 | } 26 | 27 | #[async_trait] 28 | impl TcpServer for tokio::net::TcpListener { 29 | type Conn =
tokio::net::TcpStream; 30 | 31 | async fn accept(&self) -> anyhow::Result { 32 | Ok(tokio::net::TcpListener::accept(self).await?.0) 33 | } 34 | } 35 | 36 | pub struct TokioCidrListener { 37 | tcp_server: tokio::net::TcpListener, 38 | cidr_mask: Option, 39 | } 40 | 41 | impl TokioCidrListener { 42 | pub fn new(tcp_server: tokio::net::TcpListener, cidr_mask: Option) -> Self { 43 | Self { 44 | tcp_server, 45 | cidr_mask, 46 | } 47 | } 48 | } 49 | 50 | #[async_trait] 51 | impl TcpServer for TokioCidrListener { 52 | type Conn = tokio::net::TcpStream; 53 | 54 | async fn accept(&self) -> anyhow::Result { 55 | loop { 56 | let (node, remote_addr) = self.tcp_server.accept().await?; 57 | 58 | if let Some(cidr) = &self.cidr_mask { 59 | if !cidr.contains(&remote_addr.ip()) { 60 | debug!( 61 | "Ignoring connection request: {} is not in range {}", 62 | remote_addr, cidr 63 | ); 64 | continue; 65 | } 66 | } 67 | return Ok(node); 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/sentry/devp2p/types.rs: -------------------------------------------------------------------------------- 1 | use crate::sentry::devp2p::{peer::DisconnectReason, util::*}; 2 | use arrayvec::ArrayString; 3 | use async_trait::async_trait; 4 | use auto_impl::auto_impl; 5 | use bytes::{Bytes, BytesMut}; 6 | use derive_more::Display; 7 | use educe::Educe; 8 | pub use ethereum_types::H512 as PeerId; 9 | use fastrlp::*; 10 | use std::{collections::HashMap, fmt::Debug, future::pending, net::SocketAddr, str::FromStr}; 11 | 12 | /// Record that specifies information necessary to connect to RLPx node 13 | #[derive(Clone, Copy, Debug)] 14 | pub struct NodeRecord { 15 | /// Node ID. 16 | pub id: PeerId, 17 | /// Address of RLPx TCP server. 18 | pub addr: SocketAddr, 19 | } 20 | 21 | impl FromStr for NodeRecord { 22 | type Err = Box; 23 | 24 | fn from_str(s: &str) -> Result { 25 | const PREFIX: &str = "enode://"; 26 | 27 | let (prefix, data) = s.split_at(PREFIX.len()); 28 | if prefix != PREFIX { 29 | return Err("Not an enode".into()); 30 | } 31 | 32 | let mut parts = data.split('@'); 33 | let id = parts.next().ok_or("Failed to read remote ID")?.parse()?; 34 | let addr = parts.next().ok_or("Failed to read address")?.parse()?; 35 | 36 | Ok(Self { id, addr }) 37 | } 38 | } 39 | 40 | #[derive(Clone, Copy, Debug, Display, PartialEq, Eq, Hash, PartialOrd, Ord)] 41 | pub struct CapabilityName(pub ArrayString<4>); 42 | 43 | impl Encodable for CapabilityName { 44 | fn encode(&self, out: &mut dyn BufMut) { 45 | self.0.as_bytes().encode(out) 46 | } 47 | fn length(&self) -> usize { 48 | self.0.as_bytes().length() 49 | } 50 | } 51 | 52 | impl Decodable for CapabilityName { 53 | fn decode(buf: &mut &[u8]) -> Result { 54 | Ok(Self( 55 | ArrayString::from( 56 | std::str::from_utf8(&BytesMut::decode(buf)?) 
57 | .map_err(|_| DecodeError::Custom("should be a UTF-8 string"))?, 58 | ) 59 | .map_err(|_| DecodeError::Custom("capability name is too long"))?, 60 | )) 61 | } 62 | } 63 | 64 | pub type CapabilityLength = usize; 65 | pub type CapabilityVersion = usize; 66 | 67 | #[derive(Clone, Debug, Copy, PartialEq, Eq)] 68 | /// Capability information 69 | pub struct CapabilityInfo { 70 | pub name: CapabilityName, 71 | pub version: CapabilityVersion, 72 | pub length: CapabilityLength, 73 | } 74 | 75 | impl CapabilityInfo { 76 | pub fn new(CapabilityId { name, version }: CapabilityId, length: CapabilityLength) -> Self { 77 | Self { 78 | name, 79 | version, 80 | length, 81 | } 82 | } 83 | } 84 | 85 | #[derive(Clone, Debug, Display, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] 86 | #[display(fmt = "{}/{}", name, version)] 87 | pub struct CapabilityId { 88 | pub name: CapabilityName, 89 | pub version: CapabilityVersion, 90 | } 91 | 92 | impl From for CapabilityId { 93 | fn from(CapabilityInfo { name, version, .. }: CapabilityInfo) -> Self { 94 | Self { name, version } 95 | } 96 | } 97 | 98 | #[derive(Clone, Debug, Display)] 99 | pub enum InboundEvent { 100 | #[display( 101 | fmt = "disconnect/{}", 102 | "reason.map(|r| r.to_string()).unwrap_or_else(|| \"(no reason)\".to_string())" 103 | )] 104 | Disconnect { reason: Option }, 105 | #[display(fmt = "message/{}/{}", capability_name, "message.id")] 106 | Message { 107 | capability_name: CapabilityName, 108 | message: Message, 109 | }, 110 | } 111 | 112 | #[derive(Clone, Debug)] 113 | pub enum OutboundEvent { 114 | Disconnect { 115 | reason: DisconnectReason, 116 | }, 117 | Message { 118 | capability_name: CapabilityName, 119 | message: Message, 120 | }, 121 | } 122 | 123 | #[async_trait] 124 | #[auto_impl(&, Box, Arc)] 125 | pub trait CapabilityServer: Send + Sync + 'static { 126 | /// Should be used to set up relevant state for the peer. 127 | fn on_peer_connect(&self, peer: PeerId, caps: HashMap); 128 | /// Called on the next event for peer. 129 | async fn on_peer_event(&self, peer: PeerId, event: InboundEvent); 130 | /// Get the next event for peer. 
131 | async fn next(&self, peer: PeerId) -> OutboundEvent; 132 | } 133 | 134 | #[async_trait] 135 | impl CapabilityServer for () { 136 | fn on_peer_connect(&self, _: PeerId, _: HashMap) {} 137 | 138 | async fn on_peer_event(&self, _: PeerId, _: InboundEvent) {} 139 | 140 | async fn next(&self, _: PeerId) -> OutboundEvent { 141 | pending().await 142 | } 143 | } 144 | 145 | #[derive(Clone, Educe)] 146 | #[educe(Debug)] 147 | pub struct Message { 148 | pub id: usize, 149 | #[educe(Debug(method = "hex_debug"))] 150 | pub data: Bytes, 151 | } 152 | -------------------------------------------------------------------------------- /src/sentry/devp2p/util.rs: -------------------------------------------------------------------------------- 1 | use super::types::*; 2 | use ethereum_types::H256; 3 | use hmac::{Hmac, Mac}; 4 | use secp256k1::PublicKey; 5 | use sha2::Sha256; 6 | use sha3::{Digest, Keccak256}; 7 | use std::fmt::{self, Formatter}; 8 | 9 | pub fn keccak256(data: &[u8]) -> H256 { 10 | H256::from(Keccak256::digest(data).as_ref()) 11 | } 12 | 13 | pub fn sha256(data: &[u8]) -> H256 { 14 | H256::from(Sha256::digest(data).as_ref()) 15 | } 16 | 17 | pub fn hmac_sha256(key: &[u8], input: &[&[u8]], auth_data: &[u8]) -> H256 { 18 | let mut hmac = Hmac::::new_from_slice(key).unwrap(); 19 | for input in input { 20 | hmac.update(input); 21 | } 22 | hmac.update(auth_data); 23 | H256::from_slice(&hmac.finalize().into_bytes()) 24 | } 25 | 26 | pub fn pk2id(pk: &PublicKey) -> PeerId { 27 | PeerId::from_slice(&pk.serialize_uncompressed()[1..]) 28 | } 29 | 30 | pub fn id2pk(id: PeerId) -> Result { 31 | let mut s = [0_u8; 65]; 32 | s[0] = 4; 33 | s[1..].copy_from_slice(id.as_bytes()); 34 | PublicKey::from_slice(&s) 35 | } 36 | 37 | pub fn hex_debug>(s: &T, f: &mut Formatter) -> fmt::Result { 38 | f.write_str(&hex::encode(s)) 39 | } 40 | 41 | #[cfg(test)] 42 | mod tests { 43 | use super::*; 44 | use secp256k1::{SecretKey, SECP256K1}; 45 | 46 | #[test] 47 | fn pk2id2pk() { 48 | let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); 49 | let pubkey = PublicKey::from_secret_key(SECP256K1, &prikey); 50 | assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/sentry/eth.rs: -------------------------------------------------------------------------------- 1 | use super::devp2p::*; 2 | use crate::models::*; 3 | use anyhow::anyhow; 4 | use arrayvec::ArrayString; 5 | use enum_primitive_derive::*; 6 | use ethereum_forkid::{ForkFilter, ForkId}; 7 | use fastrlp::*; 8 | use std::{collections::BTreeSet, convert::TryFrom}; 9 | 10 | pub fn capability_name() -> CapabilityName { 11 | CapabilityName(ArrayString::from("eth").unwrap()) 12 | } 13 | 14 | #[derive(Clone, Debug, RlpEncodable, RlpDecodable, RlpMaxEncodedLen)] 15 | pub struct StatusMessage { 16 | pub protocol_version: usize, 17 | pub network_id: u64, 18 | pub total_difficulty: U256, 19 | pub best_hash: H256, 20 | pub genesis_hash: H256, 21 | pub fork_id: ForkId, 22 | } 23 | 24 | #[derive(Clone, Debug)] 25 | pub struct Forks { 26 | pub genesis: H256, 27 | pub forks: BTreeSet, 28 | } 29 | 30 | #[derive(Clone, Debug)] 31 | pub struct StatusData { 32 | pub network_id: u64, 33 | pub total_difficulty: U256, 34 | pub best_hash: H256, 35 | pub fork_data: Forks, 36 | } 37 | 38 | #[derive(Clone, Debug)] 39 | pub struct FullStatusData { 40 | pub status: StatusData, 41 | pub fork_filter: ForkFilter, 42 | } 43 | 44 | impl TryFrom for FullStatusData { 45 | type Error = 
anyhow::Error; 46 | 47 | fn try_from(value: ethereum_interfaces::sentry::StatusData) -> Result { 48 | let ethereum_interfaces::sentry::StatusData { 49 | network_id, 50 | total_difficulty, 51 | best_hash, 52 | fork_data, 53 | max_block, 54 | } = value; 55 | 56 | let fork_data = fork_data.ok_or_else(|| anyhow!("no fork data"))?; 57 | let genesis = fork_data 58 | .genesis 59 | .ok_or_else(|| anyhow!("no genesis"))? 60 | .into(); 61 | 62 | let fork_filter = ForkFilter::new(max_block, genesis, fork_data.forks.clone()); 63 | let status = StatusData { 64 | network_id, 65 | total_difficulty: total_difficulty 66 | .ok_or_else(|| anyhow!("no total difficulty"))? 67 | .into(), 68 | best_hash: best_hash.ok_or_else(|| anyhow!("no best hash"))?.into(), 69 | fork_data: Forks { 70 | genesis, 71 | forks: fork_data.forks.into_iter().collect(), 72 | }, 73 | }; 74 | 75 | Ok(Self { 76 | status, 77 | fork_filter, 78 | }) 79 | } 80 | } 81 | 82 | #[derive(Clone, Copy, Debug, Primitive)] 83 | pub enum EthMessageId { 84 | Status = 0, 85 | NewBlockHashes = 1, 86 | Transactions = 2, 87 | GetBlockHeaders = 3, 88 | BlockHeaders = 4, 89 | GetBlockBodies = 5, 90 | BlockBodies = 6, 91 | NewBlock = 7, 92 | NewPooledTransactionHashes = 8, 93 | GetPooledTransactions = 9, 94 | PooledTransactions = 10, 95 | GetNodeData = 13, 96 | NodeData = 14, 97 | GetReceipts = 15, 98 | Receipts = 16, 99 | } 100 | 101 | #[derive(Clone, Copy, Debug, Primitive)] 102 | pub enum EthProtocolVersion { 103 | Eth65 = 65, 104 | Eth66 = 66, 105 | } 106 | -------------------------------------------------------------------------------- /src/sentry/grpc.rs: -------------------------------------------------------------------------------- 1 | use super::eth::{EthMessageId, EthProtocolVersion}; 2 | use anyhow::bail; 3 | use ethereum_interfaces::sentry; 4 | use std::convert::TryFrom; 5 | 6 | impl From for sentry::MessageId { 7 | fn from(id: EthMessageId) -> Self { 8 | match id { 9 | EthMessageId::Status => Self::Status66, 10 | EthMessageId::NewBlockHashes => Self::NewBlockHashes66, 11 | EthMessageId::Transactions => Self::Transactions66, 12 | EthMessageId::GetBlockHeaders => Self::GetBlockHeaders66, 13 | EthMessageId::BlockHeaders => Self::BlockHeaders66, 14 | EthMessageId::GetBlockBodies => Self::GetBlockBodies66, 15 | EthMessageId::BlockBodies => Self::BlockBodies66, 16 | EthMessageId::NewBlock => Self::NewBlock66, 17 | EthMessageId::NewPooledTransactionHashes => Self::NewPooledTransactionHashes66, 18 | EthMessageId::GetPooledTransactions => Self::GetPooledTransactions66, 19 | EthMessageId::PooledTransactions => Self::PooledTransactions66, 20 | EthMessageId::GetNodeData => Self::GetNodeData66, 21 | EthMessageId::NodeData => Self::NodeData66, 22 | EthMessageId::GetReceipts => Self::GetReceipts66, 23 | EthMessageId::Receipts => Self::Receipts66, 24 | } 25 | } 26 | } 27 | 28 | impl TryFrom for EthMessageId { 29 | type Error = anyhow::Error; 30 | 31 | fn try_from(id: sentry::MessageId) -> Result { 32 | Ok(match id { 33 | sentry::MessageId::NewBlockHashes66 => Self::NewBlockHashes, 34 | sentry::MessageId::NewBlock66 => Self::NewBlock, 35 | sentry::MessageId::Transactions66 => Self::Transactions, 36 | sentry::MessageId::NewPooledTransactionHashes66 => Self::NewPooledTransactionHashes, 37 | sentry::MessageId::GetBlockHeaders66 => Self::GetBlockHeaders, 38 | sentry::MessageId::GetBlockBodies66 => Self::GetBlockBodies, 39 | sentry::MessageId::GetNodeData66 => Self::GetNodeData, 40 | sentry::MessageId::GetReceipts66 => Self::GetReceipts, 41 | 
sentry::MessageId::GetPooledTransactions66 => Self::GetPooledTransactions, 42 | sentry::MessageId::BlockHeaders66 => Self::BlockHeaders, 43 | sentry::MessageId::BlockBodies66 => Self::BlockBodies, 44 | sentry::MessageId::NodeData66 => Self::NodeData, 45 | sentry::MessageId::Receipts66 => Self::Receipts, 46 | sentry::MessageId::PooledTransactions66 => Self::PooledTransactions, 47 | other => bail!("Unsupported message id: {:?}", other), 48 | }) 49 | } 50 | } 51 | 52 | impl From for sentry::Protocol { 53 | fn from(version: EthProtocolVersion) -> Self { 54 | match version { 55 | EthProtocolVersion::Eth65 => Self::Eth65, 56 | EthProtocolVersion::Eth66 => Self::Eth66, 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/sentry/services/mod.rs: -------------------------------------------------------------------------------- 1 | mod sentry; 2 | 3 | pub use self::sentry::*; 4 | -------------------------------------------------------------------------------- /src/stagedsync/stage.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | consensus::ValidationError, 3 | kv::{mdbx::*, tables}, 4 | models::*, 5 | }; 6 | use async_trait::async_trait; 7 | use auto_impl::auto_impl; 8 | use std::{ 9 | fmt::{Debug, Display}, 10 | time::Instant, 11 | }; 12 | use tracing::*; 13 | 14 | #[derive(Clone, Copy, Debug)] 15 | pub struct StageInput { 16 | pub restarted: bool, 17 | pub first_started_at: (Instant, Option), 18 | pub previous_stage: Option<(StageId, BlockNumber)>, 19 | pub stage_progress: Option, 20 | } 21 | 22 | #[derive(Clone, Copy, Debug)] 23 | pub struct UnwindInput { 24 | pub stage_progress: BlockNumber, 25 | pub unwind_to: BlockNumber, 26 | pub bad_block: Option, 27 | } 28 | 29 | #[derive(Debug, PartialEq, Eq)] 30 | pub enum ExecOutput { 31 | Unwind { 32 | unwind_to: BlockNumber, 33 | }, 34 | Progress { 35 | stage_progress: BlockNumber, 36 | done: bool, 37 | reached_tip: bool, 38 | }, 39 | } 40 | 41 | #[derive(Debug, PartialEq, Eq)] 42 | pub struct UnwindOutput { 43 | pub stage_progress: BlockNumber, 44 | } 45 | 46 | #[allow(clippy::large_enum_variant)] 47 | #[derive(Debug)] 48 | pub enum StageError { 49 | Validation { 50 | block: BlockNumber, 51 | error: ValidationError, 52 | }, 53 | Internal(anyhow::Error), 54 | } 55 | 56 | impl From for StageError { 57 | fn from(e: anyhow::Error) -> Self { 58 | StageError::Internal(e) 59 | } 60 | } 61 | 62 | #[async_trait] 63 | #[auto_impl(&mut, Box)] 64 | pub trait Stage<'db, E>: Send + Sync + Debug 65 | where 66 | E: EnvironmentKind, 67 | { 68 | /// ID of the sync stage. Should not be empty and should be unique. It is recommended to prefix it with reverse domain to avoid clashes (`com.example.my-stage`). 69 | fn id(&self) -> StageId; 70 | /// Called when the stage is executed. The main logic of the stage should be here. 71 | async fn execute<'tx>( 72 | &mut self, 73 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 74 | input: StageInput, 75 | ) -> Result 76 | where 77 | 'db: 'tx; 78 | /// Called when the stage should be unwound. The unwind logic should be there. 
79 | async fn unwind<'tx>( 80 | &mut self, 81 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 82 | input: UnwindInput, 83 | ) -> anyhow::Result 84 | where 85 | 'db: 'tx; 86 | } 87 | 88 | #[derive(Clone, Copy, Debug, PartialEq, Eq)] 89 | pub struct StageId(pub &'static str); 90 | 91 | impl AsRef for StageId { 92 | fn as_ref(&self) -> &str { 93 | self.0 94 | } 95 | } 96 | 97 | impl AsRef<[u8]> for StageId { 98 | fn as_ref(&self) -> &[u8] { 99 | self.0.as_bytes() 100 | } 101 | } 102 | 103 | impl Display for StageId { 104 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 105 | write!(f, "{}", self.0) 106 | } 107 | } 108 | 109 | impl StageId { 110 | #[instrument] 111 | pub fn get_progress<'db, K, E>( 112 | &self, 113 | tx: &MdbxTransaction<'db, K, E>, 114 | ) -> anyhow::Result> 115 | where 116 | K: TransactionKind, 117 | E: EnvironmentKind, 118 | { 119 | tx.get(tables::SyncStage, *self) 120 | } 121 | 122 | #[instrument] 123 | pub fn save_progress<'db, E>( 124 | &self, 125 | tx: &MdbxTransaction<'db, RW, E>, 126 | block: BlockNumber, 127 | ) -> anyhow::Result<()> 128 | where 129 | E: EnvironmentKind, 130 | { 131 | tx.set(tables::SyncStage, *self, block) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/stagedsync/util.rs: -------------------------------------------------------------------------------- 1 | use super::stage::*; 2 | use crate::{ 3 | kv::{mdbx::*, traits::*}, 4 | models::*, 5 | }; 6 | 7 | pub fn unwind_by_block_key<'db: 'tx, 'tx, T, F, E>( 8 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 9 | table: T, 10 | input: UnwindInput, 11 | block_key_extractor: F, 12 | ) -> anyhow::Result<()> 13 | where 14 | T: Table, 15 | T::Key: TableDecode, 16 | F: Fn(T::Key) -> BlockNumber, 17 | E: EnvironmentKind, 18 | { 19 | let mut cur = tx.cursor(table)?; 20 | let mut e = cur.last()?; 21 | while let Some(block_num) = e.map(|(k, _)| (block_key_extractor)(k)) { 22 | if block_num <= input.unwind_to { 23 | break; 24 | } 25 | 26 | cur.delete_current()?; 27 | 28 | e = cur.prev()?; 29 | } 30 | 31 | Ok(()) 32 | } 33 | 34 | pub fn unwind_by_block_key_duplicates<'db: 'tx, 'tx, T, F, E>( 35 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 36 | table: T, 37 | input: UnwindInput, 38 | block_key_extractor: F, 39 | ) -> anyhow::Result<()> 40 | where 41 | T: DupSort, 42 | T::Key: TableDecode, 43 | F: Fn(T::Key) -> BlockNumber, 44 | E: EnvironmentKind, 45 | { 46 | let mut cur = tx.cursor(table)?; 47 | let mut e = cur.last()?; 48 | while let Some(block_num) = e.map(|(k, _)| (block_key_extractor)(k)) { 49 | if block_num <= input.unwind_to { 50 | break; 51 | } 52 | 53 | cur.delete_current_duplicates()?; 54 | 55 | e = cur.prev_no_dup()?; 56 | } 57 | 58 | Ok(()) 59 | } 60 | -------------------------------------------------------------------------------- /src/stages/block_hashes.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | etl::collector::*, 3 | kv::{mdbx::*, tables}, 4 | models::*, 5 | stagedsync::stage::*, 6 | StageId, 7 | }; 8 | use async_trait::async_trait; 9 | use std::sync::Arc; 10 | use tempfile::TempDir; 11 | use tokio::pin; 12 | use tracing::*; 13 | 14 | pub const BLOCK_HASHES: StageId = StageId("BlockHashes"); 15 | 16 | /// Generate BlockHashes => BlockNumber Mapping 17 | #[derive(Debug)] 18 | pub struct BlockHashes { 19 | pub temp_dir: Arc, 20 | } 21 | 22 | #[async_trait] 23 | impl<'db, E> Stage<'db, E> for BlockHashes 24 | where 25 | E: EnvironmentKind, 26 | { 27 | fn id(&self) -> 
StageId { 28 | BLOCK_HASHES 29 | } 30 | 31 | async fn execute<'tx>( 32 | &mut self, 33 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 34 | input: StageInput, 35 | ) -> Result 36 | where 37 | 'db: 'tx, 38 | { 39 | let original_highest_block = input.stage_progress.unwrap_or(BlockNumber(0)); 40 | let mut highest_block = original_highest_block; 41 | 42 | let bodies_cursor = tx.cursor(tables::CanonicalHeader)?; 43 | let mut blockhashes_cursor = tx.cursor(tables::HeaderNumber.erased())?; 44 | 45 | let mut collector = TableCollector::new(&self.temp_dir, OPTIMAL_BUFFER_CAPACITY); 46 | let walker = bodies_cursor.walk(Some(highest_block + 1)); 47 | pin!(walker); 48 | 49 | while let Some((block_number, block_hash)) = walker.next().transpose()? { 50 | if block_number.0 % 500_000 == 0 { 51 | info!("Processing block {}", block_number); 52 | } 53 | // BlockBody Key is block_number + hash, so we just separate and collect 54 | collector.push(block_hash, block_number); 55 | 56 | highest_block = block_number; 57 | } 58 | collector.load(&mut blockhashes_cursor)?; 59 | Ok(ExecOutput::Progress { 60 | stage_progress: highest_block, 61 | done: true, 62 | reached_tip: true, 63 | }) 64 | } 65 | 66 | async fn unwind<'tx>( 67 | &mut self, 68 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 69 | input: UnwindInput, 70 | ) -> anyhow::Result 71 | where 72 | 'db: 'tx, 73 | { 74 | let mut header_number_cur = tx.cursor(tables::HeaderNumber)?; 75 | 76 | let walker = tx.cursor(tables::CanonicalHeader)?.walk_back(None); 77 | pin!(walker); 78 | 79 | while let Some((block_num, block_hash)) = walker.next().transpose()? { 80 | if block_num > input.unwind_to { 81 | if header_number_cur.seek(block_hash)?.is_some() { 82 | header_number_cur.delete_current()?; 83 | } 84 | } else { 85 | break; 86 | } 87 | } 88 | 89 | Ok(UnwindOutput { 90 | stage_progress: input.unwind_to, 91 | }) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/stages/finish.rs: -------------------------------------------------------------------------------- 1 | use crate::{kv::mdbx::*, models::*, stagedsync::stage::*, StageId}; 2 | use async_trait::async_trait; 3 | 4 | pub const FINISH: StageId = StageId("Finish"); 5 | 6 | #[derive(Debug)] 7 | pub struct Finish; 8 | 9 | #[async_trait] 10 | impl<'db, E> Stage<'db, E> for Finish 11 | where 12 | E: EnvironmentKind, 13 | { 14 | fn id(&self) -> StageId { 15 | FINISH 16 | } 17 | async fn execute<'tx>( 18 | &mut self, 19 | _: &'tx mut MdbxTransaction<'db, RW, E>, 20 | input: StageInput, 21 | ) -> Result 22 | where 23 | 'db: 'tx, 24 | { 25 | let prev_stage = input 26 | .previous_stage 27 | .map(|(_, b)| b) 28 | .unwrap_or(BlockNumber(0)); 29 | 30 | Ok(ExecOutput::Progress { 31 | stage_progress: prev_stage, 32 | done: true, 33 | reached_tip: true, 34 | }) 35 | } 36 | async fn unwind<'tx>( 37 | &mut self, 38 | _: &'tx mut MdbxTransaction<'db, RW, E>, 39 | input: UnwindInput, 40 | ) -> anyhow::Result 41 | where 42 | 'db: 'tx, 43 | { 44 | Ok(UnwindOutput { 45 | stage_progress: input.unwind_to, 46 | }) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/stages/interhashes.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | accessors, 3 | consensus::DuoError, 4 | kv::mdbx::*, 5 | models::*, 6 | stagedsync::stage::{ExecOutput, Stage, StageError, StageInput, UnwindInput, UnwindOutput}, 7 | stages::stage_util::should_do_clean_promotion, 8 | trie::*, 9 | StageId, 10 | }; 11 | use 
anyhow::format_err; 12 | use async_trait::async_trait; 13 | use std::{cmp, sync::Arc}; 14 | use tempfile::TempDir; 15 | use tracing::*; 16 | 17 | pub const INTERMEDIATE_HASHES: StageId = StageId("IntermediateHashes"); 18 | 19 | /// Generation of intermediate hashes for efficient computation of the state trie root 20 | #[derive(Debug)] 21 | pub struct Interhashes { 22 | temp_dir: Arc, 23 | clean_promotion_threshold: u64, 24 | } 25 | 26 | impl Interhashes { 27 | pub fn new(temp_dir: Arc, clean_promotion_threshold: Option) -> Self { 28 | Self { 29 | temp_dir, 30 | clean_promotion_threshold: clean_promotion_threshold.unwrap_or(1_000_000_000_000), 31 | } 32 | } 33 | } 34 | 35 | #[async_trait] 36 | impl<'db, E> Stage<'db, E> for Interhashes 37 | where 38 | E: EnvironmentKind, 39 | { 40 | fn id(&self) -> StageId { 41 | INTERMEDIATE_HASHES 42 | } 43 | 44 | async fn execute<'tx>( 45 | &mut self, 46 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 47 | input: StageInput, 48 | ) -> Result 49 | where 50 | 'db: 'tx, 51 | { 52 | let genesis = BlockNumber(0); 53 | let max_block = input 54 | .previous_stage 55 | .map(|tuple| tuple.1) 56 | .ok_or_else(|| format_err!("Cannot be first stage"))?; 57 | let past_progress = input.stage_progress.unwrap_or(genesis); 58 | 59 | if max_block > past_progress { 60 | let block_state_root = accessors::chain::header::read(tx, max_block)? 61 | .ok_or_else(|| format_err!("No header for block {}", max_block))? 62 | .state_root; 63 | 64 | let trie_root = if should_do_clean_promotion( 65 | tx, 66 | genesis, 67 | past_progress, 68 | max_block, 69 | self.clean_promotion_threshold, 70 | )? { 71 | debug!("Regenerating intermediate hashes"); 72 | regenerate_intermediate_hashes(tx, self.temp_dir.as_ref(), Some(block_state_root)) 73 | } else { 74 | debug!("Incrementing intermediate hashes"); 75 | increment_intermediate_hashes( 76 | tx, 77 | self.temp_dir.as_ref(), 78 | past_progress, 79 | Some(block_state_root), 80 | ) 81 | } 82 | .map_err(|e| match e { 83 | DuoError::Validation(error) => StageError::Validation { 84 | block: max_block, 85 | error, 86 | }, 87 | DuoError::Internal(e) => { 88 | StageError::Internal(e.context("state root computation failure")) 89 | } 90 | })?; 91 | 92 | info!("Block #{} state root OK: {:?}", max_block, trie_root) 93 | }; 94 | 95 | Ok(ExecOutput::Progress { 96 | stage_progress: cmp::max(max_block, past_progress), 97 | done: true, 98 | reached_tip: true, 99 | }) 100 | } 101 | 102 | async fn unwind<'tx>( 103 | &mut self, 104 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 105 | input: UnwindInput, 106 | ) -> anyhow::Result 107 | where 108 | 'db: 'tx, 109 | { 110 | let block_state_root = accessors::chain::header::read(tx, input.unwind_to)? 111 | .ok_or_else(|| format_err!("No header for block {}", input.unwind_to))? 
112 | .state_root; 113 | 114 | unwind_intermediate_hashes( 115 | tx, 116 | self.temp_dir.as_ref(), 117 | input.unwind_to, 118 | Some(block_state_root), 119 | )?; 120 | 121 | Ok(UnwindOutput { 122 | stage_progress: input.unwind_to, 123 | }) 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /src/stages/log_address_index.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | kv::{mdbx::*, tables}, 3 | stagedsync::stage::*, 4 | stages::stage_util::*, 5 | StageId, 6 | }; 7 | use async_trait::async_trait; 8 | 9 | #[derive(Debug)] 10 | pub struct LogAddressIndex(pub IndexParams); 11 | 12 | #[async_trait] 13 | impl<'db, E> Stage<'db, E> for LogAddressIndex 14 | where 15 | E: EnvironmentKind, 16 | { 17 | fn id(&self) -> StageId { 18 | StageId("LogAddressIndex") 19 | } 20 | 21 | async fn execute<'tx>( 22 | &mut self, 23 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 24 | input: StageInput, 25 | ) -> Result 26 | where 27 | 'db: 'tx, 28 | { 29 | Ok(execute_index( 30 | tx, 31 | input, 32 | &self.0, 33 | tables::LogAddressesByBlock, 34 | tables::LogAddressIndex, 35 | |block_number, address| (block_number, address), 36 | )?) 37 | } 38 | 39 | async fn unwind<'tx>( 40 | &mut self, 41 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 42 | input: UnwindInput, 43 | ) -> anyhow::Result 44 | where 45 | 'db: 'tx, 46 | { 47 | unwind_index( 48 | tx, 49 | input, 50 | tables::LogAddressesByBlock, 51 | tables::LogAddressIndex, 52 | |_, address| address, 53 | ) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/stages/log_topic_index.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | kv::{mdbx::*, tables}, 3 | stagedsync::stage::*, 4 | stages::stage_util::*, 5 | StageId, 6 | }; 7 | use async_trait::async_trait; 8 | 9 | #[derive(Debug)] 10 | pub struct LogTopicIndex(pub IndexParams); 11 | 12 | #[async_trait] 13 | impl<'db, E> Stage<'db, E> for LogTopicIndex 14 | where 15 | E: EnvironmentKind, 16 | { 17 | fn id(&self) -> StageId { 18 | StageId("LogTopicIndex") 19 | } 20 | 21 | async fn execute<'tx>( 22 | &mut self, 23 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 24 | input: StageInput, 25 | ) -> Result 26 | where 27 | 'db: 'tx, 28 | { 29 | Ok(execute_index( 30 | tx, 31 | input, 32 | &self.0, 33 | tables::LogTopicsByBlock, 34 | tables::LogTopicIndex, 35 | |block_number, topic| (block_number, topic), 36 | )?) 
37 | } 38 | 39 | async fn unwind<'tx>( 40 | &mut self, 41 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 42 | input: UnwindInput, 43 | ) -> anyhow::Result 44 | where 45 | 'db: 'tx, 46 | { 47 | unwind_index( 48 | tx, 49 | input, 50 | tables::LogTopicsByBlock, 51 | tables::LogTopicIndex, 52 | |_, topic| topic, 53 | ) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/stages/mod.rs: -------------------------------------------------------------------------------- 1 | mod block_hashes; 2 | mod bodies; 3 | mod call_trace_index; 4 | mod execution; 5 | mod finish; 6 | mod hashstate; 7 | mod headers; 8 | mod history_index; 9 | mod interhashes; 10 | mod log_address_index; 11 | mod log_topic_index; 12 | mod sender_recovery; 13 | pub mod stage_util; 14 | mod total_gas_index; 15 | mod total_tx_index; 16 | mod tx_lookup; 17 | 18 | pub use block_hashes::*; 19 | pub use bodies::*; 20 | pub use call_trace_index::*; 21 | pub use execution::*; 22 | pub use finish::*; 23 | pub use hashstate::*; 24 | pub use headers::*; 25 | pub use history_index::*; 26 | pub use interhashes::*; 27 | pub use log_address_index::*; 28 | pub use log_topic_index::*; 29 | pub use sender_recovery::*; 30 | pub use total_gas_index::*; 31 | pub use total_tx_index::*; 32 | pub use tx_lookup::*; 33 | -------------------------------------------------------------------------------- /src/stages/total_gas_index.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | accessors, 3 | kv::{mdbx::*, tables}, 4 | stagedsync::stage::*, 5 | StageId, 6 | }; 7 | use anyhow::format_err; 8 | use async_trait::async_trait; 9 | use tracing::*; 10 | 11 | pub const TOTAL_GAS_INDEX: StageId = StageId("TotalGasIndex"); 12 | 13 | #[derive(Debug)] 14 | pub struct TotalGasIndex; 15 | 16 | #[async_trait] 17 | impl<'db, E> Stage<'db, E> for TotalGasIndex 18 | where 19 | E: EnvironmentKind, 20 | { 21 | fn id(&self) -> StageId { 22 | TOTAL_GAS_INDEX 23 | } 24 | 25 | async fn execute<'tx>( 26 | &mut self, 27 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 28 | input: StageInput, 29 | ) -> Result 30 | where 31 | 'db: 'tx, 32 | { 33 | let prev_progress = input.stage_progress.unwrap_or_default(); 34 | 35 | let mut cumulative_index_cur = tx.cursor(tables::TotalGas)?; 36 | 37 | let starting_block = prev_progress + 1; 38 | let max_block = input 39 | .previous_stage 40 | .map(|(_, v)| v) 41 | .ok_or_else(|| format_err!("Cannot be the first stage"))?; 42 | 43 | if max_block >= starting_block { 44 | let mut gas = cumulative_index_cur.seek_exact(prev_progress)?.unwrap().1; 45 | 46 | for block_num in starting_block..=max_block { 47 | if block_num.0 % 500_000 == 0 { 48 | info!("Building total gas index for block {block_num}"); 49 | } 50 | 51 | let header = accessors::chain::header::read(tx, block_num)? 52 | .ok_or_else(|| format_err!("No header for block #{block_num}"))?; 53 | 54 | gas += header.gas_used; 55 | 56 | cumulative_index_cur.append(block_num, gas)?; 57 | } 58 | } 59 | 60 | Ok(ExecOutput::Progress { 61 | stage_progress: max_block, 62 | done: true, 63 | reached_tip: true, 64 | }) 65 | } 66 | 67 | async fn unwind<'tx>( 68 | &mut self, 69 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 70 | input: UnwindInput, 71 | ) -> anyhow::Result 72 | where 73 | 'db: 'tx, 74 | { 75 | let mut cumulative_index_cur = tx.cursor(tables::TotalGas)?; 76 | 77 | while let Some((block_num, _)) = cumulative_index_cur.last()? 
{ 78 | if block_num > input.unwind_to { 79 | cumulative_index_cur.delete_current()?; 80 | } else { 81 | break; 82 | } 83 | } 84 | 85 | Ok(UnwindOutput { 86 | stage_progress: input.unwind_to, 87 | }) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/stages/total_tx_index.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | kv::{mdbx::*, tables}, 3 | stagedsync::stage::*, 4 | StageId, 5 | }; 6 | use anyhow::format_err; 7 | use async_trait::async_trait; 8 | use tracing::*; 9 | 10 | pub const TOTAL_TX_INDEX: StageId = StageId("TotalTxIndex"); 11 | 12 | #[derive(Debug)] 13 | pub struct TotalTxIndex; 14 | 15 | #[async_trait] 16 | impl<'db, E> Stage<'db, E> for TotalTxIndex 17 | where 18 | E: EnvironmentKind, 19 | { 20 | fn id(&self) -> StageId { 21 | TOTAL_TX_INDEX 22 | } 23 | 24 | async fn execute<'tx>( 25 | &mut self, 26 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 27 | input: StageInput, 28 | ) -> Result 29 | where 30 | 'db: 'tx, 31 | { 32 | let prev_progress = input.stage_progress.unwrap_or_default(); 33 | 34 | let mut cumulative_index_cur = tx.cursor(tables::TotalTx)?; 35 | 36 | let starting_block = prev_progress + 1; 37 | let max_block = input 38 | .previous_stage 39 | .map(|(_, v)| v) 40 | .ok_or_else(|| format_err!("Cannot be the first stage"))?; 41 | 42 | if max_block >= starting_block { 43 | let mut tx_num = cumulative_index_cur 44 | .seek_exact(prev_progress)? 45 | .ok_or_else(|| { 46 | format_err!("Cumulative index not found for block #{prev_progress}") 47 | })? 48 | .1; 49 | 50 | for block_num in starting_block..=max_block { 51 | if block_num.0 % 500_000 == 0 { 52 | info!("Building total tx index for block {block_num}"); 53 | } 54 | 55 | let body = tx 56 | .get(tables::BlockBody, block_num)? 57 | .ok_or_else(|| format_err!("Body not found for block #{block_num}"))?; 58 | 59 | tx_num += body.tx_amount; 60 | 61 | cumulative_index_cur.append(block_num, tx_num)?; 62 | } 63 | } 64 | 65 | Ok(ExecOutput::Progress { 66 | stage_progress: max_block, 67 | done: true, 68 | reached_tip: true, 69 | }) 70 | } 71 | 72 | async fn unwind<'tx>( 73 | &mut self, 74 | tx: &'tx mut MdbxTransaction<'db, RW, E>, 75 | input: UnwindInput, 76 | ) -> anyhow::Result 77 | where 78 | 'db: 'tx, 79 | { 80 | let mut cumulative_index_cur = tx.cursor(tables::TotalTx)?; 81 | 82 | while let Some((block_num, _)) = cumulative_index_cur.last()? { 83 | if block_num > input.unwind_to { 84 | cumulative_index_cur.delete_current()?; 85 | } else { 86 | break; 87 | } 88 | } 89 | 90 | Ok(UnwindOutput { 91 | stage_progress: input.unwind_to, 92 | }) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/state/database.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | kv::{mdbx::*, tables}, 3 | models::*, 4 | u256_to_h256, 5 | }; 6 | 7 | pub fn seek_storage_key( 8 | cur: &mut MdbxCursor<'_, K, tables::Storage>, 9 | address: Address, 10 | location: U256, 11 | ) -> anyhow::Result> 12 | where 13 | K: TransactionKind, 14 | { 15 | let location = u256_to_h256(location); 16 | if let Some((l, v)) = cur.seek_both_range(address, location)? 
{ 17 | if l == location { 18 | return Ok(Some(v)); 19 | } 20 | } 21 | 22 | Ok(None) 23 | } 24 | 25 | pub fn upsert_storage_value( 26 | cur: &mut MdbxCursor<'_, RW, tables::Storage>, 27 | address: Address, 28 | location: U256, 29 | value: U256, 30 | ) -> anyhow::Result<()> { 31 | if seek_storage_key(cur, address, location)?.is_some() { 32 | cur.delete_current()?; 33 | } 34 | 35 | if value != 0 { 36 | cur.upsert(address, (u256_to_h256(location), value))?; 37 | } 38 | 39 | Ok(()) 40 | } 41 | 42 | pub fn seek_hashed_storage_key<K>( 43 | cur: &mut MdbxCursor<'_, K, tables::HashedStorage>, 44 | hashed_address: H256, 45 | hashed_location: H256, 46 | ) -> anyhow::Result<Option<U256>> 47 | where 48 | K: TransactionKind, 49 | { 50 | Ok(cur 51 | .seek_both_range(hashed_address, hashed_location)? 52 | .filter(|&(l, _)| l == hashed_location) 53 | .map(|(_, v)| v)) 54 | } 55 | 56 | pub fn upsert_hashed_storage_value( 57 | cur: &mut MdbxCursor<'_, RW, tables::HashedStorage>, 58 | hashed_address: H256, 59 | hashed_location: H256, 60 | value: U256, 61 | ) -> anyhow::Result<()> { 62 | if seek_hashed_storage_key(cur, hashed_address, hashed_location)?.is_some() { 63 | cur.delete_current()?; 64 | } 65 | 66 | if value != 0 { 67 | cur.upsert(hashed_address, (hashed_location, value))?; 68 | } 69 | 70 | Ok(()) 71 | } 72 | -------------------------------------------------------------------------------- /src/state/database_version.rs: -------------------------------------------------------------------------------- 1 | use crate::kv::{mdbx::*, tables, MdbxWithDirHandle}; 2 | use anyhow::format_err; 3 | use std::collections::BTreeMap; 4 | use thiserror::Error; 5 | 6 | const DATABASE_VERSION: u64 = 3; 7 | 8 | type Migration<'db, E> = fn(&mut MdbxTransaction<'db, RW, E>) -> anyhow::Result<u64>; 9 | 10 | #[derive(Debug, Error)] 11 | pub enum MigrationError { 12 | #[error("No migration from database version {current}, please re-sync")] 13 | DbTooOld { current: u64 }, 14 | #[error("Database version {current} too high. 
Newest version {newest}")] 15 | DbTooNew { current: u64, newest: u64 }, 16 | #[error("Migration error")] 17 | MigrationError(#[from] anyhow::Error), 18 | } 19 | 20 | fn init_database_version<E>(_: &mut MdbxTransaction<'_, RW, E>) -> anyhow::Result<u64> 21 | where 22 | E: EnvironmentKind, 23 | { 24 | Ok(DATABASE_VERSION) 25 | } 26 | 27 | fn set_database_version<E>(txn: &MdbxTransaction<'_, RW, E>, version: u64) -> anyhow::Result<()> 28 | where 29 | E: EnvironmentKind, 30 | { 31 | txn.set(tables::Version, (), version)?; 32 | Ok(()) 33 | } 34 | 35 | fn get_database_version<K, E>(txn: &MdbxTransaction<'_, K, E>) -> anyhow::Result<u64> 36 | where 37 | K: TransactionKind, 38 | E: EnvironmentKind, 39 | { 40 | let version = txn.get(tables::Version, ())?.unwrap_or(0); 41 | Ok(version) 42 | } 43 | 44 | fn apply_migrations<'db, E>( 45 | tx: &mut MdbxTransaction<'db, RW, E>, 46 | from_version: u64, 47 | migrations: BTreeMap<u64, Migration<'db, E>>, 48 | ) -> Result<(), MigrationError> 49 | where 50 | E: EnvironmentKind, 51 | { 52 | let mut current_version = from_version; 53 | while current_version < DATABASE_VERSION { 54 | if let Some(migration) = migrations.get(&current_version) { 55 | let new_version = 56 | migration(tx).map_err(|e| format_err!("Failed database migration: {e}"))?; 57 | current_version = new_version; 58 | set_database_version(tx, new_version)?; 59 | } else { 60 | return Err(MigrationError::DbTooOld { 61 | current: current_version, 62 | }); 63 | } 64 | } 65 | 66 | Ok(()) 67 | } 68 | 69 | pub fn migrate_database<E>(db: &MdbxWithDirHandle<E>) -> Result<(), MigrationError> 70 | where 71 | E: EnvironmentKind, 72 | { 73 | let mut tx = db.begin_mutable()?; 74 | let current_version = get_database_version(&tx).unwrap_or(0); 75 | 76 | if current_version > DATABASE_VERSION { 77 | return Err(MigrationError::DbTooNew { 78 | current: current_version, 79 | newest: DATABASE_VERSION, 80 | }); 81 | } 82 | 83 | let migrations: BTreeMap<u64, Migration<E>> = 84 | BTreeMap::from([(0, init_database_version as Migration<E>); 1]); 85 | 86 | apply_migrations(&mut tx, current_version, migrations)?; 87 | tx.commit()?; 88 | 89 | Ok(()) 90 | } 91 | 92 | #[cfg(test)] 93 | mod tests { 94 | use super::*; 95 | use crate::kv::new_mem_chaindata; 96 | use std::assert_matches::assert_matches; 97 | 98 | #[test] 99 | fn test_migrate_database_new() { 100 | let db = new_mem_chaindata().unwrap(); 101 | migrate_database(&db).unwrap(); 102 | let version = get_database_version(&db.begin().unwrap()); 103 | assert_eq!(DATABASE_VERSION, version.unwrap()); 104 | } 105 | 106 | #[test] 107 | fn test_migrate_database_too_high() { 108 | let db = new_mem_chaindata().unwrap(); 109 | { 110 | let txn = db.begin_mutable().unwrap(); 111 | set_database_version(&txn, u64::MAX).unwrap(); 112 | txn.commit().unwrap(); 113 | } 114 | assert_matches!( 115 | migrate_database(&db), 116 | Err(MigrationError::DbTooNew { 117 | current, 118 | newest 119 | }) if current == u64::MAX && newest == DATABASE_VERSION 120 | ); 121 | } 122 | 123 | #[test] 124 | fn test_apply_migrations() { 125 | let db = new_mem_chaindata().unwrap(); 126 | assert_matches!( 127 | apply_migrations(&mut db.begin_mutable().unwrap(), 0, BTreeMap::new()), 128 | Err(MigrationError::DbTooOld { current }) if current == 0 129 | ); 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /src/state/delta.rs: -------------------------------------------------------------------------------- 1 | use super::{intra_block_state::IntraBlockState, object::Object}; 2 | use crate::{models::*, StateReader, Storage}; 3 | use 
std::{collections::hash_map::Entry, fmt::Debug}; 4 | 5 | /// Reversible change made to `IntraBlockState`. 6 | #[derive(Debug)] 7 | pub enum Delta { 8 | Create { 9 | address: Address, 10 | }, 11 | Update { 12 | address: Address, 13 | previous: Object, 14 | }, 15 | UpdateBalance { 16 | address: Address, 17 | previous: U256, 18 | }, 19 | Incarnation { 20 | address: Address, 21 | }, 22 | Selfdestruct { 23 | address: Address, 24 | }, 25 | Touch { 26 | address: Address, 27 | }, 28 | StorageChange { 29 | address: Address, 30 | key: U256, 31 | previous: U256, 32 | }, 33 | StorageWipe { 34 | address: Address, 35 | storage: Storage, 36 | }, 37 | StorageCreate { 38 | address: Address, 39 | }, 40 | StorageAccess { 41 | address: Address, 42 | key: U256, 43 | }, 44 | AccountAccess { 45 | address: Address, 46 | }, 47 | } 48 | 49 | impl Delta { 50 | pub fn revert<R>(self, state: &mut IntraBlockState<'_, R>) 51 | where 52 | R: StateReader, 53 | { 54 | match self { 55 | Delta::Create { address } => { 56 | state.objects.remove(&address); 57 | } 58 | Delta::Update { address, previous } => { 59 | state.objects.insert(address, previous); 60 | } 61 | Delta::UpdateBalance { address, previous } => { 62 | state 63 | .objects 64 | .get_mut(&address) 65 | .unwrap() 66 | .current 67 | .as_mut() 68 | .unwrap() 69 | .balance = previous; 70 | } 71 | Delta::Incarnation { address } => { 72 | let Entry::Occupied(mut e) = state.incarnations.entry(address) else {unreachable!()}; 73 | 74 | *e.get_mut() -= 1; 75 | if *e.get() == 0 { 76 | e.remove(); 77 | } 78 | } 79 | Delta::Selfdestruct { address } => { 80 | state.self_destructs.remove(&address); 81 | } 82 | Delta::Touch { address } => { 83 | state.touched.remove(&address); 84 | } 85 | Delta::StorageChange { 86 | address, 87 | key, 88 | previous, 89 | } => { 90 | state 91 | .storage 92 | .entry(address) 93 | .or_default() 94 | .current 95 | .insert(key, previous); 96 | } 97 | Delta::StorageWipe { address, storage } => { 98 | state.storage.insert(address, storage); 99 | } 100 | Delta::StorageCreate { address } => { 101 | state.storage.remove(&address); 102 | } 103 | Delta::StorageAccess { address, key } => { 104 | state 105 | .accessed_storage_keys 106 | .entry(address) 107 | .or_default() 108 | .remove(&key); 109 | } 110 | Delta::AccountAccess { address } => { 111 | state.accessed_addresses.remove(&address); 112 | } 113 | } 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/state/interface.rs: -------------------------------------------------------------------------------- 1 | use crate::models::*; 2 | use auto_impl::auto_impl; 3 | use bytes::Bytes; 4 | use std::fmt::Debug; 5 | 6 | #[auto_impl(&mut, &, Box)] 7 | pub trait HeaderReader: Debug + Send + Sync { 8 | fn read_header( 9 | &self, 10 | block_number: BlockNumber, 11 | block_hash: H256, 12 | ) -> anyhow::Result<Option<BlockHeader>>; 13 | 14 | fn read_parent_header(&self, header: &BlockHeader) -> anyhow::Result<Option<BlockHeader>> { 15 | if let Some(parent_number) = header.number.0.checked_sub(1) { 16 | return self.read_header(parent_number.into(), header.parent_hash); 17 | } 18 | 19 | Ok(None) 20 | } 21 | } 22 | 23 | #[auto_impl(&mut, &, Box)] 24 | pub trait BlockReader: HeaderReader { 25 | fn read_body( 26 | &self, 27 | block_number: BlockNumber, 28 | block_hash: H256, 29 | ) -> anyhow::Result<Option<BlockBody>>; 30 | } 31 | 32 | #[auto_impl(&mut, Box)] 33 | pub trait StateReader: Debug + Send + Sync { 34 | fn read_account(&self, address: Address) -> anyhow::Result<Option<Account>>; 35 | 36 | fn read_code(&self, code_hash: H256) -> 
anyhow::Result<Bytes>; 37 | 38 | fn read_storage(&self, address: Address, location: U256) -> anyhow::Result<U256>; 39 | } 40 | 41 | #[auto_impl(&mut, Box)] 42 | pub trait StateWriter: Debug + Send + Sync { 43 | fn erase_storage(&mut self, address: Address) -> anyhow::Result<()>; 44 | 45 | /// State changes 46 | /// Change sets are backward changes of the state, i.e. account/storage values _at the beginning of a block_. 47 | 48 | /// Mark the beginning of a new block. 49 | /// Must be called prior to calling update_account/update_account_code/update_storage. 50 | fn begin_block(&mut self, block_number: BlockNumber); 51 | 52 | fn update_account( 53 | &mut self, 54 | address: Address, 55 | initial: Option<Account>, 56 | current: Option<Account>, 57 | ); 58 | 59 | fn update_code(&mut self, code_hash: H256, code: Bytes) -> anyhow::Result<()>; 60 | 61 | fn update_storage( 62 | &mut self, 63 | address: Address, 64 | location: U256, 65 | initial: U256, 66 | current: U256, 67 | ) -> anyhow::Result<()>; 68 | } 69 | 70 | pub trait State: HeaderReader + StateReader + StateWriter {} 71 | 72 | impl<T> State for T where T: HeaderReader + StateReader + StateWriter {} 73 | -------------------------------------------------------------------------------- /src/state/mod.rs: -------------------------------------------------------------------------------- 1 | mod buffer; 2 | mod database; 3 | pub mod database_version; 4 | mod delta; 5 | pub mod genesis; 6 | mod in_memory_state; 7 | mod interface; 8 | mod intra_block_state; 9 | mod object; 10 | 11 | pub use self::{ 12 | buffer::*, database::*, in_memory_state::*, interface::*, intra_block_state::*, object::*, 13 | }; 14 | -------------------------------------------------------------------------------- /src/state/object.rs: -------------------------------------------------------------------------------- 1 | use crate::models::*; 2 | use std::collections::HashMap; 3 | 4 | #[derive(Clone, Debug, Default)] 5 | pub struct Object { 6 | pub initial: Option<Account>, 7 | pub current: Option<Account>, 8 | } 9 | 10 | #[derive(Debug, Default)] 11 | pub struct CommittedValue { 12 | /// Value at the beginning of the block 13 | pub initial: U256, 14 | /// Value at the beginning of the transaction; see EIP-2200 15 | pub original: U256, 16 | } 17 | 18 | #[derive(Debug, Default)] 19 | pub struct Storage { 20 | pub committed: HashMap<U256, CommittedValue>, 21 | pub current: HashMap<U256, U256>, 22 | } 23 | -------------------------------------------------------------------------------- /src/trie/mod.rs: -------------------------------------------------------------------------------- 1 | mod hash_builder; 2 | mod intermediate_hashes; 3 | mod node; 4 | mod prefix_set; 5 | mod util; 6 | mod vector_root; 7 | 8 | pub use hash_builder::{unpack_nibbles, HashBuilder}; 9 | pub use intermediate_hashes::{ 10 | do_increment_intermediate_hashes, increment_intermediate_hashes, 11 | regenerate_intermediate_hashes, unwind_intermediate_hashes, DbTrieLoader, 12 | }; 13 | pub use prefix_set::PrefixSet; 14 | pub use vector_root::{root_hash, TrieEncode}; 15 | -------------------------------------------------------------------------------- /src/trie/node.rs: -------------------------------------------------------------------------------- 1 | use crate::{models::KECCAK_LENGTH, trie::util::assert_subset}; 2 | use ethereum_types::H256; 3 | 4 | #[derive(Clone, Debug, PartialEq, Eq)] 5 | pub struct Node { 6 | pub state_mask: u16, 7 | pub tree_mask: u16, 8 | pub hash_mask: u16, 9 | pub hashes: Vec<H256>, 10 | pub root_hash: Option<H256>, 11 | } 12 | 13 | impl Node { 14 | pub fn new( 15 | state_mask: u16, 
16 | tree_mask: u16, 17 | hash_mask: u16, 18 | hashes: Vec<H256>, 19 | root_hash: Option<H256>, 20 | ) -> Self { 21 | assert_subset(tree_mask, state_mask); 22 | assert_subset(hash_mask, state_mask); 23 | assert_eq!(hash_mask.count_ones() as usize, hashes.len()); 24 | Self { 25 | state_mask, 26 | tree_mask, 27 | hash_mask, 28 | hashes, 29 | root_hash, 30 | } 31 | } 32 | 33 | pub fn hash_for_nibble(&self, nibble: i8) -> H256 { 34 | let mask = (1u16 << nibble) - 1; 35 | let index = (self.hash_mask & mask).count_ones(); 36 | self.hashes[index as usize] 37 | } 38 | } 39 | 40 | pub(crate) fn marshal_node(n: &Node) -> Vec<u8> { 41 | let number_of_hashes = n.hashes.len() + usize::from(n.root_hash.is_some()); 42 | let buf_size = number_of_hashes * KECCAK_LENGTH + 6; 43 | let mut buf = Vec::<u8>::with_capacity(buf_size); 44 | 45 | buf.extend_from_slice(n.state_mask.to_be_bytes().as_slice()); 46 | buf.extend_from_slice(n.tree_mask.to_be_bytes().as_slice()); 47 | buf.extend_from_slice(n.hash_mask.to_be_bytes().as_slice()); 48 | 49 | if let Some(root_hash) = n.root_hash { 50 | buf.extend_from_slice(root_hash.as_bytes()); 51 | } 52 | 53 | for hash in &n.hashes { 54 | buf.extend_from_slice(hash.as_bytes()); 55 | } 56 | 57 | buf 58 | } 59 | 60 | pub(crate) fn unmarshal_node(v: &[u8]) -> Option<Node> { 61 | if v.len() % KECCAK_LENGTH != 6 { 62 | return None; 63 | } 64 | 65 | let state_mask = u16::from_be_bytes(v[0..2].try_into().unwrap()); 66 | let tree_mask = u16::from_be_bytes(v[2..4].try_into().unwrap()); 67 | let hash_mask = u16::from_be_bytes(v[4..6].try_into().unwrap()); 68 | let mut i = 6; 69 | 70 | let mut root_hash = None; 71 | if hash_mask.count_ones() as usize + 1 == v[6..].len() / KECCAK_LENGTH { 72 | root_hash = Some(H256::from_slice(&v[i..i + KECCAK_LENGTH])); 73 | i += KECCAK_LENGTH; 74 | } 75 | 76 | let num_hashes = v[i..].len() / KECCAK_LENGTH; 77 | let mut hashes = Vec::<H256>::with_capacity(num_hashes); 78 | for _ in 0..num_hashes { 79 | hashes.push(H256::from_slice(&v[i..i + KECCAK_LENGTH])); 80 | i += KECCAK_LENGTH; 81 | } 82 | 83 | Some(Node::new( 84 | state_mask, tree_mask, hash_mask, hashes, root_hash, 85 | )) 86 | } 87 | 88 | #[cfg(test)] 89 | mod tests { 90 | use super::*; 91 | use hex_literal::hex; 92 | 93 | #[test] 94 | fn node_marshalling() { 95 | let n = Node::new( 96 | 0xf607, 97 | 0x0005, 98 | 0x4004, 99 | vec![ 100 | hex!("90d53cd810cc5d4243766cd4451e7b9d14b736a1148b26b3baac7617f617d321").into(), 101 | hex!("cc35c964dda53ba6c0b87798073a9628dbc9cd26b5cce88eb69655a9c609caf1").into(), 102 | ], 103 | Some(hex!("aaaabbbb0006767767776fffffeee44444000005567645600000000eeddddddd").into()), 104 | ); 105 | 106 | // REQUIRE(std::bitset<16>(n.hash_mask()).count() == n.hashes().size()); 107 | 108 | assert_eq!(unmarshal_node(&marshal_node(&n)).unwrap(), n); 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/trie/prefix_set.rs: -------------------------------------------------------------------------------- 1 | use crate::trie::util::has_prefix; 2 | use bytes::Bytes; 3 | 4 | #[derive(Clone)] 5 | pub struct PrefixSet { 6 | keys: Vec<Bytes>, 7 | sorted: bool, 8 | index: usize, 9 | } 10 | 11 | impl Default for PrefixSet { 12 | fn default() -> Self { 13 | PrefixSet::new() 14 | } 15 | } 16 | 17 | impl PrefixSet { 18 | pub fn new() -> Self { 19 | Self { 20 | keys: Vec::new(), 21 | sorted: true, 22 | index: 0, 23 | } 24 | } 25 | 26 | fn sort(&mut self) { 27 | self.keys.sort(); 28 | self.keys.dedup(); 29 | self.sorted = true; 30 | } 31 | 32 | pub(crate) fn contains(&mut self, 
prefix: &[u8]) -> bool { 33 | if self.keys.is_empty() { 34 | return false; 35 | } 36 | 37 | if !self.sorted { 38 | self.sort(); 39 | } 40 | 41 | while self.index > 0 && self.keys[self.index] > prefix { 42 | self.index -= 1; 43 | } 44 | 45 | loop { 46 | let current = &self.keys[self.index]; 47 | 48 | if has_prefix(current, prefix) { 49 | break true; 50 | } 51 | 52 | if current > prefix { 53 | break false; 54 | } 55 | 56 | if self.index >= self.keys.len() - 1 { 57 | break false; 58 | } 59 | 60 | self.index += 1; 61 | } 62 | } 63 | 64 | pub fn insert(&mut self, key: &[u8]) { 65 | self.keys.push(Bytes::copy_from_slice(key)); 66 | self.sorted = false; 67 | } 68 | 69 | pub(crate) fn len(&self) -> usize { 70 | self.keys.len() 71 | } 72 | } 73 | 74 | #[cfg(test)] 75 | mod tests { 76 | use super::*; 77 | 78 | #[test] 79 | fn prefix_set() { 80 | let mut ps = PrefixSet::new(); 81 | assert!(!ps.contains(b"")); 82 | assert!(!ps.contains(b"a")); 83 | 84 | ps.insert(b"abc"); 85 | ps.insert(b"fg"); 86 | ps.insert(b"abc"); // duplicate 87 | ps.insert(b"ab"); 88 | 89 | assert!(ps.contains(b"")); 90 | assert!(ps.contains(b"a")); 91 | assert!(!ps.contains(b"aac")); 92 | assert!(ps.contains(b"ab")); 93 | assert!(ps.contains(b"abc")); 94 | assert!(!ps.contains(b"abcd")); 95 | assert!(!ps.contains(b"b")); 96 | assert!(ps.contains(b"f")); 97 | assert!(ps.contains(b"fg")); 98 | assert!(!ps.contains(b"fgk")); 99 | assert!(!ps.contains(b"fy")); 100 | assert!(!ps.contains(b"yyz")); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/trie/util.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | 3 | pub(crate) fn has_prefix(s: &[u8], prefix: &[u8]) -> bool { 4 | s.starts_with(prefix) 5 | } 6 | 7 | pub(crate) fn assert_subset(sub: u16, sup: u16) { 8 | assert_eq!(sub & sup, sub); 9 | } 10 | 11 | pub(crate) fn prefix_length(a: &[u8], b: &[u8]) -> usize { 12 | let len = cmp::min(a.len(), b.len()); 13 | for i in 0..len { 14 | if a[i] != b[i] { 15 | return i; 16 | } 17 | } 18 | len 19 | } 20 | -------------------------------------------------------------------------------- /src/trie/vector_root.rs: -------------------------------------------------------------------------------- 1 | use super::{hash_builder::unpack_nibbles, HashBuilder}; 2 | use crate::models::*; 3 | use bytes::{BufMut, BytesMut}; 4 | use fastrlp::Encodable; 5 | 6 | pub trait TrieEncode { 7 | fn trie_encode(&self, out: &mut dyn BufMut); 8 | } 9 | 10 | /// Lexicographic order for RLP-encoded integers is the same as their natural order, 11 | /// save for 0, which, due to its RLP encoding, should be placed between 0x7f and 0x80. 12 | const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { 13 | if i > 0x7f { 14 | i 15 | } else if i == 0x7f || i + 1 == len { 16 | 0 17 | } else { 18 | i + 1 19 | } 20 | } 21 | 22 | /// Trie root hash of RLP-encoded values, the keys are RLP-encoded integers. 23 | /// See Section 4.3.2. "Holistic Validity" of the Yellow Paper. 
24 | pub fn root_hash<T>(values: &[T]) -> H256 25 | where 26 | T: TrieEncode, 27 | { 28 | let mut index_rlp = BytesMut::new(); 29 | let mut value_rlp = BytesMut::new(); 30 | 31 | let mut hb = HashBuilder::<'static>::new(None); 32 | 33 | let iter_len = values.len(); 34 | 35 | for j in 0..values.len() { 36 | let index = adjust_index_for_rlp(j, iter_len); 37 | index_rlp.clear(); 38 | index.encode(&mut index_rlp); 39 | value_rlp.clear(); 40 | values[index].trie_encode(&mut value_rlp); 41 | 42 | hb.add_leaf(unpack_nibbles(&index_rlp), &value_rlp); 43 | } 44 | 45 | hb.compute_root_hash() 46 | } 47 | 48 | #[cfg(test)] 49 | mod tests { 50 | use super::*; 51 | use hex_literal::hex; 52 | 53 | #[test] 54 | fn empty_root_hash() { 55 | assert_eq!(root_hash::<Receipt>(&[]), EMPTY_ROOT) 56 | } 57 | 58 | #[test] 59 | fn hardcoded_root_hash() { 60 | assert_eq!(root_hash(&[ 61 | (21_000, vec![]), 62 | (42_000, vec![]), 63 | ( 64 | 65_092, 65 | vec![Log { 66 | address: hex!("8d12a197cb00d4747a1fe03395095ce2a5cc6819").into(), 67 | topics: vec![hex!("f341246adaac6f497bc2a656f546ab9e182111d630394f0c57c710a59a2cb567").into()], 68 | data: hex!("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043b2126e7a22e0c288dfb469e3de4d2c097f3ca0000000000000000000000000000000000000000000000001195387bce41fd4990000000000000000000000000000000000000000000000000000000000000000").to_vec().into(), 69 | }], 70 | ), 71 | ].into_iter().map(|(cumulative_gas_used, logs)| { 72 | Receipt { 73 | tx_type: TxType::Legacy, 74 | success: true, 75 | cumulative_gas_used, 76 | bloom: logs_bloom(&logs), 77 | logs, 78 | } 79 | }).collect::<Vec<_>>()), H256(hex!("7ea023138ee7d80db04eeec9cf436dc35806b00cc5fe8e5f611fb7cf1b35b177"))) 80 | } 81 | } 82 | --------------------------------------------------------------------------------
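To make the staged-sync API in src/stagedsync/stage.rs easier to follow, here is a minimal sketch of a custom stage implemented against that trait. The stage struct, its reverse-domain ID, and the file it would live in are hypothetical illustrations, not part of this repository; the sketch assumes the crate's `stagedsync::stage`, `kv::mdbx`, and `models` modules are importable as shown and that `anyhow` and `async-trait` are available as dependencies, and it only exercises items visible in the listings above (`StageId`, `StageInput`, `ExecOutput`, `UnwindInput`, `UnwindOutput`, `StageError`, `MdbxTransaction`).

// Hypothetical example stage; not part of the Akula source tree.
use akula::{kv::mdbx::*, models::*, stagedsync::stage::*};
use async_trait::async_trait;

#[derive(Debug)]
struct NoopStage;

#[async_trait]
impl<'db, E> Stage<'db, E> for NoopStage
where
    E: EnvironmentKind,
{
    fn id(&self) -> StageId {
        // Reverse-domain prefix, as the trait docs recommend.
        StageId("com.example.noop")
    }

    async fn execute<'tx>(
        &mut self,
        _tx: &'tx mut MdbxTransaction<'db, RW, E>,
        input: StageInput,
    ) -> Result<ExecOutput, StageError>
    where
        'db: 'tx,
    {
        // Do no work: just report that we caught up with the previous stage,
        // mirroring how the Finish stage computes its progress.
        let stage_progress = input
            .previous_stage
            .map(|(_, block)| block)
            .unwrap_or(BlockNumber(0));

        Ok(ExecOutput::Progress {
            stage_progress,
            done: true,
            reached_tip: true,
        })
    }

    async fn unwind<'tx>(
        &mut self,
        _tx: &'tx mut MdbxTransaction<'db, RW, E>,
        input: UnwindInput,
    ) -> anyhow::Result<UnwindOutput>
    where
        'db: 'tx,
    {
        // Nothing was written by execute, so unwinding only acknowledges the target block.
        Ok(UnwindOutput {
            stage_progress: input.unwind_to,
        })
    }
}

A stage like this would be scheduled by the staged-sync driver alongside the built-in stages shown above (BlockHashes, TotalGasIndex, Finish, and so on); the driver can then persist the returned progress through `StageId::save_progress` into `tables::SyncStage`, which is the same table `StageId::get_progress` reads back to populate `StageInput::stage_progress` on the next run.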