├── .dockerignore ├── .editorconfig ├── .github ├── CODEOWNERS ├── pull_request_template.md └── workflows │ ├── checks.yaml │ ├── checks_docker.yaml │ └── release.yaml ├── .gitignore ├── .gitmodules ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE-APACHE ├── LICENSE-MIT ├── Makefile ├── README.md ├── SECURITY.md ├── config-backtest-example.toml ├── config-live-example.toml ├── config-optimism-local.toml ├── config-playground.toml ├── crates ├── eth-sparse-mpt │ ├── Cargo.toml │ ├── README.md │ ├── benches │ │ ├── trie_do_bench.rs │ │ ├── trie_example_bench.rs │ │ ├── trie_insert_bench.rs │ │ └── trie_nodes_benches.rs │ ├── src │ │ ├── lib.rs │ │ ├── test_utils.rs │ │ ├── utils.rs │ │ ├── v1 │ │ │ ├── mod.rs │ │ │ ├── reth_sparse_trie │ │ │ │ ├── change_set.rs │ │ │ │ ├── hash.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── shared_cache.rs │ │ │ │ └── trie_fetcher │ │ │ │ │ └── mod.rs │ │ │ └── sparse_mpt │ │ │ │ ├── diff_trie │ │ │ │ ├── mod.rs │ │ │ │ ├── nodes.rs │ │ │ │ └── tests.rs │ │ │ │ ├── fixed_trie.rs │ │ │ │ └── mod.rs │ │ └── v2 │ │ │ ├── fetch.rs │ │ │ ├── mod.rs │ │ │ └── trie │ │ │ ├── mod.rs │ │ │ ├── proof_store.rs │ │ │ └── tests.rs │ └── test_data │ │ ├── README.md │ │ ├── changeset.json.gz │ │ ├── failure_case_0.json.gz │ │ ├── multiproof_0.json.gz │ │ ├── multiproof_1.json.gz │ │ └── prepared_tries │ │ ├── example0 │ │ ├── change_set.json.gz │ │ └── tries.json.gz │ │ ├── example1 │ │ ├── change_set.json.gz │ │ └── tries.json.gz │ │ ├── example2 │ │ ├── change_set.json.gz │ │ └── tries.json.gz │ │ ├── example3 │ │ ├── change_set.json.gz │ │ └── tries.json.gz │ │ ├── example4 │ │ ├── change_set.json.gz │ │ └── tries.json.gz │ │ ├── example5 │ │ ├── change_set.json.gz │ │ └── tries.json.gz │ │ ├── example6 │ │ ├── change_set.json.gz │ │ └── tries.json.gz │ │ └── example7 │ │ ├── change_set.json.gz │ │ └── tries.json.gz ├── rbuilder │ ├── Cargo.toml │ ├── benches │ │ ├── bench_main.rs │ │ ├── benchmarks │ │ │ ├── mev_boost.rs │ │ │ 
├── mod.rs │ │ │ └── txpool_fetcher.rs │ │ └── blob_data │ │ │ └── blob1.json │ ├── build.rs │ └── src │ │ ├── backtest │ │ ├── backtest_build_range.rs │ │ ├── build_block │ │ │ ├── backtest_build_block.rs │ │ │ ├── landed_block_from_db.rs │ │ │ ├── mod.rs │ │ │ └── synthetic_orders.rs │ │ ├── execute.rs │ │ ├── fetch │ │ │ ├── backtest_fetch.rs │ │ │ ├── data_source.rs │ │ │ ├── flashbots_db.rs │ │ │ ├── mempool.rs │ │ │ ├── mev_boost.rs │ │ │ └── mod.rs │ │ ├── mod.rs │ │ ├── redistribute │ │ │ ├── cli │ │ │ │ ├── csv_output.rs │ │ │ │ └── mod.rs │ │ │ ├── mod.rs │ │ │ └── redistribution_algo.rs │ │ ├── restore_landed_orders │ │ │ ├── find_landed_orders.rs │ │ │ ├── mod.rs │ │ │ └── resim_landed_block.rs │ │ ├── results_store.rs │ │ └── store.rs │ │ ├── beacon_api_client │ │ └── mod.rs │ │ ├── bin │ │ ├── backtest-build-block.rs │ │ ├── backtest-build-range.rs │ │ ├── backtest-build-synthetic-block.rs │ │ ├── backtest-distribute.rs │ │ ├── backtest-fetch.rs │ │ ├── debug-bench-machine.rs │ │ ├── dummy-builder.rs │ │ ├── misc-relays-slot.rs │ │ ├── rbuilder.rs │ │ └── validate-config.rs │ │ ├── building │ │ ├── block_orders │ │ │ ├── mod.rs │ │ │ ├── multi_share_bundle_merger.rs │ │ │ ├── order_dumper.rs │ │ │ ├── order_priority.rs │ │ │ ├── prioritized_order_store.rs │ │ │ ├── share_bundle_merger.rs │ │ │ ├── test_context.rs │ │ │ └── test_data_generator.rs │ │ ├── builders │ │ │ ├── block_building_helper.rs │ │ │ ├── mock_block_building_helper.rs │ │ │ ├── mod.rs │ │ │ ├── ordering_builder.rs │ │ │ └── parallel_builder │ │ │ │ ├── block_building_result_assembler.rs │ │ │ │ ├── conflict_resolvers.rs │ │ │ │ ├── conflict_resolving_pool.rs │ │ │ │ ├── conflict_task_generator.rs │ │ │ │ ├── groups.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── order_intake_store.rs │ │ │ │ ├── readme.md │ │ │ │ ├── results_aggregator.rs │ │ │ │ ├── simulation_cache.rs │ │ │ │ └── task.rs │ │ ├── built_block_trace.rs │ │ ├── cached_reads.rs │ │ ├── conflict.rs │ │ ├── evm.rs │ │ ├── 
evm_inspector.rs │ │ ├── fmt.rs │ │ ├── mod.rs │ │ ├── order_commit.rs │ │ ├── payout_tx.rs │ │ ├── precompile_cache.rs │ │ ├── sim.rs │ │ ├── testing │ │ │ ├── bundle_tests │ │ │ │ ├── mod.rs │ │ │ │ └── setup.rs │ │ │ ├── contracts.json │ │ │ ├── evm_inspector_tests │ │ │ │ ├── mod.rs │ │ │ │ └── setup.rs │ │ │ ├── mod.rs │ │ │ └── test_chain_state.rs │ │ ├── tracers.rs │ │ └── tx_sim_cache │ │ │ ├── evm_db.rs │ │ │ ├── mod.rs │ │ │ └── result_store.rs │ │ ├── integration │ │ ├── mod.rs │ │ ├── playground.rs │ │ ├── simple.rs │ │ └── test_data │ │ │ ├── blocklist.json │ │ │ ├── config-playground-http-blocklist.toml │ │ │ └── config-playground.toml │ │ ├── lib.rs │ │ ├── live_builder │ │ ├── base_config.rs │ │ ├── block_list_provider.rs │ │ ├── block_output │ │ │ ├── bid_observer.rs │ │ │ ├── bid_observer_multiplexer.rs │ │ │ ├── bid_value_source │ │ │ │ ├── best_bid_sync_source.rs │ │ │ │ ├── interfaces.rs │ │ │ │ ├── mod.rs │ │ │ │ └── null_bid_value_source.rs │ │ │ ├── bidding │ │ │ │ ├── interfaces.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── sequential_sealer_bid_maker.rs │ │ │ │ ├── true_block_value_bidder.rs │ │ │ │ └── wallet_balance_watcher.rs │ │ │ ├── block_sealing_bidder_factory.rs │ │ │ ├── mod.rs │ │ │ └── relay_submit.rs │ │ ├── building │ │ │ ├── mod.rs │ │ │ └── unfinished_block_building_sink_muxer.rs │ │ ├── cli.rs │ │ ├── config.rs │ │ ├── mod.rs │ │ ├── order_input │ │ │ ├── mempool_txs_detector.rs │ │ │ ├── mod.rs │ │ │ ├── order_replacement_manager.rs │ │ │ ├── order_sink.rs │ │ │ ├── orderpool.rs │ │ │ ├── replaceable_order_sink.rs │ │ │ ├── rpc_server.rs │ │ │ └── txpool_fetcher.rs │ │ ├── payload_events │ │ │ ├── mod.rs │ │ │ ├── payload_source.rs │ │ │ └── relay_epoch_cache.rs │ │ ├── simulation │ │ │ ├── mod.rs │ │ │ ├── sim_worker.rs │ │ │ └── simulation_job.rs │ │ ├── testdata │ │ │ └── config_with_relay_override.toml │ │ └── watchdog.rs │ │ ├── mev_boost │ │ ├── error.rs │ │ ├── fake_mev_boost_relay.rs │ │ ├── mod.rs │ │ ├── rpc.rs │ │ ├── 
sign_payload.rs │ │ └── submission.rs │ │ ├── primitives │ │ ├── fmt.rs │ │ ├── mev_boost.rs │ │ ├── mod.rs │ │ ├── order_builder.rs │ │ ├── order_statistics.rs │ │ ├── serialize.rs │ │ └── test_data_generator.rs │ │ ├── provider │ │ ├── ipc_state_provider.rs │ │ ├── mod.rs │ │ ├── reth_prov.rs │ │ └── state_provider_factory_from_provider_factory.rs │ │ ├── roothash │ │ ├── mod.rs │ │ └── prefetcher.rs │ │ ├── telemetry │ │ ├── metrics │ │ │ ├── mod.rs │ │ │ ├── scope_meter.rs │ │ │ └── tracing_metrics.rs │ │ ├── metrics_macros │ │ │ ├── Cargo.toml │ │ │ └── src │ │ │ │ └── lib.rs │ │ ├── mod.rs │ │ └── servers │ │ │ ├── full.rs │ │ │ ├── mod.rs │ │ │ └── redacted.rs │ │ ├── test_utils │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ │ └── utils │ │ ├── bls.rs │ │ ├── build_info.rs │ │ ├── constants.rs │ │ ├── error_storage.rs │ │ ├── fmt.rs │ │ ├── mod.rs │ │ ├── noncer.rs │ │ ├── provider_factory_reopen.rs │ │ ├── provider_head_state.rs │ │ ├── receipts.rs │ │ ├── reconnect.rs │ │ ├── test_data_generator.rs │ │ ├── test_utils.rs │ │ ├── tracing.rs │ │ └── tx_signer.rs ├── reth-rbuilder │ ├── Cargo.toml │ └── src │ │ └── main.rs ├── sysperf │ ├── Cargo.toml │ └── src │ │ └── lib.rs └── test-relay │ ├── Cargo.toml │ ├── README.md │ └── src │ ├── main.rs │ ├── metrics.rs │ ├── relay.rs │ └── validation_api_client.rs ├── docs ├── CONFIG.md ├── LIVEBUILDER_DATAFLOW.md ├── LOGS_PRIVACY.md └── REORG_LOSSES.md ├── mev-test-contract ├── Makefile ├── foundry.toml ├── src │ └── MevTest.sol └── test │ └── MevTest.t.sol ├── rust-toolchain.toml ├── rustfmt.toml ├── scripts └── ci │ ├── benchmark-in-ci.sh │ ├── criterion-get-changes.py │ ├── criterion-prettify-report.sh │ ├── criterion-update-html.py │ ├── download-op-reth.sh │ ├── env-vars.sh │ └── templates │ ├── benchmark-pr-comment.md │ ├── benchmark-summary.md │ ├── partials │ └── index-changes.html │ ├── report-criterion-benchmark.html │ ├── report-footer.html │ ├── report-head.html │ ├── report-index.html │ └── 
report-styles.css └── zepter.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | /cargo 2 | /data 3 | /mev-test-contract/cache 4 | /mev-test-contract/out 5 | /target 6 | /scripts/benchmark-results.* 7 | /test/ 8 | /integration_logs 9 | Dockerfile 10 | 11 | # editors 12 | .code 13 | .idea 14 | .vscode 15 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig helps developers define and maintain consistent 2 | # coding styles between different editors and IDEs 3 | # editorconfig.org 4 | 5 | root = true 6 | 7 | [*] 8 | end_of_line = lf 9 | charset = utf-8 10 | trim_trailing_whitespace = true 11 | insert_final_newline = true 12 | indent_style = space 13 | indent_size = 4 14 | 15 | [*.rs] 16 | max_line_length = 100 17 | 18 | [*.{yml,yaml}] 19 | indent_size = 2 20 | 21 | [*.md] 22 | # double whitespace at end of line 23 | # denotes a line break in Markdown 24 | trim_trailing_whitespace = false 25 | 26 | [Makefile] 27 | indent_style = tab 28 | 29 | 30 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in 2 | # the repo. Unless a later match takes precedence, 3 | # they will be requested for review when someone opens a pull request. 
4 | * @dvush @ZanCorDX 5 | /crates/ @dvush @ZanCorDX 6 | /.github/ @dvush @ZanCorDX @metachris @sukoneck 7 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## 📝 Summary 2 | 3 | 4 | 5 | ## 💡 Motivation and Context 6 | 7 | 8 | 9 | --- 10 | 11 | ## ✅ I have completed the following steps: 12 | 13 | * [ ] Run `make lint` 14 | * [ ] Run `make test` 15 | * [ ] Added tests (if applicable) 16 | -------------------------------------------------------------------------------- /.github/workflows/checks.yaml: -------------------------------------------------------------------------------- 1 | name: Checks 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | merge_group: 7 | push: 8 | branches: [develop] 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | lint_and_test: 15 | name: Lint and test 16 | runs-on: warp-ubuntu-latest-x64-32x 17 | env: 18 | # Set features for the Makefile 19 | FEATURES: ${{ matrix.features }} 20 | strategy: 21 | matrix: 22 | toolchain: 23 | - stable 24 | features: 25 | - "" 26 | steps: 27 | - name: Checkout sources 28 | uses: actions/checkout@v4 29 | 30 | # https://github.com/dtolnay/rust-toolchain 31 | - name: Setup rust toolchain 32 | uses: dtolnay/rust-toolchain@stable 33 | with: 34 | toolchain: ${{ matrix.toolchain }} 35 | 36 | # https://github.com/swatinem/rust-cache 37 | - name: Run Swatinem/rust-cache@v2 38 | uses: Swatinem/rust-cache@v2 39 | with: 40 | cache-on-failure: true 41 | 42 | # https://github.com/Mozilla-Actions/sccache-action 43 | - name: Run sccache-action 44 | uses: mozilla-actions/sccache-action@v0.0.9 45 | 46 | - name: Set sccache env vars 47 | run: | 48 | echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV 49 | echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV 50 | 51 | - name: Install Foundry toolchain 52 | uses: foundry-rs/foundry-toolchain@v1 53 | with: 54 | version: 
nightly 55 | 56 | - name: Install native dependencies 57 | run: sudo apt-get update && sudo apt-get install -y libsqlite3-dev 58 | 59 | - name: Lint 60 | run: make lint 61 | 62 | - name: Test 63 | run: make test 64 | 65 | - name: Validate config files 66 | run: make validate-config 67 | 68 | integration: 69 | name: Integration tests 70 | runs-on: warp-ubuntu-latest-x64-32x 71 | strategy: 72 | matrix: 73 | toolchain: 74 | - stable 75 | features: 76 | - "" 77 | steps: 78 | - name: Checkout sources 79 | uses: actions/checkout@v4 80 | 81 | # https://github.com/dtolnay/rust-toolchain 82 | - name: Setup rust toolchain 83 | uses: dtolnay/rust-toolchain@stable 84 | with: 85 | toolchain: ${{ matrix.toolchain }} 86 | 87 | - name: Download builder playground 88 | uses: flashbots/flashbots-toolchain@v0.1 89 | with: 90 | builder-playground: v0.1.3 91 | 92 | # https://github.com/swatinem/rust-cache 93 | - name: Run Swatinem/rust-cache@v2 94 | uses: Swatinem/rust-cache@v2 95 | with: 96 | cache-on-failure: true 97 | 98 | # https://github.com/Mozilla-Actions/sccache-action 99 | - name: Run sccache-action 100 | uses: mozilla-actions/sccache-action@v0.0.9 101 | 102 | - name: Set sccache env vars 103 | run: | 104 | echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV 105 | echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV 106 | 107 | - name: Build the rbuilder 108 | run: cargo build --features="${{ matrix.features }}" 109 | 110 | - name: Run the playground 111 | run: builder-playground & 112 | 113 | - name: Run integration tests with flags 114 | run: cargo test --features="${{ matrix.features }}" --package rbuilder --lib -- integration --test-threads=1 115 | env: 116 | PLAYGROUND: TRUE 117 | 118 | - name: Aggregate playground logs 119 | # This steps fails if the test fails early and the playground logs dir has not been created 120 | if: ${{ failure() }} 121 | run: | 122 | mkdir /tmp/playground-logs 123 | mv $HOME/.playground/devnet/logs /tmp/playground-logs 124 | mv integration_logs 
/tmp/playground-logs 125 | 126 | - name: Archive playground logs 127 | uses: actions/upload-artifact@v4 128 | if: ${{ failure() }} 129 | with: 130 | name: playground-logs 131 | path: /tmp/playground-logs 132 | retention-days: 5 133 | -------------------------------------------------------------------------------- /.github/workflows/checks_docker.yaml: -------------------------------------------------------------------------------- 1 | name: Docker Build 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | merge_group: 7 | push: 8 | branches: [develop] 9 | 10 | jobs: 11 | build-docker: 12 | name: Build Docker image 13 | runs-on: warp-ubuntu-latest-x64-32x 14 | 15 | steps: 16 | - name: Checkout sources 17 | uses: actions/checkout@v4 18 | 19 | - name: Docker QEMU 20 | uses: docker/setup-qemu-action@v3 21 | 22 | - name: Docker Buildx 23 | uses: docker/setup-buildx-action@v3 24 | 25 | - name: Docker Build 26 | uses: docker/build-push-action@v5 27 | with: 28 | cache-from: type=gha 29 | cache-to: type=gha,mode=max 30 | platforms: linux/amd64 31 | context: . 
32 | push: false 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /cargo 2 | /data 3 | /mev-test-contract/cache 4 | /mev-test-contract/out 5 | /target 6 | /scripts/benchmark-results.* 7 | /test/ 8 | /integration_logs 9 | genesis.json 10 | /op-reth 11 | 12 | # editors 13 | .code 14 | .idea 15 | .vscode 16 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "mev-test-contract/lib/forge-std"] 2 | path = mev-test-contract/lib/forge-std 3 | url = https://github.com/foundry-rs/forge-std.git 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base container (with sccache and cargo-chef) 3 | # 4 | # - https://github.com/mozilla/sccache 5 | # - https://github.com/LukeMathWalker/cargo-chef 6 | # 7 | # Based on https://depot.dev/blog/rust-dockerfile-best-practices 8 | # 9 | ARG FEATURES 10 | ARG RBUILDER_BIN="rbuilder" 11 | 12 | FROM rust:1.85 AS base 13 | ARG TARGETPLATFORM 14 | 15 | RUN apt-get update \ 16 | && apt-get install -y clang libclang-dev 17 | 18 | RUN rustup component add clippy rustfmt 19 | 20 | 21 | # We manually download sccache, because compilation is resource-intensive 22 | RUN set -eux; \ 23 | case "$TARGETPLATFORM" in \ 24 | "linux/amd64") ARCH_TAG="x86_64-unknown-linux-musl" ;; \ 25 | "linux/arm64") ARCH_TAG="aarch64-unknown-linux-musl" ;; \ 26 | *) \ 27 | echo "Unsupported platform: $TARGETPLATFORM"; \ 28 | exit 1 \ 29 | ;; \ 30 | esac; \ 31 | wget -O /tmp/sccache.tar.gz \ 32 | "https://github.com/mozilla/sccache/releases/download/v0.8.2/sccache-v0.8.2-${ARCH_TAG}.tar.gz"; \ 33 | tar -xf /tmp/sccache.tar.gz -C /tmp; \ 34 | mv 
/tmp/sccache-v0.8.2-${ARCH_TAG}/sccache /usr/local/bin/sccache; \ 35 | chmod +x /usr/local/bin/sccache; \ 36 | rm -rf /tmp/sccache.tar.gz /tmp/sccache-v0.8.2-${ARCH_TAG} 37 | 38 | RUN cargo install cargo-chef --version ^0.1 39 | 40 | 41 | ENV CARGO_HOME=/usr/local/cargo 42 | ENV RUSTC_WRAPPER=sccache 43 | ENV SCCACHE_DIR=/sccache 44 | 45 | # 46 | # Planner container (running "cargo chef prepare") 47 | # 48 | FROM base AS planner 49 | WORKDIR /app 50 | COPY . . 51 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 52 | --mount=type=cache,target=/usr/local/cargo/git \ 53 | --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ 54 | cargo chef prepare --recipe-path recipe.json 55 | 56 | # 57 | # Builder container (running "cargo chef cook" and "cargo build --release") 58 | # 59 | FROM base AS builder 60 | WORKDIR /app 61 | COPY --from=planner /app/recipe.json recipe.json 62 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 63 | --mount=type=cache,target=/usr/local/cargo/git \ 64 | --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ 65 | cargo chef cook --release --recipe-path recipe.json 66 | COPY . . 
67 | 68 | 69 | FROM builder AS rbuilder 70 | ARG RBUILDER_BIN 71 | ARG FEATURES 72 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 73 | --mount=type=cache,target=/usr/local/cargo/git \ 74 | --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ 75 | cargo build --release --features="$FEATURES" --package=${RBUILDER_BIN} 76 | 77 | FROM builder AS test-relay 78 | ARG FEATURES 79 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 80 | --mount=type=cache,target=/usr/local/cargo/git \ 81 | --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ 82 | cargo build --release --features="$FEATURES" --package=test-relay 83 | 84 | 85 | # Runtime container for test-relay 86 | FROM gcr.io/distroless/cc-debian12 AS test-relay-runtime 87 | WORKDIR /app 88 | COPY --from=test-relay /app/target/release/test-relay /app/test-relay 89 | ENTRYPOINT ["/app/test-relay"] 90 | 91 | # Runtime container for rbuilder 92 | FROM gcr.io/distroless/cc-debian12 AS rbuilder-runtime 93 | ARG RBUILDER_BIN 94 | WORKDIR /app 95 | COPY --from=rbuilder /app/target/release/${RBUILDER_BIN} /app/rbuilder 96 | ENTRYPOINT ["/app/rbuilder"] 97 | 98 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023-2024 rbuilder Contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Heavily inspired by Lighthouse: https://github.com/sigp/lighthouse/blob/stable/Makefile 2 | # and Reth: https://github.com/paradigmxyz/reth/blob/main/Makefile 3 | .DEFAULT_GOAL := help 4 | 5 | GIT_VER ?= $(shell git describe --tags --always --dirty="-dev") 6 | GIT_TAG ?= $(shell git describe --tags --abbrev=0) 7 | 8 | FEATURES ?= 9 | 10 | ##@ Help 11 | 12 | .PHONY: help 13 | help: ## Display this help. 14 | @awk 'BEGIN {FS = ":.*##"; printf "Usage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 15 | 16 | .PHONY: v 17 | v: ## Show the current version 18 | @echo "Version: ${GIT_VER}" 19 | 20 | ##@ Build 21 | 22 | .PHONY: clean 23 | clean: ## Clean up 24 | cargo clean 25 | 26 | .PHONY: build 27 | build: ## Build (debug version) 28 | cargo build --features "$(FEATURES)" 29 | 30 | .PHONY: docker-image-rbuilder 31 | docker-image-rbuilder: ## Build a rbuilder Docker image 32 | docker build --platform linux/amd64 --target rbuilder-runtime --build-arg FEATURES="$(FEATURES)" . 
-t rbuilder 33 | 34 | .PHONY: docker-image-test-relay 35 | docker-image-test-relay: ## Build a test relay Docker image 36 | docker build --platform linux/amd64 --target test-relay-runtime --build-arg FEATURES="$(FEATURES)" . -t test-relay 37 | 38 | ##@ Dev 39 | 40 | .PHONY: lint 41 | lint: ## Run the linters 42 | cargo fmt -- --check 43 | cargo clippy --features "$(FEATURES)" -- -D warnings 44 | 45 | .PHONY: test 46 | test: ## Run the tests for rbuilder 47 | cargo test --verbose --features "$(FEATURES)" 48 | 49 | .PHONY: lt 50 | lt: lint test ## Run "lint" and "test" 51 | 52 | .PHONY: fmt 53 | fmt: ## Format the code 54 | cargo fmt 55 | cargo fix --allow-staged 56 | cargo clippy --features "$(FEATURES)" --fix --allow-staged 57 | 58 | .PHONY: bench 59 | bench: ## Run benchmarks 60 | cargo bench --features "$(FEATURES)" --workspace 61 | 62 | .PHONY: bench-report-open 63 | bench-report-open: ## Open last benchmark report in the browser 64 | open "target/criterion/report/index.html" 65 | 66 | .PHONY: bench-in-ci 67 | bench-in-ci: ## Run benchmarks in CI (adds timestamp and version to the report, customizes Criterion output) 68 | ./scripts/ci/benchmark-in-ci.sh 69 | 70 | .PHONY: bench-clean 71 | bench-clean: ## Remove previous benchmark data 72 | rm -rf target/criterion 73 | rm -rf target/benchmark-in-ci 74 | rm -rf target/benchmark-html-dev 75 | 76 | .PHONY: bench-prettify 77 | bench-prettify: ## Prettifies the latest Criterion report 78 | rm -rf target/benchmark-html-dev 79 | ./scripts/ci/criterion-prettify-report.sh target/criterion target/benchmark-html-dev 80 | @echo "\nopen target/benchmark-html-dev/report/index.html" 81 | 82 | .PHONY: validate-config 83 | validate-config: ## Validate the correctness of the configuration files 84 | @for CONFIG in $(shell ls config-*.toml); do \ 85 | cargo run --bin validate-config -- --config $$CONFIG; \ 86 | done 87 | -------------------------------------------------------------------------------- /SECURITY.md: 
-------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | Contact: security@flashbots.net -------------------------------------------------------------------------------- /config-backtest-example.toml: -------------------------------------------------------------------------------- 1 | log_level = "info,rbuilder=debug" 2 | 3 | chain = "mainnet" 4 | reth_datadir = "/mnt/data/reth" 5 | 6 | backtest_fetch_eth_rpc_url = "http://127.0.0.1:8545" 7 | backtest_fetch_eth_rpc_parallel = 400 8 | backtest_fetch_output_file = "~/.rbuilder/backtest/main.sqlite" 9 | backtest_fetch_mempool_data_dir = "~/.rbuilder/mempool-data" 10 | 11 | sbundle_mergeable_signers = [] 12 | 13 | backtest_builders = ["mp-ordering", "mgp-ordering"] 14 | 15 | [[builders]] 16 | name = "mgp-ordering" 17 | algo = "ordering-builder" 18 | discard_txs = true 19 | sorting = "mev-gas-price" 20 | failed_order_retries = 1 21 | drop_failed_orders = true 22 | 23 | [[builders]] 24 | name = "mp-ordering" 25 | algo = "ordering-builder" 26 | discard_txs = true 27 | sorting = "max-profit" 28 | failed_order_retries = 1 29 | drop_failed_orders = true 30 | 31 | [[builders]] 32 | name = "parallel" 33 | algo = "parallel-builder" 34 | discard_txs = true 35 | num_threads = 25 36 | safe_sorting_only = false -------------------------------------------------------------------------------- /config-live-example.toml: -------------------------------------------------------------------------------- 1 | log_json = true 2 | log_level = "info,rbuilder=debug" 3 | redacted_telemetry_server_port = 6061 4 | redacted_telemetry_server_ip = "0.0.0.0" 5 | full_telemetry_server_port = 6060 6 | full_telemetry_server_ip = "0.0.0.0" 7 | 8 | chain = "mainnet" 9 | reth_datadir = "/mnt/data/reth" 10 | 11 | coinbase_secret_key = "env:COINBASE_SECRET_KEY" 12 | relay_secret_key = "env:RELAY_SECRET_KEY" 13 | optimistic_relay_secret_key = 
"env:OPTIMISTIC_RELAY_SECRET_KEY" 14 | 15 | # cl_node_url can be a single value, array of values, or passed by an environment variables with values separated with a comma 16 | # cl_node_url = "http://localhost:3500" 17 | cl_node_url = ["env:CL_NODE_URL"] 18 | jsonrpc_server_port = 8645 19 | jsonrpc_server_ip = "0.0.0.0" 20 | el_node_ipc_path = "/tmp/reth.ipc" 21 | extra_data = "⚡🤖" 22 | 23 | blocklist_file_path = "./blocklist.json" 24 | 25 | ignore_cancellable_orders = true 26 | 27 | # watchdog_timeout_sec = 600 28 | # simulation_threads = 4 29 | 30 | # genesis_fork_version = "0x00112233" 31 | 32 | sbundle_mergeable_signers = [] 33 | live_builders = ["mp-ordering", "mgp-ordering", "parallel"] 34 | 35 | enabled_relays = ["flashbots"] 36 | 37 | # This can be used with test-relay 38 | # [[relays]] 39 | # name = "flashbots_test" 40 | # url = "http://localhost:80" 41 | # priority = 0 42 | 43 | [[builders]] 44 | name = "mgp-ordering" 45 | algo = "ordering-builder" 46 | discard_txs = true 47 | sorting = "mev-gas-price" 48 | failed_order_retries = 1 49 | drop_failed_orders = true 50 | 51 | [[builders]] 52 | name = "mp-ordering" 53 | algo = "ordering-builder" 54 | discard_txs = true 55 | sorting = "max-profit" 56 | failed_order_retries = 1 57 | drop_failed_orders = true 58 | 59 | [[builders]] 60 | name = "parallel" 61 | algo = "parallel-builder" 62 | discard_txs = true 63 | num_threads = 25 64 | safe_sorting_only = false -------------------------------------------------------------------------------- /config-optimism-local.toml: -------------------------------------------------------------------------------- 1 | log_json = false 2 | log_level = "info,rbuilder=debug" 3 | redacted_telemetry_server_port = 6071 4 | redacted_telemetry_server_ip = "0.0.0.0" 5 | full_telemetry_server_port = 6070 6 | full_telemetry_server_ip = "0.0.0.0" 7 | 8 | chain = "$HOME/grimoire/optimism/.devnet/genesis-l2.json" 9 | reth_datadir = "$HOME/.playground/devnet/data_reth" 10 | relay_secret_key = 
"5eae315483f028b5cdd5d1090ff0c7618b18737ea9bf3c35047189db22835c48" 11 | coinbase_secret_key = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 12 | 13 | cl_node_url = ["http://localhost:3500"] 14 | jsonrpc_server_port = 8645 15 | jsonrpc_server_ip = "0.0.0.0" 16 | el_node_ipc_path = "/tmp/reth.ipc" 17 | extra_data = "⚡🤖" 18 | 19 | ignore_cancellable_orders = true 20 | 21 | sbundle_mergeable_signers = [] 22 | live_builders = ["mp-ordering"] 23 | 24 | enabled_relays = ["custom"] 25 | 26 | [[relays]] 27 | name = "custom" 28 | url = "http://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@localhost:5555" 29 | priority = 0 30 | use_ssz_for_submit = false 31 | use_gzip_for_submit = false 32 | 33 | [[builders]] 34 | name = "mgp-ordering" 35 | algo = "ordering-builder" 36 | discard_txs = true 37 | sorting = "mev-gas-price" 38 | failed_order_retries = 1 39 | drop_failed_orders = true 40 | 41 | -------------------------------------------------------------------------------- /config-playground.toml: -------------------------------------------------------------------------------- 1 | 2 | chain = "$HOME/.playground/devnet/genesis.json" 3 | reth_datadir = "$HOME/.playground/devnet/data_reth" 4 | relay_secret_key = "5eae315483f028b5cdd5d1090ff0c7618b18737ea9bf3c35047189db22835c48" 5 | el_node_ipc_path = "$HOME/.playground/devnet/reth.ipc" 6 | live_builders = ["mgp-ordering"] 7 | enabled_relays = ["playground"] 8 | log_level = "info,rbuilder=debug" 9 | coinbase_secret_key = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 10 | 11 | root_hash_use_sparse_trie=true 12 | root_hash_compare_sparse_trie=false 13 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eth-sparse-mpt" 3 | version.workspace = true 4 | edition.workspace = 
true 5 | rust-version.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | 11 | [dependencies] 12 | thiserror = "1.0.61" 13 | serde = { version = "1.0.203", features = ["derive"] } 14 | serde_json = "1.0.117" 15 | serde_with = "3.9.0" 16 | rustc-hash = "2.0.0" 17 | rayon = "1.10.0" 18 | smallvec = "1.13.2" 19 | 20 | tracing.workspace = true 21 | 22 | # reth 23 | reth-errors.workspace = true 24 | reth-execution-errors.workspace = true 25 | reth-trie.workspace = true 26 | reth-trie-db.workspace = true 27 | reth-provider.workspace = true 28 | 29 | # revm 30 | revm.workspace = true 31 | 32 | # alloy 33 | alloy-primitives.workspace = true 34 | alloy-rlp.workspace = true 35 | alloy-trie.workspace = true 36 | 37 | # other 38 | parking_lot.workspace = true 39 | eyre = { workspace = true } 40 | arrayvec = "0.7.6" 41 | dashmap = { version = "6.1.0", features = ["rayon"] } 42 | 43 | # test deps 44 | hash-db = { version = "0.15.2", optional = true } 45 | triehash = { version = "0.8.4", optional = true } 46 | flate2 = { workspace = true, optional = true } 47 | 48 | [features] 49 | benchmark-utils = ["dep:hash-db", "dep:triehash", "dep:flate2"] 50 | 51 | [dev-dependencies] 52 | criterion = { version = "0.4", features = ["html_reports"] } 53 | rand = { version = "0.8.5", features = ["small_rng"] } 54 | proptest = "1.5.0" 55 | eth-sparse-mpt = { path = ".", features = ["benchmark-utils"] } 56 | 57 | [[bench]] 58 | name = "trie_insert_bench" 59 | harness = false 60 | 61 | [[bench]] 62 | name = "trie_nodes_benches" 63 | harness = false 64 | 65 | [[bench]] 66 | name = "trie_example_bench" 67 | harness = false 68 | 69 | [[bench]] 70 | name = "trie_do_bench" 71 | harness = false 72 | 73 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/README.md: -------------------------------------------------------------------------------- 1 | This library is useful when you need 
to calculate Ethereum root hash many times on top of the same parent block using reth database. 2 | 3 | To use this, for each parent block: 4 | * create `SparseTrieSharedCache` 5 | * call `calculate_root_hash_with_sparse_trie` with the given cache, reth db view and execution outcome. 6 | 7 | 8 | ### Speedup example. 9 | 10 | * block 20821340 11 | * machine with 64 cores, Samsung 980Pro SSD 12 | 13 | We calculate root hash of some specific blocks in a loop using the same changes. 14 | This implementation caches only disk access, all storage and main trie hashes are calculated fully on each iteration. 15 | 16 | ``` 17 | reth parallel root hash: 18 | 19 | first iteration : 220 ms 20 | next iterations: 140 ms (median, stable) 21 | 22 | eth-sparse-mpt: 23 | 24 | first iteration : 225 ms 25 | next iterations: 5.1 ms (median, stable) 26 | ``` 27 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/benches/trie_example_bench.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use eth_sparse_mpt::{ 4 | test_utils::deserialize_from_json_gzip, 5 | v1::reth_sparse_trie::{change_set::ETHTrieChangeSet, hash::EthSparseTries}, 6 | RootHashThreadPool, 7 | }; 8 | use std::time::Instant; 9 | 10 | fn main() { 11 | let mut examples = Vec::new(); 12 | 13 | for i in 0..7 { 14 | let dir: PathBuf = format!("./test_data/prepared_tries/example{}/", i) 15 | .parse() 16 | .unwrap(); 17 | 18 | let change_set: ETHTrieChangeSet = { 19 | let mut p = dir.clone(); 20 | p.push("change_set.json.gz"); 21 | deserialize_from_json_gzip(p).expect("change set") 22 | }; 23 | 24 | let tries: EthSparseTries = { 25 | let mut p = dir.clone(); 26 | p.push("tries.json.gz"); 27 | deserialize_from_json_gzip(p).expect("sparse trie") 28 | }; 29 | 30 | examples.push((change_set, tries)); 31 | } 32 | 33 | const WARMUP_RUNS: usize = 100; 34 | const REAL_RUNS: usize = 1000; 35 | 36 | const 
PAR_ACCOUNT_TRIE: bool = true; 37 | const PAR_STORAGE_TRIES: bool = true; 38 | 39 | let threadpool = RootHashThreadPool::try_new(4).unwrap(); 40 | 41 | println!("example,min,max,p50,p99,MAX/MIN,p99/p50"); 42 | for (i, (change_set, tries)) in examples.into_iter().enumerate() { 43 | let mut measures = Vec::new(); 44 | 45 | for _ in 0..WARMUP_RUNS { 46 | let (change_set, mut tries) = (change_set.clone(), tries.clone()); 47 | 48 | threadpool.rayon_pool.install(|| { 49 | tries 50 | .calculate_root_hash(change_set, PAR_STORAGE_TRIES, PAR_ACCOUNT_TRIE) 51 | .unwrap(); 52 | }); 53 | } 54 | 55 | for _ in 0..REAL_RUNS { 56 | let (change_set, mut tries) = (change_set.clone(), tries.clone()); 57 | 58 | let start = Instant::now(); 59 | 60 | threadpool.rayon_pool.install(|| { 61 | tries 62 | .calculate_root_hash(change_set, PAR_STORAGE_TRIES, PAR_ACCOUNT_TRIE) 63 | .unwrap(); 64 | }); 65 | measures.push(start.elapsed().as_micros()); 66 | } 67 | 68 | measures.sort(); 69 | let min = *measures.first().unwrap(); 70 | let max = *measures.last().unwrap(); 71 | let p50 = measures[measures.len() / 2]; 72 | let p99 = measures[measures.len() * 99 / 100]; 73 | 74 | println!( 75 | "{},{},{},{},{},{:.2},{:.2}", 76 | i, 77 | min, 78 | max, 79 | p50, 80 | p99, 81 | max as f64 / min as f64, 82 | p99 as f64 / p50 as f64 83 | ); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/benches/trie_nodes_benches.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::{keccak256, Bytes, B256, U256}; 2 | use alloy_rlp::Encodable; 3 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 4 | use eth_sparse_mpt::{test_utils::KeccakHasher, utils::HashMap, v1::sparse_mpt::DiffTrie}; 5 | 6 | // hashing this trie it roughly equivalent to updating the trie for the block 7 | const TRIE_SIZE: usize = 3000; 8 | 9 | fn prepare_key_value_data(n: usize) -> (Vec, Vec) { 10 | 
let mut keys = Vec::with_capacity(n); 11 | let mut values = Vec::with_capacity(n); 12 | for i in 0u64..3000 { 13 | let b: B256 = U256::from(i).into(); 14 | let data = keccak256(b).to_vec(); 15 | let value = keccak256(&data).to_vec(); 16 | keys.push(Bytes::copy_from_slice(data.as_slice())); 17 | values.push(Bytes::copy_from_slice(value.as_slice())); 18 | } 19 | (keys, values) 20 | } 21 | 22 | fn add_elements_bytes(keys: &[Bytes], values: &[Bytes]) -> B256 { 23 | triehash::trie_root::(keys.iter().zip(values)) 24 | } 25 | 26 | fn add_elements_only_neo_sparse_trie(keys: &[Bytes], values: &[Bytes]) { 27 | let mut trie = DiffTrie::new_empty(); 28 | // trie.reserve(keys.len()); 29 | for (key, value) in keys.iter().zip(values.iter()) { 30 | trie.insert(key.clone(), value.clone()) 31 | .expect("can't insert"); 32 | } 33 | } 34 | 35 | fn add_elements_only_neo_sparse_trie_insert_and_hash(keys: &[Bytes], values: &[Bytes]) -> B256 { 36 | let mut trie = DiffTrie::new_empty(); 37 | for (key, value) in keys.iter().zip(values.iter()) { 38 | trie.insert(key.clone(), value.clone()) 39 | .expect("can't insert"); 40 | } 41 | trie.root_hash().expect("must hash") 42 | } 43 | 44 | fn ptr_trie_insert_only(c: &mut Criterion) { 45 | let (keys, values) = prepare_key_value_data(TRIE_SIZE); 46 | c.bench_function(&format!("ptr_trie_insert_only_{}", TRIE_SIZE), |b| { 47 | b.iter(|| add_elements_only_neo_sparse_trie(&keys, &values)) 48 | }); 49 | } 50 | 51 | fn ptr_trie_insert_and_hash(c: &mut Criterion) { 52 | let (keys, values) = prepare_key_value_data(TRIE_SIZE); 53 | c.bench_function(&format!("ptr_trie_insert_and_hash_{}", TRIE_SIZE), |b| { 54 | b.iter(|| add_elements_only_neo_sparse_trie_insert_and_hash(&keys, &values)) 55 | }); 56 | c.bench_function( 57 | &format!("reference_trie_insert_and_hash_{}", TRIE_SIZE), 58 | |b| b.iter(|| add_elements_bytes(&keys, &values)), 59 | ); 60 | } 61 | 62 | fn hashing(c: &mut Criterion) { 63 | let mut data = Vec::new(); 64 | for _ in 0..TRIE_SIZE { 65 | 
data.push(B256::random()); 66 | } 67 | 68 | let mut hash_cache = HashMap::default(); 69 | 70 | c.bench_function(&format!("hashing_{}_elements", TRIE_SIZE), |b| { 71 | b.iter(|| { 72 | for d in data.iter() { 73 | let hash = keccak256(d); 74 | black_box(hash); 75 | } 76 | }) 77 | }); 78 | 79 | c.bench_function(&format!("hashing_{}_elements_with_cache", TRIE_SIZE), |b| { 80 | b.iter(|| { 81 | for d in data.iter() { 82 | let hash = hash_cache.entry(d).or_insert_with(|| keccak256(d)); 83 | black_box(hash); 84 | } 85 | }) 86 | }); 87 | } 88 | 89 | fn cloning(c: &mut Criterion) { 90 | let mut data = Vec::new(); 91 | for _ in 0..TRIE_SIZE { 92 | data.push(vec![B256::random(); 16]); 93 | } 94 | 95 | c.bench_function( 96 | &format!("cloning_{}_branch_node_size_elements", TRIE_SIZE), 97 | |b| { 98 | b.iter(|| { 99 | black_box(data.clone()); 100 | }) 101 | }, 102 | ); 103 | 104 | c.bench_function( 105 | &format!("hashing_{}_branch_node_size_elements", TRIE_SIZE), 106 | |b| { 107 | let mut buff = Vec::new(); 108 | b.iter(|| { 109 | data.encode(&mut buff); 110 | black_box(keccak256(&buff)); 111 | }) 112 | }, 113 | ); 114 | } 115 | 116 | criterion_group!( 117 | benches, 118 | hashing, 119 | cloning, 120 | ptr_trie_insert_only, 121 | ptr_trie_insert_and_hash, 122 | ); 123 | criterion_main!(benches); 124 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/src/test_utils.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::File, io::Read, path::Path}; 2 | 3 | use alloy_primitives::{keccak256, Bytes, B256}; 4 | use flate2::read::GzDecoder; 5 | use rustc_hash::FxHasher; 6 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; 7 | 8 | use crate::{ 9 | v1::reth_sparse_trie::{change_set::ETHTrieChangeSet, trie_fetcher::MultiProof}, 10 | v1::sparse_mpt::DiffTrie, 11 | }; 12 | 13 | pub fn deserialize_from_json_gzip(path: impl AsRef) -> eyre::Result { 14 | let file = 
File::open(path)?; 15 | let mut gz = GzDecoder::new(file); 16 | let mut content = String::new(); 17 | gz.read_to_string(&mut content)?; 18 | Ok(serde_json::from_str(&content)?) 19 | } 20 | 21 | #[derive(Debug)] 22 | pub struct KeccakHasher {} 23 | 24 | impl hash_db::Hasher for KeccakHasher { 25 | type Out = B256; 26 | type StdHasher = FxHasher; 27 | const LENGTH: usize = 32; 28 | 29 | fn hash(x: &[u8]) -> Self::Out { 30 | keccak256(x) 31 | } 32 | } 33 | 34 | pub fn reference_trie_hash(data: &[(Bytes, Bytes)]) -> B256 { 35 | triehash::trie_root::(data.to_vec()) 36 | } 37 | 38 | pub fn reference_trie_hash_vec(data: &[(Vec, Vec)]) -> B256 { 39 | triehash::trie_root::(data.to_vec()) 40 | } 41 | 42 | pub fn get_test_multiproofs() -> Vec { 43 | let files = [ 44 | "./test_data/multiproof_0.json.gz", 45 | "./test_data/multiproof_1.json.gz", 46 | ]; 47 | let mut result = Vec::new(); 48 | for file in files { 49 | result.push(deserialize_from_json_gzip(file).expect("parsing multiproof")); 50 | } 51 | result 52 | } 53 | 54 | pub fn get_test_change_set() -> ETHTrieChangeSet { 55 | deserialize_from_json_gzip("./test_data/changeset.json.gz").expect("changeset") 56 | } 57 | 58 | #[derive(Debug, Clone, Serialize, Deserialize)] 59 | pub struct StoredFailureCase { 60 | pub trie: DiffTrie, 61 | pub updated_keys: Vec, 62 | pub updated_values: Vec, 63 | pub deleted_keys: Vec, 64 | } 65 | 66 | impl StoredFailureCase { 67 | pub fn load(path: &str) -> StoredFailureCase { 68 | deserialize_from_json_gzip(path).expect("stored failure case") 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::hash::{Hash, Hasher}; 2 | 3 | use alloy_primitives::{keccak256, Bytes}; 4 | use alloy_rlp::{length_of_length, BufMut, Encodable, Header, EMPTY_STRING_CODE}; 5 | use alloy_trie::{ 6 | nodes::{ExtensionNodeRef, LeafNodeRef}, 7 | 
Nibbles, 8 | }; 9 | use reth_trie::RlpNode; 10 | use rustc_hash::{FxBuildHasher, FxHasher}; 11 | 12 | pub type HashMap = std::collections::HashMap; 13 | pub type HashSet = std::collections::HashSet; 14 | 15 | pub fn hash_map_with_capacity(capacity: usize) -> HashMap { 16 | HashMap::with_capacity_and_hasher(capacity, FxBuildHasher) 17 | } 18 | 19 | pub fn fast_hash(value: &H) -> u64 { 20 | let mut hasher = FxHasher::default(); 21 | value.hash(&mut hasher); 22 | hasher.finish() 23 | } 24 | 25 | pub fn rlp_pointer(rlp_encode: Bytes) -> Bytes { 26 | if rlp_encode.len() < 32 { 27 | rlp_encode 28 | } else { 29 | Bytes::copy_from_slice(RlpNode::word_rlp(&keccak256(&rlp_encode)).as_ref()) 30 | } 31 | } 32 | 33 | pub fn concat_path(p1: &Nibbles, p2: &[u8]) -> Nibbles { 34 | let mut result = Nibbles::with_capacity(p1.len() + p2.len()); 35 | result.extend_from_slice_unchecked(p1); 36 | result.extend_from_slice_unchecked(p2); 37 | result 38 | } 39 | 40 | pub fn strip_first_nibble_mut(p: &mut Nibbles) -> u8 { 41 | let nibble = p[0]; 42 | let vec = p.as_mut_vec_unchecked(); 43 | vec.remove(0); 44 | nibble 45 | } 46 | 47 | #[inline] 48 | pub fn extract_prefix_and_suffix(p1: &Nibbles, p2: &Nibbles) -> (Nibbles, Nibbles, Nibbles) { 49 | let prefix_len = p1.common_prefix_length(p2); 50 | let prefix = Nibbles::from_nibbles_unchecked(&p1[..prefix_len]); 51 | let suffix1 = Nibbles::from_nibbles_unchecked(&p1[prefix_len..]); 52 | let suffix2 = Nibbles::from_nibbles_unchecked(&p2[prefix_len..]); 53 | 54 | (prefix, suffix1, suffix2) 55 | } 56 | 57 | #[inline] 58 | pub fn encode_leaf(key: &Nibbles, value: &[u8], out: &mut dyn BufMut) { 59 | LeafNodeRef { key, value }.encode(out) 60 | } 61 | 62 | pub fn encode_len_leaf(key: &Nibbles, value: &[u8]) -> usize { 63 | LeafNodeRef { key, value }.length() 64 | } 65 | 66 | #[inline] 67 | pub fn encode_extension(key: &Nibbles, child_rlp_pointer: &[u8], out: &mut dyn BufMut) { 68 | ExtensionNodeRef { 69 | key, 70 | child: child_rlp_pointer, 71 | } 72 
| .encode(out) 73 | } 74 | 75 | pub fn encode_len_extension(key: &Nibbles, child_rlp_pointer: &[u8]) -> usize { 76 | ExtensionNodeRef { 77 | key, 78 | child: child_rlp_pointer, 79 | } 80 | .length() 81 | } 82 | 83 | #[inline] 84 | pub fn encode_branch_node(child_rlp_pointers: &[Option<&[u8]>; 16], out: &mut dyn BufMut) { 85 | let mut payload_length = 1; 86 | for i in 0..16 { 87 | if let Some(child) = child_rlp_pointers[i] { 88 | payload_length += child.len(); 89 | } else { 90 | payload_length += 1; 91 | } 92 | } 93 | 94 | Header { 95 | list: true, 96 | payload_length, 97 | } 98 | .encode(out); 99 | 100 | for i in 0..16 { 101 | if let Some(child) = child_rlp_pointers[i] { 102 | out.put_slice(child); 103 | } else { 104 | out.put_u8(EMPTY_STRING_CODE); 105 | } 106 | } 107 | out.put_u8(EMPTY_STRING_CODE); 108 | } 109 | 110 | pub fn encode_len_branch_node(child_rlp_pointers: &[Option<&[u8]>; 16]) -> usize { 111 | let mut payload_length = 1; 112 | for i in 0..16 { 113 | if let Some(child) = child_rlp_pointers[i] { 114 | payload_length += child.len(); 115 | } else { 116 | payload_length += 1; 117 | } 118 | } 119 | payload_length + length_of_length(payload_length) 120 | } 121 | 122 | pub fn encode_null_node(out: &mut Vec) { 123 | out.push(EMPTY_STRING_CODE) 124 | } 125 | 126 | #[inline] 127 | pub fn mismatch(xs: &[u8], ys: &[u8]) -> usize { 128 | mismatch_chunks::<8>(xs, ys) 129 | } 130 | 131 | #[inline] 132 | fn mismatch_chunks(xs: &[u8], ys: &[u8]) -> usize { 133 | let off = std::iter::zip(xs.chunks_exact(N), ys.chunks_exact(N)) 134 | .take_while(|(x, y)| x == y) 135 | .count() 136 | * N; 137 | off + std::iter::zip(&xs[off..], &ys[off..]) 138 | .take_while(|(x, y)| x == y) 139 | .count() 140 | } 141 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/src/v1/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod reth_sparse_trie; 2 | pub mod sparse_mpt; 3 | 
-------------------------------------------------------------------------------- /crates/eth-sparse-mpt/src/v1/reth_sparse_trie/change_set.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::{keccak256, Address, Bytes, B256}; 2 | use revm::{database::BundleAccount, state::AccountInfo}; 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::ChangedAccountData; 6 | 7 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 8 | pub struct ETHTrieChangeSet { 9 | pub account_trie_deletes: Vec, 10 | 11 | pub account_trie_updates: Vec, 12 | pub account_trie_updates_info: Vec, 13 | 14 | // for each account_trie_updates 15 | pub storage_trie_updated_keys: Vec>, 16 | pub storage_trie_updated_values: Vec>, 17 | pub storage_trie_deleted_keys: Vec>, 18 | } 19 | 20 | pub fn prepare_change_set_for_prefetch<'a>( 21 | changed_data: impl Iterator, 22 | ) -> ETHTrieChangeSet { 23 | let mut result = ETHTrieChangeSet::default(); 24 | 25 | for data in changed_data { 26 | let hashed_address = Bytes::copy_from_slice(keccak256(data.address).as_slice()); 27 | 28 | if data.account_deleted { 29 | result.account_trie_deletes.push(hashed_address); 30 | continue; 31 | } else { 32 | result.account_trie_updates.push(hashed_address); 33 | } 34 | 35 | let mut storage_updates_keys: Vec = Vec::new(); 36 | let mut storage_deleted_keys: Vec = Vec::new(); 37 | for (storage_key, deleted) in &data.slots { 38 | let hashed_key = Bytes::copy_from_slice(keccak256(B256::from(*storage_key)).as_slice()); 39 | if *deleted { 40 | storage_deleted_keys.push(hashed_key); 41 | } else { 42 | storage_updates_keys.push(hashed_key); 43 | } 44 | } 45 | 46 | result.storage_trie_updated_keys.push(storage_updates_keys); 47 | result.storage_trie_deleted_keys.push(storage_deleted_keys); 48 | } 49 | 50 | result 51 | } 52 | 53 | pub fn prepare_change_set<'a>( 54 | changes: impl Iterator, 55 | ) -> ETHTrieChangeSet { 56 | let mut result = ETHTrieChangeSet::default(); 
57 | 58 | for (address, bundle_account) in changes { 59 | let status = bundle_account.status; 60 | if status.is_not_modified() { 61 | continue; 62 | } 63 | 64 | // @cache consider caching in the scratchpad 65 | let hashed_address = Bytes::copy_from_slice(keccak256(address).as_slice()); 66 | 67 | match bundle_account.account_info() { 68 | // account was modified 69 | Some(account) => { 70 | result.account_trie_updates.push(hashed_address); 71 | result 72 | .account_trie_updates_info 73 | .push(account.without_code()); 74 | } 75 | // account was destroyed 76 | None => { 77 | result.account_trie_deletes.push(hashed_address); 78 | continue; 79 | } 80 | } 81 | 82 | let mut storage_updates_keys: Vec = Vec::new(); 83 | let mut storage_updates_values: Vec = Vec::new(); 84 | let mut storage_deleted_keys: Vec = Vec::new(); 85 | for (storage_key, storage_value) in &bundle_account.storage { 86 | if !storage_value.is_changed() { 87 | continue; 88 | } 89 | // @cache consider caching in the scratchpad 90 | let hashed_key = Bytes::copy_from_slice(keccak256(B256::from(*storage_key)).as_slice()); 91 | let value = storage_value.present_value(); 92 | if value.is_zero() { 93 | storage_deleted_keys.push(hashed_key); 94 | } else { 95 | // @efficiently, alloy_fixed encoding 96 | let value = Bytes::from(alloy_rlp::encode(value)); 97 | storage_updates_keys.push(hashed_key); 98 | storage_updates_values.push(value); 99 | } 100 | } 101 | result.storage_trie_updated_keys.push(storage_updates_keys); 102 | result 103 | .storage_trie_updated_values 104 | .push(storage_updates_values); 105 | result.storage_trie_deleted_keys.push(storage_deleted_keys); 106 | } 107 | 108 | result 109 | } 110 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/src/v1/sparse_mpt/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod diff_trie; 2 | pub mod fixed_trie; 3 | 4 | pub use diff_trie::*; 5 | pub use 
fixed_trie::*; 6 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/src/v2/trie/proof_store.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::Bytes; 2 | use alloy_rlp::Decodable; 3 | use parking_lot::{lock_api::RwLockReadGuard, RawRwLock, RwLock}; 4 | use std::sync::Arc; 5 | 6 | use alloy_trie::nodes::TrieNode as AlloyTrieNode; 7 | use arrayvec::ArrayVec; 8 | use dashmap::DashMap; 9 | use reth_trie::Nibbles; 10 | use rustc_hash::FxBuildHasher; 11 | 12 | #[derive(Debug, Clone)] 13 | pub enum ProofNode { 14 | Leaf { key: usize, value: usize }, 15 | Extension { key: usize, child: usize }, 16 | Branch { children: [Option; 16] }, 17 | Empty, 18 | } 19 | 20 | #[derive(Debug, Clone, Default)] 21 | pub struct ProofStore { 22 | keys: Arc>>, 23 | values: Arc>>>, 24 | rlp_ptrs: Arc>>>, 25 | 26 | pub proofs: Arc, FxBuildHasher>>, 27 | } 28 | 29 | impl ProofStore { 30 | fn add_rlp_ptr(&self, data: ArrayVec) -> usize { 31 | let mut arr = self.rlp_ptrs.write(); 32 | let idx = arr.len(); 33 | arr.push(data); 34 | idx 35 | } 36 | 37 | fn add_key(&self, data: Nibbles) -> usize { 38 | let mut arr = self.keys.write(); 39 | let idx = arr.len(); 40 | arr.push(data); 41 | idx 42 | } 43 | 44 | fn add_value(&self, data: Vec) -> usize { 45 | let mut arr = self.values.write(); 46 | let idx = arr.len(); 47 | arr.push(data.into()); 48 | idx 49 | } 50 | 51 | pub fn has_proof(&self, key: &Nibbles) -> bool { 52 | self.proofs.contains_key(key) 53 | } 54 | 55 | pub fn add_proof( 56 | &self, 57 | key: Nibbles, 58 | proof: Vec<(Nibbles, Bytes)>, 59 | ) -> Result<(), alloy_rlp::Error> { 60 | if self.proofs.contains_key(&key) { 61 | return Ok(()); 62 | } 63 | 64 | let mut parsed_proof: Vec<(Nibbles, ProofNode)> = Vec::with_capacity(proof.len()); 65 | 66 | for (path, encoded_node) in proof { 67 | let alloy_trie_node = AlloyTrieNode::decode(&mut encoded_node.as_ref())?; 68 | let 
decoded_node = match alloy_trie_node { 69 | AlloyTrieNode::Branch(alloy_node) => { 70 | let mut children: [Option; 16] = Default::default(); 71 | let mut stack_iter = alloy_node.stack.into_iter(); 72 | for index in 0..16 { 73 | if alloy_node.state_mask.is_bit_set(index) { 74 | let rlp_ptr: ArrayVec = stack_iter 75 | .next() 76 | .expect("stack must be the same size as mask") 77 | .as_slice() 78 | .try_into() 79 | .unwrap(); 80 | children[index as usize] = Some(self.add_rlp_ptr(rlp_ptr)); 81 | } 82 | } 83 | ProofNode::Branch { children } 84 | } 85 | AlloyTrieNode::Extension(node) => ProofNode::Extension { 86 | key: self.add_key(node.key), 87 | child: self.add_rlp_ptr(node.child.as_slice().try_into().unwrap()), 88 | }, 89 | AlloyTrieNode::Leaf(node) => ProofNode::Leaf { 90 | key: self.add_key(node.key), 91 | value: self.add_value(node.value), 92 | }, 93 | AlloyTrieNode::EmptyRoot => ProofNode::Empty, 94 | }; 95 | parsed_proof.push((path, decoded_node)); 96 | } 97 | 98 | self.proofs.insert(key, parsed_proof); 99 | 100 | Ok(()) 101 | } 102 | 103 | // panics if ptr is not stored in this proof store 104 | pub fn rlp_ptrs(&self) -> RwLockReadGuard<'_, RawRwLock, Vec>> { 105 | self.rlp_ptrs.read() 106 | } 107 | 108 | pub fn keys_guard(&self) -> RwLockReadGuard<'_, RawRwLock, Vec> { 109 | self.keys.read() 110 | } 111 | 112 | pub fn values_guard(&self) -> RwLockReadGuard<'_, RawRwLock, Vec>> { 113 | self.values.read() 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/README.md: -------------------------------------------------------------------------------- 1 | use `gzip` tool to work with these files 2 | 3 | ``` 4 | # compress file, it will remove original file 5 | gzip * 6 | ``` 7 | 8 | ``` 9 | # decompress file, without removing original file 10 | gzip -k -d name 11 | ``` 12 | -------------------------------------------------------------------------------- 
/crates/eth-sparse-mpt/test_data/changeset.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/changeset.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/failure_case_0.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/failure_case_0.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/multiproof_0.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/multiproof_0.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/multiproof_1.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/multiproof_1.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example0/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example0/change_set.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example0/tries.json.gz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example0/tries.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example1/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example1/change_set.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example1/tries.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example1/tries.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example2/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example2/change_set.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example2/tries.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example2/tries.json.gz -------------------------------------------------------------------------------- 
/crates/eth-sparse-mpt/test_data/prepared_tries/example3/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example3/change_set.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example3/tries.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example3/tries.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example4/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example4/change_set.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example4/tries.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example4/tries.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example5/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example5/change_set.json.gz 
-------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example5/tries.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example5/tries.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example6/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example6/change_set.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example6/tries.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example6/tries.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example7/change_set.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example7/change_set.json.gz -------------------------------------------------------------------------------- /crates/eth-sparse-mpt/test_data/prepared_tries/example7/tries.json.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/crates/eth-sparse-mpt/test_data/prepared_tries/example7/tries.json.gz -------------------------------------------------------------------------------- /crates/rbuilder/benches/bench_main.rs: -------------------------------------------------------------------------------- 1 | use criterion::criterion_main; 2 | 3 | mod benchmarks; 4 | 5 | criterion_main! { 6 | benchmarks::mev_boost::serialization, 7 | benchmarks::txpool_fetcher::txpool, 8 | } 9 | -------------------------------------------------------------------------------- /crates/rbuilder/benches/benchmarks/mev_boost.rs: -------------------------------------------------------------------------------- 1 | use alloy_consensus::{Block, Header}; 2 | use alloy_eips::eip4844::BlobTransactionSidecar; 3 | use alloy_primitives::U256; 4 | use criterion::{criterion_group, Criterion}; 5 | use primitive_types::H384; 6 | use rbuilder::mev_boost::{ 7 | rpc::TestDataGenerator, sign_block_for_relay, submission::DenebSubmitBlockRequest, 8 | BLSBlockSigner, 9 | }; 10 | use reth::primitives::SealedBlock; 11 | use reth_chainspec::SEPOLIA; 12 | use reth_primitives::kzg::Blob; 13 | use std::{fs, path::PathBuf, sync::Arc}; 14 | 15 | fn mev_boost_serialize_submit_block(data: DenebSubmitBlockRequest) { 16 | data.as_ssz_bytes(); 17 | } 18 | 19 | fn bench_mevboost_serialization(c: &mut Criterion) { 20 | let mut generator = TestDataGenerator::default(); 21 | let mut group = c.benchmark_group("MEV-Boost SubmitBlock serialization"); 22 | 23 | group.bench_function("SSZ encoding", |b| { 24 | b.iter_batched( 25 | || generator.create_deneb_submit_block_request(), 26 | |b| { 27 | mev_boost_serialize_submit_block(b); 28 | }, 29 | criterion::BatchSize::SmallInput, 30 | ); 31 | }); 32 | 33 | group.bench_function("JSON encoding", |b| { 34 | b.iter_batched( 35 | || generator.create_deneb_submit_block_request(), 36 | |b| { 37 | serde_json::to_vec(&b).unwrap(); 
38 | }, 39 | criterion::BatchSize::SmallInput, 40 | ); 41 | }); 42 | 43 | group.finish(); 44 | } 45 | 46 | fn bench_mevboost_sign(c: &mut Criterion) { 47 | let mut generator = TestDataGenerator::default(); 48 | 49 | let json_content = fs::read_to_string( 50 | PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("benches/blob_data/blob1.json"), 51 | ) 52 | .unwrap(); 53 | 54 | // Parse the JSON contents into a serde_json::Value 55 | let json_value: serde_json::Value = 56 | serde_json::from_str(&json_content).expect("Failed to deserialize JSON"); 57 | 58 | // Extract blob data from JSON and convert it to Blob 59 | let blobs: Vec = vec![Blob::from_hex( 60 | json_value 61 | .get("data") 62 | .unwrap() 63 | .as_str() 64 | .expect("Data is not a valid string"), 65 | ) 66 | .unwrap()]; 67 | 68 | // Generate a BlobTransactionSidecar from the blobs 69 | let blob = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); 70 | 71 | let sealed_block = SealedBlock::default(); 72 | let signer = BLSBlockSigner::test_signer(); 73 | let mut blobs = vec![]; 74 | for _ in 0..3 { 75 | blobs.push(Arc::new(blob.clone())); 76 | } 77 | 78 | let chain_spec = SEPOLIA.clone(); 79 | let payload = generator.create_payload_attribute_data(); 80 | 81 | let mut group = c.benchmark_group("MEV-Boost Sign block for relay"); 82 | 83 | // This benchmark is here to have a baseline for Deneb (with blobs) 84 | group.bench_function("Capella", |b| { 85 | b.iter(|| { 86 | let _ = sign_block_for_relay( 87 | &signer, 88 | &sealed_block, 89 | &blobs, 90 | &Vec::new(), 91 | &chain_spec, 92 | &payload, 93 | H384::default(), 94 | U256::default(), 95 | ) 96 | .unwrap(); 97 | }) 98 | }); 99 | 100 | // Create a sealed block that is after the Cancun hard fork in Sepolia 101 | // this is, a timestamp higher than 1706655072 102 | let sealed_block_deneb = SealedBlock::new_unhashed(Block::new( 103 | Header { 104 | timestamp: 2706655072, 105 | blob_gas_used: Some(64), 106 | excess_blob_gas: Some(64), 107 | ..Default::default() 
108 | }, 109 | Default::default(), 110 | )); 111 | 112 | group.bench_function("Deneb", |b| { 113 | b.iter(|| { 114 | let _ = sign_block_for_relay( 115 | &signer, 116 | &sealed_block_deneb, 117 | &blobs, 118 | &Vec::new(), 119 | &chain_spec, 120 | &payload, 121 | H384::default(), 122 | U256::default(), 123 | ) 124 | .unwrap(); 125 | }) 126 | }); 127 | 128 | group.finish(); 129 | } 130 | 131 | criterion_group!( 132 | serialization, 133 | bench_mevboost_serialization, 134 | bench_mevboost_sign 135 | ); 136 | -------------------------------------------------------------------------------- /crates/rbuilder/benches/benchmarks/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod mev_boost; 2 | pub mod txpool_fetcher; 3 | -------------------------------------------------------------------------------- /crates/rbuilder/benches/benchmarks/txpool_fetcher.rs: -------------------------------------------------------------------------------- 1 | use alloy_network::{EthereumWallet, TransactionBuilder}; 2 | use alloy_node_bindings::Anvil; 3 | use alloy_primitives::U256; 4 | use alloy_provider::{Provider, ProviderBuilder}; 5 | use alloy_rpc_types::TransactionRequest; 6 | use alloy_signer_local::PrivateKeySigner; 7 | use criterion::{criterion_group, Criterion}; 8 | use rbuilder::live_builder::order_input::{ 9 | txpool_fetcher::subscribe_to_txpool_with_blobs, OrderInputConfig, 10 | }; 11 | use std::time::Duration; 12 | use tokio::sync::mpsc; 13 | use tokio_util::sync::CancellationToken; 14 | 15 | async fn txpool_receive_util(count: u32) { 16 | let anvil = Anvil::new() 17 | .args(["--ipc", "/tmp/anvil.ipc"]) 18 | .try_spawn() 19 | .unwrap(); 20 | 21 | let (sender, mut receiver) = mpsc::channel(10); 22 | subscribe_to_txpool_with_blobs( 23 | OrderInputConfig::default_e2e(), 24 | sender, 25 | CancellationToken::new(), 26 | ) 27 | .await 28 | .unwrap(); 29 | 30 | let signer: PrivateKeySigner = anvil.keys()[0].clone().into(); 31 | let wallet = 
EthereumWallet::from(signer); 32 | 33 | let provider = ProviderBuilder::new() 34 | .wallet(wallet) 35 | .on_http(anvil.endpoint().parse().unwrap()); 36 | 37 | let alice = anvil.addresses()[0]; 38 | let eip1559_est = provider.estimate_eip1559_fees().await.unwrap(); 39 | 40 | let tx = TransactionRequest::default() 41 | .with_to(alice) 42 | .with_value(U256::from(1)) 43 | .with_max_fee_per_gas(eip1559_est.max_fee_per_gas) 44 | .with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas); 45 | 46 | tokio::spawn(async move { 47 | for i in 0..count { 48 | let tx = tx.clone().with_nonce(i.into()); 49 | let _ = provider.send_transaction(tx).await.unwrap(); 50 | } 51 | }); 52 | 53 | for _ in 0..count { 54 | let _ = receiver.recv().await.unwrap(); 55 | } 56 | } 57 | 58 | fn bench_txpool_receive(c: &mut Criterion) { 59 | let rt = tokio::runtime::Runtime::new().unwrap(); 60 | let mut group = c.benchmark_group("Txpool fetcher"); 61 | 62 | group.measurement_time(Duration::from_secs(20)); 63 | group.bench_function("txn_fetcher_normal_10", |b| { 64 | b.to_async(&rt).iter(|| txpool_receive_util(10)); 65 | }); 66 | group.bench_function("txn_fetcher_normal_50", |b| { 67 | b.to_async(&rt).iter(|| txpool_receive_util(50)); 68 | }); 69 | } 70 | 71 | criterion_group!(txpool, bench_txpool_receive,); 72 | -------------------------------------------------------------------------------- /crates/rbuilder/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | built::write_built_file().expect("Failed to acquire build-time information"); 3 | } 4 | -------------------------------------------------------------------------------- /crates/rbuilder/src/backtest/build_block/mod.rs: -------------------------------------------------------------------------------- 1 | mod backtest_build_block; 2 | pub mod landed_block_from_db; 3 | pub mod synthetic_orders; 4 | -------------------------------------------------------------------------------- 
/crates/rbuilder/src/backtest/fetch/data_source.rs: -------------------------------------------------------------------------------- 1 | use crate::backtest::{BuiltBlockData, OrdersWithTimestamp}; 2 | use alloy_primitives::B256; 3 | use async_trait::async_trait; 4 | 5 | #[derive(Debug, Clone)] 6 | pub struct DatasourceData { 7 | pub orders: Vec, 8 | pub built_block_data: Option, 9 | } 10 | 11 | /// DataSource trait 12 | /// 13 | /// This trait is used to fetch data from a datasource 14 | #[async_trait] 15 | pub trait DataSource: std::fmt::Debug { 16 | async fn get_data(&self, block: BlockRef) -> eyre::Result; 17 | 18 | fn clone_box(&self) -> Box; 19 | } 20 | 21 | impl Clone for Box { 22 | fn clone(&self) -> Self { 23 | self.clone_box() 24 | } 25 | } 26 | 27 | /// Some DataSources also need the block_timestamp and landed_block_hash to be able to get the orders, 28 | /// so we use a BlockRef on [`DataSource::get_data`] instead of just a block_number 29 | #[derive(Debug, Copy, Clone)] 30 | pub struct BlockRef { 31 | pub block_number: u64, 32 | pub block_timestamp: u64, 33 | pub landed_block_hash: Option, 34 | } 35 | 36 | impl BlockRef { 37 | pub fn new(block_number: u64, block_timestamp: u64, landed_block_hash: Option) -> Self { 38 | Self { 39 | block_number, 40 | block_timestamp, 41 | landed_block_hash, 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /crates/rbuilder/src/backtest/redistribute/cli/csv_output.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::{Address, B256, U256}; 2 | use std::{fs::File, io, io::Write, path::Path}; 3 | 4 | #[derive(Debug)] 5 | pub struct CSVOutputRow { 6 | pub block_number: u64, 7 | pub block_hash: B256, 8 | pub address: Address, 9 | pub amount: U256, 10 | } 11 | 12 | #[derive(Debug)] 13 | pub struct CSVResultWriter { 14 | file: File, 15 | } 16 | 17 | impl CSVResultWriter { 18 | pub fn new(path: impl AsRef) ->
io::Result { 19 | let file = std::fs::OpenOptions::new() 20 | .write(true) 21 | .create(true) 22 | .truncate(true) 23 | .open(path)?; 24 | let mut result = Self { file }; 25 | result.write_header()?; 26 | Ok(result) 27 | } 28 | 29 | fn write_header(&mut self) -> io::Result<()> { 30 | writeln!(self.file, "block_number,block_hash,address,amount")?; 31 | self.file.flush() 32 | } 33 | 34 | pub fn write_data(&mut self, mut values: Vec) -> io::Result<()> { 35 | // first sort values by block from low to high and then by address 36 | values.sort_by(|a, b| { 37 | a.block_number 38 | .cmp(&b.block_number) 39 | .then_with(|| a.address.cmp(&b.address)) 40 | }); 41 | 42 | for value in values { 43 | writeln!( 44 | self.file, 45 | "{},{:?},{:?},{}", 46 | value.block_number, value.block_hash, value.address, value.amount 47 | )?; 48 | } 49 | self.file.flush() 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /crates/rbuilder/src/backtest/restore_landed_orders/mod.rs: -------------------------------------------------------------------------------- 1 | mod find_landed_orders; 2 | mod resim_landed_block; 3 | 4 | pub use resim_landed_block::{sim_historical_block, ExecutedTxs}; 5 | 6 | pub use find_landed_orders::{ 7 | restore_landed_orders, ExecutedBlockTx, LandedOrderData, SimplifiedOrder, 8 | }; 9 | -------------------------------------------------------------------------------- /crates/rbuilder/src/beacon_api_client/mod.rs: -------------------------------------------------------------------------------- 1 | use alloy_rpc_types_beacon::events::PayloadAttributesEvent; 2 | use beacon_api_client::{mainnet::Client as bClient, Error, Topic}; 3 | use mev_share_sse::client::EventStream; 4 | use serde::Deserialize; 5 | use std::{collections::HashMap, fmt::Debug}; 6 | use url::Url; 7 | 8 | pub const DEFAULT_CL_NODE_URL: &str = "http://localhost:8000"; 9 | 10 | #[derive(Deserialize, Clone)] 11 | #[serde(try_from = "String")] 12 | pub struct 
Client { 13 | inner: bClient, 14 | } 15 | 16 | impl Debug for Client { 17 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 18 | f.debug_struct("Client").finish() 19 | } 20 | } 21 | 22 | impl Default for Client { 23 | fn default() -> Self { 24 | Self { 25 | inner: bClient::new(Url::parse(DEFAULT_CL_NODE_URL).unwrap()), 26 | } 27 | } 28 | } 29 | 30 | impl Client { 31 | pub fn new(endpoint: Url) -> Self { 32 | Self { 33 | inner: bClient::new(endpoint), 34 | } 35 | } 36 | 37 | pub async fn get_spec(&self) -> Result, Error> { 38 | self.inner.get_spec().await 39 | } 40 | 41 | pub async fn get_events(&self) -> Result, Error> { 42 | self.inner.get_events::().await 43 | } 44 | } 45 | 46 | impl TryFrom for Client { 47 | type Error = url::ParseError; 48 | 49 | fn try_from(s: String) -> Result { 50 | let url = Url::parse(&s)?; 51 | Ok(Client::new(url)) 52 | } 53 | } 54 | 55 | pub struct PayloadAttributesTopic; 56 | 57 | impl Topic for PayloadAttributesTopic { 58 | const NAME: &'static str = "payload_attributes"; 59 | 60 | type Data = PayloadAttributesEvent; 61 | } 62 | 63 | #[cfg(test)] 64 | mod tests { 65 | // TODO: Enable these tests. 
66 | use super::*; 67 | use futures::StreamExt; 68 | 69 | #[ignore] 70 | #[tokio::test] 71 | async fn test_get_spec() { 72 | let client = Client::default(); 73 | let spec = client.get_spec().await.unwrap(); 74 | 75 | // validate that the spec contains the genesis fork version 76 | spec.get("GENESIS_FORK_VERSION").unwrap(); 77 | } 78 | 79 | #[ignore] 80 | #[tokio::test] 81 | async fn test_get_events() { 82 | let client = Client::default(); 83 | let mut stream = client.get_events::().await.unwrap(); 84 | 85 | // validate that the stream is not empty 86 | // TODO: add timeout 87 | let event = stream.next().await.unwrap().unwrap(); 88 | print!("{:?}", event); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/backtest-build-block.rs: -------------------------------------------------------------------------------- 1 | //! Instantiation of run_backtest_build_block on our sample configuration. 2 | 3 | use rbuilder::{ 4 | backtest::build_block::landed_block_from_db::run_backtest, live_builder::config::Config, 5 | }; 6 | 7 | #[tokio::main] 8 | async fn main() -> eyre::Result<()> { 9 | run_backtest::().await 10 | } 11 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/backtest-build-range.rs: -------------------------------------------------------------------------------- 1 | //! Instantiation of run_backtest_build_range on our sample configuration. 2 | use rbuilder::{backtest::run_backtest_build_range, live_builder::config::Config}; 3 | 4 | #[tokio::main] 5 | pub async fn main() -> eyre::Result<()> { 6 | run_backtest_build_range::().await 7 | } 8 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/backtest-build-synthetic-block.rs: -------------------------------------------------------------------------------- 1 | //! Instantiation of run_backtest_build_block on our sample configuration. 
2 | 3 | use rbuilder::{ 4 | backtest::build_block::synthetic_orders::run_backtest, live_builder::config::Config, 5 | }; 6 | 7 | #[tokio::main] 8 | async fn main() -> eyre::Result<()> { 9 | run_backtest::().await 10 | } 11 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/backtest-distribute.rs: -------------------------------------------------------------------------------- 1 | use rbuilder::{backtest::redistribute::run_backtest_redistribute, live_builder::config::Config}; 2 | 3 | #[tokio::main] 4 | async fn main() -> eyre::Result<()> { 5 | run_backtest_redistribute::().await 6 | } 7 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/backtest-fetch.rs: -------------------------------------------------------------------------------- 1 | //! Application to fetch orders from different sources (eg: mempool dumpster, external bundles db) and store them on a SQLite DB 2 | //! to be used later (eg: backtest-build-block, backtest-build-range) 3 | 4 | use rbuilder::{ 5 | backtest::fetch::{ 6 | backtest_fetch::run_backtest_fetch, data_source::DataSource, flashbots_db::RelayDB, 7 | }, 8 | live_builder::{cli::LiveBuilderConfig, config::Config}, 9 | }; 10 | 11 | async fn create_bundle_source(config: Config) -> eyre::Result>> { 12 | if let Some(db) = config.base_config().flashbots_db.clone() { 13 | let relay_db = RelayDB::from_url(db.value()?).await?; 14 | Ok(Some(Box::new(relay_db))) 15 | } else { 16 | Ok(None) 17 | } 18 | } 19 | 20 | #[tokio::main] 21 | #[allow(clippy::needless_borrow)] 22 | async fn main() -> eyre::Result<()> { 23 | run_backtest_fetch::(create_bundle_source).await 24 | } 25 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/misc-relays-slot.rs: -------------------------------------------------------------------------------- 1 | //! 
Helper app to get information from a landed block from the relays. 2 | //! Takes no configuration since it uses a hardcoded list of relays ([`rbuilder::mev_boost::RELAYS`]). 3 | use alloy_primitives::utils::format_ether; 4 | use clap::Parser; 5 | use rbuilder::backtest::fetch::mev_boost::PayloadDeliveredFetcher; 6 | 7 | #[derive(Parser, Debug)] 8 | struct Cli { 9 | #[clap(help = "block number")] 10 | block: u64, 11 | } 12 | 13 | #[tokio::main] 14 | async fn main() -> eyre::Result<()> { 15 | let cli = Cli::parse(); 16 | 17 | let env = tracing_subscriber::EnvFilter::from_default_env(); 18 | let writer = tracing_subscriber::fmt() 19 | .with_env_filter(env) 20 | .with_test_writer(); 21 | writer.init(); 22 | 23 | let fetcher = PayloadDeliveredFetcher::default(); 24 | let result = fetcher.get_payload_delivered(cli.block).await; 25 | let payload = result.best_bid().ok_or_else(|| { 26 | eyre::eyre!( 27 | "No payload delivered, relay_errors: {:?}", 28 | result.relay_errors 29 | ) 30 | })?; 31 | 32 | let ts_diff = (payload.timestamp * 1000) as i64 - payload.timestamp_ms as i64; 33 | let value = format_ether(payload.value); 34 | 35 | println!("Payload delivered"); 36 | println!("relay {}", result.best_relay().unwrap()); 37 | println!("block {}", payload.block_number); 38 | println!("block_hash {:?}", payload.block_hash); 39 | println!("timestamp_ms {}", payload.timestamp_ms); 40 | println!("timestamp {}", payload.timestamp); 41 | println!("timestamp_diff {}", ts_diff); 42 | println!("num_tx {}", payload.num_tx); 43 | println!("gas_used {}", payload.gas_used); 44 | println!("builder {:?}", payload.builder_pubkey); 45 | println!("value {}", value); 46 | println!("optimistic {}", payload.optimistic_submission); 47 | 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/rbuilder.rs: -------------------------------------------------------------------------------- 1 | //! 
Instantiation of cli::run on our sample configuration. 2 | //! This runs the default included rbuilder! 3 | 4 | use rbuilder::{ 5 | live_builder::{cli, config::Config}, 6 | utils::build_info::print_version_info, 7 | }; 8 | #[cfg(not(target_env = "msvc"))] 9 | use tikv_jemallocator::Jemalloc; 10 | 11 | #[cfg(not(target_env = "msvc"))] 12 | #[global_allocator] 13 | static GLOBAL: Jemalloc = Jemalloc; 14 | 15 | #[tokio::main] 16 | async fn main() -> eyre::Result<()> { 17 | return cli::run::(print_version_info, None).await; 18 | } 19 | -------------------------------------------------------------------------------- /crates/rbuilder/src/bin/validate-config.rs: -------------------------------------------------------------------------------- 1 | //! CLI tool to validate a rbuilder config file 2 | 3 | use clap::Parser; 4 | use rbuilder::live_builder::{base_config::load_config_toml_and_env, config::Config}; 5 | use std::path::PathBuf; 6 | 7 | #[derive(Parser, Debug)] 8 | struct Cli { 9 | #[clap(long, help = "Config file path", env = "RBUILDER_CONFIG")] 10 | config: PathBuf, 11 | } 12 | 13 | #[tokio::main] 14 | async fn main() -> eyre::Result<()> { 15 | let cli = Cli::parse(); 16 | 17 | let config_path = &cli.config; 18 | let _: Config = load_config_toml_and_env(config_path)?; 19 | 20 | println!("Config file '{}' is valid", config_path.display()); 21 | 22 | Ok(()) 23 | } 24 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/block_orders/order_dumper.rs: -------------------------------------------------------------------------------- 1 | //! 
Soon to be replaced by mockall 2 | 3 | use std::{collections::VecDeque, sync::Arc}; 4 | 5 | use crate::primitives::{OrderId, SimulatedOrder}; 6 | 7 | use super::SimulatedOrderSink; 8 | pub enum OrderStoreAction { 9 | Insert(Arc), 10 | Remove(OrderId), 11 | } 12 | 13 | /// Helper to analyze the generated flow of orders 14 | /// The idea is to create this as a sink for a source object and then 15 | /// when we execute something on the source we check via pop_insert, pop_remove, etc. 16 | /// if the behavior was correct. 17 | #[derive(Default)] 18 | pub struct OrderDumper { 19 | pub actions: VecDeque, 20 | } 21 | 22 | impl SimulatedOrderSink for OrderDumper { 23 | fn insert_order(&mut self, order: Arc) { 24 | self.actions.push_back(OrderStoreAction::Insert(order)); 25 | } 26 | 27 | fn remove_order(&mut self, id: OrderId) -> Option> { 28 | self.actions.push_back(OrderStoreAction::Remove(id)); 29 | None 30 | } 31 | } 32 | 33 | impl Drop for OrderDumper { 34 | fn drop(&mut self) { 35 | // Every action must be analyzed 36 | assert!(self.actions.is_empty()); 37 | } 38 | } 39 | 40 | impl OrderDumper { 41 | /// # Panics 42 | /// empty or first not insert 43 | pub fn pop_insert(&mut self) -> Arc { 44 | if self.actions.is_empty() { 45 | panic!("No actions, expected insert"); 46 | } 47 | match self.actions.pop_front().unwrap() { 48 | OrderStoreAction::Insert(sim_order) => sim_order, 49 | OrderStoreAction::Remove(_) => panic!("Expected insert found remove"), 50 | } 51 | } 52 | 53 | /// # Panics 54 | /// empty or first not remove 55 | /// NOTE(review): the panic message below says "expected insert" — likely a copy-paste from pop_insert; fixing it is a behavior (string) change, flagged only. 56 | pub fn pop_remove(&mut self) -> OrderId { 56 | if self.actions.is_empty() { 57 | panic!("No actions, expected insert"); 58 | } 59 | match self.actions.pop_front().unwrap() { 60 | OrderStoreAction::Insert(_) => panic!("Expected remove found insert"), 61 | OrderStoreAction::Remove(id) => id, 62 | } 63 | } 64 | } 65 | --------------------------------------------------------------------------------
/crates/rbuilder/src/building/block_orders/test_data_generator.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use alloy_primitives::U256; 4 | 5 | use crate::primitives::{AccountNonce, Order, SimValue, SimulatedOrder}; 6 | 7 | /// TestDataGenerator for Orders 8 | #[derive(Default)] 9 | pub struct TestDataGenerator { 10 | pub base: crate::primitives::TestDataGenerator, 11 | } 12 | 13 | impl TestDataGenerator { 14 | pub fn create_account_nonce(&mut self, nonce: u64) -> AccountNonce { 15 | AccountNonce { 16 | nonce, 17 | account: self.base.base.create_address(), 18 | } 19 | } 20 | 21 | pub fn create_sim_order( 22 | &self, 23 | order: Order, 24 | coinbase_profit: u64, 25 | mev_gas_price: u64, 26 | ) -> Arc { 27 | let sim_value = 28 | SimValue::new_test_no_gas(U256::from(coinbase_profit), U256::from(mev_gas_price)); 29 | 30 | Arc::new(SimulatedOrder { 31 | order, 32 | sim_value, 33 | used_state_trace: None, 34 | }) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/builders/parallel_builder/order_intake_store.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use tokio::sync::broadcast; 4 | 5 | use crate::{ 6 | building::{builders::OrderConsumer, SimulatedOrderStore}, 7 | live_builder::simulation::SimulatedOrderCommand, 8 | primitives::SimulatedOrder, 9 | }; 10 | 11 | /// Struct that allow getting the new orders from the order/cancellation stream in the way the parallel builder likes it. 
12 | /// Contains the current whole set of orders but also can be queried for deltas on the orders ONLY if the deltas are all additions 13 | /// Usage: 14 | /// call consume_next_batch to poll the source and internally store the new orders 15 | /// call drain_new_orders/get_orders 16 | pub struct OrderIntakeStore { 17 | order_consumer: OrderConsumer, 18 | order_sink: SimulatedOrderStore, 19 | } 20 | 21 | impl OrderIntakeStore { 22 | pub fn new(orders_input_stream: broadcast::Receiver) -> Self { 23 | let order_sink = SimulatedOrderStore::new(); 24 | Self { 25 | order_consumer: OrderConsumer::new(orders_input_stream), 26 | order_sink, 27 | } 28 | } 29 | 30 | pub fn consume_next_batch(&mut self) -> eyre::Result { 31 | self.order_consumer.blocking_consume_next_commands()?; 32 | self.order_consumer.apply_new_commands(&mut self.order_sink); 33 | Ok(true) 34 | } 35 | 36 | /// returns the new orders since last call if we ONLY had new orders (no cancellations allowed) 37 | pub fn try_drain_new_orders_if_no_cancellations(&mut self) -> Option>> { 38 | self.order_sink.drain_new_orders() 39 | } 40 | 41 | /// All the current orders 42 | pub fn get_orders(&self) -> Vec> { 43 | self.order_sink.get_orders() 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/builders/parallel_builder/readme.md: -------------------------------------------------------------------------------- 1 | # Parallel Builder 2 | The parallel builder is a block building algorithm that runs key components of building in parallel and attempts to do more sophisticated merging of bundles. 3 | 4 | Its primary differentiator is that it identifies groups of conflicting orders and resolves them independently of each other and in parallel. By doing so, we can pipeline the stages of orderflow intake, conflict resolution, and building a final block. 5 | 6 | ## Components and Process Flow 7 |
The **[OrderIntakeStore](order_intake_store.rs)** consumes orders from the intake store. 8 | 2. The **[ConflictFinder](groups.rs)** identifies conflict groups among the orders. 9 | 3. The **[ConflictTaskGenerator](conflict_task_generator.rs)** creates tasks for resolving conflicts. 10 | 4. The **[ConflictResolvingPool](conflict_resolving_pool.rs)** is a pool of threads that process these tasks in parallel, executing merging algorithms defined by tasks. 11 | 5. The **[ResultsAggregator](results_aggregator.rs)** collects the results of conflict resolution, keeping track of the best results. 12 | 6. The **[BlockBuildingResultAssembler](block_building_result_assembler.rs)** constructs blocks from the best results obtained from the `ResultsAggregator`. 13 | 14 | ## Usage live 15 | The parallel builder requires extra configuration options to be used live. Here is an example for your config file: 16 | 17 | ``` 18 | [[builders]] 19 | name = "parallel" 20 | algo = "parallel-builder" 21 | discard_txs = true 22 | num_threads = 25 23 | ``` 24 | 25 | ## Backtesting 26 | The parallel builder can be backtested. However, it is quite slow due to how it is currently implemented. This isn't reflective of the latency performance in the live environment and could be improved with more work. -------------------------------------------------------------------------------- /crates/rbuilder/src/building/builders/parallel_builder/task.rs: -------------------------------------------------------------------------------- 1 | use std::{cmp::Ordering, time::Instant}; 2 | 3 | use super::ConflictGroup; 4 | 5 | /// ConflictTask provides a task for resolving a [ConflictGroup] with a specific [Algorithm]. 6 | #[derive(Debug, Clone)] 7 | pub struct ConflictTask { 8 | pub group_idx: usize, 9 | pub algorithm: Algorithm, 10 | pub priority: TaskPriority, 11 | pub group: ConflictGroup, 12 | pub created_at: Instant, 13 | } 14 | 15 | /// TaskPriority provides a priority for a [ConflictTask].
16 | #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)] 17 | pub enum TaskPriority { 18 | Low = 0, 19 | Medium = 1, 20 | High = 2, 21 | } 22 | 23 | impl TaskPriority { 24 | pub fn display(&self) -> &str { 25 | match self { 26 | TaskPriority::Low => "Low", 27 | TaskPriority::Medium => "Medium", 28 | TaskPriority::High => "High", 29 | } 30 | } 31 | } 32 | 33 | /// [PartialEq] [Eq] [PartialOrd] [Ord] are the traits that are required for a [ConflictTask] to be used in a priority queue. 34 | impl PartialEq for ConflictTask { 35 | fn eq(&self, other: &Self) -> bool { 36 | self.priority == other.priority 37 | } 38 | } 39 | 40 | impl Eq for ConflictTask {} 41 | 42 | impl PartialOrd for ConflictTask { 43 | fn partial_cmp(&self, other: &Self) -> Option { 44 | Some(self.cmp(other)) 45 | } 46 | } 47 | 48 | impl Ord for ConflictTask { 49 | fn cmp(&self, other: &Self) -> Ordering { 50 | // Higher priority first, then earlier created_at 51 | other 52 | .priority 53 | .cmp(&self.priority) 54 | .then_with(|| self.created_at.cmp(&other.created_at)) 55 | } 56 | } 57 | 58 | /// Algorithm provides an algorithm for resolving a [ConflictGroup]. 59 | /// Initially these are all algorithms that produce a sequence of orders to execute. 60 | #[derive(Debug, Clone, Copy)] 61 | pub enum Algorithm { 62 | /// `Greedy` checks the following orderings: max profit, mev gas price 63 | Greedy, 64 | /// `ReverseGreedy` checks the reverse greedy orderings: e.g. min profit, min mev gas price first 65 | ReverseGreedy, 66 | /// `Length` checks the length based orderings 67 | Length, 68 | /// `AllPermutations` checks all possible permutations of the group. 69 | AllPermutations, 70 | /// `Random` checks random permutations of the group.
71 | Random { seed: u64, count: usize }, 72 | } 73 | 74 | #[cfg(test)] 75 | mod tests { 76 | use super::*; 77 | 78 | #[test] 79 | fn test_task_priority_ordering() { 80 | assert!(TaskPriority::Low < TaskPriority::Medium); 81 | assert!(TaskPriority::Medium < TaskPriority::High); 82 | assert!(TaskPriority::Low < TaskPriority::High); 83 | } 84 | 85 | #[test] 86 | fn test_task_priority_display() { 87 | assert_eq!(TaskPriority::Low.display(), "Low"); 88 | assert_eq!(TaskPriority::Medium.display(), "Medium"); 89 | assert_eq!(TaskPriority::High.display(), "High"); 90 | } 91 | 92 | #[test] 93 | fn test_task_priority_equality() { 94 | assert_eq!(TaskPriority::Low, TaskPriority::Low); 95 | assert_ne!(TaskPriority::Low, TaskPriority::Medium); 96 | assert_ne!(TaskPriority::Low, TaskPriority::High); 97 | } 98 | 99 | // to-do: test equal priority ordering by created_at 100 | } 101 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/evm.rs: -------------------------------------------------------------------------------- 1 | use crate::building::precompile_cache::{PrecompileCache, WrappedPrecompile}; 2 | use parking_lot::Mutex; 3 | use reth_evm::{ 4 | eth::EthEvmContext, EthEvm, EthEvmFactory, Evm, EvmEnv, EvmFactory as RethEvmFactory, 5 | }; 6 | use revm::{ 7 | context::{ 8 | result::{EVMError, HaltReason}, 9 | TxEnv, 10 | }, 11 | handler::EthPrecompiles, 12 | inspector::NoOpInspector, 13 | interpreter::interpreter::EthInterpreter, 14 | primitives::hardfork::SpecId, 15 | Database, Inspector, 16 | }; 17 | use std::sync::Arc; 18 | 19 | /// Custom trait to abstract over EVM construction with a cleaner and more concrete 20 | /// interface than the `Evm` trait from `alloy-revm`. 21 | /// 22 | /// # Motivation 23 | /// 24 | /// The `alloy_revm::Evm` trait comes with a large number of associated types and trait 25 | /// bounds. 
This new `EvmFactory` trait is designed to encapsulate those complexities, 26 | /// providing an EVM interface less dependent on `alloy-revm` crate. 27 | /// 28 | /// It is particularly useful in reducing trait bound noise in other parts of the codebase 29 | /// (i.e. `execute_evm` in `order_commit`), and improves modularity. 30 | /// 31 | /// See [`EthCachedEvmFactory`] for an implementation that integrates precompile 32 | /// caching and uses `reth_evm::EthEvm` internally. 33 | pub trait EvmFactory { 34 | type Evm: Evm< 35 | DB = DB, 36 | Tx = TxEnv, 37 | HaltReason = HaltReason, 38 | Error = EVMError, 39 | Spec = SpecId, 40 | > 41 | where 42 | DB: Database, 43 | I: Inspector>; 44 | 45 | /// Create an EVM instance with default (no-op) inspector. 46 | fn create_evm(&self, db: DB, env: EvmEnv) -> Self::Evm 47 | where 48 | DB: Database; 49 | 50 | /// Create an EVM instance with a provided inspector. 51 | fn create_evm_with_inspector( 52 | &self, 53 | db: DB, 54 | env: EvmEnv, 55 | inspector: I, 56 | ) -> Self::Evm 57 | where 58 | DB: Database, 59 | I: Inspector, EthInterpreter>; 60 | } 61 | 62 | #[derive(Debug, Clone, Default)] 63 | pub struct EthCachedEvmFactory { 64 | evm_factory: EthEvmFactory, 65 | cache: Arc>, 66 | } 67 | 68 | /// Implementation of the `EvmFactory` trait for `EthCachedEvmFactory`. 69 | /// 70 | /// This implementation uses `reth_evm::EthEvm` internally and provides a concrete 71 | /// type for the `Evm` trait. 72 | /// 73 | /// It also integrates precompile caching using the [`PrecompileCache`] and 74 | /// [`WrappedPrecompile`] types. 
75 | impl EvmFactory for EthCachedEvmFactory { 76 | type Evm 77 | = EthEvm> 78 | where 79 | DB: Database, 80 | I: Inspector>; 81 | 82 | fn create_evm(&self, db: DB, env: EvmEnv) -> Self::Evm 83 | where 84 | DB: Database, 85 | { 86 | let evm = self 87 | .evm_factory 88 | .create_evm(db, env) 89 | .into_inner() 90 | .with_precompiles(WrappedPrecompile::new( 91 | EthPrecompiles::default(), 92 | self.cache.clone(), 93 | )); 94 | 95 | EthEvm::new(evm, false) 96 | } 97 | 98 | fn create_evm_with_inspector( 99 | &self, 100 | db: DB, 101 | input: EvmEnv, 102 | inspector: I, 103 | ) -> Self::Evm 104 | where 105 | DB: Database, 106 | I: Inspector, EthInterpreter>, 107 | { 108 | EthEvm::new( 109 | self.create_evm(db, input) 110 | .into_inner() 111 | .with_inspector(inspector), 112 | true, 113 | ) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/fmt.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Write; 2 | 3 | use crate::{ 4 | primitives::fmt::{write_order, write_sim_value}, 5 | utils::fmt::write_indent, 6 | }; 7 | 8 | use super::ExecutionResult; 9 | 10 | pub fn write_exec_res( 11 | indent: usize, 12 | buf: &mut Buffer, 13 | exec_res: &ExecutionResult, 14 | ) -> std::fmt::Result { 15 | write_indent(indent, buf)?; 16 | buf.write_str(&format!("ExecResult {}:\n", exec_res.order.id()))?; 17 | 18 | write_indent(indent + 1, buf)?; 19 | buf.write_str("Sim:\n")?; 20 | 21 | write_sim_value(indent + 2, buf, &exec_res.inplace_sim)?; 22 | 23 | write_indent(indent + 1, buf)?; 24 | buf.write_str("Order:\n")?; 25 | 26 | write_order(indent + 2, buf, &exec_res.order) 27 | } 28 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/precompile_cache.rs: -------------------------------------------------------------------------------- 1 | use crate::telemetry::{inc_precompile_cache_hits, 
inc_precompile_cache_misses}; 2 | use ahash::HashMap; 3 | use alloy_primitives::{Address, Bytes}; 4 | use derive_more::{Deref, DerefMut}; 5 | use lru::LruCache; 6 | use parking_lot::Mutex; 7 | use revm::{ 8 | context::{Cfg, ContextTr}, 9 | handler::PrecompileProvider, 10 | interpreter::{InputsImpl, InterpreterResult}, 11 | primitives::hardfork::SpecId, 12 | }; 13 | use std::{num::NonZeroUsize, sync::Arc}; 14 | 15 | /// A precompile cache that stores precompile call results by precompile address. 16 | #[derive(Deref, DerefMut, Default, Debug)] 17 | pub struct PrecompileCache(HashMap); 18 | 19 | /// Precompile result LRU cache stored by `(spec id, input, gas limit)` key. 20 | pub type PrecompileResultCache = LruCache<(SpecId, Bytes, u64), Result>; 21 | 22 | /// A custom precompile that contains the cache and precompile it wraps. 23 | #[derive(Clone)] 24 | pub struct WrappedPrecompile

{ 25 | /// The precompile to wrap. 26 | precompile: P, 27 | /// The cache to use. 28 | cache: Arc>, 29 | /// The spec id to use. 30 | spec: SpecId, 31 | } 32 | 33 | impl

WrappedPrecompile

{ 34 | /// Given a [`PrecompileProvider`] and cache for a specific precompiles, create a 35 | /// wrapper that can be used inside Evm. 36 | pub fn new(precompile: P, cache: Arc>) -> Self { 37 | WrappedPrecompile { 38 | precompile, 39 | cache: cache.clone(), 40 | spec: SpecId::default(), 41 | } 42 | } 43 | } 44 | 45 | impl> PrecompileProvider 46 | for WrappedPrecompile

47 | { 48 | type Output = P::Output; 49 | 50 | fn set_spec(&mut self, spec: ::Spec) -> bool { 51 | self.precompile.set_spec(spec.clone()); 52 | self.spec = spec.into(); 53 | true 54 | } 55 | 56 | fn run( 57 | &mut self, 58 | context: &mut CTX, 59 | address: &Address, 60 | inputs: &InputsImpl, 61 | is_static: bool, 62 | gas_limit: u64, 63 | ) -> Result, String> { 64 | let key = (self.spec, inputs.input.bytes(context), gas_limit); 65 | 66 | // get the result if it exists 67 | if let Some(precompiles) = self.cache.lock().get_mut(address) { 68 | if let Some(result) = precompiles.get(&key) { 69 | inc_precompile_cache_hits(); 70 | return result.clone().map(Some); 71 | } 72 | } 73 | 74 | inc_precompile_cache_misses(); 75 | 76 | // call the precompile if cache miss 77 | let output = self 78 | .precompile 79 | .run(context, address, inputs, is_static, gas_limit); 80 | 81 | if let Some(output) = output.clone().transpose() { 82 | // insert the result into the cache 83 | self.cache 84 | .lock() 85 | .entry(*address) 86 | .or_insert(PrecompileResultCache::new(NonZeroUsize::new(2048).unwrap())) 87 | .put(key, output); 88 | } 89 | 90 | output 91 | } 92 | 93 | fn warm_addresses(&self) -> Box> { 94 | self.precompile.warm_addresses() 95 | } 96 | 97 | fn contains(&self, address: &Address) -> bool { 98 | self.precompile.contains(address) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/testing/contracts.json: -------------------------------------------------------------------------------- 1 | { 2 | "MevTest": 
"0x6080604052600436106100555760003560e01c80634988880a1461005a5780637da3c3ab1461006f578063d6782ec714610077578063e6d252451461008a578063f9711c221461009d578063f9da581d146100b0575b600080fd5b61006d61006836600461027b565b6100b8565b005b61006d610159565b61006d61008536600461029d565b610163565b61006d61009836600461029d565b61017b565b61006d6100ab36600461029d565b6101b0565b61006d610240565b815481811461010d5760405162461bcd60e51b815260206004820152601860248201527f4f6c642076616c756520646f6573206e6f74206d617463680000000000000000604482015260640160405180910390fd5b600061011a8360016102cd565b808555905034156101535760405141903480156108fc02916000818181858888f19350505050158015610151573d6000803e3d6000fd5b505b50505050565b610161610159565b565b6101776001600160a01b03821631476102cd565b5050565b6040516001600160a01b038216903480156108fc02916000818181858888f19350505050158015610177573d6000803e3d6000fd5b60006040516101be9061026f565b604051809103906000f0801580156101da573d6000803e3d6000fd5b50604051631beb261560e01b81526001600160a01b03848116600483015291925090821690631beb26159034906024016000604051808303818588803b15801561022357600080fd5b505af1158015610237573d6000803e3d6000fd5b50505050505050565b60405141903480156108fc02916000818181858888f1935050505015801561026c573d6000803e3d6000fd5b50565b60bf806102f583390190565b6000806040838503121561028e57600080fd5b50508035926020909101359150565b6000602082840312156102af57600080fd5b81356001600160a01b03811681146102c657600080fd5b9392505050565b808201808211156102ee57634e487b7160e01b600052601160045260246000fd5b9291505056fe6080604052348015600f57600080fd5b5060a280601d6000396000f3fe608060405260043610601c5760003560e01c80631beb2615146021575b600080fd5b6030602c366004603e565b6032565b005b806001600160a01b0316ff5b600060208284031215604f57600080fd5b81356001600160a01b0381168114606557600080fd5b939250505056fea26469706673582212207422a0f368426edbe9d06fc472e76995dc7edc7b9e20673c4ab45757ae32f30064736f6c634300081a0033a26469706673582212201b4bf34a5948cd633421a6c6648a0db2ec7df1d12ccf2973d2cf78dd6b5775e464736f6c634300081
a0033", 3 | "MevTestInitBytecode": "0x6080604052348015600f57600080fd5b506103e98061001f6000396000f3fe6080604052600436106100555760003560e01c80634988880a1461005a5780637da3c3ab1461006f578063d6782ec714610077578063e6d252451461008a578063f9711c221461009d578063f9da581d146100b0575b600080fd5b61006d61006836600461027b565b6100b8565b005b61006d610159565b61006d61008536600461029d565b610163565b61006d61009836600461029d565b61017b565b61006d6100ab36600461029d565b6101b0565b61006d610240565b815481811461010d5760405162461bcd60e51b815260206004820152601860248201527f4f6c642076616c756520646f6573206e6f74206d617463680000000000000000604482015260640160405180910390fd5b600061011a8360016102cd565b808555905034156101535760405141903480156108fc02916000818181858888f19350505050158015610151573d6000803e3d6000fd5b505b50505050565b610161610159565b565b6101776001600160a01b03821631476102cd565b5050565b6040516001600160a01b038216903480156108fc02916000818181858888f19350505050158015610177573d6000803e3d6000fd5b60006040516101be9061026f565b604051809103906000f0801580156101da573d6000803e3d6000fd5b50604051631beb261560e01b81526001600160a01b03848116600483015291925090821690631beb26159034906024016000604051808303818588803b15801561022357600080fd5b505af1158015610237573d6000803e3d6000fd5b50505050505050565b60405141903480156108fc02916000818181858888f1935050505015801561026c573d6000803e3d6000fd5b50565b60bf806102f583390190565b6000806040838503121561028e57600080fd5b50508035926020909101359150565b6000602082840312156102af57600080fd5b81356001600160a01b03811681146102c657600080fd5b9392505050565b808201808211156102ee57634e487b7160e01b600052601160045260246000fd5b9291505056fe6080604052348015600f57600080fd5b5060a280601d6000396000f3fe608060405260043610601c5760003560e01c80631beb2615146021575b600080fd5b6030602c366004603e565b6032565b005b806001600160a01b0316ff5b600060208284031215604f57600080fd5b81356001600160a01b0381168114606557600080fd5b939250505056fea26469706673582212207422a0f368426edbe9d06fc472e76995dc7edc7b9e20673c4ab45757ae32f30064736f6c634300081a0033a264
69706673582212201b4bf34a5948cd633421a6c6648a0db2ec7df1d12ccf2973d2cf78dd6b5775e464736f6c634300081a0033" 4 | } 5 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/testing/evm_inspector_tests/setup.rs: -------------------------------------------------------------------------------- 1 | use crate::building::{ 2 | cached_reads::LocalCachedReads, 3 | evm::EvmFactory, 4 | evm_inspector::{RBuilderEVMInspector, UsedStateTrace}, 5 | testing::test_chain_state::{BlockArgs, NamedAddr, TestChainState, TestContracts, TxArgs}, 6 | BlockState, 7 | }; 8 | use alloy_primitives::Address; 9 | use reth_evm::Evm; 10 | use reth_primitives::{Recovered, TransactionSigned}; 11 | 12 | #[derive(Debug)] 13 | pub struct TestSetup { 14 | test_chain: TestChainState, 15 | } 16 | 17 | impl TestSetup { 18 | pub fn new() -> eyre::Result { 19 | Ok(Self { 20 | test_chain: TestChainState::new(BlockArgs::default())?, 21 | }) 22 | } 23 | 24 | pub fn named_address(&self, named_addr: NamedAddr) -> eyre::Result

{ 25 | self.test_chain.named_address(named_addr) 26 | } 27 | 28 | pub fn test_contract_address(&self) -> eyre::Result
{ 29 | self.test_chain.named_address(NamedAddr::MevTest) 30 | } 31 | 32 | pub fn make_transfer_tx( 33 | &self, 34 | from: NamedAddr, 35 | to: NamedAddr, 36 | value: u64, 37 | ) -> eyre::Result> { 38 | let tx_args = TxArgs::new(from, 0).to(to).value(value); 39 | let tx = self.test_chain.sign_tx(tx_args)?; 40 | Ok(tx) 41 | } 42 | 43 | pub fn make_increment_value_tx( 44 | &self, 45 | slot: u64, 46 | current_value: u64, 47 | ) -> eyre::Result> { 48 | let tx_args = TxArgs::new_increment_value(NamedAddr::User(0), 0, slot, current_value); 49 | let tx = self.test_chain.sign_tx(tx_args)?; 50 | Ok(tx) 51 | } 52 | 53 | pub fn make_deploy_mev_test_tx(&self) -> eyre::Result> { 54 | let mev_test_init_bytecode = TestContracts::load().mev_test_init_bytecode; 55 | let tx_args = TxArgs::new(NamedAddr::User(0), 0).input(mev_test_init_bytecode.into()); 56 | let tx = self.test_chain.sign_tx(tx_args)?; 57 | Ok(tx) 58 | } 59 | 60 | pub fn make_test_read_balance_tx( 61 | &self, 62 | read_balance_addr: Address, 63 | value: u64, 64 | ) -> eyre::Result> { 65 | let tx_args = 66 | TxArgs::new_test_read_balance(NamedAddr::User(0), 0, read_balance_addr, value); 67 | let tx = self.test_chain.sign_tx(tx_args)?; 68 | Ok(tx) 69 | } 70 | 71 | pub fn make_test_ephemeral_contract_destruct_tx( 72 | &self, 73 | refund_addr: Address, 74 | value: u64, 75 | ) -> eyre::Result> { 76 | let tx_args = 77 | TxArgs::new_test_ephemeral_contract_destruct(NamedAddr::User(0), 0, refund_addr) 78 | .value(value); 79 | let tx = self.test_chain.sign_tx(tx_args)?; 80 | Ok(tx) 81 | } 82 | 83 | pub fn inspect_tx_without_commit( 84 | &self, 85 | tx: Recovered, 86 | ) -> eyre::Result { 87 | let mut used_state_trace = UsedStateTrace::default(); 88 | let mut inspector = RBuilderEVMInspector::new(&tx, Some(&mut used_state_trace)); 89 | let mut local_cached_reads = LocalCachedReads::default(); 90 | 91 | // block state 92 | let state_provider = self.test_chain.provider_factory().latest()?; 93 | let mut block_state = 
BlockState::new(state_provider); 94 | let mut db_ref = block_state.new_db_ref( 95 | &self.test_chain.block_building_context().shared_cached_reads, 96 | &mut local_cached_reads, 97 | ); 98 | 99 | // execute transaction 100 | { 101 | let ctx = self.test_chain.block_building_context(); 102 | let mut evm = ctx.evm_factory.create_evm_with_inspector( 103 | db_ref.as_mut(), 104 | ctx.evm_env.clone(), 105 | &mut inspector, 106 | ); 107 | evm.transact(&tx) 108 | .map_err(|e| eyre::eyre!("execution failure: {:?}", e))?; 109 | } 110 | 111 | Ok(used_state_trace) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/testing/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | pub mod bundle_tests; 3 | #[cfg(test)] 4 | pub mod evm_inspector_tests; 5 | pub mod test_chain_state; 6 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/tracers.rs: -------------------------------------------------------------------------------- 1 | use crate::building::evm_inspector::UsedStateTrace; 2 | 3 | /// Trait to trace ANY use of an EVM instance for metrics 4 | pub trait SimulationTracer { 5 | /// En EVM instance executed a tx consuming gas. 6 | /// This includes reverting transactions. 7 | fn add_gas_used(&mut self, _gas: u64) {} 8 | 9 | /// If tracer returns true tx_commit will call add_used_state_trace with the given transaction trace. 
10 | fn should_collect_used_state_trace(&self) -> bool { 11 | false 12 | } 13 | 14 | fn add_used_state_trace(&mut self, _trace: &UsedStateTrace) {} 15 | 16 | fn get_used_state_tracer(&self) -> Option<&UsedStateTrace> { 17 | None 18 | } 19 | } 20 | 21 | impl SimulationTracer for () {} 22 | 23 | #[derive(Debug, Default, Clone)] 24 | pub struct GasUsedSimulationTracer { 25 | pub used_gas: u64, 26 | } 27 | 28 | impl SimulationTracer for GasUsedSimulationTracer { 29 | fn add_gas_used(&mut self, gas: u64) { 30 | self.used_gas += gas; 31 | } 32 | } 33 | 34 | /// Tracer that accumulates gas and used state. 35 | #[derive(Debug)] 36 | pub struct AccumulatorSimulationTracer { 37 | pub used_gas: u64, 38 | pub used_state_trace: UsedStateTrace, 39 | } 40 | 41 | impl AccumulatorSimulationTracer { 42 | pub fn new() -> Self { 43 | Self { 44 | used_gas: 0, 45 | used_state_trace: UsedStateTrace::default(), 46 | } 47 | } 48 | } 49 | 50 | impl Default for AccumulatorSimulationTracer { 51 | fn default() -> Self { 52 | Self::new() 53 | } 54 | } 55 | 56 | impl SimulationTracer for AccumulatorSimulationTracer { 57 | fn add_gas_used(&mut self, gas: u64) { 58 | self.used_gas += gas; 59 | } 60 | 61 | fn should_collect_used_state_trace(&self) -> bool { 62 | true 63 | } 64 | 65 | fn add_used_state_trace(&mut self, trace: &UsedStateTrace) { 66 | self.used_state_trace.append_trace(trace); 67 | } 68 | 69 | fn get_used_state_tracer(&self) -> Option<&UsedStateTrace> { 70 | Some(&self.used_state_trace) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /crates/rbuilder/src/building/tx_sim_cache/evm_db.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::Address; 2 | use alloy_primitives::B256; 3 | use alloy_primitives::U256; 4 | use revm::{ 5 | state::{AccountInfo, Bytecode}, 6 | Database, 7 | }; 8 | 9 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] 10 | pub enum AccessRecord { 11 | Account { 12 
| address: Address, 13 | result: Option, 14 | }, 15 | Storage { 16 | address: Address, 17 | index: U256, 18 | result: U256, 19 | }, 20 | } 21 | 22 | #[derive(Debug, Clone)] 23 | pub struct TxStateAccessTrace { 24 | pub trace: Vec, 25 | } 26 | 27 | impl TxStateAccessTrace { 28 | fn new() -> Self { 29 | Self { trace: Vec::new() } 30 | } 31 | 32 | fn push(&mut self, record: AccessRecord) { 33 | self.trace.push(record); 34 | } 35 | } 36 | 37 | /// revm database wrapper that records state access 38 | #[derive(Debug)] 39 | pub struct EVMRecordingDatabase { 40 | pub should_record: bool, 41 | pub inner_db: DB, 42 | pub recorded_trace: TxStateAccessTrace, 43 | } 44 | 45 | impl EVMRecordingDatabase { 46 | pub fn new(inner_db: DB, should_record: bool) -> Self { 47 | Self { 48 | inner_db, 49 | recorded_trace: TxStateAccessTrace::new(), 50 | should_record, 51 | } 52 | } 53 | } 54 | 55 | impl Database for EVMRecordingDatabase { 56 | type Error = DB::Error; 57 | 58 | fn basic(&mut self, address: Address) -> Result, Self::Error> { 59 | let result = self.inner_db.basic(address)?; 60 | if !self.should_record { 61 | return Ok(result); 62 | } 63 | 64 | self.recorded_trace.push(AccessRecord::Account { 65 | address, 66 | result: result.as_ref().map(|r| r.copy_without_code()), 67 | }); 68 | Ok(result) 69 | } 70 | 71 | fn code_by_hash(&mut self, code_hash: B256) -> Result { 72 | self.inner_db.code_by_hash(code_hash) 73 | } 74 | 75 | fn storage(&mut self, address: Address, index: U256) -> Result { 76 | let result = self.inner_db.storage(address, index)?; 77 | if !self.should_record { 78 | return Ok(result); 79 | } 80 | self.recorded_trace.push(AccessRecord::Storage { 81 | address, 82 | index, 83 | result, 84 | }); 85 | Ok(result) 86 | } 87 | 88 | fn block_hash(&mut self, number: u64) -> Result { 89 | self.inner_db.block_hash(number) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /crates/rbuilder/src/integration/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod playground; 2 | mod simple; 3 | -------------------------------------------------------------------------------- /crates/rbuilder/src/integration/test_data/blocklist.json: -------------------------------------------------------------------------------- 1 | [ 2 | "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC" 3 | ] -------------------------------------------------------------------------------- /crates/rbuilder/src/integration/test_data/config-playground-http-blocklist.toml: -------------------------------------------------------------------------------- 1 | 2 | chain = "$HOME/.playground/devnet/genesis.json" 3 | reth_datadir = "$HOME/.playground/devnet/data_reth" 4 | relay_secret_key = "5eae315483f028b5cdd5d1090ff0c7618b18737ea9bf3c35047189db22835c48" 5 | el_node_ipc_path = "$HOME/.playground/devnet/reth.ipc" 6 | live_builders = ["mgp-ordering"] 7 | enabled_relays = ["playground"] 8 | log_level = "info,rbuilder=debug" 9 | coinbase_secret_key = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 10 | blocklist_url_max_age_secs = 5 11 | watchdog_timeout_sec = 15 12 | blocklist = "http://127.0.0.1:1934" 13 | root_hash_use_sparse_trie = true 14 | root_hash_compare_sparse_trie = false 15 | -------------------------------------------------------------------------------- /crates/rbuilder/src/integration/test_data/config-playground.toml: -------------------------------------------------------------------------------- 1 | 2 | chain = "$HOME/.playground/devnet/genesis.json" 3 | reth_datadir = "$HOME/.playground/devnet/data_reth" 4 | relay_secret_key = "5eae315483f028b5cdd5d1090ff0c7618b18737ea9bf3c35047189db22835c48" 5 | el_node_ipc_path = "$HOME/.playground/devnet/reth.ipc" 6 | live_builders = ["mgp-ordering"] 7 | enabled_relays = ["playground"] 8 | log_level = "info,rbuilder=debug" 9 | coinbase_secret_key = 
"ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 10 | blocklist_file_path = "./src/integration/test_data/blocklist.json" 11 | root_hash_use_sparse_trie = true 12 | root_hash_compare_sparse_trie = false 13 | -------------------------------------------------------------------------------- /crates/rbuilder/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod backtest; 2 | pub mod beacon_api_client; 3 | pub mod building; 4 | pub mod integration; 5 | pub mod live_builder; 6 | pub mod mev_boost; 7 | pub mod primitives; 8 | pub mod provider; 9 | pub mod roothash; 10 | pub mod telemetry; 11 | pub mod utils; 12 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bid_observer.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::U256; 2 | use reth_primitives::SealedBlock; 3 | 4 | use crate::{ 5 | building::BuiltBlockTrace, live_builder::payload_events::MevBoostSlotData, 6 | mev_boost::submission::SubmitBlockRequest, 7 | }; 8 | 9 | /// Trait that receives every bid made by us to the relays. 10 | pub trait BidObserver: std::fmt::Debug { 11 | /// This callback is executed after the bid was made so it gives away ownership of the data. 12 | /// This should NOT block since it's executed in the submitting thread. 
13 | fn block_submitted( 14 | &self, 15 | slot_data: &MevBoostSlotData, 16 | sealed_block: &SealedBlock, 17 | submit_block_request: &SubmitBlockRequest, 18 | built_block_trace: &BuiltBlockTrace, 19 | builder_name: String, 20 | best_bid_value: U256, 21 | ); 22 | } 23 | 24 | #[derive(Debug)] 25 | pub struct NullBidObserver {} 26 | 27 | impl BidObserver for NullBidObserver { 28 | fn block_submitted( 29 | &self, 30 | _slot_data: &MevBoostSlotData, 31 | _sealed_block: &SealedBlock, 32 | _submit_block_request: &SubmitBlockRequest, 33 | _built_block_trace: &BuiltBlockTrace, 34 | _builder_name: String, 35 | _best_bid_value: U256, 36 | ) { 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bid_observer_multiplexer.rs: -------------------------------------------------------------------------------- 1 | use reth_primitives::SealedBlock; 2 | 3 | use crate::{ 4 | building::BuiltBlockTrace, live_builder::payload_events::MevBoostSlotData, 5 | mev_boost::submission::SubmitBlockRequest, 6 | }; 7 | 8 | use super::bid_observer::BidObserver; 9 | 10 | /// Implements BidObserver forwarding all calls to several BidObservers. 
11 | #[derive(Default)] 12 | pub struct BidObserverMultiplexer { 13 | observers: Vec>, 14 | } 15 | 16 | impl std::fmt::Debug for BidObserverMultiplexer { 17 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 18 | f.debug_struct("BidObserverMultiplexer").finish() 19 | } 20 | } 21 | 22 | impl BidObserverMultiplexer { 23 | pub fn push(&mut self, obs: Box) { 24 | self.observers.push(obs); 25 | } 26 | } 27 | 28 | impl BidObserver for BidObserverMultiplexer { 29 | fn block_submitted( 30 | &self, 31 | slot_data: &MevBoostSlotData, 32 | sealed_block: &SealedBlock, 33 | submit_block_request: &SubmitBlockRequest, 34 | built_block_trace: &BuiltBlockTrace, 35 | builder_name: String, 36 | best_bid_value: alloy_primitives::U256, 37 | ) { 38 | for obs in &self.observers { 39 | obs.block_submitted( 40 | slot_data, 41 | sealed_block, 42 | submit_block_request, 43 | built_block_trace, 44 | builder_name.clone(), 45 | best_bid_value, 46 | ); 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bid_value_source/best_bid_sync_source.rs: -------------------------------------------------------------------------------- 1 | use super::interfaces::{BidValueObs, BidValueSource, CompetitionBid}; 2 | use alloy_primitives::U256; 3 | use parking_lot::Mutex; 4 | use std::sync::Arc; 5 | 6 | /// Simple struct tracking the last best bid and asking it in a sync way via best_bid_value. 
7 | pub struct BestBidSyncSource { 8 | best_bid_source_inner: Arc, 9 | bid_value_source: Arc, 10 | } 11 | 12 | impl Drop for BestBidSyncSource { 13 | fn drop(&mut self) { 14 | self.bid_value_source 15 | .unsubscribe(self.best_bid_source_inner.clone()); 16 | } 17 | } 18 | 19 | impl BestBidSyncSource { 20 | pub fn new( 21 | bid_value_source: Arc, 22 | block_number: u64, 23 | slot_number: u64, 24 | ) -> Self { 25 | let best_bid_source_inner = Arc::new(BestBidSyncSourceInner::default()); 26 | bid_value_source.subscribe(block_number, slot_number, best_bid_source_inner.clone()); 27 | Self { 28 | best_bid_source_inner, 29 | bid_value_source, 30 | } 31 | } 32 | 33 | pub fn best_bid_value(&self) -> Option { 34 | self.best_bid_source_inner 35 | .best_bid 36 | .lock() 37 | .as_ref() 38 | .map(|bid| bid.bid()) 39 | } 40 | } 41 | 42 | #[derive(Debug, Default)] 43 | struct BestBidSyncSourceInner { 44 | best_bid: Mutex>, 45 | } 46 | 47 | impl BidValueObs for BestBidSyncSourceInner { 48 | fn update_new_bid(&self, bid: CompetitionBid) { 49 | *self.best_bid.lock() = Some(bid); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bid_value_source/interfaces.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::U256; 2 | use mockall::automock; 3 | use std::sync::Arc; 4 | use time::OffsetDateTime; 5 | 6 | #[derive(Clone, Debug)] 7 | pub struct CompetitionBid { 8 | bid: U256, 9 | /// For metrics. Set on creation which is the first time we see it in our process. 
10 | creation_time: OffsetDateTime, 11 | } 12 | 13 | impl CompetitionBid { 14 | pub fn new(bid: U256) -> Self { 15 | Self { 16 | bid, 17 | creation_time: OffsetDateTime::now_utc(), 18 | } 19 | } 20 | 21 | pub fn new_for_deserialization(bid: U256, creation_time: OffsetDateTime) -> Self { 22 | Self { bid, creation_time } 23 | } 24 | 25 | pub fn bid(&self) -> U256 { 26 | self.bid 27 | } 28 | 29 | pub fn creation_time(&self) -> OffsetDateTime { 30 | self.creation_time 31 | } 32 | } 33 | 34 | /// Sync + Send to allow to be called from another thread. 35 | #[automock] 36 | pub trait BidValueObs: std::fmt::Debug + Sync + Send { 37 | /// @Pending: add source of the bid. 38 | fn update_new_bid(&self, bid: CompetitionBid); 39 | } 40 | 41 | /// Object watching a stream af the bids made. 42 | /// Allows us to subscribe to notifications for particular blocks/slots. 43 | pub trait BidValueSource: std::fmt::Debug { 44 | fn subscribe(&self, block_number: u64, slot_number: u64, obs: Arc); 45 | fn unsubscribe(&self, obs: Arc); 46 | } 47 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bid_value_source/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module handles all objects needed to get feedback from the bids made by the competition. 2 | pub mod best_bid_sync_source; 3 | pub mod interfaces; 4 | pub mod null_bid_value_source; 5 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bid_value_source/null_bid_value_source.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use super::interfaces::{BidValueObs, BidValueSource}; 4 | 5 | /// BidValueSource that will NOT report anything. 
6 | #[derive(Debug)] 7 | pub struct NullBidValueSource {} 8 | 9 | impl BidValueSource for NullBidValueSource { 10 | fn subscribe(&self, _block_number: u64, _slot_number: u64, _obs: Arc) {} 11 | fn unsubscribe(&self, _obs: Arc) {} 12 | } 13 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bidding/interfaces.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::{ 4 | building::builders::{ 5 | block_building_helper::{BiddableUnfinishedBlock, BlockBuildingHelper}, 6 | UnfinishedBlockBuildingSink, 7 | }, 8 | live_builder::block_output::bid_value_source::interfaces::BidValueObs, 9 | }; 10 | use alloy_primitives::{BlockNumber, U256}; 11 | use mockall::automock; 12 | use time::OffsetDateTime; 13 | use tokio_util::sync::CancellationToken; 14 | 15 | /// Trait in charge of bidding blocks. 16 | /// It is created for each block / slot. 17 | /// Via UnfinishedBlockBuildingSink it gets the new biddable blocks. 18 | /// Via BidValueObs it gets the competition bids that it should improve when possible. 19 | /// On creation the concrete SlotBidder will get a BidMaker to make the bids. 20 | pub trait SlotBidder: UnfinishedBlockBuildingSink + BidValueObs {} 21 | 22 | /// Bid we want to make. 23 | pub struct Bid { 24 | /// Block we should seal with payout tx of payout_tx_value. 25 | block: BiddableUnfinishedBlock, 26 | /// payout_tx_value should be Some <=> block.can_add_payout_tx() 27 | payout_tx_value: Option, 28 | /// Value we saw in the competition when we decided to make this bid. 29 | seen_competition_bid: Option, 30 | } 31 | 32 | impl std::fmt::Debug for Bid { 33 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 34 | f.debug_struct("Bid") 35 | .field("payout_tx_value", &self.payout_tx_value) 36 | .finish_non_exhaustive() 37 | } 38 | } 39 | 40 | impl Bid { 41 | /// Creates a new Bid instance. 
42 | pub fn new( 43 | block: BiddableUnfinishedBlock, 44 | payout_tx_value: Option, 45 | seen_competition_bid: Option, 46 | ) -> Self { 47 | Self { 48 | block, 49 | payout_tx_value, 50 | seen_competition_bid, 51 | } 52 | } 53 | 54 | pub fn block(self) -> Box { 55 | self.block.into_building_helper() 56 | } 57 | 58 | pub fn payout_tx_value(&self) -> Option { 59 | self.payout_tx_value 60 | } 61 | 62 | pub fn seen_competition_bid(&self) -> Option { 63 | self.seen_competition_bid 64 | } 65 | } 66 | 67 | /// Makes the actual bid (seal + send it to the relay). 68 | pub trait BidMaker: std::fmt::Debug { 69 | fn send_bid(&self, bid: Bid); 70 | } 71 | 72 | /// Info about a onchain block from reth. 73 | #[derive(Eq, PartialEq, Clone, Debug)] 74 | pub struct LandedBlockInfo { 75 | pub block_number: BlockNumber, 76 | pub block_timestamp: OffsetDateTime, 77 | pub builder_balance: U256, 78 | /// true -> we landed this block. 79 | /// If false we could have landed it in coinbase == fee recipient mode but balance wouldn't change so we don't care. 80 | pub beneficiary_is_builder: bool, 81 | } 82 | 83 | /// Trait in charge of bidding. 84 | /// We use one for the whole execution and ask for a [SlotBidder] for each particular slot. 85 | /// After BiddingService creation the builder will try to feed it all the needed update_new_landed_block_detected from the DB history. 86 | /// To avoid exposing how much info the BiddingService uses we don't ask it anything and feed it the max history we are willing to read. 87 | /// After that the builder will update each block via update_new_landed_block_detected. 88 | pub trait BiddingService: std::fmt::Debug + Send + Sync { 89 | fn create_slot_bidder( 90 | &mut self, 91 | block: u64, 92 | slot: u64, 93 | slot_timestamp: OffsetDateTime, 94 | bid_maker: Box, 95 | cancel: CancellationToken, 96 | ) -> Arc; 97 | 98 | /// Access to BiddingServiceWinControl::must_win_block. 
99 | fn win_control(&self) -> Arc; 100 | 101 | /// We are notified about some landed blocks. 102 | /// They are sorted in ascending order. 103 | /// Consecutive calls will have consecutive block numbers. 104 | fn update_new_landed_blocks_detected(&mut self, landed_blocks: &[LandedBlockInfo]); 105 | 106 | /// We let the BiddingService know we had some problem reading landed blocks just in case we wants to change his strategy (eg: stop bidding until next update_new_landed_blocks_detected) 107 | fn update_failed_reading_new_landed_blocks(&mut self); 108 | } 109 | 110 | /// Trait to control the must_win_block feature of the BiddingService. 111 | /// It allows to use BiddingService as a Box (single threaded mutable access) but be able to call must_win_block from another thread. 112 | #[automock] 113 | pub trait BiddingServiceWinControl: Send + Sync + std::fmt::Debug { 114 | /// If called, any current or future SlotBidder working on that block will bid more aggressively to win the block. 115 | fn must_win_block(&self, block: u64); 116 | } 117 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/bidding/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod interfaces; 2 | pub mod sequential_sealer_bid_maker; 3 | pub mod true_block_value_bidder; 4 | pub mod wallet_balance_watcher; 5 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/block_output/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod bid_observer; 2 | pub mod bid_observer_multiplexer; 3 | pub mod bid_value_source; 4 | pub mod bidding; 5 | pub mod block_sealing_bidder_factory; 6 | pub mod relay_submit; 7 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/order_input/mempool_txs_detector.rs: 
-------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use ahash::RandomState; 4 | use alloy_primitives::TxHash; 5 | use dashmap::DashSet; 6 | 7 | use crate::primitives::{ 8 | BundleReplacementData, Order, ShareBundleReplacementKey, TransactionSignedEcRecoveredWithBlobs, 9 | }; 10 | 11 | use super::replaceable_order_sink::ReplaceableOrderSink; 12 | 13 | /// Get's in the middle of a ReplaceableOrder stream a feeds a MempoolTxsDetector. 14 | #[derive(Debug)] 15 | pub struct ReplaceableOrderStreamSniffer { 16 | detector: Arc, 17 | sink: Box, 18 | } 19 | 20 | impl ReplaceableOrderStreamSniffer { 21 | pub fn new(sink: Box, detector: Arc) -> Self { 22 | Self { detector, sink } 23 | } 24 | 25 | pub fn detector(&self) -> Arc { 26 | self.detector.clone() 27 | } 28 | } 29 | 30 | impl ReplaceableOrderSink for ReplaceableOrderStreamSniffer { 31 | fn insert_order(&mut self, order: Order) -> bool { 32 | self.detector.add_tx(&order); 33 | self.sink.insert_order(order) 34 | } 35 | 36 | fn remove_bundle(&mut self, replacement_data: BundleReplacementData) -> bool { 37 | self.sink.remove_bundle(replacement_data) 38 | } 39 | 40 | fn remove_sbundle(&mut self, key: ShareBundleReplacementKey) -> bool { 41 | self.sink.remove_sbundle(key) 42 | } 43 | 44 | fn is_alive(&self) -> bool { 45 | self.sink.is_alive() 46 | } 47 | } 48 | 49 | /// Given a TransactionSignedEcRecoveredWithBlobs answers if the tx is from the mempool or not. 50 | /// Current implementation is super simple, it just checks the tx hash against a set of hashes. 
51 | #[derive(Debug)] 52 | pub struct MempoolTxsDetector { 53 | mempool_txs: DashSet, 54 | } 55 | 56 | impl MempoolTxsDetector { 57 | pub fn new() -> Self { 58 | Self { 59 | mempool_txs: Default::default(), 60 | } 61 | } 62 | 63 | pub fn add_tx(&self, order: &Order) { 64 | if let Order::Tx(mempool_tx) = order { 65 | self.mempool_txs.insert(mempool_tx.tx_with_blobs.hash()); 66 | } 67 | } 68 | 69 | pub fn is_mempool(&self, tx: &TransactionSignedEcRecoveredWithBlobs) -> bool { 70 | self.mempool_txs.contains(&tx.hash()) 71 | } 72 | } 73 | 74 | impl Default for MempoolTxsDetector { 75 | fn default() -> Self { 76 | Self::new() 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/order_input/order_sink.rs: -------------------------------------------------------------------------------- 1 | use mockall::automock; 2 | use tokio::sync::mpsc; 3 | use tracing::info; 4 | 5 | use crate::primitives::{Order, OrderId}; 6 | use core::fmt::Debug; 7 | 8 | /// Receiver of order commands. 9 | /// No replacement/cancellation (or version checking) is considered here. 10 | /// Orders are assumed to be immutable so there is no update. 11 | /// insert_order/remove_order return a bool indicating if the operation was successful. 12 | /// This bool allows the source to cancel notifications on errors if needed. 13 | #[automock] 14 | pub trait OrderSink: Debug + Send { 15 | fn insert_order(&mut self, order: Order) -> bool; 16 | fn remove_order(&mut self, id: OrderId) -> bool; 17 | /// @Pending remove this ugly hack to check if we can stop sending data. 
18 | /// It should be replaced for a better control over object destruction 19 | fn is_alive(&self) -> bool; 20 | } 21 | 22 | /// Just printlns everything 23 | #[derive(Debug)] 24 | pub struct OrderPrinter {} 25 | 26 | impl OrderSink for OrderPrinter { 27 | fn insert_order(&mut self, order: Order) -> bool { 28 | info!(order_id = ?order.id() ,"New order"); 29 | true 30 | } 31 | 32 | fn remove_order(&mut self, id: OrderId) -> bool { 33 | info!(order_id = ?id ,"Cancelled order"); 34 | true 35 | } 36 | 37 | fn is_alive(&self) -> bool { 38 | true 39 | } 40 | } 41 | 42 | impl Drop for OrderPrinter { 43 | fn drop(&mut self) { 44 | println!("OrderPrinter Dropped"); 45 | } 46 | } 47 | 48 | /////////////////////// 49 | 50 | #[derive(Debug, Clone)] 51 | pub enum OrderPoolCommand { 52 | //OrderSink::insert_order 53 | Insert(Order), 54 | //OrderSink::remove_order 55 | Remove(OrderId), 56 | } 57 | 58 | /// Adapts push Order flow to pull flow. 59 | #[derive(Debug)] 60 | pub struct OrderSender2OrderSink { 61 | sender: mpsc::UnboundedSender, 62 | } 63 | 64 | impl OrderSender2OrderSink { 65 | /// returns the OrderSender2OrderSink to get the flow and the UnboundedReceiver to poll the flow. 
66 | pub fn new() -> (Self, mpsc::UnboundedReceiver) { 67 | let (sender, receiver) = mpsc::unbounded_channel(); 68 | (Self { sender }, receiver) 69 | } 70 | } 71 | 72 | impl OrderSink for OrderSender2OrderSink { 73 | fn insert_order(&mut self, order: Order) -> bool { 74 | self.sender.send(OrderPoolCommand::Insert(order)).is_ok() 75 | } 76 | 77 | fn remove_order(&mut self, id: OrderId) -> bool { 78 | self.sender.send(OrderPoolCommand::Remove(id)).is_ok() 79 | } 80 | 81 | fn is_alive(&self) -> bool { 82 | !self.sender.is_closed() 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/order_input/replaceable_order_sink.rs: -------------------------------------------------------------------------------- 1 | use tracing::info; 2 | 3 | use crate::primitives::{BundleReplacementData, Order, ShareBundleReplacementKey}; 4 | use core::fmt::Debug; 5 | 6 | /// Receiver of order commands in a low level order stream (mempool + RPC calls). 7 | /// Orders are assumed to be immutable so there is no update. 8 | /// insert_order/remove_order return a bool indicating if the operation was successful. 9 | /// This bool allows the source to cancel notifications on errors if needed. 10 | /// Some Orders contain replacement_key so they can replace previous ones. 11 | /// Due to source problems insert_order/remove_bundle can arrive out of order so Orders also have a sequence number 12 | /// so we can identify the newest. 13 | pub trait ReplaceableOrderSink: Debug + Send { 14 | fn insert_order(&mut self, order: Order) -> bool; 15 | fn remove_bundle(&mut self, replacement_data: BundleReplacementData) -> bool; 16 | fn remove_sbundle(&mut self, key: ShareBundleReplacementKey) -> bool; 17 | /// @Pending remove this ugly hack to check if we can stop sending data. 
18 | /// It should be replaced for a better control over object destruction 19 | fn is_alive(&self) -> bool; 20 | } 21 | 22 | /// Just printlns everything 23 | #[derive(Debug)] 24 | pub struct ReplaceableOrderPrinter {} 25 | 26 | impl ReplaceableOrderSink for ReplaceableOrderPrinter { 27 | fn insert_order(&mut self, order: Order) -> bool { 28 | info!( 29 | order_id = ?order.id(), 30 | order_rep_info = ?order.replacement_key_and_sequence_number(), 31 | "New order " 32 | ); 33 | true 34 | } 35 | 36 | fn remove_bundle(&mut self, replacement_data: BundleReplacementData) -> bool { 37 | info!(replacement_data=?replacement_data,"Cancelled Bundle"); 38 | true 39 | } 40 | 41 | fn is_alive(&self) -> bool { 42 | true 43 | } 44 | 45 | fn remove_sbundle(&mut self, key: ShareBundleReplacementKey) -> bool { 46 | info!(key=?key,"Cancelled SBundle"); 47 | true 48 | } 49 | } 50 | 51 | impl Drop for ReplaceableOrderPrinter { 52 | fn drop(&mut self) { 53 | println!("OrderPrinter Dropped"); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/testdata/config_with_relay_override.toml: -------------------------------------------------------------------------------- 1 | log_json = true 2 | log_level = "info,rbuilder=debug" 3 | redacted_telemetry_server_port = 6061 4 | redacted_telemetry_server_ip = "0.0.0.0" 5 | full_telemetry_server_port = 6060 6 | full_telemetry_server_ip = "0.0.0.0" 7 | 8 | chain = "mainnet" 9 | reth_datadir = "/mnt/data/reth" 10 | 11 | coinbase_secret_key = "env:COINBASE_SECRET_KEY" 12 | relay_secret_key = "env:RELAY_SECRET_KEY" 13 | optimistic_relay_secret_key = "env:OPTIMISTIC_RELAY_SECRET_KEY" 14 | 15 | # cl_node_url can be a single value, array of values, or passed by an environment variables with values separated with a comma 16 | # cl_node_url = "http://localhost:3500" 17 | cl_node_url = ["env:CL_NODE_URL"] 18 | jsonrpc_server_port = 8645 19 | jsonrpc_server_ip = "0.0.0.0" 20 | 
el_node_ipc_path = "/tmp/reth.ipc" 21 | extra_data = "⚡🤖" 22 | 23 | blocklist_file_path = "./blocklist.json" 24 | 25 | ignore_cancellable_orders = true 26 | 27 | # genesis_fork_version = "0x00112233" 28 | 29 | sbundle_mergeable_signers = [] 30 | live_builders = ["mp-ordering", "mgp-ordering", "merging"] 31 | 32 | enabled_relays = ["playground"] 33 | 34 | [[relays]] 35 | name = "playground" 36 | url = "http://example.com" 37 | mode = "full" 38 | -------------------------------------------------------------------------------- /crates/rbuilder/src/live_builder/watchdog.rs: -------------------------------------------------------------------------------- 1 | use flume::RecvTimeoutError; 2 | use std::{io, time::Duration}; 3 | use tracing::{error, info}; 4 | 5 | /// Spawns a thread that will kill the process if there is no events sent on the channel 6 | /// for the timeout time. 7 | /// context is a string to be logged to be able to distinguish different types of deaths. 8 | pub fn spawn_watchdog_thread(timeout: Duration, context: String) -> io::Result> { 9 | let (sender, receiver) = flume::unbounded(); 10 | std::thread::Builder::new() 11 | .name(String::from("watchdog")) 12 | .spawn(move || { 13 | loop { 14 | match receiver.recv_timeout(timeout) { 15 | Ok(()) => {} 16 | Err(RecvTimeoutError::Timeout) => { 17 | error!(context, "Watchdog timeout"); 18 | std::process::exit(1); 19 | } 20 | Err(RecvTimeoutError::Disconnected) => { 21 | break; 22 | } 23 | } 24 | } 25 | info!( 26 | context, 27 | "Watchdog finished, will kill application in 12 seconds" 28 | ); 29 | 30 | std::thread::sleep(Duration::from_secs(12)); 31 | std::process::exit(1); 32 | })?; 33 | 34 | Ok(sender) 35 | } 36 | -------------------------------------------------------------------------------- /crates/rbuilder/src/mev_boost/error.rs: -------------------------------------------------------------------------------- 1 | use reqwest::{self, StatusCode}; 2 | use serde::{Deserialize, Serialize}; 3 | use 
std::fmt::{self, Debug, Display, Formatter}; 4 | use thiserror::Error; 5 | 6 | #[derive(Error)] 7 | pub enum RelayError { 8 | #[error("Request error: {0}")] 9 | RequestError(#[from] RedactableReqwestError), 10 | #[error("Header error")] 11 | InvalidHeader, 12 | #[error("Relay error: {0}")] 13 | RelayError(#[from] RedactableRelayErrorResponse), 14 | 15 | #[cfg_attr( 16 | not(feature = "redact-sensitive"), 17 | error("Unknown relay response, status: {0}, body: {1}") 18 | )] 19 | #[cfg_attr( 20 | feature = "redact-sensitive", 21 | error("Unknown relay response, status: {0}, body: [REDACTED]") 22 | )] 23 | UnknownRelayError(StatusCode, String), 24 | #[error("Too many requests")] 25 | TooManyRequests, 26 | #[error("Connection error")] 27 | ConnectionError, 28 | #[error("Internal Error")] 29 | InternalError, 30 | } 31 | 32 | impl Debug for RelayError { 33 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 34 | write!(f, "{}", self) 35 | } 36 | } 37 | 38 | impl From for RelayError { 39 | fn from(err: reqwest::Error) -> Self { 40 | RelayError::RequestError(RedactableReqwestError(err)) 41 | } 42 | } 43 | 44 | #[derive(Error)] 45 | pub struct RedactableReqwestError(reqwest::Error); 46 | 47 | impl From for RedactableReqwestError { 48 | fn from(err: reqwest::Error) -> Self { 49 | RedactableReqwestError(err) 50 | } 51 | } 52 | 53 | impl Display for RedactableReqwestError { 54 | #[cfg(not(feature = "redact-sensitive"))] 55 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 56 | write!(f, "{}", self.0) 57 | } 58 | 59 | #[cfg(feature = "redact-sensitive")] 60 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 61 | if self.0.is_builder() { 62 | write!(f, "Redacted Reqwest Error: Builder") 63 | } else if self.0.is_request() { 64 | write!(f, "Redacted Reqwest Error: Request") 65 | } else if self.0.is_redirect() { 66 | write!(f, "Redacted Reqwest Error: Redirect") 67 | } else if self.0.is_status() { 68 | write!(f, "Redacted Reqwest Error: Status") 69 | } else 
if self.0.is_body() { 70 | write!(f, "Redacted Reqwest Error: Body") 71 | } else if self.0.is_decode() { 72 | write!(f, "Redacted Reqwest Error: Decode") 73 | } else { 74 | write!(f, "Redacted Reqwest Error") 75 | } 76 | } 77 | } 78 | 79 | impl Debug for RedactableReqwestError { 80 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 81 | Display::fmt(self, f) 82 | } 83 | } 84 | 85 | #[derive(Error, Clone, Serialize, Deserialize)] 86 | pub struct RedactableRelayErrorResponse { 87 | pub code: Option, 88 | pub message: String, 89 | } 90 | 91 | impl std::fmt::Display for RedactableRelayErrorResponse { 92 | #[cfg(not(feature = "redact-sensitive"))] 93 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 94 | write!( 95 | f, 96 | "Relay error: (code: {}, message: {})", 97 | self.code.unwrap_or_default(), 98 | self.message 99 | ) 100 | } 101 | 102 | #[cfg(feature = "redact-sensitive")] 103 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 104 | write!( 105 | f, 106 | "Relay error: (code: {}, message: [REDACTED])", 107 | self.code.unwrap_or_default(), 108 | ) 109 | } 110 | } 111 | 112 | impl Debug for RedactableRelayErrorResponse { 113 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 114 | Display::fmt(self, f) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /crates/rbuilder/src/mev_boost/fake_mev_boost_relay.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io, 3 | path::PathBuf, 4 | process::{Child, Command}, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub enum FakeMevBoostRelayError { 9 | SpawnError, 10 | BinaryNotFound, 11 | } 12 | 13 | /// Helper struct to run a fake relay for testing. 14 | /// It mainly runs a process and not much more. 15 | /// Usage: 16 | /// - FakeMevBoostRelay::new().spawn(); 17 | /// - Auto kill the child process when the returned FakeMevBoostRelayInstance gets dropped. 
pub struct FakeMevBoostRelay {
    /// Path to the fake-relay binary; when None, "mev-boost-fake-relay" is
    /// resolved from PATH. (Type restored: extraction dropped `PathBuf`.)
    path: Option<PathBuf>,
}

impl Default for FakeMevBoostRelay {
    fn default() -> Self {
        Self::new()
    }
}

/// True when the RUN_TEST_FAKE_MEV_BOOST_RELAY env var is set to "1"
/// (case-insensitive), i.e. the fake relay binary is required to exist.
fn is_enabled() -> bool {
    match std::env::var("RUN_TEST_FAKE_MEV_BOOST_RELAY") {
        Ok(value) => value.to_lowercase() == "1",
        Err(_) => false,
    }
}

impl FakeMevBoostRelay {
    pub fn new() -> Self {
        Self { path: None }
    }

    /// Spawns the fake relay. Panics on spawn errors; returns None when the
    /// binary is missing and not required (see [`is_enabled`]).
    pub fn spawn(self) -> Option<FakeMevBoostRelayInstance> {
        self.try_spawn().unwrap()
    }

    /// (Return type restored: extraction dropped the `Option<FakeMevBoostRelayInstance>` parameter.)
    fn try_spawn(self) -> Result<Option<FakeMevBoostRelayInstance>, FakeMevBoostRelayError> {
        let mut cmd = if let Some(ref prg) = self.path {
            Command::new(prg)
        } else {
            Command::new("mev-boost-fake-relay")
        };
        // Capture stdout, pass stderr through for test debugging.
        cmd.stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::inherit());

        match cmd.spawn() {
            Ok(child) => Ok(Some(FakeMevBoostRelayInstance { child })),
            Err(e) => match e.kind() {
                io::ErrorKind::NotFound => {
                    if is_enabled() {
                        // If the binary is not found but it is required, we should return an error
                        Err(FakeMevBoostRelayError::BinaryNotFound)
                    } else {
                        Ok(None)
                    }
                }
                _ => Err(FakeMevBoostRelayError::SpawnError),
            },
        }
    }
}

#[derive(Debug)]
pub struct FakeMevBoostRelayInstance {
    child: Child,
}

impl FakeMevBoostRelayInstance {
    /// URL where the fake relay listens.
    pub fn endpoint(&self) -> String {
        "http://localhost:8080".to_string()
    }
}

impl Drop for FakeMevBoostRelayInstance {
    fn drop(&mut self) {
        // Auto-kill the child process so tests never leak fake relays.
        self.child.kill().expect("could not kill mev-boost-server");
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[ignore]
    #[test]
    fn test_spawn_fake_mev_boost_server() {
        let srv = FakeMevBoostRelay::new().spawn();
        let _ = match srv {
            Some(srv) => srv,
            None => {
                println!("mev-boost binary not found, skipping test");
                return;
            }
        };
    }
| } 104 | -------------------------------------------------------------------------------- /crates/rbuilder/src/primitives/fmt.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Write; 2 | 3 | use crate::utils::fmt::write_indent; 4 | 5 | use super::{Order, ShareBundleBody, ShareBundleInner, ShareBundleTx, SimValue, SimulatedOrder}; 6 | 7 | pub fn write_share_bundle_tx( 8 | indent: usize, 9 | buf: &mut Buffer, 10 | tx: &ShareBundleTx, 11 | ) -> std::fmt::Result { 12 | write_indent(indent, buf)?; 13 | buf.write_str(&format!( 14 | "TX {} Rev {:?} val {}\n", 15 | tx.tx.hash(), 16 | tx.revert_behavior, 17 | tx.tx.value() 18 | )) 19 | } 20 | 21 | pub fn write_share_bundle_inner( 22 | indent: usize, 23 | buf: &mut Buffer, 24 | inner: &ShareBundleInner, 25 | ) -> std::fmt::Result { 26 | write_indent(indent, buf)?; 27 | buf.write_str(&format!("Inner can skip {} \n", inner.can_skip))?; 28 | for item in &inner.body { 29 | match item { 30 | ShareBundleBody::Tx(tx) => write_share_bundle_tx(indent + 1, buf, tx)?, 31 | ShareBundleBody::Bundle(sb) => write_share_bundle_inner(indent + 1, buf, sb)?, 32 | } 33 | } 34 | Ok(()) 35 | } 36 | 37 | pub fn write_order( 38 | indent: usize, 39 | buf: &mut Buffer, 40 | order: &Order, 41 | ) -> std::fmt::Result { 42 | write_indent(indent, buf)?; 43 | match order { 44 | Order::Bundle(b) => buf.write_str(&format!("B {}\n", b.hash)), 45 | Order::Tx(tx) => buf.write_str(&format!( 46 | "Tx {} val {}\n", 47 | tx.tx_with_blobs.hash(), 48 | tx.tx_with_blobs.value() 49 | )), 50 | Order::ShareBundle(sb) => { 51 | buf.write_str(&format!("ShB {:?}\n", sb.hash))?; 52 | write_share_bundle_inner(indent + 1, buf, &sb.inner_bundle) 53 | } 54 | } 55 | } 56 | 57 | pub fn write_sim_value( 58 | indent: usize, 59 | buf: &mut Buffer, 60 | sim_value: &SimValue, 61 | ) -> std::fmt::Result { 62 | write_indent(indent, buf)?; 63 | std::fmt::write( 64 | buf, 65 | format_args!( 66 | "full coinbase_profit {} full 
mev_gas_price {}", 67 | sim_value.full_profit_info().coinbase_profit(), 68 | sim_value.full_profit_info().mev_gas_price() 69 | ), 70 | )?; 71 | std::fmt::write( 72 | buf, 73 | format_args!( 74 | "non mempool coinbase_profit {} non mempool mev_gas_price {}", 75 | sim_value.non_mempool_profit_info().coinbase_profit(), 76 | sim_value.non_mempool_profit_info().mev_gas_price() 77 | ), 78 | )?; 79 | buf.write_str(" Kickbacks ")?; 80 | for kb in sim_value.paid_kickbacks() { 81 | buf.write_str(&format!("{}->{},", kb.0, kb.1))?; 82 | } 83 | buf.write_str("\n") 84 | } 85 | 86 | pub fn write_sim_order( 87 | indent: usize, 88 | buf: &mut Buffer, 89 | sim_order: &SimulatedOrder, 90 | ) -> std::fmt::Result { 91 | write_indent(indent, buf)?; 92 | std::fmt::write(buf, format_args!("SimulatedOrder {}", sim_order.id()))?; 93 | 94 | write_indent(indent + 1, buf)?; 95 | buf.write_str("Sim")?; 96 | write_sim_value(indent + 2, buf, &sim_order.sim_value)?; 97 | 98 | write_indent(indent + 1, buf)?; 99 | buf.write_str("Order")?; 100 | write_order(indent + 2, buf, &sim_order.order) 101 | } 102 | -------------------------------------------------------------------------------- /crates/rbuilder/src/primitives/order_statistics.rs: -------------------------------------------------------------------------------- 1 | use crate::primitives::Order; 2 | 3 | /// Simple struct to count orders by type. 
4 | #[derive(Clone, Debug, Default)] 5 | pub struct OrderStatistics { 6 | tx_count: i32, 7 | bundle_count: i32, 8 | sbundle_count: i32, 9 | } 10 | 11 | impl OrderStatistics { 12 | pub fn new() -> Self { 13 | Self::default() 14 | } 15 | 16 | pub fn add(&mut self, order: &Order) { 17 | match order { 18 | Order::Bundle(_) => self.tx_count += 1, 19 | Order::Tx(_) => self.bundle_count += 1, 20 | Order::ShareBundle(_) => self.sbundle_count += 1, 21 | } 22 | } 23 | 24 | pub fn remove(&mut self, order: &Order) { 25 | match order { 26 | Order::Bundle(_) => self.tx_count -= 1, 27 | Order::Tx(_) => self.bundle_count -= 1, 28 | Order::ShareBundle(_) => self.sbundle_count -= 1, 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /crates/rbuilder/src/provider/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::roothash::RootHashError; 2 | use crate::{ 3 | building::ThreadBlockBuildingContext, live_builder::simulation::SimulatedOrderCommand, 4 | }; 5 | use alloy_consensus::Header; 6 | use alloy_eips::BlockNumHash; 7 | use alloy_primitives::{BlockHash, BlockNumber, B256}; 8 | use reth::providers::ExecutionOutcome; 9 | use reth_errors::ProviderResult; 10 | use reth_provider::StateProviderBox; 11 | use tokio::sync::broadcast; 12 | use tokio_util::sync::CancellationToken; 13 | 14 | pub mod ipc_state_provider; 15 | pub mod reth_prov; 16 | pub mod state_provider_factory_from_provider_factory; 17 | 18 | /// Main trait to interact with the chain data. 19 | /// Allows to create different backends for chain data access without implementing lots of interfaces as would happen with reth_provider::StateProviderFactory 20 | /// since it only asks for what we really use. 
21 | pub trait StateProviderFactory: Send + Sync { 22 | fn latest(&self) -> ProviderResult; 23 | 24 | fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult; 25 | 26 | fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult; 27 | 28 | fn header(&self, block_hash: &BlockHash) -> ProviderResult>; 29 | 30 | fn block_hash(&self, number: BlockNumber) -> ProviderResult>; 31 | 32 | fn best_block_number(&self) -> ProviderResult; 33 | 34 | fn header_by_number(&self, num: u64) -> ProviderResult>; 35 | 36 | fn last_block_number(&self) -> ProviderResult; 37 | 38 | fn root_hasher(&self, parent_num_hash: BlockNumHash) -> ProviderResult>; 39 | } 40 | 41 | /// trait that computes the roothash for a new block assuming a predefine parent block (given in StateProviderFactory::root_hasher) 42 | /// Ideally, it caches information in each roothash is computes (state_root) so the next one is faster. 43 | /// Before using all run_prefetcher to allow the RootHasher start a prefetcher task that will pre cache root state trie nodes 44 | /// based on what it sees on the simulations. 45 | pub trait RootHasher: std::fmt::Debug + Send + Sync { 46 | /// Must be called once before using. 47 | /// This is too specific and prone to error (you may forget to call it), maybe it's a better idea to pass this to StateProviderFactory::root_hasher and let each RootHasher decide what to do? 48 | fn run_prefetcher( 49 | &self, 50 | simulated_orders: broadcast::Receiver, 51 | cancel: CancellationToken, 52 | ); 53 | 54 | /// State root for changes outcome on top of parent block. 
55 | fn state_root( 56 | &self, 57 | outcome: &ExecutionOutcome, 58 | local_ctx: &mut ThreadBlockBuildingContext, 59 | ) -> Result; 60 | } 61 | -------------------------------------------------------------------------------- /crates/rbuilder/src/provider/reth_prov.rs: -------------------------------------------------------------------------------- 1 | use crate::roothash::RootHashContext; 2 | use crate::utils::RootHasherImpl; 3 | use alloy_consensus::Header; 4 | use alloy_eips::BlockNumHash; 5 | use alloy_primitives::{BlockHash, BlockNumber, B256}; 6 | use reth_errors::ProviderResult; 7 | use reth_provider::{BlockReader, DatabaseProviderFactory, HeaderProvider}; 8 | use reth_provider::{StateCommitmentProvider, StateProviderBox}; 9 | use tracing::error; 10 | 11 | use super::{RootHasher, StateProviderFactory}; 12 | 13 | /// StateProviderFactory based on a reth traits. 14 | #[derive(Clone)] 15 | pub struct StateProviderFactoryFromRethProvider

{ 16 | provider: P, 17 | root_hash_context: RootHashContext, 18 | } 19 | 20 | impl

StateProviderFactoryFromRethProvider

{ 21 | pub fn new(provider: P, root_hash_context: RootHashContext) -> Self { 22 | Self { 23 | provider, 24 | root_hash_context, 25 | } 26 | } 27 | } 28 | 29 | impl

StateProviderFactory for StateProviderFactoryFromRethProvider

30 | where 31 | P: DatabaseProviderFactory 32 | + reth_provider::StateProviderFactory 33 | + HeaderProvider

34 | + StateCommitmentProvider 35 | + Clone 36 | + 'static, 37 | { 38 | fn latest(&self) -> ProviderResult { 39 | self.provider.latest() 40 | } 41 | 42 | fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult { 43 | self.provider.history_by_block_number(block) 44 | } 45 | 46 | fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult { 47 | self.provider.history_by_block_hash(block) 48 | } 49 | 50 | fn header(&self, block_hash: &BlockHash) -> ProviderResult> { 51 | self.provider.header(block_hash) 52 | } 53 | 54 | fn block_hash(&self, number: BlockNumber) -> ProviderResult> { 55 | self.provider.block_hash(number) 56 | } 57 | 58 | fn best_block_number(&self) -> ProviderResult { 59 | self.provider.best_block_number() 60 | } 61 | 62 | fn header_by_number(&self, num: u64) -> ProviderResult> { 63 | self.provider.header_by_number(num) 64 | } 65 | 66 | fn last_block_number(&self) -> ProviderResult { 67 | self.provider.last_block_number() 68 | } 69 | 70 | fn root_hasher(&self, parent_num_hash: BlockNumHash) -> ProviderResult> { 71 | let hasher = self.history_by_block_hash(parent_num_hash.hash)?; 72 | let parent_state_root = self 73 | .provider 74 | .header_by_hash_or_number(parent_num_hash.hash.into())? 
75 | .map(|h| h.state_root); 76 | if parent_state_root.is_none() { 77 | error!("Parent hash is not found (for root_hasher)"); 78 | } 79 | Ok(Box::new(RootHasherImpl::new( 80 | parent_num_hash, 81 | parent_state_root, 82 | self.root_hash_context.clone(), 83 | self.provider.clone(), 84 | hasher, 85 | ))) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /crates/rbuilder/src/provider/state_provider_factory_from_provider_factory.rs: -------------------------------------------------------------------------------- 1 | use alloy_consensus::Header; 2 | use alloy_eips::BlockNumHash; 3 | use alloy_primitives::{BlockHash, BlockNumber, B256}; 4 | use reth_errors::ProviderResult; 5 | use reth_node_api::{NodePrimitives, NodeTypes, NodeTypesWithDB}; 6 | use reth_provider::{ 7 | providers::ProviderNodeTypes, BlockHashReader, BlockNumReader, HeaderProvider, ProviderFactory, 8 | StateProviderBox, 9 | }; 10 | use tracing::error; 11 | 12 | use crate::{ 13 | building::builders::mock_block_building_helper::MockRootHasher, roothash::RootHashContext, 14 | utils::RootHasherImpl, 15 | }; 16 | 17 | use super::{RootHasher, StateProviderFactory}; 18 | 19 | /// StateProviderFactory based on a ProviderFactory. 
20 | #[derive(Clone)] 21 | pub struct StateProviderFactoryFromProviderFactory { 22 | provider: ProviderFactory, 23 | root_hash_context: Option, 24 | } 25 | 26 | impl StateProviderFactoryFromProviderFactory { 27 | /// root_hash_config None -> no roothash (MockRootHasher) 28 | pub fn new(provider: ProviderFactory, root_hash_context: Option) -> Self { 29 | Self { 30 | provider, 31 | root_hash_context, 32 | } 33 | } 34 | } 35 | 36 | impl StateProviderFactory for StateProviderFactoryFromProviderFactory 37 | where 38 | N: NodeTypesWithDB + ProviderNodeTypes + Clone, 39 | ::Primitives: NodePrimitives, 40 | { 41 | fn latest(&self) -> ProviderResult { 42 | self.provider.latest() 43 | } 44 | 45 | fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult { 46 | self.provider.history_by_block_number(block) 47 | } 48 | 49 | fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult { 50 | self.provider.history_by_block_hash(block) 51 | } 52 | 53 | fn header(&self, block_hash: &BlockHash) -> ProviderResult> { 54 | self.provider.header(block_hash) 55 | } 56 | 57 | fn block_hash(&self, number: BlockNumber) -> ProviderResult> { 58 | self.provider.block_hash(number) 59 | } 60 | 61 | fn best_block_number(&self) -> ProviderResult { 62 | self.provider.best_block_number() 63 | } 64 | 65 | fn header_by_number(&self, num: u64) -> ProviderResult> { 66 | self.provider.header_by_number(num) 67 | } 68 | 69 | fn last_block_number(&self) -> ProviderResult { 70 | self.provider.last_block_number() 71 | } 72 | 73 | fn root_hasher(&self, parent_num_hash: BlockNumHash) -> ProviderResult> { 74 | Ok(if let Some(root_hash_context) = &self.root_hash_context { 75 | let parent_state_root = self 76 | .provider 77 | .header_by_hash_or_number(parent_num_hash.hash.into())? 
78 | .map(|h| h.state_root); 79 | if parent_state_root.is_none() { 80 | error!("Parent hash is not found (for root_hasher)"); 81 | } 82 | Box::new(RootHasherImpl::new( 83 | parent_num_hash, 84 | parent_state_root, 85 | root_hash_context.clone(), 86 | self.provider.clone(), 87 | self.provider.clone(), 88 | )) 89 | } else { 90 | Box::new(MockRootHasher {}) 91 | }) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /crates/rbuilder/src/telemetry/metrics/scope_meter.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | /// Simple struct to measure the time since this is created until it dies to log delta on metrics 4 | /// Fn is used instead of FnOnce to avoid life problems on drop. 5 | pub struct ScopeMeter { 6 | start: Instant, 7 | callback: Callback, 8 | } 9 | 10 | impl ScopeMeter { 11 | pub fn new(callback: Callback) -> Self { 12 | Self { 13 | start: Instant::now(), 14 | callback, 15 | } 16 | } 17 | } 18 | 19 | impl Drop for ScopeMeter { 20 | fn drop(&mut self) { 21 | (self.callback)(self.start.elapsed()) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /crates/rbuilder/src/telemetry/metrics_macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "metrics_macros" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [lib] 7 | proc-macro = true 8 | 9 | [dependencies] 10 | proc-macro2 = "1.0" 11 | quote = "1.0" 12 | syn = { version = "2.0", features = ["full"] } 13 | -------------------------------------------------------------------------------- /crates/rbuilder/src/telemetry/metrics_macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | use proc_macro::TokenStream; 3 | use quote::quote; 4 | use syn::{parse_macro_input, Item}; 5 | 6 | /// Generates 
and registers the given static Prometheus metrics to a static 7 | /// Registry named REGISTRY. 8 | /// 9 | /// This avoids the need for the caller to manually initialize the registry 10 | /// and call REGISTERY.register on every metric. 11 | /// 12 | /// Metrics are eagerly initialized when the program starts with `ctor`, 13 | /// so they will show up in the Prometheus metrics endpoint immediately. 14 | /// 15 | /// Note: `lazy_static::lazy_static` and `ctor::ctor` must be in scope. 16 | /// 17 | /// # Example 18 | /// 19 | /// ```ignore 20 | /// register_metrics! { 21 | /// pub static CURRENT_BLOCK: IntGauge = IntGauge::new("current_block", "Current Block").unwrap(); 22 | /// 23 | /// pub static BLOCK_FILL_TIME: HistogramVec = HistogramVec::new( 24 | /// HistogramOpts::new("block_fill_time", "Block Fill Times (ms)") 25 | /// .buckets(exponential_buckets_range(1.0, 3000.0, 100)), 26 | /// &["builder_name"] 27 | /// ) 28 | /// .unwrap(); 29 | /// } 30 | /// ``` 31 | /// 32 | /// expands to 33 | /// 34 | /// ```ignore 35 | /// lazy_static! 
{ 36 | /// pub static ref CURRENT_BLOCK: IntGauge = { 37 | /// let metric = IntGauge::new("current_block", "Current Block").unwrap(); 38 | /// REGISTRY.register(Box::new(metric.clone())).unwrap(); 39 | /// metric 40 | /// }; 41 | /// 42 | /// pub static ref BLOCK_FILL_TIME: HistogramVec = { 43 | /// let metric = HistogramVec::new( 44 | /// HistogramOpts::new("block_fill_time", "Block Fill Times (ms)") 45 | /// .buckets(exponential_buckets_range(1.0, 3000.0, 100)), 46 | /// &["builder_name"] 47 | /// ).unwrap(); 48 | /// REGISTRY.register(Box::new(metric.clone())).expect("Failed to register metric"); 49 | /// metric 50 | /// }; 51 | /// } 52 | /// 53 | /// #[ctor] 54 | /// fn initialize_metrics() { 55 | /// // Force initialization of lazy statics 56 | /// let _ = *CURRENT_BLOCK; 57 | /// let _ = *BLOCK_FILL_TIME; 58 | /// } 59 | /// ``` 60 | #[proc_macro] 61 | pub fn register_metrics(input: TokenStream) -> TokenStream { 62 | let input = parse_macro_input!(input as syn::File); 63 | 64 | // Stuff to put in lazy_static! 65 | let mut metrics = quote! {}; 66 | 67 | // Stuff to put in ctor 68 | let mut initializers = quote! {}; 69 | 70 | for item in input.items { 71 | if let Item::Static(static_item) = item { 72 | let vis = &static_item.vis; 73 | let ident = &static_item.ident; 74 | let ty = &static_item.ty; 75 | let expr = &static_item.expr; 76 | 77 | // Create the static metric call REGISTER.register with it. 78 | metrics.extend(quote! { 79 | #vis static ref #ident: #ty = { 80 | let metric = #expr; 81 | REGISTRY.register(Box::new(metric.clone())).expect("Failed to register metric"); 82 | metric 83 | }; 84 | }); 85 | 86 | // Make sure the metric is eagerly initialized 87 | initializers.extend(quote! { 88 | let _ = *#ident; 89 | }); 90 | } else { 91 | panic!("register_metrics! only supports static items"); 92 | } 93 | } 94 | 95 | let out = quote! { 96 | lazy_static! 
{ 97 | #metrics 98 | } 99 | 100 | #[ctor] 101 | fn initialize_metrics() { 102 | #initializers 103 | } 104 | }; 105 | 106 | TokenStream::from(out) 107 | } 108 | -------------------------------------------------------------------------------- /crates/rbuilder/src/telemetry/mod.rs: -------------------------------------------------------------------------------- 1 | //! Telemetry modules helps tracking what is happening in the rbuilder. 2 | //! 3 | //! The redacted server is seperate from the full server, because it may be desirable 4 | //! to expose full and redacted data differently in tdx builders. e.g. redacted data 5 | //! immediately avaliable, and full data avaliable after a delay or some seperate sanitisation. 6 | 7 | mod metrics; 8 | pub mod servers; 9 | 10 | pub use metrics::*; 11 | -------------------------------------------------------------------------------- /crates/rbuilder/src/telemetry/servers/full.rs: -------------------------------------------------------------------------------- 1 | //! Telemetry helps track what is happening in the running application using metrics and tracing. 2 | //! 3 | //! Interface to telemetry should be set of simple functions like: 4 | //! fn record_event(event_data) 5 | //! 6 | //! All internals are global variables. 7 | //! 8 | //! Full server may expose metrics that could leak information when running tdx. 
9 | 10 | use std::net::SocketAddr; 11 | use warp::{Filter, Rejection, Reply}; 12 | 13 | use crate::{ 14 | telemetry::{ 15 | metrics::{gather_prometheus_metrics, set_version}, 16 | REGISTRY, 17 | }, 18 | utils::build_info::Version, 19 | }; 20 | 21 | pub async fn spawn(addr: SocketAddr, version: Version) -> eyre::Result<()> { 22 | set_version(version); 23 | 24 | // metrics over /debug/metrics/prometheus 25 | let metrics_route = warp::path!("debug" / "metrics" / "prometheus").and_then(metrics_handler); 26 | tokio::spawn(warp::serve(metrics_route).run(addr)); 27 | 28 | Ok(()) 29 | } 30 | 31 | async fn metrics_handler() -> Result { 32 | Ok(gather_prometheus_metrics(®ISTRY)) 33 | } 34 | -------------------------------------------------------------------------------- /crates/rbuilder/src/telemetry/servers/mod.rs: -------------------------------------------------------------------------------- 1 | //! Telemetry contains two servers. 2 | //! 3 | //! - [full]: verbose server exposing detailed operational information about the 4 | //! builder. 5 | //! - [redacted]: deliberately redacted server serves information suitable for 6 | //! tdx builders to expose in real-time. 7 | //! 8 | //! The redacted server is seperate from the debug server because it may be desirable 9 | //! to expose debug and redacted data differently in tdx builders. e.g. redacted data 10 | //! immediately avaliable, debug data avaliable after a delay or some seperate sanitisation. 11 | 12 | pub mod full; 13 | pub mod redacted; 14 | -------------------------------------------------------------------------------- /crates/rbuilder/src/telemetry/servers/redacted.rs: -------------------------------------------------------------------------------- 1 | //! Server that only exposes redacted data, suitable for being exposed by tdx 2 | //! builders in real-time. 3 | //! 4 | //! Currently exposes just a healthcheck endpoint on /health. Can be extended 5 | //! in the future. 
6 | 7 | use std::net::SocketAddr; 8 | 9 | use warp::{Filter, Rejection, Reply}; 10 | 11 | async fn handler() -> Result { 12 | Ok("OK") 13 | } 14 | 15 | pub async fn spawn(addr: SocketAddr) -> eyre::Result<()> { 16 | let route = warp::path!("health").and_then(handler); 17 | tokio::spawn(warp::serve(route).run(addr)); 18 | Ok(()) 19 | } 20 | -------------------------------------------------------------------------------- /crates/rbuilder/src/test_utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test_utils" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [lib] 7 | proc-macro = true 8 | 9 | [dependencies] 10 | syn = { version = "2.0", features = ["full"] } 11 | quote = "1.0" 12 | proc-macro2 = "1.0" 13 | which = "4.0" 14 | reqwest = { version = "0.11.20", features = ["blocking"] } 15 | -------------------------------------------------------------------------------- /crates/rbuilder/src/test_utils/src/lib.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | use quote::quote; 3 | use reqwest::blocking::Client; 4 | use std::{env, time::Duration}; 5 | use syn::{parse_macro_input, ItemFn, LitStr}; 6 | 7 | #[proc_macro_attribute] 8 | pub fn ignore_if_env_not_set(attr: TokenStream, item: TokenStream) -> TokenStream { 9 | let env_var_to_check = parse_macro_input!(attr as LitStr).value(); 10 | let input = parse_macro_input!(item as ItemFn); 11 | 12 | // Check if the environment variable is set 13 | let env_var_set = env::var(env_var_to_check).is_ok(); 14 | 15 | let result = if env_var_set { 16 | // If the environment variable is set, return the original function 17 | quote! { #input } 18 | } else { 19 | // If the environment variable is not set, add the #[ignore] attribute 20 | let attrs = &input.attrs; 21 | let vis = &input.vis; 22 | let sig = &input.sig; 23 | let block = &input.block; 24 | 25 | quote! 
{ 26 | #[ignore] 27 | #(#attrs)* 28 | #vis #sig #block 29 | } 30 | }; 31 | 32 | result.into() 33 | } 34 | 35 | #[proc_macro_attribute] 36 | pub fn ignore_if_endpoint_unavailable(attr: TokenStream, item: TokenStream) -> TokenStream { 37 | let endpoint_to_check = parse_macro_input!(attr as LitStr).value(); 38 | let input = parse_macro_input!(item as ItemFn); 39 | 40 | // Check if the HTTP endpoint is available 41 | let client = Client::builder() 42 | .timeout(Duration::from_secs(5)) 43 | .build() 44 | .unwrap(); 45 | 46 | let endpoint_available = client.get(endpoint_to_check).send().is_ok(); 47 | 48 | let result = if endpoint_available { 49 | // If the endpoint is available, return the original function 50 | quote! { #input } 51 | } else { 52 | // If the endpoint is not available, add the #[ignore] attribute 53 | let attrs = &input.attrs; 54 | let vis = &input.vis; 55 | let sig = &input.sig; 56 | let block = &input.block; 57 | 58 | quote! { 59 | #(#attrs)* 60 | #[ignore] 61 | #vis #sig #block 62 | } 63 | }; 64 | 65 | result.into() 66 | } 67 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/bls.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::hex; 2 | use ethereum_consensus::crypto::SecretKey; 3 | use rand; 4 | 5 | pub fn generate_random_bls_address() -> String { 6 | let mut rng = rand::thread_rng(); 7 | let sk = SecretKey::random(&mut rng).unwrap(); 8 | let pk = sk.public_key(); 9 | let raw_bytes = pk.as_ref(); 10 | hex::encode(raw_bytes) 11 | } 12 | 13 | #[cfg(test)] 14 | mod tests { 15 | use crate::utils::bls::generate_random_bls_address; 16 | 17 | #[test] 18 | fn test_generate_random_bls_address() { 19 | let bls_address = generate_random_bls_address(); 20 | assert_eq!(bls_address.len(), 96, "BLS address should be of 96 length"); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- 
/crates/rbuilder/src/utils/build_info.rs: -------------------------------------------------------------------------------- 1 | //! Functions to get git information from the current build. 2 | //! Everything is based on the data generated by crates/rbuilder/build.rs (included as a [build-dependencies] con cargo.toml) and left on 3 | //! a file called built.rs on the output dir. 4 | mod internal { 5 | include!(concat!(env!("OUT_DIR"), "/built.rs")); 6 | } 7 | 8 | use internal::{ 9 | BUILT_TIME_UTC, FEATURES, GIT_COMMIT_HASH_SHORT, GIT_DIRTY, GIT_HEAD_REF, GIT_VERSION, PROFILE, 10 | RUSTC_VERSION, 11 | }; 12 | 13 | pub fn print_version_info() { 14 | println!("version: {}", GIT_VERSION.unwrap_or_default()); 15 | println!("commit: {}", GIT_COMMIT_HASH_SHORT.unwrap_or_default()); 16 | println!("dirty: {}", GIT_DIRTY.unwrap_or_default()); 17 | println!("branch: {}", GIT_HEAD_REF.unwrap_or_default()); 18 | println!("build_time: {}", BUILT_TIME_UTC); 19 | println!("rustc: {}", RUSTC_VERSION); 20 | println!("features: {:?}", FEATURES); 21 | println!("profile: {}", PROFILE); 22 | } 23 | 24 | #[derive(Debug)] 25 | pub struct Version { 26 | /// 8 digits commit. 
if modified "-dirty" is appended 27 | pub git_commit: String, 28 | /// Git ref if available (eg:"refs/heads/more-comments") 29 | pub git_ref: String, 30 | pub build_time_utc: String, 31 | } 32 | 33 | pub fn rbuilder_version() -> Version { 34 | let git_commit = { 35 | let mut commit = String::new(); 36 | if let Some(hash) = GIT_COMMIT_HASH_SHORT { 37 | commit.push_str(hash); 38 | } 39 | if let Some(dirty) = GIT_DIRTY { 40 | if dirty { 41 | commit.push_str("-dirty"); 42 | } 43 | } 44 | if commit.is_empty() { 45 | commit.push_str("unknown"); 46 | } 47 | commit 48 | }; 49 | 50 | let git_ref = GIT_HEAD_REF.unwrap_or("unknown").to_string(); 51 | 52 | Version { 53 | git_commit, 54 | git_ref, 55 | build_time_utc: BUILT_TIME_UTC.to_string(), 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/constants.rs: -------------------------------------------------------------------------------- 1 | ////////////////////////////////////////////////// 2 | // Universal constants 3 | ////////////////////////////////////////////////// 4 | 5 | pub const BASE_TX_GAS: u64 = 21_000; 6 | 7 | pub const SECS_PER_MINUTE: u64 = 60; 8 | pub const MINS_PER_HOUR: u64 = 60; 9 | 10 | ////////////////////////////////////////////////// 11 | // Builder constants 12 | ////////////////////////////////////////////////// 13 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/fmt.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Write; 2 | 3 | /// Writes indent spaces 4 | pub fn write_indent(indent: usize, buf: &mut Buffer) -> std::fmt::Result { 5 | buf.write_str(&format!("{: <1$}", "", indent)) 6 | } 7 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/noncer.rs: -------------------------------------------------------------------------------- 1 | use 
alloy_primitives::Address; 2 | use dashmap::DashMap; 3 | use derivative::Derivative; 4 | use reth_errors::ProviderResult; 5 | use reth_provider::StateProvider; 6 | use std::sync::Arc; 7 | 8 | /// Struct to get nonces for Addresses, caching the results. 9 | #[derive(Derivative)] 10 | #[derivative(Debug)] 11 | pub struct NonceCache { 12 | #[derivative(Debug = "ignore")] 13 | state: Arc, 14 | cache: Arc>, 15 | } 16 | 17 | impl NonceCache { 18 | pub fn new(state: Arc) -> Self { 19 | Self { 20 | state, 21 | cache: Arc::new(DashMap::default()), 22 | } 23 | } 24 | 25 | pub fn nonce(&self, address: Address) -> ProviderResult { 26 | if let Some(nonce) = self.cache.get(&address) { 27 | return Ok(*nonce); 28 | } 29 | 30 | let nonce = self.state.account_nonce(&address)?.unwrap_or_default(); 31 | self.cache.insert(address, nonce); 32 | Ok(nonce) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/provider_head_state.rs: -------------------------------------------------------------------------------- 1 | //! Mod for gathering info about reth's head. 2 | 3 | use crate::provider::StateProviderFactory; 4 | use alloy_primitives::{BlockNumber, B256}; 5 | use reth_errors::ProviderResult; 6 | 7 | /// For debugging. Results of asking for block number + hash to a BlockHashReader+BlockNumReader 8 | #[derive(Debug)] 9 | pub struct ProviderHeadStateBlockHash { 10 | /// Result of getting some (last/best) block number from the BlockNumReader 11 | pub block_number: ProviderResult, 12 | /// If block_number.is_ok -> Some(BlockHashReader::block_hash(block_number)) 13 | pub block_hash: Option>>, 14 | } 15 | 16 | impl ProviderHeadStateBlockHash { 17 | pub fn new( 18 | provider: &P, 19 | block_number: ProviderResult, 20 | ) -> Self { 21 | Self { 22 | block_number: block_number.clone(), 23 | block_hash: block_number.map_or(None, |b| Some(provider.block_hash(b))), 24 | } 25 | } 26 | } 27 | 28 | /// For debugging. 
Results of asking to the StateProviderFactory info about last_block and best block. 29 | #[derive(Debug)] 30 | pub struct ProviderHeadState { 31 | pub last_block: ProviderHeadStateBlockHash, 32 | pub best_block: ProviderHeadStateBlockHash, 33 | } 34 | 35 | impl ProviderHeadState { 36 | pub fn new(provider: &P) -> Self { 37 | Self { 38 | last_block: ProviderHeadStateBlockHash::new(provider, provider.last_block_number()), 39 | best_block: ProviderHeadStateBlockHash::new(provider, provider.best_block_number()), 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/reconnect.rs: -------------------------------------------------------------------------------- 1 | use exponential_backoff::Backoff; 2 | use std::{thread::sleep, time::Duration}; 3 | use tokio_util::sync::CancellationToken; 4 | use tracing::{debug, error, info, info_span, warn}; 5 | 6 | #[derive(Debug)] 7 | pub enum RunCommand { 8 | Reconnect, 9 | Finish, 10 | } 11 | 12 | fn backoff() -> Backoff { 13 | Backoff::new(u32::MAX, Duration::from_secs(1), Duration::from_secs(12)) 14 | } 15 | 16 | /// Helper function for the typical scenario where we connect to something, process data, connection fails and we continuously try to reconnect. 17 | /// connect is continuously called until it returns Ok and then run is called. 18 | /// run's result decides if we end or go back to reconnecting. 
/// Runs a connect/process loop: `connect` is retried with exponential backoff
/// until it succeeds, then `run` consumes the connection and decides (via
/// [RunCommand]) whether to reconnect or finish. Cancellation is only honored
/// between reconnect attempts, not while `run` is executing.
pub fn run_loop_with_reconnect<
    Connection,
    ConnectErr: std::error::Error,
    Connect: Fn() -> Result<Connection, ConnectErr>,
    Run: Fn(Connection) -> RunCommand,
>(
    context: &str,
    connect: Connect,
    run: Run,
    cancellation_token: CancellationToken,
) {
    // All log lines below carry the caller-supplied context.
    let span = info_span!("connect_loop_context", context);
    let _span_guard = span.enter();

    'reconnect: loop {
        if cancellation_token.is_cancelled() {
            break 'reconnect;
        }

        // Fresh backoff schedule for every reconnect cycle.
        let backoff = backoff();
        let mut backoff_iter = backoff.iter();
        let connection = 'backoff: loop {
            // An exhausted backoff iterator means we give up entirely.
            let timeout = if let Some(timeout) = backoff_iter.next() {
                timeout
            } else {
                warn!("Backoff for connection reached max retries");
                break 'reconnect;
            };
            match connect() {
                Ok(connection) => {
                    debug!("Established connection");
                    break 'backoff connection;
                }
                Err(err) => {
                    error!(?err, "Failed to establish connection");
                    // Blocking sleep: this loop is meant for a dedicated thread.
                    sleep(timeout);
                }
            };
        };

        match run(connection) {
            RunCommand::Reconnect => continue 'reconnect,
            RunCommand::Finish => break 'reconnect,
        }
    }

    info!("Exiting connect loop");
}
6 | /// Ideally all other TestDataGenerators will contain one instance of this one for the basic stuff, ideally if several TestDataGenerators are combined the should share this TestDataGenerator 7 | /// to guaranty no repeated data. 8 | /// ALL generated data is based on a single u64 that increments on every use. 9 | /// @Pending factorize with crates/rbuilder/src/mev_boost/rpc.rs (right now we are working on bidding so may generate conflicts) 10 | #[derive(Default)] 11 | pub struct TestDataGenerator { 12 | last_used_id: u64, 13 | } 14 | 15 | impl TestDataGenerator { 16 | pub fn create_u64(&mut self) -> u64 { 17 | self.last_used_id += 1; 18 | self.last_used_id 19 | } 20 | 21 | pub fn create_u256(&mut self) -> U256 { 22 | U256::from(self.create_u64()) 23 | } 24 | 25 | pub fn create_uuid(&mut self) -> Uuid { 26 | Uuid::from_u128(self.create_u128()) 27 | } 28 | 29 | pub fn create_u128(&mut self) -> u128 { 30 | self.create_u64() as u128 31 | } 32 | 33 | pub fn create_u8(&mut self) -> u8 { 34 | self.create_u64() as u8 35 | } 36 | 37 | pub fn create_address(&mut self) -> Address { 38 | Address::repeat_byte(self.create_u8()) 39 | } 40 | 41 | pub fn create_block_hash(&mut self) -> BlockHash { 42 | BlockHash::from(self.create_u256()) 43 | } 44 | 45 | pub fn create_tx_hash(&mut self) -> TxHash { 46 | TxHash::from(self.create_u256()) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/test_utils.rs: -------------------------------------------------------------------------------- 1 | use crate::primitives::{OrderId, TransactionSignedEcRecoveredWithBlobs}; 2 | use alloy_primitives::{Address, Signature, B256, I256, U256}; 3 | use reth_primitives::{Recovered, Transaction, TransactionSigned}; 4 | 5 | pub fn order_id(id: u64) -> OrderId { 6 | OrderId::Tx(hash(id)) 7 | } 8 | 9 | pub fn hash(id: u64) -> B256 { 10 | B256::from(U256::from(id)) 11 | } 12 | 13 | pub fn addr(id: u64) -> Address { 14 | 
Address::from_slice(&u256(id).as_le_slice()[0..20]) 15 | } 16 | 17 | pub fn u256(i: u64) -> U256 { 18 | U256::from(i) 19 | } 20 | 21 | pub fn i256(i: i64) -> I256 { 22 | I256::try_from(i).unwrap() 23 | } 24 | 25 | pub fn tx(tx_hash: u64) -> TransactionSignedEcRecoveredWithBlobs { 26 | TransactionSignedEcRecoveredWithBlobs::new_for_testing(Recovered::new_unchecked( 27 | TransactionSigned::new( 28 | Transaction::Legacy(Default::default()), 29 | Signature::test_signature(), 30 | hash(tx_hash), 31 | ), 32 | Address::default(), 33 | )) 34 | } 35 | -------------------------------------------------------------------------------- /crates/rbuilder/src/utils/tracing.rs: -------------------------------------------------------------------------------- 1 | /// Allows to call event! with level as a parameter (event! only allows constants as level parameter) 2 | #[macro_export] 3 | macro_rules! dynamic_event { 4 | ($level:expr, $($arg:tt)+) => { 5 | match $level { 6 | Level::TRACE => event!(Level::TRACE, $($arg)+), 7 | Level::DEBUG => event!(Level::DEBUG, $($arg)+), 8 | Level::INFO => event!(Level::INFO, $($arg)+), 9 | Level::WARN => event!(Level::WARN, $($arg)+), 10 | Level::ERROR => event!(Level::ERROR, $($arg)+), 11 | } 12 | }; 13 | } 14 | 15 | pub use dynamic_event; 16 | use tracing_subscriber::EnvFilter; 17 | 18 | #[derive(Debug, Clone)] 19 | pub struct LoggerConfig { 20 | pub env_filter: String, 21 | pub log_json: bool, 22 | pub log_color: bool, 23 | } 24 | 25 | pub fn setup_tracing_subscriber(config: LoggerConfig) -> eyre::Result<()> { 26 | let env = EnvFilter::try_new(&config.env_filter)?; 27 | if config.log_json { 28 | tracing_subscriber::fmt() 29 | .json() 30 | .with_env_filter(env) 31 | .try_init() 32 | .map_err(|err| eyre::format_err!("{}", err))?; 33 | } else { 34 | tracing_subscriber::fmt() 35 | .with_env_filter(env) 36 | .with_ansi(config.log_color) 37 | .try_init() 38 | .map_err(|err| eyre::format_err!("{}", err))?; 39 | } 40 | Ok(()) 41 | } 42 | 
-------------------------------------------------------------------------------- /crates/rbuilder/src/utils/tx_signer.rs: -------------------------------------------------------------------------------- 1 | use alloy_consensus::SignableTransaction; 2 | use alloy_primitives::{Address, Signature, B256, U256}; 3 | use reth_primitives::{public_key_to_address, Recovered, Transaction, TransactionSigned}; 4 | use secp256k1::{Message, SecretKey, SECP256K1}; 5 | 6 | /// Simple struct to sign txs/messages. 7 | /// Mainly used to sign payout txs from the builder and to create test data. 8 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 9 | pub struct Signer { 10 | pub address: Address, 11 | pub secret: SecretKey, 12 | } 13 | 14 | impl Signer { 15 | pub fn try_from_secret(secret: B256) -> Result { 16 | let secret = SecretKey::from_slice(secret.as_ref())?; 17 | let pubkey = secret.public_key(SECP256K1); 18 | let address = public_key_to_address(pubkey); 19 | 20 | Ok(Self { address, secret }) 21 | } 22 | 23 | pub fn sign_message(&self, message: B256) -> Result { 24 | let s = SECP256K1 25 | .sign_ecdsa_recoverable(&Message::from_digest_slice(&message[..])?, &self.secret); 26 | let (rec_id, data) = s.serialize_compact(); 27 | 28 | let signature = Signature::new( 29 | U256::try_from_be_slice(&data[..32]).expect("The slice has at most 32 bytes"), 30 | U256::try_from_be_slice(&data[32..64]).expect("The slice has at most 32 bytes"), 31 | i32::from(rec_id) != 0, 32 | ); 33 | Ok(signature) 34 | } 35 | 36 | pub fn sign_tx( 37 | &self, 38 | tx: Transaction, 39 | ) -> Result, secp256k1::Error> { 40 | let signature = self.sign_message(tx.signature_hash())?; 41 | let signed = TransactionSigned::new_unhashed(tx, signature); 42 | Ok(Recovered::new_unchecked(signed, self.address)) 43 | } 44 | 45 | pub fn random() -> Self { 46 | Self::try_from_secret(B256::random()).expect("failed to create random signer") 47 | } 48 | } 49 | 50 | #[cfg(test)] 51 | mod test { 52 | use super::*; 53 | use 
alloy_consensus::TxEip1559; 54 | use alloy_primitives::{address, fixed_bytes, TxKind as TransactionKind}; 55 | use reth_node_core::primitives::SignedTransaction; 56 | use reth_primitives_traits::SignerRecoverable; 57 | #[test] 58 | fn test_sign_transaction() { 59 | let secret = 60 | fixed_bytes!("7a3233fcd52c19f9ffce062fd620a8888930b086fba48cfea8fc14aac98a4dce"); 61 | let address = address!("B2B9609c200CA9b7708c2a130b911dabf8B49B20"); 62 | let signer = Signer::try_from_secret(secret).expect("signer creation"); 63 | assert_eq!(signer.address, address); 64 | 65 | let tx = Transaction::Eip1559(TxEip1559 { 66 | chain_id: 1, 67 | nonce: 2, 68 | gas_limit: 21000, 69 | max_fee_per_gas: 1000, 70 | max_priority_fee_per_gas: 20000, 71 | to: TransactionKind::Call(address), 72 | value: U256::from(3000u128), 73 | ..Default::default() 74 | }); 75 | 76 | let signed_tx = signer.sign_tx(tx).expect("sign tx"); 77 | assert_eq!(signed_tx.signer(), address); 78 | 79 | let signed = signed_tx.into_inner(); 80 | assert_eq!(signed.recover_signer().ok(), Some(address)); 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /crates/reth-rbuilder/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "reth-rbuilder" 3 | version.workspace = true 4 | edition.workspace = true 5 | rust-version.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | rbuilder = { path = "../rbuilder" } 12 | 13 | reth.workspace = true 14 | reth-node-ethereum.workspace = true 15 | reth-provider.workspace = true 16 | reth-transaction-pool.workspace = true 17 | reth-cli-util.workspace = true 18 | alloy-rlp.workspace = true 19 | 20 | tokio.workspace = true 21 | clap.workspace = true 22 | eyre.workspace = true 23 | tracing.workspace = true 24 | 25 | [target.'cfg(unix)'.dependencies] 26 | tikv-jemallocator = { workspace = true, 
optional = true } 27 | libc.workspace = true 28 | 29 | [features] 30 | jemalloc = [ 31 | "reth-cli-util/jemalloc" 32 | ] 33 | -------------------------------------------------------------------------------- /crates/sysperf/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sysperf" 3 | version.workspace = true 4 | edition.workspace = true 5 | rust-version.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | alloy-primitives.workspace = true 12 | 13 | rayon = "1.8" 14 | rand = "0.8" 15 | num_cpus = "1.16" 16 | sysinfo = "0.33.0" -------------------------------------------------------------------------------- /crates/test-relay/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-relay" 3 | version.workspace = true 4 | edition.workspace = true 5 | rust-version.workspace = true 6 | license.workspace = true 7 | homepage.workspace = true 8 | repository.workspace = true 9 | exclude.workspace = true 10 | 11 | [dependencies] 12 | rbuilder.workspace = true 13 | clap.workspace = true 14 | clap_builder.workspace = true 15 | eyre.workspace = true 16 | tokio.workspace = true 17 | tokio-util.workspace = true 18 | 19 | alloy-json-rpc.workspace = true 20 | alloy-primitives.workspace = true 21 | alloy-provider.workspace = true 22 | alloy-consensus.workspace = true 23 | serde.workspace = true 24 | serde_with.workspace = true 25 | serde_json.workspace = true 26 | thiserror.workspace = true 27 | tracing.workspace = true 28 | warp.workspace = true 29 | flate2.workspace = true 30 | lazy_static.workspace = true 31 | metrics_macros.workspace = true 32 | 33 | prometheus.workspace = true 34 | ctor.workspace = true 35 | 36 | ethereum_ssz.workspace = true 37 | url.workspace = true 38 | ahash.workspace = true 39 | time.workspace = true 40 | 41 | parking_lot.workspace = true 
42 | -------------------------------------------------------------------------------- /crates/test-relay/README.md: -------------------------------------------------------------------------------- 1 | # About 2 | 3 | `test-relay` is a dummy relay implementation for block builders that can be used for testing without submitting to the real MEV-boost relay. 4 | 5 | To use test-relay, you need: 6 | * A real MEV-boost relay URL 7 | * A connection to a consensus layer node 8 | * (optional) A validation endpoint to validate blocks 9 | 10 | It provides the following API endpoints: 11 | * GET /relay/v1/builder/validators 12 | * POST /relay/v1/builder/blocks 13 | 14 | 15 | Additionally, it exposes metrics, including estimated slot auction winners among builders who submit to this relay. 16 | -------------------------------------------------------------------------------- /crates/test-relay/src/main.rs: -------------------------------------------------------------------------------- 1 | use crate::validation_api_client::ValidationAPIClient; 2 | use ahash::HashMap; 3 | use metrics::spawn_metrics_server; 4 | use rbuilder::{ 5 | beacon_api_client::Client, 6 | mev_boost::RelayClient, 7 | primitives::mev_boost::MevBoostRelaySlotInfoProvider, 8 | utils::tracing::{setup_tracing_subscriber, LoggerConfig}, 9 | }; 10 | use relay::spawn_relay_server; 11 | use std::net::SocketAddr; 12 | use tokio_util::sync::CancellationToken; 13 | use url::Url; 14 | 15 | use clap::Parser; 16 | use tokio::signal::ctrl_c; 17 | 18 | pub mod metrics; 19 | pub mod relay; 20 | pub mod validation_api_client; 21 | 22 | #[derive(Parser, Debug)] 23 | struct Cli { 24 | #[clap( 25 | short, 26 | long, 27 | help = "API listen address", 28 | default_value = "0.0.0.0:80", 29 | env = "LISTEN_ADDRESS" 30 | )] 31 | listen_address: SocketAddr, 32 | #[clap( 33 | short, 34 | long, 35 | help = "metrics API", 36 | default_value = "0.0.0.0:6069", 37 | env = "METRICS_LISTEN_ADDRESS" 38 | )] 39 | metrics_address: SocketAddr, 40 
| #[clap(long, action, default_value = "false", env = "LOG_JSON")] 41 | log_json: bool, 42 | #[clap( 43 | long, 44 | help = "Rust log describton", 45 | default_value = "info", 46 | env = "RUST_LOG" 47 | )] 48 | rust_log: String, 49 | #[clap( 50 | long, 51 | help = "URL to validate submitted blocks", 52 | env = "VALIDATION_URL" 53 | )] 54 | validation_url: Option, 55 | #[clap( 56 | long, 57 | help = "Relay to fetch current epoch data", 58 | env = "MEV_BOOST_RELAY" 59 | )] 60 | relay: String, 61 | #[clap( 62 | long, 63 | help = "CL clients to fetch mev boost slot data", 64 | env = "CL_CLIENTS", 65 | value_delimiter = ',', 66 | value_parser 67 | )] 68 | cl_clients: Vec, 69 | #[clap( 70 | long, 71 | help = "Map builder relay key to name, e.g. abb3..ca6c:staging-01", 72 | env = "BUILDER_NAMES", 73 | value_delimiter = ',', 74 | value_parser 75 | )] 76 | builder_names: Vec, 77 | } 78 | 79 | #[tokio::main] 80 | async fn main() -> eyre::Result<()> { 81 | let cli = Cli::parse(); 82 | 83 | let global_cancellation = CancellationToken::new(); 84 | 85 | let config = LoggerConfig { 86 | env_filter: cli.rust_log, 87 | log_json: cli.log_json, 88 | log_color: false, 89 | }; 90 | setup_tracing_subscriber(config)?; 91 | 92 | spawn_metrics_server(cli.metrics_address); 93 | 94 | let cl_clients = cli 95 | .cl_clients 96 | .iter() 97 | .map(|c| { 98 | let url = c.parse()?; 99 | Ok(Client::new(url)) 100 | }) 101 | .collect::>>()?; 102 | 103 | let relay = { 104 | let url: Url = cli.relay.parse()?; 105 | let client = RelayClient::from_url(url, None, None, None); 106 | MevBoostRelaySlotInfoProvider::new(client, "relay".to_string()) 107 | }; 108 | 109 | let validation_client = if let Some(url) = cli.validation_url { 110 | Some(ValidationAPIClient::new(&[&url])?) 
111 | } else { 112 | None 113 | }; 114 | 115 | let builder_names = { 116 | let mut map = HashMap::default(); 117 | for arg in cli.builder_names { 118 | let arg: Vec<_> = arg.split(':').collect(); 119 | if arg.len() != 2 { 120 | eyre::bail!("Expected builder name with format \"[]:\" (e.g. \"abb3..ca6c:staging-01\""); 121 | } 122 | map.insert(arg[0].to_string(), arg[1].to_string()); 123 | } 124 | map 125 | }; 126 | 127 | spawn_relay_server( 128 | cli.listen_address, 129 | validation_client, 130 | cl_clients, 131 | relay, 132 | builder_names, 133 | global_cancellation.clone(), 134 | )?; 135 | 136 | ctrl_c().await.unwrap(); 137 | global_cancellation.cancel(); 138 | 139 | Ok(()) 140 | } 141 | -------------------------------------------------------------------------------- /crates/test-relay/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use ctor::ctor; 2 | use lazy_static::lazy_static; 3 | use metrics_macros::register_metrics; 4 | use prometheus::{HistogramOpts, HistogramVec, IntCounterVec, Opts, Registry}; 5 | use rbuilder::{ 6 | telemetry::{exponential_buckets_range, gather_prometheus_metrics, linear_buckets_range}, 7 | utils::duration_ms, 8 | }; 9 | use std::{net::SocketAddr, time::Duration}; 10 | use warp::{reject::Rejection, reply::Reply, Filter}; 11 | 12 | lazy_static! { 13 | pub static ref REGISTRY: Registry = Registry::new(); 14 | } 15 | 16 | register_metrics! 
{ 17 | // Statistics about finalized blocks 18 | pub static PAYLOADS_RECEIVED: IntCounterVec = IntCounterVec::new( 19 | Opts::new( 20 | "payloads_received", 21 | "payloads received" 22 | ), 23 | &["builder"] 24 | ) 25 | .unwrap(); 26 | 27 | pub static PAYLOAD_VALIDATION_ERRORS: IntCounterVec = IntCounterVec::new( 28 | Opts::new( 29 | "payloads_validation_errors", 30 | "payloads validation errors" 31 | ), 32 | &["builder"] 33 | ) 34 | .unwrap(); 35 | 36 | pub static RELAY_ERRORS: IntCounterVec = IntCounterVec::new( 37 | Opts::new( 38 | "relay_errors", 39 | "errors when fetching data from relays" 40 | ), 41 | &["relay"] 42 | ) 43 | .unwrap(); 44 | 45 | pub static WINS: IntCounterVec = IntCounterVec::new( 46 | Opts::new( 47 | "wins", 48 | "wins per builder (relay samples top bid close to the end to the slot)" 49 | ), 50 | &["builder"] 51 | ) 52 | .unwrap(); 53 | 54 | pub static WINNER_ADVANTAGE: HistogramVec = HistogramVec::new( 55 | HistogramOpts::new("winner_advantage", "Percentage of value that winner has over the next best bid by other builders") 56 | .buckets(linear_buckets_range(0.0, 100.0, 100)), 57 | &["builder"], 58 | ).unwrap(); 59 | 60 | 61 | pub static PAYLOAD_PROCESSING_TIME: HistogramVec = HistogramVec::new( 62 | HistogramOpts::new("payload_processing_time", "Time to fully process received payload (ms)") 63 | .buckets(exponential_buckets_range(1.0, 3000.0, 100)), 64 | &[], 65 | ).unwrap(); 66 | 67 | pub static PAYLOAD_VALIDATION_TIME: HistogramVec = HistogramVec::new( 68 | HistogramOpts::new("payload_validation_time", "Time to validate received payload (ms)") 69 | .buckets(exponential_buckets_range(1.0, 3000.0, 100)), 70 | &[], 71 | ).unwrap(); 72 | } 73 | 74 | pub fn inc_payloads_received(builder: &str) { 75 | PAYLOADS_RECEIVED.with_label_values(&[builder]).inc(); 76 | } 77 | 78 | pub fn inc_payload_validation_errors(builder: &str) { 79 | PAYLOAD_VALIDATION_ERRORS 80 | .with_label_values(&[builder]) 81 | .inc(); 82 | } 83 | 84 | pub fn 
inc_relay_errors() { 85 | RELAY_ERRORS.with_label_values(&[]).inc(); 86 | } 87 | 88 | pub fn add_winning_bid(builder: &str, advantage: f64) { 89 | WINS.with_label_values(&[builder]).inc(); 90 | if advantage != 0.0 { 91 | // we filter 0.0 advantage to filter edge cases like the first bid in the slot, etc. 92 | WINNER_ADVANTAGE 93 | .with_label_values(&[builder]) 94 | .observe(advantage); 95 | } 96 | } 97 | 98 | pub fn add_payload_processing_time(duration: Duration) { 99 | PAYLOAD_PROCESSING_TIME 100 | .with_label_values(&[]) 101 | .observe(duration_ms(duration)); 102 | } 103 | 104 | pub fn add_payload_validation_time(duration: Duration) { 105 | PAYLOAD_VALIDATION_TIME 106 | .with_label_values(&[]) 107 | .observe(duration_ms(duration)); 108 | } 109 | 110 | pub fn spawn_metrics_server(address: SocketAddr) { 111 | let metrics_route = warp::path!("debug" / "metrics" / "prometheus").and_then(metrics_handler); 112 | tokio::spawn(warp::serve(metrics_route).run(address)); 113 | } 114 | 115 | async fn metrics_handler() -> Result { 116 | Ok(gather_prometheus_metrics(®ISTRY)) 117 | } 118 | -------------------------------------------------------------------------------- /docs/LOGS_PRIVACY.md: -------------------------------------------------------------------------------- 1 | # Logs Privacy in rbuilder 2 | 3 | ## Introduction 4 | 5 | Log privacy in rbuilder refers to the level of data exposed in logs via macros like `error!`. A key principle in rbuilder is that we never log a full order, as this information could be harmful to the order sender. 6 | 7 | ### Why is this important? 8 | 9 | - A non-landed order, if logged in full, could potentially be executed in a later block, causing losses for the order owner. 10 | - Even if an order has built-in protections against unexpected executions, the order owner might still incur gas fees. 
11 | 12 | ## External Error Redaction 13 | 14 | While we don't log full orders ourselves, we sometimes interact with external systems and log their error codes. Since some of these may contain plain strings, we offer an option to redact any error before logging. 15 | 16 | ### Enabling Error Redaction 17 | 18 | To enable external error redaction, use the `redact-sensitive` feature flag. 19 | 20 | ### Example of Error Redaction 21 | 22 | **Never** derive `Display` or `Debug` for errors which may contain sensitive info. 23 | 24 | Instead, explicitly implement them using your favourite library, or manually. 25 | 26 | ```rust 27 | #[derive(Error)] 28 | pub enum SomeError { 29 | #[error("Request error: {0}")] 30 | RequestError(#[from] RedactableReqwestError), 31 | 32 | #[cfg_attr( 33 | not(feature = "redact-sensitive"), 34 | error("Unknown relay response, status: {0}, body: {1}") 35 | )] 36 | #[cfg_attr( 37 | feature = "redact-sensitive", 38 | error("Unknown relay response, status: {0}, body: [REDACTED]") 39 | )] 40 | UnknownRelayError(StatusCode, String), 41 | 42 | #[error("Too many requests")] 43 | TooManyRequests, 44 | #[error("Connection error")] 45 | ConnectionError, 46 | #[error("Internal Error")] 47 | InternalError, 48 | } 49 | 50 | impl Debug for RelayError { 51 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 52 | write!(f, "{}", self) 53 | } 54 | } 55 | 56 | #[derive(Error)] 57 | pub struct RedactableReqwestError(reqwest::Error); 58 | 59 | impl Display for RedactableReqwestError { 60 | #[cfg(not(feature = "redact-sensitive"))] 61 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 62 | write!(f, "{}", self.0) 63 | } 64 | 65 | #[cfg(feature = "redact-sensitive")] 66 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 67 | if self.0.is_builder() { 68 | write!(f, "Redacted Reqwest Error: Builder") 69 | } else if self.0.is_request() { 70 | write!(f, "Redacted Reqwest Error: Request") 71 | } else if self.0.is_redirect() { 72 | write!(f, 
"Redacted Reqwest Error: Redirect") 73 | } else if self.0.is_status() { 74 | write!(f, "Redacted Reqwest Error: Status") 75 | } else if self.0.is_body() { 76 | write!(f, "Redacted Reqwest Error: Body") 77 | } else if self.0.is_decode() { 78 | write!(f, "Redacted Reqwest Error: Decode") 79 | } else { 80 | write!(f, "Redacted Reqwest Error") 81 | } 82 | } 83 | } 84 | 85 | impl Debug for RedactableReqwestError { 86 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 87 | Display::fmt(self, f) 88 | } 89 | } 90 | ``` 91 | -------------------------------------------------------------------------------- /docs/REORG_LOSSES.md: -------------------------------------------------------------------------------- 1 | # Reorg losses 2 | 3 | 4 | From time to time, the Ethereum network forks for a short time (you can check that at https://etherscan.io/blocks_forked), causing block reorganizations (reorgs). When a reorg happens, all the transactions from the losing fork go to the mempool. This includes any private transactions, particularly any builder-generated transactions, such as the ones we add at the end of the block to pay the validator. 5 | 6 | 7 | If this happens, the builder ends up paying the bid to a random validator without winning a block! Notice that **this happens even if this transaction gives no tip to the block**, probably because, for some builders, when all paying transactions are included in the block they are building (and some gas is left), they include free transactions. 8 | 9 | 10 | This is a rare event but occurs from time to time and **causes losses to the builder**. As soon as the builder wins a real (non-reorged) block, the nonce changes, the old reorged transaction gets invalidated, and we are safe again. 11 | 12 | 13 | As far as we know, major builders are not doing anything about this. 
Here are some examples of these events: 14 | - Flashbots: https://etherscan.io/tx/0xdb876101f649bf6801a04ec9da5535449eaec32247a59735bc3c88c287b1d5a9 15 | - Beaver: https://etherscan.io/tx/0xaef73164468705b14f830eda7461b10838fe7f575bec8e0169b1cab6906ed0f3 16 | - Titan: https://etherscan.io/tx/0x936ceee44b270b52d2765c78f27e3b4a9e067ef4dca41ce4e699664fde6c300d 17 | 18 | The main ideas we are considering are: 19 | - Pay through a contract that checks that the coinbase is the builder's address and reverts otherwise. A gas analysis needs to be done to determine how much extra cost this adds to regular blocks. An alternative is to implement this only when the payment transaction is too large. 20 | - Detect reorgs and post a transaction with the same nonce, paying the smallest possible gas tip, in an attempt to lure the builders into using this transaction instead of the reorged one. 21 | - Periodically remove the winnings from the builder's account to limit potential losses. 22 | -------------------------------------------------------------------------------- /mev-test-contract/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: init test build update printSelectors 2 | 3 | init: 4 | git submodule update --init --recursive 5 | 6 | 7 | test: init 8 | forge test 9 | 10 | build: init 11 | forge build 12 | 13 | update: build 14 | cat out/MevTest.sol/MevTest.json | jq '{ "MevTest": .deployedBytecode.object, "MevTestInitBytecode": .bytecode.object }' > ../crates/rbuilder/src/building/testing/contracts.json 15 | 16 | printSelectors: build 17 | forge in MevTest methodIdentifiers 18 | -------------------------------------------------------------------------------- /mev-test-contract/foundry.toml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/rbuilder/c71d4a11a9701c3b14c4b47e142d75bfd97c6178/mev-test-contract/foundry.toml 
-------------------------------------------------------------------------------- /mev-test-contract/src/MevTest.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Unlicense 2 | pragma solidity ^0.8.13; 3 | 4 | contract EphemeralContractTest { 5 | function destruct(address payable refundAddr) payable public { 6 | selfdestruct(refundAddr); 7 | } 8 | } 9 | 10 | contract MevTest { 11 | 12 | /// Sends all value to coinbase. 13 | function sendToCoinbase() public payable { 14 | block.coinbase.transfer(msg.value); 15 | } 16 | 17 | /// Sends all value to the given address. 18 | function sendTo(address payable to) public payable { 19 | to.transfer(msg.value); 20 | } 21 | 22 | /// Check if the value in the slot is equal to the old value, if so increment it and send the value to coinbase. 23 | function incrementValue(uint256 slot, uint256 oldValue) public payable { 24 | // check old slot value 25 | uint256 storedValue; 26 | assembly { 27 | storedValue := sload(slot) 28 | } 29 | require(storedValue == oldValue, "Old value does not match"); 30 | uint256 newValue = oldValue + 1; 31 | assembly { 32 | sstore(slot, newValue) 33 | } 34 | 35 | if (msg.value > 0) { 36 | block.coinbase.transfer(msg.value); 37 | } 38 | } 39 | 40 | 41 | /// Just reverts! 42 | function revert() public payable { 43 | revert(); 44 | } 45 | 46 | /// Return sum of the contract's balance and addr's balance, for testing evm inspector with selfbalance/balance opcode. 47 | function testReadBalance(address payable addr) public payable { 48 | address(this).balance + addr.balance; 49 | } 50 | 51 | // Deploy a contract and let the contract self-destruct, for testing evm inspector on contract deploy and destruct. 
52 | function testEphemeralContractDestruct(address payable refund) public payable { 53 | EphemeralContractTest ephemeral_contract = new EphemeralContractTest(); 54 | ephemeral_contract.destruct{value: msg.value}(refund); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /mev-test-contract/test/MevTest.t.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Unlicense 2 | pragma solidity ^0.8.13; 3 | 4 | import "forge-std/Test.sol"; 5 | 6 | import "src/MevTest.sol"; 7 | 8 | contract TestContract is Test { 9 | MevTest c; 10 | 11 | function setUp() public { 12 | c = new MevTest(); 13 | } 14 | 15 | function testSendToCoinbase() public { 16 | uint balanceBefore = block.coinbase.balance; 17 | c.sendToCoinbase{value: 123}(); 18 | uint balanceAfter = block.coinbase.balance; 19 | assertEq(balanceAfter - balanceBefore, 123); 20 | } 21 | 22 | function testTo() public { 23 | address payable a = payable(vm.addr(1)); 24 | uint balanceBefore = a.balance; 25 | c.sendTo{value: 123}(a); 26 | uint balanceAfter = a.balance; 27 | assertEq(balanceAfter - balanceBefore, 123); 28 | } 29 | 30 | function testIncrementValue() public { 31 | // slot value is 0 by default 32 | c.incrementValue(1, 0); 33 | c.incrementValue(1, 1); 34 | c.incrementValue(1, 2); 35 | 36 | // conflicting tx was committed 37 | vm.expectRevert(); 38 | c.incrementValue(1, 2); 39 | 40 | // different slots don't conflict 41 | c.incrementValue(2, 0); 42 | 43 | // sends to coinbase 44 | uint balanceBefore = block.coinbase.balance; 45 | c.incrementValue{value: 123}(3, 0); 46 | uint balanceAfter = block.coinbase.balance; 47 | assertEq(balanceAfter - balanceBefore, 123); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.86.0" 3 | 
components = ["rustfmt", "clippy"] 4 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | imports_granularity = "Crate" 3 | -------------------------------------------------------------------------------- /scripts/ci/benchmark-in-ci.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | # This script runs the benchmarks and prepares a customized report. 5 | # 6 | # Primary use-case is running in (GitHub) CI, but can also run locally. 7 | # 8 | # Benchmarks can be run in two modes: 9 | # 1. Basic mode: Run the benchmarks for the current commit only (used if HEAD_SHA == BASE_SHA) 10 | # 2. Comparison mode: Run the benchmarks for the current commit and a target commit, and compare them (to see the specific improvements) 11 | # 12 | # Important variables are sourced from ./env-vars.sh, which combines local git information with Github specific variables and methods. 13 | # 14 | # Useful environment variables (to possibly override): 15 | # - X_HEAD_SHA: custom head sha (for dev purposes) 16 | # - X_BASE_SHA: custom base sha (which is run first, and the head benchmark compared against) 17 | # - SKIP_BENCH: skip running `cargo bench` (just generates the HTML based on the previous benchmark). 18 | # 19 | set -e 20 | 21 | script_dir=$( dirname -- "$0"; ) 22 | 23 | echo "Running benchmarks ..." 
24 | 25 | # Gather env vars 26 | # https://docs.github.com/en/actions/learn-github-actions/variables 27 | source "${script_dir}/env-vars.sh" 28 | 29 | echo "DATE_UTC: ${DATE}" 30 | echo "HEAD_SHA: ${HEAD_SHA}" 31 | echo "HEAD_BRANCH: ${HEAD_BRANCH}" 32 | echo "BASE_SHA: ${BASE_SHA}" 33 | echo "BASE_REF: ${BASE_REF}" 34 | echo "GITHUB_SHA: ${GITHUB_SHA}" 35 | echo "GITHUB_REF_NAME: ${GITHUB_REF_NAME}" 36 | echo "GITHUB_REF_TYPE: ${GITHUB_REF_TYPE}" 37 | echo "GITHUB_HEAD_REF: ${GITHUB_HEAD_REF}" 38 | echo "GITHUB_ACTOR: ${GITHUB_ACTOR}" 39 | echo "PR_NUMBER: ${PR_NUMBER}" 40 | echo "RUNNER_ARCH: ${RUNNER_ARCH}" 41 | echo "RUNNER_OS: ${RUNNER_OS}" 42 | 43 | # 44 | # RUN THE BENCHMARKS 45 | # 46 | cd $script_dir 47 | cd ../.. 48 | 49 | function run_benchmark() { 50 | if [ "$HEAD_SHA" == "$BASE_SHA" ]; then 51 | # Benchmark only current commit, no comparison 52 | echo "Running cargo bench ..." 53 | cargo bench --workspace --features optimism 54 | else 55 | # Benchmark target commit first, and then benchmark current commit against that baseline 56 | echo "Benchmarking ${HEAD_SHA_SHORT} against the target ${BASE_SHA_SHORT} ..." 57 | 58 | # Switch to target commit and run benchmarks 59 | echo "Switching to $BASE_SHA_SHORT and starting benchmarks ..." 60 | git checkout $BASE_SHA 61 | cargo bench --workspace --features optimism 62 | 63 | # Switch back to current commit and run benchmarks again 64 | echo "Switching back to $HEAD_SHA_SHORT and running benchmarks ..." 65 | # Reset to ensure any changes (e.g. to Cargo.lock) are discarded before attempting to checkout. 66 | git reset --hard 67 | git checkout $HEAD_SHA 68 | cargo bench --workspace --features optimism 69 | fi 70 | } 71 | 72 | # Run benchmarks now (if env var SKIP_BENCH is not set) 73 | if [ -n "$SKIP_BENCH" ]; then 74 | echo "SKIP_BENCH is set, skipping cargo bench." 
75 | else 76 | run_benchmark 77 | fi 78 | 79 | # 80 | # Build the summary 81 | # 82 | # Grab the changes as markdown (used in templates for PR and job-summary comment) 83 | export BENCH_CHANGES_MD=$( ./scripts/ci/criterion-get-changes.py target/criterion --output-format md ) 84 | export BENCH_CHANGES_MD_ONLYSIGNIFICANT=$( ./scripts/ci/criterion-get-changes.py target/criterion --output-format md --only-significant ) 85 | if [ -z "$BENCH_CHANGES_MD_ONLYSIGNIFICANT" ]; then 86 | export BENCH_CHANGES_MD_ONLYSIGNIFICANT="None" 87 | fi 88 | 89 | # Prettify criterion report 90 | mkdir -p target/benchmark-in-ci 91 | ./scripts/ci/criterion-prettify-report.sh target/criterion target/benchmark-in-ci/benchmark-report 92 | echo "Saved report: target/benchmark-in-ci/benchmark-report/report/index.html" 93 | 94 | # Create summary markdown 95 | fn="target/benchmark-in-ci/benchmark-report/benchmark-summary.md" 96 | envsubst < scripts/ci/templates/benchmark-summary.md > $fn 97 | echo "Wrote summary: $fn" 98 | 99 | # Create summary pr comment 100 | fn="target/benchmark-in-ci/benchmark-report/benchmark-pr-comment.md" 101 | envsubst < scripts/ci/templates/benchmark-pr-comment.md > $fn 102 | echo "Wrote PR comment: $fn" 103 | -------------------------------------------------------------------------------- /scripts/ci/criterion-prettify-report.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Build a customized benchmark report directory (with HTML + images), using the original criterion report as input. 4 | # 5 | set -e 6 | 7 | if [ "$#" -ne 2 ]; then 8 | echo "Usage: $0 " 9 | exit 1 10 | fi 11 | 12 | script_dir=$( dirname -- "$0"; ) 13 | criterion_dir=$( realpath -s $1 ) 14 | output_dir=$( realpath -s $2 ) 15 | 16 | source "${script_dir}/env-vars.sh" 17 | 18 | echo "Input directory: $criterion_dir" 19 | echo "Output directory: $output_dir" 20 | 21 | # Error if input_dir doesn't exist 22 | if [ ! 
-d "$criterion_dir" ]; then 23 | echo "Error: input directory '$criterion_dir' does not exist." 24 | exit 1 25 | fi 26 | 27 | cmd="${script_dir}/criterion-get-changes.py $criterion_dir --output-format html --only-significant" 28 | export BENCH_CHANGES_HTML=$( $cmd ) 29 | 30 | # Clean up the output directory and create a copy of the input directory 31 | rm -rf "$output_dir" 32 | cp -r "$criterion_dir" "$output_dir" 33 | 34 | # Iterate over all html files in the input directory, recursively 35 | find $output_dir -type f -name "*.html" | while read file; do 36 | echo "- Updating $file ..." 37 | python3 "${script_dir}/criterion-update-html.py" "$file" "$file" 38 | done 39 | -------------------------------------------------------------------------------- /scripts/ci/download-op-reth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VERSION=${VERSION:-1.3.12} 4 | 5 | ARCH=$(uname -m) 6 | case $ARCH in 7 | "x86_64") 8 | if [[ "$(uname)" == "Darwin" ]]; then 9 | ARCH_STRING="x86_64-apple-darwin" 10 | else 11 | ARCH_STRING="x86_64-unknown-linux-gnu" 12 | fi 13 | ;; 14 | "arm64" | "aarch64") 15 | if [[ "$(uname)" == "Darwin" ]]; then 16 | ARCH_STRING="aarch64-apple-darwin" 17 | else 18 | ARCH_STRING="aarch64-unknown-linux-gnu" 19 | fi 20 | ;; 21 | *) 22 | echo "Unsupported architecture: $ARCH" 23 | exit 1 24 | ;; 25 | esac 26 | 27 | FILENAME="op-reth-v${VERSION}-${ARCH_STRING}.tar.gz" 28 | echo "Downloading ${FILENAME}" 29 | 30 | wget -q "https://github.com/paradigmxyz/reth/releases/download/v${VERSION}/${FILENAME}" 31 | 32 | # Extract the tar.gz file 33 | tar -xzf "${FILENAME}" 34 | 35 | # Make the binary executable 36 | chmod +x op-reth 37 | 38 | # Clean up the tar.gz file (optional) 39 | rm "${FILENAME}" 40 | -------------------------------------------------------------------------------- /scripts/ci/env-vars.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 
| # General information 4 | export DATE=$(date -u -Is) # UTC 5 | export REPO_URL="https://github.com/flashbots/rbuilder" 6 | export PUBLIC_URL_HOST="https://flashbots-rbuilder-ci-stats.s3.us-east-2.amazonaws.com" 7 | export PUBLIC_URL_REPORT="$PUBLIC_URL_HOST/${S3_UPLOAD_DIR}" # S3_UPLOAD_DIR is set in the GitHub workflow 8 | export PUBLIC_URL_STATIC="$PUBLIC_URL_HOST/static" 9 | 10 | # HEAD_SHA is supposed to be the commit hash of the current branch. When run in 11 | # GitHub CI, this is actually set to the commit hash of the merge branch instead. 12 | # 13 | # This code gets the actual commit hash of the current branch. 14 | # 15 | # See also https://github.com/orgs/community/discussions/26325 16 | export HEAD_SHA=$( git log -n 1 --pretty=format:"%H" ) 17 | if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then 18 | export HEAD_SHA=$(cat $GITHUB_EVENT_PATH | jq -r .pull_request.head.sha) 19 | fi 20 | export HEAD_SHA_SHORT=$( echo $HEAD_SHA | cut -c1-7 ) 21 | 22 | # HEAD_BRANCH is "HEAD" when run in GitHub CI. Use GITHUB_HEAD_REF instead (which is the PR branch name). 23 | export HEAD_BRANCH=$( git branch --show-current ) 24 | if [ -z "$HEAD_BRANCH" ]; then 25 | export HEAD_BRANCH="${GITHUB_HEAD_REF}" 26 | fi 27 | 28 | # BASE_REF and BASE_SHA are the baseline (what the PR is pointing to, and what Criterion can compare). 
29 | export BASE_REF=${GITHUB_BASE_REF:=develop} # "develop" branch is the default 30 | export BASE_SHA=$( git log -n 1 --pretty=format:"%H" origin/${BASE_REF} ) # get hash of base branch 31 | export BASE_SHA_SHORT=$( echo $BASE_SHA | cut -c1-7 ) 32 | 33 | # 34 | # DEV_OVERRIDES: custom head and base sha 35 | # 36 | if [ -n "$X_HEAD_SHA" ]; then 37 | export HEAD_SHA=$X_HEAD_SHA 38 | export HEAD_SHA_SHORT=$( echo $HEAD_SHA | cut -c1-7 ) 39 | export HEAD_BRANCH="" 40 | fi 41 | 42 | if [ -n "$X_BASE_SHA" ]; then 43 | export BASE_SHA=$X_BASE_SHA 44 | export BASE_SHA_SHORT=$( echo $BASE_SHA | cut -c1-7 ) 45 | export BASE_REF="" 46 | fi -------------------------------------------------------------------------------- /scripts/ci/templates/benchmark-pr-comment.md: -------------------------------------------------------------------------------- 1 | ## Benchmark results for `${HEAD_SHA_SHORT}` 2 | 3 | Report: __BENCH_URL__ 4 | 5 | | | | 6 | | ---------- | ------------- | 7 | | Date (UTC) | ${DATE} | 8 | | Commit | `${HEAD_SHA}` | 9 | | Base SHA | `${BASE_SHA}` | 10 | 11 | ### Significant changes 12 | 13 | ${BENCH_CHANGES_MD_ONLYSIGNIFICANT} -------------------------------------------------------------------------------- /scripts/ci/templates/benchmark-summary.md: -------------------------------------------------------------------------------- 1 | # Benchmark results for `${HEAD_SHA_SHORT}` 2 | 3 | | | | 4 | | -------------- | -------------------- | 5 | | Date (UTC) | ${DATE} | 6 | | Head SHA | `${HEAD_SHA}` | 7 | | Head Branch | `${HEAD_BRANCH}` | 8 | | Base SHA | `${BASE_SHA}` | 9 | | Base Ref | `${BASE_REF}` | 10 | | GH SHA | `${GITHUB_SHA}` | 11 | | GH Ref Name | `${GITHUB_REF_NAME}` | 12 | | GH Ref Type | `${GITHUB_REF_TYPE}` | 13 | | GH Head Ref | `${GITHUB_HEAD_REF}` | 14 | | GH Actor | `${GITHUB_ACTOR}` | 15 | | GH Runner Arch | `${RUNNER_ARCH}` | 16 | | GH Runner OS | `${RUNNER_OS}` | 17 | | PR Number | `${PR_NUMBER}` | 18 | 19 | 20 | ### Significant changes 21 | 22 
| ${BENCH_CHANGES_MD} 23 | -------------------------------------------------------------------------------- /scripts/ci/templates/partials/index-changes.html: -------------------------------------------------------------------------------- 1 |
2 | 3 |
4 |

Significant changes:

5 | 6 | __CHANGES_TABLE__ 7 |
8 |
-------------------------------------------------------------------------------- /scripts/ci/templates/report-criterion-benchmark.html: -------------------------------------------------------------------------------- 1 | 2 |
3 |
4 | 5 | 6 | 7 |

rbuilder benchmark "__BENCHMARK_NAME__" for __HEAD_SHA_SHORT__

8 | ↑ parent page 9 |
10 | 11 |
12 | 13 |
14 | __CONTENT__ 15 |
16 | 17 |
18 | 19 | __FOOTER__ 20 |
21 | 22 | 23 | -------------------------------------------------------------------------------- /scripts/ci/templates/report-footer.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/ci/templates/report-head.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 15 | 16 | 17 | __TITLE__ 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | __STYLE__ 35 | -------------------------------------------------------------------------------- /scripts/ci/templates/report-index.html: -------------------------------------------------------------------------------- 1 | 2 |
3 |
4 | 5 | 6 | 7 |

rbuilder benchmarks for __HEAD_SHA_SHORT__

8 | 9 |

10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
Date__DATE__
Commit__HEAD_SHA__
Base SHA__BASE_SHA__
PR__PR_NUMBER__
28 |

29 |
30 | 31 | __CHANGES__ 32 | 33 |
34 | 35 |
36 |

Benchmarks:

37 | __CONTENT__ 38 |
39 | 40 |
41 | 42 | __FOOTER__ 43 |
44 | -------------------------------------------------------------------------------- /scripts/ci/templates/report-styles.css: -------------------------------------------------------------------------------- 1 | body { 2 | background-color: white; 3 | max-width: 1200px; 4 | margin: auto; 5 | } 6 | 7 | .container { 8 | margin: 2em 4em; 9 | } 10 | 11 | .header, 12 | .content, 13 | .footer { 14 | margin: 2em 0em; 15 | } 16 | 17 | .content ul { 18 | font-size: large; 19 | } 20 | 21 | .footer { 22 | font-size: small; 23 | } 24 | 25 | h1, 26 | h2, 27 | h3 { 28 | font-weight: 300; 29 | } 30 | 31 | h2 { 32 | font-size: 36px; 33 | } 34 | 35 | h3 { 36 | font-size: 24px; 37 | } 38 | 39 | hr { 40 | border: 0; 41 | height: 1px; 42 | background: #AAA; 43 | } 44 | 45 | .head-infos { 46 | font-size: 0.9em; 47 | } 48 | 49 | .head-infos tt { 50 | font-size: 1.1em; 51 | } 52 | 53 | /* 54 | * Report 55 | */ 56 | .stats { 57 | width: 80%; 58 | margin: auto; 59 | display: flex; 60 | } 61 | 62 | .additional_stats { 63 | flex: 0 0 60% 64 | } 65 | 66 | .additional_plots { 67 | flex: 1 68 | } 69 | 70 | .explanation { 71 | margin-top: 4em; 72 | margin-bottom: 6em; 73 | color: #444; 74 | font-size: small; 75 | } 76 | 77 | th.ci-bound { 78 | opacity: 0.6 79 | } 80 | 81 | td.ci-bound { 82 | opacity: 0.5 83 | } -------------------------------------------------------------------------------- /zepter.yaml: -------------------------------------------------------------------------------- 1 | version: 2 | format: 1 3 | # Minimum zepter version that is expected to work. This is just for printing a nice error 4 | # message when someone tries to use an older version. 5 | binary: 0.13.2 6 | 7 | # The examples in the following comments assume crate `A` to have a dependency on crate `B`. 8 | workflows: 9 | check: 10 | - [ 11 | "lint", 12 | # Check that `A` activates the features of `B`. 
13 | "propagate-feature", 14 | # These are the features to check: 15 | "--features=std,optimism,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,redact-sensitive", 16 | # Do not try to add a new section into `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. 17 | "--left-side-feature-missing=ignore", 18 | # Ignore the case that `A` is outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. 19 | "--left-side-outside-workspace=ignore", 20 | # Auxiliary flags: 21 | "--offline", 22 | "--locked", 23 | "--show-path", 24 | "--quiet", 25 | ] 26 | default: 27 | # Running `zepter` with no subcommand will check & fix. 28 | - [$check.0, "--fix"] 29 | 30 | # Will be displayed when any workflow fails: 31 | help: 32 | text: | 33 | rbuilder uses the Zepter CLI to detect abnormalities in Cargo features, e.g. missing propagation. 34 | 35 | It looks like one or more checks failed; please check the console output. 36 | 37 | You can try to automatically address them by installing zepter (`cargo install zepter --locked`) and simply running `zepter` in the workspace root. 38 | links: 39 | - "https://github.com/ggwpez/zepter" 40 | --------------------------------------------------------------------------------