├── .gitignore ├── rustfmt.toml ├── .vscode └── settings.json ├── SECURITY.md ├── src ├── bidding_service_wrapper │ ├── client │ │ ├── mod.rs │ │ ├── unfinished_block_building_sink_client.rs │ │ └── bidding_service_client_adapter.rs │ ├── mod.rs │ ├── proto │ │ └── bidding_service.proto │ ├── conversion.rs │ └── bidding_service.rs ├── lib.rs ├── bin │ ├── backtest-build-range.rs │ ├── backtest-build-block.rs │ └── rbuilder.rs ├── true_block_value_push │ ├── mod.rs │ ├── redis_backend.rs │ ├── blocks_processor_backend.rs │ ├── best_true_value_observer.rs │ └── best_true_value_pusher.rs ├── metrics.rs ├── build_info.rs ├── signed_http_client.rs ├── reconnect.rs ├── flashbots_signer.rs ├── blocks_processor.rs └── flashbots_config.rs ├── README.md ├── .github ├── CODEOWNERS ├── dependabot.yaml └── workflows │ ├── checks.yaml │ └── release.yaml ├── .editorconfig ├── Dockerfile ├── LICENSE-MIT ├── config-live-example.toml ├── Makefile ├── Cargo.toml └── LICENSE-APACHE /.gitignore: -------------------------------------------------------------------------------- 1 | /cargo 2 | /data 3 | /target 4 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | imports_granularity = "Crate" 3 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "cSpell.words": [ 3 | "innecesary" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | Contact: security@flashbots.net -------------------------------------------------------------------------------- /src/bidding_service_wrapper/client/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod bidding_service_client_adapter; 2 | mod unfinished_block_building_sink_client; 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rbuilder-operator 2 | Specific implementation (based on the public rbuilder) of a block builder to be used on a TDX context. 
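The rustfmt.toml above enables import reordering and crate-level import merging: imports_granularity = "Crate" collapses separate use statements from the same crate into a single nested item, and reorder_imports keeps them sorted. A small before/after sketch, using paths that appear later in this dump:

// Before formatting:
use rbuilder::backtest::run_backtest_build_range;
use rbuilder::live_builder::cli;

// After `cargo fmt` with reorder_imports = true and imports_granularity = "Crate":
use rbuilder::{backtest::run_backtest_build_range, live_builder::cli};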
3 | -------------------------------------------------------------------------------- /src/bidding_service_wrapper/mod.rs: -------------------------------------------------------------------------------- 1 | #[rustfmt::skip] 2 | pub mod bidding_service; 3 | pub mod client; 4 | pub mod conversion; 5 | pub use bidding_service::*; 6 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod bidding_service_wrapper; 2 | pub mod blocks_processor; 3 | pub mod build_info; 4 | pub mod flashbots_config; 5 | pub mod flashbots_signer; 6 | pub mod metrics; 7 | pub mod reconnect; 8 | pub mod signed_http_client; 9 | mod true_block_value_push; 10 | -------------------------------------------------------------------------------- /src/bin/backtest-build-range.rs: -------------------------------------------------------------------------------- 1 | use rbuilder::backtest::run_backtest_build_range; 2 | use rbuilder_operator::flashbots_config::FlashbotsConfig; 3 | 4 | #[tokio::main] 5 | async fn main() -> eyre::Result<()> { 6 | run_backtest_build_range::<FlashbotsConfig>().await 7 | } 8 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in 2 | # the repo. Unless a later match takes precedence, 3 | # they will be requested for review when someone opens a pull request. 4 | * @dvush @ZanCorDX @metachris 5 | /crates/ @dvush @ZanCorDX 6 | -------------------------------------------------------------------------------- /src/bin/backtest-build-block.rs: -------------------------------------------------------------------------------- 1 | use rbuilder::backtest::build_block::landed_block_from_db::run_backtest; 2 | use rbuilder_operator::flashbots_config::FlashbotsConfig; 3 | 4 | #[tokio::main] 5 | async fn main() -> eyre::Result<()> { 6 | run_backtest::<FlashbotsConfig>().await 7 | } 8 | -------------------------------------------------------------------------------- /src/bin/rbuilder.rs: -------------------------------------------------------------------------------- 1 | use rbuilder::live_builder::cli::{self}; 2 | use rbuilder_operator::{ 3 | build_info::{print_version_info, rbuilder_version}, 4 | flashbots_config::FlashbotsConfig, 5 | }; 6 | use tracing::info; 7 | 8 | fn on_run() { 9 | info!(version = ?rbuilder_version(), "Flashbots rbuilder version"); 10 | } 11 | 12 | #[tokio::main] 13 | async fn main() -> eyre::Result<()> { 14 | return cli::run::<FlashbotsConfig>(print_version_info, Some(on_run)).await; 15 | } 16 | -------------------------------------------------------------------------------- /src/true_block_value_push/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module handles the push of the best block (true block value) to a redis channel. 2 | //! This information is used by the smart-multiplexing core to decide when to stop multiplexing order flow. 3 | //! We use a redis channel for historical reasons, but it could be changed to direct streaming. 4 | //! This could be improved; for now it is just a refactoring that resuscitates the old code.
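As a rough sketch of the push mechanism described above: publishing a JSON payload on a Redis pub/sub channel with the redis crate (already a dependency of this repo) boils down to the following. The URL is a placeholder and the payload is an abridged, hypothetical BuiltBlockInfo serialization:

use redis::Commands;

fn publish_best_true_value_example() -> redis::RedisResult<()> {
    // Placeholder URL; production reads it from the tbv_push_redis config section.
    let client = redis::Client::open("redis://127.0.0.1/")?;
    let mut conn = client.get_connection()?;
    // Subscribers on the channel (e.g. the smart-multiplexing core) receive this message.
    let payload = r#"{"blockNumber":1,"slotNumber":1,"bestTrueValue":"0","bid":"0"}"#;
    // publish returns the number of subscribers that received the message.
    let _receivers: i32 = conn.publish("best_true_value", payload)?;
    Ok(())
}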
5 | 6 | pub mod best_true_value_observer; 7 | pub mod best_true_value_pusher; 8 | mod blocks_processor_backend; 9 | mod redis_backend; 10 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | # Please see the documentation for all configuration options: 2 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 3 | 4 | version: 2 5 | updates: 6 | - package-ecosystem: "cargo" 7 | directory: "/" 8 | schedule: 9 | interval: "weekly" 10 | # ignore: 11 | # # These are peer deps of Cargo and should not be automatically bumped 12 | # - dependency-name: "semver" 13 | # - dependency-name: "crates-io" 14 | # rebase-strategy: "disabled" 15 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig helps developers define and maintain consistent 2 | # coding styles between different editors and IDEs 3 | # editorconfig.org 4 | 5 | root = true 6 | 7 | [*] 8 | end_of_line = lf 9 | charset = utf-8 10 | trim_trailing_whitespace = true 11 | insert_final_newline = true 12 | indent_style = space 13 | indent_size = 4 14 | 15 | [*.rs] 16 | max_line_length = 100 17 | 18 | [*.{yml,yaml}] 19 | indent_size = 2 20 | 21 | [*.md] 22 | # double whitespace at end of line 23 | # denotes a line break in Markdown 24 | trim_trailing_whitespace = false 25 | 26 | [Makefile] 27 | indent_style = tab 28 | 29 | [] 30 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.88.0-bullseye@sha256:b315f988b86912bafa7afd39a6ded0a497bf850ec36578ca9a3bdd6a14d5db4e AS builder 2 | 3 | ARG BUILD_PROFILE=release 4 | ENV BUILD_PROFILE=$BUILD_PROFILE 5 | 6 | RUN apt-get update && apt-get install -y \ 7 | libclang-dev=1:11.0-51+nmu5 \ 8 | protobuf-compiler=3.12.4-1+deb11u1 \ 9 | cmake 10 | 11 | # Clone the repository at the specific branch 12 | WORKDIR /app 13 | COPY ./ /app 14 | 15 | # Build the project with the reproducible settings 16 | RUN make build-reproducible 17 | 18 | RUN mv /app/target/x86_64-unknown-linux-gnu/"${BUILD_PROFILE}"/rbuilder /rbuilder 19 | 20 | FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a 21 | COPY --from=builder /rbuilder /rbuilder 22 | ENTRYPOINT [ "/rbuilder" ] 23 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023-2024 rbuilder Contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /src/metrics.rs: -------------------------------------------------------------------------------- 1 | #![allow(unexpected_cfgs)] 2 | 3 | use ctor::ctor; 4 | use lazy_static::lazy_static; 5 | use metrics_macros::register_metrics; 6 | use prometheus::{IntCounterVec, IntGaugeVec, Opts}; 7 | use rbuilder::{telemetry::REGISTRY, utils::build_info::Version}; 8 | 9 | register_metrics! { 10 | pub static BLOCK_API_ERRORS: IntCounterVec = IntCounterVec::new( 11 | Opts::new("block_api_errors", "counter of the block processor errors"), 12 | &["api_name"] 13 | ) 14 | .unwrap(); 15 | 16 | pub static BIDDING_SERVICE_VERSION: IntGaugeVec = IntGaugeVec::new( 17 | Opts::new("bidding_service_version", "Version of the bidding service"), 18 | &["git", "git_ref", "build_time_utc"] 19 | ) 20 | .unwrap(); 21 | 22 | } 23 | 24 | pub fn inc_submit_block_errors() { 25 | BLOCK_API_ERRORS.with_label_values(&["submit_block"]).inc() 26 | } 27 | 28 | pub fn inc_publish_tbv_errors() { 29 | BLOCK_API_ERRORS.with_label_values(&["publish_tbv"]).inc() 30 | } 31 | 32 | pub(super) fn set_bidding_service_version(version: Version) { 33 | BIDDING_SERVICE_VERSION 34 | .with_label_values(&[ 35 | &version.git_commit, 36 | &version.git_ref, 37 | &version.build_time_utc, 38 | ]) 39 | .set(1); 40 | } 41 | -------------------------------------------------------------------------------- /.github/workflows/checks.yaml: -------------------------------------------------------------------------------- 1 | name: Checks 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | merge_group: 7 | push: 8 | branches: [main] 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | lint_and_test: 15 | name: Lint and test 16 | runs-on: warp-ubuntu-latest-x64-16x 17 | strategy: 18 | matrix: 19 | toolchain: 20 | - stable 21 | 22 | steps: 23 | - name: Checkout sources 24 | uses: actions/checkout@v4 25 | 26 | - name: Setup rust toolchain 27 | uses: dtolnay/rust-toolchain@stable 28 | with: 29 | toolchain: ${{ matrix.toolchain }} 30 | 31 | - name: Install Protoc 32 | uses: arduino/setup-protoc@v3 33 | 34 | - name: Run WarpBuilds/rust-cache 35 | uses: WarpBuilds/rust-cache@v2 36 | with: 37 | cache-on-failure: true 38 | 39 | - name: Run sccache-action 40 | uses: mozilla-actions/sccache-action@v0.0.9 41 | 42 | - name: Set sccache env vars 43 | run: | 44 | echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV 45 | echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV 46 | 47 | - name: Install native dependencies 48 | run: sudo apt-get update && sudo apt-get install -y libsqlite3-dev 49 | 50 | - run: make lint 51 | - run: make test 52 | -------------------------------------------------------------------------------- /src/true_block_value_push/redis_backend.rs: -------------------------------------------------------------------------------- 1 | use redis::Commands; 2 | use tracing::error; 3 | 4 | use super::best_true_value_pusher::{Backend, BuiltBlockInfo}; 5 | 6 | #[derive(thiserror::Error, Debug)] 7 | 
pub enum Error { 8 | #[error("Redis error {0}")] 9 | Redis(#[from] redis::RedisError), 10 | #[error("Json serialization error {0}")] 11 | JsonSerialization(#[from] serde_json::Error), 12 | } 13 | 14 | /// Backend for BestTrueValuePusher that publish data on a redis channel. 15 | pub struct RedisBackend { 16 | redis: redis::Client, 17 | channel_name: String, 18 | } 19 | 20 | impl RedisBackend { 21 | pub fn new(redis: redis::Client, channel_name: String) -> Self { 22 | Self { 23 | redis, 24 | channel_name, 25 | } 26 | } 27 | } 28 | 29 | impl Backend for RedisBackend { 30 | type Connection = redis::Connection; 31 | type BackendError = Error; 32 | 33 | fn connect(&self) -> Result { 34 | Ok(self.redis.get_connection()?) 35 | } 36 | 37 | fn publish( 38 | &self, 39 | connection: &mut Self::Connection, 40 | best_true_value: &BuiltBlockInfo, 41 | ) -> Result<(), Self::BackendError> { 42 | let best_true_value = serde_json::to_string(&best_true_value)?; 43 | Ok(connection.publish(&self.channel_name, &best_true_value)?) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/build_info.rs: -------------------------------------------------------------------------------- 1 | // The file has been placed there by the build script. 2 | 3 | mod internal { 4 | include!(concat!(env!("OUT_DIR"), "/built.rs")); 5 | } 6 | 7 | use internal::{ 8 | BUILT_TIME_UTC, CI_PLATFORM, FEATURES, GIT_COMMIT_HASH_SHORT, GIT_HEAD_REF, PROFILE, 9 | RUSTC_VERSION, 10 | }; 11 | use rbuilder::utils::build_info::Version; 12 | 13 | pub fn print_version_info() { 14 | println!( 15 | "commit: {}", 16 | GIT_COMMIT_HASH_SHORT.unwrap_or_default() 17 | ); 18 | println!("branch: {}", GIT_HEAD_REF.unwrap_or_default()); 19 | println!("build_platform: {:?}", CI_PLATFORM.unwrap_or_default()); 20 | println!("build_time: {BUILT_TIME_UTC}"); 21 | println!("features: {FEATURES:?}"); 22 | println!("profile: {PROFILE}"); 23 | println!("rustc: {RUSTC_VERSION}"); 24 | } 25 | 26 | pub fn rbuilder_version() -> Version { 27 | let git_commit = { 28 | let mut commit = String::new(); 29 | if let Some(hash) = GIT_COMMIT_HASH_SHORT { 30 | commit.push_str(hash); 31 | } 32 | if commit.is_empty() { 33 | commit.push_str("unknown"); 34 | } 35 | commit 36 | }; 37 | 38 | let git_ref = GIT_HEAD_REF.unwrap_or("unknown").to_string(); 39 | 40 | Version { 41 | git_commit, 42 | git_ref, 43 | build_time_utc: BUILT_TIME_UTC.to_string(), 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/signed_http_client.rs: -------------------------------------------------------------------------------- 1 | use crate::flashbots_signer::{FlashbotsSigner, FlashbotsSignerLayer}; 2 | use alloy_signer_local::PrivateKeySigner; 3 | use jsonrpsee::http_client::transport::Error as JsonError; 4 | use jsonrpsee::http_client::HttpClientBuilder; 5 | use jsonrpsee::http_client::{transport::HttpBackend, HttpClient}; 6 | use tower::ServiceBuilder; 7 | type MapErrorFn = fn(Box) -> JsonError; 8 | 9 | const fn map_error(err: Box) -> JsonError { 10 | JsonError::Http(err) 11 | } 12 | 13 | pub type SignedHttpClient = 14 | HttpClient, MapErrorFn>>; 15 | 16 | pub fn create_client( 17 | url: &str, 18 | signer: PrivateKeySigner, 19 | max_request_size: u32, 20 | max_concurrent_requests: usize, 21 | ) -> Result { 22 | let signing_middleware = FlashbotsSignerLayer::new(signer); 23 | let service_builder = ServiceBuilder::new() 24 | // Coerce to function pointer and remove the + 'static added to the closure 25 | 
.map_err(map_error as MapErrorFn) 26 | .layer(signing_middleware); 27 | let client = HttpClientBuilder::default() 28 | .max_request_size(max_request_size) 29 | .max_concurrent_requests(max_concurrent_requests) 30 | .set_middleware(service_builder) 31 | .build(url)?; 32 | Ok(client) 33 | } 34 | -------------------------------------------------------------------------------- /config-live-example.toml: -------------------------------------------------------------------------------- 1 | log_json = true 2 | log_level = "info,rbuilder=debug" 3 | redacted_telemetry_server_port = 6061 4 | redacted_telemetry_server_ip = "0.0.0.0" 5 | full_telemetry_server_port = 6060 6 | full_telemetry_server_ip = "0.0.0.0" 7 | 8 | chain = "mainnet" 9 | reth_datadir = "/mnt/data/reth" 10 | 11 | coinbase_secret_key = "env:COINBASE_SECRET_KEY" 12 | 13 | cl_node_url = ["http://localhost:3500"] 14 | jsonrpc_server_port = 8645 15 | jsonrpc_server_ip = "0.0.0.0" 16 | el_node_ipc_path = "/tmp/reth.ipc" 17 | extra_data = "⚡🤖" 18 | 19 | blocklist_file_path = "./blocklist.json" 20 | 21 | 22 | blocks_processor_url = "http://block_processor.internal" 23 | key_registration_url = "http://127.0.0.1:8090" 24 | ignore_cancellable_orders = true 25 | 26 | sbundle_mergeabe_signers = [] 27 | live_builders = ["mp-ordering", "mgp-ordering", "parallel"] 28 | 29 | bidding_service_ipc_path = "/tmp/rpc_bidding_server.sock" 30 | 31 | [tbv_push_redis] 32 | url = "env:BIDDING_REDIS_URL" 33 | channel = "best_true_value" 34 | 35 | 36 | [[relays]] 37 | name = "flashbots" 38 | url = "https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net" 39 | priority = 0 40 | use_ssz_for_submit = false 41 | use_gzip_for_submit = false 42 | 43 | [[builders]] 44 | name = "mgp-ordering" 45 | algo = "ordering-builder" 46 | discard_txs = true 47 | sorting = "mev-gas-price" 48 | failed_order_retries = 1 49 | drop_failed_orders = true 50 | 51 | [[builders]] 52 | name = "mp-ordering" 53 | algo = "ordering-builder" 54 | discard_txs = true 55 | sorting = "max-profit" 56 | failed_order_retries = 1 57 | drop_failed_orders = true 58 | 59 | [[builders]] 60 | name = "parallel" 61 | algo = "parallel-builder" 62 | discard_txs = true 63 | num_threads = 5 64 | safe_sorting_only = false 65 | -------------------------------------------------------------------------------- /src/bidding_service_wrapper/client/unfinished_block_building_sink_client.rs: -------------------------------------------------------------------------------- 1 | use rbuilder::{ 2 | live_builder::block_output::bidding_service_interface::{ 3 | BuiltBlockDescriptorForSlotBidder, SlotBidder, 4 | }, 5 | utils::offset_datetime_to_timestamp_us, 6 | }; 7 | use tokio::sync::mpsc; 8 | 9 | use crate::bidding_service_wrapper::{DestroySlotBidderParams, NewBlockParams}; 10 | 11 | use super::bidding_service_client_adapter::BiddingServiceClientCommand; 12 | 13 | /// Implementation of SlotBidder. 14 | /// Commands are forwarded everything to a UnboundedSender. 15 | /// BidMaker is wrapped with ... that contains a poling task that makes the bids. 
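The forwarding pattern described above, reduced to a minimal self-contained sketch; the command enum and handler body are invented stand-ins for BiddingServiceClientCommand and the adapter's polling task:

use tokio::sync::mpsc;

// Hypothetical command type standing in for BiddingServiceClientCommand.
enum Command {
    NewBlock { session_id: u64 },
    DestroySlotBidder { session_id: u64 },
}

// The sink side only holds an UnboundedSender, so notifying never blocks block building.
struct SinkClient {
    session_id: u64,
    commands_sender: mpsc::UnboundedSender<Command>,
}

impl SinkClient {
    fn notify_new_built_block(&self) {
        // A send error only means the consumer task is gone; it is ignored, as in the real client.
        let _ = self
            .commands_sender
            .send(Command::NewBlock { session_id: self.session_id });
    }
}

// A single consumer task drains the channel and performs the actual RPC calls.
async fn consume(mut rx: mpsc::UnboundedReceiver<Command>) {
    while let Some(command) = rx.recv().await {
        match command {
            Command::NewBlock { session_id } => { /* forward over RPC */ let _ = session_id; }
            Command::DestroySlotBidder { session_id } => { let _ = session_id; }
        }
    }
}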
16 | #[derive(Debug)] 17 | pub struct UnfinishedBlockBuildingSinkClient { 18 | session_id: u64, 19 | commands_sender: mpsc::UnboundedSender, 20 | } 21 | 22 | impl UnfinishedBlockBuildingSinkClient { 23 | pub fn new( 24 | session_id: u64, 25 | commands_sender: mpsc::UnboundedSender, 26 | ) -> Self { 27 | UnfinishedBlockBuildingSinkClient { 28 | commands_sender, 29 | session_id, 30 | } 31 | } 32 | } 33 | 34 | impl SlotBidder for UnfinishedBlockBuildingSinkClient { 35 | fn notify_new_built_block(&self, block_descriptor: BuiltBlockDescriptorForSlotBidder) { 36 | let _ = self 37 | .commands_sender 38 | .send(BiddingServiceClientCommand::NewBlock(NewBlockParams { 39 | session_id: self.session_id, 40 | true_block_value: block_descriptor.true_block_value.as_limbs().to_vec(), 41 | can_add_payout_tx: true, 42 | block_id: block_descriptor.id.0, 43 | creation_time_us: offset_datetime_to_timestamp_us(block_descriptor.creation_time), 44 | })); 45 | } 46 | } 47 | 48 | impl Drop for UnfinishedBlockBuildingSinkClient { 49 | fn drop(&mut self) { 50 | let _ = self 51 | .commands_sender 52 | .send(BiddingServiceClientCommand::DestroySlotBidder( 53 | DestroySlotBidderParams { 54 | session_id: self.session_id, 55 | }, 56 | )); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/reconnect.rs: -------------------------------------------------------------------------------- 1 | use exponential_backoff::Backoff; 2 | use std::{future::Future, time::Duration}; 3 | use tokio_util::sync::CancellationToken; 4 | use tracing::{debug, error, info, info_span, warn, Instrument}; 5 | 6 | #[derive(Debug)] 7 | pub enum RunCommand { 8 | Reconnect, 9 | Finish, 10 | } 11 | 12 | fn default_backoff() -> Backoff { 13 | Backoff::new(u32::MAX, Duration::from_secs(1), Duration::from_secs(12)) 14 | } 15 | 16 | pub async fn run_async_loop_with_reconnect< 17 | Connection, 18 | ConnectErr: std::error::Error, 19 | ConnectFut: Future>, 20 | RunFut: Future, 21 | Connect: Fn() -> ConnectFut, 22 | Run: Fn(Connection) -> RunFut, 23 | >( 24 | context: &str, 25 | connect: Connect, 26 | run: Run, 27 | backoff: Option, 28 | cancellation_token: CancellationToken, 29 | ) { 30 | let span = info_span!("connect_loop_context", context); 31 | 32 | 'reconnect: loop { 33 | if cancellation_token.is_cancelled() { 34 | break 'reconnect; 35 | } 36 | let backoff = backoff.clone().unwrap_or_else(default_backoff); 37 | let mut backoff_iter = backoff.iter(); 38 | 39 | let connection = 'backoff: loop { 40 | let timeout = if let Some(timeout) = backoff_iter.next() { 41 | timeout 42 | } else { 43 | warn!(parent: &span, "Backoff for connection reached max retries"); 44 | break 'reconnect; 45 | }; 46 | 47 | match connect().instrument(span.clone()).await { 48 | Ok(conn) => { 49 | debug!(parent: &span, "Established connection"); 50 | break 'backoff conn; 51 | } 52 | Err(err) => { 53 | error!(parent: &span, ?err, "Failed to establish connection"); 54 | tokio::time::sleep(timeout).await; 55 | } 56 | } 57 | }; 58 | 59 | match run(connection).instrument(span.clone()).await { 60 | RunCommand::Reconnect => continue 'reconnect, 61 | RunCommand::Finish => break 'reconnect, 62 | } 63 | } 64 | info!("Exiting connect loop"); 65 | } 66 | -------------------------------------------------------------------------------- /src/true_block_value_push/blocks_processor_backend.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | 
flashbots_config::default_blocks_processor_max_request_size_bytes, 3 | signed_http_client::SignedHttpClient, 4 | }; 5 | use alloy_signer_local::PrivateKeySigner; 6 | use jsonrpsee::core::client::ClientT; 7 | use tokio::runtime::Runtime; 8 | use tracing::error; 9 | 10 | use super::best_true_value_pusher::{Backend, BuiltBlockInfo}; 11 | 12 | const REPORT_BEST_TRUE_VALUE_METHOD: &str = "flashbots_reportBestTrueValue"; 13 | 14 | #[derive(thiserror::Error, Debug)] 15 | pub enum Error { 16 | #[error("Unable to build http client {0}")] 17 | BuildHttpClient(#[from] jsonrpsee::core::Error), 18 | #[error("Tokio runtime creation error {0}")] 19 | TokioRuntimeCreation(#[from] std::io::Error), 20 | } 21 | 22 | /// Backend for BestTrueValuePusher that sends signed JSON RPC to BlocksProcessor service. 23 | pub struct BlocksProcessorBackend { 24 | url: String, 25 | signer: PrivateKeySigner, 26 | /// A `current_thread` runtime for executing operations on the 27 | /// asynchronous client in a blocking manner. For more info: https://tokio.rs/tokio/topics/bridging 28 | runtime: Runtime, 29 | max_concurrent_requests: usize, 30 | } 31 | 32 | impl BlocksProcessorBackend { 33 | pub fn new( 34 | url: String, 35 | signer: PrivateKeySigner, 36 | max_concurrent_requests: usize, 37 | ) -> Result { 38 | let runtime = tokio::runtime::Builder::new_current_thread() 39 | .enable_all() 40 | .build()?; 41 | Ok(Self { 42 | url, 43 | signer, 44 | runtime, 45 | max_concurrent_requests, 46 | }) 47 | } 48 | } 49 | 50 | impl Backend for BlocksProcessorBackend { 51 | type Connection = SignedHttpClient; 52 | type BackendError = Error; 53 | 54 | fn connect(&self) -> Result { 55 | Ok(crate::signed_http_client::create_client( 56 | &self.url, 57 | self.signer.clone(), 58 | // we use default here because request is small 59 | default_blocks_processor_max_request_size_bytes(), 60 | self.max_concurrent_requests, 61 | )?) 62 | } 63 | 64 | fn publish( 65 | &self, 66 | connection: &mut Self::Connection, 67 | best_true_value: &BuiltBlockInfo, 68 | ) -> Result<(), Self::BackendError> { 69 | let params = [best_true_value]; 70 | Ok(self 71 | .runtime 72 | .block_on(connection.request(REPORT_BEST_TRUE_VALUE_METHOD, params))?) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Heavily inspired by Lighthouse: https://github.com/sigp/lighthouse/blob/stable/Makefile 2 | # and Reth: https://github.com/paradigmxyz/reth/blob/main/Makefile 3 | .DEFAULT_GOAL := help 4 | 5 | GIT_VER ?= $(shell git describe --tags --always --dirty="-dev") 6 | GIT_TAG ?= $(shell git describe --tags --abbrev=0) 7 | 8 | ##@ Help 9 | 10 | .PHONY: help 11 | help: ## Display this help. 
12 | @awk 'BEGIN {FS = ":.*##"; printf "Usage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 13 | 14 | .PHONY: v 15 | v: ## Show the current version 16 | @echo "Version: ${GIT_VER}" 17 | 18 | ##@ Build 19 | 20 | .PHONY: clean 21 | clean: ## Clean up 22 | cargo clean 23 | 24 | .PHONY: build 25 | build: ## Build static binary for x86_64 26 | cargo build --release --target x86_64-unknown-linux-gnu 27 | 28 | # Environment variables for reproducible builds 29 | # Initialize RUSTFLAGS 30 | RUST_BUILD_FLAGS = 31 | 32 | # Remove build ID from the binary to ensure reproducibility across builds 33 | RUST_BUILD_FLAGS += -C link-arg=-Wl,--build-id=none 34 | 35 | # Remove metadata hash from symbol names to ensure reproducible builds 36 | RUST_BUILD_FLAGS += -C metadata='' 37 | 38 | # Set timestamp from last git commit for reproducible builds 39 | SOURCE_DATE ?= $(shell git log -1 --pretty=%ct) 40 | 41 | # Disable incremental compilation to avoid non-deterministic artifacts 42 | CARGO_INCREMENTAL_VAL = 0 43 | 44 | # Set C locale for consistent string handling and sorting 45 | LOCALE_VAL = C 46 | 47 | # Set UTC timezone for consistent time handling across builds 48 | TZ_VAL = UTC 49 | 50 | # Set the target for the build, default to x86_64 51 | TARGET ?= x86_64-unknown-linux-gnu 52 | 53 | .PHONY: build-reproducible 54 | build-reproducible: ## Build reproducible static binary for x86_64 55 | # Set timestamp from last git commit for reproducible builds 56 | SOURCE_DATE_EPOCH=$(SOURCE_DATE) \ 57 | RUSTFLAGS="${RUST_BUILD_FLAGS} --remap-path-prefix $$(pwd)=." \ 58 | CARGO_INCREMENTAL=${CARGO_INCREMENTAL_VAL} \ 59 | LC_ALL=${LOCALE_VAL} \ 60 | TZ=${TZ_VAL} \ 61 | cargo build --release --locked --target $(TARGET) 62 | 63 | .PHONY: docker-image 64 | docker-image: ## Build a rbuilder Docker image 65 | docker build --platform linux/amd64 . 
-t rbuilder 66 | 67 | ##@ Dev 68 | 69 | .PHONY: lint 70 | lint: ## Run the linters 71 | cargo fmt -- --check 72 | cargo clippy -- -D warnings 73 | 74 | .PHONY: test 75 | test: ## Run the tests 76 | cargo test --verbose 77 | 78 | .PHONY: lt 79 | lt: lint test ## Run "lint" and "test" 80 | 81 | fmt: ## Format the code 82 | cargo fmt 83 | cargo fix --allow-staged 84 | cargo clippy --fix --allow-staged 85 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rbuilder-operator" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | reth = { git = "https://github.com/paradigmxyz/reth", rev = "0b316160a9915ac80c4ae867f69e304aca85ec01" } 8 | reth-db = { git = "https://github.com/paradigmxyz/reth", rev = "0b316160a9915ac80c4ae867f69e304aca85ec01" } 9 | reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", rev = "0b316160a9915ac80c4ae867f69e304aca85ec01" } 10 | 11 | 12 | alloy-primitives = { version = "1.3.1", default-features = false } 13 | alloy-provider = { version = "1.0.27", features = ["ipc", "pubsub"] } 14 | alloy-json-rpc = { version = "1.0.27" } 15 | alloy-transport-http = { version = "1.0.27" } 16 | alloy-transport = { version = "1.0.27" } 17 | alloy-rpc-types-beacon = { version = "1.0.27", features = ["ssz"] } 18 | alloy-signer-local = { version = "1.0.27" } 19 | 20 | alloy-signer = { version = "1.0.27" } 21 | alloy-rpc-client = { version = "1.0.27" } 22 | 23 | clap = { version = "4.4.3", features = ["derive", "env"] } 24 | tokio = "1.40.0" 25 | tokio-util = "0.7.12" 26 | eyre = "0.6.12" 27 | serde = "1.0.210" 28 | serde_json = "1.0.128" 29 | serde_with = { version = "3.9.0", features = ["time_0_3"] } 30 | toml = "0.8.8" 31 | jsonrpsee = { version = "0.20.3", features = ["full"] } 32 | tracing = "0.1.37" 33 | time = { version = "0.3.36", features = ["macros", "formatting", "parsing"] } 34 | thiserror = "1.0.64" 35 | ahash = "0.8.6" 36 | itertools = "0.11.0" 37 | rand = "0.8.5" 38 | crossbeam-queue = "0.3.10" 39 | lazy_static = "1.4.0" 40 | clickhouse = { version = "0.12.2", features = ["time", "uuid", "native-tls"] } 41 | uuid = { version = "1.10.0", features = ["serde", "v4", "v5"] } 42 | mockall = "0.12.1" 43 | prometheus = "0.13.4" 44 | ctor = "0.2" 45 | flume = "0.11.0" 46 | redis = "0.25.4" 47 | tonic = "0.8" 48 | prost = "0.11" 49 | tokio-stream = { version = "0.1", features = ["net"] } 50 | futures = "0.3.28" 51 | tower = "0.4" 52 | reqwest = { version = "0.11.20", features = ["blocking"] } 53 | secp256k1 = { version = "0.29" } 54 | url = "2.5.2" 55 | http = "0.2.9" 56 | hyper = "0.14" 57 | futures-util = "0.3.31" 58 | parking_lot = { version = "0.12.3" } 59 | derivative = "2.2.0" 60 | 61 | #rbuilder = {path="./../rbuilder/crates/rbuilder"} 62 | rbuilder = { git = "https://github.com/flashbots/rbuilder.git", rev = "8ac0565baf3fb46271f4f4ed73425c5b5a31fafb"} 63 | 64 | #rbuilder-primitives = {path="./../rbuilder/crates/rbuilder-primitives"} 65 | rbuilder-primitives = { git = "https://github.com/flashbots/rbuilder.git", rev = "8ac0565baf3fb46271f4f4ed73425c5b5a31fafb"} 66 | 67 | #metrics_macros = {path="./../rbuilder/crates/rbuilder/src/telemetry/metrics_macros"} 68 | metrics_macros = { git = "https://github.com/flashbots/rbuilder.git", rev = "8ac0565baf3fb46271f4f4ed73425c5b5a31fafb"} 69 | 70 | #bid-scraper = {path="./../rbuilder/crates/bid-scraper"} 71 | bid-scraper = { git = 
"https://github.com/flashbots/rbuilder.git", rev = "8ac0565baf3fb46271f4f4ed73425c5b5a31fafb"} 72 | 73 | 74 | tokio-tungstenite = "0.26.2" 75 | exponential-backoff = "1.2.0" 76 | 77 | [build-dependencies] 78 | built = { version = "0.7.1", features = ["git2", "chrono"] } 79 | tonic-build = "0.8" 80 | -------------------------------------------------------------------------------- /src/true_block_value_push/best_true_value_observer.rs: -------------------------------------------------------------------------------- 1 | use alloy_signer_local::PrivateKeySigner; 2 | use rbuilder::{ 3 | building::BuiltBlockTrace, 4 | live_builder::{ 5 | block_output::bidding_service_interface::BidObserver, payload_events::MevBoostSlotData, 6 | }, 7 | }; 8 | use rbuilder_primitives::mev_boost::SubmitBlockRequest; 9 | use redis::RedisError; 10 | use reth::primitives::SealedBlock; 11 | use tokio_util::sync::CancellationToken; 12 | 13 | use super::{ 14 | best_true_value_pusher::{ 15 | Backend, BuiltBlockInfo, BuiltBlockInfoPusher, LastBuiltBlockInfoCell, 16 | }, 17 | blocks_processor_backend::BlocksProcessorBackend, 18 | redis_backend::RedisBackend, 19 | }; 20 | 21 | #[derive(thiserror::Error, Debug)] 22 | pub enum Error { 23 | #[error("Unable to init redis connection : {0}")] 24 | Redis(#[from] RedisError), 25 | #[error("BlocksProcessor backend error: {0}")] 26 | BlocksProcessor(#[from] super::blocks_processor_backend::Error), 27 | } 28 | 29 | pub type Result = core::result::Result; 30 | 31 | #[derive(Debug)] 32 | pub struct BestTrueValueObserver { 33 | best_local_value: LastBuiltBlockInfoCell, 34 | } 35 | 36 | impl BestTrueValueObserver { 37 | /// Constructor using a redis channel backend 38 | pub fn new_redis( 39 | tbv_push_redis_url: String, 40 | tbv_push_redis_channel: String, 41 | cancellation_token: CancellationToken, 42 | ) -> Result { 43 | let best_true_value_redis = redis::Client::open(tbv_push_redis_url)?; 44 | let redis_backend = RedisBackend::new(best_true_value_redis, tbv_push_redis_channel); 45 | Self::new(redis_backend, cancellation_token) 46 | } 47 | 48 | /// Constructor using signed JSON-RPC block-processor API 49 | pub fn new_block_processor( 50 | url: String, 51 | signer: PrivateKeySigner, 52 | max_concurrent_requests: usize, 53 | cancellation_token: CancellationToken, 54 | ) -> Result { 55 | let backend = BlocksProcessorBackend::new(url, signer, max_concurrent_requests)?; 56 | Self::new(backend, cancellation_token) 57 | } 58 | 59 | fn new( 60 | backend: BackendType, 61 | cancellation_token: CancellationToken, 62 | ) -> Result { 63 | let last_local_value = LastBuiltBlockInfoCell::default(); 64 | let pusher = 65 | BuiltBlockInfoPusher::new(last_local_value.clone(), backend, cancellation_token); 66 | std::thread::spawn(move || pusher.run_push_task()); 67 | Ok(BestTrueValueObserver { 68 | best_local_value: last_local_value, 69 | }) 70 | } 71 | } 72 | 73 | impl BidObserver for BestTrueValueObserver { 74 | fn block_submitted( 75 | &self, 76 | slot_data: &MevBoostSlotData, 77 | _sealed_block: &SealedBlock, 78 | _submit_block_request: &SubmitBlockRequest, 79 | built_block_trace: &BuiltBlockTrace, 80 | builder_name: String, 81 | _best_bid_value: alloy_primitives::U256, 82 | ) { 83 | let block_info = BuiltBlockInfo::new( 84 | slot_data.block(), 85 | slot_data.slot(), 86 | built_block_trace.true_bid_value, 87 | built_block_trace.bid_value, 88 | builder_name, 89 | slot_data.timestamp().unix_timestamp() as u64, 90 | ); 91 | self.best_local_value.update_value_safe(block_info); 92 | } 93 | } 94 | 
-------------------------------------------------------------------------------- /src/bidding_service_wrapper/proto/bidding_service.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package bidding_service; 3 | 4 | // Protocol for the bidding service. It's used to marshal all the traits in src/block_descriptor_bidding/traits.rs 5 | // Usage: 6 | // The client connects to the server and calls Initialize, this call should create the real BiddingService on the server side. 7 | // Before calling Initialize any other call will fail. Initialize can be called again to recreate the BiddingService (eg: rbuilder reconnection). 8 | // After that, for each slot the client should call CreateSlotBidder to create the SlotBidder on the server side and DestroySlotBidder when the SlotBidder is not needed anymore. 9 | // Other calls are almost 1 to 1 with the original traits but for SlotBidder calls block/slot are added to identify the SlotBidder. 10 | // Notice that CreateSlotBidder returns a stream of Callback. This stream is used for 2 things: 11 | // - Send back bids made by the SlotBidder. 12 | // - Notify changes on the state of SlotBidder's can_use_suggested_fee_recipient_as_coinbase. We use this methodology instead of a 13 | // forward RPC call since can_use_suggested_fee_recipient_as_coinbase almost does not change and we want to avoid innecesary RPC calls during block building. 14 | service BiddingService { 15 | 16 | // Call after connection before calling anything. This will really create the BiddingService on the server side. 17 | // Returns the version info for the server side. 18 | rpc Initialize(LandedBlocksParams) returns (BidderVersionInfo); 19 | 20 | // BiddingService 21 | rpc CreateSlotBidder(CreateSlotBidderParams) returns (stream Callback); 22 | rpc DestroySlotBidder(DestroySlotBidderParams) returns (Empty); 23 | rpc MustWinBlock(MustWinBlockParams) returns (Empty); 24 | rpc UpdateNewLandedBlocksDetected(LandedBlocksParams) returns (Empty); 25 | rpc UpdateFailedReadingNewLandedBlocks(Empty) returns (Empty); 26 | 27 | // BiddingService->BlockBidWithStatsObs 28 | rpc UpdateNewBid(UpdateNewBidParams) returns (Empty); 29 | 30 | 31 | // UnfinishedBlockBuildingSink 32 | rpc NewBlock(NewBlockParams) returns (Empty); 33 | 34 | 35 | 36 | } 37 | // Not using sub messages to avoid the extra Option generated in rust code. 
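To make the call flow above concrete, a hedged sketch of driving the service from Rust through a tonic-generated client; the module and method names follow tonic's standard codegen for package bidding_service, the TCP endpoint is a placeholder (the production adapter connects over the IPC path from config-live-example.toml), and all field values are dummies:

use crate::bidding_service_wrapper::{
    bidding_service_client::BiddingServiceClient, CreateSlotBidderParams, LandedBlocksParams,
};

async fn drive_bidding_service() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = BiddingServiceClient::connect("http://127.0.0.1:50051").await?;

    // Initialize must be called first; it creates the real BiddingService server-side
    // and returns its version info.
    let version = client
        .initialize(LandedBlocksParams { landed_block_info: vec![] })
        .await?
        .into_inner();
    println!("bidding service version: {:?}", version);

    // CreateSlotBidder returns a stream of Callback messages (bids and coinbase-flag changes).
    let mut callbacks = client
        .create_slot_bidder(CreateSlotBidderParams {
            block: 1,
            slot: 1,
            parent_hash: vec![0u8; 32],
            session_id: 1,
            slot_timestamp: 0,
        })
        .await?
        .into_inner();

    while let Some(callback) = callbacks.message().await? {
        // Exactly one of callback.bid / callback.can_use_suggested_fee_recipient_as_coinbase_change
        // is set per message, as documented above.
        let _ = callback;
    }
    Ok(())
}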
38 | // uint64 block + uint64 slot should be something like BidderId 39 | 40 | 41 | // Mapping of build_info::Version 42 | message BidderVersionInfo { 43 | string git_commit = 1; 44 | string git_ref = 2; 45 | string build_time_utc = 3; 46 | } 47 | 48 | message Empty { 49 | } 50 | 51 | 52 | message MustWinBlockParams { 53 | uint64 block = 1; 54 | } 55 | 56 | enum PublisherType { 57 | RelayBids = 0; 58 | RelayHeaders = 1; 59 | UltrasoundWs = 2; 60 | BloxrouteWs = 3; 61 | ExternalWs = 4; 62 | } 63 | 64 | 65 | message UpdateNewBidParams { 66 | double seen_time = 1; 67 | string publisher_name = 2; 68 | PublisherType publisher_type = 3; 69 | optional double relay_time = 4; 70 | string relay_name = 5; 71 | bytes block_hash = 6; // Array of 32 bytes 72 | bytes parent_hash = 7; // Array of 32 bytes 73 | repeated uint64 value = 8; // Array of 4 uint64 74 | uint64 slot_number = 9; 75 | uint64 block_number = 10; 76 | bytes builder_pubkey = 11; // Array of 0 or 48 bytes 77 | optional string extra_data = 12; 78 | bytes fee_recipient = 13;// Array of 0 or 20 bytes 79 | bytes proposer_fee_recipient = 14;// Array of 0 or 20 bytes 80 | optional uint64 gas_used = 15; 81 | optional bool optimistic_submission = 16; 82 | uint64 creation_time_us = 17; // For metrics 83 | } 84 | 85 | message NewBlockParams{ 86 | uint64 session_id = 1; 87 | repeated uint64 true_block_value = 2; // Array of 4 uint64 88 | bool can_add_payout_tx = 3; 89 | uint64 block_id = 4; 90 | uint64 creation_time_us = 5;// For metrics 91 | } 92 | 93 | message DestroySlotBidderParams { 94 | uint64 session_id = 1; 95 | } 96 | 97 | message CreateSlotBidderParams { 98 | uint64 block = 1; 99 | uint64 slot = 2; 100 | bytes parent_hash = 3; // Array of 32 bytes 101 | // Id identifying the session. Used in all following calls. 102 | uint64 session_id = 4; 103 | // unix ts 104 | int64 slot_timestamp = 5; 105 | } 106 | 107 | 108 | // Info about a onchain block from reth. 109 | message LandedBlockInfo { 110 | uint64 block_number = 1; 111 | int64 block_timestamp = 2; 112 | repeated uint64 builder_balance = 3; // Array of 4 uint64 113 | // true -> we landed this block. 114 | // If false we could have landed it in coinbase == fee recipient mode but balance wouldn't change so we don't care. 115 | bool beneficiary_is_builder = 4; 116 | } 117 | 118 | message LandedBlocksParams { 119 | repeated LandedBlockInfo landed_block_info = 1; // Added field name 120 | } 121 | 122 | message Bid { 123 | // Optional implicitly by allowing empty 124 | repeated uint64 payout_tx_value = 1; // Array of 4 uint64 125 | uint64 block_id = 2; 126 | // Optional implicitly by allowing empty 127 | repeated uint64 seen_competition_bid = 3; // Array of 4 uint64 128 | optional uint64 trigger_creation_time_us = 4; 129 | } 130 | 131 | 132 | // Exactly 1 member will be not null. 133 | // Since this is not mapped to an enum we must be careful to manually update BiddingServiceClientAdapter. 134 | message Callback { 135 | Bid bid = 1; 136 | optional bool can_use_suggested_fee_recipient_as_coinbase_change = 2; 137 | } 138 | -------------------------------------------------------------------------------- /src/true_block_value_push/best_true_value_pusher.rs: -------------------------------------------------------------------------------- 1 | //! This module is responsible for syncing the best true value bid between the local state and redis. 
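The pusher below abstracts its sink behind the small Backend trait (connect plus publish) defined further down in this file. A minimal sketch of an alternative backend, assuming that trait and BuiltBlockInfo exactly as declared in this module; something like this is useful mainly for tests or local debugging:

/// Hypothetical backend that just prints the serialized payload to stdout.
struct StdoutBackend;

#[derive(thiserror::Error, Debug)]
enum StdoutBackendError {
    #[error("Json serialization error {0}")]
    JsonSerialization(#[from] serde_json::Error),
}

impl Backend for StdoutBackend {
    type Connection = ();
    type BackendError = StdoutBackendError;

    fn connect(&self) -> Result<Self::Connection, Self::BackendError> {
        Ok(())
    }

    fn publish(
        &self,
        _connection: &mut Self::Connection,
        best_true_value: &BuiltBlockInfo,
    ) -> Result<(), Self::BackendError> {
        println!("{}", serde_json::to_string(best_true_value)?);
        Ok(())
    }
}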
2 | 3 | use alloy_primitives::U256; 4 | 5 | use parking_lot::Mutex; 6 | use rbuilder::utils::{ 7 | reconnect::{run_loop_with_reconnect, RunCommand}, 8 | u256decimal_serde_helper, 9 | }; 10 | use serde::{Deserialize, Serialize}; 11 | use std::{sync::Arc, thread::sleep, time::Duration}; 12 | use time::OffsetDateTime; 13 | use tokio_util::sync::CancellationToken; 14 | use tracing::{error, trace}; 15 | 16 | use crate::metrics::inc_publish_tbv_errors; 17 | 18 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 19 | #[serde(rename_all = "camelCase")] 20 | pub struct BuiltBlockInfo { 21 | pub timestamp_ms: u64, 22 | pub block_number: u64, 23 | pub slot_number: u64, 24 | /// Best true value of submitted block (has subtracted the payout tx cost) 25 | #[serde(with = "u256decimal_serde_helper")] 26 | pub best_true_value: U256, 27 | /// Bid we made to the relay. 28 | #[serde(with = "u256decimal_serde_helper")] 29 | pub bid: U256, 30 | pub builder: String, 31 | pub slot_end_timestamp: u64, 32 | } 33 | 34 | impl BuiltBlockInfo { 35 | pub fn new( 36 | block_number: u64, 37 | slot_number: u64, 38 | best_true_value: U256, 39 | bid: U256, 40 | builder: String, 41 | slot_end_timestamp: u64, 42 | ) -> Self { 43 | BuiltBlockInfo { 44 | timestamp_ms: (OffsetDateTime::now_utc().unix_timestamp_nanos() / 1_000_000) as u64, 45 | block_number, 46 | slot_number, 47 | best_true_value, 48 | bid, 49 | builder, 50 | slot_end_timestamp, 51 | } 52 | } 53 | 54 | /// Compares things related to bidding: block_number,slot_number,best_true_value and best_relay_value 55 | pub fn is_same_bid_info(&self, other: &Self) -> bool { 56 | self.block_number == other.block_number 57 | && self.slot_number == other.slot_number 58 | && self.best_true_value == other.best_true_value 59 | && self.bid == other.bid 60 | } 61 | } 62 | 63 | #[derive(Debug, Default, Clone)] 64 | pub struct LastBuiltBlockInfoCell { 65 | data: Arc>, 66 | } 67 | 68 | impl LastBuiltBlockInfoCell { 69 | pub fn update_value_safe(&self, value: BuiltBlockInfo) { 70 | let mut best_value = self.data.lock(); 71 | if value.slot_number < best_value.slot_number { 72 | // don't update value for the past slot 73 | return; 74 | } 75 | *best_value = value; 76 | } 77 | 78 | pub fn read(&self) -> BuiltBlockInfo { 79 | self.data.lock().clone() 80 | } 81 | } 82 | 83 | /// BuiltBlockInfoPusher periodically sends last BuiltBlockInfo via a configurable backend. 84 | #[derive(Debug, Clone)] 85 | pub struct BuiltBlockInfoPusher { 86 | /// Best value we got from our building algorithms. 87 | last_local_value: LastBuiltBlockInfoCell, 88 | backend: BackendType, 89 | 90 | cancellation_token: CancellationToken, 91 | } 92 | 93 | const PUSH_INTERVAL: Duration = Duration::from_millis(50); 94 | const MAX_IO_ERRORS: usize = 5; 95 | 96 | /// Trait to connect and publish new BuiltBlockInfo data (as a &str) 97 | /// For simplification mixes a little the factory role and the publish role. 98 | pub trait Backend { 99 | type Connection; 100 | type BackendError: std::error::Error; 101 | /// Creates a new connection to the sink of tbv info. 
102 | fn connect(&self) -> Result; 103 | /// Call with the connection obtained by connect() 104 | fn publish( 105 | &self, 106 | connection: &mut Self::Connection, 107 | best_true_value: &BuiltBlockInfo, 108 | ) -> Result<(), Self::BackendError>; 109 | } 110 | 111 | impl BuiltBlockInfoPusher { 112 | pub fn new( 113 | last_local_value: LastBuiltBlockInfoCell, 114 | backend: BackendType, 115 | cancellation_token: CancellationToken, 116 | ) -> Self { 117 | Self { 118 | last_local_value, 119 | backend, 120 | cancellation_token, 121 | } 122 | } 123 | 124 | /// Run the task that pushes the last BuiltBlockInfo. 125 | /// The value is read from last_local_value and pushed to redis. 126 | pub fn run_push_task(self) { 127 | run_loop_with_reconnect( 128 | "push_best_bid", 129 | || -> Result { 130 | self.backend.connect() 131 | }, 132 | |mut conn| -> RunCommand { 133 | let mut io_errors = 0; 134 | let mut last_pushed_value: Option = None; 135 | loop { 136 | if self.cancellation_token.is_cancelled() { 137 | break; 138 | } 139 | 140 | if io_errors > MAX_IO_ERRORS { 141 | return RunCommand::Reconnect; 142 | } 143 | 144 | sleep(PUSH_INTERVAL); 145 | let last_local_value = self.last_local_value.read(); 146 | if last_pushed_value 147 | .as_ref() 148 | .is_none_or(|value| !value.is_same_bid_info(&last_local_value)) 149 | { 150 | last_pushed_value = Some(last_local_value.clone()); 151 | match self.backend.publish(&mut conn, &last_local_value) { 152 | Ok(()) => { 153 | trace!(?last_local_value, "Pushed last local value"); 154 | } 155 | Err(err) => { 156 | error!(?err, "Failed to publish last true value bid"); 157 | // inc_publish_tbv_errors is supposed to be called for block_processor errors but I added the metric here so 158 | // it logs for al backends. 159 | inc_publish_tbv_errors(); 160 | io_errors += 1; 161 | } 162 | } 163 | } 164 | } 165 | RunCommand::Finish 166 | }, 167 | self.cancellation_token.clone(), 168 | ) 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | workflow_dispatch: 8 | inputs: 9 | build-binary: 10 | description: 'Build Binary' 11 | required: false 12 | type: boolean 13 | default: true 14 | build-docker: 15 | description: 'Build Docker' 16 | required: false 17 | type: boolean 18 | default: true 19 | draft-release: 20 | description: 'Draft Release' 21 | required: false 22 | type: boolean 23 | default: false 24 | ubuntu-version: 25 | description: 'Ubuntu Version' 26 | required: false 27 | type: choice 28 | options: 29 | - '24.04' 30 | - '22.04' 31 | default: '24.04' 32 | 33 | jobs: 34 | extract-version: 35 | name: Extract version 36 | runs-on: warp-ubuntu-latest-x64-16x 37 | outputs: 38 | VERSION: ${{ steps.extract_version.outputs.VERSION }} 39 | steps: 40 | - name: Extract version 41 | id: extract_version 42 | run: | 43 | if [[ "${GITHUB_REF_TYPE}" == "tag" ]]; then 44 | VERSION="${GITHUB_REF#refs/tags/}" 45 | else 46 | SHA_SHORT="$(echo ${GITHUB_SHA} | cut -c1-7)" 47 | BRANCH_NAME_SAFE="${GITHUB_REF_NAME//\//-}" # replaces "/" in branch name with "-" 48 | VERSION="${BRANCH_NAME_SAFE}-${SHA_SHORT}" 49 | fi 50 | echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT 51 | echo "${VERSION}" 52 | 53 | echo "### Version: \`${VERSION}\`" >> $GITHUB_STEP_SUMMARY 54 | echo "| | |" >> $GITHUB_STEP_SUMMARY 55 | echo "| ------------------- | ---------------------- |" >> 
$GITHUB_STEP_SUMMARY 56 | echo "| \`GITHUB_REF_TYPE\` | \`${GITHUB_REF_TYPE}\` |" >> $GITHUB_STEP_SUMMARY 57 | echo "| \`GITHUB_REF_NAME\` | \`${GITHUB_REF_NAME}\` |" >> $GITHUB_STEP_SUMMARY 58 | echo "| \`GITHUB_REF\` | \`${GITHUB_REF}\` |" >> $GITHUB_STEP_SUMMARY 59 | echo "| \`GITHUB_SHA\` | \`${GITHUB_SHA}\` |" >> $GITHUB_STEP_SUMMARY 60 | echo "| \`VERSION\` | \`${VERSION}\` |" >> $GITHUB_STEP_SUMMARY 61 | 62 | build-binary: 63 | name: Build binary 64 | needs: extract-version 65 | if: ${{ github.event.inputs.build-binary == 'true' || github.event_name == 'push'}} # when manually triggered or version tagged 66 | runs-on: ${{ matrix.configs.runner }} 67 | container: 68 | image: ubuntu:${{ github.event.inputs.ubuntu-version || '24.04' }} 69 | env: 70 | VERSION: ${{ needs.extract-version.outputs.VERSION }} 71 | UBUNTU_VERSION: ${{ github.event.inputs.ubuntu-version || '24.04' }} 72 | permissions: 73 | contents: write 74 | packages: write 75 | strategy: 76 | matrix: 77 | configs: 78 | - target: x86_64-unknown-linux-gnu 79 | runner: warp-ubuntu-latest-x64-16x 80 | 81 | steps: 82 | - name: Install dependencies 83 | run: | 84 | apt-get update 85 | apt-get install -y \ 86 | build-essential \ 87 | cmake \ 88 | curl \ 89 | git \ 90 | libclang-dev \ 91 | libssl-dev \ 92 | pkg-config \ 93 | protobuf-compiler 94 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 95 | 96 | - uses: actions/checkout@v4 # must install git before checkout and set safe.directory after checkout because of container 97 | with: 98 | fetch-depth: 0 99 | 100 | - name: Prepare filename 101 | run: echo "OUTPUT_FILENAME=rbuilder-${VERSION}-${{ matrix.configs.target }}-ubuntu${UBUNTU_VERSION}" >> $GITHUB_ENV 102 | 103 | - name: Build binary 104 | run: | 105 | git config --global --add safe.directory "$(pwd)" 106 | . $HOME/.cargo/env 107 | make build-reproducible TARGET=${{ matrix.configs.target }} 108 | ./target/${{ matrix.configs.target }}/release/rbuilder version 109 | 110 | - name: Upload artifact 111 | uses: actions/upload-artifact@v4 112 | with: 113 | name: ${{ env.OUTPUT_FILENAME }} 114 | path: target/${{ matrix.configs.target }}/release/rbuilder 115 | 116 | build-docker: 117 | name: Build and publish Docker image 118 | if: ${{ github.event.inputs.build-docker == 'true' || github.event_name == 'push'}} 119 | needs: extract-version 120 | runs-on: warp-ubuntu-latest-x64-16x 121 | env: 122 | VERSION: ${{ needs.extract-version.outputs.VERSION }} 123 | permissions: 124 | contents: read 125 | packages: write 126 | 127 | steps: 128 | - name: Checkout sources 129 | uses: actions/checkout@v4 130 | with: 131 | fetch-depth: 0 132 | 133 | - name: Set up Docker Buildx 134 | uses: docker/setup-buildx-action@v3 135 | 136 | - name: Docker metadata 137 | uses: docker/metadata-action@v5 138 | id: meta 139 | with: 140 | images: ghcr.io/${{ github.repository }} 141 | labels: org.opencontainers.image.source=${{ github.repositoryUrl }} 142 | tags: | 143 | type=sha 144 | type=semver,pattern={{version}},value=${{ env.VERSION }} 145 | type=semver,pattern={{major}}.{{minor}},value=${{ env.VERSION }} 146 | type=semver,pattern={{major}},value=${{ env.VERSION }} 147 | type=raw,value=latest,enable=${{ !contains(env.VERSION, '-') }} 148 | 149 | - name: Login to GHCR 150 | uses: docker/login-action@v3 151 | with: 152 | registry: ghcr.io 153 | username: ${{ github.actor }} 154 | password: ${{ secrets.GITHUB_TOKEN }} 155 | 156 | - name: Build and push Docker image 157 | uses: docker/build-push-action@v5 158 | with: 159 | context: . 
160 | push: true 161 | tags: ${{ steps.meta.outputs.tags }} 162 | labels: ${{ steps.meta.outputs.labels }} 163 | platforms: linux/amd64 164 | provenance: false 165 | cache-from: type=gha 166 | cache-to: type=gha,mode=max 167 | build-args: | 168 | BUILD_PROFILE=release 169 | 170 | draft-release: 171 | name: Draft release 172 | if: ${{ github.event.inputs.draft-release == 'true' || github.event_name == 'push'}} # when manually triggered or version tagged 173 | needs: [extract-version, build-binary] 174 | runs-on: warp-ubuntu-latest-x64-16x 175 | env: 176 | VERSION: ${{ needs.extract-version.outputs.VERSION }} 177 | permissions: 178 | contents: write 179 | steps: 180 | - name: Checkout 181 | uses: actions/checkout@v4 182 | 183 | - name: Download artifacts 184 | uses: actions/download-artifact@v4 185 | with: 186 | merge-multiple: true 187 | path: artifacts 188 | 189 | - name: Record artifacts checksums 190 | working-directory: artifacts 191 | run: | 192 | find ./ || true 193 | for file in *; do sha256sum "$file" >> sha256sums.txt; done; 194 | cat sha256sums.txt 195 | 196 | - name: Create release draft 197 | uses: softprops/action-gh-release@v2.0.5 198 | id: create-release-draft 199 | with: 200 | draft: true 201 | files: artifacts/* 202 | generate_release_notes: true 203 | name: ${{ env.VERSION }} 204 | tag_name: ${{ env.VERSION }} 205 | 206 | - name: Write Github Step Summary 207 | run: | 208 | echo "---" 209 | echo "### Release Draft: ${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY 210 | echo "${{ steps.create-release-draft.outputs.url }}" >> $GITHUB_STEP_SUMMARY -------------------------------------------------------------------------------- /src/bidding_service_wrapper/conversion.rs: -------------------------------------------------------------------------------- 1 | //! 
Conversion real data <-> rpc data 2 | use crate::bidding_service_wrapper::{LandedBlockInfo as RPCLandedBlockInfo, UpdateNewBidParams}; 3 | 4 | use alloy_primitives::{Address, BlockHash, U256}; 5 | use alloy_rpc_types_beacon::BlsPublicKey; 6 | use bid_scraper::types::ScrapedRelayBlockBid; 7 | use rbuilder::{ 8 | live_builder::block_output::bidding_service_interface::{ 9 | LandedBlockInfo as RealLandedBlockInfo, ScrapedRelayBlockBidWithStats, 10 | }, 11 | utils::{offset_datetime_to_timestamp_us, timestamp_us_to_offset_datetime}, 12 | }; 13 | use time::OffsetDateTime; 14 | use tonic::Status; 15 | 16 | pub fn real2rpc_landed_block_info(l: &RealLandedBlockInfo) -> RPCLandedBlockInfo { 17 | RPCLandedBlockInfo { 18 | block_number: l.block_number, 19 | block_timestamp: l.block_timestamp.unix_timestamp(), 20 | builder_balance: l.builder_balance.as_limbs().to_vec(), 21 | beneficiary_is_builder: l.beneficiary_is_builder, 22 | } 23 | } 24 | 25 | #[allow(clippy::result_large_err)] 26 | pub fn rpc2real_landed_block_info(l: &RPCLandedBlockInfo) -> Result { 27 | Ok(RealLandedBlockInfo { 28 | block_number: l.block_number, 29 | block_timestamp: OffsetDateTime::from_unix_timestamp(l.block_timestamp) 30 | .map_err(|_| Status::invalid_argument("block_timestamp"))?, 31 | builder_balance: U256::from_limbs_slice(&l.builder_balance), 32 | beneficiary_is_builder: l.beneficiary_is_builder, 33 | }) 34 | } 35 | 36 | pub fn real2rpc_u256(v: U256) -> Vec { 37 | v.as_limbs().to_vec() 38 | } 39 | 40 | #[allow(clippy::result_large_err)] 41 | pub fn rpc2real_u256(v: Vec) -> Result { 42 | U256::checked_from_limbs_slice(&v).ok_or(Status::invalid_argument("rpc U256 limbs error")) 43 | } 44 | 45 | pub fn real2rpc_address(v: Address) -> Vec { 46 | v.as_slice().to_vec() 47 | } 48 | 49 | #[allow(clippy::result_large_err)] 50 | pub fn rpc2real_address(v: Vec) -> Result { 51 | Address::try_from(v.as_slice()).map_err(|_| Status::invalid_argument("rpc Address error")) 52 | } 53 | 54 | pub fn real2rpc_bls_public_key(v: BlsPublicKey) -> Vec { 55 | v.as_slice().to_vec() 56 | } 57 | 58 | #[allow(clippy::result_large_err)] 59 | pub fn rpc2real_bls_public_key(v: Vec) -> Result { 60 | BlsPublicKey::try_from(v.as_slice()) 61 | .map_err(|_| Status::invalid_argument("rpc BlsPublicKey error")) 62 | } 63 | 64 | pub fn real2rpc_block_hash(v: BlockHash) -> Vec { 65 | v.as_slice().to_vec() 66 | } 67 | 68 | #[allow(clippy::result_large_err)] 69 | pub fn rpc2real_block_hash(v: &Vec) -> Result { 70 | BlockHash::try_from(v.as_slice()).map_err(|_| Status::invalid_argument("rpc BlockHash error")) 71 | } 72 | 73 | pub fn real2rpc_block_bid(bid_with_stats: ScrapedRelayBlockBidWithStats) -> UpdateNewBidParams { 74 | let creation_time_us = offset_datetime_to_timestamp_us(bid_with_stats.creation_time); 75 | let bid = bid_with_stats.bid; 76 | UpdateNewBidParams { 77 | seen_time: bid.seen_time, 78 | publisher_name: bid.publisher_name, 79 | publisher_type: real2rpc_publisher_type(bid.publisher_type), 80 | relay_time: bid.relay_time, 81 | relay_name: bid.relay_name, 82 | block_hash: real2rpc_block_hash(bid.block_hash), 83 | parent_hash: real2rpc_block_hash(bid.parent_hash), 84 | value: real2rpc_u256(bid.value), 85 | slot_number: bid.slot_number, 86 | block_number: bid.block_number, 87 | builder_pubkey: bid 88 | .builder_pubkey 89 | .map(real2rpc_bls_public_key) 90 | .unwrap_or_default(), 91 | extra_data: bid.extra_data, 92 | fee_recipient: bid.fee_recipient.map(real2rpc_address).unwrap_or_default(), 93 | proposer_fee_recipient: bid 94 | .proposer_fee_recipient 95 
| .map(real2rpc_address) 96 | .unwrap_or_default(), 97 | gas_used: bid.gas_used, 98 | optimistic_submission: bid.optimistic_submission, 99 | creation_time_us, 100 | } 101 | } 102 | 103 | #[allow(clippy::result_large_err)] 104 | pub fn rpc2real_block_bid( 105 | bid: UpdateNewBidParams, 106 | ) -> Result { 107 | Ok(ScrapedRelayBlockBidWithStats::new_for_deserialization( 108 | ScrapedRelayBlockBid { 109 | seen_time: bid.seen_time, 110 | publisher_name: bid.publisher_name, 111 | publisher_type: rpc2real_publisher_type(bid.publisher_type)?, 112 | relay_time: bid.relay_time, 113 | relay_name: bid.relay_name, 114 | block_hash: rpc2real_block_hash(&bid.block_hash)?, 115 | parent_hash: rpc2real_block_hash(&bid.parent_hash)?, 116 | value: rpc2real_u256(bid.value)?, 117 | slot_number: bid.slot_number, 118 | block_number: bid.block_number, 119 | builder_pubkey: if bid.builder_pubkey.is_empty() { 120 | None 121 | } else { 122 | Some(rpc2real_bls_public_key(bid.builder_pubkey)?) 123 | }, 124 | extra_data: bid.extra_data, 125 | fee_recipient: if bid.fee_recipient.is_empty() { 126 | None 127 | } else { 128 | Some(rpc2real_address(bid.fee_recipient)?) 129 | }, 130 | proposer_fee_recipient: if bid.proposer_fee_recipient.is_empty() { 131 | None 132 | } else { 133 | Some(rpc2real_address(bid.proposer_fee_recipient)?) 134 | }, 135 | gas_used: bid.gas_used, 136 | optimistic_submission: bid.optimistic_submission, 137 | }, 138 | timestamp_us_to_offset_datetime(bid.creation_time_us), 139 | )) 140 | } 141 | 142 | pub fn real2rpc_publisher_type(ty: bid_scraper::types::PublisherType) -> i32 { 143 | match ty { 144 | bid_scraper::types::PublisherType::RelayBids => super::PublisherType::RelayBids as i32, 145 | bid_scraper::types::PublisherType::RelayHeaders => { 146 | super::PublisherType::RelayHeaders as i32 147 | } 148 | bid_scraper::types::PublisherType::UltrasoundWs => { 149 | super::PublisherType::UltrasoundWs as i32 150 | } 151 | bid_scraper::types::PublisherType::BloxrouteWs => super::PublisherType::BloxrouteWs as i32, 152 | bid_scraper::types::PublisherType::ExternalWs => super::PublisherType::ExternalWs as i32, 153 | } 154 | } 155 | 156 | #[allow(clippy::result_large_err)] 157 | pub fn rpc2real_publisher_type(ty: i32) -> Result { 158 | if let Some(ty) = super::PublisherType::from_i32(ty) { 159 | Ok(match ty { 160 | super::PublisherType::RelayBids => bid_scraper::types::PublisherType::RelayBids, 161 | super::PublisherType::RelayHeaders => bid_scraper::types::PublisherType::RelayHeaders, 162 | super::PublisherType::UltrasoundWs => bid_scraper::types::PublisherType::UltrasoundWs, 163 | super::PublisherType::BloxrouteWs => bid_scraper::types::PublisherType::BloxrouteWs, 164 | super::PublisherType::ExternalWs => bid_scraper::types::PublisherType::ExternalWs, 165 | }) 166 | } else { 167 | Err(Status::invalid_argument("rpc PublisherType error")) 168 | } 169 | } 170 | 171 | #[cfg(test)] 172 | mod tests { 173 | use alloy_primitives::{address, BlockHash, U256}; 174 | use alloy_rpc_types_beacon::BlsPublicKey; 175 | use bid_scraper::types::ScrapedRelayBlockBid; 176 | use rbuilder::{ 177 | live_builder::block_output::bidding_service_interface::ScrapedRelayBlockBidWithStats, 178 | utils::timestamp_ms_to_offset_datetime, 179 | }; 180 | use std::str::FromStr; 181 | 182 | use crate::bidding_service_wrapper::conversion::{real2rpc_block_bid, rpc2real_block_bid}; 183 | 184 | fn test_roundtrip(bid: ScrapedRelayBlockBid) { 185 | let bid_with_stats = ScrapedRelayBlockBidWithStats::new_for_deserialization( 186 | bid, 187 | 
timestamp_ms_to_offset_datetime(1000), 188 | ); 189 | let rpc_bid = real2rpc_block_bid(bid_with_stats.clone()); 190 | assert_eq!(rpc2real_block_bid(rpc_bid).unwrap(), bid_with_stats); 191 | } 192 | 193 | #[test] 194 | /// Test all with all options as Some 195 | fn test_block_bid_conversion_some() { 196 | let bid = ScrapedRelayBlockBid { 197 | seen_time: 1234.0, 198 | publisher_name: "Mafalda".to_owned(), 199 | publisher_type: bid_scraper::types::PublisherType::BloxrouteWs, 200 | relay_time: Some(2345.6), 201 | relay_name: "Flashbots".to_owned(), 202 | block_hash: BlockHash::from_str( 203 | "0xe57c063ad96fb5b6fe7696dc8509f3a986ace89d06a19951f3e4404f877bb0ca", 204 | ) 205 | .unwrap(), 206 | parent_hash: BlockHash::from_str( 207 | "0xf2ae3ad64c285ab1de2195f23c19b2b2dcf4949b6f71a4a3406bac9734e1ff27", 208 | ) 209 | .unwrap(), 210 | value: U256::from(876543210), 211 | slot_number: 31415, 212 | block_number: 27182, 213 | builder_pubkey: Some(BlsPublicKey::from_str("0xf2ae3ad64c285ab1de2195f23c19b2b2dcf4949b6f71a4a3406bac9734e1ff2701234567890123456789012345678901").unwrap()), 214 | extra_data: Some("extra_data!".to_owned()), 215 | fee_recipient: Some(address!("f39Fd6e51aad88F6F4ce6aB8827279cffFb92266")), 216 | proposer_fee_recipient: Some(address!("1234d6e51aad88F6F4ce6aB8827279cffFb92266")), 217 | gas_used: Some(666), 218 | optimistic_submission: Some(true), 219 | }; 220 | test_roundtrip(bid); 221 | } 222 | 223 | #[test] 224 | 225 | /// Test all with all options as None 226 | fn test_block_bid_conversion_none() { 227 | let bid = ScrapedRelayBlockBid { 228 | seen_time: 1234.0, 229 | publisher_name: "".to_owned(), 230 | publisher_type: bid_scraper::types::PublisherType::BloxrouteWs, 231 | relay_time: None, 232 | relay_name: "".to_owned(), 233 | block_hash: BlockHash::from_str( 234 | "0xe57c063ad96fb5b6fe7696dc8509f3a986ace89d06a19951f3e4404f877bb0ca", 235 | ) 236 | .unwrap(), 237 | parent_hash: BlockHash::from_str( 238 | "0xf2ae3ad64c285ab1de2195f23c19b2b2dcf4949b6f71a4a3406bac9734e1ff27", 239 | ) 240 | .unwrap(), 241 | value: U256::from(876543210), 242 | slot_number: 31415, 243 | block_number: 27182, 244 | builder_pubkey: None, 245 | extra_data: None, 246 | fee_recipient: None, 247 | proposer_fee_recipient: None, 248 | gas_used: None, 249 | optimistic_submission: None, 250 | }; 251 | test_roundtrip(bid); 252 | } 253 | } 254 | -------------------------------------------------------------------------------- /src/flashbots_signer.rs: -------------------------------------------------------------------------------- 1 | //! A layer responsible for implementing flashbots-style authentication 2 | //! by signing the request body with a private key and adding the signature 3 | //! to the request headers. 4 | //! Based on https://github.com/paradigmxyz/mev-share-rs/tree/a75c5959e98a79031a89f8893c97528e8f726826 but upgraded to alloy 5 | 6 | use std::{ 7 | error::Error, 8 | task::{Context, Poll}, 9 | }; 10 | 11 | use alloy_primitives::{hex, keccak256}; 12 | use alloy_signer::Signer; 13 | use futures_util::future::BoxFuture; 14 | 15 | use http::{header::HeaderValue, HeaderName, Request}; 16 | use hyper::Body; 17 | 18 | use tower::{Layer, Service}; 19 | 20 | static FLASHBOTS_HEADER: HeaderName = HeaderName::from_static("x-flashbots-signature"); 21 | 22 | /// Layer that applies [`FlashbotsSigner`] which adds a request header with a signed payload. 
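// Editorial aside (illustrative sketch, not part of this crate): one way to wire the
// layer defined below around a plain hyper 0.14 client with `tower::ServiceBuilder`,
// so every JSON POST picks up the `x-flashbots-signature` header. `send_signed` and
// the endpoint URL are placeholders, not items of this crate.
//
// async fn send_signed(
//     body: &'static str,
// ) -> Result<http::Response<hyper::Body>, Box<dyn std::error::Error + Send + Sync>> {
//     use alloy_signer_local::PrivateKeySigner;
//     use tower::{ServiceBuilder, ServiceExt};
//
//     let signer = PrivateKeySigner::random();
//     let signed_client = ServiceBuilder::new()
//         .layer(FlashbotsSignerLayer::new(signer))
//         .service(hyper::Client::new());
//     let req = http::Request::builder()
//         .method(http::Method::POST)
//         .uri("http://relay.example/") // placeholder endpoint
//         .header(http::header::CONTENT_TYPE, "application/json")
//         .body(hyper::Body::from(body))?;
//     Ok(signed_client.oneshot(req).await?)
// }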
23 | #[derive(Clone, Debug)] 24 | pub struct FlashbotsSignerLayer { 25 | signer: S, 26 | } 27 | 28 | impl FlashbotsSignerLayer { 29 | /// Creates a new [`FlashbotsSignerLayer`] with the given signer. 30 | pub fn new(signer: S) -> Self { 31 | FlashbotsSignerLayer { signer } 32 | } 33 | } 34 | 35 | impl Layer for FlashbotsSignerLayer { 36 | type Service = FlashbotsSigner; 37 | 38 | fn layer(&self, inner: I) -> Self::Service { 39 | FlashbotsSigner { 40 | signer: self.signer.clone(), 41 | inner, 42 | } 43 | } 44 | } 45 | 46 | /// Middleware that signs the request body and adds the signature to the x-flashbots-signature 47 | /// header. For more info, see 48 | #[derive(Clone, Debug)] 49 | pub struct FlashbotsSigner { 50 | signer: S, 51 | inner: I, 52 | } 53 | 54 | impl Service> for FlashbotsSigner 55 | where 56 | I: Service> + Clone + Send + 'static, 57 | I::Future: Send, 58 | I::Error: Into> + 'static, 59 | S: Signer + Clone + Send + Sync + 'static, 60 | { 61 | type Response = I::Response; 62 | type Error = Box; 63 | type Future = BoxFuture<'static, Result>; 64 | 65 | fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { 66 | self.inner.poll_ready(cx).map_err(Into::into) 67 | } 68 | 69 | fn call(&mut self, request: Request) -> Self::Future { 70 | let clone = self.inner.clone(); 71 | // wait for service to be ready 72 | let mut inner = std::mem::replace(&mut self.inner, clone); 73 | let signer = self.signer.clone(); 74 | 75 | let (mut parts, body) = request.into_parts(); 76 | 77 | // if method is not POST, return an error. 78 | if parts.method != http::Method::POST { 79 | return Box::pin(async move { 80 | Err(format!("Invalid method: {}", parts.method.as_str()).into()) 81 | }); 82 | } 83 | 84 | // if content-type is not json, or signature already exists, just pass through the request 85 | let is_json = parts 86 | .headers 87 | .get(http::header::CONTENT_TYPE) 88 | .map(|v| v == HeaderValue::from_static("application/json")) 89 | .unwrap_or(false); 90 | let has_sig = parts.headers.contains_key(FLASHBOTS_HEADER.clone()); 91 | 92 | if !is_json || has_sig { 93 | return Box::pin(async move { 94 | let request = Request::from_parts(parts, body); 95 | inner.call(request).await.map_err(Into::into) 96 | }); 97 | } 98 | 99 | // otherwise, sign the request body and add the signature to the header 100 | Box::pin(async move { 101 | let body_bytes = hyper::body::to_bytes(body).await?; 102 | 103 | // sign request body and insert header 104 | let signature = signer 105 | .sign_message(format!("{:?}", keccak256(&body_bytes)).as_bytes()) 106 | .await?; 107 | 108 | let header_val = HeaderValue::from_str(&format!( 109 | "{:?}:0x{}", 110 | signer.address(), 111 | hex::encode(signature.as_bytes()) 112 | ))?; 113 | parts.headers.insert(FLASHBOTS_HEADER.clone(), header_val); 114 | 115 | let request = Request::from_parts(parts, Body::from(body_bytes.clone())); 116 | inner.call(request).await.map_err(Into::into) 117 | }) 118 | } 119 | } 120 | 121 | #[cfg(test)] 122 | mod tests { 123 | use super::*; 124 | use alloy_signer_local::PrivateKeySigner; 125 | use http::Response; 126 | use hyper::Body; 127 | use std::convert::Infallible; 128 | use tower::{service_fn, ServiceExt}; 129 | 130 | #[tokio::test] 131 | async fn test_signature() { 132 | let fb_signer = PrivateKeySigner::random(); 133 | 134 | // mock service that returns the request headers 135 | let svc = FlashbotsSigner { 136 | signer: fb_signer.clone(), 137 | inner: service_fn(|_req: Request| async { 138 | let (parts, _) = _req.into_parts(); 139 | 140 | let mut res 
= Response::builder(); 141 | for (k, v) in parts.headers.iter() { 142 | res = res.header(k, v); 143 | } 144 | let res = res.body(Body::empty()).unwrap(); 145 | Ok::<_, Infallible>(res) 146 | }), 147 | }; 148 | 149 | // build request 150 | let bytes = vec![1u8; 32]; 151 | let req = Request::builder() 152 | .method(http::Method::POST) 153 | .header(http::header::CONTENT_TYPE, "application/json") 154 | .body(Body::from(bytes.clone())) 155 | .unwrap(); 156 | 157 | let res = svc.oneshot(req).await.unwrap(); 158 | 159 | let header = res.headers().get("x-flashbots-signature").unwrap(); 160 | let header = header.to_str().unwrap(); 161 | let header = header.split(":0x").collect::>(); 162 | let header_address = header[0]; 163 | let header_signature = header[1]; 164 | 165 | let signer_address = format!("{:?}", fb_signer.address()); 166 | let expected_signature = fb_signer 167 | .sign_message(format!("{:?}", keccak256(bytes.clone())).as_bytes()) 168 | .await 169 | .unwrap(); 170 | let expected_signature = hex::encode(expected_signature.as_bytes()); 171 | // verify that the header contains expected address and signature 172 | assert_eq!(header_address, signer_address); 173 | assert_eq!(header_signature, expected_signature); 174 | } 175 | 176 | #[tokio::test] 177 | async fn test_skips_non_json() { 178 | let fb_signer = PrivateKeySigner::random(); 179 | 180 | // mock service that returns the request headers 181 | let svc = FlashbotsSigner { 182 | signer: fb_signer.clone(), 183 | inner: service_fn(|_req: Request| async { 184 | let (parts, _) = _req.into_parts(); 185 | 186 | let mut res = Response::builder(); 187 | for (k, v) in parts.headers.iter() { 188 | res = res.header(k, v); 189 | } 190 | let res = res.body(Body::empty()).unwrap(); 191 | Ok::<_, Infallible>(res) 192 | }), 193 | }; 194 | 195 | // build plain text request 196 | let bytes = vec![1u8; 32]; 197 | let req = Request::builder() 198 | .method(http::Method::POST) 199 | .header(http::header::CONTENT_TYPE, "text/plain") 200 | .body(Body::from(bytes.clone())) 201 | .unwrap(); 202 | 203 | let res = svc.oneshot(req).await.unwrap(); 204 | 205 | // response should not contain a signature header 206 | let header = res.headers().get("x-flashbots-signature"); 207 | assert!(header.is_none()); 208 | } 209 | 210 | #[tokio::test] 211 | async fn test_returns_error_when_not_post() { 212 | let fb_signer = PrivateKeySigner::random(); 213 | 214 | // mock service that returns the request headers 215 | let svc = FlashbotsSigner { 216 | signer: fb_signer.clone(), 217 | inner: service_fn(|_req: Request| async { 218 | let (parts, _) = _req.into_parts(); 219 | 220 | let mut res = Response::builder(); 221 | for (k, v) in parts.headers.iter() { 222 | res = res.header(k, v); 223 | } 224 | let res = res.body(Body::empty()).unwrap(); 225 | Ok::<_, Infallible>(res) 226 | }), 227 | }; 228 | 229 | // build plain text request 230 | let bytes = vec![1u8; 32]; 231 | let req = Request::builder() 232 | .method(http::Method::GET) 233 | .header(http::header::CONTENT_TYPE, "application/json") 234 | .body(Body::from(bytes.clone())) 235 | .unwrap(); 236 | 237 | let res = svc.oneshot(req).await; 238 | 239 | // should be an error 240 | assert!(res.is_err()); 241 | } 242 | 243 | /// Uses a static private key and compares the signature generated by this package to the signature 244 | /// generated by the `cast` CLI. 
245 | /// Test copied from https://github.com/flashbots/go-utils/blob/main/signature/signature_test.go#L146 501d395be6a9802494ef1ef25a755acaa4448c17 (TestSignatureCreateCompareToCastAndEthers) 246 | #[tokio::test] 247 | async fn test_signature_cast() { 248 | let fb_signer: PrivateKeySigner = 249 | "fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19" 250 | .parse() 251 | .unwrap(); 252 | // mock service that returns the request headers 253 | let svc = FlashbotsSigner { 254 | signer: fb_signer.clone(), 255 | inner: service_fn(|_req: Request| async { 256 | let (parts, _) = _req.into_parts(); 257 | 258 | let mut res = Response::builder(); 259 | for (k, v) in parts.headers.iter() { 260 | res = res.header(k, v); 261 | } 262 | let res = res.body(Body::empty()).unwrap(); 263 | Ok::<_, Infallible>(res) 264 | }), 265 | }; 266 | 267 | // build request 268 | let bytes = "Hello".as_bytes(); 269 | let req = Request::builder() 270 | .method(http::Method::POST) 271 | .header(http::header::CONTENT_TYPE, "application/json") 272 | .body(Body::from(bytes)) 273 | .unwrap(); 274 | 275 | let res = svc.oneshot(req).await.unwrap(); 276 | 277 | let header = res.headers().get("x-flashbots-signature").unwrap(); 278 | let header = header.to_str().unwrap(); 279 | let header = header.split(":0x").collect::>(); 280 | let header_address = header[0]; 281 | let header_signature = header[1]; 282 | // I generated the signature using the cast CLI: 283 | // cast wallet sign --private-key fad9c8855b740a0b7ed4c221dbad0f33a83a49cad6b3fe8d5817ac83d38b6a19 $(cast from-utf8 $(cast keccak Hello)) 284 | let signer_address = "0x96216849c49358B10257cb55b28eA603c874b05E".to_lowercase(); 285 | let expected_signature = "1446053488f02d460c012c84c4091cd5054d98c6cfca01b65f6c1a72773e80e60b8a4931aeee7ed18ce3adb45b2107e8c59e25556c1f871a8334e30e5bddbed21c"; 286 | // verify that the header contains expected address and signature 287 | assert_eq!(header_address, signer_address); 288 | assert_eq!(header_signature, expected_signature); 289 | } 290 | } 291 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2023-2024 rbuilder Contributors 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
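Aside on src/flashbots_signer.rs above: the middleware signs the hex-encoded keccak256 hash of the request body and sends the result as `address:0xsignature` in the `x-flashbots-signature` header. The sketch below mirrors those calls so the header value can be reproduced outside the tower stack (for example when checking why a relay rejected a request); `flashbots_signature_header` is an illustrative name, not an item of this crate.

use alloy_primitives::{hex, keccak256};
use alloy_signer::Signer;
use alloy_signer_local::PrivateKeySigner;

/// Illustrative helper: builds the same value that `FlashbotsSigner` puts in
/// the `x-flashbots-signature` header for a given request body.
async fn flashbots_signature_header(
    signer: &PrivateKeySigner,
    body: &[u8],
) -> eyre::Result<String> {
    // Sign the 0x-prefixed hex string of keccak256(body), as the middleware does.
    let signature = signer
        .sign_message(format!("{:?}", keccak256(body)).as_bytes())
        .await?;
    Ok(format!(
        "{:?}:0x{}",
        signer.address(),
        hex::encode(signature.as_bytes())
    ))
}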
-------------------------------------------------------------------------------- /src/bidding_service_wrapper/client/bidding_service_client_adapter.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::U256; 2 | use rbuilder::{ 3 | live_builder::block_output::bidding_service_interface::{ 4 | BiddingService, BlockId, BlockSealInterfaceForSlotBidder, 5 | LandedBlockInfo as RealLandedBlockInfo, ScrapedRelayBlockBidWithStats, SlotBidder, 6 | SlotBidderSealBidCommand, SlotBlockId, 7 | }, 8 | utils::{build_info::Version, timestamp_us_to_offset_datetime}, 9 | }; 10 | use std::{ 11 | path::PathBuf, 12 | sync::{ 13 | atomic::{AtomicU64, Ordering}, 14 | Arc, 15 | }, 16 | }; 17 | use time::OffsetDateTime; 18 | use tokio::sync::mpsc; 19 | use tokio_stream::StreamExt; 20 | use tokio_util::sync::CancellationToken; 21 | use tonic::transport::{Channel, Endpoint, Uri}; 22 | use tower::service_fn; 23 | use tracing::{error, trace, warn}; 24 | 25 | use crate::{ 26 | bidding_service_wrapper::{ 27 | bidding_service_client::BiddingServiceClient, 28 | conversion::{real2rpc_block_bid, real2rpc_block_hash, real2rpc_landed_block_info}, 29 | CreateSlotBidderParams, DestroySlotBidderParams, Empty, LandedBlocksParams, 30 | MustWinBlockParams, NewBlockParams, UpdateNewBidParams, 31 | }, 32 | metrics::set_bidding_service_version, 33 | }; 34 | 35 | use super::unfinished_block_building_sink_client::UnfinishedBlockBuildingSinkClient; 36 | 37 | pub struct CreateSlotBidderCommandData { 38 | params: CreateSlotBidderParams, 39 | block_seal_handle: Box, 40 | cancel: tokio_util::sync::CancellationToken, 41 | } 42 | 43 | #[allow(clippy::large_enum_variant)] 44 | pub enum BiddingServiceClientCommand { 45 | CreateSlotBidder(CreateSlotBidderCommandData), 46 | NewBlock(NewBlockParams), 47 | UpdateNewBid(UpdateNewBidParams), 48 | MustWinBlock(MustWinBlockParams), 49 | UpdateNewLandedBlocksDetected(LandedBlocksParams), 50 | UpdateFailedReadingNewLandedBlocks, 51 | DestroySlotBidder(DestroySlotBidderParams), 52 | } 53 | 54 | /// Adapts [BiddingServiceClient] to [BiddingService]. 55 | /// To adapt sync world ([BiddingService]) to async ([BiddingServiceClient]) it receives commands via a channel (commands_sender) 56 | /// which is handled by a tokio task. 57 | /// It creates a UnfinishedBlockBuildingSinkClient implementing UnfinishedBlockBuildingSink per create_slot_bidder call. 58 | /// For each UnfinishedBlockBuildingSinkClient created a task is created to poll callbacks (eg: bids and can_use_suggested_fee_recipient_as_coinbase updates). 59 | /// The created UnfinishedBlockBuildingSinkClient forwards all calls to the BiddingServiceClientAdapter as commands. 60 | #[derive(Debug)] 61 | pub struct BiddingServiceClientAdapter { 62 | commands_sender: mpsc::UnboundedSender, 63 | last_session_id: AtomicU64, 64 | } 65 | 66 | #[derive(thiserror::Error, Debug)] 67 | pub enum Error { 68 | #[error("Unable to connect : {0}")] 69 | TonicTrasport(#[from] tonic::transport::Error), 70 | #[error("RPC error : {0}")] 71 | TonicStatus(#[from] tonic::Status), 72 | #[error("Initialization failed : {0}")] 73 | InitFailed(tonic::Status), 74 | } 75 | 76 | pub type Result = core::result::Result; 77 | 78 | impl BiddingServiceClientAdapter { 79 | /// @Remove async and reconnect on all create_slot_bidder calls. 
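// Editorial aside (illustrative sketch, not from this crate): the sync-to-async
// bridge described in the struct docs above boils down to an unbounded command
// channel drained by a single owning tokio task. `Command` and `spawn_command_loop`
// are hypothetical names.
//
// enum Command {
//     MustWinBlock { block: u64 },
// }
//
// fn spawn_command_loop() -> tokio::sync::mpsc::UnboundedSender<Command> {
//     let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
//     tokio::spawn(async move {
//         // The spawned task owns the async RPC client; senders never await.
//         while let Some(command) = rx.recv().await {
//             match command {
//                 Command::MustWinBlock { block: _ } => { /* await the RPC call here */ }
//             }
//         }
//     });
//     tx
// }
//
// // Synchronous trait methods then just call `tx.send(Command::MustWinBlock { block })`.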
80 | pub async fn new( 81 | uds_path: &str, 82 | landed_blocks_history: &[RealLandedBlockInfo], 83 | ) -> Result { 84 | let commands_sender = Self::init_sender_task(uds_path, landed_blocks_history).await?; 85 | Ok(Self { 86 | commands_sender, 87 | last_session_id: AtomicU64::new(0), 88 | }) 89 | } 90 | 91 | fn new_session_id(&self) -> u64 { 92 | self.last_session_id.fetch_add(1, Ordering::Relaxed) 93 | } 94 | 95 | async fn init_sender_task( 96 | uds_path: &str, 97 | landed_blocks_history: &[RealLandedBlockInfo], 98 | ) -> Result> { 99 | let uds_path = uds_path.to_string(); 100 | // Url us dummy but needed to create the Endpoint. 101 | let channel = Endpoint::try_from("http://[::]:50051") 102 | .unwrap() 103 | .connect_with_connector(service_fn(move |_: Uri| { 104 | // Connect to a Uds socket 105 | let path = PathBuf::from(uds_path.clone()); 106 | tokio::net::UnixStream::connect(path) 107 | })) 108 | .await?; 109 | // Create a client 110 | let mut client = BiddingServiceClient::new(channel); 111 | let init_params = LandedBlocksParams { 112 | landed_block_info: landed_blocks_history 113 | .iter() 114 | .map(real2rpc_landed_block_info) 115 | .collect(), 116 | }; 117 | let bidding_service_version = client 118 | .initialize(init_params) 119 | .await 120 | .map_err(Error::InitFailed)?; 121 | let bidding_service_version = bidding_service_version.into_inner(); 122 | set_bidding_service_version(Version { 123 | git_commit: bidding_service_version.git_commit, 124 | git_ref: bidding_service_version.git_ref, 125 | build_time_utc: bidding_service_version.build_time_utc, 126 | }); 127 | let (commands_sender, mut rx) = mpsc::unbounded_channel::(); 128 | // Spawn a task to execute received futures 129 | tokio::spawn(async move { 130 | while let Some(command) = rx.recv().await { 131 | match command { 132 | BiddingServiceClientCommand::CreateSlotBidder(create_slot_data) => { 133 | Self::create_slot_bidder(&mut client, create_slot_data).await; 134 | } 135 | BiddingServiceClientCommand::NewBlock(new_block_params) => { 136 | Self::handle_error(client.new_block(new_block_params).await); 137 | } 138 | BiddingServiceClientCommand::UpdateNewBid(update_new_bid_params) => { 139 | Self::handle_error(client.update_new_bid(update_new_bid_params).await); 140 | } 141 | BiddingServiceClientCommand::MustWinBlock(must_win_block_params) => { 142 | Self::handle_error(client.must_win_block(must_win_block_params).await); 143 | } 144 | BiddingServiceClientCommand::UpdateNewLandedBlocksDetected(params) => { 145 | Self::handle_error(client.update_new_landed_blocks_detected(params).await); 146 | } 147 | BiddingServiceClientCommand::UpdateFailedReadingNewLandedBlocks => { 148 | Self::handle_error( 149 | client 150 | .update_failed_reading_new_landed_blocks(Empty {}) 151 | .await, 152 | ); 153 | } 154 | BiddingServiceClientCommand::DestroySlotBidder(destroy_slot_bidder_params) => { 155 | Self::handle_error( 156 | client.destroy_slot_bidder(destroy_slot_bidder_params).await, 157 | ); 158 | } 159 | } 160 | } 161 | }); 162 | Ok(commands_sender) 163 | } 164 | 165 | fn parse_option_u256(limbs: Vec) -> Option { 166 | if limbs.is_empty() { 167 | None 168 | } else { 169 | Some(U256::from_limbs_slice(&limbs)) 170 | } 171 | } 172 | 173 | /// Calls create_slot_bidder via RPC to init the bidder. 
174 | async fn create_slot_bidder( 175 | client: &mut BiddingServiceClient, 176 | create_slot_bidder_data: CreateSlotBidderCommandData, 177 | ) { 178 | match client 179 | .create_slot_bidder(create_slot_bidder_data.params) 180 | .await 181 | { 182 | Ok(response) => { 183 | let mut stream = response.into_inner(); 184 | 185 | tokio::spawn(async move { 186 | loop { 187 | tokio::select! { 188 | _ = create_slot_bidder_data.cancel.cancelled() => { 189 | return; 190 | } 191 | callback = stream.next() => { 192 | if let Some(Ok(callback)) = callback { 193 | if let Some(bid) = callback.bid { 194 | let payout_tx_value = Self::parse_option_u256(bid.payout_tx_value); 195 | let seen_competition_bid = Self::parse_option_u256(bid.seen_competition_bid); 196 | let trigger_creation_time = bid.trigger_creation_time_us.map(timestamp_us_to_offset_datetime); 197 | let payout_tx_value = if let Some(payout_tx_value) = payout_tx_value { 198 | payout_tx_value 199 | } else { 200 | warn!("payout_tx_value is None"); 201 | continue; 202 | }; 203 | 204 | let seal_command = SlotBidderSealBidCommand { 205 | block_id: BlockId(bid.block_id), 206 | payout_tx_value, 207 | seen_competition_bid, 208 | trigger_creation_time, 209 | }; 210 | create_slot_bidder_data.block_seal_handle.seal_bid(seal_command); 211 | } else if let Some(value) = callback.can_use_suggested_fee_recipient_as_coinbase_change { 212 | 213 | // do nothing as can_use_suggested_fee_recipient_as_coinbase_change is not supported 214 | trace!(value, "Got can_use_suggested_fee_recipient_as_coinbase_change from bidding service"); 215 | } 216 | } 217 | else { 218 | return; 219 | } 220 | } 221 | } 222 | } 223 | }); 224 | } 225 | Err(err) => { 226 | Self::handle_error(Err(err)); 227 | } 228 | }; 229 | } 230 | 231 | /// If error logs it. 232 | /// return result is error 233 | fn handle_error(result: tonic::Result>) -> bool { 234 | if let Err(error) = &result { 235 | error!(error=?error,"RPC call error, killing process so it reconnects"); 236 | std::process::exit(1); 237 | } else { 238 | false 239 | } 240 | } 241 | 242 | pub async fn must_win_block(&self, block: u64) { 243 | let _ = self 244 | .commands_sender 245 | .send(BiddingServiceClientCommand::MustWinBlock( 246 | MustWinBlockParams { block }, 247 | )); 248 | } 249 | } 250 | 251 | impl BiddingService for BiddingServiceClientAdapter { 252 | fn create_slot_bidder( 253 | &self, 254 | slot_block_id: SlotBlockId, 255 | slot_timestamp: OffsetDateTime, 256 | block_seal_handle: Box, 257 | cancel: CancellationToken, 258 | ) -> Arc { 259 | // This default will be immediately changed by a callback. 
260 | let session_id = self.new_session_id(); 261 | let _ = self 262 | .commands_sender 263 | .send(BiddingServiceClientCommand::CreateSlotBidder( 264 | CreateSlotBidderCommandData { 265 | params: CreateSlotBidderParams { 266 | block: slot_block_id.block, 267 | slot: slot_block_id.slot, 268 | parent_hash: real2rpc_block_hash(slot_block_id.parent_block_hash), 269 | session_id, 270 | slot_timestamp: slot_timestamp.unix_timestamp(), 271 | }, 272 | block_seal_handle, 273 | cancel, 274 | }, 275 | )); 276 | Arc::new(UnfinishedBlockBuildingSinkClient::new( 277 | session_id, 278 | self.commands_sender.clone(), 279 | )) 280 | } 281 | 282 | fn update_new_landed_blocks_detected(&self, landed_blocks: &[RealLandedBlockInfo]) { 283 | let param = LandedBlocksParams { 284 | landed_block_info: landed_blocks 285 | .iter() 286 | .map(real2rpc_landed_block_info) 287 | .collect(), 288 | }; 289 | let _ = 290 | self.commands_sender 291 | .send(BiddingServiceClientCommand::UpdateNewLandedBlocksDetected( 292 | param, 293 | )); 294 | } 295 | 296 | fn update_failed_reading_new_landed_blocks(&self) { 297 | let _ = self 298 | .commands_sender 299 | .send(BiddingServiceClientCommand::UpdateFailedReadingNewLandedBlocks); 300 | } 301 | 302 | fn observe_relay_bids(&self, bid_with_stats: ScrapedRelayBlockBidWithStats) { 303 | let _ = self 304 | .commands_sender 305 | .send(BiddingServiceClientCommand::UpdateNewBid( 306 | real2rpc_block_bid(bid_with_stats), 307 | )); 308 | } 309 | } 310 | -------------------------------------------------------------------------------- /src/blocks_processor.rs: -------------------------------------------------------------------------------- 1 | use alloy_primitives::{BlockHash, U256}; 2 | use exponential_backoff::Backoff; 3 | use jsonrpsee::{ 4 | core::{client::ClientT, traits::ToRpcParams}, 5 | http_client::{HttpClient, HttpClientBuilder}, 6 | }; 7 | use rbuilder::{ 8 | building::BuiltBlockTrace, 9 | live_builder::{ 10 | block_output::bidding_service_interface::BidObserver, payload_events::MevBoostSlotData, 11 | }, 12 | utils::error_storage::store_error_event, 13 | }; 14 | use rbuilder_primitives::{ 15 | mev_boost::SubmitBlockRequest, 16 | serialize::{RawBundle, RawShareBundle}, 17 | Bundle, Order, 18 | }; 19 | use reth::primitives::SealedBlock; 20 | use serde::{Deserialize, Serialize}; 21 | use serde_json::value::RawValue; 22 | use serde_with::{serde_as, DisplayFromStr}; 23 | use std::{sync::Arc, time::Duration}; 24 | use time::format_description::well_known; 25 | use tracing::{error, warn, Span}; 26 | 27 | use crate::metrics::inc_submit_block_errors; 28 | 29 | const BLOCK_PROCESSOR_ERROR_CATEGORY: &str = "block_processor"; 30 | const DEFAULT_BLOCK_CONSUME_BUILT_BLOCK_METHOD: &str = "block_consumeBuiltBlockV2"; 31 | pub const SIGNED_BLOCK_CONSUME_BUILT_BLOCK_METHOD: &str = "flashbots_consumeBuiltBlockV2"; 32 | 33 | #[derive(Debug, Serialize, Deserialize)] 34 | #[serde(rename_all = "camelCase")] 35 | struct UsedSbundle { 36 | bundle: RawShareBundle, 37 | success: bool, 38 | } 39 | 40 | #[serde_as] 41 | #[derive(Debug, Clone, Serialize, Deserialize)] 42 | #[serde(rename_all = "camelCase")] 43 | struct UsedBundle { 44 | #[serde_as(as = "DisplayFromStr")] 45 | mev_gas_price: U256, 46 | #[serde_as(as = "DisplayFromStr")] 47 | total_eth: U256, 48 | #[serde_as(as = "DisplayFromStr")] 49 | eth_send_to_coinbase: U256, 50 | #[serde_as(as = "DisplayFromStr")] 51 | total_gas_used: u64, 52 | original_bundle: RawBundle, 53 | } 54 | 55 | /// Header used by block_consumeBuiltBlockV2. 
Since docs are not up to date I copied RbuilderHeader from block-processor/ports/models.go (commit b341b35) 56 | /// Based on alloy_primitives::Block 57 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] 58 | #[serde(rename_all = "camelCase")] 59 | struct BlocksProcessorHeader { 60 | pub hash: BlockHash, 61 | pub gas_limit: U256, 62 | pub gas_used: U256, 63 | #[serde(skip_serializing_if = "Option::is_none")] 64 | pub base_fee_per_gas: Option, 65 | pub parent_hash: BlockHash, 66 | pub timestamp: U256, 67 | pub number: Option, 68 | } 69 | 70 | type ConsumeBuiltBlockRequest = ( 71 | BlocksProcessorHeader, 72 | String, 73 | String, 74 | Vec, 75 | Vec, 76 | Vec, 77 | alloy_rpc_types_beacon::relay::BidTrace, 78 | String, 79 | U256, 80 | U256, 81 | ); 82 | 83 | /// Struct to avoid copying ConsumeBuiltBlockRequest since HttpClient::request eats the parameter. 84 | #[derive(Clone)] 85 | struct ConsumeBuiltBlockRequestArc { 86 | inner: Arc, 87 | } 88 | 89 | impl ConsumeBuiltBlockRequestArc { 90 | fn new(request: ConsumeBuiltBlockRequest) -> Self { 91 | Self { 92 | inner: Arc::new(request), 93 | } 94 | } 95 | fn as_ref(&self) -> &ConsumeBuiltBlockRequest { 96 | self.inner.as_ref() 97 | } 98 | } 99 | 100 | impl ToRpcParams for ConsumeBuiltBlockRequestArc { 101 | fn to_rpc_params(self) -> Result>, jsonrpsee::core::Error> { 102 | let json = serde_json::to_string(self.inner.as_ref()) 103 | .map_err(jsonrpsee::core::Error::ParseError)?; 104 | RawValue::from_string(json) 105 | .map(Some) 106 | .map_err(jsonrpsee::core::Error::ParseError) 107 | } 108 | } 109 | 110 | #[derive(Debug, Clone)] 111 | pub struct BlocksProcessorClient { 112 | client: HttpClientType, 113 | consume_built_block_method: &'static str, 114 | } 115 | 116 | impl BlocksProcessorClient { 117 | pub fn try_from( 118 | url: &str, 119 | max_request_size: u32, 120 | max_concurrent_requests: usize, 121 | ) -> eyre::Result { 122 | Ok(Self { 123 | client: HttpClientBuilder::default() 124 | .max_request_size(max_request_size) 125 | .max_concurrent_requests(max_concurrent_requests) 126 | .build(url)?, 127 | consume_built_block_method: DEFAULT_BLOCK_CONSUME_BUILT_BLOCK_METHOD, 128 | }) 129 | } 130 | } 131 | 132 | /// RawBundle::encode_no_blobs but more compatible. 
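// Editorial aside (illustrative sketch, not from this crate): the
// `#[serde_as(as = "DisplayFromStr")]` fields on `UsedBundle` above make the
// U256 amounts travel as decimal strings in the JSON payload. Minimal
// illustration; the struct name `Wire` is hypothetical.
//
// #[serde_with::serde_as]
// #[derive(serde::Serialize)]
// struct Wire {
//     #[serde_as(as = "serde_with::DisplayFromStr")]
//     total_eth: alloy_primitives::U256,
// }
//
// // serde_json::to_string(&Wire { total_eth: alloy_primitives::U256::from(5u64) })
// // yields {"total_eth":"5"}, not a JSON number.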
133 | fn encode_bundle_for_blocks_processor(mut bundle: Bundle) -> RawBundle { 134 | // set to 0 when none 135 | bundle.block = bundle.block.or(Some(0)); 136 | RawBundle::encode_no_blobs(bundle.clone()) 137 | } 138 | 139 | impl BlocksProcessorClient { 140 | pub fn new(client: HttpClientType, consume_built_block_method: &'static str) -> Self { 141 | Self { 142 | client, 143 | consume_built_block_method, 144 | } 145 | } 146 | pub async fn submit_built_block( 147 | &self, 148 | sealed_block: &SealedBlock, 149 | submit_block_request: &SubmitBlockRequest, 150 | built_block_trace: &BuiltBlockTrace, 151 | builder_name: String, 152 | best_bid_value: U256, 153 | ) -> eyre::Result<()> { 154 | let header = BlocksProcessorHeader { 155 | hash: sealed_block.hash(), 156 | gas_limit: U256::from(sealed_block.gas_limit), 157 | gas_used: U256::from(sealed_block.gas_used), 158 | base_fee_per_gas: sealed_block.base_fee_per_gas.map(U256::from), 159 | parent_hash: sealed_block.parent_hash, 160 | timestamp: U256::from(sealed_block.timestamp), 161 | number: Some(U256::from(sealed_block.number)), 162 | }; 163 | let closed_at = built_block_trace 164 | .orders_closed_at 165 | .format(&well_known::Iso8601::DEFAULT)?; 166 | let sealed_at = built_block_trace 167 | .orders_sealed_at 168 | .format(&well_known::Iso8601::DEFAULT)?; 169 | 170 | let committed_bundles = built_block_trace 171 | .included_orders 172 | .iter() 173 | .filter_map(|res| { 174 | if let Order::Bundle(bundle) = &res.order { 175 | Some(UsedBundle { 176 | mev_gas_price: res.inplace_sim.full_profit_info().mev_gas_price(), 177 | total_eth: res.inplace_sim.full_profit_info().coinbase_profit(), 178 | eth_send_to_coinbase: U256::ZERO, 179 | total_gas_used: res.inplace_sim.gas_used(), 180 | original_bundle: encode_bundle_for_blocks_processor(bundle.clone()), 181 | }) 182 | } else { 183 | None 184 | } 185 | }) 186 | .collect::>(); 187 | 188 | let used_share_bundles = Self::get_used_sbundles(built_block_trace); 189 | 190 | let params: ConsumeBuiltBlockRequest = ( 191 | header, 192 | closed_at, 193 | sealed_at, 194 | committed_bundles, 195 | Vec::::new(), 196 | used_share_bundles, 197 | submit_block_request.bid_trace().clone(), 198 | builder_name, 199 | built_block_trace.true_bid_value, 200 | best_bid_value, 201 | ); 202 | let request = ConsumeBuiltBlockRequestArc::new(params); 203 | let backoff = backoff(); 204 | let mut backoff_iter = backoff.iter(); 205 | loop { 206 | let sleep_time = backoff_iter.next(); 207 | match self 208 | .client 209 | .request(self.consume_built_block_method, request.clone()) 210 | .await 211 | { 212 | Ok(()) => { 213 | return Ok(()); 214 | } 215 | Err(err) => match sleep_time { 216 | Some(time) => { 217 | warn!(?err, "Block processor returned error, retrying."); 218 | tokio::time::sleep(time).await; 219 | } 220 | None => { 221 | Self::handle_rpc_error(&err, request.as_ref()); 222 | return Err(err.into()); 223 | } 224 | }, 225 | } 226 | } 227 | } 228 | 229 | fn handle_rpc_error(err: &jsonrpsee::core::Error, request: &ConsumeBuiltBlockRequest) { 230 | const RPC_ERROR_TEXT: &str = "Block processor RPC"; 231 | match err { 232 | jsonrpsee::core::Error::Call(error_object) => { 233 | error!(err = ?error_object, kind = "error_returned", RPC_ERROR_TEXT); 234 | store_error_event(BLOCK_PROCESSOR_ERROR_CATEGORY, &err.to_string(), request); 235 | } 236 | jsonrpsee::core::Error::Transport(_) => { 237 | error!(err = ?err, kind = "transport", RPC_ERROR_TEXT); 238 | store_error_event(BLOCK_PROCESSOR_ERROR_CATEGORY, &err.to_string(), request); 239 | } 
240 | jsonrpsee::core::Error::ParseError(error) => { 241 | error!(err = ?err, kind = "deserialize", RPC_ERROR_TEXT); 242 | let error_txt = error.to_string(); 243 | if !(error_txt.contains("504 Gateway Time-out") 244 | || error_txt.contains("502 Bad Gateway")) 245 | { 246 | store_error_event(BLOCK_PROCESSOR_ERROR_CATEGORY, &err.to_string(), request); 247 | } 248 | } 249 | _ => { 250 | error!(err = ?err, kind = "other", RPC_ERROR_TEXT); 251 | } 252 | } 253 | } 254 | 255 | /// Gets the UsedSbundle carefully considering virtual orders formed by other original orders. 256 | fn get_used_sbundles(built_block_trace: &BuiltBlockTrace) -> Vec { 257 | built_block_trace 258 | .included_orders 259 | .iter() 260 | .flat_map(|exec_result| { 261 | if let Order::ShareBundle(sbundle) = &exec_result.order { 262 | // don't like having special cases (merged vs not merged), can we improve this? 263 | let filtered_sbundles = if sbundle.is_merged_order() { 264 | // We include only original orders that are contained in original_order_ids. 265 | // If not contained in original_order_ids then the sub sbundle failed or was an empty execution. 266 | sbundle 267 | .original_orders 268 | .iter() 269 | .filter_map(|sub_order| { 270 | if let Order::ShareBundle(sbundle) = sub_order { 271 | if exec_result.original_order_ids.contains(&sub_order.id()) { 272 | Some(sbundle) 273 | } else { 274 | None 275 | } 276 | } else { 277 | None 278 | } 279 | }) 280 | .collect() 281 | } else if exec_result.tx_infos.is_empty() { 282 | // non merged empty execution sbundle 283 | vec![] 284 | } else { 285 | // non merged non empty execution sbundle 286 | vec![sbundle] 287 | }; 288 | filtered_sbundles 289 | .into_iter() 290 | .map(|sbundle| UsedSbundle { 291 | bundle: RawShareBundle::encode_no_blobs(sbundle.clone()), 292 | success: true, 293 | }) 294 | .collect() 295 | } else { 296 | Vec::new() 297 | } 298 | }) 299 | .collect::>() 300 | } 301 | } 302 | 303 | /// BidObserver sending all data to a BlocksProcessorClient 304 | #[derive(Debug)] 305 | pub struct BlocksProcessorClientBidObserver { 306 | client: BlocksProcessorClient, 307 | } 308 | 309 | impl BlocksProcessorClientBidObserver { 310 | pub fn new(client: BlocksProcessorClient) -> Self { 311 | Self { client } 312 | } 313 | } 314 | 315 | impl BidObserver 316 | for BlocksProcessorClientBidObserver 317 | { 318 | fn block_submitted( 319 | &self, 320 | _slot_data: &MevBoostSlotData, 321 | sealed_block: &SealedBlock, 322 | submit_block_request: &SubmitBlockRequest, 323 | built_block_trace: &BuiltBlockTrace, 324 | builder_name: String, 325 | best_bid_value: U256, 326 | ) { 327 | let client = self.client.clone(); 328 | let parent_span = Span::current(); 329 | let sealed_block = sealed_block.clone(); 330 | let submit_block_request = submit_block_request.clone(); 331 | let built_block_trace = built_block_trace.clone(); 332 | tokio::spawn(async move { 333 | let block_processor_result = client 334 | .submit_built_block( 335 | &sealed_block, 336 | &submit_block_request, 337 | &built_block_trace, 338 | builder_name, 339 | best_bid_value, 340 | ) 341 | .await; 342 | if let Err(err) = block_processor_result { 343 | inc_submit_block_errors(); 344 | warn!(parent: &parent_span, ?err, "Failed to submit block to the blocks api"); 345 | } 346 | }); 347 | } 348 | } 349 | 350 | // backoff is around 1 minute and total number of requests per payload will be 4 351 | // assuming 200 blocks per slot and if API is down we will max at around 1k of blocks in memory 352 | fn backoff() -> Backoff { 353 | let mut backoff = 
Backoff::new(3, Duration::from_secs(5), None); 354 | backoff.set_factor(2); 355 | backoff.set_jitter(0.1); 356 | backoff 357 | } 358 | 359 | #[cfg(test)] 360 | mod tests { 361 | use super::*; 362 | 363 | #[test] 364 | fn backoff_total_time_assert() { 365 | let mut requests = 0; 366 | let mut total_sleep_time = Duration::default(); 367 | let backoff = backoff(); 368 | let backoff_iter = backoff.iter(); 369 | for duration in backoff_iter { 370 | requests += 1; 371 | total_sleep_time += duration; 372 | } 373 | assert_eq!(requests, 4); 374 | let total_sleep_time = total_sleep_time.as_secs(); 375 | dbg!(total_sleep_time); 376 | assert!(total_sleep_time > 40 && total_sleep_time < 90); 377 | } 378 | } 379 | -------------------------------------------------------------------------------- /src/flashbots_config.rs: -------------------------------------------------------------------------------- 1 | //! Config should always be deserializable, default values should be used 2 | //! This code has lots of copy/paste from the example config but it's not really copy/paste since we use our own private types. 3 | //! @Pending make this copy/paste generic code on the library 4 | use alloy_primitives::U256; 5 | use alloy_signer_local::PrivateKeySigner; 6 | use derivative::Derivative; 7 | use eyre::Context; 8 | use http::StatusCode; 9 | use jsonrpsee::RpcModule; 10 | use rbuilder::building::builders::parallel_builder::parallel_build_backtest; 11 | use rbuilder::building::order_priority::{FullProfitInfoGetter, NonMempoolProfitInfoGetter}; 12 | use rbuilder::building::{BuiltBlockTrace, PartialBlockExecutionTracer}; 13 | use rbuilder::live_builder::base_config::EnvOrValue; 14 | use rbuilder::live_builder::block_output::bidding_service_interface::{ 15 | BidObserver, LandedBlockInfo, 16 | }; 17 | use rbuilder::live_builder::config::{ 18 | build_backtest_block_ordering_builder, create_builder_from_sink, create_builders, 19 | create_sink_factory_and_relays, create_wallet_balance_watcher, BuilderConfig, 20 | SpecificBuilderConfig, 21 | }; 22 | use rbuilder::live_builder::payload_events::MevBoostSlotData; 23 | use rbuilder::provider::StateProviderFactory; 24 | use rbuilder::{ 25 | building::builders::{BacktestSimulateBlockInput, Block}, 26 | live_builder::{ 27 | base_config::BaseConfig, cli::LiveBuilderConfig, config::L1Config, LiveBuilder, 28 | }, 29 | utils::build_info::Version, 30 | }; 31 | use rbuilder_primitives::mev_boost::SubmitBlockRequest; 32 | use reth::primitives::SealedBlock; 33 | use serde::Deserialize; 34 | use serde_with::serde_as; 35 | use tokio_util::sync::CancellationToken; 36 | use tracing::{error, warn}; 37 | use url::Url; 38 | 39 | use crate::bidding_service_wrapper::client::bidding_service_client_adapter::BiddingServiceClientAdapter; 40 | use crate::blocks_processor::{ 41 | BlocksProcessorClient, BlocksProcessorClientBidObserver, 42 | SIGNED_BLOCK_CONSUME_BUILT_BLOCK_METHOD, 43 | }; 44 | use crate::build_info::rbuilder_version; 45 | use crate::true_block_value_push::best_true_value_observer::BestTrueValueObserver; 46 | 47 | use clickhouse::Client; 48 | use std::sync::Arc; 49 | 50 | #[derive(Debug, Clone, Deserialize, PartialEq, Eq, Default)] 51 | pub struct ClickhouseConfig { 52 | /// clickhouse host url (starts with http/https) 53 | pub clickhouse_host_url: Option>, 54 | pub clickhouse_user: Option>, 55 | pub clickhouse_password: Option>, 56 | } 57 | 58 | #[serde_as] 59 | #[derive(Debug, Clone, Deserialize, Default, PartialEq, Eq)] 60 | #[serde(default, deny_unknown_fields)] 61 | /// Config to 
push TBV to a redis channel. 62 | struct TBVPushRedisConfig { 63 | /// redis connection string for pushing best bid value 64 | /// Option so we can have Default for Deserialize but always required. 65 | pub url: Option>, 66 | 67 | /// redis channel name for syncing best bid value 68 | pub channel: String, 69 | } 70 | 71 | #[serde_as] 72 | #[derive(Debug, Clone, Deserialize, PartialEq, Eq, Derivative)] 73 | #[serde(default, deny_unknown_fields)] 74 | #[derivative(Default)] 75 | pub struct FlashbotsConfig { 76 | #[serde(flatten)] 77 | pub base_config: BaseConfig, 78 | 79 | #[serde(flatten)] 80 | pub l1_config: L1Config, 81 | 82 | #[serde(flatten)] 83 | clickhouse: ClickhouseConfig, 84 | 85 | #[serde(default)] 86 | pub flashbots_builder_pubkeys: Vec, 87 | 88 | // bidding server ipc path config. 89 | bidding_service_ipc_path: String, 90 | 91 | /// selected builder configurations 92 | pub builders: Vec, 93 | 94 | /// If this is Some then blocks_processor_url MUST be some and: 95 | /// - signed mode is used for blocks_processor. 96 | /// - tbv_push is done via blocks_processor_url (signed block-processor also handles flashbots_reportBestTrueValue). 97 | pub key_registration_url: Option, 98 | 99 | pub blocks_processor_url: Option, 100 | 101 | #[serde(default = "default_blocks_processor_max_concurrent_requests")] 102 | #[derivative(Default(value = "default_blocks_processor_max_concurrent_requests()"))] 103 | pub blocks_processor_max_concurrent_requests: usize, 104 | #[serde(default = "default_blocks_processor_max_request_size_bytes")] 105 | #[derivative(Default(value = "default_blocks_processor_max_request_size_bytes()"))] 106 | pub blocks_processor_max_request_size_bytes: u32, 107 | 108 | /// Cfg to push tbv to redis. 109 | /// For production we always need some tbv push (since it's used by smart-multiplexing.) so: 110 | /// !Some(key_registration_url) => Some(tbv_push_redis) 111 | tbv_push_redis: Option, 112 | } 113 | 114 | impl LiveBuilderConfig for FlashbotsConfig { 115 | fn base_config(&self) -> &BaseConfig { 116 | &self.base_config 117 | } 118 | 119 | async fn new_builder
<P>
( 120 | &self, 121 | provider: P, 122 | cancellation_token: CancellationToken, 123 | ) -> eyre::Result> 124 | where 125 | P: StateProviderFactory + Clone + 'static, 126 | { 127 | if self.l1_config.scraped_bids_publisher_url.is_none() { 128 | eyre::bail!("scraped_bids_publisher_url is not set"); 129 | } 130 | 131 | let (wallet_balance_watcher, landed_blocks) = 132 | create_wallet_balance_watcher(provider.clone(), &self.base_config).await?; 133 | 134 | let bidding_service = self.create_bidding_service(&landed_blocks).await?; 135 | 136 | let bid_observer = self.create_bid_observer(&cancellation_token).await?; 137 | 138 | let (sink_factory, slot_info_provider, adjustment_fee_payers) = 139 | create_sink_factory_and_relays( 140 | &self.base_config, 141 | &self.l1_config, 142 | wallet_balance_watcher, 143 | bid_observer, 144 | bidding_service.clone(), 145 | cancellation_token.clone(), 146 | ) 147 | .await?; 148 | 149 | let live_builder = create_builder_from_sink( 150 | &self.base_config, 151 | &self.l1_config, 152 | provider, 153 | sink_factory, 154 | slot_info_provider, 155 | adjustment_fee_payers, 156 | cancellation_token, 157 | ) 158 | .await?; 159 | 160 | let mut module = RpcModule::new(()); 161 | module.register_async_method("bid_subsidiseBlock", move |params, _| { 162 | handle_subsidise_block(bidding_service.clone(), params) 163 | })?; 164 | let live_builder = live_builder.with_extra_rpc(module); 165 | let builders = create_builders(self.live_builders()?); 166 | Ok(live_builder.with_builders(builders)) 167 | } 168 | 169 | fn version_for_telemetry(&self) -> Version { 170 | rbuilder_version() 171 | } 172 | 173 | /// @Pending fix this ugly copy/paste 174 | fn build_backtest_block< 175 | P, 176 | PartialBlockExecutionTracerType: PartialBlockExecutionTracer + Clone + Send + Sync + 'static, 177 | >( 178 | &self, 179 | building_algorithm_name: &str, 180 | input: BacktestSimulateBlockInput<'_, P>, 181 | partial_block_execution_tracer: PartialBlockExecutionTracerType, 182 | ) -> eyre::Result 183 | where 184 | P: StateProviderFactory + Clone + 'static, 185 | { 186 | let builder_cfg = self.builder(building_algorithm_name)?; 187 | match builder_cfg.builder { 188 | SpecificBuilderConfig::OrderingBuilder(config) => { 189 | if config.ignore_mempool_profit_on_bundles { 190 | build_backtest_block_ordering_builder::< 191 | P, 192 | NonMempoolProfitInfoGetter, 193 | PartialBlockExecutionTracerType, 194 | >(config, input, partial_block_execution_tracer) 195 | } else { 196 | build_backtest_block_ordering_builder::< 197 | P, 198 | FullProfitInfoGetter, 199 | PartialBlockExecutionTracerType, 200 | >(config, input, partial_block_execution_tracer) 201 | } 202 | } 203 | SpecificBuilderConfig::ParallelBuilder(config) => { 204 | parallel_build_backtest::

(input, config) 205 | } 206 | } 207 | } 208 | } 209 | 210 | async fn handle_subsidise_block( 211 | bidding_service: Arc, 212 | params: jsonrpsee::types::Params<'static>, 213 | ) { 214 | match params.one() { 215 | Ok(block_number) => bidding_service.must_win_block(block_number).await, 216 | Err(err) => warn!(?err, "Failed to parse block_number"), 217 | }; 218 | } 219 | 220 | #[derive(thiserror::Error, Debug)] 221 | enum RegisterKeyError { 222 | #[error("Register key error parsing url: {0:?}")] 223 | UrlParse(#[from] url::ParseError), 224 | #[error("Register key network error: {0:?}")] 225 | Network(#[from] reqwest::Error), 226 | #[error("Register key service error: {0:?}")] 227 | Service(StatusCode), 228 | } 229 | 230 | impl FlashbotsConfig { 231 | /// Returns the BiddingService + an optional FlashbotsBlockSubsidySelector so smart multiplexing can force blocks. 232 | /// FlashbotsBlockSubsidySelector can be None if subcidy is disabled. 233 | pub async fn create_bidding_service( 234 | &self, 235 | landed_blocks_history: &[LandedBlockInfo], 236 | ) -> eyre::Result> { 237 | let bidding_service_client = 238 | BiddingServiceClientAdapter::new(&self.bidding_service_ipc_path, landed_blocks_history) 239 | .await 240 | .map_err(|e| eyre::Report::new(e).wrap_err("Unable to connect to remote bidder"))?; 241 | Ok(Arc::new(bidding_service_client)) 242 | } 243 | 244 | /// Creates a new PrivateKeySigner and registers the associated address on key_registration_url 245 | async fn register_key( 246 | &self, 247 | key_registration_url: &str, 248 | ) -> Result { 249 | let signer = PrivateKeySigner::random(); 250 | let client = reqwest::Client::new(); 251 | let url = { 252 | let mut url = Url::parse(key_registration_url)?; 253 | url.set_path("/api/l1-builder/v1/register_credentials/rbuilder"); 254 | url 255 | }; 256 | let body = format!("{{ \"ecdsa_pubkey_address\": \"{}\" }}", signer.address()); 257 | let res = client.post(url).body(body).send().await?; 258 | if res.status().is_success() { 259 | Ok(signer) 260 | } else { 261 | Err(RegisterKeyError::Service(res.status())) 262 | } 263 | } 264 | 265 | /// Depending on the cfg may create: 266 | /// - Dummy sink (no blocks_processor_url) 267 | /// - Standard block processor client 268 | /// - Secure block processor client (using block_processor_key to sign) 269 | fn create_block_processor_client( 270 | &self, 271 | block_processor_key: Option, 272 | ) -> eyre::Result>> { 273 | if let Some(url) = &self.blocks_processor_url { 274 | let bid_observer: Box = 275 | if let Some(block_processor_key) = block_processor_key { 276 | let client = crate::signed_http_client::create_client( 277 | url, 278 | block_processor_key, 279 | self.blocks_processor_max_request_size_bytes, 280 | self.blocks_processor_max_concurrent_requests, 281 | )?; 282 | let block_processor = 283 | BlocksProcessorClient::new(client, SIGNED_BLOCK_CONSUME_BUILT_BLOCK_METHOD); 284 | Box::new(BlocksProcessorClientBidObserver::new(block_processor)) 285 | } else { 286 | let client = BlocksProcessorClient::try_from( 287 | url, 288 | self.blocks_processor_max_request_size_bytes, 289 | self.blocks_processor_max_concurrent_requests, 290 | )?; 291 | Box::new(BlocksProcessorClientBidObserver::new(client)) 292 | }; 293 | Ok(Some(bid_observer)) 294 | } else { 295 | if block_processor_key.is_some() { 296 | return Self::bail_blocks_processor_url_not_set(); 297 | } 298 | Ok(None) 299 | } 300 | } 301 | 302 | fn bail_blocks_processor_url_not_set() -> Result { 303 | eyre::bail!("blocks_processor_url should always be set if 
key_registration_url is set"); 304 | } 305 | 306 | /// Depending on the cfg adds a BlocksProcessorClientBidObserver and/or a true value pusher. 307 | async fn create_bid_observer( 308 | &self, 309 | cancellation_token: &CancellationToken, 310 | ) -> eyre::Result> { 311 | let block_processor_key = if let Some(key_registration_url) = &self.key_registration_url { 312 | if self.blocks_processor_url.is_none() { 313 | return Self::bail_blocks_processor_url_not_set(); 314 | } 315 | Some(self.register_key(key_registration_url).await?) 316 | } else { 317 | None 318 | }; 319 | 320 | let bid_observer = RbuilderOperatorBidObserver { 321 | block_processor: self.create_block_processor_client(block_processor_key.clone())?, 322 | tbv_pusher: self.create_tbv_pusher(block_processor_key, cancellation_token)?, 323 | }; 324 | Ok(Box::new(bid_observer)) 325 | } 326 | 327 | fn create_tbv_pusher( 328 | &self, 329 | block_processor_key: Option<PrivateKeySigner>, 330 | cancellation_token: &CancellationToken, 331 | ) -> eyre::Result>> { 332 | // Avoid sending TBV if we are not on buildernet 333 | if self.key_registration_url.is_none() { 334 | return Ok(None); 335 | } 336 | 337 | if let Some(block_processor_key) = block_processor_key { 338 | if let Some(blocks_processor_url) = &self.blocks_processor_url { 339 | Ok(Some(Box::new(BestTrueValueObserver::new_block_processor( 340 | blocks_processor_url.clone(), 341 | block_processor_key, 342 | self.blocks_processor_max_concurrent_requests, 343 | cancellation_token.clone(), 344 | )?))) 345 | } else { 346 | Self::bail_blocks_processor_url_not_set() 347 | } 348 | } else if let Some(cfg) = &self.tbv_push_redis { 349 | let tbv_push_redis_url_value = cfg 350 | .url 351 | .as_ref() 352 | .ok_or(eyre::Report::msg("Missing tbv_push_redis_url"))? 353 | .value() 354 | .context("tbv_push_redis_url")?; 355 | Ok(Some(Box::new(BestTrueValueObserver::new_redis( 356 | tbv_push_redis_url_value, 357 | cfg.channel.clone(), 358 | cancellation_token.clone(), 359 | )?))) 360 | } else { 361 | Ok(None) 362 | } 363 | } 364 | 365 | fn live_builders(&self) -> eyre::Result> { 366 | self.base_config 367 | .live_builders 368 | .iter() 369 | .map(|cfg_name| self.builder(cfg_name)) 370 | .collect() 371 | } 372 | 373 | fn builder(&self, name: &str) -> eyre::Result { 374 | self.builders 375 | .iter() 376 | .find(|b| b.name == name) 377 | .cloned() 378 | .ok_or_else(|| eyre::eyre!("Builder {} not found in builders list", name)) 379 | } 380 | 381 | pub fn clickhouse_client(&self) -> eyre::Result<Option<Client>> { 382 | let host_url = if let Some(host) = &self.clickhouse.clickhouse_host_url { 383 | host.value()? 384 | } else { 385 | return Ok(None); 386 | }; 387 | let user = self 388 | .clickhouse 389 | .clickhouse_user 390 | .as_ref() 391 | .ok_or(eyre::eyre!("clickhouse_user not found"))? 392 | .value()?; 393 | let password = self 394 | .clickhouse 395 | .clickhouse_password 396 | .as_ref() 397 | .ok_or(eyre::eyre!("clickhouse_password not found"))? 
398 | .value()?; 399 | 400 | let client = Client::default() 401 | .with_url(host_url) 402 | .with_user(user) 403 | .with_password(password); 404 | Ok(Some(client)) 405 | } 406 | } 407 | 408 | pub fn default_blocks_processor_max_concurrent_requests() -> usize { 409 | 1024 410 | } 411 | 412 | pub fn default_blocks_processor_max_request_size_bytes() -> u32 { 413 | 31457280 // 30MB 414 | } 415 | 416 | #[derive(Debug)] 417 | struct RbuilderOperatorBidObserver { 418 | block_processor: Option>, 419 | tbv_pusher: Option>, 420 | } 421 | 422 | impl BidObserver for RbuilderOperatorBidObserver { 423 | fn block_submitted( 424 | &self, 425 | slot_data: &MevBoostSlotData, 426 | sealed_block: &SealedBlock, 427 | submit_block_request: &SubmitBlockRequest, 428 | built_block_trace: &BuiltBlockTrace, 429 | builder_name: String, 430 | best_bid_value: U256, 431 | ) { 432 | if let Some(p) = self.block_processor.as_ref() { 433 | p.block_submitted( 434 | slot_data, 435 | sealed_block, 436 | submit_block_request, 437 | built_block_trace, 438 | builder_name.clone(), 439 | best_bid_value, 440 | ) 441 | } 442 | if let Some(p) = self.tbv_pusher.as_ref() { 443 | p.block_submitted( 444 | slot_data, 445 | sealed_block, 446 | submit_block_request, 447 | built_block_trace, 448 | builder_name, 449 | best_bid_value, 450 | ) 451 | } 452 | } 453 | } 454 | 455 | #[cfg(test)] 456 | mod test { 457 | use rbuilder::live_builder::base_config::load_config_toml_and_env; 458 | 459 | use super::*; 460 | use std::{env, path::PathBuf}; 461 | 462 | #[test] 463 | fn test_default_config() { 464 | let config: FlashbotsConfig = serde_json::from_str("{}").unwrap(); 465 | let config_default = FlashbotsConfig::default(); 466 | 467 | assert_eq!(config, config_default); 468 | } 469 | 470 | #[test] 471 | fn test_parse_example_config() { 472 | let mut p = PathBuf::from(env!("CARGO_MANIFEST_DIR")); 473 | p.push("config-live-example.toml"); 474 | 475 | load_config_toml_and_env::(p.clone()).expect("Config load"); 476 | } 477 | } 478 | -------------------------------------------------------------------------------- /src/bidding_service_wrapper/bidding_service.rs: -------------------------------------------------------------------------------- 1 | /// Mapping of build_info::Version 2 | #[allow(clippy::derive_partial_eq_without_eq)] 3 | #[derive(Clone, PartialEq, ::prost::Message)] 4 | pub struct BidderVersionInfo { 5 | #[prost(string, tag = "1")] 6 | pub git_commit: ::prost::alloc::string::String, 7 | #[prost(string, tag = "2")] 8 | pub git_ref: ::prost::alloc::string::String, 9 | #[prost(string, tag = "3")] 10 | pub build_time_utc: ::prost::alloc::string::String, 11 | } 12 | #[allow(clippy::derive_partial_eq_without_eq)] 13 | #[derive(Clone, PartialEq, ::prost::Message)] 14 | pub struct Empty {} 15 | #[allow(clippy::derive_partial_eq_without_eq)] 16 | #[derive(Clone, PartialEq, ::prost::Message)] 17 | pub struct MustWinBlockParams { 18 | #[prost(uint64, tag = "1")] 19 | pub block: u64, 20 | } 21 | #[allow(clippy::derive_partial_eq_without_eq)] 22 | #[derive(Clone, PartialEq, ::prost::Message)] 23 | pub struct UpdateNewBidParams { 24 | #[prost(double, tag = "1")] 25 | pub seen_time: f64, 26 | #[prost(string, tag = "2")] 27 | pub publisher_name: ::prost::alloc::string::String, 28 | #[prost(enumeration = "PublisherType", tag = "3")] 29 | pub publisher_type: i32, 30 | #[prost(double, optional, tag = "4")] 31 | pub relay_time: ::core::option::Option, 32 | #[prost(string, tag = "5")] 33 | pub relay_name: ::prost::alloc::string::String, 34 | /// Array of 32 
bytes 35 | #[prost(bytes = "vec", tag = "6")] 36 | pub block_hash: ::prost::alloc::vec::Vec, 37 | /// Array of 32 bytes 38 | #[prost(bytes = "vec", tag = "7")] 39 | pub parent_hash: ::prost::alloc::vec::Vec, 40 | /// Array of 4 uint64 41 | #[prost(uint64, repeated, tag = "8")] 42 | pub value: ::prost::alloc::vec::Vec, 43 | #[prost(uint64, tag = "9")] 44 | pub slot_number: u64, 45 | #[prost(uint64, tag = "10")] 46 | pub block_number: u64, 47 | /// Array of 0 or 48 bytes 48 | #[prost(bytes = "vec", tag = "11")] 49 | pub builder_pubkey: ::prost::alloc::vec::Vec, 50 | #[prost(string, optional, tag = "12")] 51 | pub extra_data: ::core::option::Option<::prost::alloc::string::String>, 52 | /// Array of 0 or 20 bytes 53 | #[prost(bytes = "vec", tag = "13")] 54 | pub fee_recipient: ::prost::alloc::vec::Vec, 55 | /// Array of 0 or 20 bytes 56 | #[prost(bytes = "vec", tag = "14")] 57 | pub proposer_fee_recipient: ::prost::alloc::vec::Vec, 58 | #[prost(uint64, optional, tag = "15")] 59 | pub gas_used: ::core::option::Option, 60 | #[prost(bool, optional, tag = "16")] 61 | pub optimistic_submission: ::core::option::Option, 62 | /// For metrics 63 | #[prost(uint64, tag = "17")] 64 | pub creation_time_us: u64, 65 | } 66 | #[allow(clippy::derive_partial_eq_without_eq)] 67 | #[derive(Clone, PartialEq, ::prost::Message)] 68 | pub struct NewBlockParams { 69 | #[prost(uint64, tag = "1")] 70 | pub session_id: u64, 71 | /// Array of 4 uint64 72 | #[prost(uint64, repeated, tag = "2")] 73 | pub true_block_value: ::prost::alloc::vec::Vec, 74 | #[prost(bool, tag = "3")] 75 | pub can_add_payout_tx: bool, 76 | #[prost(uint64, tag = "4")] 77 | pub block_id: u64, 78 | /// For metrics 79 | #[prost(uint64, tag = "5")] 80 | pub creation_time_us: u64, 81 | } 82 | #[allow(clippy::derive_partial_eq_without_eq)] 83 | #[derive(Clone, PartialEq, ::prost::Message)] 84 | pub struct DestroySlotBidderParams { 85 | #[prost(uint64, tag = "1")] 86 | pub session_id: u64, 87 | } 88 | #[allow(clippy::derive_partial_eq_without_eq)] 89 | #[derive(Clone, PartialEq, ::prost::Message)] 90 | pub struct CreateSlotBidderParams { 91 | #[prost(uint64, tag = "1")] 92 | pub block: u64, 93 | #[prost(uint64, tag = "2")] 94 | pub slot: u64, 95 | /// Array of 32 bytes 96 | #[prost(bytes = "vec", tag = "3")] 97 | pub parent_hash: ::prost::alloc::vec::Vec, 98 | /// Id identifying the session. Used in all following calls. 99 | #[prost(uint64, tag = "4")] 100 | pub session_id: u64, 101 | /// unix ts 102 | #[prost(int64, tag = "5")] 103 | pub slot_timestamp: i64, 104 | } 105 | /// Info about a onchain block from reth. 106 | #[allow(clippy::derive_partial_eq_without_eq)] 107 | #[derive(Clone, PartialEq, ::prost::Message)] 108 | pub struct LandedBlockInfo { 109 | #[prost(uint64, tag = "1")] 110 | pub block_number: u64, 111 | #[prost(int64, tag = "2")] 112 | pub block_timestamp: i64, 113 | /// Array of 4 uint64 114 | #[prost(uint64, repeated, tag = "3")] 115 | pub builder_balance: ::prost::alloc::vec::Vec, 116 | /// true -> we landed this block. 117 | /// If false we could have landed it in coinbase == fee recipient mode but balance wouldn't change so we don't care. 
118 | #[prost(bool, tag = "4")] 119 | pub beneficiary_is_builder: bool, 120 | } 121 | #[allow(clippy::derive_partial_eq_without_eq)] 122 | #[derive(Clone, PartialEq, ::prost::Message)] 123 | pub struct LandedBlocksParams { 124 | /// Added field name 125 | #[prost(message, repeated, tag = "1")] 126 | pub landed_block_info: ::prost::alloc::vec::Vec, 127 | } 128 | #[allow(clippy::derive_partial_eq_without_eq)] 129 | #[derive(Clone, PartialEq, ::prost::Message)] 130 | pub struct Bid { 131 | /// Optional implicitly by allowing empty 132 | /// 133 | /// Array of 4 uint64 134 | #[prost(uint64, repeated, tag = "1")] 135 | pub payout_tx_value: ::prost::alloc::vec::Vec, 136 | #[prost(uint64, tag = "2")] 137 | pub block_id: u64, 138 | /// Optional implicitly by allowing empty 139 | /// 140 | /// Array of 4 uint64 141 | #[prost(uint64, repeated, tag = "3")] 142 | pub seen_competition_bid: ::prost::alloc::vec::Vec, 143 | #[prost(uint64, optional, tag = "4")] 144 | pub trigger_creation_time_us: ::core::option::Option, 145 | } 146 | /// Exactly 1 member will be not null. 147 | /// Since this is not mapped to an enum we must be careful to manually update BiddingServiceClientAdapter. 148 | #[allow(clippy::derive_partial_eq_without_eq)] 149 | #[derive(Clone, PartialEq, ::prost::Message)] 150 | pub struct Callback { 151 | #[prost(message, optional, tag = "1")] 152 | pub bid: ::core::option::Option, 153 | #[prost(bool, optional, tag = "2")] 154 | pub can_use_suggested_fee_recipient_as_coinbase_change: ::core::option::Option, 155 | } 156 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] 157 | #[repr(i32)] 158 | pub enum PublisherType { 159 | RelayBids = 0, 160 | RelayHeaders = 1, 161 | UltrasoundWs = 2, 162 | BloxrouteWs = 3, 163 | ExternalWs = 4, 164 | } 165 | impl PublisherType { 166 | /// String value of the enum field names used in the ProtoBuf definition. 167 | /// 168 | /// The values are not transformed in any way and thus are considered stable 169 | /// (if the ProtoBuf definition does not change) and safe for programmatic use. 170 | pub fn as_str_name(&self) -> &'static str { 171 | match self { 172 | PublisherType::RelayBids => "RelayBids", 173 | PublisherType::RelayHeaders => "RelayHeaders", 174 | PublisherType::UltrasoundWs => "UltrasoundWs", 175 | PublisherType::BloxrouteWs => "BloxrouteWs", 176 | PublisherType::ExternalWs => "ExternalWs", 177 | } 178 | } 179 | /// Creates an enum from field names used in the ProtoBuf definition. 180 | pub fn from_str_name(value: &str) -> ::core::option::Option { 181 | match value { 182 | "RelayBids" => Some(Self::RelayBids), 183 | "RelayHeaders" => Some(Self::RelayHeaders), 184 | "UltrasoundWs" => Some(Self::UltrasoundWs), 185 | "BloxrouteWs" => Some(Self::BloxrouteWs), 186 | "ExternalWs" => Some(Self::ExternalWs), 187 | _ => None, 188 | } 189 | } 190 | } 191 | /// Generated client implementations. 192 | pub mod bidding_service_client { 193 | #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] 194 | use tonic::codegen::*; 195 | use tonic::codegen::http::Uri; 196 | /// Protocol for the bidding service. It's used to marshal all the traits in src/block_descriptor_bidding/traits.rs 197 | /// Usage: 198 | /// The client connects to the server and calls Initialize, this call should create the real BiddingService on the server side. 199 | /// Before calling Initialize any other call will fail. Initialize can be called again to recreate the BiddingService (eg: rbuilder reconnection). 
200 | /// After that, for each slot the client should call CreateSlotBidder to create the SlotBidder on the server side and DestroySlotBidder when the SlotBidder is not needed anymore. 201 | /// Other calls are almost 1 to 1 with the original traits but for SlotBidder calls block/slot are added to identify the SlotBidder. 202 | /// Notice that CreateSlotBidder returns a stream of Callback. This stream is used for 2 things: 203 | /// - Send back bids made by the SlotBidder. 204 | /// - Notify changes on the state of SlotBidder's can_use_suggested_fee_recipient_as_coinbase. We use this methodology instead of a 205 | /// forward RPC call since can_use_suggested_fee_recipient_as_coinbase almost does not change and we want to avoid innecesary RPC calls during block building. 206 | #[derive(Debug, Clone)] 207 | pub struct BiddingServiceClient { 208 | inner: tonic::client::Grpc, 209 | } 210 | impl BiddingServiceClient { 211 | /// Attempt to create a new client by connecting to a given endpoint. 212 | pub async fn connect(dst: D) -> Result 213 | where 214 | D: std::convert::TryInto, 215 | D::Error: Into, 216 | { 217 | let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; 218 | Ok(Self::new(conn)) 219 | } 220 | } 221 | impl BiddingServiceClient 222 | where 223 | T: tonic::client::GrpcService, 224 | T::Error: Into, 225 | T::ResponseBody: Body + Send + 'static, 226 | ::Error: Into + Send, 227 | { 228 | pub fn new(inner: T) -> Self { 229 | let inner = tonic::client::Grpc::new(inner); 230 | Self { inner } 231 | } 232 | pub fn with_origin(inner: T, origin: Uri) -> Self { 233 | let inner = tonic::client::Grpc::with_origin(inner, origin); 234 | Self { inner } 235 | } 236 | pub fn with_interceptor( 237 | inner: T, 238 | interceptor: F, 239 | ) -> BiddingServiceClient> 240 | where 241 | F: tonic::service::Interceptor, 242 | T::ResponseBody: Default, 243 | T: tonic::codegen::Service< 244 | http::Request, 245 | Response = http::Response< 246 | >::ResponseBody, 247 | >, 248 | >, 249 | , 251 | >>::Error: Into + Send + Sync, 252 | { 253 | BiddingServiceClient::new(InterceptedService::new(inner, interceptor)) 254 | } 255 | /// Compress requests with the given encoding. 256 | /// 257 | /// This requires the server to support it otherwise it might respond with an 258 | /// error. 259 | #[must_use] 260 | pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { 261 | self.inner = self.inner.send_compressed(encoding); 262 | self 263 | } 264 | /// Enable decompressing responses. 265 | #[must_use] 266 | pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { 267 | self.inner = self.inner.accept_compressed(encoding); 268 | self 269 | } 270 | /// Call after connection before calling anything. This will really create the BiddingService on the server side. 271 | /// Returns the version info for the server side. 
272 | pub async fn initialize( 273 | &mut self, 274 | request: impl tonic::IntoRequest, 275 | ) -> Result, tonic::Status> { 276 | self.inner 277 | .ready() 278 | .await 279 | .map_err(|e| { 280 | tonic::Status::new( 281 | tonic::Code::Unknown, 282 | format!("Service was not ready: {}", e.into()), 283 | ) 284 | })?; 285 | let codec = tonic::codec::ProstCodec::default(); 286 | let path = http::uri::PathAndQuery::from_static( 287 | "/bidding_service.BiddingService/Initialize", 288 | ); 289 | self.inner.unary(request.into_request(), path, codec).await 290 | } 291 | /// BiddingService 292 | pub async fn create_slot_bidder( 293 | &mut self, 294 | request: impl tonic::IntoRequest, 295 | ) -> Result< 296 | tonic::Response>, 297 | tonic::Status, 298 | > { 299 | self.inner 300 | .ready() 301 | .await 302 | .map_err(|e| { 303 | tonic::Status::new( 304 | tonic::Code::Unknown, 305 | format!("Service was not ready: {}", e.into()), 306 | ) 307 | })?; 308 | let codec = tonic::codec::ProstCodec::default(); 309 | let path = http::uri::PathAndQuery::from_static( 310 | "/bidding_service.BiddingService/CreateSlotBidder", 311 | ); 312 | self.inner.server_streaming(request.into_request(), path, codec).await 313 | } 314 | pub async fn destroy_slot_bidder( 315 | &mut self, 316 | request: impl tonic::IntoRequest, 317 | ) -> Result, tonic::Status> { 318 | self.inner 319 | .ready() 320 | .await 321 | .map_err(|e| { 322 | tonic::Status::new( 323 | tonic::Code::Unknown, 324 | format!("Service was not ready: {}", e.into()), 325 | ) 326 | })?; 327 | let codec = tonic::codec::ProstCodec::default(); 328 | let path = http::uri::PathAndQuery::from_static( 329 | "/bidding_service.BiddingService/DestroySlotBidder", 330 | ); 331 | self.inner.unary(request.into_request(), path, codec).await 332 | } 333 | pub async fn must_win_block( 334 | &mut self, 335 | request: impl tonic::IntoRequest, 336 | ) -> Result, tonic::Status> { 337 | self.inner 338 | .ready() 339 | .await 340 | .map_err(|e| { 341 | tonic::Status::new( 342 | tonic::Code::Unknown, 343 | format!("Service was not ready: {}", e.into()), 344 | ) 345 | })?; 346 | let codec = tonic::codec::ProstCodec::default(); 347 | let path = http::uri::PathAndQuery::from_static( 348 | "/bidding_service.BiddingService/MustWinBlock", 349 | ); 350 | self.inner.unary(request.into_request(), path, codec).await 351 | } 352 | pub async fn update_new_landed_blocks_detected( 353 | &mut self, 354 | request: impl tonic::IntoRequest, 355 | ) -> Result, tonic::Status> { 356 | self.inner 357 | .ready() 358 | .await 359 | .map_err(|e| { 360 | tonic::Status::new( 361 | tonic::Code::Unknown, 362 | format!("Service was not ready: {}", e.into()), 363 | ) 364 | })?; 365 | let codec = tonic::codec::ProstCodec::default(); 366 | let path = http::uri::PathAndQuery::from_static( 367 | "/bidding_service.BiddingService/UpdateNewLandedBlocksDetected", 368 | ); 369 | self.inner.unary(request.into_request(), path, codec).await 370 | } 371 | pub async fn update_failed_reading_new_landed_blocks( 372 | &mut self, 373 | request: impl tonic::IntoRequest, 374 | ) -> Result, tonic::Status> { 375 | self.inner 376 | .ready() 377 | .await 378 | .map_err(|e| { 379 | tonic::Status::new( 380 | tonic::Code::Unknown, 381 | format!("Service was not ready: {}", e.into()), 382 | ) 383 | })?; 384 | let codec = tonic::codec::ProstCodec::default(); 385 | let path = http::uri::PathAndQuery::from_static( 386 | "/bidding_service.BiddingService/UpdateFailedReadingNewLandedBlocks", 387 | ); 388 | self.inner.unary(request.into_request(), 
path, codec).await 389 | } 390 | /// BiddingService->BlockBidWithStatsObs 391 | pub async fn update_new_bid( 392 | &mut self, 393 | request: impl tonic::IntoRequest, 394 | ) -> Result, tonic::Status> { 395 | self.inner 396 | .ready() 397 | .await 398 | .map_err(|e| { 399 | tonic::Status::new( 400 | tonic::Code::Unknown, 401 | format!("Service was not ready: {}", e.into()), 402 | ) 403 | })?; 404 | let codec = tonic::codec::ProstCodec::default(); 405 | let path = http::uri::PathAndQuery::from_static( 406 | "/bidding_service.BiddingService/UpdateNewBid", 407 | ); 408 | self.inner.unary(request.into_request(), path, codec).await 409 | } 410 | /// UnfinishedBlockBuildingSink 411 | pub async fn new_block( 412 | &mut self, 413 | request: impl tonic::IntoRequest, 414 | ) -> Result, tonic::Status> { 415 | self.inner 416 | .ready() 417 | .await 418 | .map_err(|e| { 419 | tonic::Status::new( 420 | tonic::Code::Unknown, 421 | format!("Service was not ready: {}", e.into()), 422 | ) 423 | })?; 424 | let codec = tonic::codec::ProstCodec::default(); 425 | let path = http::uri::PathAndQuery::from_static( 426 | "/bidding_service.BiddingService/NewBlock", 427 | ); 428 | self.inner.unary(request.into_request(), path, codec).await 429 | } 430 | } 431 | } 432 | /// Generated server implementations. 433 | pub mod bidding_service_server { 434 | #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] 435 | use tonic::codegen::*; 436 | /// Generated trait containing gRPC methods that should be implemented for use with BiddingServiceServer. 437 | #[async_trait] 438 | pub trait BiddingService: Send + Sync + 'static { 439 | /// Call after connection before calling anything. This will really create the BiddingService on the server side. 440 | /// Returns the version info for the server side. 441 | async fn initialize( 442 | &self, 443 | request: tonic::Request, 444 | ) -> Result, tonic::Status>; 445 | /// Server streaming response type for the CreateSlotBidder method. 446 | type CreateSlotBidderStream: futures_core::Stream< 447 | Item = Result, 448 | > 449 | + Send 450 | + 'static; 451 | /// BiddingService 452 | async fn create_slot_bidder( 453 | &self, 454 | request: tonic::Request, 455 | ) -> Result, tonic::Status>; 456 | async fn destroy_slot_bidder( 457 | &self, 458 | request: tonic::Request, 459 | ) -> Result, tonic::Status>; 460 | async fn must_win_block( 461 | &self, 462 | request: tonic::Request, 463 | ) -> Result, tonic::Status>; 464 | async fn update_new_landed_blocks_detected( 465 | &self, 466 | request: tonic::Request, 467 | ) -> Result, tonic::Status>; 468 | async fn update_failed_reading_new_landed_blocks( 469 | &self, 470 | request: tonic::Request, 471 | ) -> Result, tonic::Status>; 472 | /// BiddingService->BlockBidWithStatsObs 473 | async fn update_new_bid( 474 | &self, 475 | request: tonic::Request, 476 | ) -> Result, tonic::Status>; 477 | /// UnfinishedBlockBuildingSink 478 | async fn new_block( 479 | &self, 480 | request: tonic::Request, 481 | ) -> Result, tonic::Status>; 482 | } 483 | /// Protocol for the bidding service. It's used to marshal all the traits in src/block_descriptor_bidding/traits.rs 484 | /// Usage: 485 | /// The client connects to the server and calls Initialize, this call should create the real BiddingService on the server side. 486 | /// Before calling Initialize any other call will fail. Initialize can be called again to recreate the BiddingService (eg: rbuilder reconnection). 
487 | /// After that, for each slot the client should call CreateSlotBidder to create the SlotBidder on the server side and DestroySlotBidder when the SlotBidder is not needed anymore. 488 | /// Other calls are almost 1 to 1 with the original traits but for SlotBidder calls block/slot are added to identify the SlotBidder. 489 | /// Notice that CreateSlotBidder returns a stream of Callback. This stream is used for 2 things: 490 | /// - Send back bids made by the SlotBidder. 491 | /// - Notify changes on the state of SlotBidder's can_use_suggested_fee_recipient_as_coinbase. We use this methodology instead of a 492 | /// forward RPC call since can_use_suggested_fee_recipient_as_coinbase almost does not change and we want to avoid innecesary RPC calls during block building. 493 | #[derive(Debug)] 494 | pub struct BiddingServiceServer { 495 | inner: _Inner, 496 | accept_compression_encodings: EnabledCompressionEncodings, 497 | send_compression_encodings: EnabledCompressionEncodings, 498 | } 499 | struct _Inner(Arc); 500 | impl BiddingServiceServer { 501 | pub fn new(inner: T) -> Self { 502 | Self::from_arc(Arc::new(inner)) 503 | } 504 | pub fn from_arc(inner: Arc) -> Self { 505 | let inner = _Inner(inner); 506 | Self { 507 | inner, 508 | accept_compression_encodings: Default::default(), 509 | send_compression_encodings: Default::default(), 510 | } 511 | } 512 | pub fn with_interceptor( 513 | inner: T, 514 | interceptor: F, 515 | ) -> InterceptedService 516 | where 517 | F: tonic::service::Interceptor, 518 | { 519 | InterceptedService::new(Self::new(inner), interceptor) 520 | } 521 | /// Enable decompressing requests with the given encoding. 522 | #[must_use] 523 | pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { 524 | self.accept_compression_encodings.enable(encoding); 525 | self 526 | } 527 | /// Compress responses with the given encoding, if the client supports it. 
528 | #[must_use] 529 | pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { 530 | self.send_compression_encodings.enable(encoding); 531 | self 532 | } 533 | } 534 | impl tonic::codegen::Service> for BiddingServiceServer 535 | where 536 | T: BiddingService, 537 | B: Body + Send + 'static, 538 | B::Error: Into + Send + 'static, 539 | { 540 | type Response = http::Response; 541 | type Error = std::convert::Infallible; 542 | type Future = BoxFuture; 543 | fn poll_ready( 544 | &mut self, 545 | _cx: &mut Context<'_>, 546 | ) -> Poll> { 547 | Poll::Ready(Ok(())) 548 | } 549 | fn call(&mut self, req: http::Request) -> Self::Future { 550 | let inner = self.inner.clone(); 551 | match req.uri().path() { 552 | "/bidding_service.BiddingService/Initialize" => { 553 | #[allow(non_camel_case_types)] 554 | struct InitializeSvc(pub Arc); 555 | impl< 556 | T: BiddingService, 557 | > tonic::server::UnaryService 558 | for InitializeSvc { 559 | type Response = super::BidderVersionInfo; 560 | type Future = BoxFuture< 561 | tonic::Response, 562 | tonic::Status, 563 | >; 564 | fn call( 565 | &mut self, 566 | request: tonic::Request, 567 | ) -> Self::Future { 568 | let inner = self.0.clone(); 569 | let fut = async move { (*inner).initialize(request).await }; 570 | Box::pin(fut) 571 | } 572 | } 573 | let accept_compression_encodings = self.accept_compression_encodings; 574 | let send_compression_encodings = self.send_compression_encodings; 575 | let inner = self.inner.clone(); 576 | let fut = async move { 577 | let inner = inner.0; 578 | let method = InitializeSvc(inner); 579 | let codec = tonic::codec::ProstCodec::default(); 580 | let mut grpc = tonic::server::Grpc::new(codec) 581 | .apply_compression_config( 582 | accept_compression_encodings, 583 | send_compression_encodings, 584 | ); 585 | let res = grpc.unary(method, req).await; 586 | Ok(res) 587 | }; 588 | Box::pin(fut) 589 | } 590 | "/bidding_service.BiddingService/CreateSlotBidder" => { 591 | #[allow(non_camel_case_types)] 592 | struct CreateSlotBidderSvc(pub Arc); 593 | impl< 594 | T: BiddingService, 595 | > tonic::server::ServerStreamingService< 596 | super::CreateSlotBidderParams, 597 | > for CreateSlotBidderSvc { 598 | type Response = super::Callback; 599 | type ResponseStream = T::CreateSlotBidderStream; 600 | type Future = BoxFuture< 601 | tonic::Response, 602 | tonic::Status, 603 | >; 604 | fn call( 605 | &mut self, 606 | request: tonic::Request, 607 | ) -> Self::Future { 608 | let inner = self.0.clone(); 609 | let fut = async move { 610 | (*inner).create_slot_bidder(request).await 611 | }; 612 | Box::pin(fut) 613 | } 614 | } 615 | let accept_compression_encodings = self.accept_compression_encodings; 616 | let send_compression_encodings = self.send_compression_encodings; 617 | let inner = self.inner.clone(); 618 | let fut = async move { 619 | let inner = inner.0; 620 | let method = CreateSlotBidderSvc(inner); 621 | let codec = tonic::codec::ProstCodec::default(); 622 | let mut grpc = tonic::server::Grpc::new(codec) 623 | .apply_compression_config( 624 | accept_compression_encodings, 625 | send_compression_encodings, 626 | ); 627 | let res = grpc.server_streaming(method, req).await; 628 | Ok(res) 629 | }; 630 | Box::pin(fut) 631 | } 632 | "/bidding_service.BiddingService/DestroySlotBidder" => { 633 | #[allow(non_camel_case_types)] 634 | struct DestroySlotBidderSvc(pub Arc); 635 | impl< 636 | T: BiddingService, 637 | > tonic::server::UnaryService 638 | for DestroySlotBidderSvc { 639 | type Response = super::Empty; 640 | type Future 
= BoxFuture< 641 | tonic::Response, 642 | tonic::Status, 643 | >; 644 | fn call( 645 | &mut self, 646 | request: tonic::Request, 647 | ) -> Self::Future { 648 | let inner = self.0.clone(); 649 | let fut = async move { 650 | (*inner).destroy_slot_bidder(request).await 651 | }; 652 | Box::pin(fut) 653 | } 654 | } 655 | let accept_compression_encodings = self.accept_compression_encodings; 656 | let send_compression_encodings = self.send_compression_encodings; 657 | let inner = self.inner.clone(); 658 | let fut = async move { 659 | let inner = inner.0; 660 | let method = DestroySlotBidderSvc(inner); 661 | let codec = tonic::codec::ProstCodec::default(); 662 | let mut grpc = tonic::server::Grpc::new(codec) 663 | .apply_compression_config( 664 | accept_compression_encodings, 665 | send_compression_encodings, 666 | ); 667 | let res = grpc.unary(method, req).await; 668 | Ok(res) 669 | }; 670 | Box::pin(fut) 671 | } 672 | "/bidding_service.BiddingService/MustWinBlock" => { 673 | #[allow(non_camel_case_types)] 674 | struct MustWinBlockSvc(pub Arc); 675 | impl< 676 | T: BiddingService, 677 | > tonic::server::UnaryService 678 | for MustWinBlockSvc { 679 | type Response = super::Empty; 680 | type Future = BoxFuture< 681 | tonic::Response, 682 | tonic::Status, 683 | >; 684 | fn call( 685 | &mut self, 686 | request: tonic::Request, 687 | ) -> Self::Future { 688 | let inner = self.0.clone(); 689 | let fut = async move { 690 | (*inner).must_win_block(request).await 691 | }; 692 | Box::pin(fut) 693 | } 694 | } 695 | let accept_compression_encodings = self.accept_compression_encodings; 696 | let send_compression_encodings = self.send_compression_encodings; 697 | let inner = self.inner.clone(); 698 | let fut = async move { 699 | let inner = inner.0; 700 | let method = MustWinBlockSvc(inner); 701 | let codec = tonic::codec::ProstCodec::default(); 702 | let mut grpc = tonic::server::Grpc::new(codec) 703 | .apply_compression_config( 704 | accept_compression_encodings, 705 | send_compression_encodings, 706 | ); 707 | let res = grpc.unary(method, req).await; 708 | Ok(res) 709 | }; 710 | Box::pin(fut) 711 | } 712 | "/bidding_service.BiddingService/UpdateNewLandedBlocksDetected" => { 713 | #[allow(non_camel_case_types)] 714 | struct UpdateNewLandedBlocksDetectedSvc( 715 | pub Arc, 716 | ); 717 | impl< 718 | T: BiddingService, 719 | > tonic::server::UnaryService 720 | for UpdateNewLandedBlocksDetectedSvc { 721 | type Response = super::Empty; 722 | type Future = BoxFuture< 723 | tonic::Response, 724 | tonic::Status, 725 | >; 726 | fn call( 727 | &mut self, 728 | request: tonic::Request, 729 | ) -> Self::Future { 730 | let inner = self.0.clone(); 731 | let fut = async move { 732 | (*inner).update_new_landed_blocks_detected(request).await 733 | }; 734 | Box::pin(fut) 735 | } 736 | } 737 | let accept_compression_encodings = self.accept_compression_encodings; 738 | let send_compression_encodings = self.send_compression_encodings; 739 | let inner = self.inner.clone(); 740 | let fut = async move { 741 | let inner = inner.0; 742 | let method = UpdateNewLandedBlocksDetectedSvc(inner); 743 | let codec = tonic::codec::ProstCodec::default(); 744 | let mut grpc = tonic::server::Grpc::new(codec) 745 | .apply_compression_config( 746 | accept_compression_encodings, 747 | send_compression_encodings, 748 | ); 749 | let res = grpc.unary(method, req).await; 750 | Ok(res) 751 | }; 752 | Box::pin(fut) 753 | } 754 | "/bidding_service.BiddingService/UpdateFailedReadingNewLandedBlocks" => { 755 | #[allow(non_camel_case_types)] 756 | struct 
UpdateFailedReadingNewLandedBlocksSvc( 757 | pub Arc, 758 | ); 759 | impl tonic::server::UnaryService 760 | for UpdateFailedReadingNewLandedBlocksSvc { 761 | type Response = super::Empty; 762 | type Future = BoxFuture< 763 | tonic::Response, 764 | tonic::Status, 765 | >; 766 | fn call( 767 | &mut self, 768 | request: tonic::Request, 769 | ) -> Self::Future { 770 | let inner = self.0.clone(); 771 | let fut = async move { 772 | (*inner) 773 | .update_failed_reading_new_landed_blocks(request) 774 | .await 775 | }; 776 | Box::pin(fut) 777 | } 778 | } 779 | let accept_compression_encodings = self.accept_compression_encodings; 780 | let send_compression_encodings = self.send_compression_encodings; 781 | let inner = self.inner.clone(); 782 | let fut = async move { 783 | let inner = inner.0; 784 | let method = UpdateFailedReadingNewLandedBlocksSvc(inner); 785 | let codec = tonic::codec::ProstCodec::default(); 786 | let mut grpc = tonic::server::Grpc::new(codec) 787 | .apply_compression_config( 788 | accept_compression_encodings, 789 | send_compression_encodings, 790 | ); 791 | let res = grpc.unary(method, req).await; 792 | Ok(res) 793 | }; 794 | Box::pin(fut) 795 | } 796 | "/bidding_service.BiddingService/UpdateNewBid" => { 797 | #[allow(non_camel_case_types)] 798 | struct UpdateNewBidSvc(pub Arc); 799 | impl< 800 | T: BiddingService, 801 | > tonic::server::UnaryService 802 | for UpdateNewBidSvc { 803 | type Response = super::Empty; 804 | type Future = BoxFuture< 805 | tonic::Response, 806 | tonic::Status, 807 | >; 808 | fn call( 809 | &mut self, 810 | request: tonic::Request, 811 | ) -> Self::Future { 812 | let inner = self.0.clone(); 813 | let fut = async move { 814 | (*inner).update_new_bid(request).await 815 | }; 816 | Box::pin(fut) 817 | } 818 | } 819 | let accept_compression_encodings = self.accept_compression_encodings; 820 | let send_compression_encodings = self.send_compression_encodings; 821 | let inner = self.inner.clone(); 822 | let fut = async move { 823 | let inner = inner.0; 824 | let method = UpdateNewBidSvc(inner); 825 | let codec = tonic::codec::ProstCodec::default(); 826 | let mut grpc = tonic::server::Grpc::new(codec) 827 | .apply_compression_config( 828 | accept_compression_encodings, 829 | send_compression_encodings, 830 | ); 831 | let res = grpc.unary(method, req).await; 832 | Ok(res) 833 | }; 834 | Box::pin(fut) 835 | } 836 | "/bidding_service.BiddingService/NewBlock" => { 837 | #[allow(non_camel_case_types)] 838 | struct NewBlockSvc(pub Arc); 839 | impl< 840 | T: BiddingService, 841 | > tonic::server::UnaryService 842 | for NewBlockSvc { 843 | type Response = super::Empty; 844 | type Future = BoxFuture< 845 | tonic::Response, 846 | tonic::Status, 847 | >; 848 | fn call( 849 | &mut self, 850 | request: tonic::Request, 851 | ) -> Self::Future { 852 | let inner = self.0.clone(); 853 | let fut = async move { (*inner).new_block(request).await }; 854 | Box::pin(fut) 855 | } 856 | } 857 | let accept_compression_encodings = self.accept_compression_encodings; 858 | let send_compression_encodings = self.send_compression_encodings; 859 | let inner = self.inner.clone(); 860 | let fut = async move { 861 | let inner = inner.0; 862 | let method = NewBlockSvc(inner); 863 | let codec = tonic::codec::ProstCodec::default(); 864 | let mut grpc = tonic::server::Grpc::new(codec) 865 | .apply_compression_config( 866 | accept_compression_encodings, 867 | send_compression_encodings, 868 | ); 869 | let res = grpc.unary(method, req).await; 870 | Ok(res) 871 | }; 872 | Box::pin(fut) 873 | } 874 | _ 
=> { 875 | Box::pin(async move { 876 | Ok( 877 | http::Response::builder() 878 | .status(200) 879 | .header("grpc-status", "12") 880 | .header("content-type", "application/grpc") 881 | .body(empty_body()) 882 | .unwrap(), 883 | ) 884 | }) 885 | } 886 | } 887 | } 888 | } 889 | impl Clone for BiddingServiceServer { 890 | fn clone(&self) -> Self { 891 | let inner = self.inner.clone(); 892 | Self { 893 | inner, 894 | accept_compression_encodings: self.accept_compression_encodings, 895 | send_compression_encodings: self.send_compression_encodings, 896 | } 897 | } 898 | } 899 | impl Clone for _Inner { 900 | fn clone(&self) -> Self { 901 | Self(self.0.clone()) 902 | } 903 | } 904 | impl std::fmt::Debug for _Inner { 905 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 906 | write!(f, "{:?}", self.0) 907 | } 908 | } 909 | impl tonic::server::NamedService for BiddingServiceServer { 910 | const NAME: &'static str = "bidding_service.BiddingService"; 911 | } 912 | } 913 | --------------------------------------------------------------------------------
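
The doc comments on the generated BiddingServiceClient above describe the intended call order: connect, Initialize, then one CreateSlotBidder per slot whose Callback stream carries bids and coinbase-mode changes, and finally DestroySlotBidder. The sketch below is a minimal, hypothetical driver of that flow and is not part of the crate: the endpoint address, session_id, block/slot numbers, parent hash and timestamp are placeholder values, the module paths are assumed from the crate layout, and in the real builder the connection goes through BiddingServiceClientAdapter using the configured bidding_service_ipc_path rather than a plain tonic endpoint.

// Hypothetical usage sketch (not part of the repository); all literal values are placeholders.
use rbuilder_operator::bidding_service_wrapper::{
    bidding_service_client::BiddingServiceClient, CreateSlotBidderParams,
    DestroySlotBidderParams, Empty,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // 1. Connect and Initialize; every other call fails before Initialize.
    let mut client = BiddingServiceClient::connect("http://127.0.0.1:50051").await?;
    let version = client.initialize(Empty {}).await?.into_inner();
    println!("bidder git_commit: {}", version.git_commit);

    // 2. Create a SlotBidder for one slot; the response is a server stream of Callback.
    let session_id = 1; // placeholder id, reused in later calls for this slot
    let mut callbacks = client
        .create_slot_bidder(CreateSlotBidderParams {
            block: 20_000_000,             // placeholder block number
            slot: 9_000_000,               // placeholder slot number
            parent_hash: vec![0u8; 32],    // 32-byte parent hash
            session_id,
            slot_timestamp: 1_700_000_000, // placeholder unix ts
        })
        .await?
        .into_inner();

    // 3. Drain the Callback stream: exactly one field is set per message.
    while let Some(cb) = callbacks.message().await? {
        if let Some(bid) = cb.bid {
            println!("bid for block_id {}", bid.block_id);
        }
        if let Some(flag) = cb.can_use_suggested_fee_recipient_as_coinbase_change {
            println!("can_use_suggested_fee_recipient_as_coinbase: {flag}");
        }
    }

    // 4. Tear down the per-slot state once the slot is over.
    client
        .destroy_slot_bidder(DestroySlotBidderParams { session_id })
        .await?;
    Ok(())
}

Pushing can_use_suggested_fee_recipient_as_coinbase changes over the same Callback stream, instead of a reverse RPC, is what keeps per-block RPC traffic down, as the generated doc comment notes.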