├── docker
│   ├── .gitignore
│   ├── grafana
│   │   └── provisioning
│   │       ├── dashboards
│   │       │   └── dashboard.yml
│   │       └── datasources
│   │           └── datasource.yml
│   ├── prometheus.yml
│   ├── start-op-erigon.sh
│   ├── README.md
│   ├── start-magi.sh
│   ├── start-op-challenger.sh
│   ├── start-op-geth.sh
│   ├── .env.default
│   └── docker-compose.yml
├── .dockerignore
├── .gitignore
├── src
│   ├── derive
│   │   ├── purgeable.rs
│   │   ├── stages
│   │   │   ├── mod.rs
│   │   │   ├── block_input.rs
│   │   │   ├── single_batch.rs
│   │   │   └── batcher_transactions.rs
│   │   ├── state.rs
│   │   └── mod.rs
│   ├── network
│   │   ├── mod.rs
│   │   ├── handlers
│   │   │   └── mod.rs
│   │   └── service
│   │       ├── types.rs
│   │       ├── discovery.rs
│   │       └── mod.rs
│   ├── l1
│   │   ├── mod.rs
│   │   ├── l1_info.rs
│   │   ├── config_updates.rs
│   │   ├── blob_encoding.rs
│   │   └── blob_fetcher.rs
│   ├── telemetry
│   │   ├── metrics.rs
│   │   ├── mod.rs
│   │   └── logging.rs
│   ├── engine
│   │   ├── types.rs
│   │   ├── mod.rs
│   │   ├── mock_engine.rs
│   │   ├── fork.rs
│   │   ├── traits.rs
│   │   ├── auth.rs
│   │   ├── payload.rs
│   │   └── api.rs
│   ├── version
│   │   └── mod.rs
│   ├── lib.rs
│   ├── common
│   │   ├── mod.rs
│   │   └── attributes_deposited.rs
│   ├── driver
│   │   ├── info.rs
│   │   └── engine_driver.rs
│   ├── rpc
│   │   └── mod.rs
│   └── runner
│       └── mod.rs
├── platform.sh
├── bin
│   ├── network.rs
│   └── magi.rs
├── Dockerfile
├── .github
│   └── workflows
│       ├── publish-docker.yml
│       └── test.yml
├── Justfile
├── Cargo.toml
├── README.md
└── docs
    ├── devnet.md
    └── architecture.md

/docker/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git/
2 | .github/
3 | .gitignore
4 | target/
5 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **target
2 |
3 | **.env
4 |
5 | **.DS_Store
6 |
7 | .platform
8 | .compiler
9 | .rustflags
10 |
--------------------------------------------------------------------------------
/src/derive/purgeable.rs:
--------------------------------------------------------------------------------
1 | /// Iterator that can purge itself
2 | pub trait PurgeableIterator: Iterator {
3 |     /// Purges and resets an iterator
4 |     fn purge(&mut self);
5 | }
6 |
--------------------------------------------------------------------------------
/src/network/mod.rs:
--------------------------------------------------------------------------------
1 | /// A module for managing incoming p2p gossip messages
2 | pub mod handlers;
3 | /// A module for managing the Discv5 discovery & libp2p services
4 | pub mod service;
5 |
--------------------------------------------------------------------------------
/docker/grafana/provisioning/dashboards/dashboard.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 |   - name: 'Dashboards'
5 |     orgId: 1
6 |     folder: ''
7 |     type: file
8 |     disableDeletion: false
9 |     editable: true
10 |     options:
11 |       path: /etc/grafana/provisioning/dashboards
12 |
--------------------------------------------------------------------------------
/docker/grafana/provisioning/datasources/datasource.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | deleteDatasources:
4 |   - name: 'Prometheus'
5 |
6 | datasources:
7 |   - access: 'proxy'
8 |     editable: true
9 |     is_default: true
10 |     name: 'Prometheus'
11 |     uid: 'hFwPqYB4z'
12 |     org_id: 1
13 |     type: 'prometheus'
14 |
url: 'http://prometheus:9090' 15 | version: 1 16 | -------------------------------------------------------------------------------- /docker/prometheus.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: node-exporter 3 | scrape_interval: 5s 4 | static_configs: 5 | - targets: 6 | - node-exporter:9100 7 | 8 | - job_name: magi 9 | scrape_interval: 5s 10 | static_configs: 11 | - targets: 12 | - magi:9200 13 | 14 | - job_name: op-challenger 15 | scrape_interval: 5s 16 | static_configs: 17 | - targets: 18 | - op-challenger:9201 19 | -------------------------------------------------------------------------------- /platform.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | case $TARGETARCH in 4 | "amd64") 5 | echo "x86_64-unknown-linux-musl" > /.platform 6 | echo "musl-tools gcc-x86-64-linux-gnu" > /.compiler 7 | echo "-C linker=x86_64-linux-gnu-gcc" > /.rustflags 8 | ;; 9 | "arm64") 10 | echo "aarch64-unknown-linux-gnu" > /.platform 11 | echo "gcc-aarch64-linux-gnu" > /.compiler 12 | echo "" > /.rustflags 13 | ;; 14 | esac 15 | -------------------------------------------------------------------------------- /docker/start-op-erigon.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | DATADIR=/data/erigon 5 | 6 | echo $JWT_SECRET > jwtsecret.txt 7 | 8 | exec erigon \ 9 | --datadir=$DATADIR \ 10 | --private.api.addr=localhost:9090 \ 11 | --http.addr=0.0.0.0 \ 12 | --http.port=8545 \ 13 | --http.corsdomain="*" \ 14 | --http.vhosts="*" \ 15 | --authrpc.addr=0.0.0.0 \ 16 | --authrpc.port=8551 \ 17 | --authrpc.vhosts="*" \ 18 | --authrpc.jwtsecret=/jwtsecret.txt \ 19 | --rollup.sequencerhttp="https://sepolia.optimism.io" \ 20 | -------------------------------------------------------------------------------- /src/derive/stages/mod.rs: -------------------------------------------------------------------------------- 1 | /// A module to handle the payload attributes derivation stage. 2 | pub mod attributes; 3 | 4 | /// A module to handle batcher transactions and frames. 5 | pub mod batcher_transactions; 6 | 7 | /// A module to handle processing of a Batch. 8 | pub mod batches; 9 | 10 | /// A module to handle building a Block Input. 11 | mod block_input; 12 | 13 | /// A module to handle the channel bank derivation stage. 14 | pub mod channels; 15 | 16 | /// A module to handle processing of a Single Batch. 17 | mod single_batch; 18 | 19 | /// A module to handle processing of a Span Batch. 20 | mod span_batch; 21 | -------------------------------------------------------------------------------- /src/network/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | use libp2p::gossipsub::{Message, MessageAcceptance, TopicHash}; 2 | 3 | /// A module for managing incoming p2p gossip messages 4 | pub mod block_handler; 5 | 6 | /// This trait defines the functionality required to process incoming messages 7 | /// and determine their acceptance within the network. Implementors of this trait 8 | /// can specify how messages are handled and which topics they are interested in. 
9 | pub trait Handler: Send {
10 |     /// Manages validation and further processing of messages
11 |     fn handle(&self, msg: Message) -> MessageAcceptance;
12 |     /// Specifies which topics the handler is interested in
13 |     fn topics(&self) -> Vec<TopicHash>;
14 | }
15 |
--------------------------------------------------------------------------------
/src/l1/mod.rs:
--------------------------------------------------------------------------------
1 | /// Module responsible for listening to the L1 chain and monitoring for new
2 | /// blocks and events.
3 | pub mod chain_watcher;
4 | pub use chain_watcher::{BlockUpdate, ChainWatcher};
5 |
6 | /// Module responsible for parsing logs to extract system config updates
7 | pub mod config_updates;
8 | pub use config_updates::SystemConfigUpdate;
9 |
10 | /// L1 block info
11 | pub mod l1_info;
12 | pub use l1_info::L1Info;
13 |
14 | /// Module responsible for extracting batcher transaction data from
15 | /// L1 batcher transaction data or blobs (after the Ecotone hardfork)
16 | pub mod blob_fetcher;
17 | pub use blob_fetcher::{BlobFetcher, BlobSidecar};
18 |
19 | /// Helper module for decoding blob data
20 | pub mod blob_encoding;
21 | pub use blob_encoding::decode_blob_data;
22 |
--------------------------------------------------------------------------------
/bin/network.rs:
--------------------------------------------------------------------------------
1 | use std::str::FromStr;
2 |
3 | use ethers::types::Address;
4 | use eyre::Result;
5 |
6 | use magi::{
7 |     network::{handlers::block_handler::BlockHandler, service::Service},
8 |     telemetry,
9 | };
10 | use tokio::sync::watch;
11 |
12 | #[tokio::main]
13 | async fn main() -> Result<()> {
14 |     let _guards = telemetry::init(false, None, None);
15 |
16 |     let addr = "0.0.0.0:9876".parse()?;
17 |     let chain_id = 420;
18 |     let (_, recv) = watch::channel(Address::from_str(
19 |         "0x715b7219d986641df9efd9c7ef01218d528e19ec",
20 |     )?);
21 |     let (block_handler, block_recv) = BlockHandler::new(chain_id, recv);
22 |
23 |     Service::new(addr, chain_id)
24 |         .add_handler(Box::new(block_handler))
25 |         .start()?;
26 |
27 |     while let Ok(payload) = block_recv.recv() {
28 |         tracing::info!("received unsafe block with hash: {:?}", payload.block_hash);
29 |     }
30 |
31 |     Ok(())
32 | }
33 |
--------------------------------------------------------------------------------
/src/telemetry/metrics.rs:
--------------------------------------------------------------------------------
1 | use eyre::{Result, WrapErr};
2 | use lazy_static::lazy_static;
3 | use prometheus_exporter::{
4 |     prometheus::{register_int_gauge, IntGauge},
5 |     start,
6 | };
7 |
8 | lazy_static! {
9 |     /// Tracks the block number of the most recent finalized head.
10 |     pub static ref FINALIZED_HEAD: IntGauge =
11 |         register_int_gauge!("finalized_head", "finalized head number").unwrap();
12 |     /// Tracks the block number considered to be the safe head.
13 | pub static ref SAFE_HEAD: IntGauge = 14 | register_int_gauge!("safe_head", "safe head number").unwrap(); 15 | /// Monitors if the node is fully synced 16 | pub static ref SYNCED: IntGauge = register_int_gauge!("synced", "synced flag").unwrap(); 17 | } 18 | 19 | /// Starts the metrics server on port 9200 20 | pub fn init() -> Result<()> { 21 | start("0.0.0.0:9200".parse().wrap_err("Could not parse address")?)?; 22 | Ok(()) 23 | } 24 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | ## Infra 2 | 3 | This contains a simple docker setup for running magi and op-geth. 4 | 5 | ## Running 6 | 7 | Begin by copying `.env.default` to `.env`. You can set the network to sync to by changing the `NETWORK` value (supported options are optimism-sepolia and base-sepolia). 8 | Make sure to set the `L1_RPC_URL` value to a valid RPC URL for the L1 being used by the given network. If you are running in production, you may also want to set a secure `JWT_SECRET` value. 9 | You can create a new secret by running `openssl rand -hex 32`. 10 | 11 | To run both magi and op-geth together, run `docker compose up`. To run just op-geth without magi for local development, run `COMPOSE_PROFILES=no-magi docker compose up` 12 | 13 | ## Troubleshooting 14 | 15 | If you are getting `permission denied` errors when attempting to run `docker-compose`, try `sudo docker compose` instead. This is often required when running docker depending on how it was installed. 16 | -------------------------------------------------------------------------------- /src/engine/types.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | /// The default engine api authentication port. 4 | pub const DEFAULT_AUTH_PORT: u16 = 8551; 5 | 6 | /// The ID of the static payload 7 | pub const STATIC_ID: u32 = 1; 8 | 9 | /// The json rpc version string 10 | pub const JSONRPC_VERSION: &str = "2.0"; 11 | 12 | /// The new payload method string 13 | pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; 14 | 15 | /// The new payload timeout 16 | pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); 17 | 18 | /// The get payload method string 19 | pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; 20 | 21 | /// The get payload timeout 22 | pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); 23 | 24 | /// The forkchoice updated method string 25 | pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; 26 | 27 | /// The forkchoice updated timeout 28 | pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); 29 | -------------------------------------------------------------------------------- /src/telemetry/mod.rs: -------------------------------------------------------------------------------- 1 | #![deny(missing_debug_implementations)] 2 | 3 | //! Telemetry module 4 | //! 5 | //! This module encompasses telemetry for `magi`. 6 | //! Core components are described below. 7 | //! 8 | //! ### Logging 9 | //! 10 | //! Logging is constructed using the [tracing](https://crates.io/crates/tracing) crate. 11 | //! The `tracing` crate is a framework for instrumenting Rust programs to collect 12 | //! structured, event-based diagnostic information. You can use the [crate::telemetry::init] function 13 | //! 
to initialize a global logger, passing in a boolean `verbose` parameter. This function
14 | //! will return an error if a logger has already been initialized.
15 | //!
16 |
17 | /// The Logging Module
18 | pub mod logging;
19 |
20 | /// Prometheus metrics
21 | pub mod metrics;
22 |
23 | // Re-export inner modules
24 | pub use logging::*;
25 |
26 | /// Export a prelude to re-export common traits and types
27 | pub mod prelude {
28 |     pub use super::*;
29 |     pub use tracing::{debug, error, info, span, trace, warn, Level};
30 |     pub use tracing_subscriber::{fmt, prelude::*};
31 | }
32 |
--------------------------------------------------------------------------------
/src/engine/mod.rs:
--------------------------------------------------------------------------------
1 | #![warn(unreachable_pub)]
2 | #![deny(missing_docs, missing_debug_implementations)]
3 |
4 | /// Payload Types
5 | mod payload;
6 | pub use payload::*;
7 |
8 | /// Forkchoice Types
9 | mod fork;
10 | pub use fork::*;
11 |
12 | /// The Engine API
13 | mod api;
14 | pub use api::*;
15 |
16 | /// Auth module
17 | mod auth;
18 | pub use auth::*;
19 |
20 | /// Common Types
21 | mod types;
22 | pub use types::*;
23 |
24 | /// Core Trait
25 | mod traits;
26 | pub use traits::*;
27 |
28 | /// Mock Engine
29 | mod mock_engine;
30 | pub use mock_engine::*;
31 |
32 | #[cfg(test)]
33 | mod tests {
34 |     use crate::engine::EngineApi;
35 |
36 |     #[test]
37 |     fn test_engine_api() {
38 |         let jwt_secret = "bf549f5188556ce0951048ef467ec93067bc4ea21acebe46ef675cd4e8e015ff";
39 |         let url = "http://localhost:8551";
40 |
41 |         let engine_api = EngineApi::new(url, jwt_secret);
42 |
43 |         let base_body = engine_api.base_body();
44 |         assert_eq!(base_body.get("jsonrpc").unwrap(), "2.0");
45 |         assert_eq!(base_body.get("id").unwrap(), 1);
46 |     }
47 | }
48 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM --platform=$BUILDPLATFORM debian:bullseye-slim as base
2 | RUN apt update && apt install -y libudev-dev build-essential ca-certificates clang curl git libpq-dev libssl-dev pkg-config lsof lld
3 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
4 | ENV PATH "$PATH:/root/.cargo/bin"
5 |
6 | FROM base as build
7 | WORKDIR /magi
8 | ARG TARGETARCH
9 | COPY ./platform.sh .
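# platform.sh (listed earlier in this dump) writes three files that the steps
# below consume:
#   /.platform  : the Rust target triple matching $TARGETARCH
#   /.compiler  : apt packages providing the corresponding C cross-toolchain
#   /.rustflags : linker flags passed to cargo through RUSTFLAGS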
10 | RUN ./platform.sh
11 | RUN rustup target add $(cat /.platform)
12 | RUN apt-get update && apt-get install -y $(cat /.compiler)
13 | ENV CC_x86_64_unknown_linux_musl=x86_64-linux-gnu-gcc
14 |
15 | COPY Cargo.toml Cargo.lock ./
16 | RUN mkdir -p src/bin && echo "fn main() {}" > src/bin/dummy.rs
17 | RUN RUSTFLAGS="$(cat /.rustflags)" cargo build --release --config net.git-fetch-with-cli=true --target $(cat /.platform) --bin dummy
18 |
19 | COPY ./ ./
20 | RUN RUSTFLAGS="$(cat /.rustflags)" cargo build --release --config net.git-fetch-with-cli=true --target $(cat /.platform)
21 | RUN cp /magi/target/$(cat /.platform)/release/magi /magi/magi
22 |
23 | FROM debian:bullseye-slim
24 | RUN apt-get update && apt-get install -y libssl-dev ca-certificates && rm -rf /var/lib/apt/lists/*
25 | COPY --from=build /magi/magi /usr/local/bin
26 |
--------------------------------------------------------------------------------
/src/version/mod.rs:
--------------------------------------------------------------------------------
1 | /// Represents the Magi version
2 | #[derive(Debug)]
3 | pub struct Version {
4 |     /// The package name specified in `Cargo.toml`
5 |     name: String,
6 |     /// The package version specified in `Cargo.toml`
7 |     version: String,
8 |     /// `Dev` if compiled in debug mode. `Release` otherwise.
9 |     meta: String,
10 | }
11 |
12 | impl Version {
13 |     /// Builds and returns a [Version] struct
14 |     pub fn build() -> Self {
15 |         let meta = if cfg!(debug_assertions) {
16 |             "dev"
17 |         } else {
18 |             "release"
19 |         };
20 |
21 |         Version {
22 |             name: env!("CARGO_PKG_NAME").to_string(),
23 |             version: env!("CARGO_PKG_VERSION").to_string(),
24 |             meta: meta.to_string(),
25 |         }
26 |     }
27 | }
28 |
29 | impl std::fmt::Display for Version {
30 |     /// Formatted as: {name}{version}-{meta}
31 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
32 |         write!(f, "{}{}-{}", self.name, self.version, self.meta)
33 |     }
34 | }
35 |
36 | #[cfg(test)]
37 | mod tests {
38 |     use crate::version::Version;
39 |
40 |     #[test]
41 |     fn version() {
42 |         let version = Version::build();
43 |         assert!(version.to_string() == "magi0.1.0-dev");
44 |     }
45 | }
46 |
--------------------------------------------------------------------------------
/docker/start-magi.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | DEVNET=""
5 |
6 | if [ $NETWORK = "custom" ] || [ $NETWORK = "devnet" ]
7 | then
8 |     if [ "$NETWORK" = "devnet" ]
9 |     then
10 |         DEVNET="--devnet"
11 |     fi
12 |     NETWORK="./rollup.json"
13 | fi
14 |
15 | if [ $SYNC_MODE = "full" ]
16 | then
17 |     exec magi \
18 |         --network $NETWORK \
19 |         --jwt-secret $JWT_SECRET \
20 |         --l1-rpc-url $L1_RPC_URL \
21 |         --l1-beacon-url $L1_BEACON_RPC_URL \
22 |         --l2-rpc-url http://${EXECUTION_CLIENT}:8545 \
23 |         --l2-engine-url http://${EXECUTION_CLIENT}:8551 \
24 |         --rpc-port $RPC_PORT \
25 |         --rpc-addr $RPC_ADDR \
26 |         $DEVNET \
27 |         --sync-mode $SYNC_MODE
28 | elif [ $SYNC_MODE = "checkpoint" ]
29 | then
30 |     exec magi \
31 |         --network $NETWORK \
32 |         --jwt-secret $JWT_SECRET \
33 |         --l1-rpc-url $L1_RPC_URL \
34 |         --l1-beacon-url $L1_BEACON_RPC_URL \
35 |         --l2-rpc-url http://${EXECUTION_CLIENT}:8545 \
36 |         --l2-engine-url http://${EXECUTION_CLIENT}:8551 \
37 |         --rpc-port $RPC_PORT \
38 |         --rpc-addr $RPC_ADDR \
39 |         $DEVNET \
40 |         --sync-mode $SYNC_MODE \
41 |         --checkpoint-sync-url $CHECKPOINT_SYNC_URL \
42 |         --checkpoint-hash $CHECKPOINT_HASH
43 | else
44 |     echo "Sync mode not recognized. Available options are full and checkpoint"
45 |     exit 1
46 | fi
47 |
--------------------------------------------------------------------------------
/src/derive/stages/block_input.rs:
--------------------------------------------------------------------------------
1 | use std::sync::{Arc, RwLock};
2 |
3 | use eyre::Result;
4 |
5 | use crate::{
6 |     common::{Epoch, RawTransaction},
7 |     derive::state::State,
8 | };
9 |
10 | /// A marker trait to allow representing an epoch as either a block number or an [Epoch]
11 | pub trait EpochType {}
12 | impl EpochType for u64 {}
13 | impl EpochType for Epoch {}
14 |
15 | /// A single L2 block derived from a batch.
16 | #[derive(Debug)]
17 | pub struct BlockInput<E: EpochType> {
18 |     /// Timestamp of the L2 block
19 |     pub timestamp: u64,
20 |     /// The corresponding epoch
21 |     pub epoch: E,
22 |     /// Transactions included in this block
23 |     pub transactions: Vec<RawTransaction>,
24 |     /// The L1 block this batch was fully derived from
25 |     pub l1_inclusion_block: u64,
26 | }
27 |
28 | impl BlockInput<u64> {
29 |     /// Returns the Block Input with full [Epoch] details.
30 |     pub fn with_full_epoch(self, state: &Arc<RwLock<State>>) -> Result<BlockInput<Epoch>> {
31 |         let state = state.read().map_err(|_| eyre::eyre!("lock poisoned"))?;
32 |         let epoch = state
33 |             .epoch_by_number(self.epoch)
34 |             .ok_or(eyre::eyre!("epoch not found"))?;
35 |
36 |         Ok(BlockInput {
37 |             timestamp: self.timestamp,
38 |             epoch,
39 |             transactions: self.transactions,
40 |             l1_inclusion_block: self.l1_inclusion_block,
41 |         })
42 |     }
43 | }
44 |
--------------------------------------------------------------------------------
/docker/start-op-challenger.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | if [ $NETWORK = "optimism-sepolia" ]
5 | then
6 |     DISPUTE_GAME_FACTORY=0x000000000000000000000000000000000000dEaD # TODO: Update after deployment
7 |     L2_OUTPUT_ORACLE=0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0
8 | elif [ $NETWORK = "base-sepolia" ]
9 | then
10 |     DISPUTE_GAME_FACTORY=0x000000000000000000000000000000000000dEaD # TODO: Update after deployment
11 |     L2_OUTPUT_ORACLE=0x2A35891ff30313CcFa6CE88dcf3858bb075A2298
12 | else
13 |     echo "Network not recognized. Available options are optimism-sepolia and base-sepolia"
14 |     exit 1
15 | fi
16 |
17 | if [ $OP_CHALLENGER_MODE = "listen-only" ]
18 | then
19 |     exec op-challenger \
20 |         --l1-ws-endpoint ${L1_WS_RPC_URL} \
21 |         --trusted-op-node-endpoint http://magi:${RPC_PORT} \
22 |         --dispute-game-factory $DISPUTE_GAME_FACTORY \
23 |         --l2-output-oracle $L2_OUTPUT_ORACLE \
24 |         --mode listen-only \
25 |         -vv
26 | elif [ $OP_CHALLENGER_MODE = "listen-and-respond" ]
27 | then
28 |     exec op-challenger \
29 |         --l1-ws-endpoint ${L1_WS_RPC_URL} \
30 |         --trusted-op-node-endpoint http://magi:${RPC_PORT} \
31 |         --signer-key $OP_CHALLENGER_SIGNER_KEY \
32 |         --dispute-game-factory $DISPUTE_GAME_FACTORY \
33 |         --l2-output-oracle $L2_OUTPUT_ORACLE \
34 |         --mode listen-and-respond \
35 |         -vv
36 | else
37 |     echo "Challenger mode not recognized. Available options are listen-only and listen-and-respond"
38 |     exit 1
39 | fi
40 |
--------------------------------------------------------------------------------
/src/engine/mock_engine.rs:
--------------------------------------------------------------------------------
1 | use async_trait::async_trait;
2 | use eyre::Result;
3 |
4 | use super::{
5 |     Engine, ExecutionPayload, ForkChoiceUpdate, ForkchoiceState, PayloadAttributes, PayloadId,
6 |     PayloadStatus,
7 | };
8 |
9 | /// Mock L2 Engine API that returns preset responses
10 | #[derive(Debug, Clone)]
11 | pub struct MockEngine {
12 |     /// Forkchoice updated call response when payload is Some
13 |     pub forkchoice_updated_payloads_res: ForkChoiceUpdate,
14 |     /// Forkchoice updated call response when payload is None
15 |     pub forkchoice_updated_res: ForkChoiceUpdate,
16 |     /// New payload call response
17 |     pub new_payload_res: PayloadStatus,
18 |     /// Get payload call response
19 |     pub get_payload_res: ExecutionPayload,
20 | }
21 |
22 | #[async_trait]
23 | impl Engine for MockEngine {
24 |     async fn forkchoice_updated(
25 |         &self,
26 |         _forkchoice_state: ForkchoiceState,
27 |         payload_attributes: Option<PayloadAttributes>,
28 |     ) -> Result<ForkChoiceUpdate> {
29 |         Ok(if payload_attributes.is_some() {
30 |             self.forkchoice_updated_payloads_res.clone()
31 |         } else {
32 |             self.forkchoice_updated_res.clone()
33 |         })
34 |     }
35 |
36 |     async fn new_payload(&self, _execution_payload: ExecutionPayload) -> Result<PayloadStatus> {
37 |         Ok(self.new_payload_res.clone())
38 |     }
39 |
40 |     async fn get_payload(&self, _payload_id: PayloadId) -> Result<ExecutionPayload> {
41 |         Ok(self.get_payload_res.clone())
42 |     }
43 | }
44 |
--------------------------------------------------------------------------------
/.github/workflows/publish-docker.yml:
--------------------------------------------------------------------------------
1 | name: Build and Publish Docker
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - master
7 |   workflow_dispatch: {}
8 |
9 | env:
10 |   REGISTRY: docker.io
11 |   IMAGE_NAME: a16zcrypto/magi
12 |
13 | jobs:
14 |   container:
15 |     runs-on: ubuntu-latest
16 |     permissions:
17 |       id-token: write
18 |       packages: write
19 |       contents: read
20 |     timeout-minutes: 60
21 |
22 |     steps:
23 |       - name: Checkout repository
24 |         id: checkout
25 |         uses: actions/checkout@v4
26 |
27 |       - name: Install Docker BuildX
28 |         uses: docker/setup-buildx-action@v3
29 |         id: buildx
30 |         with:
31 |           install: true
32 |
33 |       - name: Log into Docker Hub
34 |         uses: docker/login-action@v3
35 |         with:
36 |           registry: ${{ env.REGISTRY }}
37 |           username: ${{ secrets.DOCKER_USERNAME }}
38 |           password: ${{ secrets.DOCKER_PASSWORD }}
39 |
40 |       - name: Finalize Docker Metadata
41 |         id: docker_tagging
42 |         run: |
43 |           echo "::set-output name=docker_tags::${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF##*/}"
44 |
45 |       - name: Set up Docker Buildx cache
46 |         uses: actions/cache@v4
47 |         with:
48 |           path: /tmp/.buildx-cache
49 |           key: ${{ runner.os }}-buildx-${{ github.sha }}
50 |           restore-keys: |
51 |             ${{ runner.os }}-buildx-
52 |
53 |       - name: Build and push Docker image
54 |         uses: docker/build-push-action@v5
55 |         with:
56 |           context: .
57 |           push: true
58 |           tags: ${{ steps.docker_tagging.outputs.docker_tags }}
59 |           cache-from: type=local,src=/tmp/.buildx-cache
60 |           cache-to: type=local,dest=/tmp/.buildx-cache
61 |
--------------------------------------------------------------------------------
/src/engine/fork.rs:
--------------------------------------------------------------------------------
1 | use ethers::types::H256;
2 | use serde::{Deserialize, Serialize};
3 |
4 | use super::{PayloadId, PayloadStatus};
5 |
6 | /// The result of a fork choice update.
7 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
8 | #[serde(rename_all = "camelCase")]
9 | pub struct ForkChoiceUpdate {
10 |     /// Payload status.
11 |     /// Note: values of the status field in the context of this method are restricted to the following subset: VALID, INVALID, SYNCING.
12 |     pub payload_status: PayloadStatus,
13 |     /// 8 byte identifier of the payload build process or null
14 |     pub payload_id: Option<PayloadId>,
15 | }
16 |
17 | /// ## ForkchoiceStateV1
18 | ///
19 | /// Note: [ForkchoiceState.safe_block_hash] and [ForkchoiceState.finalized_block_hash] fields are allowed to have
20 | /// 0x0000000000000000000000000000000000000000000000000000000000000000 value unless transition block is finalized.
21 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)]
22 | #[serde(rename_all = "camelCase")]
23 | pub struct ForkchoiceState {
24 |     /// 32 byte block hash of the head of the canonical chain
25 |     pub head_block_hash: H256,
26 |     /// 32 byte "safe" block hash of the canonical chain under certain synchrony and honesty assumptions
27 |     /// This value MUST be either equal to or an ancestor of headBlockHash
28 |     pub safe_block_hash: H256,
29 |     /// 32 byte block hash of the most recent finalized block
30 |     pub finalized_block_hash: H256,
31 | }
32 |
33 | impl ForkchoiceState {
34 |     /// Creates a new fork choice state with the given head block hash.
35 |     /// The safe and finalized block hashes are set to the head block hash.
36 |     pub fn from_single_head(head_block_hash: H256) -> Self {
37 |         Self {
38 |             head_block_hash,
39 |             safe_block_hash: head_block_hash,
40 |             finalized_block_hash: head_block_hash,
41 |         }
42 |     }
43 | }
44 |
--------------------------------------------------------------------------------
/Justfile:
--------------------------------------------------------------------------------
1 | set positional-arguments
2 | set dotenv-load := true
3 |
4 | alias t := test
5 | alias b := build-all
6 | alias l := lint
7 | alias f := fmt
8 |
9 | # default recipe to display help information
10 | default:
11 |     @just --list
12 |
13 | # Builds all docker images
14 | build-all:
15 |     docker buildx build --platform linux/arm64,linux/amd64 -t a16zcrypto/magi --push .
16 |
17 | # Builds local magi docker images
18 | build-local:
19 |     docker buildx build -t noah7545/magi --load .
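# Note: `build-all` above cross-builds for arm64 and amd64 and pushes the
# result to the registry, while `build-local` builds only for the host
# platform and loads the image into the local Docker daemon (buildx cannot
# `--load` a multi-platform build).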
20 | 21 | # Pulls all docker images 22 | pull: 23 | cd docker && docker compose pull 24 | 25 | # Cleans all docker images 26 | clean: 27 | cd docker && docker compose down -v --remove-orphans 28 | 29 | # Composes docker 30 | run: 31 | cd docker && docker compose up 32 | 33 | # Composes docker with local images 34 | run-local: 35 | just build-local && cd docker && docker compose up 36 | 37 | # Runs op-geth with docker 38 | run-geth: 39 | cd docker && COMPOSE_PROFILES=op-geth docker compose up 40 | 41 | # Runs op-erigon with docker 42 | run-erigon: 43 | cd docker && COMPOSE_PROFILES=op-erigon docker compose up 44 | 45 | # Run all tests 46 | tests: test test-docs 47 | 48 | # Test for the native target with all features 49 | test *args='': 50 | cargo nextest run --all --all-features $@ 51 | 52 | # Lint for all available targets 53 | lint: lint-native lint-docs 54 | 55 | # Fixes and checks the formatting 56 | fmt: fmt-native-fix fmt-native-check 57 | 58 | # Fixes the formatting 59 | fmt-native-fix: 60 | cargo +nightly fmt --all 61 | 62 | # Check the formatting 63 | fmt-native-check: 64 | cargo +nightly fmt --all -- --check 65 | 66 | # Lints 67 | lint-native: fmt-native-check 68 | cargo +nightly clippy --all --all-features --all-targets -- -D warnings 69 | 70 | # Lint the Rust documentation 71 | lint-docs: 72 | RUSTDOCFLAGS="-D warnings" cargo doc --all --no-deps --document-private-items 73 | 74 | # Test the Rust documentation 75 | test-docs: 76 | cargo test --doc --all --locked 77 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "magi" 3 | version = "0.1.0" 4 | license = "AGPL-3.0-only" 5 | edition = "2021" 6 | default-run = "magi" 7 | 8 | [[bin]] 9 | name = "magi" 10 | path = "./bin/magi.rs" 11 | 12 | [[bin]] 13 | name = "network" 14 | path = "./bin/network.rs" 15 | 16 | [dependencies] 17 | tokio = { version = "1.28.0", features = ["full"] } 18 | async-trait = "0.1.73" 19 | eyre = "0.6.8" 20 | ethers = { version = "2.0.11", features = ["optimism"] } 21 | hex = "0.4.3" 22 | libflate = "1.2.0" 23 | openssl = { version = "0.10", features = ["vendored"] } 24 | once_cell = "1" 25 | jsonrpsee = {version = "0.17.0", features = ["server", "macros"]} 26 | futures = "0.3.28" 27 | futures-timer = "0.3.0" 28 | again = "0.1" 29 | 30 | # Alloy Types 31 | alloy-primitives = { version = "0.7.7", features = [ "std", "k256", "serde" ] } 32 | 33 | # Logging and Metrics 34 | chrono = "0.4.22" 35 | tracing = "0.1.36" 36 | ansi_term = "0.12.1" 37 | tracing-appender = "0.2.2" 38 | tracing-subscriber = { version = "0.3.16", features = [ 39 | "fmt", 40 | "env-filter", 41 | "ansi", 42 | "tracing-log", 43 | ] } 44 | prometheus_exporter = "0.8.5" 45 | lazy_static = "1.4.0" 46 | 47 | # Serialization 48 | serde = { version = "1.0.152", features = ["derive"] } 49 | serde_json = "1.0.93" 50 | 51 | # Backend Crates 52 | uuid = { version = "1.3.0", features = ["v4"] } 53 | bytes = "1.4.0" 54 | reqwest = "0.11.14" 55 | jsonwebtoken = "8.2.0" 56 | rand = "0.8.5" 57 | 58 | # Networking 59 | discv5 = "0.2.2" 60 | libp2p = { version = "0.51.3", features = ["macros", "tokio", "tcp", "mplex", "noise", "gossipsub", "ping"] } 61 | libp2p-identity = { version = "0.1.2", features = ["secp256k1"] } 62 | unsigned-varint = "0.7.1" 63 | snap = "1" 64 | ssz_rs = "0.8.0" 65 | 66 | # CLI 67 | figment = { version = "0.10.8", features = ["toml", "env"] } 68 | ctrlc = { version = "3.2.3", features 
= ["termination"] } 69 | clap = { version = "3.2.18", features = ["derive", "env"] } 70 | dirs = "4.0.0" 71 | 72 | [features] 73 | default = ["test-utils"] 74 | test-utils = [] 75 | -------------------------------------------------------------------------------- /src/derive/stages/single_batch.rs: -------------------------------------------------------------------------------- 1 | use ethers::{ 2 | types::H256, 3 | utils::rlp::{DecoderError, Rlp}, 4 | }; 5 | 6 | use crate::common::RawTransaction; 7 | 8 | use super::block_input::BlockInput; 9 | 10 | /// Represents a single batch: a single encoded L2 block 11 | #[derive(Debug, Clone)] 12 | pub struct SingleBatch { 13 | /// Block hash of the previous L2 block 14 | pub parent_hash: H256, 15 | /// The batch epoch number. Same as the first L1 block number in the epoch. 16 | pub epoch_num: u64, 17 | /// The block hash of the first L1 block in the epoch 18 | pub epoch_hash: H256, 19 | /// The L2 block timestamp of this batch 20 | pub timestamp: u64, 21 | /// The L2 block transactions in this batch 22 | pub transactions: Vec, 23 | /// The L1 block number this batch was fully derived from. 24 | pub l1_inclusion_block: u64, 25 | } 26 | 27 | impl SingleBatch { 28 | /// Decodes RLP bytes into a [SingleBatch] 29 | pub fn decode(rlp: &Rlp, l1_inclusion_block: u64) -> Result { 30 | let parent_hash = rlp.val_at(0)?; 31 | let epoch_num = rlp.val_at(1)?; 32 | let epoch_hash = rlp.val_at(2)?; 33 | let timestamp = rlp.val_at(3)?; 34 | let transactions = rlp.list_at(4)?; 35 | 36 | Ok(SingleBatch { 37 | parent_hash, 38 | epoch_num, 39 | epoch_hash, 40 | timestamp, 41 | transactions, 42 | l1_inclusion_block, 43 | }) 44 | } 45 | 46 | /// If any transactions are empty or deposited transaction types. 47 | pub fn has_invalid_transactions(&self) -> bool { 48 | self.transactions 49 | .iter() 50 | .any(|tx| tx.0.is_empty() || tx.0[0] == 0x7E) 51 | } 52 | 53 | /// Returns a Block Input instance for this batch. Represents a single L2 block. 
54 |     pub fn block_input(&self) -> BlockInput<u64> {
55 |         BlockInput {
56 |             timestamp: self.timestamp,
57 |             epoch: self.epoch_num,
58 |             transactions: self.transactions.clone(),
59 |             l1_inclusion_block: self.l1_inclusion_block,
60 |         }
61 |     }
62 | }
63 |
--------------------------------------------------------------------------------
/src/l1/l1_info.rs:
--------------------------------------------------------------------------------
1 | use ethers::types::{Block, Transaction, H256, U256};
2 |
3 | use crate::{config::SystemConfig, derive::stages::attributes::UserDeposited};
4 |
5 | use super::chain_watcher::BatcherTransactionData;
6 |
7 | /// Data tied to a specific L1 block
8 | #[derive(Debug)]
9 | pub struct L1Info {
10 |     /// L1 block data
11 |     pub block_info: L1BlockInfo,
12 |     /// The system config at the block
13 |     pub system_config: SystemConfig,
14 |     /// User deposits from that block
15 |     pub user_deposits: Vec<UserDeposited>,
16 |     /// Batcher transactions in block
17 |     pub batcher_transactions: Vec<BatcherTransactionData>,
18 |     /// Whether the block has finalized
19 |     pub finalized: bool,
20 | }
21 |
22 | /// L1 block info
23 | #[derive(Debug, Clone)]
24 | pub struct L1BlockInfo {
25 |     /// L1 block number
26 |     pub number: u64,
27 |     /// L1 block hash
28 |     pub hash: H256,
29 |     /// L1 block timestamp
30 |     pub timestamp: u64,
31 |     /// L1 base fee per gas
32 |     pub base_fee: U256,
33 |     /// L1 mix hash (prevrandao)
34 |     pub mix_hash: H256,
35 |     /// Post-Ecotone beacon block root
36 |     pub parent_beacon_block_root: Option<H256>,
37 | }
38 |
39 | impl TryFrom<&Block<Transaction>> for L1BlockInfo {
40 |     type Error = eyre::Error;
41 |
42 |     fn try_from(value: &Block<Transaction>) -> std::result::Result<Self, Self::Error> {
43 |         let number = value
44 |             .number
45 |             .ok_or(eyre::eyre!("block not included"))?
46 |             .as_u64();
47 |
48 |         let hash = value.hash.ok_or(eyre::eyre!("block not included"))?;
49 |
50 |         let timestamp = value.timestamp.as_u64();
51 |
52 |         let base_fee = value
53 |             .base_fee_per_gas
54 |             .ok_or(eyre::eyre!("block is pre london"))?;
55 |
56 |         let mix_hash = value.mix_hash.ok_or(eyre::eyre!("block not included"))?;
57 |
58 |         let parent_beacon_block_root = value.parent_beacon_block_root;
59 |
60 |         Ok(L1BlockInfo {
61 |             number,
62 |             hash,
63 |             timestamp,
64 |             base_fee,
65 |             mix_hash,
66 |             parent_beacon_block_root,
67 |         })
68 |     }
69 | }
70 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: test
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - master
7 |   pull_request:
8 |
9 | env:
10 |   ENGINE_API_URL: ${{ secrets.ENGINE_API_URL }}
11 |   JWT_SECRET: ${{ secrets.JWT_SECRET }}
12 |
13 | jobs:
14 |   check:
15 |     runs-on: ubuntu-latest
16 |     steps:
17 |       - uses: actions/checkout@v3
18 |       - uses: actions-rs/toolchain@v1
19 |         with:
20 |           profile: minimal
21 |           toolchain: stable
22 |           override: true
23 |       - uses: Swatinem/rust-cache@v2
24 |       - uses: actions-rs/cargo@v1
25 |         with:
26 |           command: check
27 |
28 |   test:
29 |     runs-on: ubuntu-latest
30 |     steps:
31 |       - uses: actions/checkout@v3
32 |       - uses: actions-rs/toolchain@v1
33 |         with:
34 |           profile: minimal
35 |           toolchain: stable
36 |           override: true
37 |       - uses: Swatinem/rust-cache@v2
38 |       - uses: actions-rs/cargo@v1
39 |         with:
40 |           command: test
41 |           args: --all
42 |   build:
43 |     runs-on: ubuntu-latest
44 |     steps:
45 |       - uses: actions/checkout@v3
46 |       - uses: actions-rs/toolchain@v1
47 |         with:
48 |           profile: minimal
49 |           toolchain: stable
50 |           override: true
51 |       - uses: Swatinem/rust-cache@v2
52 |       - uses:
actions-rs/cargo@v1 53 | with: 54 | command: build 55 | args: --all 56 | 57 | fmt: 58 | runs-on: ubuntu-latest 59 | steps: 60 | - uses: actions/checkout@v3 61 | - uses: actions-rs/toolchain@v1 62 | with: 63 | profile: minimal 64 | toolchain: stable 65 | override: true 66 | components: rustfmt 67 | - uses: Swatinem/rust-cache@v2 68 | - uses: actions-rs/cargo@v1 69 | with: 70 | command: fmt 71 | args: --all -- --check 72 | 73 | clippy: 74 | runs-on: ubuntu-latest 75 | steps: 76 | - uses: actions/checkout@v3 77 | - uses: actions-rs/toolchain@v1 78 | with: 79 | profile: minimal 80 | toolchain: stable 81 | override: true 82 | components: clippy 83 | - uses: Swatinem/rust-cache@v2 84 | - uses: actions-rs/cargo@v1 85 | with: 86 | command: clippy 87 | args: --all -- -D warnings 88 | -------------------------------------------------------------------------------- /docker/start-op-geth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | apk add zstd 5 | apk add jq 6 | 7 | DATADIR=/data/geth 8 | 9 | if [ $NETWORK = "optimism" ] 10 | then 11 | CHAIN_ID=10 12 | if [ ! -d $DATADIR ] 13 | then 14 | mkdir $DATADIR 15 | wget "https://datadirs.optimism.io/mainnet-bedrock.tar.zst" -P $DATADIR 16 | zstd -cd $DATADIR/mainnet-bedrock.tar.zst | tar xvf - -C $DATADIR 17 | fi 18 | elif [ $NETWORK = "base" ] 19 | then 20 | CHAIN_ID=8453 21 | if [ ! -d $DATADIR ] 22 | then 23 | mkdir $DATADIR 24 | wget "https://raw.githubusercontent.com/base-org/node/main/mainnet/genesis-l2.json" -O ./genesis-l2.json 25 | geth init --datadir=$DATADIR ./genesis-l2.json 26 | fi 27 | elif [ "$NETWORK" = "optimism-sepolia" ] 28 | then 29 | CHAIN_ID=11155420 30 | if [ ! -d "$DATADIR" ] 31 | then 32 | wget "https://storage.googleapis.com/oplabs-network-data/Sepolia/genesis.json" -O ./genesis-l2.json 33 | geth init --datadir=$DATADIR ./genesis-l2.json 34 | fi 35 | elif [ $NETWORK = "base-sepolia" ] 36 | then 37 | CHAIN_ID=84532 38 | if [ ! -d $DATADIR ] 39 | then 40 | wget "https://raw.githubusercontent.com/base-org/node/main/sepolia/genesis-l2.json" -O ./genesis-l2.json 41 | geth init --datadir=$DATADIR ./genesis-l2.json 42 | fi 43 | elif [ $NETWORK = "custom" ] || [ $NETWORK = "devnet" ] 44 | then 45 | CHAIN_ID=$(jq '.config.chainId' ./genesis-l2-attached.json) 46 | 47 | if [ ! -d $DATADIR ] 48 | then 49 | mkdir $DATADIR 50 | geth init --datadir=$DATADIR ./genesis-l2-attached.json 51 | fi 52 | else 53 | echo "Network not recognized. 
Available options are optimism, optimism-sepolia, base, base-sepolia, custom, devnet"
54 |     exit 1
55 | fi
56 |
57 |
58 | echo $JWT_SECRET > jwtsecret.txt
59 |
60 | echo "chain id"
61 | echo $CHAIN_ID
62 |
63 | exec geth \
64 |     --datadir="$DATADIR" \
65 |     --networkid="$CHAIN_ID" \
66 |     --http \
67 |     --http.corsdomain="*" \
68 |     --http.vhosts="*" \
69 |     --http.addr=0.0.0.0 \
70 |     --http.port=8545 \
71 |     --http.api=web3,debug,eth,txpool,net,engine,admin \
72 |     --syncmode=full \
73 |     --gcmode=full \
74 |     --authrpc.vhosts="*" \
75 |     --authrpc.addr=0.0.0.0 \
76 |     --authrpc.port=8551 \
77 |     --authrpc.jwtsecret=/jwtsecret.txt \
78 |     --rollup.disabletxpoolgossip=true \
79 |     --snapshot=false \
80 |     $@
81 |
--------------------------------------------------------------------------------
/src/network/service/types.rs:
--------------------------------------------------------------------------------
1 | use std::net::{IpAddr, Ipv4Addr, SocketAddr};
2 |
3 | use discv5::enr::{CombinedKey, Enr};
4 | use eyre::Result;
5 | use libp2p::{multiaddr::Protocol, Multiaddr};
6 |
7 | /// An [Ipv4Addr] and port.
8 | #[derive(Debug, Clone, Copy)]
9 | pub struct NetworkAddress {
10 |     /// An [Ipv4Addr]
11 |     pub ip: Ipv4Addr,
12 |     /// A port
13 |     pub port: u16,
14 | }
15 |
16 | /// A wrapper around a peer's Network Address.
17 | #[derive(Debug)]
18 | pub struct Peer {
19 |     /// The peer's [Ipv4Addr] and port
20 |     pub addr: NetworkAddress,
21 | }
22 |
23 | impl TryFrom<&Enr<CombinedKey>> for NetworkAddress {
24 |     type Error = eyre::Report;
25 |
26 |     /// Convert an [Enr] to a Network Address.
27 |     fn try_from(value: &Enr<CombinedKey>) -> Result<Self> {
28 |         let ip = value.ip4().ok_or(eyre::eyre!("missing ip"))?;
29 |         let port = value.tcp4().ok_or(eyre::eyre!("missing port"))?;
30 |
31 |         Ok(Self { ip, port })
32 |     }
33 | }
34 |
35 | impl From<NetworkAddress> for Multiaddr {
36 |     /// Converts a Network Address to a [Multiaddr]
37 |     fn from(value: NetworkAddress) -> Self {
38 |         let mut multiaddr = Multiaddr::empty();
39 |         multiaddr.push(Protocol::Ip4(value.ip));
40 |         multiaddr.push(Protocol::Tcp(value.port));
41 |
42 |         multiaddr
43 |     }
44 | }
45 |
46 | impl From<NetworkAddress> for SocketAddr {
47 |     /// Converts a Network Address to a [SocketAddr].
48 |     fn from(value: NetworkAddress) -> Self {
49 |         SocketAddr::new(IpAddr::V4(value.ip), value.port)
50 |     }
51 | }
52 |
53 | impl TryFrom<SocketAddr> for NetworkAddress {
54 |     type Error = eyre::Report;
55 |
56 |     /// Converts a [SocketAddr] to a Network Address.
57 |     fn try_from(value: SocketAddr) -> Result<Self> {
58 |         let ip = match value.ip() {
59 |             IpAddr::V4(ip) => ip,
60 |             IpAddr::V6(_) => eyre::bail!("ipv6 not supported"),
61 |         };
62 |
63 |         Ok(Self {
64 |             ip,
65 |             port: value.port(),
66 |         })
67 |     }
68 | }
69 |
70 | impl TryFrom<&Enr<CombinedKey>> for Peer {
71 |     type Error = eyre::Report;
72 |
73 |     /// Converts an [Enr] to a Peer
74 |     fn try_from(value: &Enr<CombinedKey>) -> Result<Self> {
75 |         let addr = NetworkAddress::try_from(value)?;
76 |         Ok(Peer { addr })
77 |     }
78 | }
79 |
80 | impl From<Peer> for Multiaddr {
81 |     /// Converts a Peer to a [Multiaddr]
82 |     fn from(value: Peer) -> Self {
83 |         value.addr.into()
84 |     }
85 | }
86 |
--------------------------------------------------------------------------------
/docker/.env.default:
--------------------------------------------------------------------------------
1 | # Network option: can be either
2 | # `optimism`, `optimism-sepolia`, `base`, `base-sepolia`, `custom` or `devnet`.
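# For example, to follow the OP Sepolia testnet instead of OP Mainnet:
# NETWORK=optimism-sepolia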
3 | NETWORK=optimism
4 |
5 | # The HTTP RPC endpoint of an L1 node
6 | L1_RPC_URL=https://eth-mainnet.g.alchemy.com/v2/
7 |
8 | # The HTTP Beacon chain RPC endpoint of an L1 node
9 | # (this is required if using EIP-4844 DA)
10 | L1_BEACON_RPC_URL=
11 |
12 | # The WebSocket RPC endpoint of an L1 node
13 | L1_WS_RPC_URL=wss://eth-mainnet.g.alchemy.com/v2/
14 |
15 | # JWT secret for the engine API
16 | JWT_SECRET=bf549f5188556ce0951048ef467ec93067bc4ea21acebe46ef675cd4e8e015ff
17 |
18 | # Magi's external rpc service port
19 | RPC_PORT=9545
20 |
21 | # Magi's external rpc service socket address
22 | RPC_ADDR="0.0.0.0"
23 |
24 | # Execution client: can be either `op-geth` or `op-erigon`
25 | EXECUTION_CLIENT=op-geth
26 |
27 | # The execution client Auth RPC port.
28 | EXECUTION_CLIENT_AUTH_RPC_PORT=8551
29 |
30 | # The execution client RPC port.
31 | EXECUTION_CLIENT_RPC_PORT=8545
32 |
33 | # The execution client WebSocket port.
34 | EXECUTION_CLIENT_WS_PORT=8546
35 |
36 | # Sync mode: can be either `full` or `checkpoint`
37 | SYNC_MODE=full
38 |
39 | # Only for `custom` or `devnet` network.
40 | # Specify the path to the `rollup.json` file generated by `op-node`.
41 | # For the devnet configuration, this file should be located in the `.devnet` folder within the Optimism directory.
42 | # OP_ROLLUP_JSON_FILEPATH=
43 |
44 | # Only for `custom` or `devnet` network.
45 | # Specify the path to the `genesis-l2.json` file generated by `op-node`.
46 | # For the devnet configuration, this file should be located in the `.devnet` folder within the Optimism directory.
47 | # OP_GENESIS_JSON_FILEPATH=
48 |
49 | # If the OP-Challenger should be run as a service alongside Magi
50 | # (comment out the next line if you don't want to run this service)
51 | # RUN_OP_CHALLENGER=run-op-challenger
52 |
53 | # OP-Challenger mode: can be `listen-only` or `listen-and-respond`
54 | OP_CHALLENGER_MODE=listen-only
55 |
56 |
57 | # ----------------- Only needed with `checkpoint` sync mode -----------------
58 | CHECKPOINT_SYNC_URL=https://opt-mainnet.g.alchemy.com/v2/
59 |
60 |
61 | # ----- Only needed with the OP-Challenger in `listen-and-respond` mode -----
62 | OP_CHALLENGER_SIGNER_KEY=a1742ee5f7898541224d6a91d9f3b34ad442e27bcb43223c01e47e58fc0a0c12
63 |
64 |
65 | # --------------------- Only needed for testing locally ---------------------
66 | L1_TEST_RPC_URL=https://eth-sepolia.g.alchemy.com/v2/
67 | L2_TEST_RPC_URL=https://opt-sepolia.g.alchemy.com/v2/
68 |
69 |
70 | # ------------------------------ Do not modify ------------------------------
71 | COMPOSE_PROJECT_NAME=$NETWORK
72 | COMPOSE_PROFILES=magi,metrics,${RUN_OP_CHALLENGER:+challenger},${EXECUTION_CLIENT}
73 |
--------------------------------------------------------------------------------
/src/l1/config_updates.rs:
--------------------------------------------------------------------------------
1 | use ethers::types::{Address, Log, U256};
2 | use eyre::Result;
3 |
4 | /// Represents a system config update event
5 | #[derive(Debug)]
6 | pub enum SystemConfigUpdate {
7 |     /// The batch sender address has been updated
8 |     BatchSender(Address),
9 |     /// The fee overhead and scalar have been updated
10 |     Fees(U256, U256),
11 |     /// The gas has been updated
12 |     Gas(U256),
13 |     /// The unsafe block signer has been updated
14 |     UnsafeBlockSigner(Address),
15 | }
16 |
17 | impl TryFrom<Log> for SystemConfigUpdate {
18 |     type Error = eyre::Report;
19 |
20 |     fn try_from(log: Log) -> Result<Self> {
21 |         let version = log
22 |             .topics
23 |             .get(1)
24 |             .ok_or(eyre::eyre!("invalid
system config update"))? 25 | .to_low_u64_be(); 26 | 27 | if version != 0 { 28 | return Err(eyre::eyre!("invalid system config update")); 29 | } 30 | 31 | let update_type = log 32 | .topics 33 | .get(2) 34 | .ok_or(eyre::eyre!("invalid system config update"))? 35 | .to_low_u64_be(); 36 | 37 | match update_type { 38 | 0 => { 39 | let addr_bytes = log 40 | .data 41 | .get(76..96) 42 | .ok_or(eyre::eyre!("invalid system config update"))?; 43 | 44 | let addr = Address::from_slice(addr_bytes); 45 | Ok(Self::BatchSender(addr)) 46 | } 47 | 1 => { 48 | let fee_overhead = log 49 | .data 50 | .get(64..96) 51 | .ok_or(eyre::eyre!("invalid system config update"))?; 52 | 53 | let fee_scalar = log 54 | .data 55 | .get(96..128) 56 | .ok_or(eyre::eyre!("invalid system config update"))?; 57 | 58 | let fee_overhead = U256::from_big_endian(fee_overhead); 59 | let fee_scalar = U256::from_big_endian(fee_scalar); 60 | 61 | Ok(Self::Fees(fee_overhead, fee_scalar)) 62 | } 63 | 2 => { 64 | let gas_bytes = log 65 | .data 66 | .get(64..96) 67 | .ok_or(eyre::eyre!("invalid system config update"))?; 68 | 69 | let gas = U256::from_big_endian(gas_bytes); 70 | Ok(Self::Gas(gas)) 71 | } 72 | 3 => { 73 | let addr_bytes = log 74 | .data 75 | .get(76..96) 76 | .ok_or(eyre::eyre!("invalid system config update"))?; 77 | 78 | let addr = Address::from_slice(addr_bytes); 79 | Ok(Self::UnsafeBlockSigner(addr)) 80 | } 81 | _ => Err(eyre::eyre!("invalid system config update")), 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # Magi 2 | //! 3 | //! `Magi` is a Rust implementation of an OP stack rollup node, designed to serve as a replacement for `op-node`. It facilitates interaction with both the L1 (Layer 1) chain and the canonical L2 (Layer 2) chain, enabling efficient data ingestion, processing, and serving via an RPC interface. 4 | //! 5 | //! This crate is structured to provide functionality for running an OP stack rollup node, including configuration management, data derivation, and P2P network communication. 6 | //! 7 | //! ## Features 8 | //! 9 | //! - **L1 Chain Ingestion**: Processes and ingests data from the L1 chain to keep the rollup node synchronized. 10 | //! - **Canonical L2 Chain Derivation**: Derives the canonical L2 chain state based on ingested L1 data. 11 | //! - **L2 Engine API**: Interfaces with `op-geth` for L2 state execution and consensus. 12 | //! - **Networking**: Manages peer-to-peer networking for P2P data dissemination and retrieval. 13 | //! - **RPC Server**: Hosts an RPC server for querying rollup node data. 14 | //! - **Configurable Sync Modes**: Supports different synchronization modes. 15 | //! - **Telemetry and Logging**: Provides application telemetry and logging for monitoring and debugging. 16 | //! 17 | //! ## Modules 18 | //! 19 | //! - [`l1`]: Ingests and processes L1 chain data. 20 | //! - [`common`]: Contains common types and functions used throughout the crate. 21 | //! - [`config`]: Manages configuration settings for the node. 22 | //! - [`mod@derive`]: Handles the derivation pipeline for the L2 chain. 23 | //! - [`driver`]: Drives `op-geth` via the L2 Engine API. 24 | //! - [`engine`]: Provides an implementation of the L2 Engine API. 25 | //! - [`network`]: Manages peer-to-peer networking. 26 | //! - [`telemetry`]: Handles application telemetry and logging. 27 | //! - [`rpc`]: Implements the RPC server for external queries. 
28 | //! - [`runner`]: Manages the node's operation in various synchronization modes. 29 | //! - [`version`]: Provides version information for the `magi` crate. 30 | //! 31 | //! ## Getting Started 32 | //! 33 | //! To start using `magi`, add it as a dependency in your `Cargo.toml`: 34 | //! 35 | //! ```toml 36 | //! [dependencies] 37 | //! magi = "0.1.0" 38 | //! ``` 39 | //! 40 | //! Then, refer to the individual modules for specific functionality. 41 | //! 42 | #![warn(missing_docs)] 43 | /// A module for ingesting L1 chain data 44 | pub mod l1; 45 | 46 | /// Common types and functions 47 | pub mod common; 48 | 49 | /// Configuration management 50 | pub mod config; 51 | 52 | /// The derivation pipeline module for deriving the canonical L2 chain 53 | pub mod derive; 54 | 55 | /// A module for driving op-geth via the L2 Engine API 56 | pub mod driver; 57 | 58 | /// A module for the L2 Engine API 59 | pub mod engine; 60 | 61 | /// Peer to peer networking 62 | pub mod network; 63 | 64 | /// Application telemetry and logging 65 | pub mod telemetry; 66 | 67 | /// RPC module to host rpc server 68 | pub mod rpc; 69 | 70 | /// A module to handle running Magi in different sync modes 71 | pub mod runner; 72 | 73 | /// A module to get current Magi version. 74 | pub mod version; 75 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Magi  🟠 2 | 3 | [![build](https://github.com/a16z/magi/actions/workflows/test.yml/badge.svg)](https://github.com/a16z/magi/actions/workflows/test.yml) [![license: AGPL v3](https://img.shields.io/badge/License-AGPL_v3-blue.svg)](https://opensource.org/license/agpl-v3/) [![chat](https://img.shields.io/badge/chat-telegram-blue)](https://t.me/+6zrIsnaLO0hjNmZh) 4 | 5 | Magi is an OP Stack rollup client written in Rust, designed to perform the same functionality as op-node. It is compatible with execution clients like op-geth. As an independent implementation, Magi aims to enhance the safety and liveness of the entire OP Stack ecosystem. Magi is still new, so we expect to find some bugs in the coming months. For critical infrastructure, we recommend using op-node. 6 | 7 | ## Running 8 | 9 | For convenience, we provide a simple Docker setup to run Magi and op-geth together. This guide assumes you have both docker and git installed on your machine. 10 | 11 | Start by cloning the Magi repository and entering the docker subdirectory 12 | ```sh 13 | git clone https://github.com/a16z/magi.git && cd magi/docker 14 | ``` 15 | 16 | Next copy `.env.default` to `.env` 17 | ```sh 18 | cp .env.default .env 19 | ``` 20 | 21 | In the `.env` file, modify the `L1_RPC_URL` field to contain a valid Ethereum RPC. For the Optimism and Base testnets, this must be a Sepolia RPC URL. This RPC can either be from a local node, or a provider such as Alchemy or Infura. 22 | 23 | By default, the `NETWORK` field in `.env` is `optimism-sepolia`, however `base-sepolia` is also supported. 24 | 25 | Start the docker containers 26 | ```sh 27 | docker compose up -d 28 | ``` 29 | 30 | If the previous step fails with a permission denied error, try running the command with `sudo`. 31 | 32 | The docker setup contains a Grafana dashboard. To view sync progress, you can check the dashboard at `http://localhost:3000` with the username `magi` and password `op`. Alternatively, you can view Magi's logs by running `docker logs magi --follow`. 
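For quick reference, the lifecycle commands used in this guide (run from the `docker` subdirectory; `stop` and `down` are standard Docker Compose commands):

```sh
# follow the rollup node's logs
docker logs magi --follow

# stop the containers but keep chain data
docker compose stop

# tear everything down, including the data volume (forces a resync)
docker compose down -v
```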
33 |
34 | ## Contributing
35 |
36 | All contributions to Magi are welcome. Before opening a PR, please submit an issue detailing the bug or feature. Please ensure that your contribution builds on the stable Rust toolchain, has been linted with `cargo fmt`, passes `cargo clippy`, and contains tests when applicable.
37 |
38 | ## Disclaimer
39 |
40 | _This code is being provided as is. No guarantee, representation or warranty is being made, express or implied, as to the safety or correctness of the code. It has not been audited and as such there can be no assurance it will work as intended, and users may experience delays, failures, errors, omissions or loss of transmitted information. Nothing in this repo should be construed as investment advice or legal advice for any particular facts or circumstances and is not meant to replace competent counsel. It is strongly advised for you to contact a reputable attorney in your jurisdiction for any questions or concerns with respect thereto. a16z is not liable for any use of the foregoing, and users should proceed with caution and use at their own risk. See a16z.com/disclosures for more info._
41 |
--------------------------------------------------------------------------------
/src/l1/blob_encoding.rs:
--------------------------------------------------------------------------------
1 | use bytes::Bytes;
2 | use eyre::Result;
3 |
4 | const MAX_BLOB_DATA_SIZE: usize = (4 * 31 + 3) * 1024 - 4;
5 | const ENCODING_VERSION: u8 = 0;
6 | const VERSION_OFFSET: usize = 1;
7 | const ROUNDS: usize = 1024;
8 |
9 | /// Decodes a blob into the raw byte data it carries
10 | pub fn decode_blob_data(blob: &[u8]) -> Result<Bytes> {
11 |     let mut output = vec![0; MAX_BLOB_DATA_SIZE];
12 |
13 |     if blob[VERSION_OFFSET] != ENCODING_VERSION {
14 |         eyre::bail!(
15 |             "Blob decoding: Invalid encoding version: want {}, got {}",
16 |             ENCODING_VERSION,
17 |             blob[VERSION_OFFSET]
18 |         );
19 |     }
20 |
21 |     // decode the 3-byte big-endian length value into a 4-byte integer
22 |     let output_len = u32::from_be_bytes([0, blob[2], blob[3], blob[4]]) as usize;
23 |     if output_len > MAX_BLOB_DATA_SIZE {
24 |         eyre::bail!(
25 |             "Blob decoding: Invalid length: {} exceeds maximum {}",
26 |             output_len,
27 |             MAX_BLOB_DATA_SIZE
28 |         );
29 |     }
30 |
31 |     output[0..27].copy_from_slice(&blob[5..32]);
32 |
33 |     let mut output_pos = 28;
34 |     let mut input_pos = 32;
35 |
36 |     // buffer for the 4 6-bit chunks
37 |     let mut encoded_byte = [0; 4];
38 |
39 |     encoded_byte[0] = blob[0];
40 |     for byte in encoded_byte.iter_mut().skip(1) {
41 |         *byte = decode_field_element(&mut output_pos, &mut input_pos, blob, &mut output)?;
42 |     }
43 |     reassemble_bytes(&mut output_pos, encoded_byte, &mut output);
44 |
45 |     for _ in 1..ROUNDS {
46 |         if output_pos >= output_len {
47 |             break;
48 |         }
49 |
50 |         for byte in encoded_byte.iter_mut() {
51 |             *byte = decode_field_element(&mut output_pos, &mut input_pos, blob, &mut output)?;
52 |         }
53 |         reassemble_bytes(&mut output_pos, encoded_byte, &mut output);
54 |     }
55 |
56 |     for output_byte in output.iter().take(MAX_BLOB_DATA_SIZE).skip(output_len) {
57 |         if output_byte != &0 {
58 |             eyre::bail!(
59 |                 "Blob decoding: Extraneous data in field element {}",
60 |                 output_pos / 32
61 |             );
62 |         }
63 |     }
64 |
65 |     output.truncate(output_len);
66 |
67 |     for byte in blob.iter().skip(input_pos) {
68 |         if byte != &0 {
69 |             eyre::bail!(
70 |                 "Blob decoding: Extraneous data in input position {}",
71 |                 input_pos
72 |             );
73 |         }
74 |     }
75 |
76 |     Ok(output.into())
77 | }
78 |
79 | fn decode_field_element(
output_pos: &mut usize, 81 | input_pos: &mut usize, 82 | blob: &[u8], 83 | output: &mut [u8], 84 | ) -> Result { 85 | let result = blob[*input_pos]; 86 | 87 | // two highest order bits of the first byte of each field element should always be 0 88 | if result & 0b1100_0000 != 0 { 89 | eyre::bail!("Blob decoding: Invalid field element"); 90 | } 91 | 92 | output[*output_pos..*output_pos + 31].copy_from_slice(&blob[*input_pos + 1..*input_pos + 32]); 93 | 94 | *output_pos += 32; 95 | *input_pos += 32; 96 | 97 | Ok(result) 98 | } 99 | 100 | fn reassemble_bytes(output_pos: &mut usize, encoded_byte: [u8; 4], output: &mut [u8]) { 101 | *output_pos -= 1; 102 | 103 | let x = (encoded_byte[0] & 0b0011_1111) | ((encoded_byte[1] & 0b0011_0000) << 2); 104 | let y = (encoded_byte[1] & 0b0000_1111) | ((encoded_byte[3] & 0b0000_1111) << 4); 105 | let z = (encoded_byte[2] & 0b0011_1111) | ((encoded_byte[3] & 0b0011_0000) << 2); 106 | 107 | output[*output_pos - 32] = z; 108 | output[*output_pos - (32 * 2)] = y; 109 | output[*output_pos - (32 * 3)] = x; 110 | } 111 | -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.4" 3 | x-logging: &logging 4 | logging: 5 | driver: json-file 6 | options: 7 | max-size: 10m 8 | max-file: "3" 9 | 10 | services: 11 | magi: 12 | image: a16zcrypto/magi:latest 13 | container_name: magi 14 | build: 15 | dockerfile: Dockerfile 16 | context: ../ 17 | profiles: 18 | - magi 19 | restart: unless-stopped 20 | stop_grace_period: 3m 21 | entrypoint: /scripts/start-magi.sh 22 | depends_on: 23 | - op-geth 24 | env_file: 25 | - .env 26 | ports: 27 | - 9200:9200 28 | - "${RPC_PORT}:${RPC_PORT}" 29 | volumes: 30 | - ./:/scripts 31 | - data:/data 32 | - ${OP_ROLLUP_JSON_FILEPATH:-.}:/rollup.json 33 | <<: *logging 34 | 35 | op-geth: 36 | image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101308.2 37 | container_name: op-geth 38 | profiles: 39 | - op-geth 40 | restart: unless-stopped 41 | stop_grace_period: 3m 42 | entrypoint: /scripts/start-op-geth.sh 43 | env_file: 44 | - .env 45 | ports: 46 | - ${EXECUTION_CLIENT_AUTH_RPC_PORT}:8551 47 | - ${EXECUTION_CLIENT_RPC_PORT}:8545 48 | - ${EXECUTION_CLIENT_WS_PORT}:8546 49 | volumes: 50 | - ./:/scripts 51 | - data:/data 52 | - ${OP_GENESIS_JSON_FILEPATH:-.}:/genesis-l2-attached.json 53 | <<: *logging 54 | 55 | op-erigon: 56 | image: testinprod/op-erigon:latest 57 | container_name: op-erigon 58 | profiles: 59 | - op-erigon 60 | restart: unless-stopped 61 | stop_grace_period: 3m 62 | entrypoint: /scripts/start-op-erigon.sh 63 | env_file: 64 | - .env 65 | ports: 66 | - ${EXECUTION_CLIENT_AUTH_RPC_PORT}:8551 67 | - ${EXECUTION_CLIENT_RPC_PORT}:8545 68 | - ${EXECUTION_CLIENT_WS_PORT}:8546 69 | volumes: 70 | - ./:/scripts 71 | - data:/data 72 | <<: *logging 73 | 74 | op-challenger: 75 | image: merklefruit/op-challenger:latest 76 | container_name: op-challenger 77 | profiles: 78 | - challenger 79 | restart: unless-stopped 80 | stop_grace_period: 3m 81 | entrypoint: /scripts/start-op-challenger.sh 82 | env_file: 83 | - .env 84 | ports: 85 | - 9201:9201 86 | volumes: 87 | - ./:/scripts 88 | - data:/data 89 | <<: *logging 90 | 91 | prometheus: 92 | image: prom/prometheus:latest 93 | container_name: prometheus 94 | profiles: 95 | - metrics 96 | ports: 97 | - 9090:9090 98 | command: 99 | - --config.file=/etc/prometheus/prometheus.yml 100 | volumes: 101 | - 
102 |     <<: *logging
103 | 
104 |   node-exporter:
105 |     image: prom/node-exporter:latest
106 |     container_name: node-exporter
107 |     profiles:
108 |       - metrics
109 |     restart: unless-stopped
110 |     volumes:
111 |       - /proc:/host/proc:ro
112 |       - /sys:/host/sys:ro
113 |       - /:/rootfs:ro
114 |     command:
115 |       - --path.procfs=/host/proc
116 |       - --path.rootfs=/rootfs
117 |       - --path.sysfs=/host/sys
118 |       - --collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)
119 |     expose:
120 |       - 9100
121 | 
122 |   grafana:
123 |     image: grafana/grafana
124 |     container_name: grafana
125 |     profiles:
126 |       - metrics
127 |     ports:
128 |       - "3000:3000"
129 |     volumes:
130 |       - ./grafana/provisioning/:/etc/grafana/provisioning/:ro
131 |     environment:
132 |       - GF_SECURITY_ADMIN_USER=magi
133 |       - GF_SECURITY_ADMIN_PASSWORD=op
134 |       - GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/etc/grafana/provisioning/dashboards/dashboard.json
135 | 
136 | volumes:
137 |   scripts:
138 |   data:
139 | 
--------------------------------------------------------------------------------
/src/engine/traits.rs:
--------------------------------------------------------------------------------
1 | use async_trait::async_trait;
2 | use eyre::Result;
3 | 
4 | use super::{
5 |     ExecutionPayload, ForkChoiceUpdate, ForkchoiceState, PayloadAttributes, PayloadId,
6 |     PayloadStatus,
7 | };
8 | 
9 | /// ## Engine
10 | ///
11 | /// A set of methods that allow a consensus client to interact with an execution engine.
12 | /// This is a modified version of the [Ethereum Execution API Specs](https://github.com/ethereum/execution-apis),
13 | /// as defined in the [Optimism Exec Engine Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md).
14 | #[async_trait]
15 | pub trait Engine: Send + Sync + 'static {
16 |     /// ## forkchoice_updated
17 |     ///
18 |     /// Updates were made to [`engine_forkchoiceUpdatedV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_forkchoiceupdatedv2)
19 |     /// for L2: it takes an extended [PayloadAttributes].
20 |     /// This updates which L2 blocks the engine considers to be canonical ([ForkchoiceState] argument),
21 |     /// and optionally initiates block production ([PayloadAttributes] argument).
22 |     ///
23 |     /// ### Specification
24 |     ///
25 |     /// method: engine_forkchoiceUpdatedV2
26 |     /// params:
27 |     /// - [ForkchoiceState]
28 |     /// - [PayloadAttributes]
29 |     ///
30 |     /// timeout: 8s
31 |     ///
32 |     /// returns:
33 |     /// - [ForkChoiceUpdate]
34 |     ///
35 |     /// potential errors:
36 |     /// - code and message set in case an exception happens while validating the payload, updating the forkchoice, or initiating the payload build process.
37 |     ///
38 |     /// ### Reference
39 |     ///
40 |     /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#engine_forkchoiceupdatedv1).
41 |     async fn forkchoice_updated(
42 |         &self,
43 |         forkchoice_state: ForkchoiceState,
44 |         payload_attributes: Option<PayloadAttributes>,
45 |     ) -> Result<ForkChoiceUpdate>;
46 | 
47 |     /// ## new_payload
48 |     ///
49 |     /// No modifications to [`engine_newPayloadV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_newpayloadv2)
50 |     /// were made for L2. Applies an L2 block to the engine state.
51 |     ///
52 |     /// ### Specification
53 |     ///
54 |     /// method: engine_newPayloadV2
55 |     ///
56 |     /// params:
57 |     /// - [ExecutionPayload]
58 |     ///
59 |     /// timeout: 8s
60 |     ///
61 |     /// returns:
62 |     /// - [PayloadStatus]
63 |     ///
64 |     /// potential errors:
65 |     /// - code and message set in case an exception happens while processing the payload.
66 |     ///
67 |     /// ### Reference
68 |     ///
69 |     /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#engine_newPayloadv1).
70 |     async fn new_payload(&self, execution_payload: ExecutionPayload) -> Result<PayloadStatus>;
71 | 
72 |     /// ## get_payload
73 |     ///
74 |     /// No modifications to [`engine_getPayloadV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadv2)
75 |     /// were made for L2. Retrieves a payload by ID, prepared by [engine_forkchoiceUpdatedV2](super::EngineApi)
76 |     /// when called with [PayloadAttributes].
77 |     ///
78 |     /// ### Specification
79 |     ///
80 |     /// method: engine_getPayloadV2
81 |     ///
82 |     /// params:
83 |     /// - [PayloadId]: DATA, 8 Bytes - Identifier of the payload build process
84 |     ///
85 |     /// timeout: 1s
86 |     ///
87 |     /// returns:
88 |     /// - [ExecutionPayload]
89 |     ///
90 |     /// potential errors:
91 |     /// - code and message set in case an exception happens while getting the payload.
92 |     ///
93 |     /// ### Reference
94 |     ///
95 |     /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#engine_getPayloadv1).
96 |     async fn get_payload(&self, payload_id: PayloadId) -> Result<ExecutionPayload>;
97 | }
98 | 
--------------------------------------------------------------------------------
/bin/magi.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 | use std::{env::current_dir, process};
3 | 
4 | use clap::Parser;
5 | use dirs::home_dir;
6 | use eyre::Result;
7 | 
8 | use magi::{
9 |     config::{ChainConfig, CliConfig, Config, SyncMode},
10 |     runner::Runner,
11 |     telemetry::{self, metrics},
12 | };
13 | use serde::Serialize;
14 | 
15 | #[tokio::main]
16 | async fn main() -> Result<()> {
17 |     let cli = Cli::parse();
18 |     let sync_mode = cli.sync_mode;
19 |     let verbose = cli.verbose;
20 |     let logs_dir = cli.logs_dir.clone();
21 |     let logs_rotation = cli.logs_rotation.clone();
22 |     let checkpoint_hash = cli.checkpoint_hash.clone();
23 |     let config = cli.to_config();
24 | 
25 |     let _guards = telemetry::init(verbose, logs_dir, logs_rotation);
26 |     metrics::init()?;
27 | 
28 |     tracing::info!(
29 |         target: "magi",
30 |         "Starting Magi. sync mode={}, network={}",
31 |         sync_mode, config.chain.network
32 |     );
33 | 
34 |     let runner = Runner::from_config(config)
35 |         .with_sync_mode(sync_mode)
36 |         .with_checkpoint_hash(checkpoint_hash);
37 | 
38 |     if let Err(err) = runner.run().await {
39 |         tracing::error!(target: "magi", "{}", err);
40 |         process::exit(1);
41 |     }
42 | 
43 |     Ok(())
44 | }
45 | 
46 | #[derive(Parser, Serialize)]
47 | pub struct Cli {
48 |     #[clap(short, long, default_value = "optimism")]
49 |     network: String,
50 |     #[clap(long)]
51 |     l1_rpc_url: Option<String>,
52 |     #[clap(long)]
53 |     l1_beacon_url: Option<String>,
54 |     #[clap(long)]
55 |     l2_rpc_url: Option<String>,
56 |     #[clap(short = 'm', long, default_value = "full")]
57 |     sync_mode: SyncMode,
58 |     #[clap(long)]
59 |     l2_engine_url: Option<String>,
60 |     #[clap(long)]
61 |     jwt_secret: Option<String>,
62 |     /// Path to a JWT secret to use for authenticated RPC endpoints
63 |     #[clap(long)]
64 |     jwt_file: Option<PathBuf>,
65 |     #[clap(short = 'v', long)]
66 |     verbose: bool,
67 |     #[clap(short = 'p', long)]
68 |     rpc_port: Option<u16>,
69 |     #[clap(long)]
70 |     rpc_addr: Option<String>,
71 |     #[clap(long)]
72 |     logs_dir: Option<String>,
73 |     #[clap(long)]
74 |     logs_rotation: Option<String>,
75 |     #[clap(long)]
76 |     checkpoint_hash: Option<String>,
77 |     #[clap(long)]
78 |     checkpoint_sync_url: Option<String>,
79 |     #[clap(long)]
80 |     devnet: bool,
81 | }
82 | 
83 | impl Cli {
84 |     pub fn to_config(self) -> Config {
85 |         let chain = ChainConfig::from_network_name(&self.network);
86 |         let config_path = home_dir().unwrap().join(".magi/magi.toml");
87 |         let cli_config = CliConfig::from(self);
88 |         Config::new(&config_path, cli_config, chain)
89 |     }
90 | 
91 |     pub fn jwt_secret(&self) -> Option<String> {
92 |         self.jwt_secret.clone().or(self.jwt_secret_from_file())
93 |     }
94 | 
95 |     pub fn jwt_secret_from_file(&self) -> Option<String> {
96 |         let jwt_file = self.jwt_file.as_ref()?;
97 |         match std::fs::read_to_string(jwt_file) {
98 |             Ok(content) => Some(content),
99 |             Err(_) => Cli::default_jwt_secret(),
100 |         }
101 |     }
102 | 
103 |     pub fn default_jwt_secret() -> Option<String> {
104 |         let cur_dir = current_dir().ok()?;
105 |         match std::fs::read_to_string(cur_dir.join("jwt.hex")) {
106 |             Ok(content) => Some(content),
107 |             Err(_) => {
108 |                 tracing::error!(target: "magi", "Failed to read JWT secret from file: {:?}", cur_dir);
109 |                 None
110 |             }
111 |         }
112 |     }
113 | }
114 | 
115 | impl From<Cli> for CliConfig {
116 |     fn from(value: Cli) -> Self {
117 |         let jwt_secret = value.jwt_secret();
118 |         Self {
119 |             l1_rpc_url: value.l1_rpc_url,
120 |             l1_beacon_url: value.l1_beacon_url,
121 |             l2_rpc_url: value.l2_rpc_url,
122 |             l2_engine_url: value.l2_engine_url,
123 |             jwt_secret,
124 |             checkpoint_sync_url: value.checkpoint_sync_url,
125 |             rpc_port: value.rpc_port,
126 |             rpc_addr: value.rpc_addr,
127 |             devnet: value.devnet,
128 |         }
129 |     }
130 | }
131 | 
--------------------------------------------------------------------------------
/src/common/mod.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Debug;
2 | 
3 | use ethers::{
4 |     types::{Block, Transaction, H256},
5 |     utils::rlp::{Decodable, DecoderError, Rlp},
6 | };
7 | use eyre::Result;
8 | use figment::value::{Dict, Tag, Value};
9 | use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer};
10 | 
11 | use crate::engine::ExecutionPayload;
12 | 
13 | /// Attributes deposited transaction helpers
14 | pub mod attributes_deposited;
15 | pub use attributes_deposited::AttributesDepositedCall;
16 | 
17 | /// Selected block header info
18 | #[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)]
19 | pub struct BlockInfo {
20 |     /// The block hash
21 |     pub hash: H256,
22 |     /// The block number
23 |     pub number: u64,
24 |     /// The parent block hash
25 |     pub parent_hash: H256,
26 |     /// The block timestamp
27 |     pub timestamp: u64,
28 | }
29 | 
30 | /// A raw transaction
31 | #[derive(Clone, PartialEq, Eq)]
32 | pub struct RawTransaction(pub Vec<u8>);
33 | 
34 | /// L1 epoch block
35 | #[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
36 | pub struct Epoch {
37 |     /// The block number
38 |     pub number: u64,
39 |     /// The block hash
40 |     pub hash: H256,
41 |     /// The block timestamp
42 |     pub timestamp: u64,
43 | }
44 | 
45 | impl From<BlockInfo> for Value {
46 |     fn from(value: BlockInfo) -> Value {
47 |         let mut dict = Dict::new();
48 |         dict.insert("hash".to_string(), Value::from(value.hash.as_bytes()));
49 |         dict.insert("number".to_string(), Value::from(value.number));
50 |         dict.insert("timestamp".to_string(), Value::from(value.timestamp));
51 |         dict.insert(
52 |             "parent_hash".to_string(),
53 |             Value::from(value.parent_hash.as_bytes()),
54 |         );
55 |         Value::Dict(Tag::Default, dict)
56 |     }
57 | }
58 | 
59 | impl TryFrom<Block<Transaction>> for BlockInfo {
60 |     type Error = eyre::Report;
61 | 
62 |     /// Converts a [Block] to [BlockInfo]
63 |     fn try_from(block: Block<Transaction>) -> Result<Self> {
64 |         let number = block
65 |             .number
66 |             .ok_or(eyre::eyre!("block not included"))?
67 |             .as_u64();
68 | 
69 |         let hash = block.hash.ok_or(eyre::eyre!("block not included"))?;
70 | 
71 |         Ok(BlockInfo {
72 |             number,
73 |             hash,
74 |             parent_hash: block.parent_hash,
75 |             timestamp: block.timestamp.as_u64(),
76 |         })
77 |     }
78 | }
79 | 
80 | impl From<Epoch> for Value {
81 |     fn from(value: Epoch) -> Self {
82 |         let mut dict = Dict::new();
83 |         dict.insert("hash".to_string(), Value::from(value.hash.as_bytes()));
84 |         dict.insert("number".to_string(), Value::from(value.number));
85 |         dict.insert("timestamp".to_string(), Value::from(value.timestamp));
86 |         Value::Dict(Tag::Default, dict)
87 |     }
88 | }
89 | 
90 | impl From<&ExecutionPayload> for BlockInfo {
91 |     /// Converts an [ExecutionPayload] to [BlockInfo]
92 |     fn from(value: &ExecutionPayload) -> Self {
93 |         Self {
94 |             number: value.block_number.as_u64(),
95 |             hash: value.block_hash,
96 |             parent_hash: value.parent_hash,
97 |             timestamp: value.timestamp.as_u64(),
98 |         }
99 |     }
100 | }
101 | 
102 | impl From<&AttributesDepositedCall> for Epoch {
103 |     /// Converts [AttributesDepositedCall] to an [Epoch] consisting of the number, hash & timestamp of the corresponding L1 epoch block.
104 |     fn from(call: &AttributesDepositedCall) -> Self {
105 |         Self {
106 |             number: call.number,
107 |             timestamp: call.timestamp,
108 |             hash: call.hash,
109 |         }
110 |     }
111 | }
112 | 
113 | impl Decodable for RawTransaction {
114 |     /// Decodes RLP encoded bytes into [RawTransaction] bytes
115 |     fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
116 |         let tx_bytes: Vec<u8> = rlp.as_val()?;
117 |         Ok(Self(tx_bytes))
118 |     }
119 | }
120 | 
121 | impl Debug for RawTransaction {
122 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
123 |         write!(f, "0x{}", hex::encode(&self.0))
124 |     }
125 | }
126 | 
127 | impl Serialize for RawTransaction {
128 |     fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
129 |         serializer.serialize_str(&format!("0x{}", hex::encode(&self.0)))
130 |     }
131 | }
132 | 
133 | impl<'de> Deserialize<'de> for RawTransaction {
134 |     fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
135 |         let tx: String = serde::Deserialize::deserialize(deserializer)?;
136 |         let tx = tx.strip_prefix("0x").unwrap_or(&tx);
137 |         Ok(RawTransaction(hex::decode(tx).map_err(D::Error::custom)?))
138 |     }
139 | }
140 | 
--------------------------------------------------------------------------------
/src/engine/auth.rs:
--------------------------------------------------------------------------------
1 | #![allow(dead_code)]
2 | 
3 | //! Authentication module for the Engine API.
4 | //!
5 | //! This module was built using [reth](https://github.com/paradigmxyz/reth).
6 | 
7 | use eyre::Result;
8 | use jsonwebtoken::Algorithm;
9 | use rand::Rng;
10 | use serde::{Deserialize, Serialize};
11 | use std::time::{Duration, SystemTime, UNIX_EPOCH};
12 | 
13 | /// JWT hex encoded 256 bit secret key length.
14 | const JWT_SECRET_LEN: usize = 64;
15 | 
16 | /// The maximum amount of drift from the JWT claims issued-at `iat` time.
17 | const JWT_MAX_IAT_DIFF: Duration = Duration::from_secs(60);
18 | 
19 | /// The execution layer client MUST support at least the following alg HMAC + SHA256 (HS256)
20 | const JWT_SIGNATURE_ALGO: Algorithm = Algorithm::HS256;
21 | 
22 | /// JwtSecret is a 256-bit hex-encoded secret key used to perform JWT-based authentication.
23 | ///
24 | /// See: [Secret key - Engine API specs](https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md#key-distribution)
25 | #[derive(Clone)]
26 | pub struct JwtSecret([u8; 32]);
27 | 
28 | impl JwtSecret {
29 |     /// Creates an instance of JwtSecret.
30 |     /// The provided `secret` must be a valid hexadecimal string of length 64.
31 |     pub fn from_hex<S: AsRef<str>>(hex: S) -> Result<Self> {
32 |         let hex: &str = hex.as_ref().trim();
33 |         // Remove the "0x" or "0X" prefix if it exists
34 |         let hex = hex
35 |             .strip_prefix("0x")
36 |             .or_else(|| hex.strip_prefix("0X"))
37 |             .unwrap_or(hex);
38 |         if hex.len() != JWT_SECRET_LEN {
39 |             Err(eyre::eyre!(
40 |                 "Invalid JWT secret key length. Expected {} characters, got {}.",
41 |                 JWT_SECRET_LEN,
42 |                 hex.len()
43 |             ))
44 |         } else {
45 |             let hex_bytes = hex::decode(hex)?;
46 |             let bytes = hex_bytes.try_into().expect("is expected len");
47 |             Ok(JwtSecret(bytes))
48 |         }
49 |     }
50 | 
51 |     /// Generates a random [`JwtSecret`]
52 |     pub fn random() -> Self {
53 |         let random_bytes: [u8; 32] = rand::thread_rng().gen();
54 |         let secret = hex::encode(random_bytes);
55 |         JwtSecret::from_hex(secret).unwrap()
56 |     }
57 | 
58 |     /// Returns if the provided JWT token is equal to the JWT secret.
59 |     pub fn equal(&self, token: &str) -> bool {
60 |         hex::encode(self.0) == token
61 |     }
62 | 
63 |     /// Generates a [`Claims`] instance.
64 |     ///
65 |     /// ## Panics
66 |     ///
67 |     /// This function will panic if the system time is before the UNIX_EPOCH.
68 |     pub(crate) fn generate_claims(&self, time: Option<SystemTime>) -> Claims {
69 |         let now = time.unwrap_or_else(SystemTime::now);
70 |         let now_secs = now.duration_since(UNIX_EPOCH).unwrap().as_secs();
71 |         Claims {
72 |             iat: now_secs,
73 |             exp: now_secs + 60,
74 |         }
75 |     }
76 | 
77 |     /// Encodes the [`Claims`] in a [jsonwebtoken::Header] String format.
78 |     pub(crate) fn encode(&self, claims: &Claims) -> Result<String> {
79 |         let bytes = &self.0;
80 |         let key = jsonwebtoken::EncodingKey::from_secret(bytes);
81 |         let algo = jsonwebtoken::Header::new(Algorithm::HS256);
82 |         Ok(jsonwebtoken::encode(&algo, claims, &key)?)
83 |     }
84 | }
85 | 
86 | impl std::fmt::Debug for JwtSecret {
87 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
88 |         f.debug_tuple("JwtSecret").field(&"{{}}").finish()
89 |     }
90 | }
91 | 
92 | /// Claims are a set of information about an actor authorized by a JWT.
93 | ///
94 | /// The Engine API requires that the `iat` (issued-at) claim is provided.
95 | #[derive(Debug, Serialize, Deserialize)]
96 | pub(crate) struct Claims {
97 |     /// The number of seconds since the UNIX_EPOCH.
98 |     pub(crate) iat: u64,
99 |     /// The expiration time of the JWT.
100 |     pub(crate) exp: u64,
101 | }
102 | 
103 | impl Claims {
104 |     /// Valid returns if the given claims are valid.
105 |     pub(crate) fn valid(&self) -> bool {
106 |         let now = SystemTime::now();
107 |         let now_secs = now.duration_since(UNIX_EPOCH).unwrap().as_secs();
108 |         now_secs.abs_diff(self.iat) <= JWT_MAX_IAT_DIFF.as_secs()
109 |     }
110 | }
111 | 
112 | #[cfg(test)]
113 | mod tests {
114 |     use super::*;
115 |     use std::time::{SystemTime, UNIX_EPOCH};
116 | 
117 |     const SECRET: &str = "f79ae5046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430";
118 | 
119 |     #[tokio::test]
120 |     async fn construct_valid_raw_claims() {
121 |         let claims = Claims {
122 |             iat: SystemTime::now()
123 |                 .duration_since(UNIX_EPOCH)
124 |                 .unwrap()
125 |                 .as_secs(),
126 |             exp: 10000000000,
127 |         };
128 |         assert!(claims.valid());
129 |     }
130 | 
131 |     #[tokio::test]
132 |     async fn construct_valid_secret_claims() {
133 |         let secret = JwtSecret::from_hex(SECRET).unwrap();
134 |         let secret_claims = secret.generate_claims(None);
135 |         assert!(secret_claims.valid());
136 |     }
137 | 
138 |     #[tokio::test]
139 |     async fn encode_secret() {
140 |         let secret = JwtSecret::from_hex(SECRET).unwrap();
141 |         let claims = secret.generate_claims(Some(SystemTime::UNIX_EPOCH));
142 |         let jwt = secret.encode(&claims).unwrap();
143 |         assert!(!jwt.is_empty());
144 |     }
145 | }
146 | 
--------------------------------------------------------------------------------
/src/l1/blob_fetcher.rs:
--------------------------------------------------------------------------------
1 | use std::sync::atomic::{AtomicU64, Ordering};
2 | 
3 | use eyre::Result;
4 | use serde::Deserialize;
5 | use serde_json::Value;
6 | 
7 | /// The blob fetcher is responsible for fetching blob data from the L1 beacon chain,
8 | /// along with relevant parsing and validation.
9 | ///
10 | /// Consensus layer info required for deriving the slot at which a specific blob was
11 | /// included in the beacon chain is fetched on the first call to [`Self::get_slot_from_time`]
12 | /// and cached for all subsequent calls.
13 | pub struct BlobFetcher {
14 |     l1_beacon_url: String,
15 |     client: reqwest::Client,
16 |     genesis_timestamp: AtomicU64,
17 |     seconds_per_slot: AtomicU64,
18 | }
19 | 
20 | /// A beacon chain blob sidecar object.
21 | /// KZG commitment and proof fields are not used in the current implementation.
22 | #[derive(Debug, Deserialize)]
23 | pub struct BlobSidecar {
24 |     /// Blob index (transactions can have more than one blob)
25 |     #[serde(deserialize_with = "deserialize_string_to_u64")]
26 |     pub index: u64,
27 |     /// Blob data (not decoded)
28 |     #[serde(deserialize_with = "deserialize_blob_bytes")]
29 |     pub blob: Vec<u8>,
30 | }
31 | 
32 | impl BlobFetcher {
33 |     /// Create a new blob fetcher with the given config.
34 |     pub fn new(l1_beacon_url: String) -> Self {
35 |         Self {
36 |             l1_beacon_url,
37 |             client: reqwest::Client::new(),
38 |             genesis_timestamp: AtomicU64::new(0),
39 |             seconds_per_slot: AtomicU64::new(0),
40 |         }
41 |     }
42 | 
43 |     /// Given a timestamp, return the slot number at which the timestamp
44 |     /// was included in the beacon chain.
45 |     ///
46 |     /// This method uses a cached genesis timestamp and seconds per slot
47 |     /// value to calculate the slot number. If the cache is empty, it fetches
48 |     /// the required data from the beacon RPC.
49 |     pub async fn get_slot_from_time(&self, time: u64) -> Result<u64> {
50 |         let mut genesis_timestamp = self.genesis_timestamp.load(Ordering::Relaxed);
51 |         let mut seconds_per_slot = self.seconds_per_slot.load(Ordering::Relaxed);
52 | 
53 |         // If we don't have data about the genesis timestamp, we need to fetch it
54 |         // from the CL first along with the "SECONDS_PER_SLOT" value from the spec.
55 |         if genesis_timestamp == 0 {
56 |             genesis_timestamp = self.fetch_beacon_genesis_timestamp().await?;
57 |             self.genesis_timestamp
58 |                 .store(genesis_timestamp, Ordering::Relaxed);
59 | 
60 |             let spec = self.fetch_beacon_spec().await?;
61 |             seconds_per_slot = spec
62 |                 .get("SECONDS_PER_SLOT")
63 |                 .ok_or(eyre::eyre!("No seconds per slot in beacon spec"))?
64 |                 .as_str()
65 |                 .ok_or(eyre::eyre!("Seconds per slot: expected string"))?
66 |                 .parse::<u64>()?;
67 | 
68 |             if seconds_per_slot == 0 {
69 |                 eyre::bail!("Seconds per slot is 0; cannot calculate slot number");
70 |             }
71 | 
72 |             self.seconds_per_slot
73 |                 .store(seconds_per_slot, Ordering::Relaxed);
74 |         }
75 | 
76 |         if time < genesis_timestamp {
77 |             eyre::bail!("Time is before genesis; cannot calculate slot number");
78 |         }
79 | 
80 |         Ok((time - genesis_timestamp) / seconds_per_slot)
81 |     }
82 | 
83 |     /// Fetch the blob sidecars for a given slot.
84 |     pub async fn fetch_blob_sidecars(&self, slot: u64) -> Result<Vec<BlobSidecar>> {
85 |         let base_url = format!("{}/eth/v1/beacon/blob_sidecars", self.l1_beacon_url);
86 |         let full_url = format!("{}/{}", base_url, slot);
87 | 
88 |         let res = self.client.get(full_url).send().await?.error_for_status()?;
89 |         let res = serde_json::from_slice::<Value>(&res.bytes().await?)?;
90 |         let res = res.get("data").ok_or(eyre::eyre!("No data in response"))?;
91 | 
92 |         let blobs = serde_json::from_value::<Vec<BlobSidecar>>(res.clone())?;
93 | 
94 |         Ok(blobs)
95 |     }
96 | 
97 |     /// Fetch the genesis timestamp from the beacon chain.
98 |     pub async fn fetch_beacon_genesis_timestamp(&self) -> Result<u64> {
99 |         let base_url = format!("{}/eth/v1/beacon/genesis", self.l1_beacon_url);
100 | 
101 |         let res = self.client.get(base_url).send().await?.error_for_status()?;
102 |         let res = serde_json::from_slice::<Value>(&res.bytes().await?)?;
103 |         let res = res.get("data").ok_or(eyre::eyre!("No data in response"))?;
104 |         let res = res.get("genesis_time").ok_or(eyre::eyre!("No time"))?;
105 | 
106 |         let genesis_time = res.as_str().ok_or(eyre::eyre!("Expected string"))?;
107 |         let genesis_time = genesis_time.parse::<u64>()?;
108 | 
109 |         Ok(genesis_time)
110 |     }
111 | 
112 |     /// Fetch the beacon chain spec.
113 |     pub async fn fetch_beacon_spec(&self) -> Result<Value> {
114 |         let base_url = format!("{}/eth/v1/config/spec", self.l1_beacon_url);
115 | 
116 |         let res = self.client.get(base_url).send().await?.error_for_status()?;
117 |         let res = serde_json::from_slice::<Value>(&res.bytes().await?)?;
118 |         let res = res.get("data").ok_or(eyre::eyre!("No data in response"))?;
119 | 
120 |         Ok(res.clone())
121 |     }
122 | }
123 | 
124 | fn deserialize_string_to_u64<'de, D>(deserializer: D) -> Result<u64, D::Error>
125 | where
126 |     D: serde::Deserializer<'de>,
127 | {
128 |     let s: String = Deserialize::deserialize(deserializer)?;
129 |     s.parse::<u64>().map_err(serde::de::Error::custom)
130 | }
131 | 
132 | fn deserialize_blob_bytes<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
133 | where
134 |     D: serde::Deserializer<'de>,
135 | {
136 |     let s: String = Deserialize::deserialize(deserializer)?;
137 |     let s = s.trim_start_matches("0x");
138 |     let bytes = hex::decode(s).map_err(serde::de::Error::custom)?;
139 |     Ok(bytes)
140 | }
141 | 
--------------------------------------------------------------------------------
/docs/devnet.md:
--------------------------------------------------------------------------------
1 | # Devnet Environment
2 | 
3 | The devnet environment is built on the foundation of the locally available Optimism Bedrock devnet. Essentially, this environment runs `op-geth` against a `geth` node in development mode, excluding the PoS client, and therefore without finality. This is why the `--devnet` flag for Magi is necessary: it allows any block to be accepted as finalized. Otherwise, Magi would operate exclusively on finalized blocks.
4 | 
5 | The devnet environment for Magi connects to the running OP L1 devnet node and launches its own corresponding `op-geth` node from the Magi Docker setup. Magi uses this node as its L2 node and execution engine.
6 | 
7 | ## Let's start
8 | 
9 | ### Configuring the OP Stack
10 | 
11 | Begin by cloning the Optimism repository:
12 | 
13 |     git clone git@github.com:ethereum-optimism/optimism.git
14 | 
15 | The `develop` branch is typically suitable. However, if issues arise, consider using the specific revision `18bcfb0dcc16de4287cb614f0a0b7bb3c276cbd3`.
16 | 
17 | To launch the OP devnet, ensure you have [Docker](https://www.docker.com/):
18 | 
19 |     cd optimism
20 |     git submodule update --init --recursive
21 |     just devnet-up
22 | 
23 | For troubleshooting, please refer to the official [documentation](https://community.optimism.io/docs/developers/build/dev-node/#).
24 | 
25 | ### Configure OP-Geth for Magi
26 | 
27 | Once you have successfully started the OP devnet docker containers, proceed to the next step: launching an `op-geth` L2 instance for Magi.
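Before wiring up Magi, it can be worth a quick sanity check that the devnet's L1 RPC is serving requests. Assuming the default OP devnet L1 port of 8545 (the same endpoint passed to Magi via `--l1-rpc-url` later in this guide), Foundry's `cast` can be used:

    cast block-number --rpc-url http://127.0.0.1:8545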
28 | 
29 | Navigate to the `magi/docker` directory and copy the configuration:
30 | 
31 |     cp .env.default .env
32 | 
33 | Edit the `.env` file located within the directory:
34 | 
35 | ```toml
36 | # Set the network value to `devnet` in the configuration.
37 | NETWORK=devnet
38 | 
39 | # To avoid potential conflicts with the default ports in the OP devnet, it's recommended to modify the RPC ports.
40 | 
41 | # The execution client Auth RPC port.
42 | EXECUTION_CLIENT_AUTH_RPC_PORT=5551
43 | 
44 | # The execution client RPC port.
45 | EXECUTION_CLIENT_RPC_PORT=5545
46 | 
47 | # The execution client WebSocket port.
48 | EXECUTION_CLIENT_WS_PORT=5546
49 | 
50 | # Only for `custom` or `devnet` network.
51 | # Specify the path to the `genesis-l2.json` file generated by `op-node`.
52 | # For the devnet configuration, this file should be located in the `.devnet` folder within the Optimism directory.
53 | OP_GENESIS_JSON_FILEPATH=
54 | ```
55 | 
56 | Now start the `op-geth` node:
57 | 
58 |     docker-compose up op-geth
59 | 
60 | ### Launch Magi
61 | 
62 | Build Magi:
63 | 
64 |     cargo build -p magi # --release
65 | 
66 | Launch Magi:
67 | 
68 |     ./target/debug/magi \
69 |       --network path \ # Specify the path to the `rollup.json` file, akin to `OP_ROLLUP_JSON_FILEPATH`
70 |       --jwt-secret bf549f5188556ce0951048ef467ec93067bc4ea21acebe46ef675cd4e8e015ff \ # Replace only if altered in `.env`.
71 |       --l1-rpc-url http://127.0.0.1:8545 \ # This depends on the OP devnet configuration.
72 |       --l2-rpc-url http://127.0.0.1:5545 \ # As per the Magi `op-geth` configuration (refer to devnet profile).
73 |       --l2-engine-url http://127.0.0.1:5551 \ # As per the Magi `op-geth` configuration (see devnet profile).
74 |       --rpc-port 10500 \ # Choose any available port.
75 |       --devnet \ # Indicates it's a devnet.
76 |       --sync-mode full # Other modes haven't been tested.
77 | 
78 | Remember to adjust the parameters as necessary based on your setup and configurations.
79 | 
80 | If everything is set up successfully, the Magi node should log a message similar to:
81 | 
82 | ```
83 | [Tue, 29 Aug 2023 20:12:10] INFO: safe head updated: 312 0xd3c43585a005b6ae5e5fb70eb4bd408a707751a19f2b2fd1def29b36e633f0cc
84 | ```
85 | 
86 | ### Query node
87 | 
88 | Let's query our Magi `op-geth`:
89 | 
90 |     cast block latest --rpc-url localhost:5545
91 | 
92 | Compare the results with those from the OP devnet L2:
93 | 
94 |     cast block latest --rpc-url localhost:9545
95 | 
96 | The responses should show approximately equal block heights. A difference of around 10-20 blocks is acceptable.
97 | 
98 | Additionally, you can retrieve a block hash from the Magi `op-geth` and query this hash in OP L2:
99 | 
100 |     cast block <block-hash> --rpc-url localhost:9545
101 | 
102 | ### Test deposits/withdrawal
103 | 
104 | Navigate to the directory `optimism/packages/sdk`.
105 | 
106 | If you have previously built the TypeScript components within `packages/`, there's no need to rebuild the SDK. If not, execute the following commands:
107 | 
108 |     pnpm install
109 |     pnpm all
110 | 
111 | Occasionally, you may also need to build `packages/core-utils`. If required, follow the same procedure as above for this package.
112 | 
113 | **Deposit**
114 | 
115 | Depositing ETH to your L2 account:
116 | 
117 |     cast wallet new # Generate a Random Account (for example purposes).
118 |     cast balance <address> --rpc-url localhost:5545 # Query the ETH Balance (it should be zero initially)
119 |     npx hardhat deposit-eth --amount 1 --to <address> --network devnetL1 --withdraw false # Replace `<address>` with your own address.
120 | 
121 | **Deposit & Withdraw**
122 | 
123 | To simultaneously deposit ETH and initiate a withdrawal:
124 | 
125 |     npx hardhat deposit-eth --amount 1 --to <address> --network devnetL1
126 | 
127 | Note: This command will withdraw some ETH from pre-deposited accounts on L2. However, it will not withdraw from the provided address as the private key is required for that operation.
128 | 
129 | To test withdrawal from your account refer to this [tutorial](https://stack.optimism.io/docs/security/forced-withdrawal/).
130 | 
131 | ## Troubleshooting
132 | 
133 | If the Magi node stops syncing, you can reset the devnet.
134 | To do this, navigate to the root of the Optimism monorepo directory
135 | and run the following command: `make devnet-clean`.
136 | Afterward, start both the devnet and Magi again.
137 | 
138 | If the issue continues, please create a new issue on GitHub providing detailed information about the problem.
139 | 
--------------------------------------------------------------------------------
/src/network/service/discovery.rs:
--------------------------------------------------------------------------------
1 | use std::{str::FromStr, time::Duration};
2 | 
3 | use discv5::{
4 |     enr::{CombinedKey, Enr, EnrBuilder, NodeId},
5 |     Discv5, Discv5Config,
6 | };
7 | use ethers::utils::rlp;
8 | use eyre::Result;
9 | use tokio::{
10 |     sync::mpsc::{self, Receiver},
11 |     time::sleep,
12 | };
13 | use unsigned_varint::{decode, encode};
14 | 
15 | use super::types::{NetworkAddress, Peer};
16 | 
17 | /// Starts the [Discv5] discovery service and continually tries to find new peers.
18 | /// Returns a [Receiver] to receive [Peer] structs
19 | pub fn start(addr: NetworkAddress, chain_id: u64) -> Result<Receiver<Peer>> {
20 |     let bootnodes = bootnodes();
21 |     let mut disc = create_disc(chain_id)?;
22 | 
23 |     let (sender, recv) = mpsc::channel::<Peer>(256);
24 | 
25 |     tokio::spawn(async move {
26 |         bootnodes.into_iter().for_each(|enr| _ = disc.add_enr(enr));
27 |         disc.start(addr.into()).await.unwrap();
28 | 
29 |         tracing::info!("started peer discovery");
30 | 
31 |         loop {
32 |             let target = NodeId::random();
33 |             match disc.find_node(target).await {
34 |                 Ok(nodes) => {
35 |                     let peers = nodes
36 |                         .iter()
37 |                         .filter(|node| is_valid_node(node, chain_id))
38 |                         .flat_map(Peer::try_from);
39 | 
40 |                     for peer in peers {
41 |                         _ = sender.send(peer).await;
42 |                     }
43 |                 }
44 |                 Err(err) => {
45 |                     tracing::warn!("discovery error: {:?}", err);
46 |                 }
47 |             }
48 | 
49 |             sleep(Duration::from_secs(10)).await;
50 |         }
51 |     });
52 | 
53 |     Ok(recv)
54 | }
55 | 
56 | /// Returns `true` if a node [Enr] contains an `opstack` key and is on the same network.
57 | fn is_valid_node(node: &Enr<CombinedKey>, chain_id: u64) -> bool {
58 |     node.get_raw_rlp("opstack")
59 |         .map(|opstack| {
60 |             OpStackEnrData::try_from(opstack)
61 |                 .map(|opstack| opstack.chain_id == chain_id && opstack.version == 0)
62 |                 .unwrap_or_default()
63 |         })
64 |         .unwrap_or_default()
65 | }
66 | 
67 | /// Generates an [Enr] and creates a [Discv5] service struct
68 | fn create_disc(chain_id: u64) -> Result<Discv5> {
69 |     let opstack = OpStackEnrData {
70 |         chain_id,
71 |         version: 0,
72 |     };
73 |     let opstack_data: Vec<u8> = opstack.into();
74 | 
75 |     let key = CombinedKey::generate_secp256k1();
76 |     let enr = EnrBuilder::new("v4")
77 |         .add_value_rlp("opstack", opstack_data.into())
78 |         .build(&key)?;
79 |     let config = Discv5Config::default();
80 | 
81 |     Discv5::new(enr, key, config).map_err(|_| eyre::eyre!("could not create disc service"))
82 | }
83 | 
84 | /// The unique L2 network identifier
85 | #[derive(Debug)]
86 | struct OpStackEnrData {
87 |     /// Chain ID
88 |     chain_id: u64,
89 |     /// The version. Always set to 0.
90 |     version: u64,
91 | }
92 | 
93 | impl TryFrom<&[u8]> for OpStackEnrData {
94 |     type Error = eyre::Report;
95 | 
96 |     /// Converts a slice of RLP encoded bytes to Op Stack Enr Data.
97 |     fn try_from(value: &[u8]) -> Result<Self> {
98 |         let bytes: Vec<u8> = rlp::decode(value)?;
99 |         let (chain_id, rest) = decode::u64(&bytes)?;
100 |         let (version, _) = decode::u64(rest)?;
101 | 
102 |         Ok(Self { chain_id, version })
103 |     }
104 | }
105 | 
106 | impl From<OpStackEnrData> for Vec<u8> {
107 |     /// Converts Op Stack Enr data to a vector of bytes.
108 |     fn from(value: OpStackEnrData) -> Vec<u8> {
109 |         let mut chain_id_buf = encode::u128_buffer();
110 |         let chain_id_slice = encode::u128(value.chain_id as u128, &mut chain_id_buf);
111 | 
112 |         let mut version_buf = encode::u128_buffer();
113 |         let version_slice = encode::u128(value.version as u128, &mut version_buf);
114 | 
115 |         let opstack = [chain_id_slice, version_slice].concat();
116 | 
117 |         rlp::encode(&opstack).to_vec()
118 |     }
119 | }
120 | 
121 | /// Default bootnodes to use. Currently consists of 2 Base bootnodes & 1 Optimism bootnode.
122 | fn bootnodes() -> Vec<Enr<CombinedKey>> {
123 |     let bootnodes = [
124 |         "enr:-J64QBbwPjPLZ6IOOToOLsSjtFUjjzN66qmBZdUexpO32Klrc458Q24kbty2PdRaLacHM5z-cZQr8mjeQu3pik6jPSOGAYYFIqBfgmlkgnY0gmlwhDaRWFWHb3BzdGFja4SzlAUAiXNlY3AyNTZrMaECmeSnJh7zjKrDSPoNMGXoopeDF4hhpj5I0OsQUUt4u8uDdGNwgiQGg3VkcIIkBg",
125 |         "enr:-J64QAlTCDa188Hl1OGv5_2Kj2nWCsvxMVc_rEnLtw7RPFbOfqUOV6khXT_PH6cC603I2ynY31rSQ8sI9gLeJbfFGaWGAYYFIrpdgmlkgnY0gmlwhANWgzCHb3BzdGFja4SzlAUAiXNlY3AyNTZrMaECkySjcg-2v0uWAsFsZZu43qNHppGr2D5F913Qqs5jDCGDdGNwgiQGg3VkcIIkBg",
126 |         "enr:-J24QGEzN4mJgLWNTUNwj7riVJ2ZjRLenOFccl2dbRFxHHOCCZx8SXWzgf-sLzrGs6QgqSFCvGXVgGPBkRkfOWlT1-iGAYe6Cu93gmlkgnY0gmlwhCJBEUSHb3BzdGFja4OkAwCJc2VjcDI1NmsxoQLuYIwaYOHg3CUQhCkS-RsSHmUd1b_x93-9yQ5ItS6udIN0Y3CCIyuDdWRwgiMr",
127 | 
128 |         // Base bootnodes
129 |         "enr:-J24QNz9lbrKbN4iSmmjtnr7SjUMk4zB7f1krHZcTZx-JRKZd0kA2gjufUROD6T3sOWDVDnFJRvqBBo62zuF-hYCohOGAYiOoEyEgmlkgnY0gmlwhAPniryHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQKNVFlCxh_B-716tTs-h1vMzZkSs1FTu_OYTNjgufplG4N0Y3CCJAaDdWRwgiQG",
130 |         "enr:-J24QH-f1wt99sfpHy4c0QJM-NfmsIfmlLAMMcgZCUEgKG_BBYFc6FwYgaMJMQN5dsRBJApIok0jFn-9CS842lGpLmqGAYiOoDRAgmlkgnY0gmlwhLhIgb2Hb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJ9FTIv8B9myn1MWaC_2lJ-sMoeCDkusCsk4BYHjjCq04N0Y3CCJAaDdWRwgiQG",
131 |         "enr:-J24QDXyyxvQYsd0yfsN0cRr1lZ1N11zGTplMNlW4xNEc7LkPXh0NAJ9iSOVdRO95GPYAIc6xmyoCCG6_0JxdL3a0zaGAYiOoAjFgmlkgnY0gmlwhAPckbGHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJwoS7tzwxqXSyFL7g0JM-KWVbgvjfB8JA__T7yY_cYboN0Y3CCJAaDdWRwgiQG",
132 |         "enr:-J24QHmGyBwUZXIcsGYMaUqGGSl4CFdx9Tozu-vQCn5bHIQbR7On7dZbU61vYvfrJr30t0iahSqhc64J46MnUO2JvQaGAYiOoCKKgmlkgnY0gmlwhAPnCzSHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQINc4fSijfbNIiGhcgvwjsjxVFJHUstK9L1T8OTKUjgloN0Y3CCJAaDdWRwgiQG",
133 |         "enr:-J24QG3ypT4xSu0gjb5PABCmVxZqBjVw9ca7pvsI8jl4KATYAnxBmfkaIuEqy9sKvDHKuNCsy57WwK9wTt2aQgcaDDyGAYiOoGAXgmlkgnY0gmlwhDbGmZaHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQIeAK_--tcLEiu7HvoUlbV52MspE0uCocsx1f_rYvRenIN0Y3CCJAaDdWRwgiQG",
134 |     ];
135 | 
136 |     bootnodes
137 |         .iter()
138 |         .filter_map(|enr| Enr::from_str(enr).ok())
139 |         .collect()
140 | }
141 | 
--------------------------------------------------------------------------------
/src/derive/state.rs:
--------------------------------------------------------------------------------
1 | use std::{collections::BTreeMap, sync::Arc};
2 | 
3 | use ethers::{
4 |     providers::{Http, Middleware, Provider},
5 |     types::H256,
6 | };
7 | 
8 | use crate::{
9 |     common::{BlockInfo, Epoch},
10 |     config::Config,
11 |     driver::HeadInfo,
12 |     l1::L1Info,
13 | };
14 | 
15 | /// Represents the current derivation state. Consists of cached L1 & L2 blocks, and details of the current safe head & safe epoch.
16 | pub struct State {
17 |     /// Map of L1 blocks from the current L1 safe epoch - ``seq_window_size``
18 |     l1_info: BTreeMap<H256, L1Info>,
19 |     /// Map of L1 block hashes from the current L1 safe epoch - ``seq_window_size``
20 |     l1_hashes: BTreeMap<u64, H256>,
21 |     /// Map of L2 blocks from the current L2 safe head - (``max_seq_drift`` / ``blocktime``)
22 |     l2_refs: BTreeMap<u64, (BlockInfo, Epoch)>,
23 |     /// The current safe head
24 |     pub safe_head: BlockInfo,
25 |     /// The current safe epoch
26 |     pub safe_epoch: Epoch,
27 |     /// The current epoch number. Same as the first L1 block number in this sequencing window.
28 |     pub current_epoch_num: u64,
29 |     /// Global config
30 |     config: Arc<Config>,
31 | }
32 | 
33 | impl State {
34 |     /// Creates a new [State] and fetches and caches a range of L2 blocks.
35 |     pub async fn new(
36 |         finalized_head: BlockInfo,
37 |         finalized_epoch: Epoch,
38 |         provider: &Provider<Http>,
39 |         config: Arc<Config>,
40 |     ) -> Self {
41 |         let l2_refs = l2_refs(finalized_head.number, provider, &config).await;
42 | 
43 |         Self {
44 |             l1_info: BTreeMap::new(),
45 |             l1_hashes: BTreeMap::new(),
46 |             l2_refs,
47 |             safe_head: finalized_head,
48 |             safe_epoch: finalized_epoch,
49 |             current_epoch_num: 0,
50 |             config,
51 |         }
52 |     }
53 | 
54 |     /// Returns a cached L1 block by block hash
55 |     pub fn l1_info_by_hash(&self, hash: H256) -> Option<&L1Info> {
56 |         self.l1_info.get(&hash)
57 |     }
58 | 
59 |     /// Returns a cached L1 block by block number
60 |     pub fn l1_info_by_number(&self, num: u64) -> Option<&L1Info> {
61 |         self.l1_hashes
62 |             .get(&num)
63 |             .and_then(|hash| self.l1_info.get(hash))
64 |     }
65 | 
66 |     /// Returns a cached L2 block by block timestamp
67 |     pub fn l2_info_by_timestamp(&self, timestamp: u64) -> Option<&(BlockInfo, Epoch)> {
68 |         let block_num = (timestamp - self.config.chain.l2_genesis.timestamp)
69 |             / self.config.chain.blocktime
70 |             + self.config.chain.l2_genesis.number;
71 | 
72 |         self.l2_refs.get(&block_num)
73 |     }
74 | 
75 |     /// Returns an epoch from an L1 block hash
76 |     pub fn epoch_by_hash(&self, hash: H256) -> Option<Epoch> {
77 |         self.l1_info_by_hash(hash).map(|info| Epoch {
78 |             number: info.block_info.number,
79 |             hash: info.block_info.hash,
80 |             timestamp: info.block_info.timestamp,
81 |         })
82 |     }
83 | 
84 |     /// Returns an epoch by number. Same as the first L1 block number in the epoch's sequencing window.
85 |     pub fn epoch_by_number(&self, num: u64) -> Option<Epoch> {
86 |         self.l1_info_by_number(num).map(|info| Epoch {
87 |             number: info.block_info.number,
88 |             hash: info.block_info.hash,
89 |             timestamp: info.block_info.timestamp,
90 |         })
91 |     }
92 | 
93 |     /// Inserts data from the ``l1_info`` parameter into ``l1_hashes`` & ``l1_info`` maps.
94 |     ///
95 |     /// This also updates ``current_epoch_num`` to the block number of the given ``l1_info``.
96 |     pub fn update_l1_info(&mut self, l1_info: L1Info) {
97 |         self.current_epoch_num = l1_info.block_info.number;
98 | 
99 |         self.l1_hashes
100 |             .insert(l1_info.block_info.number, l1_info.block_info.hash);
101 |         self.l1_info.insert(l1_info.block_info.hash, l1_info);
102 | 
103 |         self.prune();
104 |     }
105 | 
106 |     /// Resets the state and updates the safe head with the given parameters.
107 |     ///
108 |     /// ``current_epoch_num`` is set to 0.
109 |     ///
110 |     /// ``l1_info`` & ``l1_hashes`` mappings are cleared.
111 |     pub fn purge(&mut self, safe_head: BlockInfo, safe_epoch: Epoch) {
112 |         self.current_epoch_num = 0;
113 |         self.l1_info.clear();
114 |         self.l1_hashes.clear();
115 | 
116 |         self.update_safe_head(safe_head, safe_epoch);
117 |     }
118 | 
119 |     /// Sets ``safe_head`` & ``safe_epoch`` to the given parameters.
120 |     ///
121 |     /// Also inserts these details into ``l2_refs``.
122 |     pub fn update_safe_head(&mut self, safe_head: BlockInfo, safe_epoch: Epoch) {
123 |         self.safe_head = safe_head;
124 |         self.safe_epoch = safe_epoch;
125 | 
126 |         self.l2_refs
127 |             .insert(self.safe_head.number, (self.safe_head, self.safe_epoch));
128 |     }
129 | 
130 |     /// Removes keys from ``l1_info`` & ``l1_hashes`` mappings if older than ``self.safe_epoch.number`` - ``seq_window_size``.
131 |     ///
132 |     /// Removes keys from the ``l2_refs`` mapping if older than ``self.safe_head.number`` - (``max_seq_drift`` / ``blocktime``)
133 |     fn prune(&mut self) {
134 |         let prune_until = self
135 |             .safe_epoch
136 |             .number
137 |             .saturating_sub(self.config.chain.seq_window_size);
138 | 
139 |         while let Some((block_num, block_hash)) = self.l1_hashes.first_key_value() {
140 |             if *block_num >= prune_until {
141 |                 break;
142 |             }
143 | 
144 |             self.l1_info.remove(block_hash);
145 |             self.l1_hashes.pop_first();
146 |         }
147 | 
148 |         let prune_until =
149 |             self.safe_head.number - self.config.chain.max_seq_drift / self.config.chain.blocktime;
150 | 
151 |         while let Some((num, _)) = self.l2_refs.first_key_value() {
152 |             if *num >= prune_until {
153 |                 break;
154 |             }
155 | 
156 |             self.l2_refs.pop_first();
157 |         }
158 |     }
159 | }
160 | 
161 | /// Returns the L2 blocks from the given ``head_num`` - (``max_seq_drift`` / ``blocktime``) to ``head_num``.
162 | ///
163 | /// If the lookback period is before the genesis block, it will return L2 blocks starting from genesis.
164 | async fn l2_refs(
165 |     head_num: u64,
166 |     provider: &Provider<Http>,
167 |     config: &Config,
168 | ) -> BTreeMap<u64, (BlockInfo, Epoch)> {
169 |     let lookback = config.chain.max_seq_drift / config.chain.blocktime;
170 |     let start = head_num
171 |         .saturating_sub(lookback)
172 |         .max(config.chain.l2_genesis.number);
173 | 
174 |     let mut refs = BTreeMap::new();
175 |     for i in start..=head_num {
176 |         let l2_block = provider.get_block_with_txs(i).await;
177 |         if let Ok(Some(l2_block)) = l2_block {
178 |             match HeadInfo::try_from_l2_block(config, l2_block) {
179 |                 Ok(head_info) => {
180 |                     refs.insert(
181 |                         head_info.l2_block_info.number,
182 |                         (head_info.l2_block_info, head_info.l1_epoch),
183 |                     );
184 |                 }
185 |                 Err(e) => {
186 |                     tracing::warn!(err = ?e, "could not get head info for L2 block {}", i);
187 |                 }
188 |             }
189 |         }
190 |     }
191 | 
192 |     refs
193 | }
194 | 
--------------------------------------------------------------------------------
/src/derive/mod.rs:
--------------------------------------------------------------------------------
1 | use std::sync::{mpsc, Arc, RwLock};
2 | 
3 | use bytes::Bytes;
4 | use eyre::Result;
5 | 
6 | use crate::{config::Config, engine::PayloadAttributes};
7 | 
8 | use self::{
9 |     stages::{
10 |         attributes::Attributes,
11 |         batcher_transactions::{BatcherTransactionMessage, BatcherTransactions},
12 |         batches::Batches,
13 |         channels::Channels,
14 |     },
15 |     state::State,
16 | };
17 | 
18 | /// A module that handles the block derivation stages
19 | pub mod stages;
20 | /// A module that keeps track of the current derivation state, caching previous L1 and L2 blocks
21 | pub mod state;
22 | 
23 | /// A module that handles the Ecotone hardfork upgrade
24 | pub mod ecotone_upgrade;
25 | pub use ecotone_upgrade::get_ecotone_upgrade_transactions;
26 | 
27 | /// A module that extends the [Iterator] trait with a `purge` method
28 | mod purgeable;
29 | pub use purgeable::PurgeableIterator;
30 | 
31 | /// The derivation pipeline is iterated on to update attributes for new blocks.
32 | pub struct Pipeline {
33 |     /// A channel sender to send a `BatcherTransactionMessage`
34 |     batcher_transaction_sender: mpsc::Sender<BatcherTransactionMessage>,
35 |     /// An `Attributes` object
36 |     attributes: Attributes,
37 |     /// Pending `PayloadAttributes`
38 |     pending_attributes: Option<PayloadAttributes>,
39 | }
40 | 
41 | impl Iterator for Pipeline {
42 |     type Item = PayloadAttributes;
43 | 
44 |     /// Returns the pending [PayloadAttributes].
45 |     /// If none exist it will call `Attributes::next()` to advance to the next block and return those attributes instead.
46 |     fn next(&mut self) -> Option<Self::Item> {
47 |         if self.pending_attributes.is_some() {
48 |             self.pending_attributes.take()
49 |         } else {
50 |             self.attributes.next()
51 |         }
52 |     }
53 | }
54 | 
55 | impl Pipeline {
56 |     /// Creates a new [Pipeline] and initializes Batcher Transactions, [Channels], [Batches], and [Attributes]
57 |     pub fn new(state: Arc<RwLock<State>>, config: Arc<Config>, seq: u64) -> Result<Self> {
58 |         let (tx, rx) = mpsc::channel();
59 |         let batcher_transactions = BatcherTransactions::new(rx);
60 |         let channels = Channels::new(batcher_transactions, config.clone());
61 |         let batches = Batches::new(channels, state.clone(), config.clone());
62 |         let attributes = Attributes::new(Box::new(batches), state, config, seq);
63 | 
64 |         Ok(Self {
65 |             batcher_transaction_sender: tx,
66 |             attributes,
67 |             pending_attributes: None,
68 |         })
69 |     }
70 | 
71 |     /// Sends Batcher Transactions & the L1 block they were received in to the Batcher Transactions receiver.
72 |     pub fn push_batcher_transactions(&self, txs: Vec<Bytes>, l1_origin: u64) -> Result<()> {
73 |         self.batcher_transaction_sender
74 |             .send(BatcherTransactionMessage { txs, l1_origin })?;
75 |         Ok(())
76 |     }
77 | 
78 |     /// Returns a reference to the pending [PayloadAttributes].
79 |     /// If none are pending, it will call `self.next()` to advance to the next block and return those attributes instead.
80 |     pub fn peek(&mut self) -> Option<&PayloadAttributes> {
81 |         if self.pending_attributes.is_none() {
82 |             let next_attributes = self.next();
83 |             self.pending_attributes = next_attributes;
84 |         }
85 | 
86 |         self.pending_attributes.as_ref()
87 |     }
88 | 
89 |     /// Resets the state of `self.attributes` by calling `Attributes::purge()`
90 |     pub fn purge(&mut self) -> Result<()> {
91 |         self.attributes.purge();
92 |         Ok(())
93 |     }
94 | }
95 | 
96 | #[cfg(test)]
97 | mod tests {
98 |     use std::{
99 |         env,
100 |         sync::{Arc, RwLock},
101 |     };
102 | 
103 |     use ethers::{
104 |         providers::{Middleware, Provider},
105 |         types::H256,
106 |         utils::keccak256,
107 |     };
108 | 
109 |     use crate::{
110 |         common::RawTransaction,
111 |         config::{ChainConfig, Config},
112 |         derive::*,
113 |         l1::{BlockUpdate, ChainWatcher},
114 |     };
115 | 
116 |     #[tokio::test(flavor = "multi_thread")]
117 |     async fn test_attributes_match() {
118 |         if std::env::var("L1_TEST_RPC_URL").is_ok() && std::env::var("L2_TEST_RPC_URL").is_ok() {
119 |             let rpc = env::var("L1_TEST_RPC_URL").unwrap();
120 |             let l2_rpc = env::var("L2_TEST_RPC_URL").unwrap();
121 | 
122 |             let config = Arc::new(Config {
123 |                 l1_rpc_url: rpc.to_string(),
124 |                 l1_beacon_url: String::new(),
125 |                 l2_rpc_url: l2_rpc.to_string(),
126 |                 chain: ChainConfig::optimism_sepolia(),
127 |                 l2_engine_url: String::new(),
128 |                 jwt_secret: String::new(),
129 |                 checkpoint_sync_url: None,
130 |                 rpc_port: 9545,
131 |                 rpc_addr: "127.0.0.1".to_string(),
132 |                 devnet: false,
133 |             });
134 | 
135 |             let mut chain_watcher = ChainWatcher::new(
136 |                 config.chain.l1_start_epoch.number,
137 |                 config.chain.l2_genesis.number,
138 |                 config.clone(),
139 |             )
140 |             .unwrap();
141 | 
142 |             chain_watcher.start().unwrap();
143 | 
144 |             let provider = Provider::try_from(env::var("L2_TEST_RPC_URL").unwrap()).unwrap();
145 |             let state = Arc::new(RwLock::new(
146 |                 State::new(
147 |                     config.chain.l2_genesis,
148 |                     config.chain.l1_start_epoch,
149 |                     &provider,
150 |                     config.clone(),
151 |                 )
152 |                 .await,
153 |             ));
154 | 
155 |             let mut pipeline = Pipeline::new(state.clone(), config.clone(), 0).unwrap();
156 | 
157 |             chain_watcher.recv_from_channel().await.unwrap();
158 |             let update = chain_watcher.recv_from_channel().await.unwrap();
159 | 
160 |             let l1_info = match update {
161 |                 BlockUpdate::NewBlock(block) => *block,
162 |                 _ => panic!("wrong update type"),
163 |             };
164 | 
165 |             pipeline
166 |                 .push_batcher_transactions(
167 |                     l1_info.batcher_transactions.clone(),
168 |                     l1_info.block_info.number,
169 |                 )
170 |                 .unwrap();
171 | 
172 |             state.write().unwrap().update_l1_info(l1_info);
173 | 
174 |             if let Some(payload) = pipeline.next() {
175 |                 let hashes = get_tx_hashes(&payload.transactions.unwrap());
176 |                 let expected_hashes = get_expected_hashes(config.chain.l2_genesis.number + 1).await;
177 | 
178 |                 assert_eq!(hashes, expected_hashes);
179 |             }
180 |         }
181 |     }
182 | 
183 |     async fn get_expected_hashes(block_num: u64) -> Vec<H256> {
184 |         let provider = Provider::try_from(env::var("L2_TEST_RPC_URL").unwrap()).unwrap();
185 | 
186 |         provider
187 |             .get_block(block_num)
188 |             .await
189 |             .unwrap()
190 |             .unwrap()
191 |             .transactions
192 |     }
193 | 
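    // NOTE: illustrative sketch only (editor's addition, not part of the original
    // module): how a caller might drive the pipeline outside of tests. `state`,
    // `config`, and `l1_info` are assumed to be initialized as in
    // `test_attributes_match` above.
    //
    //     let mut pipeline = Pipeline::new(state.clone(), config.clone(), 0)?;
    //     pipeline.push_batcher_transactions(
    //         l1_info.batcher_transactions.clone(),
    //         l1_info.block_info.number,
    //     )?;
    //     state.write().unwrap().update_l1_info(l1_info);
    //     while let Some(attributes) = pipeline.next() {
    //         // hand `attributes` to the engine driver to build the next L2 block
    //     }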
194 |     fn get_tx_hashes(txs: &[RawTransaction]) -> Vec<H256> {
195 |         txs.iter()
196 |             .map(|tx| H256::from_slice(&keccak256(&tx.0)))
197 |             .collect()
198 |     }
199 | }
200 | 
--------------------------------------------------------------------------------
/src/driver/info.rs:
--------------------------------------------------------------------------------
1 | use crate::config::Config;
2 | use crate::driver::types::HeadInfo;
3 | use ethers::middleware::Middleware;
4 | use ethers::providers::{JsonRpcClient, Provider, ProviderError};
5 | use ethers::types::{Block, BlockId, BlockNumber, Transaction};
6 | 
7 | /// An asynchronous trait for fetching blocks along with their transactions.
8 | #[async_trait::async_trait]
9 | pub trait InnerProvider {
10 |     /// Retrieves a block and its transactions
11 |     async fn get_block_with_txs(
12 |         &self,
13 |         block_id: BlockId,
14 |     ) -> Result<Option<Block<Transaction>>, ProviderError>;
15 | }
16 | 
17 | /// Wrapper around a [Provider]
18 | pub struct HeadInfoFetcher<'a, P: JsonRpcClient> {
19 |     /// An ethers [Provider] implementing the [JsonRpcClient] trait
20 |     inner: &'a Provider<P>,
21 | }
22 | 
23 | impl<'a, P: JsonRpcClient> From<&'a Provider<P>> for HeadInfoFetcher<'a, P> {
24 |     /// Converts a [Provider] to a [HeadInfoFetcher]
25 |     fn from(inner: &'a Provider<P>) -> Self {
26 |         Self { inner }
27 |     }
28 | }
29 | 
30 | #[async_trait::async_trait]
31 | impl<'a, P: JsonRpcClient> InnerProvider for HeadInfoFetcher<'a, P> {
32 |     /// Fetches a block with transactions
33 |     async fn get_block_with_txs(
34 |         &self,
35 |         block_id: BlockId,
36 |     ) -> Result<Option<Block<Transaction>>, ProviderError> {
37 |         self.inner.get_block_with_txs(block_id).await
38 |     }
39 | }
40 | 
41 | /// Provides a method to fetch the latest finalized block
42 | pub struct HeadInfoQuery {}
43 | 
44 | impl HeadInfoQuery {
45 |     /// Fetches the latest finalized L2 block
46 |     pub async fn get_head_info<P: InnerProvider>(p: &P, config: &Config) -> HeadInfo {
47 |         let parsed_head_info = match p
48 |             .get_block_with_txs(BlockId::Number(BlockNumber::Finalized))
49 |             .await
50 |         {
51 |             Ok(Some(block)) => match HeadInfo::try_from_l2_block(config, block) {
52 |                 Ok(head_info) => Some(head_info),
53 |                 Err(e) => {
54 |                     tracing::debug!(err = ?e, "could not parse L2 block into head info");
55 |                     None
56 |                 }
57 |             },
58 |             e => {
59 |                 tracing::debug!("could not get finalized L2 block: {:?}", e);
60 |                 None
61 |             }
62 |         };
63 | 
64 |         if let Some(head_info) = parsed_head_info {
65 |             head_info
66 |         } else {
67 |             tracing::warn!("could not get head info. Falling back to the genesis head.");
68 |             HeadInfo {
69 |                 l2_block_info: config.chain.l2_genesis,
70 |                 l1_epoch: config.chain.l1_start_epoch,
71 |                 sequence_number: 0,
72 |             }
73 |         }
74 |     }
75 | }
76 | 
77 | #[cfg(all(test, feature = "test-utils"))]
78 | mod test_utils {
79 |     use super::*;
80 |     use crate::common::{BlockInfo, Epoch};
81 |     use crate::config::{ChainConfig, Config};
82 |     use ethers::types::H256;
83 |     use std::str::FromStr;
84 | 
85 |     pub struct MockProvider {
86 |         pub block: Option<Block<Transaction>>,
87 |     }
88 | 
89 |     pub fn mock_provider(block: Option<Block<Transaction>>) -> MockProvider {
90 |         MockProvider { block }
91 |     }
92 | 
93 |     pub fn default_head_info() -> HeadInfo {
94 |         HeadInfo {
95 |             l2_block_info: BlockInfo {
96 |                 hash: H256::from_str(
97 |                     "dbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3",
98 |                 )
99 |                 .unwrap(),
100 |                 number: 105235063,
101 |                 parent_hash: H256::from_str(
102 |                     "21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50",
103 |                 )
104 |                 .unwrap(),
105 |                 timestamp: 1686068903,
106 |             },
107 |             l1_epoch: Epoch {
108 |                 number: 17422590,
109 |                 hash: H256::from_str(
110 |                     "438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108",
111 |                 )
112 |                 .unwrap(),
113 |                 timestamp: 1686068903,
114 |             },
115 |             sequence_number: 0,
116 |         }
117 |     }
118 | 
119 |     pub fn valid_block() -> Option<Block<Transaction>> {
120 |         let raw_block = r#"{
121 |             "hash": "0x2e4f4aff36bb7951be9742ad349fb1db84643c6bbac5014f3d196fd88fe333eb",
122 |             "parentHash": "0xeccf4c06ad0d27be1cadee5720a509d31a9de0462b52f2cf6045d9a73c9aa504",
123 |             "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
124 |             "miner": "0x4200000000000000000000000000000000000011",
125 |             "stateRoot": "0x5905b2423f299a29db41e377d7ceadf4baa49eed04e1b72957e8c0985e04e730",
126 |             "transactionsRoot": "0x030e481411042a769edde83d790d583ed69f9d3098d4a78d00e008f749fcfd97",
127 |             "receiptsRoot": "0x29079b696c12a19999f3bb303fddb6fc12fb701f427678cca24954b91080ada3",
128 |             "number": "0x7fe52f",
129 |             "gasUsed": "0xb711",
130 |             "gasLimit": "0x17d7840",
131 |             "extraData": "0x",
132 |             "logsBloom":
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 133 | "timestamp": "0x644434c2", 134 | "difficulty": "0x0", 135 | "totalDifficulty": "0x0", 136 | "sealFields": [], 137 | "uncles": [], 138 | "transactions": [], 139 | "size": "0x365", 140 | "mixHash": "0x7aeec5550a9b0616701e49ab835af5f10eadba2a0582016f0e256c9cace0c046", 141 | "nonce": "0x0000000000000000", 142 | "baseFeePerGas": "0x32" 143 | } 144 | "#; 145 | serde_json::from_str(raw_block).ok() 146 | } 147 | 148 | pub fn optimism_config() -> Config { 149 | Config { 150 | l1_rpc_url: Default::default(), 151 | l1_beacon_url: Default::default(), 152 | l2_rpc_url: Default::default(), 153 | l2_engine_url: Default::default(), 154 | chain: ChainConfig::optimism(), 155 | jwt_secret: Default::default(), 156 | checkpoint_sync_url: Default::default(), 157 | rpc_port: Default::default(), 158 | rpc_addr: Default::default(), 159 | devnet: false, 160 | } 161 | } 162 | 163 | #[async_trait::async_trait] 164 | impl InnerProvider for MockProvider { 165 | async fn get_block_with_txs( 166 | &self, 167 | _: BlockId, 168 | ) -> Result>, ProviderError> { 169 | Ok(self.block.clone()) 170 | } 171 | } 172 | } 173 | 174 | #[cfg(test)] 175 | mod tests { 176 | use super::*; 177 | 178 | #[tokio::test] 179 | async fn test_get_head_info_fails() { 180 | let provider = test_utils::mock_provider(None); 181 | let config = test_utils::optimism_config(); 182 | let head_info = HeadInfoQuery::get_head_info(&provider, &config).await; 183 | assert_eq!(test_utils::default_head_info(), head_info); 184 | } 185 | 186 | #[tokio::test] 187 | async fn test_get_head_info_empty_block() { 188 | let provider = test_utils::mock_provider(Some(Block::default())); 189 | let config = test_utils::optimism_config(); 190 | let head_info = HeadInfoQuery::get_head_info(&provider, &config).await; 191 | assert_eq!(test_utils::default_head_info(), head_info); 192 | } 193 | 194 | #[tokio::test] 195 | async fn test_get_head_info_valid_block() { 196 | let provider = test_utils::mock_provider(test_utils::valid_block()); 197 | let config = test_utils::optimism_config(); 198 | let head_info = HeadInfoQuery::get_head_info(&provider, &config).await; 199 | assert_eq!(test_utils::default_head_info(), head_info); 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /src/engine/payload.rs: -------------------------------------------------------------------------------- 1 | use ethers::types::{Block, Bytes, Transaction, H160, H256, U64}; 2 | use eyre::Result; 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::{ 6 | common::{Epoch, RawTransaction}, 7 | config::SystemAccounts, 8 | }; 9 | 10 | /// ## ExecutionPayload 11 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] 12 | #[serde(rename_all = "camelCase")] 13 | pub struct ExecutionPayload { 14 | /// A 32 byte hash of the parent payload 15 | pub parent_hash: H256, 16 | /// A 20 byte hash (aka Address) for the feeRecipient field of the new payload 17 | pub fee_recipient: H160, 18 | /// A 32 byte state root hash 19 | pub 
state_root: H256, 20 | /// A 32 byte receipt root hash 21 | pub receipts_root: H256, 22 | /// A 32 byte logs bloom filter 23 | pub logs_bloom: Bytes, 24 | /// A 32 byte beacon chain randomness value 25 | pub prev_randao: H256, 26 | /// A 64 bit number for the current block index 27 | pub block_number: U64, 28 | /// A 64 bit value for the gas limit 29 | pub gas_limit: U64, 30 | /// A 64 bit value for the gas used 31 | pub gas_used: U64, 32 | /// A 64 bit value for the timestamp field of the new payload 33 | pub timestamp: U64, 34 | /// 0 to 32 byte value for extra data 35 | pub extra_data: Bytes, 36 | /// 256 bits for the base fee per gas 37 | pub base_fee_per_gas: U64, 38 | /// The 32 byte block hash 39 | pub block_hash: H256, 40 | /// An array of transaction objects where each object is a byte list 41 | pub transactions: Vec<RawTransaction>, 42 | /// An array of beaconchain withdrawals. Always empty as this exists only for L1 compatibility 43 | #[serde(skip_serializing_if = "Option::is_none")] 44 | pub withdrawals: Option<Vec<()>>, 45 | /// None if not present (pre-Ecotone) 46 | #[serde(skip_serializing_if = "Option::is_none")] 47 | pub blob_gas_used: Option<U64>, 48 | /// None if not present (pre-Ecotone) 49 | #[serde(skip_serializing_if = "Option::is_none")] 50 | pub excess_blob_gas: Option<U64>, 51 | } 52 | 53 | impl TryFrom<Block<Transaction>> for ExecutionPayload { 54 | type Error = eyre::Report; 55 | 56 | /// Converts a [Block] to an [ExecutionPayload] 57 | fn try_from(value: Block<Transaction>) -> Result<Self> { 58 | let encoded_txs = (*value 59 | .transactions 60 | .into_iter() 61 | .map(|tx| RawTransaction(tx.rlp().to_vec())) 62 | .collect::<Vec<RawTransaction>>()) 63 | .to_vec(); 64 | 65 | Ok(ExecutionPayload { 66 | parent_hash: value.parent_hash, 67 | fee_recipient: ethers::types::Address::from_slice( 68 | SystemAccounts::default().fee_vault.as_slice(), 69 | ), 70 | state_root: value.state_root, 71 | receipts_root: value.receipts_root, 72 | logs_bloom: value.logs_bloom.unwrap().as_bytes().to_vec().into(), 73 | prev_randao: value.mix_hash.unwrap(), 74 | block_number: value.number.unwrap(), 75 | gas_limit: value.gas_limit.as_u64().into(), 76 | gas_used: value.gas_used.as_u64().into(), 77 | timestamp: value.timestamp.as_u64().into(), 78 | extra_data: value.extra_data.clone(), 79 | base_fee_per_gas: value 80 | .base_fee_per_gas 81 | .unwrap_or_else(|| 0u64.into()) 82 | .as_u64() 83 | .into(), 84 | block_hash: value.hash.unwrap(), 85 | transactions: encoded_txs, 86 | withdrawals: Some(Vec::new()), 87 | blob_gas_used: value.blob_gas_used.map(|v| v.as_u64().into()), 88 | excess_blob_gas: value.excess_blob_gas.map(|v| v.as_u64().into()), 89 | }) 90 | } 91 | } 92 | 93 | /// ## PayloadAttributes 94 | /// 95 | /// L2 extended payload attributes for Optimism. 96 | /// For more details, visit the [Optimism specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#extended-payloadattributesv1). 97 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] 98 | #[serde(rename_all = "camelCase")] 99 | pub struct PayloadAttributes { 100 | /// 64 bit value for the timestamp field of the new payload. 101 | pub timestamp: U64, 102 | /// 32 byte value for the prevRandao field of the new payload. 103 | pub prev_randao: H256, 104 | /// 20 bytes suggested value for the feeRecipient field of the new payload. 105 | pub suggested_fee_recipient: H160, 106 | /// Array of transactions to be included in the new payload.
107 | pub transactions: Option<Vec<RawTransaction>>, 108 | /// Boolean value indicating whether or not the payload should be built without including transactions from the txpool. 109 | pub no_tx_pool: bool, 110 | /// 64 bit value for the gasLimit field of the new payload. 111 | /// The gasLimit is optional w.r.t. compatibility with L1, but required when used as rollup. 112 | /// This field overrides the gas limit used during block-building. 113 | /// If not specified as rollup, a STATUS_INVALID is returned. 114 | pub gas_limit: U64, 115 | /// Beaconchain withdrawals. This exists only for compatibility with L1, and is not used. Prior 116 | /// to Canyon, this value is always None. After Canyon it is an empty array. Note that we use 117 | /// the () type here since we never have a non-empty array. 118 | pub withdrawals: Option<Vec<()>>, 119 | /// The batch epoch number from derivation. This value is not expected by the engine and is skipped 120 | /// during serialization and deserialization. 121 | #[serde(skip)] 122 | pub epoch: Option<Epoch>, 123 | /// The L1 block number when this batch was first fully derived. This value is not expected by 124 | /// the engine and is skipped during serialization and deserialization. 125 | #[serde(skip)] 126 | pub l1_inclusion_block: Option<u64>, 127 | /// The L2 sequence number of the block. This value is not expected by the engine and is 128 | /// skipped during serialization and deserialization. 129 | #[serde(skip)] 130 | pub seq_number: Option<u64>, 131 | } 132 | 133 | /// ## PayloadId 134 | pub type PayloadId = U64; 135 | 136 | /// ## PayloadStatus 137 | /// 138 | /// The status of a payload. 139 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 140 | #[serde(rename_all = "camelCase")] 141 | pub struct PayloadStatus { 142 | /// The status of the payload. 143 | pub status: Status, 144 | /// 32 Bytes - the hash of the most recent valid block in the branch defined by payload and its ancestors 145 | pub latest_valid_hash: Option<H256>, 146 | /// A message providing additional details on the validation error if the payload is classified as INVALID or INVALID_BLOCK_HASH. 147 | #[serde(default)] 148 | pub validation_error: Option<String>, 149 | } 150 | 151 | /// ## Status 152 | /// 153 | /// The status of the payload. 154 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 155 | #[serde(rename_all = "SCREAMING_SNAKE_CASE")] 156 | pub enum Status { 157 | /// Valid Payload 158 | Valid, 159 | /// Invalid Payload 160 | Invalid, 161 | /// Currently syncing 162 | Syncing, 163 | /// Payload is accepted 164 | Accepted, 165 | /// Payload contains an invalid block hash 166 | InvalidBlockHash, 167 | } 168 | 169 | #[cfg(test)] 170 | mod tests { 171 | 172 | use ethers::{ 173 | providers::{Http, Middleware, Provider}, 174 | types::H256, 175 | }; 176 | use eyre::Result; 177 | 178 | use crate::engine::ExecutionPayload; 179 | 180 | #[tokio::test] 181 | async fn test_from_block_hash_to_execution_payload() -> Result<()> { 182 | if std::env::var("L2_TEST_RPC_URL").is_ok() { 183 | let checkpoint_hash: H256 = 184 | "0xc2794a16acacd9f7670379ffd12b6968ff98e2a602f57d7d1f880220aa5a4973".parse()?; 185 | 186 | let l2_rpc = std::env::var("L2_TEST_RPC_URL")?; 187 | let checkpoint_sync_url = Provider::<Http>::try_from(l2_rpc)?; 188 | let checkpoint_block = checkpoint_sync_url 189 | .get_block_with_txs(checkpoint_hash) 190 | .await?
191 | .unwrap(); 192 | 193 | let payload = ExecutionPayload::try_from(checkpoint_block)?; 194 | 195 | assert_eq!( 196 | payload.block_hash, 197 | "0xc2794a16acacd9f7670379ffd12b6968ff98e2a602f57d7d1f880220aa5a4973".parse()? 198 | ); 199 | assert_eq!(payload.block_number, 8453214u64.into()); 200 | assert_eq!(payload.base_fee_per_gas, 50u64.into()); 201 | } 202 | 203 | Ok(()) 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /src/rpc/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, net::SocketAddr, sync::Arc}; 2 | 3 | use crate::{ 4 | config::{Config, ExternalChainConfig}, 5 | version::Version, 6 | }; 7 | 8 | use eyre::Result; 9 | 10 | use ethers::{ 11 | providers::{Middleware, Provider}, 12 | types::{Block, BlockId, H256}, 13 | utils::keccak256, 14 | }; 15 | 16 | use jsonrpsee::{ 17 | core::{async_trait, Error}, 18 | proc_macros::rpc, 19 | server::ServerBuilder, 20 | }; 21 | 22 | use serde::{Deserialize, Serialize}; 23 | 24 | /// This trait defines a set of RPC methods that can be 25 | /// queried by clients under the `optimism` namespace 26 | #[rpc(server, namespace = "optimism")] 27 | pub trait Rpc { 28 | /// Returns the L2 output information for a given block. 29 | /// See the [Optimism spec](https://specs.optimism.io/protocol/rollup-node.html?highlight=rpc#l2-output-rpc-method) for more details 30 | #[method(name = "outputAtBlock")] 31 | async fn output_at_block(&self, block_number: u64) -> Result<OutputRootResponse, Error>; 32 | 33 | /// Returns the rollup configuration options. 34 | #[method(name = "rollupConfig")] 35 | async fn rollup_config(&self) -> Result<ExternalChainConfig, Error>; 36 | 37 | /// Returns details about the Magi version of the node. 38 | #[method(name = "version")] 39 | async fn version(&self) -> Result<String, Error>; 40 | } 41 | 42 | /// The Magi RPC server which implements the same `optimism` namespace methods as `op-node` 43 | #[derive(Debug)] 44 | pub struct RpcServerImpl { 45 | /// The Magi version of the node 46 | version: Version, 47 | /// The Magi [Config] 48 | config: Arc<Config>, 49 | } 50 | 51 | #[async_trait] 52 | impl RpcServer for RpcServerImpl { 53 | /// Returns the L2 output information for a given block. 54 | /// See the [Optimism spec](https://specs.optimism.io/protocol/rollup-node.html?highlight=rpc#l2-output-rpc-method) for more details 55 | async fn output_at_block(&self, block_number: u64) -> Result<OutputRootResponse, Error> { 56 | let l2_provider = convert_err(Provider::try_from(self.config.l2_rpc_url.clone()))?; 57 | 58 | let block = convert_err(l2_provider.get_block(block_number).await)? 59 | .ok_or(Error::Custom("unable to get block".to_string()))?; 60 | let state_root = block.state_root; 61 | let block_hash = block 62 | .hash 63 | .ok_or(Error::Custom("block hash not found".to_string()))?; 64 | let locations = vec![]; 65 | let block_id = Some(BlockId::from(block_hash)); 66 | 67 | let state_proof = convert_err( 68 | l2_provider 69 | .get_proof( 70 | ethers::types::Address::from_slice( 71 | self.config.chain.l2_to_l1_message_passer.as_slice(), 72 | ), 73 | locations, 74 | block_id, 75 | ) 76 | .await, 77 | )?; 78 | 79 | let withdrawal_storage_root = state_proof.storage_hash; 80 | 81 | let output_root = compute_l2_output_root(block, state_proof.storage_hash); 82 | 83 | let version: H256 = Default::default(); 84 | 85 | Ok(OutputRootResponse { 86 | output_root, 87 | version, 88 | state_root, 89 | withdrawal_storage_root, 90 | }) 91 | } 92 | 93 | /// Returns the rollup configuration options.
94 | async fn rollup_config(&self) -> Result<ExternalChainConfig, Error> { 95 | let config = (*self.config).clone(); 96 | 97 | Ok(ExternalChainConfig::from(config.chain)) 98 | } 99 | 100 | /// Returns details about the Magi version of the node. 101 | async fn version(&self) -> Result<String, Error> { 102 | Ok(self.version.to_string()) 103 | } 104 | } 105 | 106 | /// Converts a generic error to a [jsonrpsee::core::Error] if one exists 107 | fn convert_err<T, E: Display>(res: Result<T, E>) -> Result<T, Error> { 108 | res.map_err(|err| Error::Custom(err.to_string())) 109 | } 110 | 111 | /// Computes the L2 output root. 112 | /// Refer to the [Optimism Spec](https://specs.optimism.io/protocol/proposals.html#l2-output-commitment-construction) for details 113 | fn compute_l2_output_root(block: Block<H256>, storage_root: H256) -> H256 { 114 | let version: H256 = Default::default(); 115 | let digest = keccak256( 116 | [ 117 | version.to_fixed_bytes(), 118 | block.state_root.to_fixed_bytes(), 119 | storage_root.to_fixed_bytes(), 120 | block.hash.unwrap().to_fixed_bytes(), 121 | ] 122 | .concat(), 123 | ); 124 | 125 | H256::from_slice(&digest) 126 | } 127 | 128 | /// Starts the Magi RPC server 129 | pub async fn run_server(config: Arc<Config>) -> Result<SocketAddr> { 130 | let port = config.rpc_port; 131 | let addr = config.rpc_addr.clone(); 132 | 133 | let server = ServerBuilder::default() 134 | .build(format!("{}:{}", addr, port)) 135 | .await?; 136 | let addr = server.local_addr()?; 137 | let rpc_impl = RpcServerImpl { 138 | config, 139 | version: Version::build(), 140 | }; 141 | let handle = server.start(rpc_impl.into_rpc())?; 142 | 143 | // We don't do any graceful shutdown here, so we let the server run forever. 144 | // You may use the `ServerHandle` to shut it down or manage it yourself. 145 | tokio::spawn(handle.stopped()); 146 | tracing::info!("rpc server started at port {}", port); 147 | 148 | Ok(addr) 149 | } 150 | 151 | /// The response for the `optimism_outputAtBlock` RPC method.
152 | #[derive(Serialize, Deserialize, Clone)] 153 | #[serde(rename_all = "camelCase")] 154 | pub struct OutputRootResponse { 155 | /// The output root which serves as a commitment to the current state of the chain 156 | pub output_root: H256, 157 | /// The output root version number, beginning with 0 158 | pub version: H256, 159 | /// The state root 160 | pub state_root: H256, 161 | /// The 32 byte storage root of the `L2toL1MessagePasser` contract address 162 | pub withdrawal_storage_root: H256, 163 | } 164 | 165 | #[cfg(test)] 166 | mod tests { 167 | use super::*; 168 | use crate::config::{ChainConfig, CliConfig, ExternalChainConfig}; 169 | use reqwest; 170 | use serde_json::json; 171 | use std::{path::PathBuf, str::FromStr}; 172 | use tokio::time::{sleep, Duration}; 173 | use tracing_subscriber; 174 | 175 | #[derive(Serialize, Deserialize, Debug)] 176 | struct RpcResponse { 177 | jsonrpc: String, 178 | result: ExternalChainConfig, 179 | id: u64, 180 | } 181 | 182 | #[tokio::test] 183 | async fn test_run_server() -> Result<()> { 184 | // std::env::set_var("RUST_LOG", "trace"); 185 | let cli_config = CliConfig { 186 | l1_rpc_url: Some("".to_string()), 187 | l1_beacon_url: Some("".to_string()), 188 | l2_rpc_url: None, 189 | l2_engine_url: None, 190 | jwt_secret: Some("".to_string()), 191 | checkpoint_sync_url: None, 192 | rpc_port: Some(8080), 193 | rpc_addr: Some("127.0.0.1".to_string()), 194 | devnet: false, 195 | }; 196 | 197 | tracing_subscriber::fmt().init(); 198 | 199 | let config_path = PathBuf::from_str("config.toml")?; 200 | let config = Arc::new(Config::new( 201 | &config_path, 202 | cli_config, 203 | ChainConfig::optimism_sepolia(), 204 | )); 205 | 206 | let addr = run_server(config.clone()) 207 | .await 208 | .expect("Failed to start server"); 209 | 210 | sleep(Duration::from_millis(100)).await; 211 | 212 | let client = reqwest::Client::new(); 213 | 214 | let request_body = json!({ 215 | "jsonrpc": "2.0", 216 | "method": "optimism_rollupConfig", 217 | "params": [], 218 | "id": 1, 219 | }); 220 | 221 | let response = client 222 | .post(format!("http://{}", addr)) 223 | .json(&request_body) 224 | .send() 225 | .await 226 | .expect("Failed to send request"); 227 | 228 | assert!(response.status().is_success()); 229 | 230 | let rpc_response: RpcResponse = response.json().await.expect("Failed to parse response"); 231 | 232 | let rpc_chain_config: ChainConfig = rpc_response.result.into(); 233 | 234 | assert_eq!(config.chain.l2_genesis, rpc_chain_config.l2_genesis); 235 | assert_eq!( 236 | config.chain.l2_to_l1_message_passer, 237 | rpc_chain_config.l2_to_l1_message_passer 238 | ); 239 | 240 | println!("{:#?}", rpc_chain_config); 241 | Ok(()) 242 | } 243 | } 244 | -------------------------------------------------------------------------------- /src/network/service/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{net::SocketAddr, time::Duration}; 2 | 3 | use eyre::Result; 4 | use futures::{prelude::*, select}; 5 | use libp2p::{ 6 | gossipsub::{self, IdentTopic, Message, MessageId}, 7 | mplex::MplexConfig, 8 | noise, ping, 9 | swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, 10 | tcp, Multiaddr, PeerId, Swarm, Transport, 11 | }; 12 | use libp2p_identity::Keypair; 13 | use openssl::sha::sha256; 14 | 15 | use super::{handlers::Handler, service::types::NetworkAddress}; 16 | 17 | /// A module to handle peer discovery 18 | mod discovery; 19 | /// A module to handle commonly used types in the p2p system. 
20 | mod types; 21 | 22 | /// Responsible for management of the `Discv5` & `libp2p` services. 23 | pub struct Service { 24 | /// Handles validation & processing of inbound messages 25 | handlers: Vec<Box<dyn Handler>>, 26 | /// The socket address that the service is listening on. 27 | addr: SocketAddr, 28 | /// The chain ID of the network 29 | chain_id: u64, 30 | /// A unique keypair to validate the node's identity 31 | keypair: Option<Keypair>, 32 | } 33 | 34 | impl Service { 35 | /// Creates a new [Service] 36 | pub fn new(addr: SocketAddr, chain_id: u64) -> Self { 37 | Self { 38 | handlers: Vec::new(), 39 | addr, 40 | chain_id, 41 | keypair: None, 42 | } 43 | } 44 | 45 | /// Adds a handler to [Service] 46 | pub fn add_handler(mut self, handler: Box<dyn Handler>) -> Self { 47 | self.handlers.push(handler); 48 | self 49 | } 50 | 51 | /// Sets the keypair for [Service] 52 | pub fn set_keypair(mut self, keypair: Keypair) -> Self { 53 | self.keypair = Some(keypair); 54 | self 55 | } 56 | 57 | /// Starts the Discv5 peer discovery & libp2p services 58 | /// and continually listens for new peers and messages to handle 59 | pub fn start(mut self) -> Result<()> { 60 | let addr = NetworkAddress::try_from(self.addr)?; 61 | let keypair = self.keypair.unwrap_or_else(Keypair::generate_secp256k1); 62 | 63 | let mut swarm = create_swarm(keypair, &self.handlers)?; 64 | let mut peer_recv = discovery::start(addr, self.chain_id)?; 65 | 66 | let multiaddr = Multiaddr::from(addr); 67 | swarm 68 | .listen_on(multiaddr) 69 | .map_err(|_| eyre::eyre!("swarm listen failed"))?; 70 | 71 | let mut handlers = Vec::new(); 72 | handlers.append(&mut self.handlers); 73 | 74 | tokio::spawn(async move { 75 | loop { 76 | select! { 77 | peer = peer_recv.recv().fuse() => { 78 | if let Some(peer) = peer { 79 | let peer = Multiaddr::from(peer); 80 | _ = swarm.dial(peer); 81 | } 82 | }, 83 | event = swarm.select_next_some() => { 84 | if let SwarmEvent::Behaviour(event) = event { 85 | event.handle(&mut swarm, &handlers); 86 | } 87 | }, 88 | } 89 | } 90 | }); 91 | 92 | Ok(()) 93 | } 94 | } 95 | 96 | /// Computes the message ID of a `gossipsub` message 97 | fn compute_message_id(msg: &Message) -> MessageId { 98 | let mut decoder = snap::raw::Decoder::new(); 99 | let id = match decoder.decompress_vec(&msg.data) { 100 | Ok(data) => { 101 | let domain_valid_snappy: Vec<u8> = vec![0x1, 0x0, 0x0, 0x0]; 102 | sha256( 103 | [domain_valid_snappy.as_slice(), data.as_slice()] 104 | .concat() 105 | .as_slice(), 106 | )[..20] 107 | .to_vec() 108 | } 109 | Err(_) => { 110 | let domain_invalid_snappy: Vec<u8> = vec![0x0, 0x0, 0x0, 0x0]; 111 | sha256( 112 | [domain_invalid_snappy.as_slice(), msg.data.as_slice()] 113 | .concat() 114 | .as_slice(), 115 | )[..20] 116 | .to_vec() 117 | } 118 | }; 119 | 120 | MessageId(id) 121 | } 122 | 123 | /// Creates the libp2p [Swarm] 124 | fn create_swarm(keypair: Keypair, handlers: &[Box<dyn Handler>]) -> Result<Swarm<Behaviour>> { 125 | let transport = tcp::tokio::Transport::new(tcp::Config::default()) 126 | .upgrade(libp2p::core::upgrade::Version::V1Lazy) 127 | .authenticate(noise::Config::new(&keypair)?)
128 | .multiplex(MplexConfig::default()) 129 | .boxed(); 130 | 131 | let behaviour = Behaviour::new(handlers)?; 132 | 133 | Ok( 134 | SwarmBuilder::with_tokio_executor(transport, behaviour, PeerId::from(keypair.public())) 135 | .build(), 136 | ) 137 | } 138 | 139 | /// Specifies the [NetworkBehaviour] of the node 140 | #[derive(NetworkBehaviour)] 141 | #[behaviour(out_event = "Event")] 142 | struct Behaviour { 143 | /// Adds [libp2p::ping] to respond to inbound pings, and send periodic outbound pings 144 | ping: ping::Behaviour, 145 | /// Adds [libp2p::gossipsub] to enable gossipsub as the routing layer 146 | gossipsub: gossipsub::Behaviour, 147 | } 148 | 149 | impl Behaviour { 150 | /// Configures the swarm behaviors, subscribes to the gossip topics, and returns a new [Behaviour] 151 | fn new(handlers: &[Box<dyn Handler>]) -> Result<Self> { 152 | let ping = ping::Behaviour::default(); 153 | 154 | let gossipsub_config = gossipsub::ConfigBuilder::default() 155 | .mesh_n(8) 156 | .mesh_n_low(6) 157 | .mesh_n_high(12) 158 | .gossip_lazy(6) 159 | .heartbeat_interval(Duration::from_millis(500)) 160 | .fanout_ttl(Duration::from_secs(24)) 161 | .history_length(12) 162 | .history_gossip(3) 163 | .duplicate_cache_time(Duration::from_secs(65)) 164 | .validation_mode(gossipsub::ValidationMode::None) 165 | .validate_messages() 166 | .message_id_fn(compute_message_id) 167 | .build() 168 | .map_err(|_| eyre::eyre!("gossipsub config creation failed"))?; 169 | 170 | let mut gossipsub = 171 | gossipsub::Behaviour::new(gossipsub::MessageAuthenticity::Anonymous, gossipsub_config) 172 | .map_err(|_| eyre::eyre!("gossipsub behaviour creation failed"))?; 173 | 174 | handlers 175 | .iter() 176 | .flat_map(|handler| { 177 | handler 178 | .topics() 179 | .iter() 180 | .map(|topic| { 181 | let topic = IdentTopic::new(topic.to_string()); 182 | gossipsub 183 | .subscribe(&topic) 184 | .map_err(|_| eyre::eyre!("subscription failed")) 185 | }) 186 | .collect::<Vec<_>>() 187 | }) 188 | .collect::<Result<Vec<_>>>()?; 189 | 190 | Ok(Self { ping, gossipsub }) 191 | } 192 | } 193 | 194 | /// The type of message received 195 | enum Event { 196 | /// Represents a [ping::Event] 197 | #[allow(dead_code)] 198 | Ping(ping::Event), 199 | /// Represents a [gossipsub::Event] 200 | Gossipsub(gossipsub::Event), 201 | } 202 | 203 | impl Event { 204 | /// Handles received gossipsub messages. Ping messages are ignored. 205 | /// Reports back to [libp2p::gossipsub] to apply peer scoring and forward the message to other peers if accepted.
206 | fn handle(self, swarm: &mut Swarm<Behaviour>, handlers: &[Box<dyn Handler>]) { 207 | if let Self::Gossipsub(gossipsub::Event::Message { 208 | propagation_source, 209 | message_id, 210 | message, 211 | }) = self 212 | { 213 | let handler = handlers 214 | .iter() 215 | .find(|h| h.topics().contains(&message.topic)); 216 | if let Some(handler) = handler { 217 | let status = handler.handle(message); 218 | 219 | _ = swarm 220 | .behaviour_mut() 221 | .gossipsub 222 | .report_message_validation_result(&message_id, &propagation_source, status); 223 | } 224 | } 225 | } 226 | } 227 | 228 | impl From<ping::Event> for Event { 229 | /// Converts [ping::Event] to [Event] 230 | fn from(value: ping::Event) -> Self { 231 | Event::Ping(value) 232 | } 233 | } 234 | 235 | impl From<gossipsub::Event> for Event { 236 | /// Converts [gossipsub::Event] to [Event] 237 | fn from(value: gossipsub::Event) -> Self { 238 | Event::Gossipsub(value) 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /src/telemetry/logging.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | env::current_dir, 3 | path::{Path, PathBuf}, 4 | }; 5 | 6 | use tracing::Level; 7 | use tracing_appender::{ 8 | non_blocking::WorkerGuard, 9 | rolling::{self, RollingFileAppender, Rotation}, 10 | }; 11 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer}; 12 | 13 | use ansi_term::Colour::{Blue, Cyan, Purple, Red, Yellow}; 14 | 15 | /// Standard log file name prefix. This will be optionally appended with a timestamp 16 | /// depending on the rotation strategy. 17 | const LOG_FILE_NAME_PREFIX: &str = "magi.log"; 18 | 19 | /// Default log file rotation strategy. This can be overridden by the `logs_rotation` config. 20 | const DEFAULT_ROTATION: &str = "daily"; 21 | 22 | /// Configure logging telemetry with a global handler. 23 | pub fn init( 24 | verbose: bool, 25 | logs_dir: Option<String>, 26 | logs_rotation: Option<String>, 27 | ) -> Vec<WorkerGuard> { 28 | // If a directory is provided, log to file and stdout 29 | if let Some(dir) = logs_dir { 30 | let directory = PathBuf::from(dir); 31 | let rotation = 32 | get_rotation_strategy(&logs_rotation.unwrap_or_else(|| DEFAULT_ROTATION.into())); 33 | let appender = Some(get_rolling_file_appender( 34 | directory, 35 | rotation, 36 | LOG_FILE_NAME_PREFIX, 37 | )); 38 | return build_subscriber(verbose, appender); 39 | } 40 | 41 | // If no directory is provided, log to stdout only 42 | build_subscriber(verbose, None) 43 | } 44 | 45 | /// Subscriber Composer 46 | /// 47 | /// Builds a subscriber with multiple layers into a [tracing](https://crates.io/crates/tracing) subscriber 48 | /// and initializes it as the global default. This subscriber will log to stdout and optionally to a file.
49 | pub fn build_subscriber(verbose: bool, appender: Option<RollingFileAppender>) -> Vec<WorkerGuard> { 50 | let mut guards = Vec::new(); 51 | 52 | let stdout_env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| { 53 | EnvFilter::new(match verbose { 54 | true => "magi=debug,network=debug".to_owned(), 55 | false => "magi=info,network=debug".to_owned(), 56 | }) 57 | }); 58 | 59 | let stdout_formatting_layer = AnsiTermLayer { verbose }.with_filter(stdout_env_filter); 60 | 61 | // If a file appender is provided, log to it and stdout, otherwise just log to stdout 62 | if let Some(appender) = appender { 63 | let (non_blocking, guard) = tracing_appender::non_blocking(appender); 64 | guards.push(guard); 65 | 66 | // Force the file logger to log at `debug` level 67 | let file_env_filter = EnvFilter::from("magi=debug,network=debug"); 68 | 69 | tracing_subscriber::registry() 70 | .with(stdout_formatting_layer) 71 | .with( 72 | tracing_subscriber::fmt::layer() 73 | .with_ansi(false) 74 | .with_writer(non_blocking) 75 | .with_filter(file_env_filter), 76 | ) 77 | .init(); 78 | } else { 79 | tracing_subscriber::registry() 80 | .with(stdout_formatting_layer) 81 | .init(); 82 | } 83 | 84 | guards 85 | } 86 | 87 | /// The AnsiVisitor 88 | #[derive(Debug)] 89 | pub struct AnsiVisitor; 90 | 91 | impl tracing::field::Visit for AnsiVisitor { 92 | fn record_f64(&mut self, _: &tracing::field::Field, value: f64) { 93 | println!("{value}") 94 | } 95 | 96 | fn record_i64(&mut self, _: &tracing::field::Field, value: i64) { 97 | println!("{value}") 98 | } 99 | 100 | fn record_u64(&mut self, _: &tracing::field::Field, value: u64) { 101 | println!("{value}") 102 | } 103 | 104 | fn record_bool(&mut self, _: &tracing::field::Field, value: bool) { 105 | println!("{value}") 106 | } 107 | 108 | fn record_str(&mut self, _: &tracing::field::Field, value: &str) { 109 | println!("{value}") 110 | } 111 | 112 | fn record_error( 113 | &mut self, 114 | _: &tracing::field::Field, 115 | value: &(dyn std::error::Error + 'static), 116 | ) { 117 | println!("{value}") 118 | } 119 | 120 | fn record_debug(&mut self, _: &tracing::field::Field, value: &dyn std::fmt::Debug) { 121 | println!("{value:?}") 122 | } 123 | } 124 | 125 | /// An Ansi Term layer for tracing 126 | #[derive(Debug)] 127 | pub struct AnsiTermLayer { 128 | /// Whether verbose tracing is enabled.
Prints additional metadata if `true` 129 | verbose: bool, 130 | } 131 | 132 | impl<S> Layer<S> for AnsiTermLayer 133 | where 134 | S: tracing::Subscriber, 135 | { 136 | fn on_event( 137 | &self, 138 | event: &tracing::Event<'_>, 139 | _ctx: tracing_subscriber::layer::Context<'_, S>, 140 | ) { 141 | // Print the timestamp 142 | let utc: String = chrono::Utc::now().to_rfc2822(); 143 | let strip_len = utc.find('+').unwrap_or(utc.len()); 144 | let formatted_utc = utc[..strip_len].trim_end(); 145 | print!("[{}] ", Cyan.paint(formatted_utc)); 146 | 147 | // Print the level prefix 148 | match *event.metadata().level() { 149 | Level::ERROR => { 150 | print!("{}: ", Red.paint("ERROR")); 151 | } 152 | Level::WARN => { 153 | print!("{}: ", Yellow.paint("WARN")); 154 | } 155 | Level::INFO => { 156 | print!("{}: ", Blue.paint("INFO")); 157 | } 158 | Level::DEBUG => { 159 | print!("DEBUG: "); 160 | } 161 | Level::TRACE => { 162 | print!("{}: ", Purple.paint("TRACE")); 163 | } 164 | } 165 | 166 | if self.verbose { 167 | print!("{} ", Purple.paint(event.metadata().target())); 168 | 169 | let original_location = event 170 | .metadata() 171 | .name() 172 | .split(' ') 173 | .last() 174 | .unwrap_or_default(); 175 | 176 | let relative_path = current_dir() 177 | .unwrap_or_default() 178 | .to_string_lossy() 179 | .to_string(); 180 | 181 | // Remove common prefixes from the location and relative path 182 | let location_path = std::path::Path::new(original_location); 183 | let relative_path_path = std::path::Path::new(&relative_path); 184 | let common_prefix = location_path 185 | .ancestors() 186 | .collect::<Vec<_>>() 187 | .iter() 188 | .cloned() 189 | .rev() 190 | .zip( 191 | relative_path_path 192 | .ancestors() 193 | .collect::<Vec<_>>() 194 | .iter() 195 | .cloned() 196 | .rev(), 197 | ) 198 | .take_while(|(a, b)| a == b) 199 | .last() 200 | .map(|(a, _)| a) 201 | .unwrap_or_else(|| std::path::Path::new("")); 202 | 203 | let location = location_path 204 | .strip_prefix(common_prefix) 205 | .unwrap_or(location_path) 206 | .to_str() 207 | .unwrap_or(original_location); 208 | 209 | let location = location.strip_prefix('/').unwrap_or(location); 210 | print!("at {} ", Cyan.paint(location.to_string())); 211 | } 212 | 213 | let mut visitor = AnsiVisitor; 214 | event.record(&mut visitor); 215 | } 216 | } 217 | 218 | /// Get the rotation strategy from the given string. 219 | /// Defaults to rotating daily. 220 | fn get_rotation_strategy(val: &str) -> Rotation { 221 | match val { 222 | "never" => Rotation::NEVER, 223 | "daily" => Rotation::DAILY, 224 | "hourly" => Rotation::HOURLY, 225 | "minutely" => Rotation::MINUTELY, 226 | _ => { 227 | eprintln!("Invalid log rotation strategy provided. Defaulting to rotating daily."); 228 | eprintln!("Valid rotation options are: 'never', 'daily', 'hourly', 'minutely'."); 229 | Rotation::DAILY 230 | } 231 | } 232 | } 233 | 234 | /// Get a rolling file appender for the given directory, rotation and file name prefix.
235 | fn get_rolling_file_appender( 236 | directory: PathBuf, 237 | rotation: Rotation, 238 | file_name_prefix: &str, 239 | ) -> RollingFileAppender { 240 | match rotation { 241 | Rotation::NEVER => rolling::never(directory, file_name_prefix), 242 | Rotation::DAILY => rolling::daily(directory, file_name_prefix), 243 | Rotation::HOURLY => rolling::hourly(directory, file_name_prefix), 244 | Rotation::MINUTELY => rolling::minutely(directory, file_name_prefix), 245 | } 246 | } 247 | -------------------------------------------------------------------------------- /src/runner/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{process, time::Duration}; 2 | 3 | use ethers::{ 4 | providers::{Middleware, Provider}, 5 | types::{Block, BlockId, BlockNumber, Transaction, H256}, 6 | }; 7 | use eyre::Result; 8 | use tokio::{ 9 | sync::watch::{channel, Receiver}, 10 | time::sleep, 11 | }; 12 | 13 | use crate::{ 14 | config::{Config, SyncMode}, 15 | driver::{Driver, HeadInfo}, 16 | engine::{Engine, EngineApi, ExecutionPayload, ForkchoiceState, Status}, 17 | }; 18 | 19 | /// Temporary trusted/static peer used for checkpoint sync mode. 20 | // TODO: use a list of whitelisted bootnodes instead 21 | const TRUSTED_PEER_ENODE: &str = "enode://e85ba0beec172b17f53b373b0ab72238754259aa39f1ae5290e3244e0120882f4cf95acd203661a27c8618b27ca014d4e193266cb3feae43655ed55358eedb06@3.86.143.120:30303?discport=21693"; 22 | 23 | /// The main entrypoint for starting a Magi node. 24 | /// Responsible for starting the syncing process. 25 | pub struct Runner { 26 | /// The Magi [Config] 27 | config: Config, 28 | /// The [SyncMode] - currently full & checkpoint sync are supported 29 | sync_mode: SyncMode, 30 | /// The L2 block hash to begin syncing from 31 | checkpoint_hash: Option<String>, 32 | /// Receiver to listen for SIGINT signals 33 | shutdown_recv: Receiver<bool>, 34 | } 35 | 36 | impl Runner { 37 | /// Creates a new [Runner] from a [Config] and registers the SIGINT signal handler.
38 | pub fn from_config(config: Config) -> Self { 39 | let (shutdown_sender, shutdown_recv) = channel(false); 40 | ctrlc::set_handler(move || { 41 | tracing::info!("shutting down"); 42 | shutdown_sender 43 | .send(true) 44 | .expect("could not send shutdown signal"); 45 | }) 46 | .expect("could not register shutdown handler"); 47 | 48 | Self { 49 | config, 50 | sync_mode: SyncMode::Full, 51 | checkpoint_hash: None, 52 | shutdown_recv, 53 | } 54 | } 55 | 56 | /// Sets the [SyncMode] 57 | pub fn with_sync_mode(mut self, sync_mode: SyncMode) -> Self { 58 | self.sync_mode = sync_mode; 59 | self 60 | } 61 | 62 | /// Sets the `checkpoint_hash` if running in checkpoint [SyncMode] 63 | pub fn with_checkpoint_hash(mut self, checkpoint_hash: Option<String>) -> Self { 64 | self.checkpoint_hash = checkpoint_hash; 65 | self 66 | } 67 | 68 | /// Begins the syncing process 69 | pub async fn run(self) -> Result<()> { 70 | match self.sync_mode { 71 | SyncMode::Fast => self.fast_sync().await, 72 | SyncMode::Challenge => self.challenge_sync().await, 73 | SyncMode::Full => self.full_sync().await, 74 | SyncMode::Checkpoint => self.checkpoint_sync().await, 75 | } 76 | } 77 | 78 | /// Fast sync mode - currently unsupported 79 | pub async fn fast_sync(&self) -> Result<()> { 80 | tracing::error!("fast sync is not implemented yet"); 81 | unimplemented!(); 82 | } 83 | 84 | /// Challenge sync mode - currently unsupported 85 | pub async fn challenge_sync(&self) -> Result<()> { 86 | tracing::error!("challenge sync is not implemented yet"); 87 | unimplemented!(); 88 | } 89 | 90 | /// Full sync mode. 91 | /// Syncs via L1 block derivation from the latest finalized block the execution client has synced to. 92 | /// Otherwise syncs from genesis 93 | pub async fn full_sync(&self) -> Result<()> { 94 | self.start_driver().await?; 95 | Ok(()) 96 | } 97 | 98 | /// Checkpoint sync mode. 99 | /// Syncs the execution client to a given checkpoint block, and then begins the normal derivation sync process via the [Driver] 100 | /// 101 | /// Note: the `admin` RPC method must be available on the execution client as checkpoint_sync relies on `admin_addPeer` 102 | pub async fn checkpoint_sync(&self) -> Result<()> { 103 | let l2_provider = Provider::try_from(&self.config.l2_rpc_url)?; 104 | let checkpoint_sync_url = 105 | Provider::try_from(self.config.checkpoint_sync_url.as_ref().ok_or(eyre::eyre!( 106 | "a checkpoint sync rpc url is required for checkpoint sync" 107 | ))?)?; 108 | 109 | let checkpoint_block = match self.checkpoint_hash { 110 | Some(ref checkpoint) => { 111 | let block_hash: H256 = checkpoint 112 | .parse() 113 | .expect("invalid checkpoint block hash provided"); 114 | 115 | let l2_block = checkpoint_sync_url 116 | .get_block_with_txs(block_hash) 117 | .await? 118 | .ok_or_else(|| eyre::eyre!("could not find block"))?; 119 | 120 | match is_epoch_boundary(l2_block, &self.config)? { 121 | true => checkpoint_sync_url 122 | .get_block_with_txs(block_hash) 123 | .await? 124 | .expect("could not get checkpoint block"), 125 | false => { 126 | tracing::error!("the provided checkpoint block is not an epoch boundary"); 127 | process::exit(1); 128 | } 129 | } 130 | } 131 | None => { 132 | tracing::info!("finding the latest epoch boundary to use as checkpoint"); 133 | 134 | let mut block_number = checkpoint_sync_url.get_block_number().await?; 135 | let mut l2_block = checkpoint_sync_url 136 | .get_block_with_txs(block_number) 137 | .await?
138 | .ok_or_else(|| eyre::eyre!("could not find block"))?; 139 | 140 | while !is_epoch_boundary(l2_block.clone(), &self.config)? { 141 | self.check_shutdown()?; 142 | block_number -= 1.into(); l2_block = checkpoint_sync_url .get_block_with_txs(block_number) .await? .ok_or_else(|| eyre::eyre!("could not find block"))?; 143 | } 144 | 145 | let block = checkpoint_sync_url 146 | .get_block(BlockId::Number(BlockNumber::Number(block_number))) 147 | .await? 148 | .expect("could not get block"); 149 | 150 | checkpoint_sync_url 151 | .get_block_with_txs(block.hash.expect("block hash is missing")) 152 | .await? 153 | .expect("could not get checkpoint block") 154 | } 155 | }; 156 | 157 | let checkpoint_hash = checkpoint_block.hash.expect("block hash is missing"); 158 | tracing::info!("using checkpoint block {}", checkpoint_hash); 159 | 160 | let engine_api = EngineApi::new(&self.config.l2_engine_url, &self.config.jwt_secret); 161 | while !engine_api.is_available().await { 162 | self.check_shutdown()?; 163 | sleep(Duration::from_secs(3)).await; 164 | } 165 | 166 | // if the checkpoint block is already synced, start from the finalized head 167 | if l2_provider.get_block(checkpoint_hash).await?.is_some() { 168 | tracing::warn!("finalized head is above the checkpoint block"); 169 | self.start_driver().await?; 170 | return Ok(()); 171 | } 172 | 173 | // this is a temporary fix to allow execution layer peering to work 174 | // TODO: use a list of whitelisted bootnodes instead 175 | tracing::info!("adding trusted peer to the execution layer"); 176 | l2_provider.add_peer(TRUSTED_PEER_ENODE.to_string()).await?; 177 | 178 | // build the execution payload from the checkpoint block and send it to the execution client 179 | let checkpoint_payload = ExecutionPayload::try_from(checkpoint_block)?; 180 | 181 | let payload_res = engine_api.new_payload(checkpoint_payload.clone()).await?; 182 | if let Status::Invalid | Status::InvalidBlockHash = payload_res.status { 183 | tracing::error!("the provided checkpoint payload is invalid, exiting"); 184 | process::exit(1); 185 | } 186 | 187 | // make the execution client start syncing up to the checkpoint 188 | let forkchoice_state = ForkchoiceState::from_single_head(checkpoint_hash); 189 | let forkchoice_res = engine_api 190 | .forkchoice_updated(forkchoice_state, None) 191 | .await?; 192 | if let Status::Invalid | Status::InvalidBlockHash = forkchoice_res.payload_status.status { 193 | tracing::error!("could not accept forkchoice, exiting"); 194 | process::exit(1); 195 | } 196 | 197 | tracing::info!("syncing execution client to the checkpoint block..."); 198 | 199 | while l2_provider.get_block_number().await? < checkpoint_payload.block_number { 200 | self.check_shutdown()?; 201 | sleep(Duration::from_secs(3)).await; 202 | } 203 | 204 | tracing::info!("execution client successfully synced to the checkpoint block"); 205 | 206 | self.start_driver().await?; 207 | Ok(()) 208 | } 209 | 210 | /// Creates and starts the [Driver] which handles the derivation sync process.
211 | async fn start_driver(&self) -> Result<()> { 212 | let mut driver = 213 | Driver::from_config(self.config.clone(), self.shutdown_recv.clone()).await?; 214 | 215 | if let Err(err) = driver.start().await { 216 | tracing::error!("driver failure: {}", err); 217 | std::process::exit(1); 218 | } 219 | 220 | Ok(()) 221 | } 222 | 223 | /// Exits if a SIGINT signal is received 224 | fn check_shutdown(&self) -> Result<()> { 225 | if *self.shutdown_recv.borrow() { 226 | tracing::warn!("shutting down"); 227 | process::exit(0); 228 | } 229 | 230 | Ok(()) 231 | } 232 | } 233 | 234 | /// Returns `true` if the L2 block is the first in an epoch (sequence number 0) 235 | fn is_epoch_boundary(l2_block: Block<Transaction>, config: &Config) -> Result<bool> { 236 | let head_info = HeadInfo::try_from_l2_block(config, l2_block)?; 237 | let sequence_number = head_info.sequence_number; 238 | 239 | Ok(sequence_number == 0) 240 | } 241 | -------------------------------------------------------------------------------- /src/common/attributes_deposited.rs: -------------------------------------------------------------------------------- 1 | use ethers::{ 2 | types::{Bytes, H256, U256}, 3 | utils::keccak256, 4 | }; 5 | use eyre::Result; 6 | use lazy_static::lazy_static; 7 | 8 | /// Represents the attributes deposited transaction call 9 | #[derive(Debug)] 10 | pub struct AttributesDepositedCall { 11 | /// block number 12 | pub number: u64, 13 | /// block timestamp 14 | pub timestamp: u64, 15 | /// base fee 16 | pub basefee: U256, 17 | /// block hash 18 | pub hash: H256, 19 | /// sequence number of the L2 block 20 | pub sequence_number: u64, 21 | /// batcher hash (should contain an address) 22 | pub batcher_hash: H256, 23 | /// L1 fee overhead 24 | pub fee_overhead: U256, 25 | /// L1 fee scalar 26 | pub fee_scalar: U256, 27 | /// Blob base fee scalar (after Ecotone) 28 | pub blob_base_fee_scalar: Option<u32>, 29 | /// Blob base fee (after Ecotone) 30 | pub blob_base_fee: Option<U256>, 31 | } 32 | 33 | const L1_INFO_BEDROCK_LEN: usize = 4 + 32 * 8; 34 | const L1_INFO_BEDROCK_SIGNATURE: &str = 35 | "setL1BlockValues(uint64,uint64,uint256,bytes32,uint64,bytes32,uint256,uint256)"; 36 | 37 | const L1_INFO_ECOTONE_LEN: usize = 4 + 32 * 5; 38 | const L1_INFO_ECOTONE_SIGNATURE: &str = "setL1BlockValuesEcotone()"; 39 | 40 | lazy_static!
{ 41 | static ref SET_L1_BLOCK_VALUES_BEDROCK_SELECTOR: [u8; 4] = keccak256(L1_INFO_BEDROCK_SIGNATURE) 42 | [..4] 43 | .try_into() 44 | .unwrap(); 45 | static ref SET_L1_BLOCK_VALUES_ECOTONE_SELECTOR: [u8; 4] = keccak256(L1_INFO_ECOTONE_SIGNATURE) 46 | [..4] 47 | .try_into() 48 | .unwrap(); 49 | } 50 | 51 | impl AttributesDepositedCall { 52 | /// Bedrock Binary Format 53 | /// ```md 54 | /// +---------+--------------------------+ 55 | /// | Bytes | Field | 56 | /// +---------+--------------------------+ 57 | /// | 4 | Function signature | 58 | /// | 32 | Number | 59 | /// | 32 | Time | 60 | /// | 32 | BaseFee | 61 | /// | 32 | BlockHash | 62 | /// | 32 | SequenceNumber | 63 | /// | 32 | BatcherHash | 64 | /// | 32 | L1FeeOverhead | 65 | /// | 32 | L1FeeScalar | 66 | /// +---------+--------------------------+ 67 | /// ``` 68 | pub fn try_from_bedrock(calldata: Bytes) -> Result<Self> { 69 | let mut cursor = 0; 70 | 71 | if calldata.len() != L1_INFO_BEDROCK_LEN { 72 | eyre::bail!("invalid calldata length"); 73 | } 74 | 75 | let selector = &calldata[cursor..cursor + 4]; 76 | if selector != *SET_L1_BLOCK_VALUES_BEDROCK_SELECTOR { 77 | eyre::bail!("invalid selector"); 78 | } 79 | cursor += 4; 80 | 81 | let number = U256::from_big_endian(calldata[cursor..cursor + 32].try_into()?); 82 | let number = number.as_u64(); // down-casting to u64 is safe for the block number 83 | cursor += 32; 84 | 85 | let timestamp = U256::from_big_endian(calldata[cursor..cursor + 32].try_into()?); 86 | let timestamp = timestamp.as_u64(); // down-casting to u64 is safe for UNIX timestamp 87 | cursor += 32; 88 | 89 | let basefee = U256::from_big_endian(&calldata[cursor..cursor + 32]); 90 | cursor += 32; 91 | 92 | let hash = H256::from_slice(&calldata[cursor..cursor + 32]); 93 | cursor += 32; 94 | 95 | let sequence_number = U256::from_big_endian(calldata[cursor..cursor + 32].try_into()?); 96 | let sequence_number = sequence_number.as_u64(); // down-casting to u64 is safe for the sequence number 97 | cursor += 32; 98 | 99 | let batcher_hash = H256::from_slice(&calldata[cursor..cursor + 32]); 100 | cursor += 32; 101 | 102 | let fee_overhead = U256::from_big_endian(&calldata[cursor..cursor + 32]); 103 | cursor += 32; 104 | 105 | let fee_scalar = U256::from_big_endian(&calldata[cursor..cursor + 32]); 106 | 107 | Ok(Self { 108 | number, 109 | timestamp, 110 | basefee, 111 | hash, 112 | sequence_number, 113 | batcher_hash, 114 | fee_overhead, 115 | fee_scalar, 116 | 117 | // Ecotone fields are not present in Bedrock attributes deposited calls 118 | blob_base_fee_scalar: None, 119 | blob_base_fee: None, 120 | }) 121 | } 122 | 123 | /// Ecotone Binary Format 124 | /// ```md 125 | /// +---------+--------------------------+ 126 | /// | Bytes | Field | 127 | /// +---------+--------------------------+ 128 | /// | 4 | Function signature | 129 | /// | 4 | BaseFeeScalar | 130 | /// | 4 | BlobBaseFeeScalar | 131 | /// | 8 | SequenceNumber | 132 | /// | 8 | Timestamp | 133 | /// | 8 | L1BlockNumber | 134 | /// | 32 | BaseFee | 135 | /// | 32 | BlobBaseFee | 136 | /// | 32 | BlockHash | 137 | /// | 32 | BatcherHash | 138 | /// +---------+--------------------------+ 139 | /// ``` 140 | pub fn try_from_ecotone(calldata: Bytes) -> Result<Self> { 141 | let mut cursor = 0; 142 | 143 | if calldata.len() != L1_INFO_ECOTONE_LEN { 144 | eyre::bail!("invalid calldata length"); 145 | } 146 | 147 | let selector = &calldata[cursor..cursor + 4]; 148 | if selector != *SET_L1_BLOCK_VALUES_ECOTONE_SELECTOR { 149 | eyre::bail!("invalid selector"); 150 | } 151 | cursor
+= 4; 152 | 153 | let fee_scalar = u32::from_be_bytes(calldata[cursor..cursor + 4].try_into()?); 154 | let fee_scalar = U256::from(fee_scalar); // up-casting for backwards compatibility 155 | cursor += 4; 156 | 157 | let blob_base_fee_scalar = 158 | Some(u32::from_be_bytes(calldata[cursor..cursor + 4].try_into()?)); 159 | cursor += 4; 160 | 161 | let sequence_number = u64::from_be_bytes(calldata[cursor..cursor + 8].try_into()?); 162 | cursor += 8; 163 | 164 | let timestamp = u64::from_be_bytes(calldata[cursor..cursor + 8].try_into()?); 165 | cursor += 8; 166 | 167 | let number = u64::from_be_bytes(calldata[cursor..cursor + 8].try_into()?); 168 | cursor += 8; 169 | 170 | let basefee = U256::from_big_endian(&calldata[cursor..cursor + 32]); 171 | cursor += 32; 172 | 173 | let blob_base_fee = Some(U256::from_big_endian(&calldata[cursor..cursor + 32])); 174 | cursor += 32; 175 | 176 | let hash = H256::from_slice(&calldata[cursor..cursor + 32]); 177 | cursor += 32; 178 | 179 | let batcher_hash = H256::from_slice(&calldata[cursor..cursor + 32]); 180 | 181 | Ok(Self { 182 | number, 183 | timestamp, 184 | basefee, 185 | hash, 186 | sequence_number, 187 | batcher_hash, 188 | fee_scalar, 189 | blob_base_fee, 190 | blob_base_fee_scalar, 191 | 192 | // The pre-Ecotone L1 fee overhead value is dropped in Ecotone 193 | fee_overhead: U256::zero(), 194 | }) 195 | } 196 | } 197 | 198 | #[cfg(test)] 199 | mod tests { 200 | mod attributed_deposited_call { 201 | use std::str::FromStr; 202 | 203 | use ethers::types::{Bytes, H256, U256}; 204 | 205 | use crate::common::AttributesDepositedCall; 206 | 207 | #[test] 208 | fn decode_from_bytes_bedrock() -> eyre::Result<()> { 209 | // Arrange 210 | let calldata = "0x015d8eb900000000000000000000000000000000000000000000000000000000008768240000000000000000000000000000000000000000000000000000000064443450000000000000000000000000000000000000000000000000000000000000000e0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d00000000000000000000000000000000000000000000000000000000000000050000000000000000000000007431310e026b69bfc676c0013e12a1a11411eec9000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240"; 211 | 212 | let expected_hash = 213 | H256::from_str("0444c991c5fe1d7291ff34b3f5c3b44ee861f021396d33ba3255b83df30e357d")?; 214 | let expected_block_number = 8874020; 215 | let expected_timestamp = 1682191440; 216 | 217 | // Act 218 | let call = AttributesDepositedCall::try_from_bedrock(Bytes::from_str(calldata)?); 219 | 220 | // Assert 221 | assert!(call.is_ok()); 222 | let call = call.unwrap(); 223 | 224 | assert_eq!(call.hash, expected_hash); 225 | assert_eq!(call.number, expected_block_number); 226 | assert_eq!(call.timestamp, expected_timestamp); 227 | 228 | Ok(()) 229 | } 230 | 231 | #[test] 232 | fn decode_from_bytes_ecotone() -> eyre::Result<()> { 233 | // Arrange 234 | // https://goerli-optimism.etherscan.io/tx/0xc2288c5d1f6123406bfe8662bdbc1a3c999394da2e6f444f5aa8df78136f36ba 235 | let calldata = "0x440a5e2000001db0000d273000000000000000050000000065c8ad6c0000000000a085a20000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000041dfd80f2c8af7d7ba1c1a3962026e5c96b9105d528f8fed65c56cfa731a8751c7f712eb70000000000000000000000007431310e026b69bfc676c0013e12a1a11411eec9"; 236 | 237 | let expected_hash = H256::from_str( 238 | "0xc8af7d7ba1c1a3962026e5c96b9105d528f8fed65c56cfa731a8751c7f712eb7", 239 | ); 240 | let 
expected_block_number = 10519970; 241 | let expected_timestamp = 1707650412; 242 | let expected_blob_base_fee_scalar = Some(862000); 243 | let expected_blob_base_fee = Some(U256::from(17683022066u64)); 244 | 245 | // Act 246 | let call = AttributesDepositedCall::try_from_ecotone(Bytes::from_str(calldata)?); 247 | 248 | // Assert 249 | assert!(call.is_ok()); 250 | let call = call.unwrap(); 251 | 252 | assert_eq!(call.hash, expected_hash?); 253 | assert_eq!(call.number, expected_block_number); 254 | assert_eq!(call.timestamp, expected_timestamp); 255 | assert_eq!(call.blob_base_fee_scalar, expected_blob_base_fee_scalar); 256 | assert_eq!(call.blob_base_fee, expected_blob_base_fee); 257 | 258 | Ok(()) 259 | } 260 | } 261 | } 262 | -------------------------------------------------------------------------------- /src/driver/engine_driver.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use ethers::providers::{Http, Middleware, Provider}; 4 | use ethers::types::Transaction; 5 | use ethers::{ 6 | types::{Block, H256}, 7 | utils::keccak256, 8 | }; 9 | use eyre::Result; 10 | 11 | use crate::{ 12 | common::{BlockInfo, Epoch}, 13 | config::Config, 14 | engine::{Engine, EngineApi, ExecutionPayload, ForkchoiceState, PayloadAttributes, Status}, 15 | }; 16 | 17 | /// The EngineDriver is responsible for initiating block production & validation via the [EngineApi] 18 | pub struct EngineDriver<E: Engine> { 19 | /// The L2 execution engine 20 | engine: Arc<E>, 21 | /// Provider for the local L2 execution RPC 22 | provider: Provider<Http>, 23 | /// Blocktime of the L2 chain 24 | blocktime: u64, 25 | /// Most recent block found on the p2p network 26 | pub unsafe_head: BlockInfo, 27 | /// Most recent block that can be derived from L1 data 28 | pub safe_head: BlockInfo, 29 | /// Batch epoch of the safe head 30 | pub safe_epoch: Epoch, 31 | /// Most recent block that can be derived from finalized L1 data 32 | pub finalized_head: BlockInfo, 33 | /// Batch epoch of the finalized head 34 | pub finalized_epoch: Epoch, 35 | } 36 | 37 | impl<E: Engine> EngineDriver<E> { 38 | /// Initiates validation & production of a new L2 block from the given [PayloadAttributes] and updates the forkchoice 39 | pub async fn handle_attributes(&mut self, attributes: PayloadAttributes) -> Result<()> { 40 | let block: Option<Block<Transaction>> = self.block_at(attributes.timestamp.as_u64()).await; 41 | 42 | if let Some(block) = block { 43 | if should_skip(&block, &attributes)? { 44 | self.skip_attributes(attributes, block).await 45 | } else { 46 | self.unsafe_head = self.safe_head; 47 | self.process_attributes(attributes).await 48 | } 49 | } else { 50 | self.process_attributes(attributes).await 51 | } 52 | } 53 | 54 | /// Instructs the engine to create a block and updates the forkchoice, based on a payload received via p2p gossip. 55 | pub async fn handle_unsafe_payload(&mut self, payload: &ExecutionPayload) -> Result<()> { 56 | self.push_payload(payload.clone()).await?; 57 | self.unsafe_head = payload.into(); 58 | self.update_forkchoice().await?; 59 | 60 | tracing::info!( 61 | "head updated: {} {:?}", 62 | self.unsafe_head.number, 63 | self.unsafe_head.hash, 64 | ); 65 | 66 | Ok(()) 67 | } 68 | 69 | /// Updates the [EngineDriver] finalized head & epoch 70 | pub fn update_finalized(&mut self, head: BlockInfo, epoch: Epoch) { 71 | self.finalized_head = head; 72 | self.finalized_epoch = epoch; 73 | } 74 | 75 | /// Sets the [EngineDriver] unsafe & safe heads, and safe epoch to the current finalized head & epoch.
76 | pub fn reorg(&mut self) { 77 | self.unsafe_head = self.finalized_head; 78 | self.safe_head = self.finalized_head; 79 | self.safe_epoch = self.finalized_epoch; 80 | } 81 | 82 | /// Sends a `ForkchoiceUpdated` message to check if the [Engine] is ready. 83 | pub async fn engine_ready(&self) -> bool { 84 | let forkchoice = self.create_forkchoice_state(); 85 | self.engine 86 | .forkchoice_updated(forkchoice, None) 87 | .await 88 | .is_ok() 89 | } 90 | 91 | /// Initiates validation & production of a new block: 92 | /// - Sends the [PayloadAttributes] to the engine via `engine_forkchoiceUpdatedV2` (V3 post Ecotone) and retrieves the [ExecutionPayload] 93 | /// - Executes the [ExecutionPayload] to create a block via `engine_newPayloadV2` (V3 post Ecotone) 94 | /// - Updates the [EngineDriver] `safe_head`, `safe_epoch`, and `unsafe_head` 95 | /// - Updates the forkchoice and sends this to the engine via `engine_forkchoiceUpdatedV2` (v3 post Ecotone) 96 | async fn process_attributes(&mut self, attributes: PayloadAttributes) -> Result<()> { 97 | let new_epoch = *attributes.epoch.as_ref().unwrap(); 98 | 99 | let payload = self.build_payload(attributes).await?; 100 | 101 | let new_head = BlockInfo { 102 | number: payload.block_number.as_u64(), 103 | hash: payload.block_hash, 104 | parent_hash: payload.parent_hash, 105 | timestamp: payload.timestamp.as_u64(), 106 | }; 107 | 108 | self.push_payload(payload).await?; 109 | self.update_safe_head(new_head, new_epoch, true)?; 110 | self.update_forkchoice().await?; 111 | 112 | Ok(()) 113 | } 114 | 115 | /// Updates the forkchoice by sending `engine_forkchoiceUpdatedV2` (v3 post Ecotone) to the engine with no payload. 116 | async fn skip_attributes( 117 | &mut self, 118 | attributes: PayloadAttributes, 119 | block: Block<Transaction>, 120 | ) -> Result<()> { 121 | let new_epoch = *attributes.epoch.as_ref().unwrap(); 122 | let new_head = BlockInfo::try_from(block)?; 123 | self.update_safe_head(new_head, new_epoch, false)?; 124 | self.update_forkchoice().await?; 125 | 126 | Ok(()) 127 | } 128 | 129 | /// Sends [PayloadAttributes] via a `ForkChoiceUpdated` message to the [Engine] and returns the [ExecutionPayload] sent by the Execution Client. 130 | async fn build_payload(&self, attributes: PayloadAttributes) -> Result<ExecutionPayload> { 131 | let forkchoice = self.create_forkchoice_state(); 132 | 133 | let update = self 134 | .engine 135 | .forkchoice_updated(forkchoice, Some(attributes)) 136 | .await?; 137 | 138 | if update.payload_status.status != Status::Valid { 139 | eyre::bail!("invalid payload attributes"); 140 | } 141 | 142 | let id = update 143 | .payload_id 144 | .ok_or(eyre::eyre!("engine did not return payload id"))?; 145 | 146 | self.engine.get_payload(id).await 147 | } 148 | 149 | /// Sends the given [ExecutionPayload] to the [Engine] via `NewPayload` 150 | async fn push_payload(&self, payload: ExecutionPayload) -> Result<()> { 151 | let status = self.engine.new_payload(payload).await?; 152 | if status.status != Status::Valid && status.status != Status::Accepted { 153 | eyre::bail!("invalid execution payload"); 154 | } 155 | 156 | Ok(()) 157 | } 158 | 159 | /// Sends a `ForkChoiceUpdated` message to the [Engine] with the current `Forkchoice State` and no payload.
160 | async fn update_forkchoice(&self) -> Result<()> { 161 | let forkchoice = self.create_forkchoice_state(); 162 | 163 | let update = self.engine.forkchoice_updated(forkchoice, None).await?; 164 | if update.payload_status.status != Status::Valid { 165 | eyre::bail!( 166 | "could not accept new forkchoice: {:?}", 167 | update.payload_status.validation_error 168 | ); 169 | } 170 | 171 | Ok(()) 172 | } 173 | 174 | /// Updates the current `safe_head` & `safe_epoch`. 175 | /// 176 | /// Also updates the current `unsafe_head` to the given `new_head` if `reorg_unsafe` is `true`, or if the updated `safe_head` is newer than the current `unsafe_head` 177 | fn update_safe_head( 178 | &mut self, 179 | new_head: BlockInfo, 180 | new_epoch: Epoch, 181 | reorg_unsafe: bool, 182 | ) -> Result<()> { 183 | if self.safe_head != new_head { 184 | self.safe_head = new_head; 185 | self.safe_epoch = new_epoch; 186 | } 187 | 188 | if reorg_unsafe || self.safe_head.number > self.unsafe_head.number { 189 | self.unsafe_head = new_head; 190 | } 191 | 192 | Ok(()) 193 | } 194 | 195 | /// Creates a [ForkchoiceState]: 196 | /// - `head_block` = `unsafe_head` 197 | /// - `safe_block` = `safe_head` 198 | /// - `finalized_block` = `finalized_head` 199 | fn create_forkchoice_state(&self) -> ForkchoiceState { 200 | ForkchoiceState { 201 | head_block_hash: self.unsafe_head.hash, 202 | safe_block_hash: self.safe_head.hash, 203 | finalized_block_hash: self.finalized_head.hash, 204 | } 205 | } 206 | 207 | /// Fetches the L2 block for a given timestamp from the L2 Execution Client 208 | async fn block_at(&self, timestamp: u64) -> Option<Block<Transaction>> { 209 | let time_diff = timestamp as i64 - self.finalized_head.timestamp as i64; 210 | let blocks = time_diff / self.blocktime as i64; 211 | let block_num = self.finalized_head.number as i64 + blocks; 212 | self.provider 213 | .get_block_with_txs(block_num as u64) 214 | .await 215 | .ok()?
216 | } 217 | } 218 | 219 | /// True if the transactions and fields in [PayloadAttributes] match those of a fetched L2 [Block], in which case the attributes can be skipped 220 | fn should_skip(block: &Block<Transaction>, attributes: &PayloadAttributes) -> Result<bool> { 221 | tracing::debug!( 222 | "comparing block at {} with attributes at {}", 223 | block.timestamp, 224 | attributes.timestamp 225 | ); 226 | 227 | let attributes_hashes = attributes 228 | .transactions 229 | .as_ref() 230 | .unwrap() 231 | .iter() 232 | .map(|tx| H256(keccak256(&tx.0))) 233 | .collect::<Vec<_>>(); 234 | 235 | let block_hashes = block 236 | .transactions 237 | .iter() 238 | .map(|tx| tx.hash()) 239 | .collect::<Vec<_>>(); 240 | 241 | tracing::debug!("attribute hashes: {:?}", attributes_hashes); 242 | tracing::debug!("block hashes: {:?}", block_hashes); 243 | 244 | let is_same = attributes_hashes == block_hashes 245 | && attributes.timestamp.as_u64() == block.timestamp.as_u64() 246 | && attributes.prev_randao == block.mix_hash.unwrap() 247 | && attributes.suggested_fee_recipient == block.author.unwrap() 248 | && attributes.gas_limit.as_u64() == block.gas_limit.as_u64(); 249 | 250 | Ok(is_same) 251 | } 252 | 253 | impl EngineDriver<EngineApi> { 254 | /// Creates a new [EngineDriver] and builds the [EngineApi] client 255 | pub fn new( 256 | finalized_head: BlockInfo, 257 | finalized_epoch: Epoch, 258 | provider: Provider<Http>, 259 | config: &Arc<Config>, 260 | ) -> Result<Self> { 261 | let engine = Arc::new(EngineApi::new(&config.l2_engine_url, &config.jwt_secret)); 262 | 263 | Ok(Self { 264 | engine, 265 | provider, 266 | blocktime: config.chain.blocktime, 267 | unsafe_head: finalized_head, 268 | safe_head: finalized_head, 269 | safe_epoch: finalized_epoch, 270 | finalized_head, 271 | finalized_epoch, 272 | }) 273 | } 274 | } 275 | -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | ## Specifications 2 | 3 | ### Driver 4 | 5 | The [Driver](../src/driver/mod.rs) is the highest-level component in `magi`. It is responsible for driving the L2 chain forward by processing L1 blocks and deriving the L2 chain from them. 6 | 7 | On instantiation, the [Driver](../src/driver/mod.rs) is provided with an instance of the [Engine API](#engine-api), [Pipeline](#derivation-pipeline), and [Config](#config). 8 | 9 | Advancing the driver forward one block is then as simple as calling the [Driver::advance](../src/driver/mod.rs#L132) method as done in `magi`'s [main](../bin/magi.rs) binary. 10 | 11 | Advancing the driver involves a few steps. First, the [Driver](../src/driver/mod.rs) will increment the [Pipeline](#derivation-pipeline) (as an iterator) to derive [PayloadAttributes](../src/engine/payload.rs). Then, the [Driver](../src/driver/mod.rs) will construct an [ExecutionPayload](../src/engine/payload.rs) that it can send through the [Engine API](#engine-api) as an `engine_newPayloadV1` request. Finally, the [ForkChoiceState](../src/engine/fork.rs) is updated by the driver, sending an `engine_forkchoiceUpdatedV1` request to the [Engine API](#engine-api). 12 | 13 | At this point, `magi` has successfully advanced the L2 chain forward by one block. 14 | 15 | ### Engine API 16 | 17 | The [EngineApi](../src/engine/mod.rs) exposes an interface for interacting with an external [execution client](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients), in our case [op-geth](https://github.com/ethereum-optimism/op-geth) or [op-reth](https://github.com/paradigmxyz/reth) (soon™). Notice, we cannot use [go-ethereum](https://github.com/ethereum/go-ethereum) as the execution client because Optimism's [execution client](https://github.com/ethereum-optimism/op-geth) requires a [minimal diff](https://op-geth.optimism.io/) to the [Engine API](https://github.com/ethereum/execution-apis/tree/main/src/engine). 18 | 19 | To construct an [EngineApi](../src/engine/mod.rs) as done in the `magi` [main binary](../bin/magi.rs), we must provide it with a base url (the port is optional and defaults to `8551`) as well as a 256 bit, hex-encoded secret string that is used to authenticate requests to the node. This secret is configured on the execution node's side using the `--authrpc.jwtsecret` flag. See [start-op-geth.sh](../docker/start-op-geth.sh) for an example of how to configure and run an [op-geth](https://github.com/ethereum-optimism/op-geth) instance.
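For orientation, a minimal sketch of that wiring, assuming the crate is imported as `magi` and that `EngineApi::new` accepts string slices (its call sites in [runner/mod.rs](../src/runner/mod.rs) pass `&config.l2_engine_url` and `&config.jwt_secret`):

```rust
use std::time::Duration;

use magi::engine::{Engine, EngineApi};

/// Builds an EngineApi client and waits for the authenticated endpoint
/// to come up, mirroring the polling loop in src/runner/mod.rs.
async fn connect_engine(l2_engine_url: &str, jwt_secret: &str) -> EngineApi {
    let engine_api = EngineApi::new(l2_engine_url, jwt_secret);
    while !engine_api.is_available().await {
        tokio::time::sleep(Duration::from_secs(3)).await;
    }
    engine_api
}
```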
15 | ### Engine API
16 | 
17 | The [EngineApi](../src/engine/mod.rs) exposes an interface for interacting with an external [execution client](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients), in our case [op-geth](https://github.com/ethereum-optimism/op-geth) or [op-reth](https://github.com/paradigmxyz/reth) (soon™). Note that we cannot use [go-ethereum](https://github.com/ethereum/go-ethereum) as the execution client, because Optimism's [execution client](https://github.com/ethereum-optimism/op-geth) requires a [minimal diff](https://op-geth.optimism.io/) to the [Engine API](https://github.com/ethereum/execution-apis/tree/main/src/engine).
18 | 
19 | To construct an [EngineApi](../src/engine/mod.rs) as done in the `magi` [main binary](../bin/magi.rs), we must provide it with a base url (the port is optional and defaults to `8551`) as well as a 256-bit, hex-encoded secret string that is used to authenticate requests to the node. This secret is configured on the execution node's side using the `--authrpc.jwtsecret` flag. See [start-op-geth.sh](../docker/start-op-geth.sh) for an example of how to configure and run an [op-geth](https://github.com/ethereum-optimism/op-geth) instance.
20 | 
21 | As mentioned in the [Driver](#driver) section, the [Driver](../src/driver/mod.rs) uses the [EngineApi](../src/engine/mod.rs) to send a constructed [ExecutionPayload](../src/engine/payload.rs) to the execution client using the [new_payload](../src/engine/api.rs#L187) method. It also updates the [ForkChoiceState](../src/engine/fork.rs) using the [forkchoice_updated](../src/engine/api.rs#L171) method.
22 | 
23 | Additionally, the [EngineApi](../src/engine/mod.rs) exposes a [get_payload](../src/engine/api.rs#L194) method to fetch the [ExecutionPayload](../src/engine/payload.rs) for a given payload ID.
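For example, constructing a client and checking connectivity might look like the sketch below. It assumes the crate is consumed as a library under the `magi` name; the secret shown is the throwaway value from the test suite in [api.rs](../src/engine/api.rs), not a real credential.

```rust
use magi::engine::EngineApi;

#[tokio::main]
async fn main() {
    // Base url of the execution client's authenticated engine endpoint.
    // The port defaults to 8551 if omitted.
    let engine = EngineApi::new(
        "http://localhost:8551",
        // Must match the value passed to op-geth via --authrpc.jwtsecret
        "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430",
    );

    // Sends an eth_chainId request to verify the engine is reachable
    println!("engine available: {}", engine.is_available().await);
}
```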
24 | 
25 | ### Derivation Pipeline
26 | 
27 | As mentioned in the [Driver](#driver) section, the [Derivation Pipeline](../src/derive/mod.rs) is responsible for much of `magi`'s functionality. It is used by the [Driver](#driver) to construct a [PayloadAttributes](../src/engine/payload.rs) from only an L1 RPC URL, passed through a [Config](#config) object.
28 | 
29 | When constructed, the [Pipeline](../src/derive/mod.rs) spawns an [L1 Chain Watcher](#l1-chain-watcher) and listens to the returned channel receivers for new L1 blocks and deposit transactions. It then uses its [stages](../src/derive/stages/mod.rs) as iterators to sequentially construct a [PayloadAttributes](../src/engine/payload.rs) from the L1 blocks and deposit transactions.
30 | 
31 | The Pipeline is broken up into [stages](../src/derive/stages/mod.rs) as follows.
32 | 
33 | #### Stages
34 | 
35 | ##### Batcher Transactions
36 | 
37 | The [Batcher Transactions](../src/derive/stages/batcher_transactions.rs) stage pulls transactions from its configured channel receiver, passed down from the parent [Pipeline](../src/derive/mod.rs). To construct a [Batcher Transaction](../src/derive/stages/batcher_transactions.rs) from the raw transaction data, it constructs [Frames](../src/derive/stages/batcher_transactions.rs) following the [Batch Submission Wire Format](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md#batch-submission-wire-format) documented in the [Optimism Specs](https://github.com/ethereum-optimism/specs/tree/main).
38 | 
39 | ##### Channels
40 | 
41 | In the next stage, [Channels](../src/derive/stages/channels.rs), the [Batcher Transactions](../src/derive/stages/batcher_transactions.rs) stage is passed in and used as an iterator over [Batcher Transaction](../src/derive/stages/batcher_transactions.rs) objects. The [Channels](../src/derive/stages/channels.rs) stage extracts [Frames](../src/derive/stages/batcher_transactions.rs) from the [Batcher Transaction](../src/derive/stages/batcher_transactions.rs) objects and places them in their corresponding [Channel](../src/derive/stages/channels.rs) objects. Since multiple channels can be built up simultaneously (as so-called `PendingChannel`s), the [Channels](../src/derive/stages/channels.rs) stage tracks whether each channel is complete, and only returns a channel, when iterated over, once it is ready.
42 | 
43 | Remember, since the [L1 Chain Watcher](#l1-chain-watcher) is spawned as a separate thread, it asynchronously feeds transactions and blocks over channels to the pipeline stages. As such, iterating over a stage like this one will return `None` until enough transactions have been received from the [L1 Chain Watcher](#l1-chain-watcher) to be split into frames and fill up a full channel.
44 | 
45 | ##### Batches
46 | 
47 | Next up, the [Batches](../src/derive/stages/batches.rs) stage iterates over the prior [Channels](../src/derive/stages/channels.rs) stage, decoding [Batch](../src/derive/stages/batches.rs) objects from the inner channel data. [Batch](../src/derive/stages/batches.rs) objects are RLP-decoded from the channel data following the [Batch Encoding Format](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md#batch-format), detailed below.
48 | 
49 | For version 0, [Batch](../src/derive/stages/batches.rs) objects are encoded as follows:
50 | 
51 | ```golang
52 | rlp_encode([parent_hash, epoch_number, epoch_hash, timestamp, transaction_list])
53 | ```
54 | 
55 | In this encoding (see the decoding sketch after this list),
56 | - `rlp_encode` encodes batches following the RLP format
57 | - `parent_hash` is the block hash of the previous L2 block
58 | - `epoch_number` is the number of the L1 block corresponding to the sequencing epoch of the L2 block
59 | - `epoch_hash` is the hash of the L1 block corresponding to the sequencing epoch of the L2 block
60 | - `timestamp` is the timestamp of the L2 block
61 | - `transaction_list` is an RLP-encoded list of EIP-2718 encoded transactions.
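To make the format concrete, here is a minimal sketch that decodes a version-0 batch with the `rlp` crate. It is not magi's actual decoder (which lives in [batches.rs](../src/derive/stages/batches.rs) and additionally handles channel decompression, span batches, and validation); the `BatchV0` struct and `decode_batch_v0` function are hypothetical.

```rust
use rlp::{DecoderError, Rlp};

/// Hypothetical container for the version-0 batch fields listed above.
struct BatchV0 {
    parent_hash: Vec<u8>,       // 32-byte hash of the previous L2 block
    epoch_number: u64,          // L1 block number of the sequencing epoch
    epoch_hash: Vec<u8>,        // 32-byte hash of that L1 block
    timestamp: u64,             // L2 block timestamp
    transactions: Vec<Vec<u8>>, // EIP-2718 encoded transactions
}

/// Decodes `batch_version ++ rlp_encode([...])` where `batch_version == 0`.
fn decode_batch_v0(data: &[u8]) -> Result<BatchV0, DecoderError> {
    if data.first() != Some(&0) {
        return Err(DecoderError::Custom("unsupported batch version"));
    }
    // The version byte precedes the RLP list
    let rlp = Rlp::new(&data[1..]);
    Ok(BatchV0 {
        parent_hash: rlp.val_at(0)?,
        epoch_number: rlp.val_at(1)?,
        epoch_hash: rlp.val_at(2)?,
        timestamp: rlp.val_at(3)?,
        transactions: rlp.list_at(4)?,
    })
}
```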
62 | 
63 | ##### Attributes
64 | 
65 | Lastly, the [Pipeline](../src/derive/mod.rs) applies the [Attributes](../src/derive/stages/attributes.rs) stage to the previous [Batches](../src/derive/stages/batches.rs) stage, iterating over [Attributes](../src/derive/stages/attributes.rs).
66 | 
67 | In this step, the final [PayloadAttributes](../src/derive/stages/attributes.rs) object is constructed by combining the [Batch](../src/derive/stages/batches.rs) object data with its corresponding L1 block, as well as applying system configuration values like the `suggested_fee_recipient`, `no_tx_pool`, and `gas_limit`.
68 | 
69 | ### L1 Chain Watcher
70 | 
71 | The L1 chain watcher is responsible for watching L1 for new blocks containing deposits and batcher transactions. `magi` spawns the L1 [`ChainWatcher`](../src/l1/mod.rs) in a separate thread and uses channels to communicate with the upstream consumers.
72 | 
73 | In `magi`'s case, the upstream consumer is the [`Pipeline`](../src/derive/mod.rs), which contains an instance of the [`ChainWatcher`](../src/l1/mod.rs) and passes the channel receivers into the pipeline [stages](../src/derive/stages/mod.rs).
74 | 
75 | When constructed in the [`Pipeline`](../src/derive/mod.rs), the [`ChainWatcher`](../src/l1/mod.rs) is provided with a [Config](../src/config/mod.rs) object that contains the critical config values for the L1 chain watcher. This includes:
76 | - [L1 RPC Endpoint](../src/config/mod.rs#L41)
77 | - [Deposit Contract Address](../src/config/mod.rs#L117)
78 | - [Batch Sender Address](../src/config/mod.rs#L139)
79 | - [Batch Inbox Address](../src/config/mod.rs#L115)
80 | 
81 | Note that when the `ChainWatcher` object is dropped, it aborts the tasks associated with its handlers using [`tokio::task::JoinHandle::abort`](https://docs.rs/tokio/1.13.0/tokio/task/struct.JoinHandle.html#method.abort).
82 | 
83 | ### Sync modes
84 | 
85 | Magi supports different [SyncModes](../src/config/mod.rs#L14) to sync the L2 chain. The sync mode can be set when calling the main binary with the `--sync-mode` flag. The following sync modes are supported:
86 | 
87 | - `full`: The full sync mode syncs the L2 chain from the genesis block. This is the default sync mode.
88 | - `checkpoint`: The checkpoint sync mode uses a trusted L2 RPC endpoint to bootstrap the sync phase. It works by sending the engine API a forkchoice update request for the latest block, which prompts the execution client to sync to that block over its p2p network; this is much faster than deriving every block from L1. Once the execution client has caught up, Magi takes over and starts the driver as normal.
89 | 
90 | ### Config
91 | 
92 | The [Config](../src/config/mod.rs) object contains the system configuration for the `magi` node.
93 | 
94 | **Config**
95 | - `l1_rpc_url`: The L1 RPC endpoint to use for the L1 chain watcher.
96 | - `l1_beacon_url`: The L1 beacon chain RPC endpoint.
97 | - `l2_rpc_url`: The L2 chain RPC endpoint.
98 | - `l2_engine_url`: The L2 chain engine API URL (see [Engine API](#engine-api)).
99 | - `chain`: A `ChainConfig` object detailed below.
100 | - `jwt_secret`: A hex-encoded secret string used to authenticate requests to the engine API.
101 | - `checkpoint_sync_url`: The URL of the trusted L2 RPC endpoint to use for checkpoint syncing.
102 | - `rpc_port`: The port to use for the Magi RPC server.
103 | - `rpc_addr`: The socket address to use for the Magi RPC server.
104 | 
105 | **ChainConfig**
106 | - `network`: The network name.
107 | - `chain_id`: The chain id.
108 | - `l1_start_epoch`: The L1 block number to start the L1 chain watcher at.
109 | - `l2_genesis`: The L2 genesis block.
110 | - `system_config`: The initial system config struct.
111 | - `batch_inbox`: The batch inbox address.
112 | - `deposit_contract`: The L1 address of the deposit contract.
113 | - `system_config_contract`: The L1 address of the system config contract.
114 | - `max_channel_size`: The maximum byte size of all pending channels.
115 | - `channel_timeout`: The max timeout for a channel (as measured by the frame L1 block number).
116 | - `seq_window_size`: Number of L1 blocks in a sequence window.
117 | - `max_seq_drift`: Maximum timestamp drift.
118 | - `regolith_time`: Timestamp of the regolith hardfork.
119 | - `blocktime`: The L2 blocktime.
120 | 
121 | The [ChainConfig](../src/config/mod.rs) contains default implementations for certain chains. For example, an `optimism` [ChainConfig](../src/config/mod.rs) instance can be created by calling `ChainConfig::optimism()`, and a `base` instance can be created by calling `ChainConfig::base()`.
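As a sketch (assuming the crate is consumed as a library under the `magi` name, and that the `ChainConfig` fields listed above are public):

```rust
use magi::config::ChainConfig;

fn main() {
    // Built-in defaults for the Optimism and Base networks
    let op = ChainConfig::optimism();
    let base = ChainConfig::base();

    println!("optimism chain id: {}", op.chain_id);
    println!("base chain id: {}", base.chain_id);
    println!("optimism L2 blocktime: {}s", op.blocktime);
}
```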
122 | 
--------------------------------------------------------------------------------
/src/engine/api.rs:
--------------------------------------------------------------------------------
  1 | use std::collections::HashMap;
  2 | use std::time::{Duration, SystemTime};
  3 | 
  4 | use again::RetryPolicy;
  5 | use eyre::Result;
  6 | use futures::prelude::*;
  7 | use futures_timer::TryFutureExt;
  8 | use reqwest::{header, Client};
  9 | use serde::de::DeserializeOwned;
 10 | use serde::{Deserialize, Serialize};
 11 | use serde_json::Value;
 12 | 
 13 | use crate::engine::DEFAULT_AUTH_PORT;
 14 | 
 15 | use super::{
 16 |     Engine, ExecutionPayload, ForkChoiceUpdate, ForkchoiceState, JwtSecret, PayloadAttributes,
 17 |     PayloadId, PayloadStatus, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V2,
 18 |     ENGINE_NEW_PAYLOAD_V2,
 19 | };
 20 | 
 21 | use super::{JSONRPC_VERSION, STATIC_ID};
 22 | 
 23 | /// An external op-geth engine api client
 24 | #[derive(Debug, Clone)]
 25 | pub struct EngineApi {
 26 |     /// Base request url
 27 |     pub base_url: String,
 28 |     /// The url port
 29 |     pub port: u16,
 30 |     /// HTTP Client
 31 |     pub client: Option<Client>,
 32 |     /// A JWT secret used to authenticate with the engine api
 33 |     secret: JwtSecret,
 34 | }
 35 | 
 36 | impl EngineApi {
 37 |     /// Creates a new [`EngineApi`] with a base url and secret.
 38 |     pub fn new(base_url: &str, secret_str: &str) -> Self {
 39 |         let secret = JwtSecret::from_hex(secret_str).unwrap();
 40 | 
 41 |         // Gracefully parse the port from the base url
 42 |         let parts: Vec<&str> = base_url.split(':').collect();
 43 |         let port = parts[parts.len() - 1]
 44 |             .parse::<u16>()
 45 |             .unwrap_or(DEFAULT_AUTH_PORT);
 46 |         let base_url = if parts.len() <= 2 {
 47 |             parts[0].to_string()
 48 |         } else {
 49 |             parts.join(":")
 50 |         };
 51 | 
 52 |         let client = reqwest::Client::builder()
 53 |             .default_headers({
 54 |                 header::HeaderMap::from_iter([(
 55 |                     header::CONTENT_TYPE,
 56 |                     header::HeaderValue::from_static("application/json"),
 57 |                 )])
 58 |             })
 59 |             .timeout(Duration::from_secs(5))
 60 |             .build()
 61 |             .expect("reqwest::Client could not be built, TLS backend could not be initialized");
 62 | 
 63 |         Self {
 64 |             base_url,
 65 |             port,
 66 |             client: Some(client),
 67 |             secret,
 68 |         }
 69 |     }
 70 | 
 71 |     /// Constructs the base engine api url for the given address
 72 |     pub fn auth_url_from_addr(addr: &str, port: Option<u16>) -> String {
 73 |         let stripped = addr.strip_prefix("http://").unwrap_or(addr);
 74 |         let stripped = addr.strip_prefix("https://").unwrap_or(stripped);
 75 |         let port = port.unwrap_or(DEFAULT_AUTH_PORT);
 76 |         format!("http://{stripped}:{port}")
 77 |     }
 78 | 
 79 |     /// Returns if the provided secret matches the secret used to authenticate with the engine api.
 80 |     pub fn check_secret(&self, secret: &str) -> bool {
 81 |         self.secret.equal(secret)
 82 |     }
 83 | 
 84 |     /// Creates an engine api from environment variables
 85 |     pub fn from_env() -> Self {
 86 |         let base_url = std::env::var("ENGINE_API_URL").unwrap_or_else(|_| {
 87 |             panic!(
 88 |                 "ENGINE_API_URL environment variable not set. \
 89 |                 Please set this to the base url of the engine api"
 90 |             )
 91 |         });
 92 |         let secret_key = std::env::var("JWT_SECRET").unwrap_or_else(|_| {
 93 |             panic!(
 94 |                 "JWT_SECRET environment variable not set. \
 95 |                 Please set this to the 256 bit hex-encoded secret key used to authenticate with the engine api. \
 96 |                 This should be the same as set in the `--auth.secret` flag when executing go-ethereum."
 97 |             )
 98 |         });
 99 |         let base_url = EngineApi::auth_url_from_addr(&base_url, None);
100 |         Self::new(&base_url, &secret_key)
101 |     }
102 | 
103 |     /// Construct base body
104 |     pub fn base_body(&self) -> HashMap<String, Value> {
105 |         let mut map = HashMap::new();
106 |         map.insert(
107 |             "jsonrpc".to_string(),
108 |             Value::String(JSONRPC_VERSION.to_string()),
109 |         );
110 |         map.insert("id".to_string(), Value::Number(STATIC_ID.into()));
111 |         map
112 |     }
113 | 
114 |     /// Helper to construct a post request through the client
115 |     async fn post<P>(&self, method: &str, params: Vec<Value>) -> Result<P>
116 |     where
117 |         P: DeserializeOwned,
118 |     {
119 |         // Construct the request params
120 |         let mut body = self.base_body();
121 |         body.insert("method".to_string(), Value::String(method.to_string()));
122 |         body.insert("params".to_string(), Value::Array(params));
123 | 
124 |         tracing::trace!("Sending request to url: {:?}", self.base_url);
125 |         tracing::trace!("Sending request: {:?}", serde_json::to_string(&body));
126 | 
127 |         // Send the client request
128 |         let client = self
129 |             .client
130 |             .as_ref()
131 |             .ok_or(eyre::eyre!("Driver missing http client"))?;
132 | 
133 |         // Clone the secret so we can use it in the retry policy.
134 |         let secret_clone = self.secret.clone();
135 | 
136 |         let policy = RetryPolicy::fixed(Duration::ZERO).with_max_retries(5);
137 | 
138 |         // Send the request
139 |         let res = policy
140 |             .retry(|| async {
141 |                 // Construct the JWT Authorization Token
142 |                 let claims = secret_clone.generate_claims(Some(SystemTime::now()));
143 |                 let jwt = secret_clone
144 |                     .encode(&claims)
145 |                     .map_err(|_| eyre::eyre!("EngineApi failed to encode jwt with claims!"))?;
146 | 
147 |                 // Send the request
148 |                 client
149 |                     .post(&self.base_url)
150 |                     .header(header::AUTHORIZATION, format!("Bearer {}", jwt))
151 |                     .json(&body)
152 |                     .send()
153 |                     .map_err(|e| eyre::eyre!(e))
154 |                     .timeout(Duration::from_secs(2))
155 |                     .await?
156 |                     .json::<EngineApiResponse<P>>()
157 |                     .map_err(|e| eyre::eyre!(e))
158 |                     .timeout(Duration::from_secs(2))
159 |                     .map_err(|e| eyre::eyre!(e))
160 |                     .await
161 |             })
162 |             .await?;
163 | 
164 |         if let Some(res) = res.result {
165 |             return Ok(res);
166 |         }
167 | 
168 |         if let Some(err) = res.error {
169 |             eyre::bail!("Engine API POST error: {}", err.message);
170 |         }
171 | 
172 |         // This scenario shouldn't occur as the response should always have either data or an error
173 |         eyre::bail!("Failed to parse Engine API response")
174 |     }
175 | 
176 |     /// Calls the engine to verify it's available to receive requests
177 |     pub async fn is_available(&self) -> bool {
178 |         self.post::<Value>("eth_chainId", vec![]).await.is_ok()
179 |     }
180 | }
181 | 
182 | /// Generic Engine API response
183 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
184 | #[serde(rename_all = "camelCase")]
185 | pub struct EngineApiResponse<P> {
186 |     /// JSON RPC version
187 |     jsonrpc: String,
188 |     /// Request ID
189 |     id: u64,
190 |     /// JSON RPC payload
191 |     result: Option<P>,
192 |     /// JSON RPC error payload
193 |     error: Option<EngineApiErrorPayload>,
194 | }
195 | 
196 | /// Engine API error payload
197 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
198 | pub struct EngineApiErrorPayload {
199 |     /// The error code
200 |     pub code: i64,
201 |     /// The error message
202 |     pub message: String,
203 |     /// Optional additional error data
204 |     pub data: Option<Value>,
205 | }
206 | 
207 | #[async_trait::async_trait]
208 | impl Engine for EngineApi {
209 |     /// Sends an `engine_forkchoiceUpdatedV2` (V3 post Ecotone) message to the engine.
210 |     async fn forkchoice_updated(
211 |         &self,
212 |         forkchoice_state: ForkchoiceState,
213 |         payload_attributes: Option<PayloadAttributes>,
214 |     ) -> Result<ForkChoiceUpdate> {
215 |         let payload_attributes_param = match payload_attributes {
216 |             Some(payload_attributes) => serde_json::to_value(payload_attributes)?,
217 |             None => Value::Null,
218 |         };
219 |         let forkchoice_state_param = serde_json::to_value(forkchoice_state)?;
220 |         let params = vec![forkchoice_state_param, payload_attributes_param];
221 |         let res = self.post(ENGINE_FORKCHOICE_UPDATED_V2, params).await?;
222 |         Ok(res)
223 |     }
224 | 
225 |     /// Sends an `engine_newPayloadV2` (V3 post Ecotone) message to the engine.
226 |     async fn new_payload(&self, execution_payload: ExecutionPayload) -> Result<PayloadStatus> {
227 |         let params = vec![serde_json::to_value(execution_payload)?];
228 |         let res = self.post(ENGINE_NEW_PAYLOAD_V2, params).await?;
229 |         Ok(res)
230 |     }
231 | 
232 |     /// Sends an `engine_getPayloadV2` (V3 post Ecotone) message to the engine.
233 |     async fn get_payload(&self, payload_id: PayloadId) -> Result<ExecutionPayload> {
234 |         let encoded = format!("{:x}", payload_id);
235 |         let padded = format!("0x{:0>16}", encoded);
236 |         let params = vec![Value::String(padded)];
237 |         let res = self
238 |             .post::<GetPayloadResponse>(ENGINE_GET_PAYLOAD_V2, params)
239 |             .await?;
240 |         Ok(res.execution_payload)
241 |     }
242 | }
243 | 
244 | /// Wrapper around an [ExecutionPayload]
245 | #[derive(Debug, Serialize, Deserialize, Default)]
246 | #[serde(rename_all = "camelCase")]
247 | struct GetPayloadResponse {
248 |     /// The execution payload returned by the engine via `engine_getPayloadV2` (`engine_getPayloadV3` post Ecotone)
249 |     execution_payload: ExecutionPayload,
250 | }
251 | 
252 | #[cfg(test)]
253 | mod tests {
254 |     use std::time::SystemTime;
255 | 
256 |     // use std::str::FromStr;
257 |     // use ethers_core::types::H256;
258 | 
259 |     use super::*;
260 | 
261 |     const AUTH_ADDR: &str = "0.0.0.0";
262 |     const SECRET: &str = "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430";
263 | 
264 |     #[tokio::test]
265 |     async fn test_engine_get_payload() {
266 |         // Construct the engine api client
267 |         let base_url = EngineApi::auth_url_from_addr(AUTH_ADDR, Some(8551));
268 |         assert_eq!(base_url, "http://0.0.0.0:8551");
269 |         let engine_api = EngineApi::new(&base_url, SECRET);
270 |         assert_eq!(engine_api.base_url, "http://0.0.0.0:8551");
271 |         assert_eq!(engine_api.port, 8551);
272 | 
273 |         // Construct mock server params
274 |         let secret = JwtSecret::from_hex(SECRET).unwrap();
275 |         let claims = secret.generate_claims(Some(SystemTime::UNIX_EPOCH));
276 |         let jwt = secret.encode(&claims).unwrap();
277 |         assert_eq!(jwt, String::from("eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjAsImV4cCI6NjB9.rJv_krfkQefjWnZxrpnDimR1NN1UEUffK3hQzD1KInA"));
278 |         // let bearer = format!("Bearer {jwt}");
279 |         // let expected_body = r#"{"jsonrpc": "2.0", "method": "engine_getPayloadV1", "params": [""], "id": 1}"#;
280 |         // let mock_response = ExecutionPayloadResponse {
"2.0".to_string(), 282 | // id: 1, 283 | // result: ExecutionPayload { 284 | // parent_hash: H256::from( 285 | // } 286 | // }; 287 | 288 | // Create the mock server 289 | // let server = ServerBuilder::default() 290 | // .set_id_provider(RandomStringIdProvider::new(16)) 291 | // .set_middleware(middleware) 292 | // .build(addr.parse::().unwrap()) 293 | // .await 294 | // .unwrap(); 295 | 296 | // Query the engine api client 297 | // let execution_payload = engine_api.get_payload(PayloadId::default()).await.unwrap(); 298 | // let expected_block_hash = 299 | // H256::from_str("0xdc0818cf78f21a8e70579cb46a43643f78291264dda342ae31049421c82d21ae") 300 | // .unwrap(); 301 | // assert_eq!(expected_block_hash, execution_payload.block_hash); 302 | 303 | // Stop the server 304 | // server.stop().unwrap(); 305 | // server.stopped().await; 306 | } 307 | } 308 | -------------------------------------------------------------------------------- /src/derive/stages/batcher_transactions.rs: -------------------------------------------------------------------------------- 1 | use std::sync::mpsc; 2 | 3 | use eyre::Result; 4 | use std::collections::VecDeque; 5 | 6 | use crate::derive::PurgeableIterator; 7 | 8 | /// Represents a transaction sent to the `Batch Inbox` on L1. 9 | pub struct BatcherTransactionMessage { 10 | /// The L2 transactions included in this batch 11 | pub txs: Vec, 12 | /// The L1 block number this transaction was included in 13 | pub l1_origin: u64, 14 | } 15 | 16 | /// Receives [BatcherTransactionMessage] messages from a channel and stores these in a [VecDeque]. 17 | pub struct BatcherTransactions { 18 | /// [VecDeque] containing Batcher Transaction 19 | txs: VecDeque, 20 | /// [BatcherTransactionMessage] channel [receiver](mpsc::Receiver) 21 | transaction_rx: mpsc::Receiver, 22 | } 23 | 24 | impl Iterator for BatcherTransactions { 25 | type Item = BatcherTransaction; 26 | 27 | /// Receives new [BatcherTransactionMessage] messages from the channel and adds these to the deque. Pops and returns the first deque element. 28 | fn next(&mut self) -> Option { 29 | self.process_incoming(); 30 | self.txs.pop_front() 31 | } 32 | } 33 | 34 | impl PurgeableIterator for BatcherTransactions { 35 | /// Resets itself by clearing the channel and deque 36 | fn purge(&mut self) { 37 | // drain the channel first 38 | while self.transaction_rx.try_recv().is_ok() {} 39 | self.txs.clear(); 40 | } 41 | } 42 | 43 | impl BatcherTransactions { 44 | /// Creates a new Batcher Transactions. 45 | pub fn new(transaction_rx: mpsc::Receiver) -> Self { 46 | Self { 47 | transaction_rx, 48 | txs: VecDeque::new(), 49 | } 50 | } 51 | 52 | /// Receives new [BatcherTransactionMessage] messages from the channel and adds these to the end of the deque. 53 | pub fn process_incoming(&mut self) { 54 | while let Ok(BatcherTransactionMessage { txs, l1_origin }) = self.transaction_rx.try_recv() 55 | { 56 | for data in txs { 57 | let res = BatcherTransaction::new(&data, l1_origin).map(|tx| { 58 | self.txs.push_back(tx); 59 | }); 60 | 61 | if res.is_err() { 62 | tracing::warn!("dropping invalid batcher transaction"); 63 | } 64 | } 65 | } 66 | } 67 | } 68 | 69 | /// A single batcher transaction 70 | #[derive(Debug, Clone)] 71 | pub struct BatcherTransaction { 72 | /// The version byte. 73 | pub version: u8, 74 | /// The rollup payload consisting of 1 or more frames. 
 75 |     pub frames: Vec<Frame>,
 76 | }
 77 | 
 78 | impl BatcherTransaction {
 79 |     /// Creates a new [BatcherTransaction]
 80 |     pub fn new(data: &[u8], l1_origin: u64) -> Result<Self> {
 81 |         let version = data[0];
 82 |         let frame_data = data.get(1..).ok_or(eyre::eyre!("No frame data"))?;
 83 | 
 84 |         let mut offset = 0;
 85 |         let mut frames = Vec::new();
 86 |         while offset < frame_data.len() {
 87 |             let (frame, next_offset) = Frame::from_data(frame_data, offset, l1_origin)?;
 88 |             frames.push(frame);
 89 |             offset = next_offset;
 90 |         }
 91 | 
 92 |         Ok(Self { version, frames })
 93 |     }
 94 | }
 95 | 
 96 | /// A channel frame.
 97 | #[derive(Debug, Default, Clone)]
 98 | pub struct Frame {
 99 |     /// A unique identifier for the channel containing the frame.
100 |     pub channel_id: u128,
101 |     /// The index of the frame within the channel
102 |     pub frame_number: u16,
103 |     /// The byte length of frame_data. Capped to 1,000,000 bytes.
104 |     pub frame_data_len: u32,
105 |     /// A sequence of bytes belonging to the channel, logically after the previous frames
106 |     pub frame_data: Vec<u8>,
107 |     /// If the frame is the last in the channel
108 |     pub is_last: bool,
109 |     /// The L1 block number this frame was submitted in.
110 |     pub l1_inclusion_block: u64,
111 | }
112 | 
113 | impl Frame {
114 |     /// Converts a sequence of bytes into a [Frame]
115 |     fn from_data(data: &[u8], offset: usize, l1_inclusion_block: u64) -> Result<(Self, usize)> {
116 |         let data = &data[offset..];
117 | 
118 |         if data.len() < 23 {
119 |             eyre::bail!("invalid frame size");
120 |         }
121 | 
122 |         let channel_id = u128::from_be_bytes(data[0..16].try_into()?);
123 |         let frame_number = u16::from_be_bytes(data[16..18].try_into()?);
124 |         let frame_data_len = u32::from_be_bytes(data[18..22].try_into()?);
125 | 
126 |         let frame_data_end = 22 + frame_data_len as usize;
127 |         if data.len() <= frame_data_end {
128 |             eyre::bail!("invalid frame size");
129 |         }
130 | 
131 |         let frame_data = data[22..frame_data_end].to_vec();
132 | 
133 |         let is_last = if data[frame_data_end] > 1 {
134 |             eyre::bail!("invalid is_last flag");
135 |         } else {
136 |             data[frame_data_end] != 0
137 |         };
138 | 
139 |         let frame = Self {
140 |             channel_id,
141 |             frame_number,
142 |             frame_data_len,
143 |             frame_data,
144 |             is_last,
145 |             l1_inclusion_block,
146 |         };
147 | 
148 |         tracing::debug!(
149 |             "saw batcher tx: block={}, number={}, is_last={}",
150 |             l1_inclusion_block,
151 |             frame_number,
152 |             is_last
153 |         );
154 | 
155 |         Ok((frame, offset + frame_data_end + 1)) // advance past this frame and its is_last byte, not the whole buffer
156 |     }
157 | }
158 | 
159 | #[cfg(test)]
160 | mod tests {
161 |     use super::*;
162 | 
163 |     const TX_DATA: &str =
"00b3ec7df691dc58384222fbdc05891b08000000000bd478dad459793894fbdb7f9e19db20eb902dbb9086b410b2af2939b66c255bd60991c8a133c6845276c9daa36c21bb3211932c8908591a6509132a3b1959decbe93ade73aeebbc745ee7f773755fd7fcf599effdfd3cf7f5b93ff7f7f93e786a5d804cad05255ef05f6445189cc97f1b4ef3656d2cdd318bcbe30a93f689737dea1f3297ed8d83029fa64364f70716e8c138e517e81606f661f754e982039eda1366dc277286510cf7142b717613166832d56279316cb1000ba65305f1e230eb3fec23da58628342a55fc9ee47fa1db79e1d672c3968bfd4740253ae81b0ca2a01fe1456ad32f374dd47270af5fcc69839881590a92137b059305c9d2280500faf1a489d7179f26143eb2923944efb05a1381b4536499f9ed9be14ff2817142427de6d4a59af3be62c8fa3d5927fef3615e6226f4bc1ad412d4b8c609853dc8b87b591612d4170a5d9df4953a7f1c73ebc397a8f742d3526ac08559a86953e948c9e75c7e061f68d186f3960f3c06c0e83d0e6380c0041601bf197c591f9a7553e1647f6f171fa191419c90d03f08605100061f06d6c60bd054eb119788b6b8ff14ee2eb052e0af978632db54e63fed6900a3ad0b179456da86a97b9134d00b9d0b04b97a604dd743bb92fa035f0412bec13a2793e7a9ad5d33bb1bdcbf20d22146377f9d0ca56f9d51733a63507dc9270cc575fd67821d24e1d76a18bce5c503c7105ed33cd51c62075c2284ee2e2120bf1154d553ccc2694c37ef478185d64e7c7e23d8d1ca784c7b17034d436d228729fd385b9a73a2900b0adc7ec9ebe6a12bbd61c2b23cc5ab27a0bd80beda6203f2ef8e02540f41dd4154ca8b52563434b3a0d6dae239607cff261e9f4cbf317f3b030b72030180a02cf45c6d6f5b401fb6e5f1ae6541b1a1fafe55ab9b462e28729d77840995cf167f2bd365a1af9538a93022353d6019218be002b7cfba60fbb348559e7cb9ca6cc20642cf82997cb7d58b7c2c919b96f29f9f0c52ceb792c4ec403adcf025d38461918536ade57d6256794c54d9591726b85ae5ca645790264f5ce99be48fcce9766836f76e9b73c52a9fd2c2a675e4122f85d148b406cd3f6f8c2ca860ad88b4201609def590ffbe3d8667b8495284986b19e918fd4f26e7aacf5e8d7bc6733e3bda1f65a90a4b901166e8317198816e8b8f6a235b2735954b95a877177b165b1dd19064d9eef7cb936f83a68a52447c996a14e2d7967b2a0f20a8e440bc8fc8bf54da41df6d00a95ee76eea6a1e43cd90b374dee48a889b33ec87480a8d776204b17e24aa9f787efc9cb246560634d57bf1ac252549f9d9f9f4b141f0ba3435c09837fe71bb8c1f7ffb0e4edf20518d554d6f97211849d7bdf9e1d4d6dad75f3ffaa29f5f5bed74c291159ddb4d274dd4c7f72113a2f9fe17534fc9b46f02ffcb153d6a0efcd41c7de92d78f16e73cbeec5b2496f17fe71bbcc1175fd6914a7890e046782b25d58a0e33c8e046996e932f68a7e97bf6c6773dd414db0992ee66f862efd7b0d4cbb38a2725a6b15af899c579f5f73395a46ac6439a19c1ac17300a69dd16434ea3f0abc7382c254daeedb28cb28ce8a4715a16f3c0532e0164ca052880911a317f464a05ac6f507f15e4d2507c37acc2672f2a65ba89452cd462e4c10f0f53373265f61f83c987716330c5ad883c130aef10d535124188963915286248c63fe160a25aa04ce01182bdcf7cabffe445c9c402006fa1d9c9c12406bec7637610ffbdc0114419d3d1c2665984e38779b84aa0406a349297e54ec1a783c92edc841c4a5f8af3ab9fa54b24fb31dfd02339b55153b01c472a83b7bf729c6ea4d16268a519df2abfc77da516e51cbad5b523bacf2fa0510ca7809952648a79ee1749ae815455db8bbf5adc99f5ca08a2486c653e8ab649921b701814ef71ed1c312261efe82c0c7960e1aed0ac772a7a2d4a8ad5c72cfe4b4153af34aa62f09866423392fe1ee9158054e7877883c2be453f6f873fbcc5bfa785cf96646d7020bba6b16726f7bd76bf8e6b9ec886a69936346d9eef031cbddfef860b9aa276fc98d9e57b7282f0dfd2f4f6e22f9adecf6ec5acb74cef4d49beeedc4b607f0cc01b0c7750d3300d5ea95f13770efffea7ee9214aa608830831027a6cac7e43f5263b609ec5ac8392856353d8d543ca1f56c7fa91581533ba051a7521ea8b3406775e144c3f49fa69ee7c4b19d344a99df2abfad67aa357a685e092af3f27baa103215d1299e79bcdf523975e98d79bc8892bf67f091e78d11d8525ac973c7925330ef4a1f45f7e851fa464c16e2bc6fb8ea74ad9bbf6cad30116d6eef0e98654be15e71c33a9d6a54709f9cd192375a7b68ba8509905f524396ac59cb99b80757cbd2ae33093dbd51d426ee10ec98b966fde1e81919bb727d60f12444e546317fcd852c9fa41a622735d32f28716c9a7726dcedf3613a7782a67888c40f5bbf
07e18f69a29975d88f645a878b8f9889ef2f9c2f2aa6d5e7111be9e71825db4ebef6375bf9e1949e7f9a264a731b9d57aa9d548c58ae610dcc797a805e9e0920b0d405ff849d3737009e8af53f45acfddc95f16a36c40c80bfe6ded1d71c9670466827f1f502fb36485df66b7c3d35669fdb34dd9ed97fd3d78a973eb0c1c4452f212660cc155545bc93f3755f150a56e0453410f37a721e465d48f09b5f26a97356cac9cb176f957f8f0ca7d01518275b5c9cf7a3eb7908dc9bc84ee704915bb4353aba2bc01d9b2277fc527487470d429f45f8dd2ac154d9a24af8c85be039e5a0125f95414f1b6ebdf3507abe4371059ecb17564fe60829d393a4af4dc91ba02869451ba5579a726f8f43f23315d143b465b436cbd5c65c2c7eec76e99ae3d1e6c885f7b9b56d079db9fff7d57d7e43d346056b4b3e80fd41a4ab83bfe3924fd91bca2b0a3fe1098961d9770959672e55d1203cce4573c60180d7b351eda4a62588777c77125f2f3045fa5304178bfee869bb89570f6119d16abb5e8f7334266864d5791cacd655e1ad9b2b9cd60aebb5d2b538322818315e3bd9fd793f4cea6925ca7c363d2d245170abfcdad50d221509fa89e7083c4f92436dbe527a7f48fdd6c24edb36991e8874e83cab0406a0463b966ff376f194e14c4171a5b05d3cfb4cd69e0512e063ed87e32faf9f900afd761f9e7858d96fc600e3e353e7bae4d0dbe455f6f5b9e31beef4625537273988514d2088e8d79c14162c29955b91ef33a8467208283ffdd0750fcbeebd6c621578582e408665419705c9a3495ac8b9ea9595986cf5cc03579bd43d898e96c55cc5828691b5f8ea1f36ff4b6498391e761a46861962c1f4200a5c355694092bca1404fa88c536b029cbce2c0d1cfb86465a4a08ed0ebe7badc715830787d113aec15b946b8b7600f9b7c0adb7d76effac9ffe26b6e007506b1aeb48991869fca7f6a7d9c67ad1b9884307b6b93f4800a1eceb15cb4e3ebc394e77da220de3b227739a05094f3e4848d3199b2255ba431ca0dfa8f5625fba3725f9d3c514c5513c763b7caffbfaa43a77411e876ac8b94fbc56788a11804c31089994cc79d273068924c7ef9f5de11a4ea6da0f321316f7cf7774f5843712448c7e58ad97c914311bb6beb061eb6946166e1c98bdef8e2c921e63a4ed085d0db4693fa1addb84a7db0f7649c488528df6a9f1be1c05e0a37d7010beade3d0b66c1d085966df161e8adafcc6355496632bdbcd825623f88f18b7f1b9c2cfa949bf793859c51a57a8c23cbc7f7af5aa5155f1dcf1c71de23c0bfcb40a09aa4deda6050c8569ab2f5c537eb9e087c42c3a670c286e959f5fcf1e57393465caf598def15e14c588dd70884248da9c6b6bd44d54cc73bde72a23aa259d7b8ff77d8ae97b3150e021245ddf4ada65661daf806e9d9dabec5558b7f550ebf7ec260b16b6eeca8b7a1aaaf9c5a26c0d951e22723402ab211f1e29dba840729edee9496582beaad4554e5e2eed3d11a14283c9e23ace5d2b4e433d0fcc3078b0124606cbb1603aec8f6f23415408e358da0a8b733edac893e8b77bef4f59328a6ae5d3ca87b0e58e7f115001f0a0c6214938f69fb4f9df5d94fd7349511c8be8f76872e109bd9bc6c2fdfff03993e49ed485a226b1da209b4d975acc32f9a900ffa6cfffddf31340280d2efa59844d59a7ec592dd5a87998b6113506c44c665ca197cebff1c90e5484cc8a6cb2c5b1badab35aefa35c1384f0bb6459061ad574c2f37f8bbbd2e8dff5f27f020000ffff8db4683801"; 164 | 165 | #[test] 166 | fn test_decode_tx() { 167 | let data = hex::decode(TX_DATA).unwrap(); 168 | 169 | let tx = BatcherTransaction::new(&data, 123456).unwrap(); 170 | let frame = &tx.frames[0]; 171 | 172 | assert_eq!(tx.version, 0); 173 | assert_eq!(frame.channel_id, 239159748140584302248388764660258118408); 174 | assert_eq!(frame.frame_data_len, 3028); 175 | assert!(frame.is_last); 176 | assert_eq!(frame.frame_data, data[23..data.len() - 1]); 177 | } 178 | 179 | #[test] 180 | fn test_push_tx() { 181 | let data = bytes::Bytes::from(hex::decode(TX_DATA).unwrap()); 182 | let txs = vec![data]; 183 | 184 | let (tx, rx) = mpsc::channel(); 185 | let mut stage = BatcherTransactions::new(rx); 186 | 187 | let res = tx.send(BatcherTransactionMessage { 188 | txs, 189 | l1_origin: 123456, 190 | }); 191 | assert!(res.is_ok()); 192 | 193 | stage.process_incoming(); 194 | 195 | let tx = &stage.txs[0]; 196 | let frame = &tx.frames[0]; 197 | 198 | assert_eq!(stage.txs.len(), 1); 199 | 
assert_eq!(tx.version, 0); 200 | assert_eq!(tx.frames.len(), 1); 201 | assert_eq!(frame.channel_id, 239159748140584302248388764660258118408); 202 | } 203 | } 204 | --------------------------------------------------------------------------------