├── .github ├── CODEOWNERS ├── scripts │ ├── verify_tag.sh │ └── verify_chart_version.sh └── workflows │ ├── scripts │ └── verify_tag.sh │ └── helm-publish.yaml ├── packages ├── adapters │ ├── signers │ │ ├── src │ │ │ ├── eigen │ │ │ │ └── private_key.rs │ │ │ ├── lib.rs │ │ │ ├── kms_utils.rs │ │ │ ├── eigen.rs │ │ │ └── eth.rs │ │ └── Cargo.toml │ ├── storage │ │ ├── src │ │ │ ├── mappings.rs │ │ │ ├── error.rs │ │ │ └── mappings │ │ │ │ └── eigen_tables.rs │ │ ├── migrations │ │ │ ├── 0001_initial.down.sql │ │ │ ├── 0006_fuel_block_drop_hash_and_set_height_as_pkey.up.sql │ │ │ ├── 0005_tx_state_added.up.sql │ │ │ ├── 0008_remove_squeezed_out_state.sql │ │ │ ├── 0004_blob_gas_bumping.sql │ │ │ ├── 0009_eigen_da_submission.sql │ │ │ ├── 0007_cost_tracking.sql │ │ │ ├── 0003_block_submission_tx_id.up.sql │ │ │ ├── 0001_initial.up.sql │ │ │ └── 0002_better_fragmentation.up.sql │ │ └── Cargo.toml │ ├── da │ │ ├── src │ │ │ └── lib.rs │ │ └── Cargo.toml │ ├── eth │ │ ├── src │ │ │ ├── lib.rs │ │ │ ├── metrics.rs │ │ │ ├── aws.rs │ │ │ └── http.rs │ │ └── Cargo.toml │ ├── fuel │ │ ├── build.rs │ │ ├── Cargo.toml │ │ └── src │ │ │ └── metrics.rs │ ├── clock │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ └── eigenda │ │ ├── src │ │ ├── lib.rs │ │ ├── error.rs │ │ ├── bindings.rs │ │ └── codec.rs │ │ └── Cargo.toml ├── services │ ├── src │ │ ├── fee_metrics_tracker.rs │ │ ├── state_listener.rs │ │ ├── state_committer.rs │ │ ├── types │ │ │ ├── fragment.rs │ │ │ ├── fuel_block_committed_on_l1.rs │ │ │ ├── bundle_cost.rs │ │ │ ├── state_submission.rs │ │ │ ├── eigen_submission.rs │ │ │ ├── non_empty.rs │ │ │ ├── transactions.rs │ │ │ ├── block_submission.rs │ │ │ ├── l1_height.rs │ │ │ └── serial_id.rs │ │ ├── types.rs │ │ ├── state_committer │ │ │ └── commit_helpers.rs │ │ ├── health_reporter.rs │ │ ├── status_reporter.rs │ │ ├── lib.rs │ │ ├── cost_reporter.rs │ │ ├── fees │ │ │ └── testing.rs │ │ ├── fee_metrics_tracker │ │ │ ├── eigen_da.rs │ │ │ └── ethereum_da.rs │ │ ├── state_listener │ │ │ └── port.rs │ │ └── block_bundler │ │ │ └── common.rs │ ├── tests │ │ └── status_reporter.rs │ └── Cargo.toml ├── encoding │ ├── src │ │ ├── lib.rs │ │ ├── bundle.rs │ │ ├── blob.rs │ │ ├── blob │ │ │ └── encoder.rs │ │ └── bundle │ │ │ └── decoder.rs │ └── Cargo.toml ├── metrics │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── connection_health_tracker.rs ├── test-helpers │ └── Cargo.toml └── benchmarks │ ├── Cargo.toml │ ├── src │ └── lib.rs │ └── README.md ├── _typos.toml ├── .env ├── .dockerignore ├── e2e ├── helpers │ ├── src │ │ ├── fuel_node_simulated.rs │ │ ├── lib.rs │ │ ├── fuel_node_simulated │ │ │ ├── server.rs │ │ │ └── graphql.rs │ │ └── bin │ │ │ └── start_committer.rs │ ├── tests │ │ └── harness.rs │ └── Cargo.toml ├── benches │ ├── src │ │ ├── data.rs │ │ ├── template.rs │ │ ├── handlers.rs │ │ └── main.rs │ └── Cargo.toml └── tests │ ├── Cargo.toml │ └── tests │ ├── eigen_kms.rs │ └── eigen_state.rs ├── fee_algo_simulation ├── src │ ├── state.rs │ ├── main.rs │ ├── utils.rs │ └── models.rs └── Cargo.toml ├── run_local_db.sh ├── .gitignore ├── sql-compose.yaml ├── fuel_simulator ├── Cargo.toml └── src │ └── main.rs ├── rustfmt.toml ├── helm └── fuel-block-committer │ ├── templates │ ├── serviceaccount.yaml │ ├── service.yaml │ ├── servicemonitor.yaml │ ├── tests │ │ └── test-connection.yaml │ ├── hpa.yaml │ ├── _helpers.tpl │ └── deployment.yaml │ ├── .helmignore │ ├── Chart.yaml │ └── values.yaml ├── .sqlx ├── 
query-c975c167c0dcbdc38e13157169bf17487cc5e86a4fd28d344da7f7ef55358601.json ├── query-9fad1eeaa60ea30606182ffef41d62900c0343c83e6258a2e9f287c2b4e0281e.json ├── query-eb46c62ffe6a49191e586a06f7c6823385925569f27394bad58ee6134522f03b.json ├── query-111a622bad3c042be9ee7fdf3ce9db91900473950c921260eaee6c3bff4c7096.json ├── query-e5a293ee645d92f428e060ecea14869c5dcb9a44a3e431357f9032bf864d6db2.json ├── query-26b0ea141fd42d49dba3af670d49256cb56247f1e23dc8ff6c4955f99a5d980a.json ├── query-dbd4dc48ee0c62be2690c7894bfd111cfe64383add2ae8d78f90975b74af6b57.json ├── query-ca516d5a6f16e7877c129ec68edfb8eb4358479355cc4fe0a85ad1ea8d6bd014.json ├── query-266a53bb892dca106ef57ed4617a44e2d08f1e3fe2d525722020d1a32aa8111c.json ├── query-0262541213bf75b490f385cca5b76bcd130d2fcb4cdd88ababb3348d28999fa8.json ├── query-bf7f6bfbf266430a212e9b8e8e8ee5e1ea3b97cdb624324a07f018fdc045341d.json ├── query-8c34c4c454b5b6d8bfc2eb67a38eff1ceb985eb38321fe5760ef02dfa40e3b3f.json ├── query-a2673e2e158bbaf129a8c5cb2f0891f2c073841f52ae893e7590562d574609c4.json ├── query-2096d1ddfd76357493610c7d1a11022ce99ec1d9fd603fb10d4723ca05267627.json ├── query-e01e9734a56ca8dd2f7fcbd62570b390b3348ecd2bb6754a04e31958a9391028.json ├── query-d6ad3af8502339a76d4a880f25b887858f300b7b36d0490c244e05d3fd20ee4c.json ├── query-851e5744c6d1c35341d1314e9485b4bdd6bf19af580170da626ab9740a4f4c02.json ├── query-4636ebc3164dd8f0e7161e13feb5cbdd41fbe7546425bfbd1c1ff47d51a79e25.json ├── query-c38ac634ddb7743a7c37a84a8537b0fed4cf0607a0c04fd2bee4efae8b6ac6b8.json ├── query-f62bc595ae70229a6e37d7f1459ffe569d9ae2701cee893f2d2ef77a5e20e6f1.json ├── query-d28ad71b43b831e8104e83ddd69359622fe688f850f896aeab4b324817129aa5.json ├── query-c820bfe642b85081bd8a594d94ff4524a3889215e5056401b4bcd5fe4b027b58.json ├── query-cc10c6369b02a35fee75d611dbd8bf096f832cde79333f23a28dbef155c6526e.json ├── query-0198e3e04ffa7668562e2e485ef4ccde2a267f0a14161b75bd5c4b6b252eb687.json ├── query-6f7e6ba876d49bef1bf870514ed38be642af65ed848f53a191ef58c2e02f227c.json ├── query-2502a8cec8e562082f3e990e73934a2bf55e0ea2347c2c58323c4aa951f18d0e.json ├── query-18049cc4c58a991f166a1548c43fb4f0fe9ff4820be993afe9dba8e57f6e1df8.json ├── query-2207b448e46117ad64084feefc49e3f45511e91468b32f5ef0024f92730588a6.json ├── query-14a630268f676e961c35d4804724aa35ac1d5ae9763f9383506cd05b42bb0213.json ├── query-ddc1a18d0d257b9065830b46a10ce42fee96b0925eb2c30a0b98cf9f79c6ed76.json ├── query-7e7591cc5c22205ede24f0f41bf43c0c8ede9352b6622b3095cb2bcd8a41ee41.json ├── query-eb2316bcf588bee755224ad3503e5e9ff10ba310c44ecf67e55d27b06e8449e6.json ├── query-3f1f4aaee324cb731aacf0b34ea6a1c76dade6d2234e2fe238a6aab5a15b17fa.json ├── query-f9b6289ce3be042ec58c756e5b5b010b5010091a69431f51acc1adf1db12f708.json ├── query-39d3fae6fdd67a2324fae4d5e828f69f2298cd5b0f7eb1609ed189269c6f677c.json ├── query-365408c350d49ba41303b49b319a0fb317ced0dc5bae79b8a491b0807cf97958.json ├── query-4e6581709112d5d929e525113dc4ed279da2291a4812fa1be1de3e636e6d573d.json ├── query-ed56ffeb0264867943f7891de21ff99a2bfb27dd1e51d0f877f939e29b7f3a52.json └── query-f928d9523d9539c64ae054750ed6ed4e12b72585c80dd938f23ef1907ced3aec.json ├── run_bench.sh ├── run_tests.sh ├── Dockerfile ├── committer ├── Cargo.toml └── src │ ├── errors.rs │ └── main.rs └── SECURITY.md /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @FuelLabs/client 2 | -------------------------------------------------------------------------------- /packages/adapters/signers/src/eigen/private_key.rs: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /_typos.toml: -------------------------------------------------------------------------------- 1 | [files] 2 | extend-exclude = ["db_preview"] 3 | -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | SQLX_OFFLINE=true 2 | DATABASE_URL=postgres://username:password@localhost/test 3 | -------------------------------------------------------------------------------- /packages/services/src/fee_metrics_tracker.rs: -------------------------------------------------------------------------------- 1 | pub mod eigen_da; 2 | pub mod ethereum_da; 3 | -------------------------------------------------------------------------------- /packages/adapters/storage/src/mappings.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod eigen_tables; 2 | pub(crate) mod tables; 3 | -------------------------------------------------------------------------------- /packages/services/src/state_listener.rs: -------------------------------------------------------------------------------- 1 | pub mod eigen_service; 2 | pub mod port; 3 | pub mod service; 4 | -------------------------------------------------------------------------------- /packages/adapters/signers/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod eigen; 2 | pub mod eth; 3 | pub mod kms_utils; 4 | pub use aws_config; 5 | pub use aws_sdk_kms; 6 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target 2 | deployment 3 | configurations 4 | eth_node 5 | fuel_node 6 | helm 7 | compose.yml 8 | .git 9 | *.rs.bk 10 | .dockerignore 11 | -------------------------------------------------------------------------------- /e2e/helpers/src/fuel_node_simulated.rs: -------------------------------------------------------------------------------- 1 | mod graphql; 2 | mod server; 3 | mod simulation; 4 | 5 | pub use server::FuelNode; 6 | pub use simulation::{Compressibility, SimulationConfig}; 7 | -------------------------------------------------------------------------------- /packages/adapters/da/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub enum DALayerError { 2 | Error, 3 | } 4 | 5 | pub enum SubmissionStatus { 6 | Pending, 7 | Completed, 8 | Finalized, 9 | Failed, 10 | } 11 | -------------------------------------------------------------------------------- /e2e/helpers/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod committer; 2 | pub mod eth_node; 3 | pub mod fuel_node; 4 | #[cfg(feature = "fuel-core-simulated")] 5 | pub mod fuel_node_simulated; 6 | pub mod kms; 7 | pub mod whole_stack; 8 | -------------------------------------------------------------------------------- /fee_algo_simulation/src/state.rs: -------------------------------------------------------------------------------- 1 | use services::fees::cache::CachingApi; 2 | 3 | /// Shared state across routes. 
4 | #[derive(Clone)] 5 | pub struct AppState { 6 | pub fee_api: CachingApi, 7 | } 8 | -------------------------------------------------------------------------------- /run_local_db.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Drop the database 4 | docker compose -f sql-compose.yaml down --volumes 5 | docker compose -f sql-compose.yaml up -d 6 | (cd ./packages/adapters/storage && sqlx migrate run) 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | **/target/ 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | 8 | # Don't add the generated MDBook artifacts 9 | docs/book/ 10 | .DS_Store 11 | -------------------------------------------------------------------------------- /packages/services/src/state_committer.rs: -------------------------------------------------------------------------------- 1 | mod fee_algo; 2 | pub use fee_algo::{ 3 | Config as AlgoConfig, FeeMultiplierRange, FeeThresholds, SmaFeeAlgo, SmaPeriods, 4 | }; 5 | pub mod commit_helpers; 6 | pub mod eigen_service; 7 | pub mod port; 8 | pub mod service; 9 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0001_initial.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS l1_fuel_block_submission; 2 | DROP TABLE IF EXISTS l1_transaction_fragments; 3 | DROP TABLE IF EXISTS l1_fragments; 4 | DROP TABLE IF EXISTS l1_submissions; 5 | DROP TABLE IF EXISTS l1_transactions; 6 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0006_fuel_block_drop_hash_and_set_height_as_pkey.up.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | 3 | ALTER TABLE fuel_blocks DROP CONSTRAINT fuel_blocks_pkey; 4 | 5 | ALTER TABLE fuel_blocks DROP COLUMN hash; 6 | 7 | ALTER TABLE fuel_blocks ADD PRIMARY KEY (height); 8 | 9 | COMMIT; 10 | -------------------------------------------------------------------------------- /sql-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | postgres: 5 | image: postgres:latest 6 | container_name: my_postgres 7 | environment: 8 | POSTGRES_USER: username 9 | POSTGRES_PASSWORD: password 10 | POSTGRES_DB: test 11 | ports: 12 | - "5432:5432" 13 | -------------------------------------------------------------------------------- /fuel_simulator/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fuel_simulator" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | tokio = { version = "1.28", features = ["full"] } 8 | warp = "0.3" 9 | async-graphql = "7" 10 | async-graphql-warp = "7" 11 | hex = "0.4" 12 | clap = { version = "4", features = ["derive"] } 13 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0005_tx_state_added.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE l1_blob_transaction 2 | DROP CONSTRAINT l1_transactions_state_check; 3 | 4 | ALTER TABLE l1_blob_transaction 5 | ADD CONSTRAINT 
l1_blob_transaction_state_check 6 | CHECK ( 7 | state IN (0, 1, 2, 3) 8 | AND (state != 1 OR finalized_at IS NOT NULL) 9 | ); -------------------------------------------------------------------------------- /packages/adapters/da/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "da" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0008_remove_squeezed_out_state.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE l1_blob_transaction 2 | DROP CONSTRAINT l1_blob_transaction_state_check; 3 | 4 | ALTER TABLE l1_blob_transaction 5 | ADD CONSTRAINT l1_blob_transaction_state_check 6 | CHECK ( 7 | state IN (0, 1, 2, 3) 8 | AND (state != 1 OR finalized_at IS NOT NULL) 9 | ); 10 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | fn_params_layout = "Tall" 2 | hard_tabs = false 3 | match_arm_leading_pipes = "Never" 4 | max_width = 100 5 | merge_derives = true 6 | remove_nested_parens = true 7 | reorder_imports = true 8 | reorder_modules = true 9 | tab_spaces = 4 10 | use_field_init_shorthand = false 11 | use_small_heuristics = "Default" 12 | use_try_shorthand = false 13 | -------------------------------------------------------------------------------- /packages/adapters/eth/src/lib.rs: -------------------------------------------------------------------------------- 1 | use services::Result; 2 | 3 | mod blob_encoder; 4 | mod error; 5 | mod fee_api_helpers; 6 | mod http; 7 | mod metrics; 8 | mod websocket; 9 | 10 | pub use alloy::primitives::Address; 11 | pub use blob_encoder::BlobEncoder; 12 | pub use http::Provider as HttpClient; 13 | pub use websocket::{AcceptablePriorityFeePercentages, L1Signers, Sign, TxConfig, WebsocketClient}; 14 | -------------------------------------------------------------------------------- /packages/encoding/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod blob; 2 | pub mod bundle; 3 | 4 | pub(crate) mod constants { 5 | // These are copied over from alloy so that we don't make fuel-core pull in alloy just for the 6 | // constants. 
7 | pub const BYTES_PER_BLOB: usize = 131_072; 8 | pub const FIELD_ELEMENTS_PER_BLOB: usize = 4096; 9 | pub const USABLE_BITS_PER_FIELD_ELEMENT: usize = 254; 10 | } 11 | -------------------------------------------------------------------------------- /packages/metrics/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "metrics" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | prometheus = { workspace = true } 14 | -------------------------------------------------------------------------------- /e2e/benches/src/data.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | /// Shared application data for the control panel. 3 | pub struct AppData { 4 | pub simulation_config: Arc<Mutex<SimulationConfig>>, 5 | pub metrics_url: String, 6 | } 7 | 8 | /// Form definition for updating the configuration. 9 | #[derive(Debug, Deserialize)] 10 | pub struct ConfigForm { 11 | pub block_size: usize, 12 | pub compressibility: String, 13 | } 14 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "fuel-block-committer.serviceAccountName" . }} 6 | labels: 7 | {{- include "fuel-block-committer.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml .
| nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /.sqlx/query-c975c167c0dcbdc38e13157169bf17487cc5e86a4fd28d344da7f7ef55358601.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "UPDATE eigen_submission SET status = $1 WHERE id = $2", 4 | "describe": { 5 | "columns": [], 6 | "parameters": { 7 | "Left": [ 8 | "Int2", 9 | "Int4" 10 | ] 11 | }, 12 | "nullable": [] 13 | }, 14 | "hash": "c975c167c0dcbdc38e13157169bf17487cc5e86a4fd28d344da7f7ef55358601" 15 | } 16 | -------------------------------------------------------------------------------- /packages/adapters/fuel/build.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | fn main() { 4 | let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap()); 5 | 6 | let schema_path = out_dir.join("schema.sdl"); 7 | std::fs::write(&schema_path, fuel_core_client::SCHEMA_SDL).unwrap(); 8 | 9 | cynic_codegen::register_schema("fuelcore") 10 | .from_sdl_file(schema_path) 11 | .unwrap() 12 | .as_default() 13 | .unwrap(); 14 | } 15 | -------------------------------------------------------------------------------- /run_bench.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | script_location="$(readlink -f "$(dirname "$0")")" 5 | 6 | workspace_cargo_manifest="$script_location/Cargo.toml" 7 | 8 | # So that we may have a binary in `target/release` 9 | cargo build --release --manifest-path "$workspace_cargo_manifest" --bin fuel-block-committer 10 | PATH="$script_location/target/release:$PATH" cargo run --release --manifest-path "$workspace_cargo_manifest" --bin benches -- "$@" 11 | -------------------------------------------------------------------------------- /.sqlx/query-9fad1eeaa60ea30606182ffef41d62900c0343c83e6258a2e9f287c2b4e0281e.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "INSERT INTO l1_transaction_fragments (transaction_id, fragment_id) VALUES ($1, $2)", 4 | "describe": { 5 | "columns": [], 6 | "parameters": { 7 | "Left": [ 8 | "Int4", 9 | "Int4" 10 | ] 11 | }, 12 | "nullable": [] 13 | }, 14 | "hash": "9fad1eeaa60ea30606182ffef41d62900c0343c83e6258a2e9f287c2b4e0281e" 15 | } 16 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /.sqlx/query-eb46c62ffe6a49191e586a06f7c6823385925569f27394bad58ee6134522f03b.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "INSERT INTO eigen_submission_fragments (submission_id, fragment_id) VALUES ($1, $2)", 4 | "describe": { 5 | "columns": [], 6 | "parameters": { 7 | "Left": [ 8 | "Int4", 9 | "Int4" 10 | ] 11 | }, 12 | "nullable": [] 13 | }, 14 | "hash": "eb46c62ffe6a49191e586a06f7c6823385925569f27394bad58ee6134522f03b" 15 | } 16 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0004_blob_gas_bumping.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM l1_transaction_fragments; 2 | DELETE FROM l1_blob_transaction; 3 | 4 | ALTER TABLE l1_blob_transaction 5 | ADD COLUMN nonce BIGINT NOT NULL, 6 | ADD COLUMN max_fee NUMERIC(39, 0) NOT NULL, 7 | ADD COLUMN priority_fee NUMERIC(39, 0) NOT NULL, 8 | ADD COLUMN blob_fee NUMERIC(39, 0) NOT NULL, 9 | ADD COLUMN created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP; 10 | -------------------------------------------------------------------------------- /.sqlx/query-111a622bad3c042be9ee7fdf3ce9db91900473950c921260eaee6c3bff4c7096.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "UPDATE l1_blob_transaction SET state = $1, finalized_at = $2 WHERE nonce = $3", 4 | "describe": { 5 | "columns": [], 6 | "parameters": { 7 | "Left": [ 8 | "Int2", 9 | "Timestamptz", 10 | "Int8" 11 | ] 12 | }, 13 | "nullable": [] 14 | }, 15 | "hash": "111a622bad3c042be9ee7fdf3ce9db91900473950c921260eaee6c3bff4c7096" 16 | } 17 | -------------------------------------------------------------------------------- /.sqlx/query-e5a293ee645d92f428e060ecea14869c5dcb9a44a3e431357f9032bf864d6db2.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "UPDATE l1_blob_transaction SET state = $1, finalized_at = $2 WHERE hash = $3", 4 | "describe": { 5 | "columns": [], 6 | "parameters": { 7 | "Left": [ 8 | "Int2", 9 | "Timestamptz", 10 | "Bytea" 11 | ] 12 | }, 13 | "nullable": [] 14 | }, 15 | "hash": "e5a293ee645d92f428e060ecea14869c5dcb9a44a3e431357f9032bf864d6db2" 16 | } 17 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "fuel-block-committer.fullname" . }} 5 | labels: 6 | {{- include "fuel-block-committer.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | {{- include "fuel-block-committer.selectorLabels" . 
| nindent 4 }} 16 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.app.serviceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ include "fuel-block-committer.fullname" . }} 6 | labels: 7 | release: {{ .Values.app.serviceMonitor.prometheusRelease }} 8 | spec: 9 | selector: 10 | matchLabels: 11 | {{- include "fuel-block-committer.labels" . | nindent 4 }} 12 | endpoints: 13 | - path: /metrics 14 | port: http 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "fuel-block-committer.fullname" . }}-test-connection" 5 | labels: 6 | {{- include "fuel-block-committer.labels" . | nindent 4 }} 7 | annotations: 8 | "helm.sh/hook": test 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "fuel-block-committer.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /.sqlx/query-26b0ea141fd42d49dba3af670d49256cb56247f1e23dc8ff6c4955f99a5d980a.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT MAX(height) as max_height FROM fuel_blocks", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "max_height", 9 | "type_info": "Int8" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [] 14 | }, 15 | "nullable": [ 16 | null 17 | ] 18 | }, 19 | "hash": "26b0ea141fd42d49dba3af670d49256cb56247f1e23dc8ff6c4955f99a5d980a" 20 | } 21 | -------------------------------------------------------------------------------- /.sqlx/query-dbd4dc48ee0c62be2690c7894bfd111cfe64383add2ae8d78f90975b74af6b57.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT nextval(pg_get_serial_sequence('bundles', 'id'))", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "nextval", 9 | "type_info": "Int8" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [] 14 | }, 15 | "nullable": [ 16 | null 17 | ] 18 | }, 19 | "hash": "dbd4dc48ee0c62be2690c7894bfd111cfe64383add2ae8d78f90975b74af6b57" 20 | } 21 | -------------------------------------------------------------------------------- /run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | script_location="$(readlink -f "$(dirname "$0")")" 5 | 6 | workspace_cargo_manifest="$script_location/Cargo.toml" 7 | 8 | cargo test --manifest-path "$workspace_cargo_manifest" --workspace --exclude e2e-tests 9 | 10 | # So that we may have a binary in `target/release` 11 | cargo build --release --manifest-path "$workspace_cargo_manifest" --bin fuel-block-committer 12 | PATH="$script_location/target/release:$PATH" cargo test --manifest-path "$workspace_cargo_manifest" --package e2e-tests -- --test-threads=1 --nocapture 13 | -------------------------------------------------------------------------------- 
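A note on the .sqlx/query-*.json files that appear throughout this tree: they are sqlx's offline query cache, generated by `cargo sqlx prepare` against a live database and looked up by a hash of the SQL text. With SQLX_OFFLINE=true (set in .env above), sqlx's macros type-check queries against these cached descriptions instead of a running Postgres. A minimal sketch of how one cached entry pairs with a compile-time-checked query; the helper name and pool argument are illustrative, not taken from this repo:

use sqlx::PgPool;

// Hypothetical helper matching the cached query
// "UPDATE eigen_submission SET status = $1 WHERE id = $2";
// the cached parameter types Int2 and Int4 map to i16 and i32 here.
async fn set_submission_status(pool: &PgPool, status: i16, id: i32) -> sqlx::Result<()> {
    sqlx::query!("UPDATE eigen_submission SET status = $1 WHERE id = $2", status, id)
        .execute(pool)
        .await?;
    Ok(())
}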
/.sqlx/query-ca516d5a6f16e7877c129ec68edfb8eb4358479355cc4fe0a85ad1ea8d6bd014.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT MAX(end_height) AS latest_bundled_height FROM bundles", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "latest_bundled_height", 9 | "type_info": "Int8" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [] 14 | }, 15 | "nullable": [ 16 | null 17 | ] 18 | }, 19 | "hash": "ca516d5a6f16e7877c129ec68edfb8eb4358479355cc4fe0a85ad1ea8d6bd014" 20 | } 21 | -------------------------------------------------------------------------------- /fuel_simulator/src/main.rs: -------------------------------------------------------------------------------- 1 | mod server; 2 | 3 | use clap::Parser; 4 | use server::run_server; 5 | 6 | #[derive(Parser, Debug)] 7 | #[command(author, version, about, long_about = None)] 8 | struct Args { 9 | /// Block size in bytes for each produced block. 10 | #[arg(long, default_value_t = 128)] 11 | block_size: usize, 12 | 13 | /// Port to run the server on. 14 | #[arg(long, default_value_t = 4000)] 15 | port: u16, 16 | } 17 | 18 | #[tokio::main] 19 | async fn main() { 20 | let args = Args::parse(); 21 | run_server(args.block_size, args.port).await; 22 | } 23 | -------------------------------------------------------------------------------- /packages/adapters/clock/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "clock" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | services = { workspace = true } 14 | 15 | [dev-dependencies] 16 | clock = { workspace = true, features = ["test-helpers"] } 17 | tokio = { workspace = true, features = ["macros", "rt"] } 18 | 19 | [features] 20 | test-helpers = [] 21 | -------------------------------------------------------------------------------- /.sqlx/query-266a53bb892dca106ef57ed4617a44e2d08f1e3fe2d525722020d1a32aa8111c.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT created_at FROM eigen_submission WHERE status = $1 ORDER BY created_at DESC LIMIT 1;", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "created_at", 9 | "type_info": "Timestamptz" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int2" 15 | ] 16 | }, 17 | "nullable": [ 18 | false 19 | ] 20 | }, 21 | "hash": "266a53bb892dca106ef57ed4617a44e2d08f1e3fe2d525722020d1a32aa8111c" 22 | } 23 | -------------------------------------------------------------------------------- /packages/services/src/types/fragment.rs: -------------------------------------------------------------------------------- 1 | use std::num::{NonZeroU32, NonZeroUsize}; 2 | 3 | use crate::types::NonEmpty; 4 | 5 | #[derive(Debug, Clone, Copy)] 6 | pub struct FragmentsSubmitted { 7 | pub num_fragments: NonZeroUsize, 8 | } 9 | 10 | #[derive(Debug, Clone, PartialEq, Eq)] 11 | pub struct Fragment { 12 | pub data: NonEmpty<u8>, 13 | pub unused_bytes: u32, 14 | pub total_bytes: NonZeroU32, 15 | } 16 | 17 | impl Fragment { 18 | pub fn utilization(&self) -> f64 { 19 | f64::from(self.total_bytes.get().saturating_sub(self.unused_bytes)) 20 | /
f64::from(self.total_bytes.get()) 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /.sqlx/query-0262541213bf75b490f385cca5b76bcd130d2fcb4cdd88ababb3348d28999fa8.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT EXISTS (SELECT 1 FROM l1_blob_transaction WHERE state = $1) AS has_pending_transactions;", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "has_pending_transactions", 9 | "type_info": "Bool" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int2" 15 | ] 16 | }, 17 | "nullable": [ 18 | null 19 | ] 20 | }, 21 | "hash": "0262541213bf75b490f385cca5b76bcd130d2fcb4cdd88ababb3348d28999fa8" 22 | } 23 | -------------------------------------------------------------------------------- /e2e/benches/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "benches" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | publish.workspace = true 11 | 12 | [dependencies] 13 | e2e-helpers = { workspace = true, features = ["fuel-core-simulated"] } 14 | tokio = { workspace = true, features = ["full"] } 15 | anyhow = { workspace = true } 16 | fuel = { workspace = true } 17 | serde = { workspace = true } 18 | actix-web = { workspace = true, features = ["macros"] } 19 | reqwest = { workspace = true } 20 | -------------------------------------------------------------------------------- /.sqlx/query-bf7f6bfbf266430a212e9b8e8e8ee5e1ea3b97cdb624324a07f018fdc045341d.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "INSERT INTO l1_transaction (hash, nonce, max_fee, priority_fee, submission_id, state, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7)", 4 | "describe": { 5 | "columns": [], 6 | "parameters": { 7 | "Left": [ 8 | "Bytea", 9 | "Int8", 10 | "Numeric", 11 | "Numeric", 12 | "Int4", 13 | "Int2", 14 | "Timestamptz" 15 | ] 16 | }, 17 | "nullable": [] 18 | }, 19 | "hash": "bf7f6bfbf266430a212e9b8e8e8ee5e1ea3b97cdb624324a07f018fdc045341d" 20 | } 21 | -------------------------------------------------------------------------------- /.sqlx/query-8c34c4c454b5b6d8bfc2eb67a38eff1ceb985eb38321fe5760ef02dfa40e3b3f.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "INSERT INTO bundles(id, start_height, end_height) VALUES ($1, $2, $3) RETURNING id", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int4", 15 | "Int8", 16 | "Int8" 17 | ] 18 | }, 19 | "nullable": [ 20 | false 21 | ] 22 | }, 23 | "hash": "8c34c4c454b5b6d8bfc2eb67a38eff1ceb985eb38321fe5760ef02dfa40e3b3f" 24 | } 25 | -------------------------------------------------------------------------------- /.sqlx/query-a2673e2e158bbaf129a8c5cb2f0891f2c073841f52ae893e7590562d574609c4.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "INSERT INTO eigen_submission (request_id, status, created_at) VALUES ($1, $2, $3) RETURNING id", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info":
"Int4" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Bytea", 15 | "Int2", 16 | "Timestamptz" 17 | ] 18 | }, 19 | "nullable": [ 20 | false 21 | ] 22 | }, 23 | "hash": "a2673e2e158bbaf129a8c5cb2f0891f2c073841f52ae893e7590562d574609c4" 24 | } 25 | -------------------------------------------------------------------------------- /.sqlx/query-2096d1ddfd76357493610c7d1a11022ce99ec1d9fd603fb10d4723ca05267627.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT EXISTS (SELECT 1 FROM l1_blob_transaction WHERE state = $1 OR state = $2) AS has_nonfinalized_transactions;", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "has_nonfinalized_transactions", 9 | "type_info": "Bool" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int2", 15 | "Int2" 16 | ] 17 | }, 18 | "nullable": [ 19 | null 20 | ] 21 | }, 22 | "hash": "2096d1ddfd76357493610c7d1a11022ce99ec1d9fd603fb10d4723ca05267627" 23 | } 24 | -------------------------------------------------------------------------------- /.sqlx/query-e01e9734a56ca8dd2f7fcbd62570b390b3348ecd2bb6754a04e31958a9391028.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "INSERT INTO l1_fuel_block_submission (fuel_block_hash, fuel_block_height, completed) VALUES ($1, $2, $3) RETURNING id", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Bytea", 15 | "Int8", 16 | "Bool" 17 | ] 18 | }, 19 | "nullable": [ 20 | false 21 | ] 22 | }, 23 | "hash": "e01e9734a56ca8dd2f7fcbd62570b390b3348ecd2bb6754a04e31958a9391028" 24 | } 25 | -------------------------------------------------------------------------------- /packages/services/src/types.rs: -------------------------------------------------------------------------------- 1 | pub use alloy::primitives::{Address, U256}; 2 | pub use futures::Stream; 3 | 4 | mod non_empty; 5 | pub use non_empty::*; 6 | 7 | mod block_submission; 8 | mod bundle_cost; 9 | mod eigen_submission; 10 | mod fragment; 11 | mod fuel_block_committed_on_l1; 12 | mod l1_height; 13 | mod serial_id; 14 | mod state_submission; 15 | mod transactions; 16 | 17 | pub mod storage; 18 | 19 | pub use block_submission::*; 20 | pub use bundle_cost::*; 21 | pub use eigen_submission::*; 22 | pub use fragment::*; 23 | pub use fuel_block_committed_on_l1::*; 24 | pub use l1_height::*; 25 | pub use serial_id::*; 26 | pub use state_submission::*; 27 | pub use transactions::*; 28 | -------------------------------------------------------------------------------- /packages/encoding/src/bundle.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | pub use decoder::*; 4 | pub use encoder::*; 5 | 6 | #[derive(Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)] 7 | pub struct BundleV1 { 8 | pub blocks: Vec<Vec<u8>>, 9 | } 10 | 11 | impl std::fmt::Debug for BundleV1 { 12 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 13 | let hex_encoded = self.blocks.iter().map(hex::encode).collect::<Vec<_>>(); 14 | 15 | f.debug_struct("BundleV1") 16 | .field("blocks", &hex_encoded) 17 | .finish() 18 | } 19 | } 20 | 21 | #[derive(Debug, Clone, PartialEq, Eq)] 22 | pub enum Bundle { 23 | V1(BundleV1), 24 | } 25 | --------------------------------------------------------------------------------
/packages/services/src/types/fuel_block_committed_on_l1.rs: -------------------------------------------------------------------------------- 1 | use crate::types::U256; 2 | 3 | #[derive(Clone, Copy)] 4 | pub struct FuelBlockCommittedOnL1 { 5 | pub fuel_block_hash: [u8; 32], 6 | pub commit_height: U256, 7 | } 8 | 9 | impl std::fmt::Debug for FuelBlockCommittedOnL1 { 10 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 11 | let hash = self 12 | .fuel_block_hash 13 | .map(|byte| format!("{byte:02x?}")) 14 | .join(""); 15 | f.debug_struct("FuelBlockCommittedOnL1") 16 | .field("hash", &hash) 17 | .field("commit_height", &self.commit_height) 18 | .finish() 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /.sqlx/query-d6ad3af8502339a76d4a880f25b887858f300b7b36d0490c244e05d3fd20ee4c.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT\n MIN(created_at) AS earliest_submission_time\n FROM\n eigen_submission\n WHERE\n request_id = $1;\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "earliest_submission_time", 9 | "type_info": "Timestamptz" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Bytea" 15 | ] 16 | }, 17 | "nullable": [ 18 | null 19 | ] 20 | }, 21 | "hash": "d6ad3af8502339a76d4a880f25b887858f300b7b36d0490c244e05d3fd20ee4c" 22 | } 23 | -------------------------------------------------------------------------------- /.sqlx/query-851e5744c6d1c35341d1314e9485b4bdd6bf19af580170da626ab9740a4f4c02.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT\n MIN(l1_blob_transaction.created_at) AS earliest_tx_time\n FROM\n l1_blob_transaction\n WHERE\n l1_blob_transaction.nonce = $1;\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "earliest_tx_time", 9 | "type_info": "Timestamptz" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int8" 15 | ] 16 | }, 17 | "nullable": [ 18 | null 19 | ] 20 | }, 21 | "hash": "851e5744c6d1c35341d1314e9485b4bdd6bf19af580170da626ab9740a4f4c02" 22 | } 23 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0009_eigen_da_submission.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | 3 | CREATE TABLE IF NOT EXISTS eigen_submission ( 4 | id SERIAL PRIMARY KEY, 5 | request_id BYTEA NOT NULL UNIQUE, 6 | created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, 7 | status SMALLINT NOT NULL, 8 | CONSTRAINT eigen_submission_status_check 9 | CHECK (status IN (0, 1, 2, 3)) 10 | ); 11 | 12 | CREATE TABLE IF NOT EXISTS eigen_submission_fragments ( 13 | id SERIAL PRIMARY KEY, 14 | submission_id INTEGER NOT NULL REFERENCES eigen_submission(id) ON DELETE CASCADE, 15 | fragment_id INTEGER NOT NULL, 16 | UNIQUE(submission_id, fragment_id) 17 | ); 18 | 19 | COMMIT; 20 | -------------------------------------------------------------------------------- /packages/adapters/eigenda/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod bindings; 2 | mod codec; 3 | mod connector; 4 | mod error; 5 | mod throttler; 6 | 7 | use bindings::BlobStatus; 8 | pub use connector::*; 9 | use services::types::DispersalStatus; 10 | pub use throttler::Throughput; 11 | 12 | impl From for DispersalStatus { 13 | fn from(status: BlobStatus) -> 
Self { 14 | match status { 15 | BlobStatus::Unknown | BlobStatus::Encoded | BlobStatus::Queued => { 16 | DispersalStatus::Processing 17 | } 18 | BlobStatus::GatheringSignatures => DispersalStatus::Confirmed, 19 | BlobStatus::Complete => DispersalStatus::Finalized, 20 | BlobStatus::Failed => DispersalStatus::Failed, 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /packages/services/src/state_committer/commit_helpers.rs: -------------------------------------------------------------------------------- 1 | use crate::Result; 2 | use crate::state_committer::port::{Storage, fuel::Api as FuelApi}; 3 | use metrics::prometheus::IntGauge; 4 | 5 | pub async fn update_current_height_to_commit_metric<F, S>( 6 | fuel_api: &F, 7 | storage: &S, 8 | lookback_window: u32, 9 | gauge: &IntGauge, 10 | ) -> Result<()> 11 | where 12 | F: FuelApi, 13 | S: Storage, 14 | { 15 | let current_height_to_commit = if let Some(height) = storage.latest_bundled_height().await? { 16 | height.saturating_add(1) 17 | } else { 18 | fuel_api 19 | .latest_height() 20 | .await? 21 | .saturating_sub(lookback_window) 22 | }; 23 | 24 | gauge.set(current_height_to_commit.into()); 25 | Ok(()) 26 | } 27 | -------------------------------------------------------------------------------- /.sqlx/query-4636ebc3164dd8f0e7161e13feb5cbdd41fbe7546425bfbd1c1ff47d51a79e25.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "INSERT INTO l1_blob_transaction (hash, state, nonce, max_fee, priority_fee, blob_fee, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7) RETURNING id", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Bytea", 15 | "Int2", 16 | "Int8", 17 | "Numeric", 18 | "Numeric", 19 | "Numeric", 20 | "Timestamptz" 21 | ] 22 | }, 23 | "nullable": [ 24 | false 25 | ] 26 | }, 27 | "hash": "4636ebc3164dd8f0e7161e13feb5cbdd41fbe7546425bfbd1c1ff47d51a79e25" 28 | } 29 | -------------------------------------------------------------------------------- /.github/scripts/verify_tag.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | err() { 5 | echo -e "\e[31m\e[1merror:\e[0m $@" 1>&2; 6 | } 7 | 8 | status() { 9 | WIDTH=12 10 | printf "\e[32m\e[1m%${WIDTH}s\e[0m %s\n" "$1" "$2" 11 | } 12 | 13 | REF=$1 14 | MANIFEST=$2 15 | 16 | if [ -z "$REF" ]; then 17 | err "Expected ref to be set" 18 | exit 1 19 | fi 20 | 21 | if [ -z "$MANIFEST" ]; then 22 | err "Expected manifest to be set" 23 | exit 1 24 | fi 25 | 26 | # strip preceding 'v' if it exists on tag 27 | REF=${REF/#v} 28 | TOML_VERSION=$(cat $MANIFEST | dasel -r toml 'package.version') 29 | 30 | if [ "$TOML_VERSION" != "$REF" ]; then 31 | err "Crate version $TOML_VERSION, doesn't match tag version $REF" 32 | exit 1 33 | else 34 | status "Crate version matches tag $TOML_VERSION" 35 | fi 36 | -------------------------------------------------------------------------------- /packages/services/src/types/bundle_cost.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Clone)] 4 | pub struct TransactionCostUpdate { 5 | pub tx_hash: [u8; 32], 6 | pub total_fee: u128, 7 | pub da_block_height: u64, 8 | } 9 | 10 | #[derive(Debug, Serialize, Deserialize)] 11 | pub struct BundleCost { 12 | // the bundle id 13
| pub id: u64, 14 | // total cost of the bundle 15 | pub cost: u128, 16 | // total size of the data contained in the bundle 17 | pub size: u64, 18 | // da height of the final transaction carrying the bundle 19 | pub da_block_height: u64, 20 | // starting height of the contained block range 21 | pub start_height: u64, 22 | // ending height of the contained block range (inclusive) 23 | pub end_height: u64, 24 | } 25 | -------------------------------------------------------------------------------- /.sqlx/query-c38ac634ddb7743a7c37a84a8537b0fed4cf0607a0c04fd2bee4efae8b6ac6b8.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT COUNT(*)\n FROM fuel_blocks fb\n WHERE fb.height >= $1\n AND NOT EXISTS (\n SELECT 1 FROM bundles b\n WHERE fb.height BETWEEN b.start_height AND b.end_height\n AND b.end_height >= $1\n )", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "count", 9 | "type_info": "Int8" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int8" 15 | ] 16 | }, 17 | "nullable": [ 18 | null 19 | ] 20 | }, 21 | "hash": "c38ac634ddb7743a7c37a84a8537b0fed4cf0607a0c04fd2bee4efae8b6ac6b8" 22 | } 23 | -------------------------------------------------------------------------------- /.sqlx/query-f62bc595ae70229a6e37d7f1459ffe569d9ae2701cee893f2d2ef77a5e20e6f1.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "WITH all_heights AS (SELECT generate_series($1::BIGINT, $2::BIGINT) AS height)\n SELECT ah.height\n FROM all_heights ah\n LEFT JOIN fuel_blocks fb ON fb.height = ah.height\n WHERE fb.height IS NULL\n ORDER BY ah.height;", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "height", 9 | "type_info": "Int8" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int8", 15 | "Int8" 16 | ] 17 | }, 18 | "nullable": [ 19 | null 20 | ] 21 | }, 22 | "hash": "f62bc595ae70229a6e37d7f1459ffe569d9ae2701cee893f2d2ef77a5e20e6f1" 23 | } 24 | -------------------------------------------------------------------------------- /packages/services/src/types/state_submission.rs: -------------------------------------------------------------------------------- 1 | pub use sqlx::types::chrono::{DateTime, Utc}; 2 | 3 | use super::TransactionState; 4 | 5 | #[derive(Debug, Clone, PartialEq, Eq)] 6 | pub struct L1Tx { 7 | pub id: Option<u32>, 8 | pub hash: [u8; 32], 9 | pub nonce: u32, 10 | pub max_fee: u128, 11 | pub priority_fee: u128, 12 | pub blob_fee: u128, 13 | pub created_at: Option<DateTime<Utc>>, 14 | pub state: TransactionState, 15 | } 16 | 17 | impl Default for L1Tx { 18 | fn default() -> Self { 19 | Self { 20 | id: None, 21 | hash: [0; 32], 22 | nonce: 0, 23 | max_fee: 0, 24 | priority_fee: 0, 25 | blob_fee: 0, 26 | state: TransactionState::Pending, 27 | created_at: None, 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /.github/workflows/scripts/verify_tag.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | err() { 5 | echo -e "\e[31m\e[1merror:\e[0m $@" 1>&2; 6 | } 7 | 8 | status() { 9 | WIDTH=12 10 | printf "\e[32m\e[1m%${WIDTH}s\e[0m %s\n" "$1" "$2" 11 | } 12 | 13 | REF=$1 14 | MANIFEST=$2 15 | 16 | if [ -z "$REF" ]; then 17 | err "Expected ref to be set" 18 | exit 1 19 | fi 20 | 21 | if [ -z "$MANIFEST" ]; then 22 | err "Expected manifest to be set" 23 | exit 1 24
| fi 25 | 26 | # strip preceding 'v' if it exists on tag 27 | REF=${REF/#v} 28 | TOML_VERSION=$(cat $MANIFEST | dasel -r toml -w plain 'workspace.package.version') 29 | 30 | if [ "$TOML_VERSION" != "$REF" ]; then 31 | err "Crate version $TOML_VERSION, doesn't match tag version $REF" 32 | exit 1 33 | else 34 | status "Crate version matches tag $TOML_VERSION" 35 | fi 36 | -------------------------------------------------------------------------------- /packages/adapters/eth/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use ::metrics::{ 2 | RegistersMetrics, 3 | prometheus::{IntCounter, Opts, core::Collector}, 4 | }; 5 | 6 | #[derive(Clone)] 7 | pub struct Metrics { 8 | pub(crate) eth_network_errors: IntCounter, 9 | } 10 | 11 | impl RegistersMetrics for Metrics { 12 | fn metrics(&self) -> Vec<Box<dyn Collector>> { 13 | vec![Box::new(self.eth_network_errors.clone())] 14 | } 15 | } 16 | 17 | impl Default for Metrics { 18 | fn default() -> Self { 19 | let eth_network_errors = IntCounter::with_opts(Opts::new( 20 | "eth_network_errors", 21 | "Number of network errors encountered while running Ethereum RPCs.", 22 | )) 23 | .expect("eth_network_errors metric to be correctly configured"); 24 | 25 | Self { eth_network_errors } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /packages/services/src/types/eigen_submission.rs: -------------------------------------------------------------------------------- 1 | use base64::Engine; 2 | use sqlx::types::chrono::{DateTime, Utc}; 3 | 4 | #[derive(Debug, Clone, PartialEq, Eq)] 5 | pub enum DispersalStatus { 6 | Processing, 7 | Confirmed, 8 | Finalized, 9 | Failed, 10 | Other(String), 11 | } 12 | 13 | pub type EigenDARequestId = Vec<u8>; 14 | 15 | #[derive(Debug, Clone, PartialEq, Eq)] 16 | pub struct EigenDASubmission { 17 | pub id: Option<u32>, 18 | pub request_id: EigenDARequestId, 19 | pub created_at: Option<DateTime<Utc>>, 20 | pub status: DispersalStatus, 21 | } 22 | 23 | pub trait AsB64 { 24 | fn as_base64(&self) -> String; 25 | } 26 | 27 | impl AsB64 for EigenDASubmission { 28 | fn as_base64(&self) -> String { 29 | base64::engine::general_purpose::STANDARD.encode(&self.request_id) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /.sqlx/query-d28ad71b43b831e8104e83ddd69359622fe688f850f896aeab4b324817129aa5.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT\n MAX(l1_blob_transaction.finalized_at) AS last_fragment_time\n FROM\n l1_transaction_fragments\n JOIN\n l1_blob_transaction ON l1_blob_transaction.id = l1_transaction_fragments.transaction_id\n WHERE\n l1_blob_transaction.state = $1;\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "last_fragment_time", 9 | "type_info": "Timestamptz" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int2" 15 | ] 16 | }, 17 | "nullable": [ 18 | null 19 | ] 20 | }, 21 | "hash": "d28ad71b43b831e8104e83ddd69359622fe688f850f896aeab4b324817129aa5" 22 | } 23 | -------------------------------------------------------------------------------- /.github/scripts/verify_chart_version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | err() { 5 | echo -e "\e[31m\e[1merror:\e[0m $@" 1>&2 6 | } 7 | 8 | status() { 9 | WIDTH=12 10 | printf "\e[32m\e[1m%${WIDTH}s\e[0m %s\n" "$1" "$2" 11 | } 12 | # install dasel 13 | curl
-sSLf "https://github.com/TomWright/dasel/releases/download/v1.24.3/dasel_linux_amd64" -L -o dasel 14 | chmod +x dasel 15 | mv ./dasel /usr/local/bin/dasel 16 | # check appVersion with crate package metadata 17 | HELM_APP_VERSION=$(cat helm/fuel-block-committer/Chart.yaml | dasel -r yaml 'appVersion') 18 | CRATE_VERSION=$(cat Cargo.toml | dasel -r toml 'workspace.package.version') 19 | if [ "$HELM_APP_VERSION" != "$CRATE_VERSION" ]; then 20 | err "crate version $CRATE_VERSION, doesn't match helm app version $HELM_APP_VERSION" 21 | exit 1 22 | else 23 | status "crate version matches helm chart app version $HELM_APP_VERSION" 24 | fi 25 | -------------------------------------------------------------------------------- /packages/adapters/storage/src/error.rs: -------------------------------------------------------------------------------- 1 | pub type Result = std::result::Result; 2 | 3 | #[derive(Debug, thiserror::Error)] 4 | pub enum Error { 5 | #[error("Database Error {0}")] 6 | Database(String), 7 | #[error("Could not convert to/from domain/db type {0}")] 8 | Conversion(String), 9 | } 10 | 11 | impl From for services::Error { 12 | fn from(value: Error) -> Self { 13 | match value { 14 | Error::Database(e) => Self::Storage(e), 15 | Error::Conversion(e) => Self::Storage(e), 16 | } 17 | } 18 | } 19 | 20 | impl From for Error { 21 | fn from(e: sqlx::Error) -> Self { 22 | Self::Database(e.to_string()) 23 | } 24 | } 25 | 26 | impl From for Error { 27 | fn from(e: sqlx::migrate::MigrateError) -> Self { 28 | Self::Database(e.to_string()) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /.sqlx/query-c820bfe642b85081bd8a594d94ff4524a3889215e5056401b4bcd5fe4b027b58.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n SELECT COUNT(*) = 0 AS \"is_finalized!\"\n FROM l1_fragments f\n WHERE f.bundle_id = $1 AND NOT EXISTS (\n SELECT 1\n FROM l1_transaction_fragments tf\n JOIN l1_blob_transaction t ON tf.transaction_id = t.id\n WHERE tf.fragment_id = f.id AND t.state = $2\n )\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "is_finalized!", 9 | "type_info": "Bool" 10 | } 11 | ], 12 | "parameters": { 13 | "Left": [ 14 | "Int4", 15 | "Int2" 16 | ] 17 | }, 18 | "nullable": [ 19 | null 20 | ] 21 | }, 22 | "hash": "c820bfe642b85081bd8a594d94ff4524a3889215e5056401b4bcd5fe4b027b58" 23 | } 24 | -------------------------------------------------------------------------------- /packages/adapters/signers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "signers" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | publish.workspace = true 11 | 12 | [dependencies] 13 | eigenda = { workspace = true } 14 | eth = { workspace = true } 15 | alloy = { workspace = true, features = ["signer-aws"] } 16 | anyhow = { workspace = true } 17 | aws-config = { workspace = true } 18 | aws-sdk-kms = { workspace = true } 19 | async-trait = { workspace = true } 20 | k256 = { workspace = true, features = ["pkcs8"] } 21 | serde = { workspace = true } 22 | serde_json = { workspace = true } 23 | hex = { workspace = true } 24 | base64 = { workspace = true } 25 | rust-eigenda-signers = { workspace = true } 26 | thiserror = { workspace = true } 27 | 28 | 
[features] 29 | test-helpers = [] 30 | -------------------------------------------------------------------------------- /.sqlx/query-cc10c6369b02a35fee75d611dbd8bf096f832cde79333f23a28dbef155c6526e.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n INSERT INTO bundle_cost (\n bundle_id, cost, size, da_block_height, is_finalized\n ) VALUES (\n $1, $2, $3, $4, $5\n )\n ON CONFLICT (bundle_id) DO UPDATE SET\n cost = bundle_cost.cost + EXCLUDED.cost,\n size = bundle_cost.size + EXCLUDED.size,\n da_block_height = EXCLUDED.da_block_height,\n is_finalized = EXCLUDED.is_finalized\n ", 4 | "describe": { 5 | "columns": [], 6 | "parameters": { 7 | "Left": [ 8 | "Int4", 9 | "Numeric", 10 | "Int8", 11 | "Int8", 12 | "Bool" 13 | ] 14 | }, 15 | "nullable": [] 16 | }, 17 | "hash": "cc10c6369b02a35fee75d611dbd8bf096f832cde79333f23a28dbef155c6526e" 18 | } 19 | -------------------------------------------------------------------------------- /.sqlx/query-0198e3e04ffa7668562e2e485ef4ccde2a267f0a14161b75bd5c4b6b252eb687.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n SELECT fb.*\n FROM fuel_blocks fb\n WHERE fb.height >= $1\n AND NOT EXISTS (\n SELECT 1 FROM bundles b\n WHERE fb.height BETWEEN b.start_height AND b.end_height\n AND b.end_height >= $1\n )\n ORDER BY fb.height", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "height", 9 | "type_info": "Int8" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "data", 14 | "type_info": "Bytea" 15 | } 16 | ], 17 | "parameters": { 18 | "Left": [ 19 | "Int8" 20 | ] 21 | }, 22 | "nullable": [ 23 | false, 24 | false 25 | ] 26 | }, 27 | "hash": "0198e3e04ffa7668562e2e485ef4ccde2a267f0a14161b75bd5c4b6b252eb687" 28 | } 29 | -------------------------------------------------------------------------------- /packages/test-helpers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-helpers" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | clock = { workspace = true, features = ["test-helpers"] } 14 | delegate = { workspace = true } 15 | eth = { workspace = true, features = ["test-helpers"] } 16 | fuel-block-committer-encoding = { workspace = true } 17 | futures = { workspace = true } 18 | itertools = { workspace = true, features = ["use_alloc"] } 19 | metrics = { workspace = true } 20 | mockall = { workspace = true } 21 | rand = { workspace = true, features = ["small_rng"] } 22 | services = { workspace = true, features = ["test-helpers"] } 23 | storage = { workspace = true, features = ["test-helpers"] } 24 | -------------------------------------------------------------------------------- /packages/adapters/fuel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fuel" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | 
delegate = { workspace = true } 14 | fuel-core-client = { workspace = true, features = ["subscriptions"] } 15 | fuel-core-types = { workspace = true } 16 | futures = { workspace = true } 17 | metrics = { workspace = true } 18 | services = { workspace = true } 19 | url = { workspace = true } 20 | cynic = { workspace = true, features = ["default"] } 21 | 22 | [dev-dependencies] 23 | tokio = { workspace = true, features = ["macros"] } 24 | 25 | [build-dependencies] 26 | cynic-codegen = { workspace = true } 27 | fuel-core-client = { workspace = true } 28 | 29 | [features] 30 | test-helpers = [] 31 | -------------------------------------------------------------------------------- /packages/services/src/types/non_empty.rs: -------------------------------------------------------------------------------- 1 | pub use nonempty::{NonEmpty, nonempty}; 2 | 3 | pub trait CollectNonEmpty: Iterator { 4 | fn collect_nonempty(self) -> Option<NonEmpty<Self::Item>> 5 | where 6 | Self: Sized, 7 | { 8 | NonEmpty::collect(self) 9 | } 10 | } 11 | impl<I: Iterator> CollectNonEmpty for I {} 12 | 13 | pub trait TryCollectNonEmpty: Iterator<Item = Result<Self::Ok, Self::Err>> { 14 | type Ok; 15 | type Err; 16 | 17 | fn try_collect_nonempty(self) -> Result<Option<NonEmpty<Self::Ok>>, Self::Err> 18 | where 19 | Self: Sized, 20 | Self::Err: std::error::Error, 21 | { 22 | let collected: Result<Vec<_>, _> = self.collect(); 23 | collected.map(NonEmpty::collect) 24 | } 25 | } 26 | 27 | impl<T, E, I> TryCollectNonEmpty for I 28 | where 29 | I: Iterator<Item = Result<T, E>>, 30 | E: std::error::Error, 31 | { 32 | type Ok = T; 33 | type Err = E; 34 | } 35 | -------------------------------------------------------------------------------- /fee_algo_simulation/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fee_algo_simulation" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | futures = { workspace = true } 14 | thiserror = { workspace = true } 15 | actix-web = { workspace = true } 16 | anyhow = { workspace = true } 17 | eth = { workspace = true } 18 | itertools = { workspace = true } 19 | serde = { workspace = true, features = ["derive"] } 20 | serde_json = { workspace = true } 21 | services = { workspace = true } 22 | tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } 23 | tracing = { workspace = true } 24 | tracing-subscriber = { workspace = true, features = [ 25 | "fmt", 26 | "std", 27 | "env-filter", 28 | ] } 29 | xdg = { workspace = true } 30 | -------------------------------------------------------------------------------- /.sqlx/query-6f7e6ba876d49bef1bf870514ed38be642af65ed848f53a191ef58c2e02f227c.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT * FROM l1_fuel_block_submission ORDER BY fuel_block_height DESC LIMIT 1", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "fuel_block_hash", 14 | "type_info": "Bytea" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "fuel_block_height", 19 | "type_info": "Int8" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "completed", 24 | "type_info": "Bool" 25 | } 26 | ], 27 | "parameters": { 28 | "Left": [] 29 | }, 30 | "nullable": [ 31 | false, 32 | false, 33 | false, 34 |
false 35 | ] 36 | }, 37 | "hash": "6f7e6ba876d49bef1bf870514ed38be642af65ed848f53a191ef58c2e02f227c" 38 | } 39 | -------------------------------------------------------------------------------- /.sqlx/query-2502a8cec8e562082f3e990e73934a2bf55e0ea2347c2c58323c4aa951f18d0e.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT * FROM eigen_submission WHERE status = $1 or status = $2", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "request_id", 14 | "type_info": "Bytea" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "created_at", 19 | "type_info": "Timestamptz" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "status", 24 | "type_info": "Int2" 25 | } 26 | ], 27 | "parameters": { 28 | "Left": [ 29 | "Int2", 30 | "Int2" 31 | ] 32 | }, 33 | "nullable": [ 34 | false, 35 | false, 36 | false, 37 | false 38 | ] 39 | }, 40 | "hash": "2502a8cec8e562082f3e990e73934a2bf55e0ea2347c2c58323c4aa951f18d0e" 41 | } 42 | -------------------------------------------------------------------------------- /.sqlx/query-18049cc4c58a991f166a1548c43fb4f0fe9ff4820be993afe9dba8e57f6e1df8.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "UPDATE l1_fuel_block_submission SET completed = true WHERE id = $1 RETURNING *", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "fuel_block_hash", 14 | "type_info": "Bytea" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "fuel_block_height", 19 | "type_info": "Int8" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "completed", 24 | "type_info": "Bool" 25 | } 26 | ], 27 | "parameters": { 28 | "Left": [ 29 | "Int4" 30 | ] 31 | }, 32 | "nullable": [ 33 | false, 34 | false, 35 | false, 36 | false 37 | ] 38 | }, 39 | "hash": "18049cc4c58a991f166a1548c43fb4f0fe9ff4820be993afe9dba8e57f6e1df8" 40 | } 41 | -------------------------------------------------------------------------------- /.sqlx/query-2207b448e46117ad64084feefc49e3f45511e91468b32f5ef0024f92730588a6.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "UPDATE l1_fuel_block_submission SET completed = true WHERE fuel_block_hash = $1 RETURNING *", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "fuel_block_hash", 14 | "type_info": "Bytea" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "fuel_block_height", 19 | "type_info": "Int8" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "completed", 24 | "type_info": "Bool" 25 | } 26 | ], 27 | "parameters": { 28 | "Left": [ 29 | "Bytea" 30 | ] 31 | }, 32 | "nullable": [ 33 | false, 34 | false, 35 | false, 36 | false 37 | ] 38 | }, 39 | "hash": "2207b448e46117ad64084feefc49e3f45511e91468b32f5ef0024f92730588a6" 40 | } 41 | -------------------------------------------------------------------------------- /e2e/helpers/tests/harness.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::Result; 4 | use e2e_helpers::fuel_node_simulated::{FuelNode, SimulationConfig}; 5 | use fuel::HttpClient; 6 | use tokio::sync::Mutex; 7 | 8 | #[tokio::test] 9 | async fn 
simulated_fuel_node_can_reply_to_adapter() -> Result<()> { 10 | let mut simulated_node = FuelNode::new(4000, Arc::new(Mutex::new(SimulationConfig::default()))); 11 | simulated_node.run().await?; 12 | 13 | let client = HttpClient::new(&simulated_node.url(), 100, 1.try_into().unwrap()); 14 | 15 | let latest_block = client.latest_block().await?; 16 | 17 | let block_at_height = client 18 | .block_at_height(latest_block.height) 19 | .await? 20 | .expect("block to exist"); 21 | assert_eq!(latest_block, block_at_height); 22 | 23 | let da_compressed_block = client 24 | .compressed_block_at_height(block_at_height.height) 25 | .await? 26 | .expect("block to exist"); 27 | assert_eq!(da_compressed_block.height, block_at_height.height); 28 | 29 | Ok(()) 30 | } 31 | -------------------------------------------------------------------------------- /packages/benchmarks/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "benchmarks" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = false 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | fuel-block-committer-encoding = { workspace = true } 14 | services = { workspace = true } 15 | test-helpers = { workspace = true } 16 | futures = { workspace = true } 17 | itertools = { workspace = true, features = ["use_alloc"] } 18 | rand = { workspace = true, features = ["small_rng"] } 19 | criterion = { version = "0.5", features = ["html_reports"] } 20 | num-bigint = { workspace = true } 21 | tokio = { workspace = true, features = ["full"] } 22 | tracing = { workspace = true } 23 | tracing-subscriber = { workspace = true, features = [ 24 | "env-filter", 25 | "std", 26 | "fmt", 27 | "json", 28 | ] } 29 | clock = { workspace = true } 30 | 31 | [[bin]] 32 | name = "service_bundler" 33 | path = "src/service_bundler.rs" 34 | -------------------------------------------------------------------------------- /.github/workflows/helm-publish.yaml: -------------------------------------------------------------------------------- 1 | name: Build and publish Helm Chart 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | paths: 8 | - 'helm/fuel-block-committer/Chart.yaml' 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | helm-release: 15 | name: Build Helm Chart 16 | runs-on: buildjet-4vcpu-ubuntu-2204 17 | if: | 18 | (github.event_name == 'release' && github.event.action == 'published') || 19 | github.ref == 'refs/heads/master' || github.event_name == 'pull_request' 20 | permissions: 21 | contents: read 22 | packages: write 23 | 24 | steps: 25 | - name: Check out code 26 | uses: actions/checkout@v3 27 | 28 | - name: Package and Push Charts 29 | uses: bsord/helm-push@v4.1.0 30 | with: 31 | useOCIRegistry: true 32 | registry-url: oci://ghcr.io/fuellabs/helmcharts 33 | username: ${{ github.repository_owner }} 34 | access-token: ${{ secrets.GITHUB_TOKEN }} 35 | force: true 36 | chart-folder: ./helm/fuel-block-committer -------------------------------------------------------------------------------- /packages/adapters/signers/src/kms_utils.rs: -------------------------------------------------------------------------------- 1 | use aws_config::{SdkConfig, default_provider::credentials::DefaultCredentialsChain}; 2 | use aws_sdk_kms::config::BehaviorVersion; 3 | 4 | pub async fn load_config_from_env() -> SdkConfig { 5 
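// Credentials come from the default AWS provider chain (env vars, profiles, IMDS); the e2e harness can redirect KMS traffic to a local endpoint via E2E_TEST_AWS_ENDPOINT, matched below.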
| let loader = aws_config::defaults(BehaviorVersion::latest()) 6 | .credentials_provider(DefaultCredentialsChain::builder().build().await); 7 | 8 | let loader = match std::env::var("E2E_TEST_AWS_ENDPOINT") { 9 | Ok(url) => loader.endpoint_url(url), 10 | _ => loader, 11 | }; 12 | 13 | loader.load().await 14 | } 15 | 16 | #[cfg(feature = "test-helpers")] 17 | pub async fn config_for_testing(url: String) -> SdkConfig { 18 | aws_config::defaults(BehaviorVersion::latest()) 19 | .credentials_provider(aws_sdk_kms::config::Credentials::new( 20 | "test", 21 | "test", 22 | None, 23 | None, 24 | "Static Credentials", 25 | )) 26 | .endpoint_url(url) 27 | .region(aws_config::Region::new("us-east-1")) // placeholder region for test 28 | .load() 29 | .await 30 | } 31 | -------------------------------------------------------------------------------- /packages/metrics/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(unused_crate_dependencies)] 2 | 3 | mod connection_health_tracker; 4 | pub use connection_health_tracker::*; 5 | 6 | pub type HealthChecker = Box<dyn HealthCheck>; 7 | pub trait HealthCheck: Send + Sync { 8 | fn healthy(&self) -> bool; 9 | } 10 | 11 | pub use prometheus; 12 | 13 | pub trait RegistersMetrics { 14 | fn register_metrics(&self, registry: &crate::prometheus::Registry) { 15 | self.metrics().into_iter().for_each(|metric| { 16 | registry 17 | .register(metric) 18 | .expect("app to have correctly named metrics"); 19 | }); 20 | } 21 | 22 | fn metrics(&self) -> Vec<Box<dyn prometheus::core::Collector>>; 23 | } 24 | 25 | pub fn custom_exponential_buckets(start: f64, end: f64, steps: usize) -> Vec<f64> { 26 | let factor = (end / start).powf(1.0 / (steps - 1) as f64); 27 | let mut buckets = Vec::with_capacity(steps); 28 | 29 | let mut value = start; 30 | for _ in 0..(steps - 1) { 31 | buckets.push(value.ceil()); 32 | value *= factor; 33 | } 34 | 35 | buckets.push(end.ceil()); 36 | 37 | buckets 38 | } 39 | -------------------------------------------------------------------------------- /packages/services/src/health_reporter.rs: -------------------------------------------------------------------------------- 1 | pub mod service { 2 | use metrics::HealthChecker; 3 | use serde::Serialize; 4 | 5 | #[derive(Debug, Serialize)] 6 | pub struct HealthReport { 7 | fuel_connection_up: bool, 8 | eth_connection_healthy: bool, 9 | } 10 | 11 | impl HealthReport { 12 | pub fn healthy(&self) -> bool { 13 | self.fuel_connection_up && self.eth_connection_healthy 14 | } 15 | } 16 | 17 | pub struct HealthReporter { 18 | fuel_connection: HealthChecker, 19 | eth_connection: HealthChecker, 20 | } 21 | 22 | impl HealthReporter { 23 | pub fn new(fuel_health_check: HealthChecker, eth_health_check: HealthChecker) -> Self { 24 | Self { 25 | fuel_connection: fuel_health_check, 26 | eth_connection: eth_health_check, 27 | } 28 | } 29 | 30 | pub fn report(&self) -> HealthReport { 31 | HealthReport { 32 | fuel_connection_up: self.fuel_connection.healthy(), 33 | eth_connection_healthy: self.eth_connection.healthy(), 34 | } 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 1: Build 2 | FROM lukemathwalker/cargo-chef:latest-rust-1.85 AS chef 3 | WORKDIR /build/ 4 | # hadolint ignore=DL3008 5 | 6 | FROM chef AS planner 7 | ENV CARGO_NET_GIT_FETCH_WITH_CLI=true 8 | COPY . .
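# cargo-chef snapshots the dependency graph into recipe.json so the cook step in the builder stage can compile (and cache) dependencies separately from the application source.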
9 | RUN cargo chef prepare --recipe-path recipe.json --bin fuel-block-committer 10 | 11 | FROM chef AS builder 12 | COPY --from=planner /build/recipe.json recipe.json 13 | # Build our project dependencies, not our application! 14 | RUN cargo chef cook --release --recipe-path recipe.json --bin fuel-block-committer 15 | # Up to this point, if our dependency tree stays the same, 16 | # all layers should be cached. 17 | COPY . . 18 | RUN cargo build --release --bin fuel-block-committer 19 | 20 | # Stage 2: Run 21 | FROM ubuntu:22.04 AS run 22 | 23 | RUN apt-get update -y \ 24 | && apt-get install -y --no-install-recommends ca-certificates \ 25 | # Clean up 26 | && apt-get autoremove -y \ 27 | && apt-get clean -y \ 28 | && rm -rf /var/lib/apt/lists/* 29 | 30 | WORKDIR /root/ 31 | 32 | COPY --from=builder /build/target/release/fuel-block-committer . 33 | COPY --from=builder /build/target/release/fuel-block-committer.d . 34 | 35 | ENTRYPOINT ["./fuel-block-committer"] 36 | -------------------------------------------------------------------------------- /e2e/benches/src/template.rs: -------------------------------------------------------------------------------- 1 | static CONTROL_PANEL_TEMPLATE: &str = include_str!("../templates/control_panel.html"); 2 | 3 | pub fn render_control_panel(current_block_size: usize, current_compress: &str) -> String { 4 | let sel_random = if current_compress == "random" { 5 | "selected" 6 | } else { 7 | "" 8 | }; 9 | let sel_low = if current_compress == "low" { 10 | "selected" 11 | } else { 12 | "" 13 | }; 14 | let sel_medium = if current_compress == "medium" { 15 | "selected" 16 | } else { 17 | "" 18 | }; 19 | let sel_high = if current_compress == "high" { 20 | "selected" 21 | } else { 22 | "" 23 | }; 24 | let sel_full = if current_compress == "full" { 25 | "selected" 26 | } else { 27 | "" 28 | }; 29 | 30 | CONTROL_PANEL_TEMPLATE 31 | .replace("{{current_block_size}}", &current_block_size.to_string()) 32 | .replace("{{sel_random}}", sel_random) 33 | .replace("{{sel_low}}", sel_low) 34 | .replace("{{sel_medium}}", sel_medium) 35 | .replace("{{sel_high}}", sel_high) 36 | .replace("{{sel_full}}", sel_full) 37 | } 38 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0007_cost_tracking.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | 3 | CREATE TABLE IF NOT EXISTS bundle_cost ( 4 | bundle_id INTEGER PRIMARY KEY REFERENCES bundles(id), 5 | da_block_height BIGINT NOT NULL, -- DA block height of the last transaction in the bundle 6 | cost NUMERIC(39, 0) NOT NULL, 7 | size BIGINT NOT NULL, 8 | is_finalized BOOLEAN NOT NULL 9 | ); 10 | 11 | ALTER TABLE bundle_cost 12 | ADD CONSTRAINT bundle_cost_da_block_height_check 13 | CHECK ( 14 | da_block_height >= 0 15 | ); 16 | 17 | ALTER TABLE bundle_cost 18 | ADD CONSTRAINT bundle_cost_cost_check 19 | CHECK ( 20 | cost >= 0 21 | ); 22 | 23 | ALTER TABLE bundle_cost 24 | ADD CONSTRAINT bundle_cost_size_check 25 | CHECK ( 26 | size >= 0 27 | ); 28 | 29 | ALTER TABLE l1_blob_transaction 30 | DROP CONSTRAINT l1_blob_transaction_state_check; 31 | 32 | ALTER TABLE l1_blob_transaction 33 | ADD CONSTRAINT l1_blob_transaction_state_check 34 | CHECK ( 35 | state IN (0, 1, 2, 3, 4) 36 | AND (state != 1 OR finalized_at IS NOT NULL) 37 | ); 38 | 39 | CREATE INDEX idx_bundles_start_height ON bundles(start_height); 40 | 41 | COMMIT;
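A minimal sketch (not part of the repo, with made-up values) of the upsert this schema is built for; it mirrors the cached statement in .sqlx/query-cc10c6369b02a35fee75d611dbd8bf096f832cde79333f23a28dbef155c6526e.json earlier in this dump. Repeated inserts for the same bundle accumulate cost and size while refreshing the DA height and finality flag:

INSERT INTO bundle_cost (bundle_id, cost, size, da_block_height, is_finalized)
VALUES (1, 42000, 131072, 20000000, false)
ON CONFLICT (bundle_id) DO UPDATE SET
    cost = bundle_cost.cost + EXCLUDED.cost,
    size = bundle_cost.size + EXCLUDED.size,
    da_block_height = EXCLUDED.da_block_height,
    is_finalized = EXCLUDED.is_finalized;

--------------------------------------------------------------------------------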
/helm/fuel-block-committer/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.autoscaling.enabled }} 2 | apiVersion: autoscaling/v2 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: {{ include "fuel-block-committer.fullname" . }} 6 | labels: 7 | {{- include "fuel-block-committer.labels" . | nindent 4 }} 8 | spec: 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: {{ include "fuel-block-committer.fullname" . }} 13 | minReplicas: {{ .Values.autoscaling.minReplicas }} 14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 15 | metrics: 16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 17 | - type: Resource 18 | resource: 19 | name: cpu 20 | target: 21 | type: Utilization 22 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 23 | {{- end }} 24 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 25 | - type: Resource 26 | resource: 27 | name: memory 28 | target: 29 | type: Utilization 30 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /packages/adapters/fuel/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use metrics::{ 2 | RegistersMetrics, 3 | prometheus::{IntCounter, IntGauge, Opts, core::Collector}, 4 | }; 5 | 6 | #[derive(Clone)] 7 | pub struct Metrics { 8 | pub fuel_network_errors: IntCounter, 9 | pub fuel_height: IntGauge, 10 | } 11 | 12 | impl RegistersMetrics for Metrics { 13 | fn metrics(&self) -> Vec<Box<dyn Collector>> { 14 | vec![ 15 | Box::new(self.fuel_network_errors.clone()), 16 | Box::new(self.fuel_height.clone()), 17 | ] 18 | } 19 | } 20 | 21 | impl Default for Metrics { 22 | fn default() -> Self { 23 | let fuel_network_errors = IntCounter::with_opts(Opts::new( 24 | "fuel_network_errors", 25 | "Number of network errors encountered while polling for a new Fuel block.", 26 | )) 27 | .expect("fuel_network_errors metric to be correctly configured"); 28 | let fuel_height = 29 | IntGauge::with_opts(Opts::new("fuel_height", "Latest block height in Fuel")) 30 | .expect("fuel_height metric to be correctly configured"); 31 | 32 | Self { 33 | fuel_network_errors, 34 | fuel_height, 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0003_block_submission_tx_id.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE l1_transactions RENAME TO l1_blob_transaction; 2 | 3 | DROP TABLE IF EXISTS l1_fuel_block_submission; 4 | 5 | CREATE TABLE IF NOT EXISTS l1_fuel_block_submission ( 6 | id SERIAL PRIMARY KEY NOT NULL, 7 | fuel_block_hash BYTEA NOT NULL, 8 | fuel_block_height BIGINT NOT NULL UNIQUE CHECK (fuel_block_height >= 0), 9 | completed BOOLEAN NOT NULL, 10 | CHECK (octet_length(fuel_block_hash) = 32), 11 | UNIQUE (fuel_block_hash) 12 | ); 13 | 14 | CREATE TABLE IF NOT EXISTS l1_transaction ( 15 | id SERIAL PRIMARY KEY NOT NULL, 16 | submission_id INTEGER NOT NULL REFERENCES l1_fuel_block_submission(id), 17 | hash BYTEA NOT NULL UNIQUE, 18 | nonce BIGINT NOT NULL, 19 | max_fee NUMERIC(39, 0) NOT NULL, -- u128 20 | priority_fee NUMERIC(39, 0) NOT NULL, -- u128 21 | created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, 22 | finalized_at TIMESTAMPTZ, 23 | state SMALLINT NOT NULL, 24 | CHECK
(octet_length(hash) = 32), 25 | CHECK (state IN (0, 1, 2) AND (state != 1 OR finalized_at IS NOT NULL)) 26 | ); 27 | -------------------------------------------------------------------------------- /packages/benchmarks/src/lib.rs: -------------------------------------------------------------------------------- 1 | // This package contains benchmarks for the Eigenbundler 2 | // See benches/eigenbundler.rs for the benchmark implementation 3 | 4 | // Re-export types that might be useful for other benchmarks 5 | pub mod utils { 6 | use rand::{Rng, SeedableRng, rngs::SmallRng}; 7 | 8 | /// Generate random data of specified size 9 | pub fn generate_random_data(size_bytes: usize, seed: u64) -> Vec<u8> { 10 | let mut rng = SmallRng::seed_from_u64(seed); 11 | let mut data = vec![0u8; size_bytes]; 12 | rng.fill(&mut data[..]); 13 | data 14 | } 15 | 16 | /// Generate highly compressible data (repeated patterns) 17 | pub fn generate_compressible_data(size_bytes: usize, seed: u64) -> Vec<u8> { 18 | let mut rng = SmallRng::seed_from_u64(seed); 19 | 20 | // Create a pattern to repeat 21 | let pattern_size = 64; 22 | let mut pattern = vec![0u8; pattern_size]; 23 | rng.fill(&mut pattern[..]); 24 | 25 | let mut data = Vec::with_capacity(size_bytes); 26 | while data.len() < size_bytes { 27 | data.extend_from_slice(&pattern); 28 | } 29 | data.truncate(size_bytes); 30 | 31 | data 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /packages/services/src/types/transactions.rs: -------------------------------------------------------------------------------- 1 | use sqlx::types::chrono::{DateTime, Utc}; 2 | 3 | #[derive(Debug, Clone, PartialEq, Eq)] 4 | pub enum TransactionState { 5 | Pending, 6 | IncludedInBlock, 7 | Finalized(DateTime<Utc>), 8 | Failed, 9 | } 10 | 11 | #[derive(Debug, Clone, PartialEq, Eq)] 12 | pub struct TransactionResponse { 13 | block_number: u64, 14 | succeeded: bool, 15 | fee: u128, 16 | blob_fee: u128, 17 | } 18 | 19 | impl TransactionResponse { 20 | pub fn new(block_number: u64, succeeded: bool, fee: u128, blob_fee: u128) -> Self { 21 | Self { 22 | block_number, 23 | succeeded, 24 | fee, 25 | blob_fee, 26 | } 27 | } 28 | 29 | pub fn block_number(&self) -> u64 { 30 | self.block_number 31 | } 32 | 33 | pub fn succeeded(&self) -> bool { 34 | self.succeeded 35 | } 36 | 37 | pub fn total_fee(&self) -> u128 { 38 | self.fee.saturating_add(self.blob_fee) 39 | } 40 | 41 | pub fn confirmations(&self, current_block_number: u64) -> u64 { 42 | if !self.succeeded() { 43 | return 0; 44 | } 45 | 46 | current_block_number.saturating_sub(self.block_number) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: fuel-block-committer 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version.
This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.10.10 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "0.10.10" 25 | -------------------------------------------------------------------------------- /packages/adapters/signers/src/eigen.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use rust_eigenda_signers::{Message, PublicKey, RecoverableSignature}; 3 | 4 | pub mod private_key { 5 | pub use rust_eigenda_signers::signers::private_key::Signer; 6 | } 7 | pub mod kms; 8 | 9 | #[derive(Debug, Clone)] 10 | pub enum Signer { 11 | Private(private_key::Signer), 12 | Kms(kms::Signer), 13 | } 14 | 15 | #[async_trait] 16 | impl eigenda::Sign for Signer { 17 | type Error = kms::Error; 18 | 19 | /// Signs a digest using the signer's key. 20 | async fn sign_digest(&self, message: &Message) -> Result<RecoverableSignature, Self::Error> { 21 | match self { 22 | Signer::Private(signer) => { 23 | // private_key.sign_digest cannot fail 24 | let sig = signer 25 | .sign_digest(message) 26 | .await 27 | .expect("Private key signing should never fail"); 28 | Ok(sig) 29 | } 30 | Signer::Kms(signer) => signer.sign_digest(message).await, 31 | } 32 | } 33 | 34 | /// Returns the public key associated with this signer. 35 | fn public_key(&self) -> PublicKey { 36 | match self { 37 | Signer::Private(signer) => signer.public_key(), 38 | Signer::Kms(signer) => signer.public_key(), 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /packages/adapters/eigenda/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eigenda" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | services = { workspace = true } 14 | sha3 = { workspace = true } 15 | hex = { workspace = true } 16 | k256 = { workspace = true, features = ["ecdsa"] } 17 | futures = { workspace = true } 18 | metrics = { workspace = true } 19 | prost = { workspace = true } 20 | thiserror = { workspace = true } 21 | tokio = { workspace = true } 22 | tonic = { workspace = true, features = [ 23 | "tls", 24 | "transport", 25 | "tls-roots", 26 | "codegen", 27 | "prost", 28 | ] } 29 | tonic-build = { workspace = true } 30 | tracing = { workspace = true } 31 | url = { workspace = true } 32 | governor = { workspace = true, features = ["default"] } 33 | humantime = { workspace = true } 34 | byte-unit = { workspace = true, features = ["byte", "u128"] } 35 | rust-eigenda-v2-client = { workspace = true } 36 | rust-eigenda-v2-common = { workspace = true } 37 | ethereum-types = { workspace = true } 38 | async-trait = { workspace = true } 39 | secp256k1 = { workspace = true } 40 | anyhow = { workspace = true } 41 | 42 | [dev-dependencies] 43 | 44 | 45 | [features] 46 | test-helpers =
[] 47 | -------------------------------------------------------------------------------- /.sqlx/query-14a630268f676e961c35d4804724aa35ac1d5ae9763f9383506cd05b42bb0213.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n SELECT\n f.bundle_id,\n SUM(f.total_bytes)::BIGINT AS total_bytes,\n SUM(f.unused_bytes)::BIGINT AS unused_bytes,\n COUNT(*)::BIGINT AS fragment_count\n FROM l1_blob_transaction t\n JOIN l1_transaction_fragments tf ON t.id = tf.transaction_id\n JOIN l1_fragments f ON tf.fragment_id = f.id\n WHERE t.hash = $1\n GROUP BY f.bundle_id\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "bundle_id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "total_bytes", 14 | "type_info": "Int8" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "unused_bytes", 19 | "type_info": "Int8" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "fragment_count", 24 | "type_info": "Int8" 25 | } 26 | ], 27 | "parameters": { 28 | "Left": [ 29 | "Bytea" 30 | ] 31 | }, 32 | "nullable": [ 33 | false, 34 | null, 35 | null, 36 | null 37 | ] 38 | }, 39 | "hash": "14a630268f676e961c35d4804724aa35ac1d5ae9763f9383506cd05b42bb0213" 40 | } 41 | -------------------------------------------------------------------------------- /e2e/tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "e2e-tests" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | e2e-helpers = { workspace = true } 14 | 15 | [dev-dependencies] 16 | anyhow = { workspace = true } 17 | tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } 18 | storage = { workspace = true } 19 | services = { workspace = true } 20 | rust-eigenda-signers = { workspace = true } 21 | rust-eigenda-v2-client = { workspace = true } 22 | eigenda = { workspace = true } 23 | async-trait = { workspace = true } 24 | aws-config = { workspace = true } 25 | tracing = { workspace = true } 26 | tracing-subscriber = { workspace = true, features = [ 27 | "env-filter", 28 | "fmt", 29 | "json", 30 | ] } 31 | url = { workspace = true } 32 | hex = { workspace = true } 33 | k256 = { workspace = true, features = ["ecdsa"] } 34 | rand = { workspace = true } 35 | secp256k1 = { workspace = true, features = ["default", "recovery", "rand-std"] } 36 | sha2 = { workspace = true } 37 | ethereum-types = { workspace = true } 38 | reqwest = { version = "0.11.24", features = ["json"] } 39 | signers = { workspace = true, features = ["test-helpers"] } 40 | 41 | [features] 42 | default = ["fuel-core-simulated"] 43 | fuel-core-simulated = ["e2e-helpers/fuel-core-simulated"] 44 | -------------------------------------------------------------------------------- /packages/adapters/eigenda/src/error.rs: -------------------------------------------------------------------------------- 1 | use rust_eigenda_v2_client::errors::{ConversionError, PayloadDisperserError}; 2 | 3 | #[derive(Debug, thiserror::Error)] 4 | pub enum Error { 5 | #[error("transport error: {0}")] 6 | Transport(#[from] tonic::transport::Error), 7 | #[error("RPC error: {0}")] 8 | Rpc(#[from] tonic::Status), 9 | #[error("authentication failed")] 10 | AuthenticationFailed, 11 | 
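// The remaining variants wrap failures from the rust-eigenda-v2-client stack; the From impl at the bottom of this file decides which of them count as network errors.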
#[error("Invalid Ethereum RPC URL: {0}")] 12 | InvalidRPCUrl(#[from] url::ParseError), 13 | #[error("EigenDA client initialization failed: {0}")] 14 | EigenDAClientInit(anyhow::Error), 15 | #[error("EigenDA client error: {0}")] 16 | EigenDAClient(#[from] rust_eigenda_v2_client::errors::EigenClientError), 17 | #[error("Failed to dispatch blob: {0}")] 18 | BlockDispatchFailed(#[from] PayloadDisperserError), 19 | #[error("Invalid hex representation of blob key: {0}")] 20 | InvalidBlobKey(#[from] ConversionError), 21 | #[error("Failed throughput query: {0}")] 22 | ThroughputQueryFailed(#[from] governor::InsufficientCapacity), 23 | } 24 | 25 | pub type Result<T> = std::result::Result<T, Error>; 26 | 27 | impl From<Error> for services::Error { 28 | fn from(err: Error) -> Self { 29 | match err { 30 | Error::Transport(_) | Error::Rpc(_) => services::Error::Network(err.to_string()), 31 | Error::EigenDAClient(_) => services::Error::Network(err.to_string()), 32 | _ => services::Error::Other(err.to_string()), 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0001_initial.up.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | 3 | CREATE TABLE IF NOT EXISTS l1_fuel_block_submission ( 4 | fuel_block_hash BYTEA PRIMARY KEY NOT NULL, 5 | fuel_block_height BIGINT NOT NULL UNIQUE CHECK (fuel_block_height >= 0), 6 | completed BOOLEAN NOT NULL, 7 | submittal_height BIGINT NOT NULL CHECK (submittal_height >= 0), 8 | CHECK (octet_length(fuel_block_hash) = 32) 9 | ); 10 | 11 | CREATE TABLE IF NOT EXISTS l1_submissions ( 12 | id SERIAL PRIMARY KEY, 13 | fuel_block_hash BYTEA NOT NULL, 14 | fuel_block_height BIGINT NOT NULL UNIQUE CHECK (fuel_block_height >= 0), 15 | CHECK (octet_length(fuel_block_hash) = 32) 16 | ); 17 | 18 | CREATE TABLE IF NOT EXISTS l1_fragments ( 19 | id SERIAL PRIMARY KEY, 20 | fragment_idx BIGINT NOT NULL CHECK (fragment_idx >= 0), 21 | submission_id INTEGER NOT NULL REFERENCES l1_submissions(id), 22 | data BYTEA NOT NULL, 23 | created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP 24 | ); 25 | 26 | CREATE TABLE IF NOT EXISTS l1_transactions ( 27 | id SERIAL PRIMARY KEY, 28 | hash BYTEA NOT NULL UNIQUE, 29 | state SMALLINT NOT NULL, 30 | CHECK (octet_length(hash) = 32), 31 | CHECK (state IN (0, 1, 2)) 32 | ); 33 | 34 | CREATE TABLE IF NOT EXISTS l1_transaction_fragments ( 35 | transaction_id INTEGER NOT NULL REFERENCES l1_transactions(id), 36 | fragment_id INTEGER NOT NULL REFERENCES l1_fragments(id), 37 | PRIMARY KEY (transaction_id, fragment_id) 38 | ); 39 | 40 | COMMIT; 41 | -------------------------------------------------------------------------------- /committer/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fuel-block-committer" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | actix-web = { workspace = true, features = ["macros"] } 14 | byte-unit = { workspace = true, features = ["byte", "u128"] } 15 | clap = { workspace = true, features = ["default", "derive"] } 16 | clock = { workspace = true } 17 | config = { workspace = true, features = ["async"] } 18 | eigenda = { workspace = true } 19 | eth = { workspace = true } 20 |
fuel = { workspace = true } 21 | fuel-block-committer-encoding = { workspace = true } 22 | humantime = { workspace = true } 23 | metrics = { workspace = true } 24 | num_cpus = { workspace = true } 25 | serde = { workspace = true } 26 | signers = { workspace = true } 27 | hex = { workspace = true } 28 | serde_json = { workspace = true } 29 | services = { workspace = true } 30 | storage = { workspace = true } 31 | thiserror = { workspace = true } 32 | tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } 33 | tokio-util = { workspace = true } 34 | tracing = { workspace = true } 35 | tracing-subscriber = { workspace = true, features = ["fmt", "json"] } 36 | url = { workspace = true } 37 | 38 | [dev-dependencies] 39 | anyhow = { workspace = true } 40 | services = { workspace = true, features = ["test-helpers"] } 41 | storage = { workspace = true, features = ["test-helpers"] } 42 | -------------------------------------------------------------------------------- /packages/adapters/storage/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "storage" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | tracing = { workspace = true } 14 | futures = { workspace = true } 15 | delegate = { workspace = true, optional = true } 16 | hex = { workspace = true } 17 | itertools = { workspace = true, features = ["use_alloc"] } 18 | metrics = { workspace = true } 19 | num-bigint = { workspace = true } 20 | services = { workspace = true } 21 | rand = { workspace = true, optional = true } 22 | serde = { workspace = true } 23 | serde_json = { workspace = true } 24 | sqlx = { workspace = true, features = [ 25 | "bigdecimal", 26 | "postgres", 27 | "runtime-tokio", 28 | "migrate", 29 | "macros", 30 | "tls-rustls", 31 | "time", 32 | "chrono", 33 | "json", 34 | ] } 35 | testcontainers = { workspace = true, optional = true, features = [ 36 | "signal-hook", 37 | "watchdog", 38 | ] } 39 | thiserror = { workspace = true } 40 | tokio = { workspace = true, optional = true } 41 | 42 | [dev-dependencies] 43 | hex = { workspace = true } 44 | clock = { workspace = true, features = ["test-helpers"] } 45 | rand = { workspace = true, features = ["small_rng"] } 46 | storage = { workspace = true, features = ["test-helpers"] } 47 | tokio = { workspace = true } 48 | 49 | [features] 50 | test-helpers = ["dep:testcontainers", "tokio/sync", "dep:rand", "dep:delegate"] 51 | -------------------------------------------------------------------------------- /packages/services/src/status_reporter.rs: -------------------------------------------------------------------------------- 1 | pub mod service { 2 | use serde::Serialize; 3 | 4 | use crate::Result; 5 | 6 | #[derive(Debug, Serialize, Default, PartialEq, Eq)] 7 | pub struct StatusReport { 8 | pub status: Status, 9 | } 10 | 11 | #[derive(Serialize, Debug, Default, PartialEq, Eq)] 12 | pub enum Status { 13 | #[default] 14 | Idle, 15 | Committing, 16 | } 17 | 18 | pub struct StatusReporter<Db> { 19 | storage: Db, 20 | } 21 | 22 | impl<Db> StatusReporter<Db> { 23 | pub fn new(storage: Db) -> Self { 24 | Self { storage } 25 | } 26 | } 27 | impl<Db> StatusReporter<Db> 28 | where 29 | Db: crate::status_reporter::port::Storage, 30 | { 31 | pub async fn current_status(&self)
-> Result<StatusReport> { 32 | let last_submission_completed = self 33 | .storage 34 | .submission_w_latest_block() 35 | .await? 36 | .map(|submission| submission.completed); 37 | 38 | let status = if last_submission_completed == Some(false) { 39 | Status::Committing 40 | } else { 41 | Status::Idle 42 | }; 43 | 44 | Ok(StatusReport { status }) 45 | } 46 | } 47 | } 48 | 49 | pub mod port { 50 | use crate::{Result, types::BlockSubmission}; 51 | 52 | #[allow(async_fn_in_trait)] 53 | #[trait_variant::make(Send)] 54 | pub trait Storage: Send + Sync { 55 | async fn submission_w_latest_block(&self) -> Result<Option<BlockSubmission>>; 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /packages/encoding/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fuel-block-committer-encoding" 3 | version = { workspace = true } 4 | authors = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | edition = "2021" 9 | rust-version = "1.81" 10 | description = "Used to encode/decode bundles created by the fuel block committer." 11 | publish = true 12 | 13 | [dependencies] 14 | # alloy only really needed when generating proofs, and since the core doesn't 15 | # need to do that we've gated it behind the `kzg` feature flag. 16 | alloy = { workspace = true, features = ["consensus", "eips"], optional = true } 17 | anyhow = { workspace = true, features = ["default"] } 18 | bitvec = { workspace = true, features = ["default"] } 19 | c-kzg = { workspace = true, optional = true } 20 | flate2 = { workspace = true, features = ["default"] } 21 | hex = { workspace = true, features = ["alloc"] } 22 | itertools = { workspace = true, features = ["use_std"] } 23 | postcard = { workspace = true, features = ["use-std"] } 24 | serde = { workspace = true } 25 | static_assertions = { workspace = true } 26 | 27 | [dev-dependencies] 28 | alloy = { workspace = true, features = ["consensus", "eips", "kzg"] } 29 | fuel-block-committer-encoding = { workspace = true, features = [ 30 | "default", 31 | "kzg", 32 | ] } 33 | itertools = { workspace = true, features = ["use_alloc"] } 34 | proptest = { workspace = true, features = ["default"] } 35 | rand = { workspace = true, features = ["std", "std_rng", "small_rng"] } 36 | test-case = { workspace = true } 37 | 38 | [features] 39 | default = [] 40 | kzg = ["alloy/kzg", "dep:c-kzg"] 41 | -------------------------------------------------------------------------------- /fee_algo_simulation/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | use actix_web::web::{self, Data}; 4 | use anyhow::Result; 5 | use services::fees::cache::CachingApi; 6 | use tracing::level_filters::LevelFilter; 7 | use tracing_subscriber::EnvFilter; 8 | 9 | mod handlers; 10 | mod models; 11 | mod state; 12 | mod utils; 13 | 14 | #[tokio::main] 15 | async fn main() -> Result<()> { 16 | let filter = EnvFilter::builder() 17 | .with_default_directive(LevelFilter::INFO.into()) 18 | .from_env()?
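// Mute tracing from the state committer's fee algo module; the directive below turns it off entirely.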
19 | .add_directive("services::state_committer::fee_algo=off".parse()?); 20 | 21 | tracing_subscriber::fmt() 22 | .with_env_filter(filter) 23 | .compact() 24 | .init(); 25 | 26 | let client = eth::HttpClient::new(models::URL).unwrap(); 27 | 28 | let num_blocks_per_month = 30 * 24 * 3600 / 12; 29 | 30 | let caching_api = CachingApi::new(client, num_blocks_per_month * 2); 31 | caching_api.import(utils::load_cache()).await; 32 | 33 | let state = state::AppState { 34 | fee_api: caching_api.clone(), 35 | }; 36 | 37 | let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); 38 | 39 | let server = actix_web::HttpServer::new(move || { 40 | actix_web::App::new() 41 | .app_data(Data::new(state.clone())) 42 | .service(web::resource("/").route(web::get().to(handlers::index_html))) 43 | .service(web::resource("/fees").route(web::get().to(handlers::get_fees))) 44 | }) 45 | .bind(addr)?; 46 | 47 | eprintln!("Server listening on http://{}", addr); 48 | 49 | server.run().await?; 50 | 51 | utils::save_cache(caching_api.export().await)?; 52 | 53 | Ok(()) 54 | } 55 | -------------------------------------------------------------------------------- /.sqlx/query-ddc1a18d0d257b9065830b46a10ce42fee96b0925eb2c30a0b98cf9f79c6ed76.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n SELECT\n f.*,\n b.start_height\n FROM l1_fragments f\n JOIN l1_transaction_fragments tf ON tf.fragment_id = f.id\n JOIN l1_blob_transaction t ON t.id = tf.transaction_id\n JOIN bundles b ON b.id = f.bundle_id\n WHERE t.hash = $1\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "idx", 14 | "type_info": "Int4" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "data", 19 | "type_info": "Bytea" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "total_bytes", 24 | "type_info": "Int8" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "unused_bytes", 29 | "type_info": "Int8" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "bundle_id", 34 | "type_info": "Int4" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "start_height", 39 | "type_info": "Int8" 40 | } 41 | ], 42 | "parameters": { 43 | "Left": [ 44 | "Bytea" 45 | ] 46 | }, 47 | "nullable": [ 48 | false, 49 | false, 50 | false, 51 | false, 52 | false, 53 | false, 54 | false 55 | ] 56 | }, 57 | "hash": "ddc1a18d0d257b9065830b46a10ce42fee96b0925eb2c30a0b98cf9f79c6ed76" 58 | } 59 | -------------------------------------------------------------------------------- /.sqlx/query-7e7591cc5c22205ede24f0f41bf43c0c8ede9352b6622b3095cb2bcd8a41ee41.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT * FROM l1_blob_transaction WHERE state = $1 or state = $2", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "hash", 14 | "type_info": "Bytea" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "state", 19 | "type_info": "Int2" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "finalized_at", 24 | "type_info": "Timestamptz" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "nonce", 29 | "type_info": "Int8" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "max_fee", 34 | "type_info": "Numeric" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "priority_fee", 39 | "type_info": "Numeric" 40 | }, 41 | { 42 | "ordinal": 7, 43 | "name": "blob_fee", 44 | "type_info": "Numeric" 
45 | }, 46 | { 47 | "ordinal": 8, 48 | "name": "created_at", 49 | "type_info": "Timestamptz" 50 | } 51 | ], 52 | "parameters": { 53 | "Left": [ 54 | "Int2", 55 | "Int2" 56 | ] 57 | }, 58 | "nullable": [ 59 | false, 60 | false, 61 | false, 62 | true, 63 | false, 64 | false, 65 | false, 66 | false, 67 | false 68 | ] 69 | }, 70 | "hash": "7e7591cc5c22205ede24f0f41bf43c0c8ede9352b6622b3095cb2bcd8a41ee41" 71 | } 72 | -------------------------------------------------------------------------------- /.sqlx/query-eb2316bcf588bee755224ad3503e5e9ff10ba310c44ecf67e55d27b06e8449e6.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT * FROM l1_blob_transaction WHERE state = $1 ORDER BY created_at DESC LIMIT 1", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "hash", 14 | "type_info": "Bytea" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "state", 19 | "type_info": "Int2" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "finalized_at", 24 | "type_info": "Timestamptz" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "nonce", 29 | "type_info": "Int8" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "max_fee", 34 | "type_info": "Numeric" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "priority_fee", 39 | "type_info": "Numeric" 40 | }, 41 | { 42 | "ordinal": 7, 43 | "name": "blob_fee", 44 | "type_info": "Numeric" 45 | }, 46 | { 47 | "ordinal": 8, 48 | "name": "created_at", 49 | "type_info": "Timestamptz" 50 | } 51 | ], 52 | "parameters": { 53 | "Left": [ 54 | "Int2" 55 | ] 56 | }, 57 | "nullable": [ 58 | false, 59 | false, 60 | false, 61 | true, 62 | false, 63 | false, 64 | false, 65 | false, 66 | false 67 | ] 68 | }, 69 | "hash": "eb2316bcf588bee755224ad3503e5e9ff10ba310c44ecf67e55d27b06e8449e6" 70 | } 71 | -------------------------------------------------------------------------------- /.sqlx/query-3f1f4aaee324cb731aacf0b34ea6a1c76dade6d2234e2fe238a6aab5a15b17fa.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT * FROM l1_transaction WHERE state = $1 AND submission_id = $2", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "submission_id", 14 | "type_info": "Int4" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "hash", 19 | "type_info": "Bytea" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "nonce", 24 | "type_info": "Int8" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "max_fee", 29 | "type_info": "Numeric" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "priority_fee", 34 | "type_info": "Numeric" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "created_at", 39 | "type_info": "Timestamptz" 40 | }, 41 | { 42 | "ordinal": 7, 43 | "name": "finalized_at", 44 | "type_info": "Timestamptz" 45 | }, 46 | { 47 | "ordinal": 8, 48 | "name": "state", 49 | "type_info": "Int2" 50 | } 51 | ], 52 | "parameters": { 53 | "Left": [ 54 | "Int2", 55 | "Int4" 56 | ] 57 | }, 58 | "nullable": [ 59 | false, 60 | false, 61 | false, 62 | false, 63 | false, 64 | false, 65 | false, 66 | true, 67 | false 68 | ] 69 | }, 70 | "hash": "3f1f4aaee324cb731aacf0b34ea6a1c76dade6d2234e2fe238a6aab5a15b17fa" 71 | } 72 | -------------------------------------------------------------------------------- /packages/adapters/eigenda/src/bindings.rs: 
-------------------------------------------------------------------------------- 1 | // This file is retained for compatibility but is deprecated. 2 | // The bindings have been replaced by the rust-eigenda-client library. 3 | // This file will be removed in a future update. 4 | 5 | pub mod common { 6 | // Stub for compatibility 7 | } 8 | 9 | pub mod disperser { 10 | // Stub for compatibility 11 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 12 | #[repr(i32)] 13 | pub enum BlobStatus { 14 | Unknown = 0, 15 | Queued = 1, 16 | Encoded = 2, 17 | GatheringSignatures = 3, 18 | Complete = 4, 19 | Failed = 5, 20 | } 21 | 22 | impl BlobStatus { 23 | pub fn as_str_name(&self) -> &'static str { 24 | match self { 25 | BlobStatus::Unknown => "UNKNOWN", 26 | BlobStatus::Queued => "QUEUED", 27 | BlobStatus::Encoded => "ENCODED", 28 | BlobStatus::GatheringSignatures => "GATHERING_SIGNATURES", 29 | BlobStatus::Complete => "COMPLETE", 30 | BlobStatus::Failed => "FAILED", 31 | } 32 | } 33 | } 34 | 35 | impl From<i32> for BlobStatus { 36 | fn from(value: i32) -> Self { 37 | match value { 38 | 0 => BlobStatus::Unknown, 39 | 1 => BlobStatus::Queued, 40 | 2 => BlobStatus::Encoded, 41 | 3 => BlobStatus::GatheringSignatures, 42 | 4 => BlobStatus::Complete, 43 | 5 => BlobStatus::Failed, 44 | _ => BlobStatus::Unknown, // Default case 45 | } 46 | } 47 | } 48 | } 49 | 50 | // Re-export for compatibility 51 | pub use disperser::*; 52 | -------------------------------------------------------------------------------- /.sqlx/query-f9b6289ce3be042ec58c756e5b5b010b5010091a69431f51acc1adf1db12f708.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "UPDATE l1_transaction SET state = $1, finalized_at = $2 WHERE hash = $3 RETURNING *", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "submission_id", 14 | "type_info": "Int4" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "hash", 19 | "type_info": "Bytea" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "nonce", 24 | "type_info": "Int8" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "max_fee", 29 | "type_info": "Numeric" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "priority_fee", 34 | "type_info": "Numeric" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "created_at", 39 | "type_info": "Timestamptz" 40 | }, 41 | { 42 | "ordinal": 7, 43 | "name": "finalized_at", 44 | "type_info": "Timestamptz" 45 | }, 46 | { 47 | "ordinal": 8, 48 | "name": "state", 49 | "type_info": "Int2" 50 | } 51 | ], 52 | "parameters": { 53 | "Left": [ 54 | "Int2", 55 | "Timestamptz", 56 | "Bytea" 57 | ] 58 | }, 59 | "nullable": [ 60 | false, 61 | false, 62 | false, 63 | false, 64 | false, 65 | false, 66 | false, 67 | true, 68 | false 69 | ] 70 | }, 71 | "hash": "f9b6289ce3be042ec58c756e5b5b010b5010091a69431f51acc1adf1db12f708" 72 | } 73 | -------------------------------------------------------------------------------- /packages/encoding/src/blob.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | mod header; 4 | 5 | pub use decoder::Decoder; 6 | pub use encoder::Encoder; 7 | pub use header::*; 8 | 9 | use crate::constants::BYTES_PER_BLOB; 10 | 11 | pub type Blob = Box<[u8; BYTES_PER_BLOB]>; 12 | 13 | #[cfg(feature = "kzg")] 14 | pub fn generate_sidecar( 15 | blobs: impl IntoIterator<Item = Blob>, 16 | ) -> anyhow::Result<alloy::consensus::BlobTransactionSidecar> { 17 | let blobs = blobs 18 | .into_iter() 19
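// Each committer Blob is a boxed [u8; BYTES_PER_BLOB]; re-wrap it as an alloy EIP-4844 blob before computing the KZG commitments and proofs below.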
.map(|blob| alloy::eips::eip4844::Blob::from(*blob)) 20 | .collect::<Vec<_>>(); 21 | let mut commitments = Vec::with_capacity(blobs.len()); 22 | let mut proofs = Vec::with_capacity(blobs.len()); 23 | let env_settings = alloy::consensus::EnvKzgSettings::default(); 24 | let settings = env_settings.get(); 25 | 26 | for blob in &blobs { 27 | // SAFETY: same size 28 | let blob = 29 | unsafe { core::mem::transmute::<&alloy::eips::eip4844::Blob, &c_kzg::Blob>(blob) }; 30 | let commitment = settings.blob_to_kzg_commitment(blob)?; 31 | let proof = settings.compute_blob_kzg_proof(blob, &commitment.to_bytes())?; 32 | 33 | // SAFETY: same size 34 | unsafe { 35 | commitments.push(core::mem::transmute::< 36 | c_kzg::Bytes48, 37 | alloy::eips::eip4844::Bytes48, 38 | >(commitment.to_bytes())); 39 | proofs.push(core::mem::transmute::< 40 | c_kzg::Bytes48, 41 | alloy::eips::eip4844::Bytes48, 42 | >(proof.to_bytes())); 43 | } 44 | } 45 | 46 | Ok(alloy::consensus::BlobTransactionSidecar::new( 47 | blobs, 48 | commitments, 49 | proofs, 50 | )) 51 | } 52 | -------------------------------------------------------------------------------- /fee_algo_simulation/src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::{ops::RangeInclusive, path::PathBuf}; 2 | 3 | use services::fees::{Fees, FeesAtHeight}; 4 | use tracing::{error, info}; 5 | use xdg::BaseDirectories; 6 | 7 | use super::models::SavedFees; 8 | 9 | /// Path to the fee cache file. 10 | pub fn fee_file() -> PathBuf { 11 | let xdg = BaseDirectories::with_prefix("fee_simulation").unwrap(); 12 | if let Some(cache) = xdg.find_cache_file("fee_cache.json") { 13 | cache 14 | } else { 15 | xdg.place_data_file("fee_cache.json").unwrap() 16 | } 17 | } 18 | 19 | pub fn load_cache() -> Vec<(u64, Fees)> { 20 | let contents = match std::fs::read_to_string(fee_file()) { 21 | Ok(contents) => contents, 22 | Err(e) => { 23 | error!("Failed to read fee cache file: {e}"); 24 | return vec![]; 25 | } 26 | }; 27 | 28 | let fees: SavedFees = serde_json::from_str(&contents) 29 | .inspect_err(|e| error!("error while deserializing json cache!: {e}")) 30 | .unwrap_or_default(); 31 | 32 | info!("loaded from cache: {} fees", fees.fees.len()); 33 | 34 | fees.fees.into_iter().map(|f| (f.height, f.fees)).collect() 35 | } 36 | 37 | pub fn save_cache(cache: impl IntoIterator<Item = (u64, Fees)>) -> anyhow::Result<()> { 38 | let fees = SavedFees { 39 | fees: cache 40 | .into_iter() 41 | .map(|(height, fees)| FeesAtHeight { height, fees }) 42 | .collect(), 43 | }; 44 | std::fs::write(fee_file(), serde_json::to_string(&fees)?)?; 45 | info!("saved to cache: {} fees", fees.fees.len()); 46 | Ok(()) 47 | } 48 | 49 | pub fn last_n_blocks(current_block: u64, n: std::num::NonZeroU64) -> RangeInclusive<u64> { 50 | current_block.saturating_sub(n.get().saturating_sub(1))..=current_block 51 | } 52 | -------------------------------------------------------------------------------- /packages/metrics/src/connection_health_tracker.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{ 2 | Arc, 3 | atomic::{AtomicBool, AtomicUsize, Ordering}, 4 | }; 5 | 6 | use super::{HealthCheck, HealthChecker}; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct ConnectionHealthTracker { 10 | // how many failures are needed before the connection is deemed unhealthy 11 | max_consecutive_failures: usize, 12 | // how many consecutive failures there currently are 13 | consecutive_failures: Arc<AtomicUsize>, 14 | permanent_failure: Arc<AtomicBool>, 15 | } 16 | 17 | impl
ConnectionHealthTracker { 18 | pub fn new(max_consecutive_failures: usize) -> Self { 19 | Self { 20 | max_consecutive_failures, 21 | consecutive_failures: Arc::new(AtomicUsize::new(0)), 22 | permanent_failure: Arc::new(AtomicBool::new(false)), 23 | } 24 | } 25 | 26 | pub fn note_permanent_failure(&self) { 27 | self.permanent_failure.store(true, Ordering::SeqCst); 28 | } 29 | 30 | pub fn note_failure(&self) { 31 | if !self.permanent_failure.load(Ordering::Relaxed) { 32 | self.consecutive_failures.fetch_add(1, Ordering::SeqCst); 33 | } 34 | } 35 | 36 | pub fn note_success(&self) { 37 | if !self.permanent_failure.load(Ordering::Relaxed) { 38 | self.consecutive_failures.store(0, Ordering::SeqCst); 39 | } 40 | } 41 | 42 | pub fn tracker(&self) -> HealthChecker { 43 | Box::new(self.clone()) 44 | } 45 | } 46 | 47 | impl HealthCheck for ConnectionHealthTracker { 48 | fn healthy(&self) -> bool { 49 | if self.permanent_failure.load(Ordering::Relaxed) { 50 | return false; 51 | } 52 | 53 | self.consecutive_failures.load(Ordering::Relaxed) < self.max_consecutive_failures 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /.sqlx/query-39d3fae6fdd67a2324fae4d5e828f69f2298cd5b0f7eb1609ed189269c6f677c.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n SELECT\n bc.bundle_id,\n bc.cost,\n bc.size,\n bc.da_block_height,\n bc.is_finalized,\n b.start_height,\n b.end_height\n FROM\n bundle_cost bc\n JOIN bundles b ON bc.bundle_id = b.id\n WHERE\n bc.is_finalized = TRUE\n ORDER BY\n b.start_height DESC\n LIMIT $1\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "bundle_id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "cost", 14 | "type_info": "Numeric" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "size", 19 | "type_info": "Int8" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "da_block_height", 24 | "type_info": "Int8" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "is_finalized", 29 | "type_info": "Bool" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "start_height", 34 | "type_info": "Int8" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "end_height", 39 | "type_info": "Int8" 40 | } 41 | ], 42 | "parameters": { 43 | "Left": [ 44 | "Int8" 45 | ] 46 | }, 47 | "nullable": [ 48 | false, 49 | false, 50 | false, 51 | false, 52 | false, 53 | false, 54 | false 55 | ] 56 | }, 57 | "hash": "39d3fae6fdd67a2324fae4d5e828f69f2298cd5b0f7eb1609ed189269c6f677c" 58 | } 59 | -------------------------------------------------------------------------------- /packages/adapters/storage/migrations/0002_better_fragmentation.up.sql: -------------------------------------------------------------------------------- 1 | BEGIN; 2 | 3 | DELETE FROM l1_transaction_fragments; 4 | DELETE FROM l1_fragments; 5 | 6 | CREATE TABLE IF NOT EXISTS fuel_blocks ( 7 | hash BYTEA PRIMARY KEY NOT NULL, 8 | height BIGINT NOT NULL UNIQUE CHECK (height >= 0), 9 | CHECK (octet_length(hash) = 32), 10 | data BYTEA NOT NULL 11 | ); 12 | 13 | -- Create new 'bundles' table to represent groups of blocks 14 | CREATE TABLE IF NOT EXISTS bundles ( 15 | id SERIAL PRIMARY KEY, 16 | start_height BIGINT NOT NULL CHECK (start_height >= 0), 17 | end_height BIGINT NOT NULL CHECK (end_height >= start_height) -- Ensure valid range 18 | ); 19 | 20 | CREATE INDEX idx_bundles_start_end ON bundles (start_height, end_height); 21 | 22 | 23 | ALTER TABLE l1_fragments 24 | DROP COLUMN submission_id, 
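-- fragments are re-parented: the per-submission link goes away and a bundle_id (added below) takes its place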
25 | DROP COLUMN created_at, 26 | ADD COLUMN total_bytes BIGINT NOT NULL CHECK (total_bytes > 0), 27 | ADD COLUMN unused_bytes BIGINT NOT NULL CHECK (unused_bytes >= 0), 28 | ADD COLUMN bundle_id INTEGER REFERENCES bundles(id) NOT NULL, 29 | ADD CONSTRAINT check_data_not_empty CHECK (octet_length(data) > 0), 30 | ALTER COLUMN fragment_idx TYPE INTEGER; 31 | 32 | ALTER TABLE l1_fragments 33 | RENAME COLUMN fragment_idx TO idx; 34 | 35 | 36 | -- Add the new finalized_at column with UTC timestamp, allowing NULL values initially 37 | ALTER TABLE l1_transactions 38 | ADD COLUMN finalized_at TIMESTAMPTZ; 39 | 40 | -- Update rows where state is 1 and set finalized_at to the current timestamp 41 | UPDATE l1_transactions 42 | SET finalized_at = NOW() 43 | WHERE state = 1; 44 | 45 | -- Add a check constraint to ensure finalized_at is not null when state is 1 46 | ALTER TABLE l1_transactions 47 | ADD CONSTRAINT state_finalized_check 48 | CHECK (state != 1 OR finalized_at IS NOT NULL); 49 | 50 | 51 | COMMIT; 52 | -------------------------------------------------------------------------------- /.sqlx/query-365408c350d49ba41303b49b319a0fb317ced0dc5bae79b8a491b0807cf97958.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n SELECT\n bc.bundle_id,\n bc.cost,\n bc.size,\n bc.da_block_height,\n bc.is_finalized,\n b.start_height,\n b.end_height\n FROM\n bundle_cost bc\n JOIN bundles b ON bc.bundle_id = b.id\n WHERE\n b.end_height >= $1 AND bc.is_finalized = TRUE\n ORDER BY\n b.start_height ASC\n LIMIT $2\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "bundle_id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "cost", 14 | "type_info": "Numeric" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "size", 19 | "type_info": "Int8" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "da_block_height", 24 | "type_info": "Int8" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "is_finalized", 29 | "type_info": "Bool" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "start_height", 34 | "type_info": "Int8" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "end_height", 39 | "type_info": "Int8" 40 | } 41 | ], 42 | "parameters": { 43 | "Left": [ 44 | "Int8", 45 | "Int8" 46 | ] 47 | }, 48 | "nullable": [ 49 | false, 50 | false, 51 | false, 52 | false, 53 | false, 54 | false, 55 | false 56 | ] 57 | }, 58 | "hash": "365408c350d49ba41303b49b319a0fb317ced0dc5bae79b8a491b0807cf97958" 59 | } 60 | -------------------------------------------------------------------------------- /packages/encoding/src/blob/encoder.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use bitvec::{order::Msb0, slice::BitSlice}; 4 | use static_assertions::const_assert; 5 | mod storage; 6 | use storage::Storage; 7 | 8 | use super::{header::Header, Blob}; 9 | use crate::constants::{FIELD_ELEMENTS_PER_BLOB, USABLE_BITS_PER_FIELD_ELEMENT}; 10 | 11 | #[derive(Default, Debug, Clone)] 12 | pub struct Encoder { 13 | _private: PhantomData<()>, 14 | } 15 | 16 | impl Encoder { 17 | pub fn new() -> Self { 18 | Self::default() 19 | } 20 | } 21 | 22 | impl Encoder { 23 | pub const fn blobs_needed_to_encode(&self, num_bytes: usize) -> usize { 24 | #[allow(clippy::cast_possible_truncation)] 25 | const USABLE_BITS_PER_BLOB: usize = USABLE_BITS_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB; 26 | 27 | const NUM_BITS_FOR_METADATA: usize = Header::V1_SIZE_BITS; 28 | 29 | const 
NUM_BYTES_FOR_DATA: usize = 30 | (USABLE_BITS_PER_BLOB - NUM_BITS_FOR_METADATA).saturating_div(8); 31 | 32 | const_assert!(NUM_BYTES_FOR_DATA > 0); 33 | 34 | num_bytes.div_ceil(NUM_BYTES_FOR_DATA) 35 | } 36 | 37 | pub fn encode(&self, orig_data: &[u8], id: u32) -> anyhow::Result<Vec<Blob>> { 38 | let mut storage = Storage::new(); 39 | 40 | let mut data = BitSlice::<u8, Msb0>::from_slice(orig_data); 41 | while !data.is_empty() { 42 | let amount_ingested = storage.ingest(data); 43 | data = &data[amount_ingested..]; 44 | } 45 | 46 | Ok(storage.finalize(id)) 47 | } 48 | } 49 | 50 | #[cfg(test)] 51 | mod tests { 52 | #[test] 53 | fn can_handle_zero_input() { 54 | // given 55 | let no_data = []; 56 | 57 | // when 58 | let blobs = super::Encoder::new().encode(&no_data, 0).unwrap(); 59 | 60 | // then 61 | assert!(blobs.is_empty()); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /packages/services/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod block_bundler; 2 | pub mod block_committer; 3 | pub mod block_importer; 4 | pub mod cost_reporter; 5 | pub mod fee_metrics_tracker; 6 | pub mod health_reporter; 7 | pub mod state_committer; 8 | pub mod state_listener; 9 | pub mod state_pruner; 10 | pub mod status_reporter; 11 | pub mod types; 12 | pub mod wallet_balance_tracker; 13 | 14 | pub mod fees; 15 | 16 | #[cfg(feature = "test-helpers")] 17 | pub use block_bundler::{ 18 | bundler::Bundler, 19 | common::{Bundle, BundleProposal, Metadata}, 20 | test_helpers::ControllableBundlerFactory, 21 | }; 22 | pub use block_bundler::{ 23 | bundler::Factory as BundlerFactory, 24 | eigen_bundler::Factory as EigenBundlerFactory, 25 | service::{BlockBundler, Config as BlockBundlerConfig}, 26 | }; 27 | pub use state_committer::eigen_service::{ 28 | Config as EigenStatecommitterConfig, StateCommitter as EigenStateCommitter, 29 | }; 30 | pub use state_committer::service::{Config as StateCommitterConfig, StateCommitter}; 31 | use types::InvalidL1Height; 32 | 33 | #[derive(thiserror::Error, Debug, PartialEq, Eq)] 34 | pub enum Error { 35 | #[error("{0}")] 36 | Other(String), 37 | #[error("Network error: {0}")] 38 | Network(String), 39 | #[error("Storage error: {0}")] 40 | Storage(String), 41 | #[error("Block validation error: {0}")] 42 | BlockValidation(String), 43 | #[error("Bundling error: {0}")] 44 | Bundler(String), 45 | } 46 | 47 | impl From<InvalidL1Height> for Error { 48 | fn from(err: InvalidL1Height) -> Self { 49 | Self::Other(err.to_string()) 50 | } 51 | } 52 | 53 | impl From<String> for Error { 54 | fn from(error: String) -> Self { 55 | Self::Other(error) 56 | } 57 | } 58 | 59 | pub type Result<T> = std::result::Result<T, Error>; 60 | 61 | #[trait_variant::make(Send)] 62 | pub trait Runner: Sync { 63 | async fn run(&mut self) -> Result<()>; 64 | } 65 | -------------------------------------------------------------------------------- /packages/adapters/eth/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eth" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | alloy = { workspace = true, features = [ 14 | "consensus", 15 | "network", 16 | "provider-ws", 17 | "kzg", 18 | "contract", 19 | "signer-aws", 20 | "signer-local", 21 |
"rpc-types", 22 | "reqwest-rustls-tls", 23 | ] } 24 | static_assertions = { workspace = true } 25 | async-trait = { workspace = true } 26 | aws-config = { workspace = true, features = ["default"] } 27 | aws-sdk-kms = { workspace = true, features = ["default"] } 28 | delegate = { workspace = true } 29 | fuel-block-committer-encoding = { workspace = true, features = ["kzg"] } 30 | futures = { workspace = true } 31 | itertools = { workspace = true, features = ["use_alloc"] } 32 | metrics = { workspace = true } 33 | services = { workspace = true } 34 | serde = { workspace = true } 35 | thiserror = { workspace = true } 36 | tokio = { workspace = true, features = ["time"] } 37 | tracing = { workspace = true } 38 | url = { workspace = true } 39 | 40 | [dev-dependencies] 41 | eth = { workspace = true, features = ["test-helpers"] } 42 | alloy = { workspace = true, features = [ 43 | "signer-local", 44 | "node-bindings", 45 | "json-rpc", 46 | ] } 47 | serde_json = { workspace = true } 48 | mockall = { workspace = true } 49 | services = { workspace = true, features = ["test-helpers"] } 50 | pretty_assertions = { workspace = true, features = ["default"] } 51 | proptest = { workspace = true, features = ["default"] } 52 | rand = { workspace = true, features = ["small_rng"] } 53 | test-case = { workspace = true } 54 | tokio = { workspace = true, features = ["macros"] } 55 | 56 | [features] 57 | test-helpers = [] 58 | -------------------------------------------------------------------------------- /packages/services/tests/status_reporter.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use clock::TestClock; 4 | use rand::Rng; 5 | use services::{ 6 | status_reporter::service::{Status, StatusReport, StatusReporter}, 7 | types::{BlockSubmission, BlockSubmissionTx}, 8 | }; 9 | use storage::PostgresProcess; 10 | 11 | #[tokio::test] 12 | async fn status_depends_on_last_submission() { 13 | use services::block_committer::port::Storage; 14 | 15 | let process = PostgresProcess::shared().await.unwrap(); 16 | let test = |submission_status, expected_app_status| { 17 | let process = Arc::clone(&process); 18 | async move { 19 | // given 20 | let mut rng = rand::thread_rng(); 21 | let db = process.create_random_db().await.unwrap(); 22 | 23 | if let Some(is_completed) = submission_status { 24 | let latest_submission = BlockSubmission { 25 | completed: is_completed, 26 | ..rng.r#gen() 27 | }; 28 | db.record_block_submission( 29 | BlockSubmissionTx::default(), 30 | latest_submission, 31 | TestClock::default().now(), 32 | ) 33 | .await 34 | .unwrap(); 35 | } 36 | 37 | let status_reporter = StatusReporter::new(db); 38 | 39 | // when 40 | let status = status_reporter.current_status().await.unwrap(); 41 | 42 | // then 43 | assert_eq!( 44 | status, 45 | StatusReport { 46 | status: expected_app_status 47 | } 48 | ); 49 | } 50 | }; 51 | 52 | // has an entry, not completed 53 | test(Some(false), Status::Committing).await; 54 | // has an entry, completed 55 | test(Some(true), Status::Idle).await; 56 | // has no entry 57 | test(None, Status::Idle).await; 58 | } 59 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Fuel Security Policy 2 | 3 | Thank you for helping make the Fuel ecosystem safe for everyone. The Fuel team take security bugs very seriously. 
We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions. 4 | 5 | ## Reporting Security Issues 6 | 7 | If you believe you have found a security vulnerability in any Fuel-owned repository, please report it to us through coordinated disclosure. 8 | 9 | **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** 10 | 11 | Instead, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/FuelLabs/fuel-block-committer/security/advisories/new) tab. 12 | 13 | The Fuel team will send a response indicating the next steps in handling your report. After the initial reply to your report, the team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance. 14 | 15 | Please include as much of the information listed below as you can to help us better understand and resolve the issue: 16 | 17 | * The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting) 18 | * Full paths of source file(s) related to the manifestation of the issue 19 | * The location of the affected source code (tag/branch/commit or direct URL) 20 | * Any special configuration required to reproduce the issue 21 | * Step-by-step instructions to reproduce the issue 22 | * Proof-of-concept or exploit code (if possible) 23 | * Impact of the issue, including how an attacker might exploit the issue 24 | 25 | This information will help us triage your report more quickly. 26 | 27 | Report security bugs in third-party modules to the person or team maintaining the module. 28 | 29 | ## Non-Security Issues 30 | 31 | If the issue is not security-related, please report it publicly by opening a [GitHub Issue](https://github.com/FuelLabs/fuel-block-committer/issues/new). 
32 | -------------------------------------------------------------------------------- /packages/services/src/types/block_submission.rs: -------------------------------------------------------------------------------- 1 | use sqlx::types::chrono::{DateTime, Utc}; 2 | 3 | use super::{NonEmpty, NonNegative, TransactionState}; 4 | 5 | #[derive(Debug, Clone, PartialEq, Eq)] 6 | pub struct CompressedFuelBlock { 7 | pub height: u32, 8 | pub data: NonEmpty<u8>, 9 | } 10 | 11 | #[derive(Debug, Clone, PartialEq, Eq)] 12 | pub struct BlockSubmissionTx { 13 | pub id: Option<NonNegative<i32>>, 14 | pub submission_id: Option<NonNegative<i32>>, 15 | pub hash: [u8; 32], 16 | pub nonce: u32, 17 | pub max_fee: u128, 18 | pub priority_fee: u128, 19 | pub state: TransactionState, 20 | pub created_at: Option<DateTime<Utc>>, 21 | } 22 | 23 | impl Default for BlockSubmissionTx { 24 | fn default() -> Self { 25 | Self { 26 | id: None, 27 | submission_id: None, 28 | hash: [0; 32], 29 | nonce: 0, 30 | max_fee: 0, 31 | priority_fee: 0, 32 | state: TransactionState::Pending, 33 | created_at: None, 34 | } 35 | } 36 | } 37 | 38 | #[derive(Debug, Clone, PartialEq, Eq)] 39 | pub struct BlockSubmission { 40 | pub id: Option<NonNegative<i32>>, 41 | pub block_hash: [u8; 32], 42 | pub block_height: u32, 43 | pub completed: bool, 44 | } 45 | 46 | impl BlockSubmission { 47 | pub fn new(block_hash: [u8; 32], block_height: u32) -> Self { 48 | Self { 49 | id: None, 50 | block_hash, 51 | block_height, 52 | completed: false, 53 | } 54 | } 55 | } 56 | 57 | #[cfg(feature = "test-helpers")] 58 | impl rand::distributions::Distribution<BlockSubmission> for rand::distributions::Standard { 59 | fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> BlockSubmission { 60 | BlockSubmission { 61 | id: Some(rng.r#gen()), 62 | block_hash: rng.r#gen(), 63 | block_height: rng.r#gen(), 64 | completed: rng.r#gen(), 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /packages/services/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "services" 3 | authors = { workspace = true } 4 | edition = { workspace = true } 5 | homepage = { workspace = true } 6 | license = { workspace = true } 7 | repository = { workspace = true } 8 | version = { workspace = true } 9 | publish = { workspace = true } 10 | rust-version = { workspace = true } 11 | 12 | [dependencies] 13 | humantime = { workspace = true } 14 | byte-unit = { workspace = true, features = ["byte", "u128"] } 15 | alloy = { workspace = true } 16 | async-trait = { workspace = true } 17 | base64 = { workspace = true } 18 | bytesize = { workspace = true } 19 | fuel-block-committer-encoding = { workspace = true } 20 | futures = { workspace = true } 21 | hex = { workspace = true } 22 | itertools = { workspace = true, features = ["use_alloc"] } 23 | metrics = { workspace = true } 24 | mockall = { workspace = true, optional = true } 25 | nonempty = { workspace = true } 26 | rand = { workspace = true, optional = true } 27 | rayon = { workspace = true } 28 | serde = { workspace = true } 29 | sqlx = { workspace = true, features = ["chrono"] } 30 | thiserror = { workspace = true } 31 | tokio = { workspace = true } 32 | tracing = { workspace = true } 33 | trait-variant = { workspace = true } 34 | 35 | [dev-dependencies] 36 | derive_more = { workspace = true, features = ["error", "display"] } 37 | test-case = { workspace = true } 38 | clock = { workspace = true, features = ["test-helpers"] } 39 | eth = { workspace = true, features = ["test-helpers"] } 40 | eigenda = { workspace = true } 41 | mockall
= { workspace = true } 42 | services = { workspace = true, features = ["test-helpers"] } 43 | pretty_assertions = { workspace = true, features = ["std"] } 44 | storage = { workspace = true, features = ["test-helpers"] } 45 | tai64 = { workspace = true } 46 | tokio = { workspace = true, features = ["macros"] } 47 | test-helpers = { workspace = true } 48 | rand = { workspace = true, features = ["small_rng", "std", "std_rng"] } 49 | 50 | [features] 51 | test-helpers = ["dep:mockall", "dep:rand"] 52 | -------------------------------------------------------------------------------- /packages/services/src/cost_reporter.rs: -------------------------------------------------------------------------------- 1 | pub mod service { 2 | 3 | use crate::{Error, Result, types::BundleCost}; 4 | 5 | pub struct CostReporter<Db> { 6 | storage: Db, 7 | request_limit: usize, 8 | } 9 | 10 | impl<Db> CostReporter<Db> { 11 | pub fn new(storage: Db, request_limit: usize) -> Self { 12 | Self { 13 | storage, 14 | request_limit, 15 | } 16 | } 17 | } 18 | 19 | impl<Db> CostReporter<Db> 20 | where 21 | Db: crate::cost_reporter::port::Storage, 22 | { 23 | pub async fn get_costs( 24 | &self, 25 | from_block_height: u32, 26 | limit: usize, 27 | ) -> Result<Vec<BundleCost>> { 28 | if limit > self.request_limit { 29 | return Err(Error::Other(format!( 30 | "requested: {} items, but limit is: {}", 31 | limit, self.request_limit 32 | ))); 33 | } 34 | 35 | self.storage 36 | .get_finalized_costs(from_block_height, limit) 37 | .await 38 | } 39 | 40 | pub async fn get_latest_costs(&self, limit: usize) -> Result<Vec<BundleCost>> { 41 | if limit > self.request_limit { 42 | return Err(Error::Other(format!( 43 | "requested: {} items, but limit is: {}", 44 | limit, self.request_limit 45 | ))); 46 | } 47 | 48 | self.storage.get_latest_costs(limit).await 49 | } 50 | } 51 | } 52 | 53 | pub mod port { 54 | use crate::{Result, types::BundleCost}; 55 | 56 | #[allow(async_fn_in_trait)] 57 | #[trait_variant::make(Send)] 58 | pub trait Storage: Send + Sync { 59 | async fn get_finalized_costs( 60 | &self, 61 | from_block_height: u32, 62 | limit: usize, 63 | ) -> Result<Vec<BundleCost>>; 64 | 65 | async fn get_latest_costs(&self, limit: usize) -> Result<Vec<BundleCost>>; 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /packages/services/src/types/l1_height.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] 2 | pub struct L1Height { 3 | height: i64, 4 | } 5 | 6 | #[derive(Debug, Clone)] 7 | pub struct InvalidL1Height(String); 8 | impl std::fmt::Display for InvalidL1Height { 9 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 10 | write!(f, "Invalid l1 height: {}", self.0) 11 | } 12 | } 13 | impl std::error::Error for InvalidL1Height {} 14 | 15 | #[cfg(feature = "test-helpers")] 16 | impl rand::distributions::Distribution<L1Height> for rand::distributions::Standard { 17 | fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> L1Height { 18 | let height: i64 = rng.gen_range(0..=i64::MAX); 19 | height.try_into().expect("Must be valid EthHeight") 20 | } 21 | } 22 | 23 | impl TryFrom<i64> for L1Height { 24 | type Error = InvalidL1Height; 25 | 26 | fn try_from(height: i64) -> Result<Self, Self::Error> { 27 | if height < 0 { 28 | return Err(InvalidL1Height(format!( 29 | "must be non-negative, got {height}", 30 | ))); 31 | } 32 | Ok(Self { height }) 33 | } 34 | } 35 | 36 | impl TryFrom<u64> for L1Height { 37 | type Error = InvalidL1Height; 38 | fn try_from(height: u64) -> Result<Self, Self::Error> { 39 | if height >= i64::MAX as u64 { 40 | return
Err(InvalidL1Height(format!( 41 | "{height} too large. DB can handle at most {}", 42 | i64::MAX 43 | ))); 44 | } 45 | Ok(Self { 46 | height: height as i64, 47 | }) 48 | } 49 | } 50 | 51 | impl From<u32> for L1Height { 52 | fn from(height: u32) -> Self { 53 | Self { 54 | height: i64::from(height), 55 | } 56 | } 57 | } 58 | 59 | impl From<L1Height> for i64 { 60 | fn from(height: L1Height) -> Self { 61 | height.height 62 | } 63 | } 64 | 65 | impl From<L1Height> for u64 { 66 | fn from(height: L1Height) -> Self { 67 | height.height as Self 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for fuel-block-committer. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: ghcr.io/fuellabs/fuel-block-committer 9 | pullPolicy: Always 10 | # Overrides the image tag whose default is the chart appVersion. 11 | tag: "" 12 | 13 | imagePullSecrets: [] 14 | nameOverride: "" 15 | fullnameOverride: "" 16 | 17 | serviceAccount: 18 | # Specifies whether a service account should be created 19 | create: true 20 | # Annotations to add to the service account 21 | annotations: {} 22 | # The name of the service account to use. 23 | # If not set and create is true, a name is generated using the fullname template 24 | name: "" 25 | 26 | podAnnotations: {} 27 | 28 | podSecurityContext: {} 29 | # fsGroup: 2000 30 | 31 | securityContext: {} 32 | # capabilities: 33 | # drop: 34 | # - ALL 35 | # readOnlyRootFilesystem: true 36 | # runAsNonRoot: true 37 | # runAsUser: 1000 38 | 39 | service: 40 | type: ClusterIP 41 | port: 8080 42 | 43 | resources: {} 44 | # We usually recommend not to specify default resources and to leave this as a conscious 45 | # choice for the user. This also increases chances charts run on environments with little 46 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 47 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
48 | #   limits: 49 | #     cpu: 100m 50 | #     memory: 128Mi 51 | #   requests: 52 | #     cpu: 100m 53 | #     memory: 128Mi 54 | 55 | autoscaling: 56 | enabled: false 57 | minReplicas: 1 58 | maxReplicas: 100 59 | targetCPUUtilizationPercentage: 80 60 | # targetMemoryUtilizationPercentage: 80 61 | 62 | updateStrategyType: Recreate 63 | 64 | nodeSelector: {} 65 | 66 | tolerations: [] 67 | 68 | affinity: {} 69 | 70 | app: 71 | volume: 72 | storageclass: gp3-generic 73 | accessmodes: ReadWriteOnce 74 | storagerequests: 4Gi 75 | serviceMonitor: 76 | enabled: true 77 | prometheusRelease: kube-prometheus 78 | -------------------------------------------------------------------------------- /packages/services/src/fees/testing.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::BTreeMap, ops::RangeInclusive}; 2 | 3 | use itertools::Itertools; 4 | 5 | use super::{Api, Fees, FeesAtHeight, SequentialBlockFees}; 6 | 7 | #[derive(Debug, Clone, Copy)] 8 | pub struct ConstantFeeApi { 9 | fees: Fees, 10 | } 11 | 12 | impl ConstantFeeApi { 13 | pub const fn new(fees: Fees) -> Self { 14 | Self { fees } 15 | } 16 | } 17 | 18 | impl Api for ConstantFeeApi { 19 | async fn fees(&self, height_range: RangeInclusive<u64>) -> crate::Result<SequentialBlockFees> { 20 | let fees = height_range 21 | .into_iter() 22 | .map(|height| FeesAtHeight { 23 | height, 24 | fees: self.fees, 25 | }) 26 | .collect_vec(); 27 | 28 | Ok(fees.try_into().unwrap()) 29 | } 30 | 31 | async fn current_height(&self) -> crate::Result<u64> { 32 | Ok(0) 33 | } 34 | } 35 | 36 | #[derive(Debug, Clone)] 37 | pub struct PreconfiguredFeeApi { 38 | fees: BTreeMap<u64, Fees>, 39 | } 40 | 41 | impl Api for PreconfiguredFeeApi { 42 | async fn current_height(&self) -> crate::Result<u64> { 43 | Ok(*self 44 | .fees 45 | .keys() 46 | .last() 47 | .expect("no fees registered with PreconfiguredFeesProvider")) 48 | } 49 | 50 | async fn fees(&self, height_range: RangeInclusive<u64>) -> crate::Result<SequentialBlockFees> { 51 | let fees = self 52 | .fees 53 | .iter() 54 | .skip_while(|(height, _)| !height_range.contains(*height)) 55 | .take_while(|(height, _)| height_range.contains(*height)) 56 | .map(|(height, fees)| FeesAtHeight { 57 | height: *height, 58 | fees: *fees, 59 | }) 60 | .collect_vec(); 61 | 62 | Ok(fees.try_into().expect("block fees not sequential")) 63 | } 64 | } 65 | 66 | impl PreconfiguredFeeApi { 67 | pub fn new(blocks: impl IntoIterator<Item = (u64, Fees)>) -> Self { 68 | Self { 69 | fees: blocks.into_iter().collect(), 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /packages/services/src/fee_metrics_tracker/eigen_da.rs: -------------------------------------------------------------------------------- 1 | pub mod service { 2 | use metrics::{ 3 | RegistersMetrics, 4 | prometheus::{IntGauge, Opts, core::Collector}, 5 | }; 6 | 7 | use crate::{Result, Runner, fees::Api}; 8 | 9 | #[derive(Debug, Clone)] 10 | struct FeeMetrics { 11 | current: IntGauge, 12 | } 13 | 14 | impl Default for FeeMetrics { 15 | fn default() -> Self { 16 | let current = IntGauge::with_opts(Opts::new( 17 | "current_blob_tx_fee", 18 | "The current fee for a transaction with 6 blobs", 19 | )) 20 | .expect("metric config to be correct"); 21 | 22 | Self { current } 23 | } 24 | } 25 | 26 | impl<P> RegistersMetrics for FeeMetricsTracker<P> {
27 | fn metrics(&self) -> Vec<Box<dyn Collector>> { 28 | vec![Box::new(self.metrics.current.clone())] 29 | } 30 | } 31 | 32 | #[derive(Clone)] 33 | pub struct FeeMetricsTracker<P> { 34 | fee_provider: P, 35 | metrics: FeeMetrics, 36 | } 37 | 38 | impl<P> FeeMetricsTracker<P> { 39 | pub fn new(fee_provider: P) -> Self { 40 | Self { 41 | fee_provider, 42 | metrics: FeeMetrics::default(), 43 | } 44 | } 45 | } 46 | 47 | impl<P: Api> FeeMetricsTracker<P> { 48 | pub async fn update_metrics(&self) -> Result<()> { 49 | // eigenda has a reservation for fees, not dynamically changing fees 50 | // their fee api doesn't exist *yet* 51 | let fees = self.fee_provider.fees(0..=1).await?; 52 | let last_fee = 53 | i64::try_from(fees.last().fees.base_fee_per_blob_gas).unwrap_or(i64::MAX); 54 | 55 | self.metrics.current.set(last_fee); 56 | 57 | Ok(()) 58 | } 59 | } 60 | 61 | impl<P> Runner for FeeMetricsTracker<P>
62 | where 63 | P: Api + Send + Sync, 64 | { 65 | async fn run(&mut self) -> Result<()> { 66 | self.update_metrics().await?; 67 | Ok(()) 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /.sqlx/query-4e6581709112d5d929e525113dc4ed279da2291a4812fa1be1de3e636e6d573d.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT\n sub.id,\n sub.idx,\n sub.bundle_id,\n sub.data,\n sub.unused_bytes,\n sub.total_bytes,\n sub.start_height\n FROM (\n SELECT DISTINCT ON (f.id)\n f.*,\n b.start_height\n FROM l1_fragments f\n JOIN bundles b ON b.id = f.bundle_id\n WHERE\n b.end_height >= $2\n AND NOT EXISTS (\n SELECT 1\n FROM eigen_submission_fragments tf\n JOIN eigen_submission t ON t.id = tf.submission_id\n WHERE tf.fragment_id = f.id\n AND t.status <> $1\n )\n ORDER BY\n f.id,\n b.start_height ASC,\n f.idx ASC\n ) AS sub\n ORDER BY\n sub.start_height ASC,\n sub.idx ASC\n LIMIT $3;\n", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "idx", 14 | "type_info": "Int4" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "bundle_id", 19 | "type_info": "Int4" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "data", 24 | "type_info": "Bytea" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "unused_bytes", 29 | "type_info": "Int8" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "total_bytes", 34 | "type_info": "Int8" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "start_height", 39 | "type_info": "Int8" 40 | } 41 | ], 42 | "parameters": { 43 | "Left": [ 44 | "Int2", 45 | "Int8", 46 | "Int8" 47 | ] 48 | }, 49 | "nullable": [ 50 | false, 51 | false, 52 | false, 53 | false, 54 | false, 55 | false, 56 | false 57 | ] 58 | }, 59 | "hash": "4e6581709112d5d929e525113dc4ed279da2291a4812fa1be1de3e636e6d573d" 60 | } 61 | -------------------------------------------------------------------------------- /.sqlx/query-ed56ffeb0264867943f7891de21ff99a2bfb27dd1e51d0f877f939e29b7f3a52.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "SELECT\n sub.id,\n sub.idx,\n sub.bundle_id,\n sub.data,\n sub.unused_bytes,\n sub.total_bytes,\n sub.start_height\n FROM (\n SELECT DISTINCT ON (f.id)\n f.*,\n b.start_height\n FROM l1_fragments f\n JOIN bundles b ON b.id = f.bundle_id\n WHERE\n b.end_height >= $2\n AND NOT EXISTS (\n SELECT 1\n FROM l1_transaction_fragments tf\n JOIN l1_blob_transaction t ON t.id = tf.transaction_id\n WHERE tf.fragment_id = f.id\n AND t.state <> $1\n )\n ORDER BY\n f.id,\n b.start_height ASC,\n f.idx ASC\n ) AS sub\n ORDER BY\n sub.start_height ASC,\n sub.idx ASC\n LIMIT $3;\n", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "id", 9 | "type_info": "Int4" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "idx", 14 | "type_info": "Int4" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "bundle_id", 19 | "type_info": "Int4" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "data", 24 | "type_info": "Bytea" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "unused_bytes", 29 | "type_info": "Int8" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "total_bytes", 34 | "type_info": "Int8" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "start_height", 39 | "type_info": "Int8" 40 | } 41 | ], 42 | "parameters": { 43 | "Left": [ 44 | "Int2", 45 | "Int8", 46 | "Int8" 47 | ] 48 | }, 49 | "nullable": [ 50 | 
false, 51 | false, 52 | false, 53 | false, 54 | false, 55 | false, 56 | false 57 | ] 58 | }, 59 | "hash": "ed56ffeb0264867943f7891de21ff99a2bfb27dd1e51d0f877f939e29b7f3a52" 60 | } 61 | -------------------------------------------------------------------------------- /helm/fuel-block-committer/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "fuel-block-committer.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "fuel-block-committer.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "fuel-block-committer.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "fuel-block-committer.labels" -}} 37 | helm.sh/chart: {{ include "fuel-block-committer.chart" . }} 38 | {{ include "fuel-block-committer.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "fuel-block-committer.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "fuel-block-committer.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "fuel-block-committer.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "fuel-block-committer.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /e2e/benches/src/handlers.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use crate::data::{AppData, ConfigForm}; 3 | use crate::template; 4 | 5 | pub async fn serve_control_panel(data: web::Data<AppData>) -> HttpResponse { 6 | let cfg = data.simulation_config.lock().await; 7 | let current_block_size = cfg.block_size; 8 | let current_compress = cfg.compressibility.to_string().to_lowercase(); 9 | drop(cfg); 10 | 11 | let html = template::render_control_panel(current_block_size, &current_compress); 12 | HttpResponse::Ok().content_type("text/html").body(html) 13 | } 14 | 15 | /// Handles form submission to update the simulation configuration. 16 | /// Returns a 303 See Other redirect back to the control panel.
17 | pub async fn update_config(form: web::Form<ConfigForm>, data: web::Data<AppData>) -> HttpResponse { 18 | let compressibility = match form.compressibility.parse::<Compressibility>() { 19 | Ok(c) => c, 20 | Err(e) => { 21 | eprintln!("Error parsing compressibility: {}", e); 22 | Compressibility::Medium 23 | } 24 | }; 25 | 26 | { 27 | let mut cfg = data.simulation_config.lock().await; 28 | cfg.block_size = form.block_size; 29 | cfg.compressibility = compressibility; 30 | eprintln!( 31 | "Updated config: block_size={}, compressibility={}", 32 | cfg.block_size, cfg.compressibility 33 | ); 34 | } 35 | HttpResponse::SeeOther() 36 | .append_header(("location", "/")) 37 | .finish() 38 | } 39 | 40 | /// Proxies a GET request for `/proxy/metrics` to the committer metrics URL. Needed for CORS. 41 | pub async fn proxy_metrics(data: web::Data<AppData>) -> HttpResponse { 42 | let url = data.metrics_url.clone(); 43 | match reqwest::get(&url).await { 44 | Ok(resp) => match resp.text().await { 45 | Ok(body) => HttpResponse::Ok().content_type("text/plain").body(body), 46 | Err(e) => HttpResponse::InternalServerError() 47 | .body(format!("Error reading metrics response: {}", e)), 48 | }, 49 | Err(e) => { 50 | HttpResponse::InternalServerError().body(format!("Error fetching metrics: {}", e)) 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /packages/services/src/state_listener/port.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | Result, 3 | types::{ 4 | DateTime, DispersalStatus, EigenDARequestId, EigenDASubmission, L1Tx, 5 | TransactionCostUpdate, TransactionState, Utc, 6 | }, 7 | }; 8 | 9 | pub mod l1 { 10 | use crate::{ 11 | Result, 12 | types::{L1Height, TransactionResponse}, 13 | }; 14 | 15 | #[allow(async_fn_in_trait)] 16 | #[trait_variant::make(Send)] 17 | #[cfg_attr(feature = "test-helpers", mockall::automock)] 18 | pub trait Api { 19 | async fn get_block_number(&self) -> Result<L1Height>; 20 | async fn get_transaction_response( 21 | &self, 22 | tx_hash: [u8; 32], 23 | ) -> Result<Option<TransactionResponse>>; 24 | async fn is_squeezed_out(&self, tx_hash: [u8; 32]) -> Result<bool>; 25 | } 26 | } 27 | 28 | pub mod eigen_da { 29 | use crate::{Result, types::DispersalStatus}; 30 | 31 | #[allow(async_fn_in_trait)] 32 | #[trait_variant::make(Send)] 33 | #[cfg_attr(feature = "test-helpers", mockall::automock)] 34 | pub trait Api { 35 | async fn get_blob_status(&self, id: Vec<u8>) -> Result<DispersalStatus>; 36 | } 37 | } 38 | 39 | #[allow(async_fn_in_trait)] 40 | #[trait_variant::make(Send)] 41 | pub trait Storage: Sync { 42 | async fn get_non_finalized_txs(&self) -> Result<Vec<L1Tx>>; 43 | async fn update_tx_states_and_costs( 44 | &self, 45 | selective_changes: Vec<([u8; 32], TransactionState)>, 46 | noncewide_changes: Vec<([u8; 32], u32, TransactionState)>, 47 | cost_per_tx: Vec<TransactionCostUpdate>, 48 | ) -> Result<()>; 49 | async fn has_pending_txs(&self) -> Result<bool>; 50 | async fn earliest_submission_attempt(&self, nonce: u32) -> Result<Option<DateTime<Utc>>>; 51 | 52 | // EigenDA 53 | async fn get_non_finalized_eigen_submission(&self) -> Result<Vec<EigenDASubmission>>; 54 | async fn earliest_eigen_submission_attempt( 55 | &self, 56 | request_id: &EigenDARequestId, 57 | ) -> Result<Option<DateTime<Utc>>>; 58 | async fn update_eigen_submissions(&self, changes: Vec<(u32, DispersalStatus)>) -> Result<()>; 59 | } 60 | 61 | pub trait Clock { 62 | fn now(&self) -> DateTime<Utc>; 63 | } 64 | -------------------------------------------------------------------------------- /e2e/benches/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2
| use std::time::Duration; 3 | 4 | use actix_web::{App, HttpResponse, HttpServer, web}; 5 | use anyhow::Result; 6 | use e2e_helpers::{ 7 | fuel_node_simulated::{Compressibility, FuelNode, SimulationConfig}, 8 | whole_stack::{ 9 | create_and_fund_kms_signers, deploy_contract, start_db, start_eigen_committer, start_eth, 10 | start_kms, 11 | }, 12 | }; 13 | use serde::Deserialize; 14 | use tokio::sync::Mutex; 15 | 16 | mod data; 17 | mod handlers; 18 | mod template; 19 | 20 | #[actix_web::main] 21 | async fn main() -> Result<()> { 22 | let simulation_config = Arc::new(Mutex::new(SimulationConfig::new( 23 | 150_000, 24 | Compressibility::Medium, 25 | ))); 26 | 27 | let mut fuel_node = FuelNode::new(4000, simulation_config.clone()); 28 | fuel_node.run().await?; 29 | 30 | let logs = false; 31 | let kms = start_kms(logs).await?; 32 | let eth_node = start_eth(logs).await?; 33 | let eth_signers = create_and_fund_kms_signers(&kms, &eth_node).await?; 34 | let eigen_key = std::env::var("EIGEN_KEY").expect("EIGEN_KEY environment variable must be set"); 35 | let max_fee = 1_000_000_000_000; let request_timeout = Duration::from_secs(5); /* `request_timeout` was used below without ever being defined; this binding and its 5-second value are an assumption added so the call compiles */ 36 | let (_contract_args, deployed_contract) = 37 | deploy_contract(&eth_node, eth_signers.clone(), max_fee, request_timeout).await?; 38 | let db = start_db().await?; 39 | 40 | let logs = true; 41 | let committer = start_eigen_committer( 42 | logs, 43 | db.clone(), 44 | &eth_node, 45 | &fuel_node.url(), 46 | &deployed_contract, 47 | eth_signers.main, 48 | eigen_key, 49 | "28 MB", 50 | ) 51 | .await?; 52 | 53 | let app_data = web::Data::new(data::AppData { 54 | simulation_config: simulation_config.clone(), 55 | metrics_url: committer.metrics_url().to_string(), 56 | }); 57 | 58 | println!("Control panel available at http://localhost:3030"); 59 | 60 | HttpServer::new(move || { 61 | App::new() 62 | .app_data(app_data.clone()) 63 | .route("/", web::get().to(handlers::serve_control_panel)) 64 | .route("/update", web::post().to(handlers::update_config)) 65 | .route("/proxy/metrics", web::get().to(handlers::proxy_metrics)) 66 | }) 67 | .bind("0.0.0.0:3030")?
68 | .run() 69 | .await?; 70 | 71 | Ok(()) 72 | } 73 | -------------------------------------------------------------------------------- /packages/encoding/src/bundle/decoder.rs: -------------------------------------------------------------------------------- 1 | use std::{io::Read, marker::PhantomData}; 2 | 3 | use anyhow::Context; 4 | use flate2::read::GzDecoder; 5 | 6 | use crate::bundle::BundleV1; 7 | 8 | #[derive(Clone, Debug, Default)] 9 | pub struct Decoder { 10 | private: PhantomData<()>, 11 | } 12 | 13 | impl Decoder { 14 | pub fn decode(&self, data: &[u8]) -> anyhow::Result<super::Bundle> { 15 | if data.len() < 2 { 16 | anyhow::bail!("Bundle data too short to contain version"); 17 | } 18 | let version = u16::from_be_bytes([data[0], data[1]]); 19 | if version != 1 { 20 | anyhow::bail!("Unsupported bundle version: {version}"); 21 | } 22 | 23 | let data = Self::decompress(&data[2..]) 24 | .with_context(|| "failed to decompress BundleV1 contents")?; 25 | 26 | let blocks: BundleV1 = postcard::from_bytes(&data) 27 | .with_context(|| "failed to postcard decode decompressed contents of BundleV1")?; 28 | 29 | Ok(super::Bundle::V1(blocks)) 30 | } 31 | 32 | fn decompress(data: &[u8]) -> anyhow::Result<Vec<u8>> { 33 | let mut decoder = GzDecoder::new(data); 34 | 35 | let mut buf = vec![]; 36 | decoder.read_to_end(&mut buf)?; 37 | 38 | Ok(buf) 39 | } 40 | } 41 | 42 | #[cfg(test)] 43 | mod tests { 44 | use crate::bundle::{Bundle, BundleEncoder, BundleV1, Encoder}; 45 | 46 | #[test] 47 | fn complains_about_unsupported_version() { 48 | // given 49 | let encoder = Encoder::default(); 50 | let mut encoded_bundle = encoder 51 | .encode(Bundle::V1(BundleV1 { blocks: vec![] })) 52 | .unwrap(); 53 | encoded_bundle[..2].copy_from_slice(&5u16.to_be_bytes()); 54 | let decoder = super::Decoder::default(); 55 | 56 | // when 57 | let err = decoder.decode(&encoded_bundle).unwrap_err(); 58 | 59 | // then 60 | let expected = "Unsupported bundle version: 5"; 61 | assert_eq!(err.to_string(), expected); 62 | } 63 | 64 | #[test] 65 | fn complains_about_not_enough_data() { 66 | // given 67 | let decoder = super::Decoder::default(); 68 | 69 | // when 70 | let err = decoder.decode(&[1]).unwrap_err(); 71 | 72 | // then 73 | assert_eq!(err.to_string(), "Bundle data too short to contain version"); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /packages/adapters/eth/src/aws.rs: -------------------------------------------------------------------------------- 1 | use alloy::signers::aws::AwsSigner; 2 | use aws_config::{Region, SdkConfig, default_provider::credentials::DefaultCredentialsChain}; 3 | #[cfg(feature = "test-helpers")] 4 | use aws_sdk_kms::config::Credentials; 5 | use aws_sdk_kms::{Client, config::BehaviorVersion}; 6 | use services::{Error, Result}; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct AwsConfig { 10 | sdk_config: SdkConfig, 11 | } 12 | 13 | impl AwsConfig { 14 | pub async fn from_env() -> Self { 15 | let loader = aws_config::defaults(BehaviorVersion::latest()) 16 | .credentials_provider(DefaultCredentialsChain::builder().build().await); 17 | 18 | let loader = match std::env::var("E2E_TEST_AWS_ENDPOINT") { 19 | Ok(url) => loader.endpoint_url(url), 20 | _ => loader, 21 | }; 22 | 23 | Self { 24 | sdk_config: loader.load().await, 25 | } 26 | } 27 | 28 | #[cfg(feature = "test-helpers")] 29 | pub async fn for_testing(url: String) -> Self { 30 | let sdk_config = aws_config::defaults(BehaviorVersion::latest()) 31 | .credentials_provider(Credentials::new( 32 | "test", 33 | "test",
34 | None, 35 | None, 36 | "Static Credentials", 37 | )) 38 | .endpoint_url(url) 39 | .region(Region::new("us-east-1")) // placeholder region for test 40 | .load() 41 | .await; 42 | 43 | Self { sdk_config } 44 | } 45 | 46 | pub fn url(&self) -> Option<&str> { 47 | self.sdk_config.endpoint_url() 48 | } 49 | 50 | pub fn region(&self) -> Option<&Region> { 51 | self.sdk_config.region() 52 | } 53 | } 54 | 55 | #[derive(Clone)] 56 | pub struct AwsClient { 57 | client: Client, 58 | } 59 | 60 | impl AwsClient { 61 | pub fn new(config: AwsConfig) -> Self { 62 | let config = config.sdk_config; 63 | let client = Client::new(&config); 64 | 65 | Self { client } 66 | } 67 | 68 | pub fn inner(&self) -> &Client { 69 | &self.client 70 | } 71 | 72 | pub async fn make_signer(&self, key_arn: String) -> Result<AwsSigner> { 73 | AwsSigner::new(self.client.clone(), key_arn, None) 74 | .await 75 | .map_err(|err| Error::Other(format!("Error making aws signer: {err:?}"))) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /packages/benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # Eigenbundler Benchmarks 2 | 3 | This package contains a benchmark for measuring the performance of the Eigenbundler component for a specific use case. 4 | 5 | ## Benchmark Case 6 | 7 | The benchmark tests the following specific scenario: 8 | 9 | - Total data size: 28 MB 10 | - Block size: 4 MB (7 blocks in total) 11 | - Target compression ratio: 2.2x (highly compressible data) 12 | - Fragment size: 3.5 MB 13 | - Maximum fragments per bundle: 12 14 | - Compression level: Level6 (default) 15 | 16 | This specific test case simulates a real-world scenario where we need to bundle a significant amount of data with good compression characteristics. 17 | 18 | ## Running the Benchmark 19 | 20 | To run the benchmark: 21 | 22 | ```bash 23 | cargo bench -p benchmarks 24 | ``` 25 | 26 | ## Interpreting Results 27 | 28 | The benchmark results show: 29 | 1. Execution time for the bundling operation 30 | 2. **Throughput in blocks per second** - This is the critical metric, as it tells you how many blocks can be processed per second 31 | 3. The actual compression ratio achieved (printed to console) 32 | 33 | Since new blocks arrive at a rate of 1 per second in the production environment, the benchmark will help you determine if the bundling process can keep up with this rate. The bundle operation should show a throughput of more than 1 block per second to ensure it can handle the incoming data without falling behind. 34 | 35 | The results will help you determine: 36 | 1. Whether the bundling process can keep up with the incoming block rate (1 block/second) 37 | 2. The actual performance in terms of blocks processed per second with Level6 compression 38 | 3. Whether the target compression ratio of 2.2x is actually achieved 39 | 40 | The benchmark report is generated in HTML format and can be found in `target/criterion/eigenbundler_specific_case/`.
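For orientation, a Criterion harness for a scenario like this could be sketched as follows. This is not the benchmark shipped in `src/lib.rs`; the `compress_blocks` helper and the constant-byte test data are assumptions made purely for illustration (gzip level 6 via `flate2`, seven 4 MB blocks, throughput reported in blocks per second):

```rust
use std::io::Write;

use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use flate2::{write::GzEncoder, Compression};

// Hypothetical stand-in for the bundling step: gzip the blocks at level 6
// and return the compressed size, so the work cannot be optimized away.
fn compress_blocks(blocks: &[Vec<u8>]) -> usize {
    let mut encoder = GzEncoder::new(Vec::new(), Compression::new(6));
    for block in blocks {
        encoder
            .write_all(block)
            .expect("writing to an in-memory buffer cannot fail");
    }
    encoder.finish().expect("finishing the gzip stream").len()
}

fn bench_bundling(c: &mut Criterion) {
    // 7 blocks of 4 MB each; constant bytes keep the data highly compressible.
    let blocks: Vec<Vec<u8>> = (0..7u8).map(|i| vec![i; 4 * 1024 * 1024]).collect();

    let mut group = c.benchmark_group("bundling_sketch");
    // Each iteration processes 7 blocks, so Criterion reports blocks/sec.
    group.throughput(Throughput::Elements(7));
    group.bench_function("28MB_data_7blocks_level6", |b| {
        b.iter(|| compress_blocks(&blocks))
    });
    group.finish();
}

criterion_group!(benches, bench_bundling);
criterion_main!(benches);
```

Declaring `Throughput::Elements(7)` is what makes Criterion print the `elem/s` figures shown in the example output below, which is how the blocks-per-second requirement is checked.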
41 | 42 | ## Example Output 43 | 44 | Here's an example of what the benchmark output might look like: 45 | 46 | ``` 47 | Compression ratio: 2.24x, Processing rate: 126.83 blocks/sec 48 | 49 | eigenbundler_specific_case/28MB_data_7blocks/Level6 50 | time: [54.394 ms 56.490 ms 58.599 ms] 51 | thrpt: [119.45 elem/s 123.92 elem/s 128.69 elem/s] 52 | ``` 53 | 54 | This shows that with compression level 6, the bundler can process around 120-129 blocks per second, which is well above the requirement of 1 block per second, and achieves a compression ratio of 2.24x, which is very close to the target of 2.2x. -------------------------------------------------------------------------------- /helm/fuel-block-committer/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "fuel-block-committer.fullname" . }} 5 | labels: 6 | {{- include "fuel-block-committer.labels" . | nindent 4 }} 7 | spec: 8 | strategy: 9 | type: {{ .Values.updateStrategyType }} 10 | {{- if not .Values.autoscaling.enabled }} 11 | replicas: {{ .Values.replicaCount }} 12 | {{- end }} 13 | selector: 14 | matchLabels: 15 | {{- include "fuel-block-committer.selectorLabels" . | nindent 6 }} 16 | template: 17 | metadata: 18 | {{- with .Values.podAnnotations }} 19 | annotations: 20 | {{- toYaml . | nindent 8 }} 21 | {{- end }} 22 | labels: 23 | {{- include "fuel-block-committer.selectorLabels" . | nindent 8 }} 24 | spec: 25 | {{- with .Values.imagePullSecrets }} 26 | imagePullSecrets: 27 | {{- toYaml . | nindent 8 }} 28 | {{- end }} 29 | serviceAccountName: {{ include "fuel-block-committer.serviceAccountName" . }} 30 | securityContext: 31 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 32 | containers: 33 | - name: {{ .Chart.Name }} 34 | securityContext: 35 | {{- toYaml .Values.securityContext | nindent 12 }} 36 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 37 | imagePullPolicy: {{ .Values.image.pullPolicy }} 38 | command: [ "./fuel-block-committer" ] 39 | envFrom: 40 | - configMapRef: 41 | name: fuel-block-committer 42 | - secretRef: 43 | name: fuel-block-committer 44 | ports: 45 | - name: http 46 | containerPort: {{ .Values.service.port }} 47 | protocol: TCP 48 | livenessProbe: 49 | httpGet: 50 | path: /health 51 | port: http 52 | initialDelaySeconds: 10 53 | periodSeconds: 5 54 | timeoutSeconds: 10 55 | resources: 56 | {{- toYaml .Values.resources | nindent 12 }} 57 | {{- with .Values.nodeSelector }} 58 | nodeSelector: 59 | {{- toYaml . | nindent 8 }} 60 | {{- end }} 61 | {{- with .Values.affinity }} 62 | affinity: 63 | {{- toYaml . | nindent 8 }} 64 | {{- end }} 65 | {{- with .Values.tolerations }} 66 | tolerations: 67 | {{- toYaml . 
| nindent 8 }} 68 | {{- end }} 69 | -------------------------------------------------------------------------------- /.sqlx/query-f928d9523d9539c64ae054750ed6ed4e12b72585c80dd938f23ef1907ced3aec.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_name": "PostgreSQL", 3 | "query": "\n SELECT\n (SELECT COUNT(*) FROM l1_blob_transaction) AS size_blob_transactions,\n (SELECT COUNT(*) FROM l1_transaction_fragments) AS size_transaction_fragments,\n (SELECT COUNT(*) FROM l1_fragments) AS size_fragments,\n (SELECT COUNT(*) FROM bundles) AS size_bundles,\n (SELECT COUNT(*) FROM bundle_cost) AS size_bundle_costs,\n (SELECT COUNT(*) FROM fuel_blocks) AS size_fuel_blocks,\n (SELECT COUNT(*) FROM l1_transaction) AS size_contract_transactions,\n (SELECT COUNT(*) FROM l1_fuel_block_submission) AS size_contract_submissions,\n (SELECT COUNT(*) FROM eigen_submission) AS size_eigen_submissions,\n (SELECT COUNT(*) FROM eigen_submission_fragments) AS size_eigen_submission_fragments\n ", 4 | "describe": { 5 | "columns": [ 6 | { 7 | "ordinal": 0, 8 | "name": "size_blob_transactions", 9 | "type_info": "Int8" 10 | }, 11 | { 12 | "ordinal": 1, 13 | "name": "size_transaction_fragments", 14 | "type_info": "Int8" 15 | }, 16 | { 17 | "ordinal": 2, 18 | "name": "size_fragments", 19 | "type_info": "Int8" 20 | }, 21 | { 22 | "ordinal": 3, 23 | "name": "size_bundles", 24 | "type_info": "Int8" 25 | }, 26 | { 27 | "ordinal": 4, 28 | "name": "size_bundle_costs", 29 | "type_info": "Int8" 30 | }, 31 | { 32 | "ordinal": 5, 33 | "name": "size_fuel_blocks", 34 | "type_info": "Int8" 35 | }, 36 | { 37 | "ordinal": 6, 38 | "name": "size_contract_transactions", 39 | "type_info": "Int8" 40 | }, 41 | { 42 | "ordinal": 7, 43 | "name": "size_contract_submissions", 44 | "type_info": "Int8" 45 | }, 46 | { 47 | "ordinal": 8, 48 | "name": "size_eigen_submissions", 49 | "type_info": "Int8" 50 | }, 51 | { 52 | "ordinal": 9, 53 | "name": "size_eigen_submission_fragments", 54 | "type_info": "Int8" 55 | } 56 | ], 57 | "parameters": { 58 | "Left": [] 59 | }, 60 | "nullable": [ 61 | null, 62 | null, 63 | null, 64 | null, 65 | null, 66 | null, 67 | null, 68 | null, 69 | null, 70 | null 71 | ] 72 | }, 73 | "hash": "f928d9523d9539c64ae054750ed6ed4e12b72585c80dd938f23ef1907ced3aec" 74 | } 75 | -------------------------------------------------------------------------------- /e2e/helpers/src/fuel_node_simulated/server.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use actix_web::{App, HttpServer, guard, web}; 4 | use async_graphql::Schema; 5 | use async_graphql_actix_web::{GraphQLRequest, GraphQLResponse}; 6 | use tokio::sync::Mutex; 7 | use url::Url; 8 | 9 | use super::{ 10 | graphql::{QueryRoot, build_schema}, 11 | simulation::{AppState, SimulationConfig, produce_blocks}, 12 | }; 13 | 14 | pub struct FuelNode { 15 | shutdown_handle: Option<actix_web::dev::ServerHandle>, 16 | port: u16, 17 | config: Arc<Mutex<SimulationConfig>>, 18 | } 19 | 20 | impl FuelNode { 21 | pub fn new(port: u16, config: Arc<Mutex<SimulationConfig>>) -> Self { 22 | Self { 23 | shutdown_handle: None, 24 | port, 25 | config, 26 | } 27 | } 28 | 29 | /// Returns the GraphQL endpoint URL. 30 | pub fn url(&self) -> Url { 31 | Url::parse(&format!("http://localhost:{}/v1/graphql", self.port)).unwrap() 32 | } 33 | 34 | /// Runs the Actix‑Web server and concurrently spawns the block production loop.
35 | pub async fn run(&mut self) -> std::io::Result<()> { 36 | let initial_block_size = { self.config.lock().await.block_size }; 37 | let state = Arc::new(AppState::new(initial_block_size)); 38 | let state_clone = state.clone(); 39 | let config_clone = self.config.clone(); 40 | 41 | // Spawn block production. 42 | tokio::spawn(async move { 43 | produce_blocks(state_clone, config_clone).await; 44 | }); 45 | 46 | // Build the GraphQL schema with the application state. 47 | let schema = build_schema().data(state).finish(); 48 | 49 | let port = self.port; 50 | let server = HttpServer::new(move || { 51 | App::new().app_data(web::Data::new(schema.clone())).service( 52 | web::resource("/v1/graphql") 53 | .guard(guard::Post()) 54 | .to(graphql_handler), 55 | ) 56 | }) 57 | .bind(("0.0.0.0", port))? 58 | .run(); 59 | 60 | self.shutdown_handle = Some(server.handle()); 61 | tokio::spawn(server); 62 | 63 | Ok(()) 64 | } 65 | 66 | pub async fn stop(&mut self) { 67 | if let Some(handle) = self.shutdown_handle.take() { 68 | handle.stop(true).await; 69 | } 70 | } 71 | } 72 | 73 | async fn graphql_handler( 74 | schema: web::Data< 75 | Schema<QueryRoot, async_graphql::EmptyMutation, async_graphql::EmptySubscription>, 76 | >, 77 | req: GraphQLRequest, 78 | ) -> GraphQLResponse { 79 | schema.execute(req.into_inner()).await.into() 80 | } 81 | -------------------------------------------------------------------------------- /e2e/helpers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "e2e-helpers" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | publish.workspace = true 11 | 12 | build = "build.rs" 13 | 14 | [build-dependencies] 15 | anyhow = { workspace = true, features = ["std"] } 16 | itertools = { workspace = true, features = ["use_alloc"] } 17 | reqwest = { workspace = true, features = ["default-tls"] } 18 | tokio = { workspace = true, features = [ 19 | "macros", 20 | "rt-multi-thread", 21 | "process", 22 | "fs", 23 | "io-util", 24 | ] } 25 | walkdir = { workspace = true } 26 | zip = { workspace = true, features = ["deflate"] } 27 | 28 | [dependencies] 29 | alloy = { workspace = true, features = [ 30 | "signer-aws", 31 | "signer-mnemonic", 32 | "serde", 33 | ] } 34 | signers = { workspace = true, features = ["test-helpers"] } 35 | anyhow = { workspace = true, features = ["std"] } 36 | aws-config = { workspace = true, features = ["rustls"] } 37 | aws-sdk-kms = { workspace = true, features = ["rustls"] } 38 | eth = { workspace = true, features = ["test-helpers"] } 39 | fs_extra = { workspace = true } 40 | fuel = { workspace = true, features = ["test-helpers"] } 41 | fuel-core-chain-config = { workspace = true, features = [ 42 | "std", 43 | "test-helpers", 44 | ] } 45 | fuel-core-types = { workspace = true } 46 | futures = { workspace = true } 47 | hex = { workspace = true } 48 | humantime = { workspace = true } 49 | itertools = { workspace = true, features = ["use_alloc"] } 50 | portpicker = { workspace = true } 51 | services = { workspace = true } 52 | rand = { workspace = true, features = ["std"] } 53 | reqwest = { workspace = true } 54 | secp256k1 = { workspace = true, features = ["rand-std"] } 55 | serde = { workspace = true } 56 | serde_json = { workspace = true } 57 | storage = { workspace = true, features = ["test-helpers"] } 58 | tempfile = { workspace = true } 59 | testcontainers = { workspace = true, features = ["watchdog"] } 60 | tokio =
{ workspace = true, features = [ 61 | "macros", 62 | "rt-multi-thread", 63 | "process", 64 | "fs", 65 | ] } 66 | url = { workspace = true } 67 | async-graphql = { workspace = true, optional = true } 68 | async-graphql-actix-web = { workspace = true, optional = true } 69 | actix-web = { workspace = true, optional = true } 70 | base64 = { workspace = true } 71 | 72 | [dev-dependencies] 73 | e2e-helpers = { workspace = true, features = ["fuel-core-simulated"] } 74 | 75 | [features] 76 | fuel-core-simulated = [ 77 | "dep:async-graphql", 78 | "dep:async-graphql-actix-web", 79 | "dep:actix-web", 80 | "rand/small_rng", 81 | ] 82 | -------------------------------------------------------------------------------- /packages/adapters/storage/src/mappings/eigen_tables.rs: -------------------------------------------------------------------------------- 1 | use services::types::{DateTime, DispersalStatus, Utc}; 2 | 3 | #[derive(sqlx::FromRow)] 4 | pub struct EigenDASubmission { 5 | pub id: i32, 6 | pub request_id: Vec<u8>, 7 | pub created_at: Option<DateTime<Utc>>, 8 | pub status: i16, 9 | } 10 | 11 | macro_rules! bail { 12 | ($msg: literal, $($args: expr),*) => { 13 | return Err($crate::error::Error::Conversion(format!($msg, $($args),*))) 14 | }; 15 | } 16 | 17 | impl EigenDASubmission { 18 | pub fn parse_status(&self) -> Result<DispersalStatus, crate::error::Error> { 19 | match self.status { 20 | 0 => Ok(DispersalStatus::Processing), 21 | 1 => Ok(DispersalStatus::Finalized), 22 | 2 => Ok(DispersalStatus::Failed), 23 | 3 => Ok(DispersalStatus::Confirmed), 24 | _ => { 25 | bail!( 26 | "EigenDASubmission(id={}) has invalid status {}", 27 | self.id, 28 | self.status 29 | ) 30 | } 31 | } 32 | } 33 | } 34 | 35 | impl TryFrom<EigenDASubmission> for services::types::EigenDASubmission { 36 | type Error = crate::error::Error; 37 | 38 | fn try_from(value: EigenDASubmission) -> Result<Self, Self::Error> { 39 | let status = value.parse_status()?; 40 | 41 | let id = value.id.try_into().map_err(|_| { 42 | Self::Error::Conversion(format!( 43 | "Could not convert `id` to u64.
Got: {} from db", 44 | value.id 45 | )) 46 | })?; 47 | 48 | Ok(Self { 49 | id: Some(id), 50 | request_id: value.request_id, 51 | status, 52 | created_at: value.created_at, 53 | }) 54 | } 55 | } 56 | 57 | pub enum SubmissionStatus { 58 | Processing, 59 | Confirmed, 60 | Finalized, 61 | Failed, 62 | } 63 | 64 | impl From<DispersalStatus> for SubmissionStatus { 65 | fn from(status: DispersalStatus) -> Self { 66 | match status { 67 | DispersalStatus::Processing => SubmissionStatus::Processing, 68 | DispersalStatus::Confirmed => SubmissionStatus::Confirmed, 69 | DispersalStatus::Finalized => SubmissionStatus::Finalized, 70 | DispersalStatus::Failed | DispersalStatus::Other(_) => SubmissionStatus::Failed, 71 | } 72 | } 73 | } 74 | 75 | impl From<SubmissionStatus> for i16 { 76 | fn from(status: SubmissionStatus) -> Self { 77 | match status { 78 | SubmissionStatus::Processing => 0, 79 | SubmissionStatus::Finalized => 1, 80 | SubmissionStatus::Failed => 2, 81 | SubmissionStatus::Confirmed => 3, 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /committer/src/errors.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | 3 | use actix_web::ResponseError; 4 | use tokio::task::JoinError; 5 | 6 | #[derive(thiserror::Error, Debug)] 7 | pub enum Error { 8 | #[error("{0}")] 9 | Other(String), 10 | #[error("Network error: {0}")] 11 | Network(String), 12 | #[error("Storage error: {0}")] 13 | Storage(String), 14 | #[error("Block validation error: {0}")] 15 | BlockValidation(String), 16 | #[error("Bundler error: {0}")] 17 | Bundler(String), 18 | } 19 | 20 | pub trait WithContext<T> { 21 | fn with_context<C, F>(self, context: F) -> Result<T> 22 | where 23 | C: Display + Send + Sync + 'static, 24 | F: FnOnce() -> C; 25 | } 26 | 27 | impl From<serde_json::Error> for Error { 28 | fn from(error: serde_json::Error) -> Self { 29 | Self::Other(error.to_string()) 30 | } 31 | } 32 | 33 | impl From<std::io::Error> for Error { 34 | fn from(error: std::io::Error) -> Self { 35 | Self::Other(error.to_string()) 36 | } 37 | } 38 | 39 | impl From<JoinError> for Error { 40 | fn from(error: JoinError) -> Self { 41 | Self::Other(error.to_string()) 42 | } 43 | } 44 | 45 | impl From<services::Error> for Error { 46 | fn from(error: services::Error) -> Self { 47 | match error { 48 | services::Error::Network(e) => Self::Network(e), 49 | services::Error::Storage(e) => Self::Storage(e), 50 | services::Error::BlockValidation(e) => Self::BlockValidation(e), 51 | services::Error::Bundler(e) => Self::Bundler(e), 52 | services::Error::Other(e) => Self::Other(e.to_string()), 53 | } 54 | } 55 | } 56 | 57 | impl From<config::ConfigError> for Error { 58 | fn from(error: config::ConfigError) -> Self { 59 | Self::Other(error.to_string()) 60 | } 61 | } 62 | 63 | impl ResponseError for Error {} 64 | 65 | pub type Result<T> = std::result::Result<T, Error>; 66 | 67 | impl<T> WithContext<T> for Result<T> { 68 | fn with_context<C, F>(self, context: F) -> Result<T> 69 | where 70 | C: Display + Send + Sync + 'static, 71 | F: FnOnce() -> C, 72 | { 73 | if let Err(err) = self { 74 | let new_err = match err { 75 | Error::Other(e) => Error::Other(format!("{}: {}", context(), e)), 76 | Error::Network(e) => Error::Network(format!("{}: {}", context(), e)), 77 | Error::Storage(e) => Error::Storage(format!("{}: {}", context(), e)), 78 | Error::BlockValidation(e) => { 79 | Error::BlockValidation(format!("{}: {}", context(), e)) 80 | } 81 | Error::Bundler(e) => Error::Bundler(format!("{}: {}", context(), e)), 82 | }; 83 | Err(new_err) 84 | } else { 85 | self 86 | } 87 | } 88 | } 89 | 
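// Editor's example (not in the original file): how call sites use `WithContext` to
// prefix an error message while keeping its variant. The `load_config` helper below
// is hypothetical.
#[cfg(test)]
mod with_context_example {
    use super::{Error, Result, WithContext};

    fn load_config(path: &str) -> Result<String> {
        std::fs::read_to_string(path).map_err(Error::from)
    }

    #[test]
    fn context_is_prepended_to_the_message() {
        let err = load_config("/definitely/missing.toml")
            .with_context(|| "reading committer config")
            .unwrap_err();

        // The variant stays `Error::Other`; only the message gains the prefix.
        assert!(err.to_string().starts_with("reading committer config:"));
    }
}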
-------------------------------------------------------------------------------- /packages/adapters/eth/src/http.rs: -------------------------------------------------------------------------------- 1 | use std::ops::RangeInclusive; 2 | 3 | use alloy::providers::{ 4 | Provider as AlloyProvider, ProviderBuilder, RootProvider, 5 | fillers::{BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller}, 6 | }; 7 | use services::{ 8 | fees::SequentialBlockFees, 9 | types::{DateTime, Utc}, 10 | }; 11 | use tracing::info; 12 | 13 | use crate::fee_api_helpers::batch_requests; 14 | 15 | type InnerProvider = FillProvider< 16 | JoinFill< 17 | alloy::providers::Identity, 18 | JoinFill<GasFiller, JoinFill<BlobGasFiller, JoinFill<NonceFiller, ChainIdFiller>>>, 19 | >, 20 | RootProvider, 21 | >; 22 | 23 | #[derive(Debug, Clone)] 24 | pub struct Provider { 25 | pub(crate) provider: InnerProvider, 26 | } 27 | 28 | impl Provider { 29 | pub fn new(url: &str) -> crate::Result<Self> { 30 | let url = url 31 | .parse() 32 | .map_err(|e| crate::error::Error::Other(format!("invalid url: {url}: {e}")))?; 33 | let provider = ProviderBuilder::new().connect_http(url); 34 | 35 | Ok(Self { provider }) 36 | } 37 | } 38 | 39 | impl Provider { 40 | pub async fn get_block_time(&self, block_num: u64) -> crate::Result<Option<DateTime<Utc>>> { 41 | let block = self 42 | .provider 43 | .get_block_by_number(alloy::eips::BlockNumberOrTag::Number(block_num)) 44 | .await 45 | .map_err(|e| { 46 | crate::error::Error::Other(format!("failed to get block by number: {e}")) 47 | })?; 48 | 49 | let time = block.and_then(|block| { 50 | let timestamp = block.header.timestamp; 51 | DateTime::<Utc>::from_timestamp(timestamp as i64, 0) 52 | }); 53 | 54 | Ok(time) 55 | } 56 | } 57 | impl services::fees::Api for Provider { 58 | async fn fees(&self, height_range: RangeInclusive<u64>) -> crate::Result<SequentialBlockFees> { 59 | info!("Fetching fees for range: {:?}", height_range); 60 | batch_requests(height_range, |sub_range, percentiles| async move { 61 | let last_block = *sub_range.end(); 62 | let block_count = sub_range.count() as u64; 63 | let fees = self 64 | .provider 65 | .get_fee_history( 66 | block_count, 67 | alloy::eips::BlockNumberOrTag::Number(last_block), 68 | percentiles, 69 | ) 70 | .await 71 | .map_err(|e| services::Error::Network(format!("failed to get fee history: {e}")))?; 72 | 73 | Ok(fees) 74 | }) 75 | .await 76 | } 77 | async fn current_height(&self) -> crate::Result<u64> { 78 | self.provider 79 | .get_block_number() 80 | .await 81 | .map_err(|e| services::Error::Network(format!("failed to get block number: {e}"))) 82 | } 83 | } 84 |
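// Editor's example (not in the original file): constructing the HTTP provider and
// reading a block timestamp. The RPC URL is a placeholder and the test assumes a
// `tokio` dev-dependency, so it is marked ignored.
#[cfg(test)]
mod provider_example {
    use super::Provider;

    #[tokio::test]
    #[ignore = "requires a reachable Ethereum RPC endpoint"]
    async fn reads_genesis_timestamp() -> crate::Result<()> {
        let provider = Provider::new("https://eth.llamarpc.com")?;

        // `get_block_time` returns `None` when the node does not know the block.
        let genesis_time = provider.get_block_time(0).await?;
        println!("genesis timestamp: {genesis_time:?}");

        Ok(())
    }
}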
-------------------------------------------------------------------------------- /packages/adapters/clock/src/lib.rs: -------------------------------------------------------------------------------- 1 | use services::types::{DateTime, Utc}; 2 | 3 | pub struct SystemClock; 4 | 5 | macro_rules! impl_port { 6 | ($($service:ident),+) => { 7 | $( 8 | impl services::$service::port::Clock for SystemClock { 9 | fn now(&self) -> DateTime<Utc> { 10 | Utc::now() 11 | } 12 | } 13 | 14 | #[cfg(feature = "test-helpers")] 15 | impl services::$service::port::Clock for TestClock { 16 | fn now(&self) -> DateTime<Utc> { 17 | self.now() 18 | } 19 | } 20 | )* 21 | }; 22 | } 23 | 24 | impl_port!( 25 | state_pruner, 26 | state_listener, 27 | state_committer, 28 | block_bundler, 29 | block_committer 30 | ); 31 | 32 | #[cfg(feature = "test-helpers")] 33 | mod test_helpers { 34 | use std::{ 35 | sync::{Arc, atomic::AtomicI64}, 36 | time::Duration, 37 | }; 38 | 39 | use services::types::{DateTime, Utc}; 40 | 41 | #[derive(Default, Clone)] 42 | pub struct TestClock { 43 | epoch_millis: Arc<AtomicI64>, 44 | } 45 | 46 | impl TestClock { 47 | pub fn new(time: DateTime<Utc>) -> Self { 48 | Self { 49 | epoch_millis: Arc::new(AtomicI64::new(time.timestamp_millis())), 50 | } 51 | } 52 | 53 | pub fn now(&self) -> DateTime<Utc> { 54 | DateTime::<Utc>::from_timestamp_millis( 55 | self.epoch_millis.load(std::sync::atomic::Ordering::Relaxed), 56 | ) 57 | .expect("DateTime to be in range") 58 | } 59 | 60 | pub fn advance_time(&self, adv: Duration) { 61 | let new_time = self.now() + adv; 62 | self.epoch_millis.store( 63 | new_time.timestamp_millis(), 64 | std::sync::atomic::Ordering::Relaxed, 65 | ) 66 | } 67 | pub fn set_time(&self, new_time: DateTime<Utc>) { 68 | self.epoch_millis.store( 69 | new_time.timestamp_millis(), 70 | std::sync::atomic::Ordering::Relaxed, 71 | ) 72 | } 73 | } 74 | } 75 | 76 | #[cfg(feature = "test-helpers")] 77 | pub use test_helpers::TestClock; 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | use std::time::Duration; 82 | 83 | use crate::TestClock; 84 | 85 | #[tokio::test] 86 | async fn can_advance_clock() { 87 | // given 88 | let test_clock = TestClock::default(); 89 | let starting_time = test_clock.now(); 90 | let adv = Duration::from_secs(1); 91 | 92 | // when 93 | test_clock.advance_time(adv); 94 | 95 | // then 96 | let new_time = starting_time + adv; 97 | assert_eq!(test_clock.now(), new_time); 98 | } 99 | } 100 |
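// For reference (editor's note, not in the original file): for every service listed
// in the `impl_port!` invocation above, the macro expands to a pair of impls along
// these lines, shown here for `state_committer`:
//
// impl services::state_committer::port::Clock for SystemClock {
//     fn now(&self) -> DateTime<Utc> {
//         Utc::now()
//     }
// }
//
// plus the equivalent `TestClock` impl behind the `test-helpers` feature, which
// forwards to the inherent `TestClock::now` defined in the `test_helpers` module above.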
.field("known_to_be_optimal", &self.known_to_be_optimal) 39 | .field("optimization_attempts", &self.optimization_attempts) 40 | .field("block_num_upper_limit", &self.block_num_upper_limit) 41 | .field("gas_usage", &self.gas_usage) 42 | .field( 43 | "compressed_data_size", 44 | &ByteSize(self.compressed_data_size.get() as u64), 45 | ) 46 | .field( 47 | "uncompressed_data_size", 48 | &ByteSize(self.uncompressed_data_size.get() as u64), 49 | ) 50 | .field("compression_ratio", &self.compression_ratio()) 51 | .field("num_fragments", &self.num_fragments.get()) 52 | .finish() 53 | } 54 | } 55 | 56 | #[derive(Debug, Clone, PartialEq, Eq)] 57 | pub struct BundleProposal { 58 | pub fragments: NonEmpty, 59 | pub metadata: Metadata, 60 | } 61 | 62 | #[trait_variant::make(Send)] 63 | #[cfg_attr(feature = "test-helpers", mockall::automock)] 64 | pub trait Bundle { 65 | /// Attempts to advance the bundler by trying out a new bundle configuration. 66 | /// 67 | /// Returns `true` if there are more configurations to process, or `false` otherwise. 68 | async fn advance(&mut self, num_concurrent: NonZeroUsize) -> Result; 69 | 70 | /// Finalizes the bundling process by selecting the best bundle based on current gas prices. 71 | /// 72 | /// Consumes the bundler. 73 | async fn finish(self) -> Result; 74 | } 75 | 76 | #[trait_variant::make(Send)] 77 | pub trait BundlerFactory { 78 | type Bundler: Bundle + Send + Sync; 79 | async fn build(&self, blocks: SequentialFuelBlocks, id: NonNegative) -> Self::Bundler; 80 | } 81 | -------------------------------------------------------------------------------- /packages/services/src/types/serial_id.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | 3 | #[derive(Debug, Clone)] 4 | pub struct InvalidConversion { 5 | pub message: String, 6 | } 7 | 8 | impl std::fmt::Display for InvalidConversion { 9 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 10 | write!(f, "Invalid numeric id: {}", self.message) 11 | } 12 | } 13 | 14 | impl std::error::Error for InvalidConversion {} 15 | 16 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 17 | pub struct NonNegative { 18 | val: NUM, 19 | } 20 | 21 | impl NonNegative { 22 | pub fn get(&self) -> NUM { 23 | self.val 24 | } 25 | } 26 | 27 | impl Display for NonNegative { 28 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 29 | self.val.fmt(f) 30 | } 31 | } 32 | 33 | impl Default for NonNegative { 34 | fn default() -> Self { 35 | Self { 36 | val: Default::default(), 37 | } 38 | } 39 | } 40 | 41 | impl NonNegative { 42 | pub fn as_u32(&self) -> u32 { 43 | self.val as u32 44 | } 45 | 46 | pub fn as_i32(&self) -> i32 { 47 | self.val 48 | } 49 | } 50 | 51 | impl From for NonNegative { 52 | fn from(value: u16) -> Self { 53 | Self { 54 | val: i32::from(value), 55 | } 56 | } 57 | } 58 | 59 | impl NonNegative { 60 | pub fn as_u64(&self) -> u64 { 61 | self.val as u64 62 | } 63 | 64 | pub fn as_i64(&self) -> i64 { 65 | self.val 66 | } 67 | } 68 | 69 | impl From for NonNegative { 70 | fn from(value: u32) -> Self { 71 | Self { 72 | val: i64::from(value), 73 | } 74 | } 75 | } 76 | 77 | impl TryFrom for NonNegative { 78 | type Error = InvalidConversion; 79 | 80 | fn try_from(value: i32) -> Result { 81 | if value < 0 { 82 | return Err(InvalidConversion { 83 | message: format!("{value} is negative"), 84 | }); 85 | } 86 | Ok(Self { val: value }) 87 | } 88 | } 89 | 90 | impl TryFrom for NonNegative { 91 | type Error = InvalidConversion; 
-------------------------------------------------------------------------------- /packages/services/src/types/serial_id.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | 3 | #[derive(Debug, Clone)] 4 | pub struct InvalidConversion { 5 | pub message: String, 6 | } 7 | 8 | impl std::fmt::Display for InvalidConversion { 9 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 10 | write!(f, "Invalid numeric id: {}", self.message) 11 | } 12 | } 13 | 14 | impl std::error::Error for InvalidConversion {} 15 | 16 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 17 | pub struct NonNegative<NUM> { 18 | val: NUM, 19 | } 20 | 21 | impl<NUM: Copy> NonNegative<NUM> { 22 | pub fn get(&self) -> NUM { 23 | self.val 24 | } 25 | } 26 | 27 | impl<NUM: Display> Display for NonNegative<NUM> { 28 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 29 | self.val.fmt(f) 30 | } 31 | } 32 | 33 | impl<NUM: Default> Default for NonNegative<NUM> { 34 | fn default() -> Self { 35 | Self { 36 | val: Default::default(), 37 | } 38 | } 39 | } 40 | 41 | impl NonNegative<i32> { 42 | pub fn as_u32(&self) -> u32 { 43 | self.val as u32 44 | } 45 | 46 | pub fn as_i32(&self) -> i32 { 47 | self.val 48 | } 49 | } 50 | 51 | impl From<u16> for NonNegative<i32> { 52 | fn from(value: u16) -> Self { 53 | Self { 54 | val: i32::from(value), 55 | } 56 | } 57 | } 58 | 59 | impl NonNegative<i64> { 60 | pub fn as_u64(&self) -> u64 { 61 | self.val as u64 62 | } 63 | 64 | pub fn as_i64(&self) -> i64 { 65 | self.val 66 | } 67 | } 68 | 69 | impl From<u32> for NonNegative<i64> { 70 | fn from(value: u32) -> Self { 71 | Self { 72 | val: i64::from(value), 73 | } 74 | } 75 | } 76 | 77 | impl TryFrom<i32> for NonNegative<i32> { 78 | type Error = InvalidConversion; 79 | 80 | fn try_from(value: i32) -> Result<Self, Self::Error> { 81 | if value < 0 { 82 | return Err(InvalidConversion { 83 | message: format!("{value} is negative"), 84 | }); 85 | } 86 | Ok(Self { val: value }) 87 | } 88 | } 89 | 90 | impl TryFrom<i64> for NonNegative<i64> { 91 | type Error = InvalidConversion; 92 | fn try_from(id: i64) -> Result<Self, Self::Error> { 93 | if id < 0 { 94 | return Err(InvalidConversion { 95 | message: format!("{id} is negative"), 96 | }); 97 | } 98 | Ok(Self { val: id }) 99 | } 100 | } 101 | 102 | impl TryFrom<u32> for NonNegative<i32> { 103 | type Error = InvalidConversion; 104 | fn try_from(id: u32) -> Result<Self, Self::Error> { 105 | if id > i32::MAX as u32 { 106 | return Err(InvalidConversion { 107 | message: format!("{id} is too large for i32"), 108 | }); 109 | } 110 | Ok(Self { val: id as i32 }) 111 | } 112 | } 113 | 114 | #[cfg(feature = "test-helpers")] 115 | impl rand::distributions::Distribution<NonNegative<i32>> for rand::distributions::Standard { 116 | fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> NonNegative<i32> { 117 | NonNegative::try_from(rng.gen_range(0..=i32::MAX)).expect("to generate a non-negative i32") 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /e2e/helpers/src/bin/start_committer.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use std::time::Duration; 3 | use tokio::time::sleep; 4 | use url::Url; 5 | 6 | use e2e_helpers::{ 7 | committer::Committer, 8 | eth_node::{EthNode, EthNodeProcess}, 9 | kms::{Kms, KmsProcess}, 10 | whole_stack::{create_and_fund_kms_signers, deploy_contract}, 11 | }; 12 | 13 | #[tokio::main] 14 | async fn main() -> Result<()> { 15 | let kms = start_kms(false).await?; 16 | 17 | let eth_node = start_eth(false).await?; 18 | let eth_signers = create_and_fund_kms_signers(&kms, &eth_node).await?; 19 | let eigen_key = std::env::var("EIGEN_KEY").expect("EIGEN_KEY environment variable must be set"); 20 | 21 | let request_timeout = Duration::from_secs(5); 22 | let max_fee = 1_000_000_000_000; 23 | 24 | let (_contract_args, deployed_contract) = 25 | deploy_contract(&eth_node, eth_signers.clone(), max_fee, request_timeout).await?; 26 | 27 | let db = start_db().await?; 28 | 29 | let fuel_node_url = Url::parse("http://localhost:4000").unwrap(); 30 | 31 | let _committer = { 32 | let committer_builder = Committer::default() 33 | .with_show_logs(true) 34 | .with_eth_rpc((eth_node).ws_url()) 35 | .with_fuel_rpc(fuel_node_url) 36 | .with_db_port(db.port()) 37 | .with_db_name(db.db_name()) 38 | .with_state_contract_address(deployed_contract.address()) 39 | .with_main_key_arn(eth_signers.main.key_id) 40 | .with_kms_url(eth_signers.main.url) 41 | .with_bundle_accumulation_timeout("3600s".to_owned()) 42 | .with_block_bytes_to_accumulate("3 MB".to_string()) 43 | .with_bundle_optimization_timeout("60s".to_owned()) 44 | .with_bundle_block_height_lookback("8500".to_owned()) 45 | .with_bundle_optimization_step("100".to_owned()) 46 | .with_bundle_fragments_to_accumulate("3".to_owned()) 47 | .with_bundle_fragment_accumulation_timeout("10m".to_owned()) 48 | .with_new_bundle_check_interval("3s".to_owned()) 49 | .with_state_pruner_retention("1s".to_owned()) 50 | .with_state_pruner_run_interval("30s".to_owned()) 51 | .with_alt_da_key(eigen_key) 52 | .with_da_fee_check_interval("30s".to_owned()) 53 | .with_da_layer_polling_interval("2s".to_owned()) 54 | .with_da_layer_api_throughput(16777216); 55 | 56 | committer_builder.start().await?
57 | }; 58 | 59 | println!("Setup complete!"); 60 | println!("Ethereum node WS URL: {}", eth_node.ws_url()); 61 | println!("Contract address: {}", deployed_contract.address()); 62 | println!("DB Port: {}", db.port()); 63 | println!("DB Name: {}", db.db_name()); 64 | 65 | // keep the process running 66 | loop { 67 | sleep(Duration::from_secs(1)).await; 68 | } 69 | } 70 | 71 | async fn start_kms(logs: bool) -> Result<KmsProcess> { 72 | Kms::default().with_show_logs(logs).start().await 73 | } 74 | 75 | async fn start_eth(logs: bool) -> Result<EthNodeProcess> { 76 | EthNode::default().with_show_logs(logs).start().await 77 | } 78 | 79 | async fn start_db() -> Result { 80 | storage::PostgresProcess::shared() 81 | .await? 82 | .create_random_db() 83 | .await 84 | .map_err(|e| anyhow::anyhow!("{e}")) 85 | } 86 | -------------------------------------------------------------------------------- /packages/services/src/fee_metrics_tracker/ethereum_da.rs: -------------------------------------------------------------------------------- 1 | pub mod service { 2 | use std::{num::NonZeroU64, ops::RangeInclusive}; 3 | 4 | use metrics::{ 5 | RegistersMetrics, 6 | prometheus::{IntGauge, Opts, core::Collector}, 7 | }; 8 | 9 | use crate::{ 10 | Result, Runner, 11 | fees::{Api, Fees}, 12 | }; 13 | 14 | #[derive(Debug, Clone)] 15 | struct FeeMetrics { 16 | current: IntGauge, 17 | } 18 | 19 | impl Default for FeeMetrics { 20 | fn default() -> Self { 21 | let current = IntGauge::with_opts(Opts::new( 22 | "current_blob_tx_fee", 23 | "The current fee for a transaction with 6 blobs", 24 | )) 25 | .expect("metric config to be correct"); 26 | 27 | Self { current } 28 | } 29 | } 30 | 31 | impl
<P> RegistersMetrics for FeeMetricsTracker<P> { 32 | fn metrics(&self) -> Vec<Box<dyn Collector>> { 33 | vec![Box::new(self.metrics.current.clone())] 34 | } 35 | } 36 | 37 | #[derive(Clone)] 38 | pub struct FeeMetricsTracker<P> { 39 | fee_provider: P, 40 | metrics: FeeMetrics, 41 | } 42 | 43 | pub fn calculate_blob_tx_fee(num_blobs: u32, fees: &Fees) -> u128 { 44 | const DATA_GAS_PER_BLOB: u128 = 131_072u128; 45 | const INTRINSIC_GAS: u128 = 21_000u128; 46 | 47 | let base_fee = INTRINSIC_GAS.saturating_mul(fees.base_fee_per_gas); 48 | let blob_fee = fees 49 | .base_fee_per_blob_gas 50 | .saturating_mul(u128::from(num_blobs)) 51 | .saturating_mul(DATA_GAS_PER_BLOB); 52 | let reward_fee = fees.reward.saturating_mul(INTRINSIC_GAS); 53 | 54 | base_fee.saturating_add(blob_fee).saturating_add(reward_fee) 55 | } 56 | 57 | const fn last_n_blocks(current_block: u64, n: NonZeroU64) -> RangeInclusive<u64> { 58 | current_block.saturating_sub(n.get().saturating_sub(1))..=current_block 59 | } 60 |
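// Worked example (editor's addition, not in the original file): with a 10 gwei base
// fee, a 1 gwei blob gas price, and a 2 gwei priority reward, a 6-blob transaction
// costs 21_000 * 10 (execution) + 6 * 131_072 * 1 (blob data) + 21_000 * 2 (tip)
// = 1_038_432 gwei, roughly 0.001 ETH. Assumes `Fees` carries exactly the three
// fields used by `calculate_blob_tx_fee`.
#[cfg(test)]
mod blob_fee_example {
    use super::calculate_blob_tx_fee;
    use crate::fees::Fees;

    #[test]
    fn six_blob_fee_matches_hand_computation() {
        let gwei = 1_000_000_000u128; // all prices below are in wei
        let fees = Fees {
            base_fee_per_gas: 10 * gwei,
            reward: 2 * gwei,
            base_fee_per_blob_gas: gwei,
        };

        let expected = (21_000 * 10 + 6 * 131_072 + 21_000 * 2) * gwei;
        assert_eq!(calculate_blob_tx_fee(6, &fees), expected);
    }
}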
61 | impl<P> FeeMetricsTracker<P> { 62 | pub fn new(fee_provider: P) -> Self { 63 | Self { 64 | fee_provider, 65 | metrics: FeeMetrics::default(), 66 | } 67 | } 68 | } 69 | 70 | impl<P: Api> FeeMetricsTracker<P> { 71 | pub async fn update_metrics(&self) -> Result<()> { 72 | let current_block = self.fee_provider.current_height().await?; 73 | let tx_fees_for_last_n_blocks = |n| async move { 74 | let fees = self 75 | .fee_provider 76 | .fees(last_n_blocks(current_block, n)) 77 | .await? 78 | .mean(); 79 | 80 | Result::Ok(i64::try_from(calculate_blob_tx_fee(6, &fees)).unwrap_or(i64::MAX)) 81 | }; 82 | 83 | let current = tx_fees_for_last_n_blocks(1.try_into().expect("not zero")).await?; 84 | 85 | self.metrics.current.set(current); 86 | 87 | Ok(()) 88 | } 89 | } 90 | 91 | impl<P> Runner for FeeMetricsTracker<P>
92 | where 93 | P: Api + Send + Sync, 94 | { 95 | async fn run(&mut self) -> Result<()> { 96 | self.update_metrics().await?; 97 | Ok(()) 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /committer/src/main.rs: -------------------------------------------------------------------------------- 1 | mod api; 2 | mod config; 3 | mod errors; 4 | mod setup; 5 | 6 | use api::launch_api_server; 7 | use eigenda::EigenDAClient; 8 | use errors::{Result, WithContext}; 9 | use metrics::prometheus::Registry; 10 | use setup::last_finalization_metric; 11 | use tokio_util::sync::CancellationToken; 12 | 13 | use crate::setup::shut_down; 14 | 15 | pub type L1 = eth::WebsocketClient; 16 | pub type Database = storage::Postgres; 17 | pub type FuelApi = fuel::HttpClient; 18 | pub type EigenDA = EigenDAClient; 19 | 20 | #[tokio::main] 21 | async fn main() -> Result<()> { 22 | setup::logger(); 23 | 24 | let config = config::parse().with_context(|| "failed to parse config")?; 25 | config 26 | .validate() 27 | .with_context(|| "config validation failed")?; 28 | 29 | let metrics_registry = Registry::default(); 30 | 31 | let finalization_metric = last_finalization_metric(); 32 | let storage = setup::storage(&config, &metrics_registry, &finalization_metric) 33 | .await 34 | .with_context(|| "failed to connect to database")?; 35 | 36 | let internal_config = config::Internal::default(); 37 | let cancel_token = CancellationToken::new(); 38 | 39 | let (fuel_adapter, fuel_health_check) = 40 | setup::fuel_adapter(&config, &internal_config, &metrics_registry); 41 | 42 | let (ethereum_rpc, eth_health_check) = 43 | setup::l1_adapter(&config, &internal_config, &metrics_registry) 44 | .await 45 | .with_context(|| "could not setup l1 adapter")?; 46 | 47 | let wallet_balance_tracker_handle = setup::wallet_balance_tracker( 48 | &internal_config, 49 | &metrics_registry, 50 | ethereum_rpc.clone(), 51 | cancel_token.clone(), 52 | ); 53 | 54 | let committer_handle = setup::block_committer( 55 | ethereum_rpc.clone(), 56 | storage.clone(), 57 | fuel_adapter.clone(), 58 | &config, 59 | cancel_token.clone(), 60 | ); 61 | 62 | let mut handles = vec![wallet_balance_tracker_handle, committer_handle]; 63 | 64 | // If the blob pool wallet key is set, we need to start 65 | // the state committer and state importer 66 | let da_handles = if config.eth.l1_keys.blob.is_some() { 67 | setup::ethereum_da_services( 68 | fuel_adapter, 69 | ethereum_rpc, 70 | storage.clone(), 71 | cancel_token.clone(), 72 | &config, 73 | &internal_config, 74 | &metrics_registry, 75 | )? 76 | } else if config.da_layer.is_some() { 77 | setup::eigen_da_services( 78 | fuel_adapter, 79 | storage.clone(), 80 | cancel_token.clone(), 81 | &config, 82 | &internal_config, 83 | &metrics_registry, 84 | ) 85 | .await? 
86 | } else { 87 | vec![] 88 | }; 89 | 90 | handles.extend(da_handles); 91 | 92 | launch_api_server( 93 | &config, 94 | &internal_config, 95 | metrics_registry, 96 | storage.clone(), 97 | fuel_health_check, 98 | eth_health_check, 99 | ) 100 | .await 101 | .with_context(|| "api server")?; 102 | 103 | shut_down(cancel_token, handles, storage).await 104 | } 105 | 106 | #[cfg(test)] 107 | mod tests { 108 | // used in the harness 109 | use anyhow as _; 110 | } 111 | -------------------------------------------------------------------------------- /fee_algo_simulation/src/models.rs: -------------------------------------------------------------------------------- 1 | use std::num::{NonZeroU32, NonZeroU64}; 2 | 3 | use anyhow::Context; 4 | use serde::{Deserialize, Serialize}; 5 | use services::{ 6 | fees::FeesAtHeight, 7 | state_committer::{AlgoConfig, FeeMultiplierRange, FeeThresholds, SmaPeriods}, 8 | }; 9 | 10 | pub const URL: &str = "https://eth.llamarpc.com"; 11 | 12 | /// Structure for saving fees to cache. 13 | #[derive(Debug, Serialize, Deserialize, Default)] 14 | pub struct SavedFees { 15 | pub fees: Vec<FeesAtHeight>, 16 | } 17 | 18 | /// Query parameters for the `/fees` endpoint. 19 | #[derive(Clone, Debug, Deserialize)] 20 | pub struct FeeParams { 21 | pub ending_height: Option<u64>, 22 | pub amount_of_blocks: u64, 23 | pub short: u64, 24 | pub long: u64, 25 | pub max_l2_blocks_behind: u32, 26 | pub start_max_fee_multiplier: f64, 27 | pub end_max_fee_multiplier: f64, 28 | pub always_acceptable_fee: String, 29 | pub num_blobs: u32, 30 | pub num_l2_blocks_behind: u32, 31 | } 32 | 33 | impl TryFrom<FeeParams> for AlgoConfig { 34 | type Error = anyhow::Error; 35 | 36 | fn try_from(value: FeeParams) -> Result<Self, Self::Error> { 37 | let always_acceptable_fee = value 38 | .always_acceptable_fee 39 | .parse() 40 | .context("invalid always_acceptable_fee value")?; 41 | 42 | let short = NonZeroU64::new(value.short).context("short sma period must be non-zero")?; 43 | let long = NonZeroU64::new(value.long).context("long sma period must be non-zero")?; 44 | 45 | let sma_periods = SmaPeriods { short, long }; 46 | 47 | let max_l2_blocks_behind = NonZeroU32::new(value.max_l2_blocks_behind) 48 | .context("max_l2_blocks_behind must be non-zero")?; 49 | 50 | let multiplier_range = 51 | FeeMultiplierRange::new(value.start_max_fee_multiplier, value.end_max_fee_multiplier)?; 52 | 53 | Ok(AlgoConfig { 54 | sma_periods, 55 | fee_thresholds: FeeThresholds { 56 | max_l2_blocks_behind, 57 | multiplier_range, 58 | always_acceptable_fee, 59 | }, 60 | }) 61 | } 62 | } 63 |
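// Editor's example (not in the original file): a `FeeParams` query as it might arrive
// on the `/fees` endpoint, converted into the algorithm config. The values are
// illustrative only, and the assertions assume `AlgoConfig`'s fields are public, as
// their construction above suggests.
#[cfg(test)]
mod algo_config_example {
    use services::state_committer::AlgoConfig;

    use super::FeeParams;

    #[test]
    fn params_convert_into_algo_config() -> anyhow::Result<()> {
        let params = FeeParams {
            ending_height: None, // default to the latest block
            amount_of_blocks: 8_000,
            short: 25,
            long: 300,
            max_l2_blocks_behind: 28_800,
            start_max_fee_multiplier: 0.8,
            end_max_fee_multiplier: 1.2,
            always_acceptable_fee: "1000000000000000".to_string(), // 0.001 ETH in wei
            num_blobs: 6,
            num_l2_blocks_behind: 0,
        };

        let config = AlgoConfig::try_from(params)?;
        assert_eq!(config.sma_periods.short.get(), 25);
        assert_eq!(config.sma_periods.long.get(), 300);

        Ok(())
    }
}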
64 | /// Response struct for each fee data point. 65 | #[derive(Debug, Serialize)] 66 | pub struct FeeDataPoint { 67 | #[serde(rename = "blockHeight")] 68 | pub block_height: u64, 69 | 70 | #[serde(rename = "blockTime")] 71 | pub block_time: String, // ISO 8601 format 72 | 73 | #[serde(rename = "currentFee")] 74 | pub current_fee: String, // ETH with 4 decimal places 75 | 76 | #[serde(rename = "shortFee")] 77 | pub short_fee: String, // ETH with 4 decimal places 78 | 79 | #[serde(rename = "longFee")] 80 | pub long_fee: String, // ETH with 4 decimal places 81 | 82 | pub acceptable: bool, 83 | } 84 | 85 | #[derive(Debug, Serialize)] 86 | pub struct FeeStats { 87 | #[serde(rename = "percentageAcceptable")] 88 | pub percentage_acceptable: f64, // Percentage of acceptable blocks 89 | 90 | #[serde(rename = "percentile95GapSize")] 91 | pub percentile_95_gap_size: u64, // 95th percentile of gap sizes in blocks 92 | 93 | #[serde(rename = "longestUnacceptableStreak")] 94 | pub longest_unacceptable_streak: u64, // Longest consecutive unacceptable blocks 95 | } 96 | 97 | /// Complete response struct. 98 | #[derive(Debug, Serialize)] 99 | pub struct FeeResponse { 100 | pub data: Vec<FeeDataPoint>, 101 | pub stats: FeeStats, 102 | } 103 | -------------------------------------------------------------------------------- /e2e/helpers/src/fuel_node_simulated/graphql.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use async_graphql::{Context, InputValueError, Object, Schema, SchemaBuilder, SimpleObject, Value}; 4 | use fuel_core_types::fuel_crypto::Hasher; 5 | use hex; 6 | 7 | use super::simulation::AppState; 8 | 9 | #[derive(Debug, Clone, PartialEq, Eq)] 10 | pub struct HexString(pub String); 11 | 12 | #[async_graphql::Scalar(name = "HexString")] 13 | impl async_graphql::ScalarType for HexString { 14 | fn parse(value: Value) -> async_graphql::InputValueResult<Self> { 15 | match value { 16 | Value::String(s) => Ok(HexString(s)), 17 | _ => Err(InputValueError::expected_type(value)), 18 | } 19 | } 20 | 21 | fn to_value(&self) -> Value { 22 | Value::String(self.0.clone()) 23 | } 24 | } 25 | 26 | #[derive(Debug, Clone, PartialEq, Eq)] 27 | pub struct U32(pub u32); 28 | 29 | #[async_graphql::Scalar(name = "U32")] 30 | impl async_graphql::ScalarType for U32 { 31 | fn parse(value: Value) -> async_graphql::InputValueResult<Self> { 32 | match value { 33 | Value::Number(n) => { 34 | if let Some(v) = n.as_u64() { 35 | Ok(U32(v as u32)) 36 | } else { 37 | Err(InputValueError::custom("Invalid number")) 38 | } 39 | } 40 | Value::String(s) => s 41 | .parse::<u32>() 42 | .map(U32) 43 | .map_err(|_| InputValueError::custom("Invalid u32 string")), 44 | _ => Err(InputValueError::expected_type(value)), 45 | } 46 | } 47 | 48 | fn to_value(&self) -> Value { 49 | Value::String(self.0.to_string()) 50 | } 51 | } 52 | 53 | #[derive(SimpleObject, Clone)] 54 | #[graphql(name = "Block")] 55 | pub struct Block { 56 | pub height: U32, 57 | pub id: String, 58 | } 59 | 60 | #[derive(SimpleObject, Clone)] 61 | #[graphql(name = "DaCompressedBlock")] 62 | pub struct DaCompressedBlock { 63 | pub bytes: HexString, 64 | } 65 | 66 | #[derive(SimpleObject)] 67 | #[graphql(name = "ChainInfo")] 68 | pub struct ChainInfo { 69 | pub latest_block: Block, 70 | } 71 | 72 | pub struct QueryRoot; 73 | 74 | #[Object] 75 | impl QueryRoot { 76 | async fn chain(&self, ctx: &Context<'_>) -> ChainInfo { 77 | let state = ctx.data::<Arc<AppState>>().unwrap(); 78 | ChainInfo { 79 | latest_block: state.latest_block(), 80 | } 81 | } 82 | 83 | async fn block(&self, _ctx: &Context<'_>, height: Option<U32>) -> Option<Block> { 84 | height.map(|h| block_at_height(h.0)) 85 | } 86 | 87 | async fn da_compressed_block( 88 | &self, 89 | ctx: &Context<'_>, 90 | height: Option<U32>, 91 | ) -> Option<DaCompressedBlock> { 92 | let state = ctx.data::<Arc<AppState>>().unwrap(); 93 | Some(state.compressed_block(height.unwrap_or(U32(0)).0).await) 94 | } 95 | } 96 | 97 | pub fn block_at_height(height: u32) -> Block { 98 | Block { 99 | height: U32(height), 100 | id: id_for_height(height), 101 | } 102 | } 103 | 104 | pub fn id_for_height(height: u32) -> String { 105 | let mut hasher = Hasher::default(); 106 | hasher.input(height.to_be_bytes()); 107 | let digest = hasher.finalize(); 108 | hex::encode(*digest) 109 | } 110 | 111 | pub fn build_schema() 112 | -> SchemaBuilder<QueryRoot, async_graphql::EmptyMutation, async_graphql::EmptySubscription> { 113 | Schema::build( 114 | QueryRoot, 115 | async_graphql::EmptyMutation, 116 | async_graphql::EmptySubscription, 117 | ) 118 | } 119 |
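// Editor's example (not in the original file): block ids are deterministic, since the
// id is just the `Hasher` digest of the big-endian height, hex-encoded.
#[cfg(test)]
mod id_example {
    use super::{block_at_height, id_for_height};

    #[test]
    fn ids_are_deterministic_hex_digests() {
        assert_eq!(id_for_height(42), id_for_height(42));
        assert_eq!(block_at_height(42).id, id_for_height(42));

        // 64 hex characters encode the 32-byte digest.
        assert_eq!(id_for_height(42).len(), 64);
    }
}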
-------------------------------------------------------------------------------- /e2e/tests/tests/eigen_kms.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use anyhow::Result; 3 | use e2e_helpers::kms::Kms; 4 | use e2e_helpers::kms::KmsProcess; 5 | use k256::ecdsa::SigningKey as K256SigningKey; 6 | use rand::rngs::OsRng; 7 | use rust_eigenda_signers::Message; 8 | use rust_eigenda_signers::PublicKey; 9 | use rust_eigenda_signers::RecoverableSignature; 10 | use rust_eigenda_signers::signers::private_key::Signer as PrivateKeySigner; 11 | use rust_eigenda_v2_client::rust_eigenda_signers::Sign; 12 | use secp256k1::Secp256k1; 13 | use sha2::{Digest, Sha256}; 14 | use signers::eigen::kms::Signer; 15 | 16 | async fn setup_kms_and_signer() -> Result<(KmsProcess, PrivateKeySigner, Signer)> { 17 | let kms_proc = Kms::default().with_show_logs(false).start().await?; 18 | 19 | let k256_secret_key = k256::SecretKey::random(&mut OsRng); 20 | let k256_signing_key = K256SigningKey::from(&k256_secret_key); 21 | 22 | let secp_secret_key = secp256k1::SecretKey::from_slice(k256_secret_key.to_bytes().as_slice()) 23 | .expect("Failed to create secp256k1 secret key from k256 bytes"); 24 | let local_signer = PrivateKeySigner::new(secp_secret_key.into()); 25 | 26 | let kms_key_id = kms_proc.inject_secp256k1_key(&k256_signing_key).await?; 27 | 28 | let aws_signer = Signer::new(kms_proc.client().clone(), kms_key_id).await?; 29 | 30 | Ok((kms_proc, local_signer, aws_signer)) 31 | } 32 | 33 | fn verify_signature_recovery( 34 | rec_sig: &RecoverableSignature, 35 | message: &Message, 36 | expected_pubkey: &PublicKey, 37 | ) -> Result<()> { 38 | let secp = Secp256k1::new(); 39 | let recovery_id = secp256k1::ecdsa::RecoveryId::from_i32(rec_sig.v.to_byte() as i32) 40 | .context("Invalid recovery ID")?; 41 | let message = secp256k1::Message::from_slice(message.as_bytes())?; 42 | let sig: [u8; 64] = rec_sig.to_bytes().as_slice()[..64] 43 | .try_into() 44 | .context("Failed to convert signature to 64-byte array")?; 45 | let recoverable_sig = 46 | secp256k1::ecdsa::RecoverableSignature::from_compact(sig.as_slice(), recovery_id) 47 | .context("Failed to create recoverable signature")?; 48 | let recovered_pk = secp 49 | .recover_ecdsa(&message, &recoverable_sig) 50 | .context("Failed to recover public key")?; 51 | 52 | if &PublicKey::from(recovered_pk) == expected_pubkey { 53 | Ok(()) 54 | } else { 55 | anyhow::bail!("Recovered public key does not match expected public key") 56 | } 57 | } 58 | 59 | #[tokio::test] 60 | async fn test_kms_signer_public_key_and_address() -> Result<()> { 61 | let (_kms_proc, local_signer, aws_signer) = setup_kms_and_signer().await?; 62 | 63 | let expected_secp_pubkey = local_signer.public_key(); 64 | let actual_secp_pubkey = aws_signer.public_key(); 65 | assert_eq!( 66 | actual_secp_pubkey, expected_secp_pubkey, 67 | "Public key from AwsKmsSigner does not match the expected key from LocalSigner" 68 | ); 69 | 70 | let expected_address = local_signer.public_key().address(); 71 | let actual_address = aws_signer.public_key().address(); 72 | assert_eq!( 73 | actual_address, expected_address, 74 | "Address from AwsKmsSigner does not match the expected address from LocalSigner" 75 | ); 76 | 77 | Ok(()) 78 | } 79 | 80 | #[tokio::test] 81 | async fn test_kms_signer_sign_and_verify() -> Result<()> { 82 | let (_kms_proc, local_signer, aws_signer) = setup_kms_and_signer().await?; 83 | let test_message_bytes = b"Test message for KMS signer trait implementation"; 84 | let message_hash_array: [u8; 32] = Sha256::digest(test_message_bytes).into(); 85 | let message = Message::from(message_hash_array); 86 | 87 | let rec_sig = aws_signer 88 | .sign_digest(&message) 89 | .await 90 | .context("Signing with AwsKmsSigner failed")?; 91 | 92 | let expected_pubkey = local_signer.public_key(); 93 | 94 | verify_signature_recovery(&rec_sig, &message, &expected_pubkey) 95 | .context("Signature verification failed")?; 96 | 97 | Ok(()) 98 | } 99 |
-------------------------------------------------------------------------------- /packages/adapters/eigenda/src/codec.rs: -------------------------------------------------------------------------------- 1 | pub const BYTES_PER_SYMBOL: usize = 32; 2 | 3 | pub fn convert_by_padding_empty_byte(data: &[u8]) -> Vec<u8> { 4 | const PARSE_SIZE: usize = BYTES_PER_SYMBOL - 1; // 31 5 | 6 | let data_size = data.len(); 7 | let data_len = data_size.div_ceil(PARSE_SIZE); 8 | 9 | let mut valid_data = vec![0u8; data_len * BYTES_PER_SYMBOL]; 10 | let mut valid_end = data_len * BYTES_PER_SYMBOL; 11 | 12 | for i in 0..data_len { 13 | let start = i * PARSE_SIZE; 14 | let end = (i + 1) * PARSE_SIZE; 15 | let end = end.min(data_size); 16 | 17 | let output_start = i * BYTES_PER_SYMBOL; 18 | valid_data[output_start] = 0x00; 19 | let data_slice = &data[start..end]; 20 | let output_slice = &mut valid_data[output_start + 1..output_start + 1 + data_slice.len()]; 21 | output_slice.copy_from_slice(data_slice); 22 | 23 | if end == data_size { 24 | valid_end = output_start + 1 + (end - start); 25 | break; 26 | } 27 | } 28 | 29 | valid_data.truncate(valid_end); 30 | valid_data 31 | } 32 | 33 | #[cfg(test)] 34 | mod tests { 35 | use super::*; 36 | 37 | fn remove_empty_byte_from_padded_bytes(data: &[u8]) -> Vec<u8> { 38 | const PUT_SIZE: usize = BYTES_PER_SYMBOL - 1; // 31 39 | 40 | let data_size = data.len(); 41 | let data_len = data_size.div_ceil(BYTES_PER_SYMBOL); 42 | 43 | let mut valid_data = vec![0u8; data_len * PUT_SIZE]; 44 | let mut valid_len = data_len * PUT_SIZE; 45 | 46 | for i in 0..data_len { 47 | let start = i * BYTES_PER_SYMBOL + 1; 48 | let end = (i + 1) * BYTES_PER_SYMBOL; 49 | let end = end.min(data_size); 50 | 51 | let output_start = i * PUT_SIZE; 52 | let data_slice = &data[start..end]; 53 | let output_slice = &mut valid_data[output_start..output_start + data_slice.len()]; 54 | output_slice.copy_from_slice(data_slice); 55 | 56 | if end == data_size { 57 | valid_len = output_start + data_slice.len(); 58 | break; 59 | } 60 | } 61 | 62 | valid_data.truncate(valid_len); 63 | valid_data 64 | } 65 | 66 | #[test] 67 | fn test_round_trip() { 68 | let original = vec![0xAA; 100]; 69 | let padded = convert_by_padding_empty_byte(&original); 70 | let unpadded = remove_empty_byte_from_padded_bytes(&padded); 71 | assert_eq!(original, unpadded); 72 | } 73 | 74 | #[test] 75 | fn test_edge_cases() { 76 | let empty: Vec<u8> = vec![]; 77 | assert_eq!( 78 | empty, 79 | remove_empty_byte_from_padded_bytes(&convert_by_padding_empty_byte(&empty)) 80 | ); 81 | 82 | let small = vec![0xBB]; 83 | assert_eq!( 84 | small, 85 | remove_empty_byte_from_padded_bytes(&convert_by_padding_empty_byte(&small)) 86 | ); 87 | 88 | // exact multiple of 31 89 | let exact = vec![0xCC; 31 * 4]; 90 | assert_eq!( 91 | exact, 92 | remove_empty_byte_from_padded_bytes(&convert_by_padding_empty_byte(&exact)) 93 | ); 94 | } 95 | 96 | #[test] 97 | fn test_padding() { 98 | // test padding of 31 bytes becomes 32 bytes 99 | let input31 = vec![1; 31]; 100 | let padded = convert_by_padding_empty_byte(&input31); 101 | assert_eq!(padded.len(), 32); 102 | assert_eq!(padded[0], 0x00); 103 | assert_eq!(&padded[1..], &input31[..]); 104 | 105 | // test padding of 32 bytes becomes 34 bytes 106 | let input32 = vec![2; 32]; 107 | let padded = convert_by_padding_empty_byte(&input32); 108 | assert_eq!(padded.len(), 34); 109 | assert_eq!(padded[0], 0x00); 110 | assert_eq!(&padded[1..32], &input32[0..31]); 111 | assert_eq!(padded[32], 0x00); 112 | assert_eq!(padded[33], 2); 113 | } 114 | } 115 |
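// Editor's note (not in the original file): EigenDA interprets each 32-byte chunk as
// a bn254 field element, which must stay below the field modulus; prefixing every 31
// data bytes with a 0x00 byte guarantees that. A quick size sanity check:
#[cfg(test)]
mod size_example {
    use super::convert_by_padding_empty_byte;

    #[test]
    fn padding_adds_one_byte_per_31_byte_chunk() {
        // 100 bytes -> ceil(100 / 31) = 4 chunks -> 4 inserted zero bytes.
        assert_eq!(convert_by_padding_empty_byte(&[0xAA; 100]).len(), 104);
    }
}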
-------------------------------------------------------------------------------- /packages/adapters/signers/src/eth.rs: -------------------------------------------------------------------------------- 1 | use alloy::{ 2 | consensus::SignableTransaction, 3 | network::TxSigner, 4 | primitives::{B256, ChainId}, 5 | signers::Signature, 6 | }; 7 | use eth::Address; 8 | 9 | pub mod kms { 10 | pub use alloy::signers::aws::AwsSigner as Signer; 11 | #[cfg(feature = "test-helpers")] 12 | use alloy::{ 13 | consensus::SignableTransaction, 14 | network::TxSigner, 15 | primitives::{B256, ChainId}, 16 | signers::Signature, 17 | }; 18 | #[cfg(feature = "test-helpers")] 19 | use eth::Address; 20 | 21 | #[cfg(feature = "test-helpers")] 22 | #[derive(Clone)] 23 | pub struct TestEthKmsSigner { 24 | pub key_id: String, 25 | pub url: String, 26 | pub signer: Signer, 27 | } 28 | 29 | #[cfg(feature = "test-helpers")] 30 | #[async_trait::async_trait] 31 | impl TxSigner<Signature> for TestEthKmsSigner { 32 | fn address(&self) -> Address { 33 | self.signer.address() 34 | } 35 | 36 | async fn sign_transaction( 37 | &self, 38 | tx: &mut dyn SignableTransaction<Signature>, 39 | ) -> alloy::signers::Result<Signature> { 40 | self.signer.sign_transaction(tx).await 41 | } 42 | } 43 | 44 | #[cfg(feature = "test-helpers")] 45 | #[async_trait::async_trait] 46 | impl alloy::signers::Signer for TestEthKmsSigner { 47 | async fn sign_hash(&self, hash: &B256) -> alloy::signers::Result<Signature> { 48 | self.signer.sign_hash(hash).await 49 | } 50 | 51 | fn address(&self) -> Address { 52 | alloy::signers::Signer::<Signature>::address(&self.signer) 53 | } 54 | 55 | fn chain_id(&self) -> Option<ChainId> { 56 | self.signer.chain_id() 57 | } 58 | 59 | fn set_chain_id(&mut self, chain_id: Option<ChainId>) { 60 | self.signer.set_chain_id(chain_id) 61 | } 62 | } 63 | } 64 | pub mod private_key { 65 | pub use alloy::signers::local::PrivateKeySigner as Signer; 66 | } 67 | 68 | #[derive(Clone)] 69 | pub enum Signer { 70 | Private(private_key::Signer), 71 | Kms(kms::Signer), 72 | } 73 | 74 | #[async_trait::async_trait] 75 | impl TxSigner<Signature> for Signer { 76 | fn address(&self) -> Address { 77 | match self { 78 | Signer::Private(local_signer) => local_signer.address(), 79 | Signer::Kms(aws_signer) => aws_signer.address(), 80 | } 81 | } 82 | 83 | async fn sign_transaction( 84 | &self, 85 | tx: &mut dyn SignableTransaction<Signature>, 86 | ) -> alloy::signers::Result<Signature> { 87 | match self { 88 | Signer::Private(local_signer) => local_signer.sign_transaction(tx).await, 89 | Signer::Kms(aws_signer) => aws_signer.sign_transaction(tx).await, 90 | } 91 | } 92 | } 93 | 94 | #[async_trait::async_trait] 95 | impl alloy::signers::Signer for Signer { 96 | async fn sign_hash(&self, hash: &B256) -> alloy::signers::Result<Signature> { 97 | match self { 98 | Signer::Private(local_signer) => local_signer.sign_hash(hash).await, 99 | Signer::Kms(aws_signer) => aws_signer.sign_hash(hash).await, 100 | } 101 | } 102 | 103 | fn address(&self) -> Address { 104 | match self { 105 | Signer::Private(local_signer) => local_signer.address(), 106 | Signer::Kms(aws_signer) => alloy::signers::Signer::<Signature>::address(aws_signer), 107 | } 108 | } 109 | 110 | fn chain_id(&self) -> Option<ChainId> { 111 | match self { 112 | Signer::Private(local_signer) => local_signer.chain_id(), 113 | Signer::Kms(aws_signer) => aws_signer.chain_id(), 114 | } 115 | } 116 | 117 | fn set_chain_id(&mut self, chain_id: Option<ChainId>) { 118 | match self { 119 | Signer::Private(local_signer) => local_signer.set_chain_id(chain_id), 120 | Signer::Kms(aws_signer) => aws_signer.set_chain_id(chain_id), 121 | } 122 | } 123 | } 124 |
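// Editor's example (not in the original file): a quick check that the enum forwards
// to the wrapped signer. Assumes `PrivateKeySigner::random()` is available in the
// enabled alloy feature set.
#[cfg(test)]
mod signer_enum_example {
    use alloy::network::TxSigner;

    use super::{Signer, private_key};

    #[test]
    fn private_variant_reports_the_inner_address() {
        let inner = private_key::Signer::random();
        let expected = inner.address();

        let signer = Signer::Private(inner);
        assert_eq!(TxSigner::address(&signer), expected);
    }
}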
-------------------------------------------------------------------------------- /e2e/tests/tests/eigen_state.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use e2e_helpers::whole_stack::{ 3 | create_and_fund_kms_signers, deploy_contract, start_db, start_eigen_committer, start_eth, 4 | start_fuel_node, start_kms, 5 | }; 6 | use k256::ecdsa::SigningKey as K256SigningKey; 7 | use std::time::Duration; 8 | use tracing::info; 9 | 10 | #[tokio::test] 11 | async fn test_eigen_state() -> Result<()> { 12 | // Start required services 13 | let logs = true; 14 | let kms = start_kms(logs).await?; 15 | let eth_node = start_eth(logs).await?; 16 | let eth_signers = create_and_fund_kms_signers(&kms, &eth_node).await?; 17 | 18 | // Get Eigen key from environment and inject into KMS 19 | let eigen_key_hex = std::env::var("EIGEN_KEY") 20 | .expect("EIGEN_KEY environment variable must be set for Eigen tests"); 21 | 22 | // Convert hex string to bytes 23 | let key_bytes: [u8; 32] = hex::decode(&eigen_key_hex) 24 | .expect("Failed to decode EIGEN_KEY hex") 25 | .try_into() 26 | .expect("EIGEN_KEY must be 32 bytes"); 27 | 28 | // Create signing key and inject into KMS 29 | let secret_key = k256::elliptic_curve::SecretKey::from_slice(&key_bytes)?; 30 | let k256_signing_key = K256SigningKey::from(&secret_key); 31 | let kms_key_id = kms.inject_secp256k1_key(&k256_signing_key).await?; 32 | 33 | // Deploy contract and start services 34 | let request_timeout = Duration::from_secs(50); 35 | let max_fee = 1_000_000_000_000; 36 | let (_contract_args, deployed_contract) = 37 | deploy_contract(&eth_node, eth_signers.clone(), max_fee, request_timeout).await?; 38 | let db = start_db().await?; 39 | let fuel_node = start_fuel_node(logs, Some(Duration::from_millis(200))).await?; 40 | 41 | // Start Eigen committer with KMS key 42 | let logs = true; 43 | let committer = start_eigen_committer( 44 | logs, 45 | db.clone(), 46 | &eth_node, 47 | fuel_node.url(), 48 | &deployed_contract, 49 | eth_signers.main, 50 | kms_key_id, // Use the KMS key ID instead of raw EIGEN_KEY 51 | "1 KB", 52 | ) 53 | .await?; 54 | 55 | info!("waiting for 10s."); 56 | tokio::time::sleep(Duration::from_secs(10)).await; 57 | 58 | // Test 1: Verify committer is running by checking metrics endpoint 59 | let metrics_url = committer.metrics_url(); 60 | let client = reqwest::Client::new(); 61 | let response = client.get(metrics_url.clone()).send().await?; 62 | assert!( 63 | response.status().is_success(), 64 | "Metrics endpoint should be accessible" 65 | ); 66 | 67 | // Test 2: Verify state synchronization 68 | // Wait for some blocks to be processed 69 | tokio::time::sleep(Duration::from_secs(100)).await; 70 | 71 | // TODO: we should investigate directly querying the database instead of using metrics. 72 | // Check if committer has processed any blocks 73 | let metrics = client.get(metrics_url).send().await?.text().await?; 74 | 75 | let last_finalized_time = 76 | extract_metric_value(&metrics, "seconds_since_last_finalized_fragment"); 77 | 78 | if let Some(value) = last_finalized_time { 79 | assert!( 80 | value > 0, 81 | "seconds_since_last_finalized_fragment should be non-zero, got: {}", 82 | value 83 | ); 84 | } else { 85 | panic!("seconds_since_last_finalized_fragment metric not found in metrics output"); 86 | } 87 | 88 | Ok(()) 89 | } 90 | 91 | fn extract_metric_value(input: &str, target_metric: &str) -> Option<u64> { 92 | for line in input.lines() { 93 | let line = line.trim(); 94 | 95 | // Skip comments and empty lines 96 | if line.starts_with('#') || line.is_empty() { 97 | continue; 98 | } 99 | 100 | // Check for target metric at start of line 101 | if line.starts_with(target_metric) { 102 | // Value is always the second component 103 | if let Ok(value) = line.split_whitespace().nth(1)?.parse() { 104 | return Some(value); 105 | } 106 | } 107 | } 108 | None 109 | }
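// Editor's example (not in the original file): the metric parser skips Prometheus
// comment lines and reads the second whitespace-separated field of the first
// matching sample line.
#[cfg(test)]
mod extract_metric_value_example {
    use super::extract_metric_value;

    #[test]
    fn reads_value_and_skips_comments() {
        let metrics = "# HELP seconds_since_last_finalized_fragment Time since last finalization\n\
                       # TYPE seconds_since_last_finalized_fragment gauge\n\
                       seconds_since_last_finalized_fragment 42\n";

        assert_eq!(
            extract_metric_value(metrics, "seconds_since_last_finalized_fragment"),
            Some(42)
        );
        assert_eq!(extract_metric_value(metrics, "missing_metric"), None);
    }
}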
110 | --------------------------------------------------------------------------------