├── .cargo ├── audit.toml └── config.toml ├── .config └── nextest.toml ├── .dockerignore ├── .env ├── .env.contracts.example ├── .env.deploy.esp.example ├── .env.docker.example ├── .env.lightweight ├── .envrc ├── .gas-snapshot ├── .gitattributes ├── .github ├── dependabot.yml ├── grcov.yml └── workflows │ ├── audit.yml │ ├── auto-merge-dependabot.yml │ ├── backport.yml │ ├── benchmark-build.yaml │ ├── build-crypto-helper.yml │ ├── build-without-lockfile.yml │ ├── build.yml │ ├── cargo-features.yml │ ├── contracts.yml │ ├── coverage.yml │ ├── doc-rust.yml │ ├── hotshot.yml │ ├── lint.yml │ ├── nix-env-macos-arm.yml │ ├── slowtest.yaml │ ├── test.yml │ ├── typos.yml │ ├── ubuntu-install-without-nix.yml │ ├── unused-deps.yml │ └── update_nix.yml ├── .gitignore ├── .gitmodules ├── .prettierignore ├── .prettierrc.yml ├── .solhint.json ├── .typos.toml ├── CODEOWNERS ├── Cargo.lock ├── Cargo.toml ├── Makefile ├── README.md ├── alloy-compat ├── Cargo.toml └── src │ └── lib.rs ├── audits ├── README.md ├── external-reviews │ ├── EspressoHotshotLightClient-2024.pdf │ └── EspressoPlonk-2024.pdf └── internal-reviews │ ├── EspressoFeeContract-2024internal.pdf │ ├── EspressoHotshot-2024internal.pdf │ ├── EspressoHotstuff2-2025internal.pdf │ └── EspressoSequencer-2024internal.pdf ├── benchmark-stats ├── Cargo.toml └── src │ └── main.rs ├── client ├── Cargo.toml └── src │ └── lib.rs ├── config ├── ValidatorConfigExample └── ValidatorConfigFile.toml ├── contracts ├── .gitignore ├── artifacts │ └── abi │ │ ├── EspToken.json │ │ ├── IRewardClaim.json │ │ ├── LightClient.json │ │ ├── LightClientMock.json │ │ ├── LightClientV2.json │ │ ├── LightClientV2Mock.json │ │ └── StakeTable.json ├── demo │ └── upgradeDemo │ │ ├── DemoBoxV1.sol │ │ ├── DemoBoxV2.sol │ │ └── README.md ├── echidna.yaml ├── rust │ ├── adapter │ │ ├── Cargo.toml │ │ └── src │ │ │ ├── bin │ │ │ └── eval_domain.rs │ │ │ ├── bindings │ │ │ ├── erc1967_proxy.rs │ │ │ ├── esp_token.rs │ │ │ ├── esp_token_v2.rs │ │ │ ├── fee_contract.rs │ │ │ ├── i_plonk_verifier.rs │ │ │ ├── i_reward_claim.rs │ │ │ ├── light_client.rs │ │ │ ├── light_client_arbitrum.rs │ │ │ ├── light_client_arbitrum_v2.rs │ │ │ ├── light_client_arbitrum_v3.rs │ │ │ ├── light_client_mock.rs │ │ │ ├── light_client_v2.rs │ │ │ ├── light_client_v2_mock.rs │ │ │ ├── light_client_v3.rs │ │ │ ├── light_client_v3_mock.rs │ │ │ ├── mod.rs │ │ │ ├── ops_timelock.rs │ │ │ ├── ownable_upgradeable.rs │ │ │ ├── plonk_verifier.rs │ │ │ ├── plonk_verifier_v2.rs │ │ │ ├── plonk_verifier_v3.rs │ │ │ ├── reward_claim.rs │ │ │ ├── safe_exit_timelock.rs │ │ │ ├── stake_table.rs │ │ │ └── stake_table_v2.rs │ │ │ ├── copy.rs │ │ │ ├── evm.rs │ │ │ ├── jellyfish.rs │ │ │ ├── lib.rs │ │ │ ├── light_client.rs │ │ │ ├── reward.rs │ │ │ ├── sol_types.rs │ │ │ └── stake_table.rs │ ├── deployer │ │ ├── Cargo.toml │ │ ├── README.md │ │ └── src │ │ │ ├── builder.rs │ │ │ ├── impersonate_filler.rs │ │ │ ├── lib.rs │ │ │ ├── network_config.rs │ │ │ ├── proposals │ │ │ ├── mod.rs │ │ │ ├── multisig.rs │ │ │ └── timelock.rs │ │ │ └── provider.rs │ ├── diff-test │ │ ├── Cargo.toml │ │ └── src │ │ │ └── main.rs │ └── gen-vk-contract │ │ ├── Cargo.toml │ │ └── src │ │ └── main.rs ├── script │ ├── FeeContract.s.sol │ ├── LightClient.s.sol │ ├── LightClientArb.s.sol │ ├── LightClientArbitrumStaging.s.sol │ ├── LightClientCallNewFinalizedState.s.sol │ ├── LightClientStaging.s.sol │ ├── PlonkVerifier.s.sol │ ├── PlonkVerifierV2.s.sol │ ├── README.md │ ├── StakeTable.s.sol │ ├── multisigTransactionProposals │ │ ├── 
README.md │ │ ├── images │ │ │ └── safeProposal.png │ │ ├── safeSDK │ │ │ ├── approveStake.ts │ │ │ ├── decodeFunctionData.ts │ │ │ ├── delegateStake.ts │ │ │ ├── modifyEpochStartBlock.ts │ │ │ ├── modifyProverModeProposal.ts │ │ │ ├── modifyStateHistoryRetentionPeriod.ts │ │ │ ├── transferOwnership.ts │ │ │ ├── upgradeProxy.ts │ │ │ └── utils.ts │ │ └── tests │ │ │ ├── decodeFunctionData.test.ts │ │ │ ├── upgradeProxy.test.ts │ │ │ └── utils.test.ts │ └── output │ │ └── defenderDeployments │ │ ├── LightClient.sol │ │ └── 11155111 │ │ │ ├── 12.json │ │ │ ├── 13.json │ │ │ ├── 14.json │ │ │ └── saltHistory.json │ │ └── PlonkVerifier.sol │ │ └── 11155111 │ │ ├── 2.json │ │ └── saltHistory.json ├── src │ ├── EspToken.sol │ ├── EspTokenV2.sol │ ├── FeeContract.sol │ ├── InitializedAt.sol │ ├── LightClient.sol │ ├── LightClientArbitrum.sol │ ├── LightClientArbitrumV2.sol │ ├── LightClientArbitrumV3.sol │ ├── LightClientV2.sol │ ├── LightClientV3.sol │ ├── OpsTimelock.sol │ ├── RewardClaim.sol │ ├── SafeExitTimelock.sol │ ├── StakeTable.sol │ ├── StakeTableV2.sol │ ├── interfaces │ │ ├── ILightClient.sol │ │ ├── IPlonkVerifier.sol │ │ └── IRewardClaim.sol │ ├── legacy │ │ └── Transcript.sol │ └── libraries │ │ ├── BLSSig.sol │ │ ├── ERC1967Proxy.sol │ │ ├── EdOnBn254.sol │ │ ├── LightClientStateUpdateVK.sol │ │ ├── LightClientStateUpdateVKV2.sol │ │ ├── LightClientStateUpdateVKV3.sol │ │ ├── PlonkVerifier.sol │ │ ├── PlonkVerifierV2.sol │ │ ├── PlonkVerifierV3.sol │ │ ├── PolynomialEval.sol │ │ ├── PolynomialEvalV2.sol │ │ ├── PolynomialEvalV3.sol │ │ └── RewardMerkleTreeVerifier.sol └── test │ ├── BLSSig.t.sol │ ├── BoxUpgrade.t.sol │ ├── EspToken.t.sol │ ├── EspTokenV2.t.sol │ ├── FeeContract.t.sol │ ├── LightClientArbitrumV2.t.sol │ ├── LightClientBenchmark.t.sol │ ├── LightClientStorageLayout.t.solt │ ├── LightClientUpgradeSafety.t.solt │ ├── LightClientUpgradeSameContract.t.sol │ ├── LightClientUpgradeToVx.t.sol │ ├── LightClientV3.t.sol │ ├── MockStakeTableV2.sol │ ├── PlonkVerifierV3.t.sol │ ├── PolynomialEvalV3.t.sol │ ├── README.md │ ├── RewardClaim.t.sol │ ├── RewardClaimAdmin.t.sol │ ├── RewardClaimInitialize.t.sol │ ├── RewardClaimPause.t.sol │ ├── RewardClaimRateLimit.t.sol │ ├── RewardClaimReentrancy.t.sol │ ├── RewardClaimRoles.t.sol │ ├── RewardClaimUpgrade.t.sol │ ├── StakeTable.t.sol │ ├── StakeTableV2.echidna.sol │ ├── StakeTableV2.invariant.t.sol │ ├── StakeTableV2Commission.t.sol │ ├── StakeTableV2PropTestBase.sol │ ├── legacy │ └── Transcript.t.sol │ ├── mocks │ ├── LightClientMock.sol │ ├── LightClientStateUpdateVKMock.sol │ ├── LightClientV2Fake.sol │ ├── LightClientV2Mock.sol │ ├── LightClientV3Fake.sol │ ├── LightClientV3Mock.sol │ └── MockRewardClaim.sol │ ├── script │ ├── Box.s.sol │ ├── DowngradeLightClientV2ToV1.s.sol │ ├── Fee.s.sol │ ├── LightClientTestScript.s.sol │ ├── UpgradeBox.s.sol │ ├── UpgradeLightClientToV2.s.sol │ ├── UpgradeLightClientToV3.s.sol │ ├── check-upgrade-safety.js │ ├── compare-storage-layout-deployed.js │ └── compare-storage-layout.js │ └── utils │ └── InvariantStats.sol ├── crates ├── builder │ ├── Cargo.toml │ └── src │ │ ├── bin │ │ └── permissionless-builder.rs │ │ ├── lib.rs │ │ └── non_permissioned.rs ├── hotshot-builder │ ├── legacy │ │ ├── Cargo.toml │ │ └── src │ │ │ ├── builder_state.rs │ │ │ ├── lib.rs │ │ │ ├── service.rs │ │ │ └── testing │ │ │ ├── basic_test.rs │ │ │ ├── finalization_test.rs │ │ │ └── mod.rs │ ├── refactored │ │ ├── Cargo.toml │ │ └── src │ │ │ ├── block_size_limits.rs │ │ │ ├── block_store.rs │ │ │ ├── lib.rs 
│ │ │ ├── service.rs │ │ │ └── testing │ │ │ ├── basic.rs │ │ │ ├── block_size.rs │ │ │ ├── finalization.rs │ │ │ ├── integration.rs │ │ │ └── mod.rs │ └── shared │ │ ├── Cargo.toml │ │ └── src │ │ ├── block.rs │ │ ├── coordinator │ │ ├── mod.rs │ │ └── tiered_view_map.rs │ │ ├── error.rs │ │ ├── lib.rs │ │ ├── state.rs │ │ ├── testing │ │ ├── consensus.rs │ │ ├── constants.rs │ │ ├── generation.rs │ │ ├── mock.rs │ │ ├── mod.rs │ │ └── validation.rs │ │ └── utils │ │ ├── event_service_wrapper.rs │ │ ├── mod.rs │ │ └── rotating_set.rs └── hotshot │ ├── builder-api │ ├── Cargo.toml │ ├── README.md │ ├── api │ │ ├── v0_1 │ │ │ ├── builder.toml │ │ │ └── submit.toml │ │ └── v0_3 │ │ │ ├── builder.toml │ │ │ └── submit.toml │ └── src │ │ ├── api.rs │ │ ├── lib.rs │ │ └── v0_1 │ │ ├── block_info.rs │ │ ├── builder.rs │ │ ├── data_source.rs │ │ ├── mod.rs │ │ └── query_data.rs │ ├── example-types │ ├── Cargo.toml │ └── src │ │ ├── block_types.rs │ │ ├── lib.rs │ │ ├── membership │ │ ├── fetcher.rs │ │ ├── helpers.rs │ │ ├── mod.rs │ │ ├── randomized_committee.rs │ │ ├── randomized_committee_members.rs │ │ ├── stake_table.rs │ │ ├── static_committee.rs │ │ ├── static_committee_leader_two_views.rs │ │ ├── strict_membership.rs │ │ └── two_static_committees.rs │ │ ├── node_types.rs │ │ ├── state_types.rs │ │ ├── storage_types.rs │ │ └── testable_delay.rs │ ├── examples │ ├── Cargo.toml │ ├── combined │ │ ├── all.rs │ │ ├── multi-validator.rs │ │ ├── orchestrator.rs │ │ ├── types.rs │ │ └── validator.rs │ ├── infra │ │ └── mod.rs │ ├── libp2p │ │ ├── all.rs │ │ ├── multi-validator.rs │ │ ├── types.rs │ │ └── validator.rs │ ├── orchestrator.rs │ └── push-cdn │ │ ├── README.md │ │ ├── all.rs │ │ ├── broker.rs │ │ ├── marshal.rs │ │ ├── multi-validator.rs │ │ ├── types.rs │ │ ├── validator.rs │ │ └── whitelist-updater.rs │ ├── hotshot │ ├── Cargo.toml │ └── src │ │ ├── documentation.rs │ │ ├── helpers.rs │ │ ├── lib.rs │ │ ├── tasks │ │ ├── mod.rs │ │ └── task_state.rs │ │ ├── traits.rs │ │ ├── traits │ │ ├── networking.rs │ │ ├── networking │ │ │ ├── combined_network.rs │ │ │ ├── libp2p_network.rs │ │ │ ├── memory_network.rs │ │ │ └── push_cdn_network.rs │ │ └── node_implementation.rs │ │ ├── types.rs │ │ └── types │ │ ├── event.rs │ │ └── handle.rs │ ├── libp2p-networking │ ├── .cargo │ │ └── config │ ├── .gitignore │ ├── Cargo.toml │ ├── flamegraph.sh │ ├── src │ │ ├── lib.rs │ │ └── network │ │ │ ├── behaviours │ │ │ ├── dht │ │ │ │ ├── bootstrap.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── record.rs │ │ │ │ └── store │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── persistent.rs │ │ │ │ │ └── validated.rs │ │ │ ├── direct_message.rs │ │ │ ├── exponential_backoff.rs │ │ │ └── mod.rs │ │ │ ├── cbor.rs │ │ │ ├── def.rs │ │ │ ├── mod.rs │ │ │ ├── node.rs │ │ │ ├── node │ │ │ ├── config.rs │ │ │ └── handle.rs │ │ │ └── transport.rs │ └── web │ │ └── index.html │ ├── macros │ ├── Cargo.toml │ └── src │ │ └── lib.rs │ ├── orchestrator │ ├── Cargo.toml │ ├── README.md │ ├── api.toml │ ├── run-config.toml │ ├── src │ │ ├── client.rs │ │ └── lib.rs │ └── staging-config.toml │ ├── task-impls │ ├── Cargo.toml │ ├── HotShot_event_architecture.drawio │ ├── HotShot_event_architecture.png │ ├── README.md │ └── src │ │ ├── builder.rs │ │ ├── consensus │ │ ├── handlers.rs │ │ └── mod.rs │ │ ├── da.rs │ │ ├── events.rs │ │ ├── harness.rs │ │ ├── helpers.rs │ │ ├── lib.rs │ │ ├── network.rs │ │ ├── quorum_proposal │ │ ├── handlers.rs │ │ └── mod.rs │ │ ├── quorum_proposal_recv │ │ ├── handlers.rs │ │ └── mod.rs │ │ ├── quorum_vote │ │ ├── 
handlers.rs │ │ └── mod.rs │ │ ├── request.rs │ │ ├── response.rs │ │ ├── rewind.rs │ │ ├── stats.rs │ │ ├── transactions.rs │ │ ├── upgrade.rs │ │ ├── vid.rs │ │ ├── view_sync.rs │ │ └── vote_collection.rs │ ├── task │ ├── Cargo.toml │ └── src │ │ ├── dependency.rs │ │ ├── dependency_task.rs │ │ ├── lib.rs │ │ └── task.rs │ ├── testing │ ├── .gitignore │ ├── Cargo.toml │ ├── README.md │ ├── src │ │ ├── block_builder │ │ │ ├── mod.rs │ │ │ ├── random.rs │ │ │ └── simple.rs │ │ ├── byzantine │ │ │ ├── byzantine_behaviour.rs │ │ │ └── mod.rs │ │ ├── completion_task.rs │ │ ├── consistency_task.rs │ │ ├── helpers.rs │ │ ├── lib.rs │ │ ├── node_ctx.rs │ │ ├── node_stake.rs │ │ ├── overall_safety_task.rs │ │ ├── predicates │ │ │ ├── event.rs │ │ │ ├── mod.rs │ │ │ ├── upgrade_with_proposal.rs │ │ │ └── upgrade_with_vote.rs │ │ ├── script.rs │ │ ├── spinning_task.rs │ │ ├── test_builder.rs │ │ ├── test_helpers.rs │ │ ├── test_launcher.rs │ │ ├── test_runner.rs │ │ ├── test_task.rs │ │ ├── txn_task.rs │ │ ├── view_generator.rs │ │ └── view_sync_task.rs │ └── tests │ │ ├── test_epoch_catchup.rs │ │ ├── test_epoch_end.rs │ │ ├── test_epoch_staggered_restart.rs │ │ ├── test_epoch_success_catchup_types.rs │ │ ├── test_epoch_success_overlap_2_f.rs │ │ ├── test_epoch_success_overlap_2f_plus_1.rs │ │ ├── test_epoch_success_overlap_3f.rs │ │ ├── test_epoch_success_overlap_dynamic.rs │ │ ├── test_epoch_success_overlap_f.rs │ │ ├── test_epoch_success_overlap_f_plus_1.rs │ │ ├── test_epoch_success_types.rs │ │ ├── test_epoch_success_types_randomized_leader.rs │ │ ├── test_epoch_unequal_stake.rs │ │ ├── test_epoch_upgrade.rs │ │ ├── test_epochs_combined_network.rs │ │ ├── test_epochs_failures.rs │ │ ├── test_epochs_restart.rs │ │ ├── test_shorter_decide.rs │ │ ├── test_success_with_async_delay_2_with_epochs.rs │ │ ├── test_success_with_async_delay_with_epochs.rs │ │ ├── test_success_with_epochs.rs │ │ ├── test_with_double_leader_no_failures_with_epochs.rs │ │ ├── tests_1.rs │ │ ├── tests_1 │ │ ├── block_builder.rs │ │ ├── da_task.rs │ │ ├── libp2p.rs │ │ ├── message.rs │ │ ├── network_task.rs │ │ ├── quorum_proposal_recv_task.rs │ │ ├── quorum_proposal_task.rs │ │ ├── quorum_vote_task.rs │ │ ├── test_success.rs │ │ ├── test_with_failures_2.rs │ │ ├── transaction_task.rs │ │ ├── unit.rs │ │ ├── upgrade_task_with_proposal.rs │ │ ├── upgrade_task_with_vote.rs │ │ ├── vid_task.rs │ │ ├── view_sync_task.rs │ │ └── vote_dependency_handle.rs │ │ ├── tests_2.rs │ │ ├── tests_2 │ │ └── catchup.rs │ │ ├── tests_3.rs │ │ ├── tests_3 │ │ ├── byzantine_tests.rs │ │ ├── memory_network.rs │ │ └── test_with_failures_half_f.rs │ │ ├── tests_4.rs │ │ ├── tests_4 │ │ ├── byzantine_tests.rs │ │ ├── test_with_builder_failures.rs │ │ └── test_with_failures_f.rs │ │ ├── tests_5.rs │ │ └── tests_5 │ │ ├── broken_3_chain.rs │ │ ├── combined_network.rs │ │ ├── push_cdn.rs │ │ ├── test_with_failures.rs │ │ ├── timeout.rs │ │ └── unreliable_network.rs │ ├── types │ ├── Cargo.toml │ ├── bin │ │ └── mnemonic.rs │ └── src │ │ ├── bundle.rs │ │ ├── consensus.rs │ │ ├── constants.rs │ │ ├── data.rs │ │ ├── data │ │ ├── ns_table.rs │ │ └── vid_disperse.rs │ │ ├── drb.rs │ │ ├── epoch_membership.rs │ │ ├── error.rs │ │ ├── event.rs │ │ ├── hotshot_config_file.rs │ │ ├── lib.rs │ │ ├── light_client.rs │ │ ├── message.rs │ │ ├── network.rs │ │ ├── qc.rs │ │ ├── request_response.rs │ │ ├── signature_key.rs │ │ ├── simple_certificate.rs │ │ ├── simple_vote.rs │ │ ├── stake_table.rs │ │ ├── storage_metrics.rs │ │ ├── traits.rs │ │ ├── traits │ │ 
├── block_contents.rs │ │ ├── consensus_api.rs │ │ ├── election.rs │ │ ├── metrics.rs │ │ ├── network.rs │ │ ├── node_implementation.rs │ │ ├── qc.rs │ │ ├── signature_key.rs │ │ ├── states.rs │ │ └── storage.rs │ │ ├── upgrade_config.rs │ │ ├── utils.rs │ │ ├── vid.rs │ │ ├── vid │ │ ├── advz.rs │ │ └── avidm.rs │ │ └── vote.rs │ └── utils │ ├── Cargo.toml │ └── src │ ├── anytrace.rs │ ├── anytrace │ └── macros.rs │ └── lib.rs ├── cross-shell.nix ├── data ├── README.md ├── genesis │ ├── benchmark.toml │ ├── cappuccino.toml │ ├── cocoa.toml │ ├── decaf.toml │ ├── demo-drb-header-upgrade.toml │ ├── demo-drb-header.toml │ ├── demo-fee-to-drb-header-upgrade.toml │ ├── demo-pos-base.toml │ ├── demo-pos.toml │ ├── demo.toml │ ├── mainnet.toml │ └── staging.toml ├── initial_stake_table.toml ├── insta_snapshots │ ├── espresso_types__reference_tests__reward_claim_input_v2.snap │ └── espresso_types__reference_tests__reward_proof_v2.snap ├── v1 │ ├── block_query_data.bin │ ├── block_query_data.json │ ├── chain_config.bin │ ├── chain_config.json │ ├── fee_info.bin │ ├── fee_info.json │ ├── header.bin │ ├── header.json │ ├── l1_block.bin │ ├── l1_block.json │ ├── leaf_query_data_legacy.bin │ ├── leaf_query_data_legacy.json │ ├── messages.bin │ ├── messages.json │ ├── ns_proof_legacy.bin │ ├── ns_proof_legacy.json │ ├── ns_table.bin │ ├── ns_table.json │ ├── payload.bin │ ├── payload.json │ ├── payload_query_data.bin │ ├── payload_query_data.json │ ├── transaction.bin │ ├── transaction.json │ ├── transaction_query_data.bin │ ├── transaction_query_data.json │ ├── vid_common_v0.bin │ ├── vid_common_v0.json │ ├── vid_common_v1.bin │ └── vid_common_v1.json ├── v2 │ ├── chain_config.bin │ ├── chain_config.json │ ├── header.bin │ ├── header.json │ ├── leaf_query_data_legacy.bin │ ├── leaf_query_data_legacy.json │ ├── messages.bin │ └── messages.json ├── v3 │ ├── chain_config.bin │ ├── chain_config.json │ ├── decaf_stake_table.json │ ├── decaf_stake_table_events.json │ ├── header.bin │ ├── header.json │ ├── leaf_query_data.bin │ ├── leaf_query_data.json │ ├── messages.bin │ ├── messages.json │ ├── ns_proof_V0.bin │ ├── ns_proof_V0.json │ ├── ns_proof_V1.bin │ ├── ns_proof_V1.json │ ├── state_cert.bin │ └── state_cert.json └── v4 │ ├── chain_config.bin │ ├── chain_config.json │ ├── header.bin │ ├── header.json │ ├── messages.bin │ ├── messages.json │ ├── ns_proof_V0.bin │ ├── ns_proof_V0.json │ ├── ns_proof_V1.bin │ ├── ns_proof_V1.json │ ├── state_cert.bin │ └── state_cert.json ├── doc ├── architecture.md ├── architecture.puml ├── architecture.svg ├── espresso-dev-node.md ├── espresso-overview.puml ├── espresso-overview.svg ├── full-node-espresso-integration.puml ├── full-node-espresso-integration.svg ├── prover-espresso-integration.puml ├── prover-espresso-integration.svg ├── sequence-diagram-overview.puml ├── sequence-diagram-overview.svg ├── sequence-diagram-simplified.puml ├── sequence-diagram-simplified.svg ├── sequence-diagram.puml ├── sequence-diagram.svg ├── smart-contract-upgrades.md ├── ubuntu.md ├── upgrades.md ├── zk-integration.md ├── zk-rollup-circuit-no-espresso-consensus.puml ├── zk-rollup-circuit-no-espresso-consensus.svg ├── zk-rollup-circuit.puml ├── zk-rollup-circuit.svg ├── zk-rollup-default-sequencer.puml └── zk-rollup-default-sequencer.svg ├── docker-compose.yaml ├── docker ├── bridge.Dockerfile ├── builder.Dockerfile ├── cdn-broker.Dockerfile ├── cdn-marshal.Dockerfile ├── cdn-whitelist.Dockerfile ├── deploy.Dockerfile ├── espresso-dev-node.Dockerfile ├── nasty-client.Dockerfile ├── 
node-validator.Dockerfile ├── orchestrator.Dockerfile ├── prover-service.Dockerfile ├── scripts │ └── sequencer-awssecretsmanager.sh ├── sequencer.Dockerfile ├── staking-cli.Dockerfile ├── state-relay-server.Dockerfile └── submit-transactions.Dockerfile ├── flake.lock ├── flake.nix ├── foundry.toml ├── genesis.json ├── geth-config ├── genesis-default.json └── test-jwt-secret.txt ├── hotshot-events-service ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── api │ └── hotshot_events.toml ├── flake.lock └── src │ ├── api.rs │ ├── events.rs │ ├── events_source.rs │ ├── lib.rs │ └── test.rs ├── hotshot-query-service ├── .cargo │ ├── audit.toml │ └── config.toml ├── .config │ └── nextest.toml ├── .gitignore ├── .license-header.txt ├── CODEOWNERS ├── Cargo.toml ├── LICENSE ├── README.md ├── api │ ├── availability.toml │ ├── explorer.toml │ ├── node.toml │ ├── state.toml │ └── status.toml ├── doc │ ├── async-retrieval.md │ ├── fetch-block.drawio │ ├── fetch-block.png │ ├── fetch-leaf.drawio │ ├── fetch-leaf.png │ ├── fetching-workflow.drawio │ └── fetching-workflow.png ├── examples │ └── simple-server.rs ├── flake.lock ├── migrations │ ├── postgres │ │ ├── V1000__aggregate_table_ns.sql │ │ ├── V100__drop_leaf_payload.sql │ │ ├── V10__init_schema.sql │ │ ├── V1100__latest_qc_chain.sql │ │ ├── V200__create_aggregates_table.sql │ │ ├── V20__payload_hash_index.sql │ │ ├── V300__transactions_count.sql │ │ ├── V30__drop_leaf_block_hash_fkey_constraint.sql │ │ ├── V400__rename_transaction_table.sql │ │ ├── V500__types_migration.sql │ │ ├── V600__state_cert.sql │ │ ├── V700__add_migrated_rows_col.sql │ │ ├── V800__leaf2_remove_view.sql │ │ └── V900__tx_separate_index.sql │ └── sqlite │ │ ├── V100__init_schema.sql │ │ ├── V200__create_aggregates_table.sql │ │ ├── V300__types_migration.sql │ │ ├── V400__state_cert.sql │ │ ├── V500__add_migrated_rows_col.sql │ │ ├── V600__leaf2_remove_view.sql │ │ ├── V700__tx_separate_index.sql │ │ ├── V800__aggregate_table_ns.sql │ │ └── V900__latest_qc_chain.sql ├── poetry.lock ├── pyproject.toml ├── rust-toolchain.toml └── src │ ├── api.rs │ ├── availability.rs │ ├── availability │ ├── data_source.rs │ ├── fetch.rs │ └── query_data.rs │ ├── data_source.rs │ ├── data_source │ ├── extension.rs │ ├── fetching.rs │ ├── fetching │ │ ├── block.rs │ │ ├── header.rs │ │ ├── leaf.rs │ │ ├── state_cert.rs │ │ ├── transaction.rs │ │ └── vid.rs │ ├── fs.rs │ ├── metrics.rs │ ├── notifier.rs │ ├── sql.rs │ ├── storage.rs │ ├── storage │ │ ├── fail_storage.rs │ │ ├── fs.rs │ │ ├── ledger_log.rs │ │ ├── pruning.rs │ │ ├── sql.rs │ │ └── sql │ │ │ ├── db.rs │ │ │ ├── migrate.rs │ │ │ ├── queries.rs │ │ │ ├── queries │ │ │ ├── availability.rs │ │ │ ├── explorer.rs │ │ │ ├── node.rs │ │ │ └── state.rs │ │ │ └── transaction.rs │ └── update.rs │ ├── error.rs │ ├── explorer.rs │ ├── explorer │ ├── currency.rs │ ├── data_source.rs │ ├── errors.rs │ ├── monetary_value.rs │ ├── query_data.rs │ └── traits.rs │ ├── fetching.rs │ ├── fetching │ ├── provider.rs │ ├── provider │ │ ├── any.rs │ │ ├── query_service.rs │ │ └── testing.rs │ └── request.rs │ ├── lib.rs │ ├── merklized_state.rs │ ├── merklized_state │ └── data_source.rs │ ├── metrics.rs │ ├── node.rs │ ├── node │ ├── data_source.rs │ └── query_data.rs │ ├── resolvable.rs │ ├── status.rs │ ├── status │ └── data_source.rs │ ├── task.rs │ ├── testing.rs │ ├── testing │ ├── consensus.rs │ └── mocks.rs │ └── types.rs ├── hotshot-state-prover ├── Cargo.toml ├── README.md ├── RUNBOOK.md ├── api │ └── prover-service.toml └── src │ ├── bin 
│ ├── gen-demo-genesis.rs │ └── state-prover.rs │ ├── lib.rs │ ├── test_utils.rs │ ├── utils.rs │ ├── v1 │ ├── circuit.rs │ ├── mock_ledger.rs │ ├── mod.rs │ ├── service.rs │ └── snark.rs │ ├── v2 │ ├── circuit.rs │ ├── mock_ledger.rs │ ├── mod.rs │ ├── service.rs │ └── snark.rs │ └── v3 │ ├── circuit.rs │ ├── mock_ledger.rs │ ├── mod.rs │ ├── service.rs │ └── snark.rs ├── hotshot.just ├── jest.config.js ├── justfile ├── node-metrics ├── Cargo.toml └── src │ ├── api │ ├── mod.rs │ └── node_validator │ │ ├── mod.rs │ │ └── v0 │ │ ├── create_node_validator_api.rs │ │ ├── example_prometheus_metrics_output.txt │ │ ├── mod.rs │ │ └── node_validator.toml │ ├── lib.rs │ ├── main.rs │ └── service │ ├── client_id │ └── mod.rs │ ├── client_message │ └── mod.rs │ ├── client_state │ └── mod.rs │ ├── data_state │ ├── location_details.rs │ ├── mod.rs │ └── node_identity.rs │ ├── mod.rs │ ├── node_type │ └── mod.rs │ └── server_message │ └── mod.rs ├── package.json ├── process-compose.yaml ├── request-response ├── Cargo.toml └── src │ ├── data_source.rs │ ├── lib.rs │ ├── message.rs │ ├── network.rs │ ├── recipient_source.rs │ ├── request.rs │ └── util.rs ├── rust-toolchain.toml ├── rustfmt.toml ├── scripts ├── benchmarks_results │ └── upload_results.csv ├── build-docker-images-native ├── build-go-crypto-helper ├── ci-build-binary ├── cli ├── demo-native ├── fmt-pc-logs ├── multisig-upgrade-entrypoint ├── sequencer-entrypoint ├── show-toolchain-versions ├── smoke-test-demo ├── test-build-docker-images-native ├── test-deploy-with-ledger ├── ubuntu-install-test-no-nix └── ubuntu-install-test-no-nix-docker ├── sdks ├── crypto-helper │ ├── .cargo │ │ └── config.toml │ ├── Cargo.toml │ └── src │ │ └── lib.rs └── go │ ├── README.md │ ├── client-dev-node │ ├── client.go │ ├── client_test.go │ └── types.go │ ├── client │ ├── api.go │ ├── client.go │ ├── client_test.go │ ├── multiple_nodes_client.go │ ├── multiple_nodes_test.go │ ├── query.go │ └── submit.go │ ├── cmd │ └── espresso-network-lib-utility │ │ └── main.go │ ├── go.mod │ ├── go.sum │ ├── light-client-mock │ ├── lightclient.go │ └── test_utils.go │ ├── light-client │ ├── light_client_reader.go │ └── lightclient.go │ ├── log-helper │ ├── log_helper.go │ ├── log_helper_test.go │ └── strategy.go │ ├── tagged-base64 │ ├── tagged_base64.go │ └── tagged_base64_test.go │ ├── types │ ├── common │ │ ├── commit.go │ │ ├── consts.go │ │ ├── types.go │ │ └── types_test.go │ ├── header.go │ ├── header_test.go │ └── v0 │ │ ├── v0_1 │ │ ├── chain_config.go │ │ └── header.go │ │ ├── v0_2 │ │ └── header.go │ │ └── v0_3 │ │ ├── chain_config.go │ │ └── header.go │ └── verification │ ├── merkle_proof_test_data.json │ ├── merkle_proof_test_data_generation_test.go │ ├── namespace_proof_test_data.json │ ├── native.go │ ├── native_test.go │ ├── resp │ ├── header.json │ ├── transaction_in_block.json │ └── vid_common.json │ ├── verify.go │ └── verify_test.go ├── sequencer-sqlite ├── Cargo.toml └── src │ └── main.rs ├── sequencer ├── Cargo.toml ├── api │ ├── availability.toml │ ├── catchup.toml │ ├── commitment_task.toml │ ├── config.toml │ ├── espresso_dev_node.toml │ ├── fee.toml │ ├── migrations │ │ ├── postgres │ │ │ ├── V1001__add_drb_params.sql │ │ │ ├── V1002__add_block_reward_column.sql │ │ │ ├── V1003__create_reward_merkle_tree_v2.sql │ │ │ ├── V1004__eqc.sql │ │ │ ├── V1005__all_validators.sql │ │ │ ├── V12__network_config.sql │ │ │ ├── V13__consensus_state.sql │ │ │ ├── V14__state_tables.sql │ │ │ ├── V15__undecided_state.sql │ │ │ ├── V16__merkle_root_columns.sql │ │ 
│ ├── V301__merkle_root_column_indexes.sql │ │ │ ├── V302__update_state_tables_types.sql │ │ │ ├── V31__drop_merklized_state_created_index.sql │ │ │ ├── V32__saved_proposals.sql │ │ │ ├── V33__chain_config_table.sql │ │ │ ├── V34__builder_urls.sql │ │ │ ├── V35__add_upgrade_params.sql │ │ │ ├── V36__alter_merkle_root_column_expressions.sql │ │ │ ├── V38__add_quorum_proposal_hash.sql │ │ │ ├── V39__upgrade_certificate.sql │ │ │ ├── V401__archive_provider.sql │ │ │ ├── V402__next_epoch_qc.sql │ │ │ ├── V403__drop_undecided_state.sql │ │ │ ├── V40__anchor_leaf_chain.sql │ │ │ ├── V41__epoch_height.sql │ │ │ ├── V42__index_quorum_proposal_leaf_hash.sql │ │ │ ├── V501__epoch_tables.sql │ │ │ ├── V502__epoch_drb_and_root.sql │ │ │ ├── V504__reward_merkle_tree.sql │ │ │ ├── V506__migrated_rows_col.sql │ │ │ ├── V801__stake_table_events_table.sql │ │ │ ├── V802__redefine_stake_table_events.sql │ │ │ ├── V803__drb.sql │ │ │ ├── V804__libp2p_dht.sql │ │ │ └── V805__consensus_restart_view.sql │ │ └── sqlite │ │ │ ├── V102__network_config.sql │ │ │ ├── V103__consensus_state.sql │ │ │ ├── V104__state_tables.sql │ │ │ ├── V105__undecided_state.sql │ │ │ ├── V106__merkle_root_columns.sql │ │ │ ├── V107__saved_proposals.sql │ │ │ ├── V108__chain_config_table.sql │ │ │ ├── V109__upgrade_certificate.sql │ │ │ ├── V110__event_stream.sql │ │ │ ├── V201__archive_provider.sql │ │ │ ├── V203__next_epoch_qc.sql │ │ │ ├── V204__drop_undecided_state.sql │ │ │ ├── V301__epoch_tables.sql │ │ │ ├── V302__epoch_drb_and_root.sql │ │ │ ├── V304__reward_merkle_tree.sql │ │ │ ├── V306__migrated_rows_col.sql │ │ │ ├── V601__stake_table_events_table.sql │ │ │ ├── V602__redefine_stake_table_events.sql │ │ │ ├── V603__drb.sql │ │ │ ├── V604__libp2p_dht.sql │ │ │ ├── V605__consensus_restart_view.sql │ │ │ ├── V606__add_block_reward_column.sql │ │ │ ├── V607__create_reward_merkle_tree_v2.sql │ │ │ ├── V801__eqc.sql │ │ │ └── V802__all_validators.sql │ ├── node.toml │ ├── public-env-vars.toml │ ├── reward.toml │ ├── state_relay_server.toml │ ├── state_signature.toml │ └── submit.toml ├── build.rs └── src │ ├── api.rs │ ├── api │ ├── data_source.rs │ ├── endpoints.rs │ ├── fs.rs │ ├── options.rs │ ├── sql.rs │ └── update.rs │ ├── bin │ ├── cdn-broker.rs │ ├── cdn-marshal.rs │ ├── cdn-whitelist.rs │ ├── deploy.rs │ ├── dev-cdn.rs │ ├── espresso-bridge.rs │ ├── espresso-dev-node.rs │ ├── keygen.rs │ ├── nasty-client.rs │ ├── orchestrator.rs │ ├── pub-key.rs │ ├── reset-storage.rs │ ├── state-relay-server.rs │ ├── submit-transactions.rs │ ├── utils │ │ ├── keygen.rs │ │ ├── main.rs │ │ ├── ns_aggregator.rs │ │ ├── pubkey.rs │ │ └── reset_storage.rs │ └── verify-headers.rs │ ├── catchup.rs │ ├── context.rs │ ├── external_event_handler.rs │ ├── genesis.rs │ ├── lib.rs │ ├── main.rs │ ├── message_compat_tests.rs │ ├── network │ ├── cdn.rs │ ├── libp2p.rs │ └── mod.rs │ ├── options.rs │ ├── persistence.rs │ ├── persistence │ ├── fs.rs │ ├── no_storage.rs │ ├── persistence_metrics.rs │ └── sql.rs │ ├── proposal_fetcher.rs │ ├── request_response │ ├── catchup │ │ ├── mod.rs │ │ └── state.rs │ ├── data_source.rs │ ├── mod.rs │ ├── network.rs │ ├── recipient_source.rs │ └── request.rs │ ├── restart_tests.rs │ ├── run.rs │ ├── state.rs │ ├── state_signature.rs │ └── state_signature │ ├── relay_server.rs │ └── relay_server │ ├── lcv1_relay.rs │ ├── lcv2_relay.rs │ ├── lcv3_relay.rs │ └── stake_table_tracker.rs ├── shell.nix ├── staking-cli ├── Cargo.toml ├── DEVELOPER_DOCS.md ├── README.md ├── config.decaf.toml ├── config.demo.toml ├── 
config.dev.toml ├── src │ ├── claim.rs │ ├── delegation.rs │ ├── demo.rs │ ├── deploy.rs │ ├── funding.rs │ ├── info.rs │ ├── l1.rs │ ├── lib.rs │ ├── main.rs │ ├── parse.rs │ ├── receipt.rs │ ├── registration.rs │ └── signature.rs └── tests │ ├── cli.rs │ ├── common │ └── mod.rs │ └── node_signatures.rs ├── tests ├── Cargo.toml ├── common │ └── mod.rs ├── main.rs ├── proof_of_stake.rs ├── reward_claims_e2e.rs ├── smoke.rs └── upgrades.rs ├── types ├── Cargo.toml ├── README.md ├── src │ ├── eth_signature_key.rs │ ├── lib.rs │ ├── reference_tests.rs │ └── v0 │ │ ├── config.rs │ │ ├── header.rs │ │ ├── impls │ │ ├── block │ │ │ ├── full_payload.rs │ │ │ ├── full_payload │ │ │ │ ├── ns_proof.rs │ │ │ │ ├── ns_proof │ │ │ │ │ ├── advz.rs │ │ │ │ │ └── avidm.rs │ │ │ │ ├── ns_table.rs │ │ │ │ ├── ns_table │ │ │ │ │ └── test.rs │ │ │ │ └── payload.rs │ │ │ ├── mod.rs │ │ │ ├── namespace_payload.rs │ │ │ ├── namespace_payload │ │ │ │ ├── iter.rs │ │ │ │ ├── ns_payload.rs │ │ │ │ ├── ns_payload │ │ │ │ │ └── test.rs │ │ │ │ ├── ns_payload_range.rs │ │ │ │ ├── tx_proof.rs │ │ │ │ ├── tx_proof │ │ │ │ │ ├── advz.rs │ │ │ │ │ └── avidm.rs │ │ │ │ └── types.rs │ │ │ ├── test.rs │ │ │ └── uint_bytes.rs │ │ ├── chain_config.rs │ │ ├── fee_info.rs │ │ ├── header.rs │ │ ├── instance_state.rs │ │ ├── l1.rs │ │ ├── mod.rs │ │ ├── reward.rs │ │ ├── stake_table.rs │ │ ├── state.rs │ │ └── transaction.rs │ │ ├── mod.rs │ │ ├── nsproof.rs │ │ ├── sparse_mt.rs │ │ ├── traits.rs │ │ ├── txproof.rs │ │ ├── utils.rs │ │ ├── v0_1 │ │ ├── block.rs │ │ ├── chain_config.rs │ │ ├── fee_info.rs │ │ ├── header.rs │ │ ├── instance_state.rs │ │ ├── l1.rs │ │ ├── mod.rs │ │ ├── state.rs │ │ └── transaction.rs │ │ ├── v0_2 │ │ └── mod.rs │ │ ├── v0_3 │ │ ├── chain_config.rs │ │ ├── header.rs │ │ ├── mod.rs │ │ ├── nsproof.rs │ │ ├── stake_table.rs │ │ ├── state.rs │ │ └── txproof.rs │ │ └── v0_4 │ │ ├── header.rs │ │ ├── mod.rs │ │ └── state.rs └── tests │ └── reward_merkle_tree_test.rs ├── utils ├── Cargo.toml └── src │ ├── lib.rs │ ├── logging.rs │ ├── ser.rs │ └── test_utils.rs ├── vid ├── Cargo.toml ├── benches │ ├── dispersal.rs │ ├── recovery.rs │ └── verify.rs └── src │ ├── avid_m.rs │ ├── avid_m │ ├── config.rs │ ├── namespaced.rs │ └── proofs.rs │ ├── lib.rs │ ├── utils.rs │ └── utils │ └── bytes_to_field.rs ├── yarn.lock └── zkevm-node-additions └── init_pool_db.sql /.cargo/audit.toml: -------------------------------------------------------------------------------- 1 | [advisories] 2 | ignore = [ 3 | # DoS in WebPKI that comes with tide_disco 4 | "RUSTSEC-2023-0052", 5 | # Tungstenite allows remote attackers to cause a denial of service 6 | # Dependency of async-tungstenite -> tide-websockets / surf-disco 7 | "RUSTSEC-2023-0065", 8 | # Unfixed "Marvin" vulnerability in `RSA`, unused in sqlite dependency 9 | "RUSTSEC-2023-0071", 10 | # rustls is busted, dependency of unmaintained tide 11 | "RUSTSEC-2024-0336", 12 | # Some AES functions may panic when overflow checking is enabled. 13 | # To upgrade async-tungstenite we need a new release of tide-websockets, which seems unlikely 14 | # https://github.com/http-rs/tide-websockets 15 | "RUSTSEC-2025-0009", 16 | # Still here because of `ark-relations` crate. 
17 | "RUSTSEC-2025-0055", 18 | ] 19 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [net] 2 | git-fetch-with-cli = true -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .cargo 2 | .git 3 | target/nix/ 4 | target*/debug 5 | target*/**/.fingerprint 6 | target*/**/build 7 | target*/**/deps 8 | target*/**/examples 9 | target*/**/incremental 10 | target*/**/doc 11 | node_modules/ 12 | -------------------------------------------------------------------------------- /.env.deploy.esp.example: -------------------------------------------------------------------------------- 1 | ESPRESSO_SEQUENCER_ETH_MNEMONIC= 2 | ESPRESSO_DEPLOYER_ACCOUNT_INDEX= 3 | ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS= 4 | ESPRESSO_SEQUENCER_L1_PROVIDER= 5 | ESP_TOKEN_INITIAL_SUPPLY= 6 | ESP_TOKEN_NAME= 7 | ESP_TOKEN_SYMBOL= 8 | ESP_TOKEN_INITIAL_GRANT_RECIPIENT_ADDRESS= 9 | RUST_LOG=info 10 | RUST_LOG_FORMAT=json -------------------------------------------------------------------------------- /.env.docker.example: -------------------------------------------------------------------------------- 1 | ESPRESSO_SEQUENCER_ETH_MNEMONIC=test test test test test test test test test test test junk 2 | ESPRESSO_DEPLOYER_ACCOUNT_INDEX= 3 | ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS= 4 | ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS= 5 | ESPRESSO_SEQUENCER_LIGHT_CLIENT_V2_ADDRESS= 6 | ESPRESSO_SEQUENCER_PLONK_VERIFIER_ADDRESS= 7 | ESPRESSO_SEQUENCER_LIGHT_CLIENT_ADDRESS= 8 | ESPRESSO_SEQUENCER_PLONK_VERIFIER_V2_ADDRESS= -------------------------------------------------------------------------------- /.env.lightweight: -------------------------------------------------------------------------------- 1 | ESPRESSO_SEQUENCER_LIGHTWEIGHT=true 2 | ESPRESSO_SEQUENCER_DATABASE_PRUNE=true 3 | ESPRESSO_SEQUENCER_PRUNER_BATCH_SIZE=5000 4 | ESPRESSO_SEQUENCER_PRUNER_TARGET_RETENTION=3d 5 | ESPRESSO_SEQUENCER_PRUNER_MINIMUM_RETENTION=12h 6 | ESPRESSO_SEQUENCER_PRUNER_INTERVAL=2h 7 | ESPRESSO_SEQUENCER_PRUNER_MAX_USAGE=7000 8 | ESPRESSO_SEQUENCER_PRUNER_PRUNING_THRESHOLD="100 GB" -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | if ! has nix_direnv_version || ! 
nix_direnv_version 3.0.5; then 2 | source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.5/direnvrc" "sha256-RuwIS+QKFj/T9M2TFXScjBsLR6V3A17YVoEW/Q6AZ1w=" 3 | fi 4 | 5 | use nix 6 | watch_file flake.nix 7 | watch_file flake.lock 8 | watch_file rust-toolchain.toml 9 | -------------------------------------------------------------------------------- /.gas-snapshot: -------------------------------------------------------------------------------- 1 | LightClientBench:testCorrectUpdateBench() (gas: 521271) 2 | PlonkVerifier_verify_Test:test_verify_succeeds() (gas: 385415) -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | contract-bindings/src linguist-generated=true 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | # Group all updates together 6 | groups: 7 | all: 8 | patterns: 9 | - "*" 10 | schedule: 11 | interval: "daily" 12 | 13 | - package-ecosystem: "cargo" 14 | directories: 15 | - "/" 16 | - "/sequencer-sqlite" 17 | groups: 18 | # The `all` group should include mainly updates from crates.io which are 19 | # more likely to succeed without intervention. 20 | ark: 21 | patterns: 22 | - "ark-*" 23 | cdn: 24 | patterns: 25 | - "cdn-*" 26 | jf: 27 | patterns: 28 | - "jf-*" 29 | schedule: 30 | interval: "daily" 31 | -------------------------------------------------------------------------------- /.github/grcov.yml: -------------------------------------------------------------------------------- 1 | output-type: lcov 2 | ignore: 3 | - contract-bindings/* 4 | - contracts/* 5 | -------------------------------------------------------------------------------- /.github/workflows/auto-merge-dependabot.yml: -------------------------------------------------------------------------------- 1 | name: Dependabot enable auto merge 2 | 3 | on: 4 | pull_request: 5 | 6 | permissions: 7 | contents: write 8 | 9 | jobs: 10 | dependabot-auto-merge: 11 | name: Dependabot 12 | runs-on: ubuntu-latest 13 | if: ${{ github.actor == 'dependabot[bot]' && github.event_name == 'pull_request'}} 14 | steps: 15 | - name: Dependabot metadata 16 | uses: dependabot/fetch-metadata@v2.3.0 17 | id: metadata 18 | with: 19 | github-token: '${{ secrets.GITHUB_TOKEN }}' 20 | - name: Enable auto-merge for Dependabot PRs 21 | run: gh pr merge --auto --squash "$PR_URL" 22 | env: 23 | PR_URL: ${{github.event.pull_request.html_url}} 24 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 25 | -------------------------------------------------------------------------------- /.github/workflows/backport.yml: -------------------------------------------------------------------------------- 1 | # Create a backport PR to branch "foo" from a merged PR by adding a PR label "backport foo" 2 | name: Backport merged pull request 3 | on: 4 | pull_request_target: 5 | types: [closed] 6 | permissions: 7 | contents: write # so it can comment 8 | pull-requests: write # so it can create pull requests 9 | jobs: 10 | backport: 11 | name: Backport pull request 12 | runs-on: ubuntu-latest 13 | # Don't run on closed unmerged pull requests 14 | if: github.event.pull_request.merged 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Create backport pull requests 18 | uses: 
korthout/backport-action@v3 19 | -------------------------------------------------------------------------------- /.github/workflows/doc-rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust Docs 2 | on: 3 | push: 4 | branches: 5 | - "main" 6 | - "release-*" 7 | pull_request: 8 | schedule: 9 | - cron: "0 0 * * 1" 10 | workflow_dispatch: 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ (github.ref == 'refs/heads/main' && github.run_number) || github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | doc-rust: 18 | runs-on: ubuntu-24.04 19 | steps: 20 | - name: Checkout Repository 21 | uses: actions/checkout@v4 22 | 23 | - uses: taiki-e/install-action@just 24 | 25 | # NOTE: no rust cache, not a time critical job 26 | 27 | - name: Build Docs 28 | run: | 29 | just doc 30 | 31 | - name: Create documentation 32 | if: ${{ github.ref == 'refs/heads/main' }} 33 | run: | 34 | cp -R target/doc public 35 | echo '' > public/index.html 36 | 37 | - name: Deploy 38 | uses: peaceiris/actions-gh-pages@v4 39 | if: ${{ github.ref == 'refs/heads/main' }} 40 | with: 41 | github_token: ${{ secrets.GITHUB_TOKEN }} 42 | publish_dir: ./public 43 | cname: sequencer.docs.espressosys.com 44 | -------------------------------------------------------------------------------- /.github/workflows/typos.yml: -------------------------------------------------------------------------------- 1 | name: Typos 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - release-* 8 | tags: 9 | # YYYYMMDD 10 | - "20[0-9][0-9][0-1][0-9][0-3][0-9]*" 11 | pull_request: 12 | branches: 13 | workflow_dispatch: 14 | 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | typos: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v4 24 | name: Checkout Repository 25 | 26 | - name: typos-action 27 | uses: crate-ci/typos@v1.32.0 28 | -------------------------------------------------------------------------------- /.github/workflows/ubuntu-install-without-nix.yml: -------------------------------------------------------------------------------- 1 | name: Ubuntu install without nix 2 | 3 | on: 4 | schedule: 5 | - cron: "0 0 * * MON" 6 | workflow_dispatch: 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.ref }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | ubuntu: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | with: 18 | submodules: recursive 19 | 20 | - name: Enable Rust Caching 21 | uses: Swatinem/rust-cache@v2 22 | with: 23 | save-if: ${{ github.ref == 'refs/heads/main' }} 24 | 25 | - name: Install and test 26 | run: scripts/ubuntu-install-test-no-nix 27 | -------------------------------------------------------------------------------- /.github/workflows/unused-deps.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | workflow_dispatch: 4 | 5 | name: udeps 6 | 7 | jobs: 8 | check: 9 | name: Rust project 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout repository 13 | uses: actions/checkout@v4 14 | 15 | - uses: dtolnay/rust-toolchain@nightly 16 | id: toolchain 17 | 18 | # Set nightly as the default toolchain 19 | - run: rustup override set ${{steps.toolchain.outputs.name}} 20 | 21 | - uses: Swatinem/rust-cache@v2 22 | with: 23 | save-if: ${{ github.ref == 'refs/heads/main' }} 24 | 25 | - name: Run cargo-udeps 26 | uses: aig787/cargo-udeps-action@v1 27 | with: 28 | version: latest 
29 | args: --all-targets 30 | -------------------------------------------------------------------------------- /.github/workflows/update_nix.yml: -------------------------------------------------------------------------------- 1 | name: update-flake-lock 2 | 3 | on: 4 | workflow_dispatch: # allows manual triggering 5 | schedule: 6 | - cron: "0 0 * * 0" # runs weekly on Sunday at 00:00 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.ref }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | lockfile: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v4 18 | 19 | - name: Install Nix 20 | uses: cachix/install-nix-action@v31 21 | 22 | - uses: cachix/cachix-action@v16 23 | with: 24 | name: espresso-systems-private 25 | authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}" 26 | 27 | - name: Update flake.lock 28 | uses: DeterminateSystems/update-flake-lock@v24 29 | with: 30 | pr-title: "Weekly PR to bump flake.nix" # Title of PR to be created 31 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # directories 2 | broadcast 3 | cache 4 | lib 5 | node_modules 6 | out 7 | .direnv 8 | target 9 | 10 | # files 11 | *.env 12 | *.log 13 | *.json 14 | .DS_Store 15 | .pnp.* 16 | lcov.info 17 | package-lock.json 18 | pnpm-lock.yaml 19 | yarn.lock 20 | .pre-commit-config.yaml 21 | *.yaml 22 | *.yml 23 | -------------------------------------------------------------------------------- /.prettierrc.yml: -------------------------------------------------------------------------------- 1 | bracketSpacing: true 2 | printWidth: 120 3 | proseWrap: "always" 4 | singleQuote: false 5 | tabWidth: 2 6 | trailingComma: "all" 7 | useTabs: false 8 | -------------------------------------------------------------------------------- /.solhint.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "solhint:recommended", 3 | "rules": { 4 | "code-complexity": ["error", 8] 5 | , "compiler-version": ["error", ">=0.8.0"] 6 | , "const-name-snakecase": "error" 7 | , "contract-name-capwords": "off" 8 | , "func-name-mixedcase": "error" 9 | , "func-param-name-mixedcase": "error" 10 | , "func-visibility": ["error", { "ignoreConstructors": true }] 11 | , "imports-on-top": "error" 12 | , "modifier-name-mixedcase": "error" 13 | , "named-parameters-mapping": "error" 14 | , "no-console": "error" 15 | , "no-empty-blocks": ["error", { "ignoreConstructors": true }] 16 | , "no-unused-vars": "error" 17 | , "no-global-import": "off" 18 | , "var-name-mixedcase": "error" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /.typos.toml: -------------------------------------------------------------------------------- 1 | [files] 2 | extend-exclude = [ 3 | "data/initial_stake_table.toml", 4 | ".env", 5 | "*.json", 6 | "**/*.pdf", 7 | "doc/*.svg", 8 | "doc/*.puml", 9 | "**/*.drawio", 10 | "contracts/lib", 11 | "contracts/rust/adapter/src/bindings", 12 | "node-metrics/src/api/node_validator/v0/example_prometheus_metrics_output.txt", 13 | "crates/hotshot/orchestrator/run-config.toml", 14 | "crates/hotshot/macros/src/lib.rs", 15 | "crates/hotshot/types/src/light_client.rs", 16 | "staking-cli/tests/cli.rs", 17 | "sdks/go/types/common/consts.go", 18 | "data/insta_snapshots", 19 | ] 20 | 21 | [default.extend-words] 22 | Forgetten = "Forgetten" 23 | # Common "spelling" of 
"type" in Rust/go 24 | typ = "typ" 25 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in the repo. Unless a 2 | # later match takes precedence, they will be requested for review when someone 3 | # opens a pull request. 4 | 5 | * @sveitser @jbearer @tbro @imabdulbasit @ss-es @pls148 @bfish713 @rob-maron @lukaszrzasik 6 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SRC_DIR := doc 2 | SRC_FILES := $(wildcard $(SRC_DIR)/*.puml) 3 | OUT_FILES := $(patsubst %.puml,%.svg,$(SRC_FILES)) 4 | 5 | all: doc 6 | doc: $(OUT_FILES) 7 | 8 | # Create doc/xyz.svg from doc/xyz.puml 9 | $(SRC_DIR)/%.svg: $(SRC_DIR)/%.puml 10 | plantuml -tsvg -o . $< 11 | -------------------------------------------------------------------------------- /alloy-compat/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "alloy-compat" 3 | description = "Compatibility helpers and conversions from ethers and older alloy to newer alloy" 4 | version = { workspace = true } 5 | authors = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | [dependencies] 9 | alloy = { workspace = true } 10 | ethers-core = "2.0" 11 | rand = { workspace = true } 12 | serde = { workspace = true } 13 | 14 | [dev-dependencies] 15 | bincode = { workspace = true } 16 | 17 | [lints] 18 | workspace = true 19 | -------------------------------------------------------------------------------- /audits/external-reviews/EspressoHotshotLightClient-2024.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/audits/external-reviews/EspressoHotshotLightClient-2024.pdf -------------------------------------------------------------------------------- /audits/external-reviews/EspressoPlonk-2024.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/audits/external-reviews/EspressoPlonk-2024.pdf -------------------------------------------------------------------------------- /audits/internal-reviews/EspressoFeeContract-2024internal.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/audits/internal-reviews/EspressoFeeContract-2024internal.pdf -------------------------------------------------------------------------------- /audits/internal-reviews/EspressoHotshot-2024internal.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/audits/internal-reviews/EspressoHotshot-2024internal.pdf -------------------------------------------------------------------------------- /audits/internal-reviews/EspressoHotstuff2-2025internal.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/audits/internal-reviews/EspressoHotstuff2-2025internal.pdf -------------------------------------------------------------------------------- /audits/internal-reviews/EspressoSequencer-2024internal.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/audits/internal-reviews/EspressoSequencer-2024internal.pdf -------------------------------------------------------------------------------- /benchmark-stats/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = { workspace = true } 3 | name = "benchmark-stats" 4 | version = { workspace = true } 5 | edition = { workspace = true } 6 | license = "MIT" 7 | 8 | [dependencies] 9 | chrono = { workspace = true } 10 | clap = { workspace = true } 11 | csv = "1" 12 | espresso-types = { path = "../types" } 13 | hotshot-task-impls = { workspace = true } 14 | hotshot-types = { workspace = true } 15 | plotly = "0.13.5" 16 | serde = { workspace = true } 17 | -------------------------------------------------------------------------------- /client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "client" 3 | version = { workspace = true } 4 | authors = { workspace = true } 5 | edition = { workspace = true } 6 | publish = false 7 | 8 | [dependencies] 9 | alloy = { workspace = true } 10 | anyhow = { workspace = true } 11 | espresso-types = { path = "../types" } 12 | futures = { workspace = true } 13 | jf-merkle-tree-compat = { workspace = true } 14 | surf-disco = { workspace = true } 15 | tokio = { workspace = true } 16 | tracing = { workspace = true } 17 | vbs = { workspace = true } 18 | 19 | [lints] 20 | workspace = true 21 | -------------------------------------------------------------------------------- /config/ValidatorConfigFile.toml: -------------------------------------------------------------------------------- 1 | is_da = true 2 | seed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 3 | node_id = 0 4 | -------------------------------------------------------------------------------- /contracts/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiler files 2 | cache/ 3 | out/ 4 | 5 | # Ignores development broadcast logs 6 | !/broadcast 7 | /broadcast/*/31337/ 8 | /broadcast/*/1337/ 9 | /broadcast/*/900/ 10 | /broadcast/**/dry-run/ 11 | /broadcast/LightClientCallNewFinalizedState.s.sol 12 | /broadcast/LightClientTestScript.s.sol 13 | 14 | # Dotenv file 15 | .env 16 | 17 | .wake 18 | pytypes 19 | __pycache__/ 20 | *.py[cod] 21 | .hypothesis/ 22 | wake-coverage.cov -------------------------------------------------------------------------------- /contracts/demo/upgradeDemo/README.md: -------------------------------------------------------------------------------- 1 | # Purpose 2 | 3 | This subfolder was created to test various upgrade scenarios when using the UUPS proxy pattern. 
4 | 5 | ## How to run the tests 6 | 7 | `forge test --match-contract Box -vvv --summary` 8 | 9 | ## Tests 10 | 11 | The tests check for the following post upgrade: 12 | 13 | - struct modification (addition of new member) 14 | - enum modification (addition of new member) 15 | - ETH deposit maintained in upgraded version 16 | - introduction of a withdrawal function post first deployment works 17 | -------------------------------------------------------------------------------- /contracts/echidna.yaml: -------------------------------------------------------------------------------- 1 | corpusDir: "contracts/corpus" 2 | coverage: true 3 | seqLen: 100 4 | shrinkLimit: 5000 5 | -------------------------------------------------------------------------------- /contracts/rust/adapter/src/bindings/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused_imports, clippy::all, rustdoc::all)] 2 | //! This module contains the sol! generated bindings for solidity contracts. 3 | //! This is autogenerated code. 4 | //! Do not manually edit these files. 5 | //! These files may be overwritten by the codegen system at any time. 6 | pub mod r#erc1967_proxy; 7 | pub mod r#esp_token; 8 | pub mod r#esp_token_v2; 9 | pub mod r#fee_contract; 10 | pub mod r#i_plonk_verifier; 11 | pub mod r#i_reward_claim; 12 | pub mod r#light_client; 13 | pub mod r#light_client_arbitrum; 14 | pub mod r#light_client_arbitrum_v2; 15 | pub mod r#light_client_arbitrum_v3; 16 | pub mod r#light_client_mock; 17 | pub mod r#light_client_v2; 18 | pub mod r#light_client_v2_mock; 19 | pub mod r#light_client_v3; 20 | pub mod r#light_client_v3_mock; 21 | pub mod r#ops_timelock; 22 | pub mod r#ownable_upgradeable; 23 | pub mod r#plonk_verifier; 24 | pub mod r#plonk_verifier_v2; 25 | pub mod r#plonk_verifier_v3; 26 | pub mod r#reward_claim; 27 | pub mod r#safe_exit_timelock; 28 | pub mod r#stake_table; 29 | pub mod r#stake_table_v2; 30 | -------------------------------------------------------------------------------- /contracts/rust/adapter/src/copy.rs: -------------------------------------------------------------------------------- 1 | // The bindings types are small and pure data, there is no reason they 2 | // shouldn't be Copy. However some of them do have a bytes field which cannot be Copy. 3 | impl Copy for crate::sol_types::G1PointSol {} 4 | impl Copy for crate::sol_types::G2PointSol {} 5 | impl Copy for crate::sol_types::EdOnBN254PointSol {} 6 | impl Copy for crate::sol_types::StakeTableV2::ValidatorRegistered {} 7 | // schnorr sig in ValidatorRegisteredV2 uses Bytes, cannot implement copy 8 | impl Copy for crate::sol_types::StakeTableV2::ValidatorExit {} 9 | impl Copy for crate::sol_types::StakeTableV2::ConsensusKeysUpdated {} 10 | // schnorr sig in ConsensusKeysUpdatedV2 Bytes, cannot implement copy 11 | impl Copy for crate::sol_types::StakeTableV2::Delegated {} 12 | impl Copy for crate::sol_types::StakeTableV2::Undelegated {} 13 | impl Copy for crate::sol_types::stake_table_v2::BN254::G1Point {} 14 | -------------------------------------------------------------------------------- /contracts/rust/adapter/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Cross-domain (between Solidity and Rust) utilities for type conversion and testing 2 | 3 | use alloy::primitives::U256; 4 | use ark_ff::{BigInteger, PrimeField}; 5 | 6 | // See https://github.com/foundry-rs/foundry/issues/11712 regarding unused attributes 7 | #[allow(dead_code, unused_attributes)] 8 | pub(crate) mod bindings; 9 | mod copy; 10 | pub mod evm; 11 | pub mod jellyfish; 12 | pub mod light_client; 13 | pub mod reward; 14 | pub mod sol_types; 15 | pub mod stake_table; 16 | 17 | /// convert a field element to U256, panic if field size is larger than 256 bit 18 | pub fn field_to_u256<F: PrimeField>(f: F) -> U256 { 19 | if F::MODULUS_BIT_SIZE > 256 { 20 | panic!("Shouldn't convert a >256-bit field to U256"); 21 | } 22 | U256::from_le_slice(&f.into_bigint().to_bytes_le()) 23 | } 24 | 25 | /// convert U256 to a field (mod order) 26 | pub fn u256_to_field<F: PrimeField>(x: U256) -> F { 27 | let bytes: [u8; 32] = x.to_le_bytes(); 28 | F::from_le_bytes_mod_order(&bytes) 29 | } 30 | -------------------------------------------------------------------------------- /contracts/rust/deployer/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "espresso-contract-deployer" 3 | description = "Libraries and binaries to deploy contracts used in Espresso Network" 4 | version = { workspace = true } 5 | authors = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | [dependencies] 9 | alloy = { workspace = true, features = ["signer-ledger"] } 10 | anyhow = { workspace = true } 11 | clap = { workspace = true } 12 | derive_builder = "0.20.2" 13 | derive_more = { workspace = true } 14 | dotenvy = { workspace = true } 15 | espresso-types = { path = "../../../types" } 16 | hotshot-contract-adapter = { workspace = true } 17 | hotshot-types = { workspace = true } 18 | serde_json = { workspace = true } 19 | surf-disco = { workspace = true } 20 | tide-disco = { workspace = true } 21 | tokio = { workspace = true } 22 | tracing = "0.1.37" 23 | vbs = { workspace = true } 24 | 25 | [dev-dependencies] 26 | rand = { workspace = true } 27 | sequencer-utils = { version = "0.1.0", path = "../../../utils" } 28 | test-log = { workspace = true } 29 | 30 | [lints] 31 | workspace = true 32 | -------------------------------------------------------------------------------- /contracts/rust/deployer/src/proposals/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod multisig; 2 | pub mod timelock; 3 | -------------------------------------------------------------------------------- /contracts/rust/diff-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "diff-test-hotshot" 3 | description = "Helpers and mocks for Forge-powered differential tests on HotShot-related contracts" 4 | version = { workspace = true } 5 | authors = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | [[bin]] 9 | name = "diff-test" 10 | path = "src/main.rs" 11 | 12 | [dependencies] 13 | alloy = { workspace = true } 14 | ark-bn254 = { workspace = true } 15 | ark-ec = { workspace = true } 16 | ark-ed-on-bn254 = { workspace = true } 17 | ark-ff = { workspace = true } 18 | ark-poly = { workspace = true } 19 | ark-std = { workspace = true } 20 | clap = { version = "^4.4", features = ["derive"] } 21 | hotshot-contract-adapter = { workspace = true } 22 | hotshot-state-prover = { workspace = true } 23 | hotshot-types = { workspace = true } 24 | jf-pcs = { workspace = true } 25 | jf-plonk = { 
workspace = true } 26 | jf-signature = { workspace = true } 27 | jf-utils = { workspace = true } 28 | sha3 = { version = "0.10.8", default-features = false } 29 | 30 | [lints] 31 | workspace = true 32 | -------------------------------------------------------------------------------- /contracts/rust/gen-vk-contract/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "gen-vk-contract" 3 | description = "Executable for generating SNARK verification key contract" 4 | version = { workspace = true } 5 | authors = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | [dependencies] 9 | alloy = { workspace = true } 10 | ark-srs = { workspace = true } 11 | clap = { workspace = true } 12 | hotshot-contract-adapter = { workspace = true } 13 | hotshot-state-prover = { workspace = true } 14 | hotshot-types = { workspace = true } 15 | jf-pcs = { workspace = true } 16 | 17 | [lints] 18 | workspace = true 19 | -------------------------------------------------------------------------------- /contracts/script/multisigTransactionProposals/images/safeProposal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/contracts/script/multisigTransactionProposals/images/safeProposal.png -------------------------------------------------------------------------------- /contracts/script/output/defenderDeployments/LightClient.sol/11155111/12.json: -------------------------------------------------------------------------------- 1 | { 2 | "approvalProcessId": "a0dd5bf1-9766-4a2f-94b1-2a7ceb0dc5be", 3 | "approvalType": "Gnosis Safe", 4 | "multisig": "0xc56fA6505d10bF322e01327e22479DE78C3Bf1cE", 5 | "proxyAddress": "0xbC781a2BCcdac8F65EF10EA85D765CA240D1789b", 6 | "salt": 12 7 | } -------------------------------------------------------------------------------- /contracts/script/output/defenderDeployments/LightClient.sol/11155111/13.json: -------------------------------------------------------------------------------- 1 | { 2 | "multisig": "0xc56fA6505d10bF322e01327e22479DE78C3Bf1cE", 3 | "newContractName": "LightClient.sol", 4 | "proposalId": "8d347333-7e10-417e-96fd-a2b785115c58", 5 | "proxyAddress": "0xbC781a2BCcdac8F65EF10EA85D765CA240D1789b", 6 | "responseUrl": "https://app.safe.global/transactions/tx?safe=sep:0xc56fA6505d10bF322e01327e22479DE78C3Bf1cE&id=0xad6fe9ec8b6275b81e3a894fb87408fb77e9ad0020e40506aa189024fe28fae8", 7 | "salt": 13 8 | } -------------------------------------------------------------------------------- /contracts/script/output/defenderDeployments/LightClient.sol/11155111/14.json: -------------------------------------------------------------------------------- 1 | { 2 | "approvalProcessId": "a0dd5bf1-9766-4a2f-94b1-2a7ceb0dc5be", 3 | "approvalType": "Gnosis Safe", 4 | "multisig": "0xc56fA6505d10bF322e01327e22479DE78C3Bf1cE", 5 | "proxyAddress": "0xfBCb14b42e6dDC81Dd4e02Dc1E35A9581A1F2200", 6 | "salt": 14 7 | } -------------------------------------------------------------------------------- /contracts/script/output/defenderDeployments/LightClient.sol/11155111/saltHistory.json: -------------------------------------------------------------------------------- 1 | { 2 | "contractName": "LightClient.sol", 3 | "previousSalt": 14 4 | } -------------------------------------------------------------------------------- /contracts/script/output/defenderDeployments/PlonkVerifier.sol/11155111/2.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "approvalProcessId": "a0dd5bf1-9766-4a2f-94b1-2a7ceb0dc5be", 3 | "approvalType": "Gnosis Safe", 4 | "multisig": "0xc56fA6505d10bF322e01327e22479DE78C3Bf1cE", 5 | "contractAddress": "0x7807612b7F4D8241E1128F0037d056DA2Eec0242", 6 | "salt": 2 7 | } -------------------------------------------------------------------------------- /contracts/script/output/defenderDeployments/PlonkVerifier.sol/11155111/saltHistory.json: -------------------------------------------------------------------------------- 1 | { 2 | "contractName": "PlonkVerifier.sol", 3 | "previousSalt": 2 4 | } -------------------------------------------------------------------------------- /contracts/src/InitializedAt.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | pragma solidity ^0.8.0; 3 | 4 | // Store the block number when a contract was deployed, or initialized (for upgradable contracts). 5 | // 6 | // Clients can read the member variable `initializedAtBlock` to know at what L1 block they need to 7 | // start processing events. 8 | 9 | import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; 10 | 11 | contract InitializedAt is Initializable { 12 | // @notice The block number the contract was initialized at. 13 | uint256 public initializedAtBlock; 14 | 15 | constructor() { 16 | _disableInitializers(); 17 | } 18 | 19 | // @dev The `initializeAtBlock` function must be called during initialization. 20 | function initializeAtBlock() internal initializer { 21 | initializedAtBlock = block.number; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /contracts/src/LightClientArbitrum.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | 3 | pragma solidity ^0.8.0; 4 | 5 | import { LightClient } from "./LightClient.sol"; 6 | 7 | interface ArbSys { 8 | function arbBlockNumber() external view returns (uint256); 9 | } 10 | 11 | contract LightClientArbitrum is LightClient { 12 | function currentBlockNumber() public view virtual override returns (uint256) { 13 | return ArbSys(address(uint160(100))).arbBlockNumber(); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /contracts/src/LightClientArbitrumV2.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | 3 | pragma solidity ^0.8.0; 4 | 5 | import { LightClientV2 } from "./LightClientV2.sol"; 6 | 7 | interface ArbSys { 8 | function arbBlockNumber() external view returns (uint256); 9 | } 10 | 11 | contract LightClientArbitrumV2 is LightClientV2 { 12 | function currentBlockNumber() public view virtual override returns (uint256) { 13 | return ArbSys(address(uint160(100))).arbBlockNumber(); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /contracts/src/LightClientArbitrumV3.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | 3 | pragma solidity ^0.8.0; 4 | 5 | import { LightClientV3 } from "./LightClientV3.sol"; 6 | 7 | interface ArbSys { 8 | function arbBlockNumber() external view returns (uint256); 9 | } 10 | 11 | contract LightClientArbitrumV3 is LightClientV3 { 12 | function currentBlockNumber() 
public view virtual override returns (uint256) { 13 | return ArbSys(address(uint160(100))).arbBlockNumber(); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /contracts/src/OpsTimelock.sol: -------------------------------------------------------------------------------- 1 | //SPDX-License-Identifier: Unlicense 2 | pragma solidity ^0.8.0; 3 | 4 | import "@openzeppelin/contracts/governance/TimelockController.sol"; 5 | 6 | /// @title OpsTimelock 7 | /// @notice A timelock controller for contracts that require faster updates 8 | /// @dev Timelock used for operational control during early protocol phases. 9 | /// Grants privileged access to core team for upgrades or config changes 10 | /// with a short delay. 11 | contract OpsTimelock is TimelockController { 12 | constructor( 13 | uint256 minDelay, 14 | address[] memory proposers, 15 | address[] memory executors, 16 | address admin 17 | ) TimelockController(minDelay, proposers, executors, admin) { } 18 | } 19 | -------------------------------------------------------------------------------- /contracts/src/SafeExitTimelock.sol: -------------------------------------------------------------------------------- 1 | //SPDX-License-Identifier: Unlicense 2 | pragma solidity ^0.8.0; 3 | 4 | import "@openzeppelin/contracts/governance/TimelockController.sol"; 5 | 6 | /// @title SafeExitTimelock 7 | /// @notice A timelock controller for contracts that can have a long delay before updates are 8 | /// applied 9 | /// @dev The delay on the contract is long enough for users to exit the system if they do not agree 10 | /// with the update 11 | contract SafeExitTimelock is TimelockController { 12 | constructor( 13 | uint256 minDelay, 14 | address[] memory proposers, 15 | address[] memory executors, 16 | address admin 17 | ) TimelockController(minDelay, proposers, executors, admin) { } 18 | } 19 | -------------------------------------------------------------------------------- /contracts/src/interfaces/ILightClient.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity ^0.8.0; 3 | 4 | interface ILightClient { 5 | function blocksPerEpoch() external view returns (uint64); 6 | } 7 | -------------------------------------------------------------------------------- /contracts/src/interfaces/IRewardClaim.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity ^0.8.28; 3 | 4 | interface IRewardClaim { 5 | /// @notice User claimed rewards 6 | event RewardsClaimed(address indexed user, uint256 amount); 7 | 8 | /// @notice Unable to authenticate rewards against Light Client contract 9 | error InvalidAuthRoot(); 10 | 11 | /// @notice All available rewards already claimed 12 | error AlreadyClaimed(); 13 | 14 | /// @notice Reward amount must be greater than zero 15 | error InvalidRewardAmount(); 16 | 17 | /// @notice A claim would exceed the remaining daily capacity 18 | error DailyLimitExceeded(); 19 | 20 | /// @notice Claim staking rewards 21 | /// 22 | /// @param lifetimeRewards Total earned lifetime rewards for the user @param 23 | /// @param authData inputs required for authentication of lifetime rewards amount. 24 | /// 25 | /// @notice Obtain authData from the Espresso query service API. 
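///
/// @dev Illustrative call sequence (a sketch; the variable names and amounts are
/// hypothetical, and `authData` is passed through exactly as returned by the query
/// service):
///
///     uint256 lifetimeRewards = 5 ether;            // total lifetime rewards reported for the caller
///     bytes memory authData = queryServiceResponse; // opaque authentication payload
///     rewardClaim.claimRewards(lifetimeRewards, authData);
///
/// Re-submitting an unchanged lifetime total is expected to revert with `AlreadyClaimed()`.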
26 | function claimRewards(uint256 lifetimeRewards, bytes calldata authData) external; 27 | 28 | /// @notice Check amount of rewards claimed by a user 29 | function claimedRewards(address claimer) external view returns (uint256); 30 | } 31 | -------------------------------------------------------------------------------- /contracts/src/libraries/EdOnBn254.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | 3 | pragma solidity ^0.8.0; 4 | 5 | /// @notice Edward curve on BN254. 6 | /// This library only implements a serialization function that is consistent with 7 | /// Arkworks' format. It does not support any group operations. 8 | library EdOnBN254 { 9 | uint256 public constant P_MOD = 10 | 21888242871839275222246405745257275088548364400416034343698204186575808495617; 11 | 12 | struct EdOnBN254Point { 13 | uint256 x; 14 | uint256 y; 15 | } 16 | 17 | /// @dev Check if y-coordinate of G1 point is negative. 18 | function isYNegative(EdOnBN254Point memory point) internal pure returns (bool) { 19 | return (point.y << 1) < P_MOD; 20 | } 21 | 22 | /// @dev Check if two points are equal 23 | function isEqual(EdOnBN254Point memory a, EdOnBN254Point memory b) 24 | internal 25 | pure 26 | returns (bool) 27 | { 28 | return a.x == b.x && a.y == b.y; 29 | } 30 | 31 | // TODO: (alex) add `validatePoint` methods and tests 32 | } 33 | -------------------------------------------------------------------------------- /contracts/test/LightClientArbitrumV2.t.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Unlicensed 2 | pragma solidity ^0.8.0; 3 | 4 | import "forge-std/Test.sol"; 5 | import { LightClientArbitrumV2, ArbSys } from "../src/LightClientArbitrumV2.sol"; 6 | 7 | contract MockArbSys is ArbSys { 8 | function arbBlockNumber() external pure override returns (uint256) { 9 | return 123456; 10 | } 11 | } 12 | 13 | contract LightClientArbitrumV2Test is Test { 14 | LightClientArbitrumV2 public lc; 15 | MockArbSys mockArbsys; 16 | 17 | function setUp() public { 18 | vm.createSelectFork("https://arb1.arbitrum.io/rpc"); 19 | mockArbsys = new MockArbSys(); 20 | vm.etch(address(100), address(mockArbsys).code); // Replace address(100) with mock 21 | // implementation 22 | lc = new LightClientArbitrumV2(); 23 | } 24 | 25 | function testCurrentBlockNumber() public view { 26 | assertNotEq(lc.currentBlockNumber(), block.number); 27 | assertEq(lc.currentBlockNumber(), ArbSys(address(uint160(100))).arbBlockNumber()); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /contracts/test/LightClientBenchmark.t.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Unlicensed 2 | 3 | /* solhint-disable contract-name-camelcase, func-name-mixedcase, one-contract-per-file */ 4 | 5 | pragma solidity ^0.8.0; 6 | 7 | // Libraries 8 | import "forge-std/Test.sol"; 9 | import { IPlonkVerifier as V } from "../src/interfaces/IPlonkVerifier.sol"; 10 | 11 | // Target contract 12 | import { LightClient as LC } from "../src/LightClient.sol"; 13 | import { LightClientCommonTest } from "./LightClientV3.t.sol"; 14 | 15 | contract LightClientBench is LightClientCommonTest { 16 | constructor() { 17 | init(); 18 | } 19 | 20 | /// @dev for benchmarking purposes only 21 | function testCorrectUpdateBench() external { 22 | vm.pauseGasMetering(); 23 | ( 24 | LC.LightClientState memory newState, 
25 | LC.StakeTableState memory nextStakeTable, 26 | uint256 newAuthRoot, 27 | V.PlonkProof memory newProof 28 | ) = genStateProof(); 29 | 30 | vm.prank(prover); 31 | vm.resumeGasMetering(); 32 | lc.newFinalizedState(newState, nextStakeTable, newAuthRoot, newProof); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /contracts/test/RewardClaimAdmin.t.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | 3 | /* solhint-disable func-name-mixedcase */ 4 | 5 | pragma solidity ^0.8.28; 6 | 7 | import "./RewardClaim.t.sol"; 8 | 9 | contract RewardClaimAdminTest is RewardClaimTest { 10 | function test_SetDailyLimit_Success() public { 11 | uint256 newLimit = DAILY_LIMIT * 2; 12 | 13 | vm.prank(owner); 14 | vm.expectEmit(); 15 | emit RewardClaim.DailyLimitUpdated(DAILY_LIMIT, newLimit); 16 | rewardClaim.setDailyLimit(newLimit); 17 | 18 | assertEq(rewardClaim.dailyLimit(), newLimit); 19 | } 20 | 21 | function test_SetDailyLimit_RevertsNonOwner() public { 22 | vm.prank(claimer); 23 | vm.expectRevert(); 24 | rewardClaim.setDailyLimit(DAILY_LIMIT * 2); 25 | } 26 | 27 | function test_SetDailyLimit_RevertsZero() public { 28 | vm.prank(owner); 29 | vm.expectRevert(RewardClaim.ZeroDailyLimit.selector); 30 | rewardClaim.setDailyLimit(0); 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /contracts/test/RewardClaimUpgrade.t.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | 3 | /* solhint-disable func-name-mixedcase */ 4 | 5 | pragma solidity ^0.8.28; 6 | 7 | import "./RewardClaim.t.sol"; 8 | import { OwnableUpgradeable } from 9 | "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; 10 | 11 | contract RewardClaimUpgradeTest is RewardClaimTest { 12 | function test_Upgrade_OnlyOwner() public { 13 | address newImpl = address(new RewardClaim()); 14 | 15 | vm.prank(owner); 16 | rewardClaim.upgradeToAndCall(newImpl, ""); 17 | } 18 | 19 | function test_Upgrade_RevertsNonOwner() public { 20 | address newImpl = address(new RewardClaim()); 21 | address attacker = makeAddr("attacker"); 22 | 23 | vm.prank(attacker); 24 | vm.expectRevert( 25 | abi.encodeWithSelector(OwnableUpgradeable.OwnableUnauthorizedAccount.selector, attacker) 26 | ); 27 | rewardClaim.upgradeToAndCall(newImpl, ""); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /contracts/test/mocks/LightClientV3Fake.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | 3 | pragma solidity ^0.8.0; 4 | 5 | import { LightClientV2Fake } from "./LightClientV2Fake.sol"; 6 | 7 | /// @dev a Fake V3 for testing upgradability purposes only 8 | contract LightClientV3Fake is LightClientV2Fake { 9 | uint256 public anotherField; 10 | 11 | /// @notice Initialize v3 12 | /// @param _newField New field amount 13 | /// @dev the reinitializer modifier is used to reinitialize new versions of a contract and 14 | /// is called at most once. The modifier has an uint64 argument which indicates the next 15 | /// contract version. 16 | /// when the base implementation contract is initialized for the first time, the _initialized 17 | /// version 18 | /// is set to 1. Since this is the 3rd implementation, the next contract version is 3. 
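///
/// For example (a sketch; the proxy handle and the field value are hypothetical), an
/// upgrade test would deploy the new implementation and reinitialize it in one call:
///
///     proxy.upgradeToAndCall(
///         address(new LightClientV3Fake()),
///         abi.encodeCall(LightClientV3Fake.initializeV3, (42))
///     );
///
/// A second call to `initializeV3` reverts, since the reinitializer can only advance the
/// initialized version once.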
19 | function initializeV3(uint256 _newField) external reinitializer(3) { 20 | anotherField = _newField; 21 | } 22 | 23 | /// @notice Use this to get the implementation contract version 24 | function getVersion() 25 | public 26 | pure 27 | virtual 28 | override 29 | returns (uint8 majorVersion, uint8 minorVersion, uint8 patchVersion) 30 | { 31 | return (3, 0, 0); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /contracts/test/mocks/MockRewardClaim.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | pragma solidity ^0.8.28; 3 | 4 | import "../../src/RewardClaim.sol"; 5 | 6 | contract MockRewardClaim is RewardClaim { 7 | function _verifyAuthRoot(uint256, bytes memory) internal pure override returns (bool) { 8 | return true; 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /crates/hotshot-builder/legacy/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot Builder Protocol. 3 | // 4 | 5 | // Builder Phase 1 6 | // It mainly provides three API services to hotshot proposers: 7 | // 1. Serves a proposer(leader)'s request to provide blocks information 8 | // 2. Serves a proposer(leader)'s request to provide the full blocks information 9 | // 3. Serves a proposer(leader)'s request to provide the block header information 10 | 11 | // It also provides one API services external users: 12 | // 1. Serves a user's request to submit a private transaction 13 | 14 | // providing the core services to support above API services 15 | pub mod builder_state; 16 | 17 | // Core interaction with the HotShot network 18 | pub mod service; 19 | 20 | // tracking the testing 21 | #[cfg(test)] 22 | pub mod testing; 23 | -------------------------------------------------------------------------------- /crates/hotshot-builder/refactored/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Builder Phase 1 2 | //! It mainly provides three API services to hotshot proposers: 3 | //! 1. Serves a proposer(leader)'s request to provide blocks information 4 | //! 2. Serves a proposer(leader)'s request to provide the full blocks information 5 | //! 3. Serves a proposer(leader)'s request to provide the block header information 6 | //! 7 | //! It also provides one API service to external users: 8 | //! 1. 
Serves a user's request to submit a private transaction 9 | 10 | pub mod block_size_limits; 11 | pub mod block_store; 12 | pub mod service; 13 | 14 | // tracking the testing 15 | #[cfg(test)] 16 | pub mod testing; 17 | -------------------------------------------------------------------------------- /crates/hotshot-builder/shared/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod block; 2 | pub mod coordinator; 3 | pub mod error; 4 | pub mod state; 5 | pub mod testing; 6 | pub mod utils; 7 | -------------------------------------------------------------------------------- /crates/hotshot/builder-api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hotshot-builder-api" 3 | version = "0.1.7" 4 | edition = "2021" 5 | license = "MIT" 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | async-trait = { workspace = true } 10 | clap = { workspace = true } 11 | committable = { workspace = true } 12 | derive_more = { workspace = true, features = ["from"] } 13 | futures = { workspace = true } 14 | hotshot-types = { workspace = true } 15 | serde = { workspace = true } 16 | tagged-base64 = { workspace = true } 17 | thiserror = { workspace = true } 18 | tide-disco = { workspace = true } 19 | toml = { workspace = true } 20 | vbs = { workspace = true } 21 | 22 | [lints] 23 | workspace = true 24 | -------------------------------------------------------------------------------- /crates/hotshot/builder-api/README.md: -------------------------------------------------------------------------------- 1 | # hotshot-builder-api 2 | 3 | Minimal dependencies shared API definitions for HotShot Builder protocol 4 | 5 | # HotShot Consensus Module 6 | -------------------------------------------------------------------------------- /crates/hotshot/builder-api/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | mod api; 8 | pub mod v0_1; 9 | pub mod v0_2 { 10 | pub use super::v0_1::*; 11 | pub type Version = vbs::version::StaticVersion<0, 2>; 12 | } 13 | -------------------------------------------------------------------------------- /crates/hotshot/builder-api/src/v0_1/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod block_info; 2 | pub mod builder; 3 | pub mod data_source; 4 | pub mod query_data; 5 | 6 | pub type Version = vbs::version::StaticVersion<0, 1>; 7 | -------------------------------------------------------------------------------- /crates/hotshot/builder-api/src/v0_1/query_data.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 
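//! Query data returned by the builder API.
//!
//! A minimal sketch (the node-type parameter and the JSON literal are illustrative) of how
//! a client might deserialize a response:
//!
//! ```ignore
//! let available: AvailableBlocksQueryData<TestTypes> =
//!     serde_json::from_str(r#"{"blocks": []}"#)?;
//! assert!(available.blocks.is_empty());
//! ```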
6 | 7 | use hotshot_types::traits::node_implementation::NodeType; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | use super::block_info::AvailableBlockInfo; 11 | 12 | #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq, Hash)] 13 | #[serde(bound = "")] 14 | pub struct AvailableBlocksQueryData { 15 | pub blocks: Vec>, 16 | } 17 | -------------------------------------------------------------------------------- /crates/hotshot/example-types/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hotshot-example-types" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | description = "Types and traits for the HotShot consesus module" 6 | authors = { workspace = true } 7 | license = "MIT" 8 | 9 | [features] 10 | default = [] 11 | # NOTE this is used to activate the slow tests we don't wish to run in CI 12 | slow-tests = [] 13 | 14 | [dependencies] 15 | alloy = { workspace = true } 16 | anyhow = { workspace = true } 17 | async-broadcast = { workspace = true } 18 | async-lock = { workspace = true } 19 | async-trait = { workspace = true } 20 | bincode = { workspace = true } 21 | committable = { workspace = true } 22 | hotshot = { workspace = true } 23 | hotshot-task-impls = { workspace = true } 24 | hotshot-types = { workspace = true } 25 | hotshot-utils = { workspace = true } 26 | jf-advz = { workspace = true } 27 | rand = { workspace = true } 28 | reqwest = { workspace = true } 29 | serde = { workspace = true } 30 | sha2 = { workspace = true } 31 | sha3 = "^0.10" 32 | thiserror = { workspace = true } 33 | time = { workspace = true } 34 | tokio = { workspace = true } 35 | tracing = { workspace = true } 36 | url = { workspace = true } 37 | vbs = { workspace = true } 38 | 39 | [lints] 40 | workspace = true 41 | -------------------------------------------------------------------------------- /crates/hotshot/example-types/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | /// block types 8 | pub mod block_types; 9 | 10 | /// Implementations for testing/examples 11 | pub mod state_types; 12 | 13 | /// node types 14 | pub mod node_types; 15 | 16 | /// storage types for hotshot storage 17 | pub mod storage_types; 18 | 19 | /// add a delay to async functions 20 | pub mod testable_delay; 21 | 22 | /// Implementations for test memberships 23 | pub mod membership; 24 | -------------------------------------------------------------------------------- /crates/hotshot/example-types/src/membership/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! 
elections used for consensus 8 | 9 | /// leader completely randomized every view 10 | pub mod randomized_committee; 11 | 12 | /// quorum randomized every view, with configurable overlap 13 | pub mod randomized_committee_members; 14 | 15 | /// static (round robin) committee election 16 | pub mod static_committee; 17 | 18 | /// static (round robin leader for 2 consecutive views) committee election 19 | pub mod static_committee_leader_two_views; 20 | /// two static (round robin) committees for even and odd epochs 21 | pub mod two_static_committees; 22 | 23 | /// general helpers 24 | pub mod helpers; 25 | 26 | pub mod fetcher; 27 | 28 | pub mod stake_table; 29 | 30 | pub mod strict_membership; 31 | -------------------------------------------------------------------------------- /crates/hotshot/examples/combined/orchestrator.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! Orchestrator using the web server 8 | /// types used for this example 9 | pub mod types; 10 | 11 | use hotshot::helpers::initialize_logging; 12 | use hotshot_example_types::state_types::TestTypes; 13 | use tracing::instrument; 14 | 15 | use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}; 16 | /// general infra used for this example 17 | #[path = "../infra/mod.rs"] 18 | pub mod infra; 19 | 20 | #[tokio::main] 21 | #[instrument] 22 | async fn main() { 23 | // Initialize logging 24 | initialize_logging(); 25 | 26 | let (config, orchestrator_url) = read_orchestrator_init_config::(); 27 | run_orchestrator::(OrchestratorArgs:: { 28 | url: orchestrator_url.clone(), 29 | config: config.clone(), 30 | }) 31 | .await; 32 | } 33 | -------------------------------------------------------------------------------- /crates/hotshot/examples/combined/types.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | use std::fmt::Debug; 8 | 9 | use hotshot::traits::implementations::CombinedNetworks; 10 | use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; 11 | use hotshot_types::traits::node_implementation::NodeImplementation; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | use crate::infra::CombinedDaRun; 15 | 16 | /// dummy struct so we can choose types 17 | #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] 18 | pub struct NodeImpl {} 19 | 20 | /// Convenience type alias 21 | pub type Network = CombinedNetworks; 22 | 23 | impl NodeImplementation for NodeImpl { 24 | type Network = Network; 25 | type Storage = TestStorage; 26 | } 27 | /// convenience type alias 28 | pub type ThisRun = CombinedDaRun; 29 | -------------------------------------------------------------------------------- /crates/hotshot/examples/libp2p/types.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 
3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | use std::fmt::Debug; 8 | 9 | use hotshot::traits::implementations::Libp2pNetwork; 10 | use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; 11 | use hotshot_types::traits::node_implementation::NodeImplementation; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | use crate::infra::Libp2pDaRun; 15 | 16 | /// dummy struct so we can choose types 17 | #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] 18 | pub struct NodeImpl {} 19 | 20 | /// Convenience type alias 21 | pub type Network = Libp2pNetwork; 22 | 23 | impl NodeImplementation for NodeImpl { 24 | type Network = Network; 25 | type Storage = TestStorage; 26 | } 27 | /// convenience type alias 28 | pub type ThisRun = Libp2pDaRun; 29 | -------------------------------------------------------------------------------- /crates/hotshot/examples/orchestrator.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! An orchestrator 8 | 9 | use hotshot::helpers::initialize_logging; 10 | use hotshot_example_types::state_types::TestTypes; 11 | use tracing::instrument; 12 | 13 | use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}; 14 | 15 | /// general infra used for this example 16 | #[path = "./infra/mod.rs"] 17 | pub mod infra; 18 | 19 | #[tokio::main] 20 | #[instrument] 21 | async fn main() { 22 | // Initialize logging 23 | initialize_logging(); 24 | 25 | let (config, orchestrator_url) = read_orchestrator_init_config::(); 26 | run_orchestrator::(OrchestratorArgs:: { 27 | url: orchestrator_url.clone(), 28 | config: config.clone(), 29 | }) 30 | .await; 31 | } 32 | -------------------------------------------------------------------------------- /crates/hotshot/examples/push-cdn/types.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | use hotshot::traits::{implementations::PushCdnNetwork, NodeImplementation}; 8 | use hotshot_example_types::{state_types::TestTypes, storage_types::TestStorage}; 9 | use hotshot_types::traits::node_implementation::NodeType; 10 | use serde::{Deserialize, Serialize}; 11 | 12 | use crate::infra::PushCdnDaRun; 13 | 14 | #[derive(Clone, Deserialize, Serialize, Hash, PartialEq, Eq)] 15 | /// Convenience type alias 16 | pub struct NodeImpl {} 17 | 18 | /// Convenience type alias 19 | pub type Network = PushCdnNetwork<::SignatureKey>; 20 | 21 | impl NodeImplementation for NodeImpl { 22 | type Network = Network; 23 | type Storage = TestStorage; 24 | } 25 | 26 | /// Convenience type alias 27 | pub type ThisRun = PushCdnDaRun; 28 | -------------------------------------------------------------------------------- /crates/hotshot/examples/push-cdn/validator.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 
3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! A validator 8 | use clap::Parser; 9 | use hotshot::helpers::initialize_logging; 10 | use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; 11 | use hotshot_orchestrator::client::ValidatorArgs; 12 | use tracing::{debug, instrument}; 13 | 14 | use crate::types::{Network, NodeImpl, ThisRun}; 15 | 16 | /// types used for this example 17 | pub mod types; 18 | 19 | /// general infra used for this example 20 | #[path = "../infra/mod.rs"] 21 | pub mod infra; 22 | 23 | #[tokio::main] 24 | #[instrument] 25 | async fn main() { 26 | // Initialize logging 27 | initialize_logging(); 28 | 29 | let args = ValidatorArgs::parse(); 30 | debug!("connecting to orchestrator at {:?}", args.url); 31 | infra::main_entry_point::(args).await; 32 | } 33 | -------------------------------------------------------------------------------- /crates/hotshot/hotshot/src/documentation.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | // This is prosaic documentation, we don't need clippy 8 | #![allow( 9 | clippy::all, 10 | clippy::pedantic, 11 | missing_docs, 12 | clippy::missing_docs_in_private_items, 13 | non_camel_case_types 14 | )] 15 | -------------------------------------------------------------------------------- /crates/hotshot/hotshot/src/traits/networking.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! Network access compatibility 8 | //! 9 | //! This module contains a trait abstracting over network access, as well as implementations of that 10 | //! trait. Currently this includes 11 | //! - [`MemoryNetwork`](memory_network::MemoryNetwork), an in memory testing-only implementation 12 | //! - [`Libp2pNetwork`](libp2p_network::Libp2pNetwork), a production-ready networking implementation built on top of libp2p-rs. 13 | 14 | pub mod combined_network; 15 | pub mod libp2p_network; 16 | pub mod memory_network; 17 | /// The Push CDN network 18 | pub mod push_cdn_network; 19 | 20 | pub use hotshot_types::traits::network::{NetworkError, NetworkReliability}; 21 | -------------------------------------------------------------------------------- /crates/hotshot/hotshot/src/traits/node_implementation.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! Composite trait for node behavior 8 | //! 9 | //! This module defines the [`NodeImplementation`] trait, which is a composite trait used for 10 | //! describing the overall behavior of a node, as a composition of implementations of the node trait. 
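//!
//! A minimal sketch of the pattern used throughout the examples in this repository (the
//! network and storage types shown are illustrative stand-ins):
//!
//! ```ignore
//! #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)]
//! pub struct NodeImpl {}
//!
//! impl NodeImplementation<TestTypes> for NodeImpl {
//!     type Network = CombinedNetworks<TestTypes>;
//!     type Storage = TestStorage<TestTypes>;
//! }
//! ```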
11 | 12 | pub use hotshot_types::traits::node_implementation::{ 13 | NodeImplementation, TestableNodeImplementation, 14 | }; 15 | -------------------------------------------------------------------------------- /crates/hotshot/hotshot/src/types.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | mod event; 8 | mod handle; 9 | 10 | pub use event::{Event, EventType}; 11 | pub use handle::SystemContextHandle; 12 | pub use hotshot_types::{ 13 | message::Message, 14 | signature_key::{BLSPrivKey, BLSPubKey, SchnorrPrivKey, SchnorrPubKey}, 15 | traits::signature_key::SignatureKey, 16 | }; 17 | -------------------------------------------------------------------------------- /crates/hotshot/hotshot/src/types/event.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! Events that a [`SystemContext`](crate::SystemContext) instance can emit 8 | 9 | pub use hotshot_types::event::{Event, EventType}; 10 | -------------------------------------------------------------------------------- /crates/hotshot/libp2p-networking/.cargo/config: -------------------------------------------------------------------------------- 1 | [net] 2 | git-fetch-with-cli = true -------------------------------------------------------------------------------- /crates/hotshot/libp2p-networking/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /result 3 | /outfile_0 4 | /out*.txt 5 | -------------------------------------------------------------------------------- /crates/hotshot/libp2p-networking/flamegraph.sh: -------------------------------------------------------------------------------- 1 | sudo nix develop -c flamegraph -- $(fd -I "counter*" -t x | rg debug) test_request_response_one_round 2 | -------------------------------------------------------------------------------- /crates/hotshot/libp2p-networking/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! 
Library for p2p communication 8 | 9 | /// Network logic 10 | pub mod network; 11 | 12 | /// symbols needed to implement a networking instance over libp2p-netorking 13 | pub mod reexport { 14 | pub use libp2p::{request_response::ResponseChannel, Multiaddr}; 15 | pub use libp2p_identity::PeerId; 16 | } 17 | -------------------------------------------------------------------------------- /crates/hotshot/libp2p-networking/src/network/behaviours/dht/store/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod persistent; 2 | pub mod validated; 3 | -------------------------------------------------------------------------------- /crates/hotshot/libp2p-networking/src/network/behaviours/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | /// Wrapper around `RequestResponse` 8 | pub mod direct_message; 9 | 10 | /// exponential backoff type 11 | pub mod exponential_backoff; 12 | 13 | /// Wrapper around Kademlia 14 | pub mod dht; 15 | -------------------------------------------------------------------------------- /crates/hotshot/macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hotshot-macros" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | description = "Macros for hotshot tests" 6 | license = "MIT" 7 | 8 | [lib] 9 | proc-macro = true 10 | 11 | [dependencies] 12 | derive_builder = { workspace = true } 13 | proc-macro2 = "1" 14 | # proc macro stuff 15 | quote = "1" 16 | syn = { version = "2", features = ["full", "extra-traits"] } 17 | 18 | [lints] 19 | workspace = true 20 | -------------------------------------------------------------------------------- /crates/hotshot/orchestrator/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hotshot-orchestrator" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | license = "MIT" 6 | 7 | [dependencies] 8 | alloy = { workspace = true } 9 | anyhow = { workspace = true } 10 | async-lock = { workspace = true } 11 | blake3 = { workspace = true } 12 | clap = { workspace = true } 13 | csv = "1" 14 | futures = { workspace = true } 15 | hotshot-types = { workspace = true } 16 | libp2p-identity = { workspace = true } 17 | multiaddr = { workspace = true } 18 | serde = { workspace = true } 19 | surf-disco = { workspace = true } 20 | tide-disco = { workspace = true } 21 | tokio = { workspace = true } 22 | toml = { workspace = true } 23 | tracing = { workspace = true } 24 | vbs = { workspace = true } 25 | 26 | [lints] 27 | workspace = true 28 | -------------------------------------------------------------------------------- /crates/hotshot/orchestrator/README.md: -------------------------------------------------------------------------------- 1 | # Orchestrator 2 | 3 | This crate implements an orchestrator that coordinates starting the network with a particular configuration. It is 4 | useful for testing and benchmarking. Like the web server, the orchestrator is built using 5 | [Tide Disco](https://github.com/EspressoSystems/tide-disco). 
6 | 7 | To run the orchestrator: `just example orchestrator http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` 8 | -------------------------------------------------------------------------------- /crates/hotshot/task-impls/HotShot_event_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/crates/hotshot/task-impls/HotShot_event_architecture.png -------------------------------------------------------------------------------- /crates/hotshot/task-impls/README.md: -------------------------------------------------------------------------------- 1 | HotShot uses an event-based architecture. This architecture is made of 4 main tasks: Network Task, View Sync Task, 2 | Consensus Task, and DA Task. The Network Task handles all incoming and outgoing messages. It forwards incoming messages 3 | to the correct task and listens for outgoing messages from the other tasks. The View Sync Task coordinates the view sync 4 | protocol. It listens for timeout events from the Consensus Task. Once a certain threshold of timeouts seen has been 5 | reached, the View Sync Task starts the View Sync protocol to bring the network back into agreement on which view it 6 | should be in. The Consensus Task handles the core HotShot consensus logic. It manages replicas that listen for quorum 7 | proposals and vote on them, leaders who send quorum proposals, and next leaders who listen for quorum votes and form 8 | QCs. The DA task handles the data availability protocol of HotShot. It listens for DA proposals, sends DA proposals, and 9 | forms a Data Availability Certificate (DAC) 10 | 11 | A diagram of how events interact with each task is below: ![HotShot Event Architecture](HotShot_event_architecture.png) 12 | 13 | For more information about each event see `./src/events.rs` 14 | -------------------------------------------------------------------------------- /crates/hotshot/task/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = { workspace = true } 3 | name = "hotshot-task" 4 | version = { workspace = true } 5 | edition = { workspace = true } 6 | license = "MIT" 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | async-broadcast = { workspace = true } 11 | async-trait = { workspace = true } 12 | futures = { workspace = true } 13 | hotshot-utils = { workspace = true } 14 | tokio = { workspace = true, features = [ 15 | "time", 16 | "rt-multi-thread", 17 | "macros", 18 | "sync", 19 | ] } 20 | tracing = { workspace = true } 21 | 22 | [lints] 23 | workspace = true 24 | -------------------------------------------------------------------------------- /crates/hotshot/task/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! 
Task primitives for `HotShot` 8 | 9 | /// Simple Dependency types 10 | pub mod dependency; 11 | /// Task which can uses dependencies 12 | pub mod dependency_task; 13 | /// Basic task types 14 | pub mod task; 15 | -------------------------------------------------------------------------------- /crates/hotshot/testing/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /out*.txt 3 | -------------------------------------------------------------------------------- /crates/hotshot/testing/src/byzantine/mod.rs: -------------------------------------------------------------------------------- 1 | /// Byzantine definitions and implementations of different behaviours 2 | pub mod byzantine_behaviour; 3 | -------------------------------------------------------------------------------- /crates/hotshot/testing/src/node_stake.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use alloy::primitives::U256; 4 | 5 | #[derive(Clone)] 6 | pub struct TestNodeStakes { 7 | stakes: HashMap, 8 | default_stake: U256, 9 | } 10 | 11 | impl TestNodeStakes { 12 | pub fn new(stakes: HashMap, default_stake: U256) -> Self { 13 | Self { 14 | stakes, 15 | default_stake, 16 | } 17 | } 18 | 19 | pub fn get(&self, node_id: u64) -> U256 { 20 | self.stakes 21 | .get(&node_id) 22 | .cloned() 23 | .unwrap_or(self.default_stake) 24 | } 25 | 26 | pub fn with_stake(mut self, node_id: u64, stake: U256) -> Self { 27 | self.stakes.insert(node_id, stake); 28 | self 29 | } 30 | } 31 | 32 | impl Default for TestNodeStakes { 33 | fn default() -> Self { 34 | Self { 35 | stakes: HashMap::new(), 36 | default_stake: U256::from(1), 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /crates/hotshot/testing/src/predicates/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | pub mod event; 8 | pub mod upgrade_with_proposal; 9 | pub mod upgrade_with_vote; 10 | 11 | use async_trait::async_trait; 12 | 13 | #[derive(Eq, PartialEq, Copy, Clone, Debug)] 14 | pub enum PredicateResult { 15 | Pass, 16 | 17 | Fail, 18 | 19 | Incomplete, 20 | } 21 | 22 | impl From for PredicateResult { 23 | fn from(boolean: bool) -> Self { 24 | match boolean { 25 | true => PredicateResult::Pass, 26 | false => PredicateResult::Fail, 27 | } 28 | } 29 | } 30 | 31 | #[async_trait] 32 | pub trait Predicate: std::fmt::Debug { 33 | async fn evaluate(&self, input: &INPUT) -> PredicateResult; 34 | async fn info(&self) -> String; 35 | } 36 | -------------------------------------------------------------------------------- /crates/hotshot/testing/src/test_helpers.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 
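//! Helpers for constructing fake consensus views in tests.
//!
//! A minimal usage sketch (the `leaf` value is assumed to come from the surrounding test
//! fixture):
//!
//! ```ignore
//! let view = create_fake_view_with_leaf(leaf.clone());
//! // The fake view wraps the leaf commitment together with a default validated state.
//! assert!(matches!(view.view_inner, ViewInner::Leaf { .. }));
//! ```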
6 | 7 | use committable::Committable; 8 | use hotshot_example_types::{node_types::TestTypes, state_types::TestValidatedState}; 9 | use hotshot_types::{ 10 | data::Leaf, 11 | utils::{View, ViewInner}, 12 | }; 13 | /// This function will create a fake [`View`] from a provided [`Leaf`]. 14 | pub fn create_fake_view_with_leaf(leaf: Leaf) -> View { 15 | create_fake_view_with_leaf_and_state(leaf, TestValidatedState::default()) 16 | } 17 | 18 | /// This function will create a fake [`View`] from a provided [`Leaf`] and `state`. 19 | pub fn create_fake_view_with_leaf_and_state( 20 | leaf: Leaf, 21 | state: TestValidatedState, 22 | ) -> View { 23 | View { 24 | view_inner: ViewInner::Leaf { 25 | leaf: leaf.commit(), 26 | state: state.into(), 27 | delta: None, 28 | }, 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /crates/hotshot/testing/tests/tests_1.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | mod tests_1 { 8 | automod::dir!("tests/tests_1"); 9 | } 10 | -------------------------------------------------------------------------------- /crates/hotshot/testing/tests/tests_2.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | mod tests_2 { 8 | automod::dir!("tests/tests_2"); 9 | } 10 | -------------------------------------------------------------------------------- /crates/hotshot/testing/tests/tests_3.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | mod tests_3 { 8 | automod::dir!("tests/tests_3"); 9 | } 10 | -------------------------------------------------------------------------------- /crates/hotshot/testing/tests/tests_4.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | mod tests_4 { 8 | automod::dir!("tests/tests_4"); 9 | } 10 | -------------------------------------------------------------------------------- /crates/hotshot/testing/tests/tests_5.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 
6 | 7 | mod tests_5 { 8 | automod::dir!("tests/tests_5"); 9 | } 10 | -------------------------------------------------------------------------------- /crates/hotshot/types/bin/mnemonic.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use hotshot_types::{signature_key::BLSPubKey, utils::mnemonic}; 4 | 5 | pub fn main() { 6 | let args: Vec = env::args().collect(); 7 | 8 | let keys: Vec<_> = args[1..].to_vec(); 9 | 10 | println!("\nKeys:\n"); 11 | 12 | for key in &keys { 13 | println!("{key}"); 14 | } 15 | 16 | println!("\nMnemonics:\n"); 17 | 18 | for key in keys { 19 | let mnemonic = mnemonic( 20 | BLSPubKey::try_from(&tagged_base64::TaggedBase64::parse(&key).unwrap()).unwrap(), 21 | ); 22 | 23 | println!("{mnemonic}"); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /crates/hotshot/types/src/bundle.rs: -------------------------------------------------------------------------------- 1 | //! This module provides the `Bundle` type 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::traits::{ 6 | block_contents::BuilderFee, node_implementation::NodeType, signature_key::BuilderSignatureKey, 7 | BlockPayload, 8 | }; 9 | 10 | #[derive(Clone, Debug, Serialize, Deserialize)] 11 | #[serde(bound = "TYPES: NodeType")] 12 | /// The Bundle for a portion of a block, provided by a downstream 13 | /// builder that exists in a bundle auction. 14 | /// This type is maintained by HotShot 15 | pub struct Bundle { 16 | /// The bundle transactions sent by the builder. 17 | pub transactions: Vec<>::Transaction>, 18 | 19 | /// The signature over the bundle. 20 | pub signature: ::BuilderSignature, 21 | 22 | /// The fee for sequencing 23 | pub sequencing_fee: BuilderFee, 24 | } 25 | -------------------------------------------------------------------------------- /crates/hotshot/types/src/traits.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! Common traits for the `HotShot` protocol 8 | pub mod block_contents; 9 | pub mod consensus_api; 10 | pub mod election; 11 | pub mod metrics; 12 | pub mod network; 13 | pub mod node_implementation; 14 | pub mod qc; 15 | pub mod signature_key; 16 | pub mod states; 17 | pub mod storage; 18 | 19 | pub use block_contents::{BlockPayload, EncodeBytes}; 20 | pub use states::ValidatedState; 21 | -------------------------------------------------------------------------------- /crates/hotshot/types/src/vid.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! This module provides: 8 | //! - an opaque constructor [`vid_scheme`] that returns a new instance of a 9 | //! VID scheme. 10 | //! - type aliases [`VidCommitment`], [`VidCommon`], [`VidShare`] 11 | //! for [`VidScheme`] assoc types. 12 | //! 13 | //! Purpose: the specific choice of VID scheme is an implementation detail. 14 | //! This crate and all downstream crates should talk to the VID scheme only 15 | //! via the traits exposed here. 
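//!
//! A minimal sketch of the intended call pattern for the AVID-M flavour (the stake-weight
//! value is illustrative; see the `avidm` module below for the actual constructor):
//!
//! ```ignore
//! use hotshot_types::vid::avidm::init_avidm_param;
//!
//! // Scheme parameters are derived from the total stake weight; the recovery threshold
//! // is computed internally as total_weight.div_ceil(3).
//! let param = init_avidm_param(100).expect("valid AVID-M parameters");
//! ```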
16 | 17 | #![allow(missing_docs)] 18 | 19 | pub mod advz; 20 | pub mod avidm; 21 | -------------------------------------------------------------------------------- /crates/hotshot/types/src/vid/avidm.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2024 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot repository. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HotShot repository. If not, see . 6 | 7 | //! Provides the implementation for AVID-M scheme 8 | 9 | use hotshot_utils::anytrace::*; 10 | 11 | pub type AvidMScheme = vid::avid_m::namespaced::NsAvidMScheme; 12 | pub type AvidMParam = vid::avid_m::namespaced::NsAvidMParam; 13 | pub type AvidMCommitment = vid::avid_m::namespaced::NsAvidMCommit; 14 | pub type AvidMShare = vid::avid_m::namespaced::NsAvidMShare; 15 | pub type AvidMCommon = AvidMParam; 16 | 17 | pub fn init_avidm_param(total_weight: usize) -> Result { 18 | let recovery_threshold = total_weight.div_ceil(3); 19 | AvidMParam::new(recovery_threshold, total_weight) 20 | .map_err(|err| error!("Failed to initialize VID: {}", err.to_string())) 21 | } 22 | -------------------------------------------------------------------------------- /crates/hotshot/utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hotshot-utils" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | description = "Utils" 6 | license = "MIT" 7 | 8 | [dependencies] 9 | tracing = { workspace = true } 10 | 11 | [lints] 12 | workspace = true 13 | -------------------------------------------------------------------------------- /crates/hotshot/utils/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! General (not HotShot-specific) utilities 2 | 3 | /// Error utilities, intended to function as a replacement to `anyhow`. 4 | pub mod anytrace; 5 | -------------------------------------------------------------------------------- /cross-shell.nix: -------------------------------------------------------------------------------- 1 | # A simplest nix shell file with the project dependencies and 2 | # a cross-compilation support. 3 | { pkgs, envVars, rustShellHook }: 4 | pkgs.mkShell (envVars // { 5 | # Native project dependencies like build utilities and additional routines 6 | # like container building, linters, etc. 7 | nativeBuildInputs = with pkgs.pkgsBuildHost; [ 8 | # Rust 9 | (rust-bin.stable.latest.minimal.override { 10 | extensions = [ "rustfmt" "clippy" "llvm-tools-preview" "rust-src" ]; 11 | }) 12 | 13 | # Will add some dependencies like libiconv 14 | rustBuildHostDependencies 15 | 16 | # Crate dependencies 17 | cargoDeps.openssl-sys 18 | protobuf # required by libp2p 19 | 20 | openssh 21 | ]; 22 | # Libraries essential to build the service binaries 23 | buildInputs = with pkgs; [ 24 | # Enable Rust cross-compilation support 25 | rustCrossHook 26 | ]; 27 | 28 | shellHook = rustShellHook; 29 | }) 30 | -------------------------------------------------------------------------------- /data/README.md: -------------------------------------------------------------------------------- 1 | # Reference Data 2 | 3 | This directory contains reference instantiations of the data types used by the sequencer which have a stable 4 | language-agnostic interface for serialization (in both `.json` files and binary `.bin` files) and cryptographic 5 | commitments. 
The objects in this directory have well-known commitments. They serve as examples of the data formats used 6 | by the Espresso Sequencer, and can be used as test cases for ports of the serialization and commitment algorithms to 7 | other languages. 8 | 9 | The Rust module `espresso-types::reference_tests` contains test cases which are designed to fail if the serialization 10 | format or commitment scheme for any of these data types changes. If you make a breaking change, you may need to update 11 | these reference objects as well. Running those tests will also print out information about the commitments of these 12 | reference objects, which can be useful for generating test cases for ports. To run them and get the output, use 13 | 14 | ```bash 15 | cargo test --all-features -p espresso-types -- --nocapture --test-threads 1 reference_tests 16 | ``` 17 | -------------------------------------------------------------------------------- /data/genesis/benchmark.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.4" 2 | upgrade_version = "0.4" 3 | genesis_version = "0.4" 4 | epoch_height = 3000 5 | drb_difficulty = 10 6 | drb_upgrade_difficulty = 10 7 | epoch_start_block = 1000 8 | 9 | [stake_table] 10 | capacity = 200 11 | 12 | [chain_config] 13 | chain_id = 999999999 14 | base_fee = "1 wei" 15 | max_block_size = "100mb" 16 | fee_recipient = "0x0000000000000000000000000000000000000000" 17 | fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" 18 | stake_table_contract = "0x196dbcbb54b8ec4958c959d8949ebfe87ac2aaaf" 19 | 20 | [header] 21 | timestamp = "1970-01-01T00:00:00Z" 22 | 23 | [header.chain_config] 24 | chain_id = 999999999 25 | base_fee = "1 wei" 26 | max_block_size = "100mb" 27 | fee_recipient = "0x0000000000000000000000000000000000000000" 28 | fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" 29 | stake_table_contract = "0x196dbcbb54b8ec4958c959d8949ebfe87ac2aaaf" 30 | 31 | [l1_finalized] 32 | number = 0 # Block where fee contract is deployed -------------------------------------------------------------------------------- /data/genesis/cappuccino.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.1" 2 | upgrade_version = "0.2" 3 | 4 | [stake_table] 5 | capacity = 200 6 | 7 | [chain_config] 8 | chain_id = 0 9 | base_fee = "0 wei" 10 | max_block_size = "30mb" 11 | fee_recipient = "0x0000000000000000000000000000000000000000" 12 | 13 | [header] 14 | timestamp = "1970-01-01T00:00:00Z" 15 | 16 | [[upgrade]] 17 | version = "0.2" 18 | start_proposing_time = "2024-09-18T15:00:00Z" 19 | stop_proposing_time = "2024-09-19T15:00:00Z" 20 | 21 | [upgrade.fee] 22 | 23 | [upgrade.fee.chain_config] 24 | chain_id = 0 25 | base_fee = "1 wei" 26 | max_block_size = "30mb" 27 | fee_recipient = "0x0000000000000000000000000000000000000000" 28 | fee_contract = "0x9d08cb3361b071ec1e2f3c206ac9c08d67385547" 29 | 30 | [l1_finalized] 31 | number = 0 32 | -------------------------------------------------------------------------------- /data/genesis/cocoa.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.2" 2 | upgrade_version = "0.3" 3 | 4 | [stake_table] 5 | capacity = 200 6 | 7 | [chain_config] 8 | chain_id = 888888888 9 | base_fee = "1 wei" 10 | max_block_size = "1mb" 11 | fee_recipient = "0x0000000000000000000000000000000000000000" 12 | fee_contract = "0x553adde5f01ff839f98574e3f4f50d9b503f70f3" 13 | 14 | [header] 15 | 
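# The timestamp below is the UNIX epoch, matching the other genesis configs in this directory.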
timestamp = "1970-01-01T00:00:00Z" 16 | 17 | [l1_finalized] 18 | number = 6857392 # Block where fee contract is deployed 19 | -------------------------------------------------------------------------------- /data/genesis/decaf.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.4" 2 | upgrade_version = "0.4" 3 | genesis_version = "0.2" 4 | epoch_height = 3000 5 | drb_difficulty = 10 6 | drb_upgrade_difficulty = 5000000000 7 | epoch_start_block = 3160636 8 | 9 | [stake_table] 10 | capacity = 200 11 | 12 | [chain_config] 13 | chain_id = 0xdecaf 14 | base_fee = "1 wei" 15 | max_block_size = "10mb" 16 | fee_recipient = "0x0000000000000000000000000000000000000000" 17 | fee_contract = "0x42835083fd1d3fc5d799b5f6815ae4bf2623e6d0" 18 | stake_table_contract = "0x40304fbe94d5e7d1492dd90c53a2d63e8506a037" 19 | 20 | [header] 21 | timestamp = "1970-01-01T00:00:00Z" 22 | 23 | [header.chain_config] 24 | chain_id = 0xdecaf 25 | base_fee = '1 wei' 26 | max_block_size = '1mb' 27 | fee_recipient = '0x0000000000000000000000000000000000000000' 28 | fee_contract = '0x42835083fd1d3fc5d799b5f6815ae4bf2623e6d0' 29 | 30 | [l1_finalized] 31 | number = 6758037 # Block where fee contract is deployed 32 | -------------------------------------------------------------------------------- /data/genesis/demo-drb-header.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.4" 2 | upgrade_version = "0.4" 3 | genesis_version = "0.4" 4 | # NOTE: no upgrade configured, using lower epoch height 5 | epoch_height = 30 6 | drb_difficulty = 10 7 | drb_upgrade_difficulty = 20 8 | epoch_start_block = 1 9 | stake_table_capacity = 10 10 | 11 | [stake_table] 12 | capacity = 10 13 | 14 | [chain_config] 15 | chain_id = 999999999 16 | max_block_size = "10mb" 17 | base_fee = "1 wei" 18 | fee_recipient = "0x0000000000000000000000000000000000000000" 19 | fee_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" 20 | stake_table_contract = "0x12975173b87f7595ee45dffb2ab812ece596bf84" 21 | 22 | [header] 23 | timestamp = "1970-01-01T00:00:00Z" 24 | 25 | [header.chain_config] 26 | chain_id = 999999999 27 | max_block_size = "10mb" 28 | base_fee = "1 wei" 29 | fee_recipient = "0x0000000000000000000000000000000000000000" 30 | fee_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" 31 | stake_table_contract = "0x12975173b87f7595ee45dffb2ab812ece596bf84" 32 | 33 | [l1_finalized] 34 | number = 0 35 | -------------------------------------------------------------------------------- /data/genesis/demo-pos-base.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.3" 2 | upgrade_version = "0.3" 3 | genesis_version = "0.3" 4 | epoch_height = 150 5 | epoch_start_block = 1 6 | stake_table_capacity = 10 7 | 8 | [stake_table] 9 | capacity = 10 10 | 11 | [chain_config] 12 | chain_id = 999999999 13 | max_block_size = "1mb" 14 | base_fee = "1 wei" 15 | fee_recipient = "0x0000000000000000000000000000000000000000" 16 | fee_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" 17 | stake_table_contract = "0x12975173b87f7595ee45dffb2ab812ece596bf84" 18 | 19 | [header] 20 | timestamp = "1970-01-01T00:00:00Z" 21 | 22 | [header.chain_config] 23 | chain_id = 999999999 24 | max_block_size = "1mb" 25 | base_fee = "1 wei" 26 | fee_recipient = "0x0000000000000000000000000000000000000000" 27 | fee_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" 28 | stake_table_contract = 
"0x12975173b87f7595ee45dffb2ab812ece596bf84" 29 | 30 | [l1_finalized] 31 | number = 0 32 | -------------------------------------------------------------------------------- /data/genesis/demo.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.2" 2 | upgrade_version = "0.2" 3 | genesis_version = "0.2" 4 | 5 | [stake_table] 6 | capacity = 10 7 | 8 | [chain_config] 9 | chain_id = 999999999 10 | base_fee = "1 wei" 11 | max_block_size = "1mb" 12 | fee_recipient = "0x0000000000000000000000000000000000000000" 13 | fee_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" 14 | 15 | [header] 16 | timestamp = "1970-01-01T00:00:00Z" 17 | 18 | [header.chain_config] 19 | chain_id = 999999999 20 | base_fee = "1 wei" 21 | max_block_size = "1mb" 22 | fee_recipient = "0x0000000000000000000000000000000000000000" 23 | fee_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" 24 | 25 | [l1_finalized] 26 | number = 0 27 | -------------------------------------------------------------------------------- /data/genesis/mainnet.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.2" 2 | upgrade_version = "0.3" 3 | 4 | [stake_table] 5 | capacity = 200 6 | 7 | [chain_config] 8 | chain_id = 1 9 | base_fee = "1 wei" 10 | max_block_size = "1mb" 11 | fee_recipient = "0x0000000000000000000000000000000000000000" 12 | # Deployed 13 | fee_contract = "0x9fce21c3f7600aa63392a5f5713986b39bb98884" 14 | 15 | [header] 16 | timestamp = "1970-01-01T00:00:00Z" 17 | 18 | [l1_finalized] 19 | # 21087503 (deployed block) + (28800) (~4 days of blocks) 20 | number = 21116303 21 | -------------------------------------------------------------------------------- /data/genesis/staging.toml: -------------------------------------------------------------------------------- 1 | base_version = "0.2" 2 | upgrade_version = "0.3" 3 | genesis_version = "0.2" 4 | 5 | [stake_table] 6 | capacity = 200 7 | 8 | [chain_config] 9 | chain_id = 888888888 10 | base_fee = "1 wei" 11 | max_block_size = "1mb" 12 | fee_recipient = "0x0000000000000000000000000000000000000000" 13 | fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" 14 | 15 | [header] 16 | timestamp = "1970-01-01T00:00:00Z" 17 | 18 | [header.chain_config] 19 | chain_id = 888888888 20 | base_fee = "1 wei" 21 | max_block_size = "1mb" 22 | fee_recipient = "0x0000000000000000000000000000000000000000" 23 | fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" 24 | 25 | [l1_finalized] 26 | number = 0 27 | -------------------------------------------------------------------------------- /data/v1/block_query_data.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/block_query_data.bin -------------------------------------------------------------------------------- /data/v1/chain_config.bin: -------------------------------------------------------------------------------- 1 | 0x8a19(0x0*0x0000000000000000000000000000000000000000*0x0000000000000000000000000000000000000000 -------------------------------------------------------------------------------- /data/v1/chain_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_fee": "0", 3 | "chain_id": "35353", 4 | "fee_contract": "0x0000000000000000000000000000000000000000", 5 | "fee_recipient": 
"0x0000000000000000000000000000000000000000", 6 | "max_block_size": "10240" 7 | } 8 | -------------------------------------------------------------------------------- /data/v1/fee_info.bin: -------------------------------------------------------------------------------- 1 | *0xf39fd6e51aad88f6f4ce6ab8827279cfffb922660x0 -------------------------------------------------------------------------------- /data/v1/fee_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "account": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", 3 | "amount": "0" 4 | } 5 | -------------------------------------------------------------------------------- /data/v1/header.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/header.bin -------------------------------------------------------------------------------- /data/v1/l1_block.bin: -------------------------------------------------------------------------------- 1 | {0x456B0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef -------------------------------------------------------------------------------- /data/v1/l1_block.json: -------------------------------------------------------------------------------- 1 | { 2 | "number": 123, 3 | "timestamp": "0x456", 4 | "hash": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" 5 | } 6 | -------------------------------------------------------------------------------- /data/v1/leaf_query_data_legacy.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/leaf_query_data_legacy.bin -------------------------------------------------------------------------------- /data/v1/messages.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/messages.bin -------------------------------------------------------------------------------- /data/v1/ns_proof_legacy.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/ns_proof_legacy.bin -------------------------------------------------------------------------------- /data/v1/ns_table.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/ns_table.bin -------------------------------------------------------------------------------- /data/v1/ns_table.json: -------------------------------------------------------------------------------- 1 | { 2 | "bytes": "AwAAAO7/wAAcBgAAobC5EkAOAABksAWiXBQAAA==" 3 | } -------------------------------------------------------------------------------- /data/v1/payload.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/payload.bin -------------------------------------------------------------------------------- /data/v1/payload_query_data.bin: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/payload_query_data.bin -------------------------------------------------------------------------------- /data/v1/transaction.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/transaction.bin -------------------------------------------------------------------------------- /data/v1/transaction.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": 12648430, 3 | "payload": "vj8cymNUwpTPZMCY3qItBACelLfb+2v0bng7fk/U3aoEfKP36cdVglIDjBkk3D5Dpz9jZLgthi81fsxCGod7uMTy40NZUWR69skwH9gd67REz6ar2K9OuVPwKG3F3Vg973TIrwKTP3plY4Lu6hW6El2AgT5q9qqgmZ3XwnIpZ1lfbAIJ7Ngn6RRTXFsa+jtLb0goXSQtALN69RHcntnqe3q2Ao02ZTxWkspFf0GPgOtCVh6/NcftJfP7D/wDNF+7mT4fCUn7jzySd0oc/BNFtAx8inCSJyZLrOCRhjkiBd8VNjrWUx9mA76WZpIRSlUwIooO2MzeQwL/mkbJjojVQg==" 4 | } -------------------------------------------------------------------------------- /data/v1/transaction_query_data.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/transaction_query_data.bin -------------------------------------------------------------------------------- /data/v1/vid_common_v0.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/vid_common_v0.bin -------------------------------------------------------------------------------- /data/v1/vid_common_v1.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v1/vid_common_v1.bin -------------------------------------------------------------------------------- /data/v1/vid_common_v1.json: -------------------------------------------------------------------------------- 1 | { 2 | "block_hash": "BLOCK~dh1KpdvvxSvnnPpOi2yI3DOg8h6ltr2Kv13iRzbQvtN2", 3 | "common": { 4 | "V1": { 5 | "recovery_threshold": 4, 6 | "total_weights": 10 7 | } 8 | }, 9 | "height": 42, 10 | "payload_hash": "HASH~u-mEo1mwByROUhnvO7pBFitcD0UEvruK-b8WONkKoCLQ" 11 | } -------------------------------------------------------------------------------- /data/v2/chain_config.bin: -------------------------------------------------------------------------------- 1 | 0x8a19(0x0*0x0000000000000000000000000000000000000000*0x0000000000000000000000000000000000000000 -------------------------------------------------------------------------------- /data/v2/chain_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_fee": "0", 3 | "chain_id": "35353", 4 | "fee_contract": "0x0000000000000000000000000000000000000000", 5 | "fee_recipient": "0x0000000000000000000000000000000000000000", 6 | "max_block_size": "10240" 7 | } 8 | -------------------------------------------------------------------------------- /data/v2/header.bin: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v2/header.bin -------------------------------------------------------------------------------- /data/v2/leaf_query_data_legacy.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v2/leaf_query_data_legacy.bin -------------------------------------------------------------------------------- /data/v2/messages.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v2/messages.bin -------------------------------------------------------------------------------- /data/v3/chain_config.bin: -------------------------------------------------------------------------------- 1 | 0x8a19(0x0*0x0000000000000000000000000000000000000000*0x0000000000000000000000000000000000000000*0x0000000000000000000000000000000000000000 -------------------------------------------------------------------------------- /data/v3/chain_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_fee": "0", 3 | "chain_id": "35353", 4 | "fee_contract": "0x0000000000000000000000000000000000000000", 5 | "fee_recipient": "0x0000000000000000000000000000000000000000", 6 | "max_block_size": "10240", 7 | "stake_table_contract": "0x0000000000000000000000000000000000000000" 8 | } -------------------------------------------------------------------------------- /data/v3/header.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v3/header.bin -------------------------------------------------------------------------------- /data/v3/leaf_query_data.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v3/leaf_query_data.bin -------------------------------------------------------------------------------- /data/v3/messages.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v3/messages.bin -------------------------------------------------------------------------------- /data/v3/ns_proof_V0.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v3/ns_proof_V0.bin -------------------------------------------------------------------------------- /data/v3/ns_proof_V1.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v3/ns_proof_V1.bin -------------------------------------------------------------------------------- /data/v3/state_cert.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v3/state_cert.bin 
-------------------------------------------------------------------------------- /data/v3/state_cert.json: -------------------------------------------------------------------------------- 1 | { 2 | "epoch": 1, 3 | "light_client_state": "LIGHT_CLIENT_STATE~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABA", 4 | "next_stake_table_state": "STAKE_TABLE_STATE~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8", 5 | "signatures": [] 6 | } -------------------------------------------------------------------------------- /data/v4/chain_config.bin: -------------------------------------------------------------------------------- 1 | 0x8a19(0x0*0x0000000000000000000000000000000000000000*0x0000000000000000000000000000000000000000*0x0000000000000000000000000000000000000000 -------------------------------------------------------------------------------- /data/v4/chain_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_fee": "0", 3 | "chain_id": "35353", 4 | "fee_contract": "0x0000000000000000000000000000000000000000", 5 | "fee_recipient": "0x0000000000000000000000000000000000000000", 6 | "max_block_size": "10240", 7 | "stake_table_contract": "0x0000000000000000000000000000000000000000" 8 | } -------------------------------------------------------------------------------- /data/v4/header.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v4/header.bin -------------------------------------------------------------------------------- /data/v4/messages.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v4/messages.bin -------------------------------------------------------------------------------- /data/v4/ns_proof_V0.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v4/ns_proof_V0.bin -------------------------------------------------------------------------------- /data/v4/ns_proof_V1.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v4/ns_proof_V1.bin -------------------------------------------------------------------------------- /data/v4/state_cert.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/data/v4/state_cert.bin -------------------------------------------------------------------------------- /data/v4/state_cert.json: -------------------------------------------------------------------------------- 1 | { 2 | "auth_root": "0x0000000000000000000000000000000000000000000000000000000000000000", 3 | "epoch": 1, 4 | "light_client_state": "LIGHT_CLIENT_STATE~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABA", 5 | "next_stake_table_state": 
"STAKE_TABLE_STATE~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8", 6 | "signatures": [] 7 | } -------------------------------------------------------------------------------- /doc/full-node-espresso-integration.puml: -------------------------------------------------------------------------------- 1 | @startuml 2 | actor Client 3 | participant Rollup 4 | participant "Espresso Node" as EspNode 5 | participant L1 6 | 7 | Rollup <- L1 : height of last verified block 8 | 9 | Rollup -> EspNode : GET availability/stream/blocks/height 10 | Rollup -> EspNode : GET availability/stream/leaves/height 11 | 12 | loop 13 | Rollup <-- EspNode : Block 14 | Rollup <-- EspNode : Leaf, proof of consensus 15 | Rollup -> Rollup : Verify proof of consensus 16 | Rollup -> Rollup : Execute Block 17 | Rollup --> Client: Updated State 18 | end 19 | @enduml 20 | -------------------------------------------------------------------------------- /doc/prover-espresso-integration.puml: -------------------------------------------------------------------------------- 1 | @startuml 2 | participant "Rollup Prover" as Rollup 3 | participant "Espresso Node" as EspNode 4 | participant L1 5 | 6 | Rollup <- L1 : height of last verified block 7 | Rollup -> EspNode : GET availability/stream/blocks/height 8 | 9 | loop 10 | Rollup <-- EspNode : Block 11 | Rollup <- L1 : Certified block commitment 12 | Rollup -> Rollup : Check block against commitment 13 | Rollup -> Rollup : Execute block 14 | Rollup -> L1 : New State\nProof 15 | end 16 | @enduml 17 | -------------------------------------------------------------------------------- /doc/zk-rollup-circuit-no-espresso-consensus.puml: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | 4 | [Espresso Derivation] as ED 5 | [AND] as AND 6 | 7 | [zkVM] as ZK 8 | 9 | blk_cm_root --> ED 10 | "TXS_NAMESPACE_PROOFS" --> ED 11 | "ROLLUP_TXS" --> ED 12 | "ROLLUP_TXS" --> ZK 13 | 14 | cm_txs_rollup --> ED 15 | 16 | "cm_state_vm i" --> ZK 17 | "cm_state_vm i+1" --> ZK 18 | cm_txs_rollup --> ZK 19 | 20 | 21 | ZK --> AND 22 | ED --> AND 23 | 24 | AND --> OUTPUT 25 | 26 | 27 | @enduml 28 | -------------------------------------------------------------------------------- /doc/zk-rollup-circuit.puml: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | [Espresso Consensus] as EspCons 4 | [Espresso Derivation] as ED 5 | [AND] as AND 6 | 7 | [zkVM] as ZK 8 | blk_cm_root --> EspCons 9 | "STATE_SIGS" --> EspCons 10 | "STAKE_TABLE_ENTRIES" --> EspCons 11 | "STAKE_TABLE_OPENINGS" --> EspCons 12 | 13 | 14 | 15 | blk_cm_root --> ED 16 | "TXS_NAMESPACE_PROOFS" --> ED 17 | "ROLLUP_TXS" --> ED 18 | "ROLLUP_TXS" --> ZK 19 | 20 | cm_txs_rollup --> ED 21 | 22 | "cm_state_vm i" --> ZK 23 | "cm_state_vm i+1" --> ZK 24 | cm_txs_rollup --> ZK 25 | 26 | EspCons -> AND 27 | ZK --> AND 28 | ED --> AND 29 | 30 | AND --> OUTPUT 31 | 32 | 33 | @enduml 34 | -------------------------------------------------------------------------------- /doc/zk-rollup-default-sequencer.puml: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | [zkVM] as ZK 4 | 5 | "cm_state_vm i" --> ZK 6 | "cm_state_vm i+1" --> ZK 7 | cm_txs_rollup --> ZK 8 | "ROLLUP_TXS" --> ZK 9 | ZK --> OUTPUT 10 | 11 | 12 | @enduml 13 | 
-------------------------------------------------------------------------------- /docker/bridge.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/espresso-bridge /bin/espresso-bridge 6 | RUN chmod +x /bin/espresso-bridge 7 | 8 | RUN ln -s /bin/espresso-bridge /bin/bridge 9 | 10 | CMD [ "/bin/espresso-bridge"] 11 | -------------------------------------------------------------------------------- /docker/builder.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | # Install genesis files for all supported configurations. The desired configuration can be chosen by 6 | # setting `ESPRESSO_BUILDER_GENESIS_FILE`. 7 | COPY data/genesis /genesis 8 | 9 | COPY target/$TARGETARCH/release/permissionless-builder /bin/permissionless-builder 10 | RUN chmod +x /bin/permissionless-builder 11 | 12 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_BUILDER_SERVER_PORT}/healthcheck || exit 1 13 | 14 | CMD [ "/bin/permissionless-builder"] 15 | -------------------------------------------------------------------------------- /docker/cdn-broker.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/cdn-broker /bin/cdn-broker 6 | RUN chmod +x /bin/cdn-broker 7 | 8 | ENV RUST_LOG="info" 9 | 10 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_CDN_SERVER_METRICS_PORT}/metrics || exit 1 11 | CMD ["cdn-broker"] 12 | -------------------------------------------------------------------------------- /docker/cdn-marshal.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/cdn-marshal /bin/cdn-marshal 6 | RUN chmod +x /bin/cdn-marshal 7 | 8 | ENV RUST_LOG="info" 9 | 10 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_CDN_SERVER_METRICS_PORT}/metrics || exit 1 11 | CMD ["cdn-marshal"] 12 | -------------------------------------------------------------------------------- /docker/cdn-whitelist.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/cdn-whitelist /bin/cdn-whitelist 6 | RUN chmod +x /bin/cdn-whitelist 7 | 8 | ENV RUST_LOG="info" 9 | 10 | CMD ["cdn-whitelist"] 11 | -------------------------------------------------------------------------------- /docker/deploy.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/nodejs-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | WORKDIR /app 6 | COPY package.json yarn.lock ./ 7 | 8 | RUN yarn && rm -rf /usr/local/share/.cache 9 | 10 | COPY target/$TARGETARCH/release/deploy /bin/deploy 11 | RUN chmod +x /bin/deploy 12 | 13 | COPY scripts/multisig-upgrade-entrypoint /bin/multisig-upgrade-entrypoint 14 | RUN chmod +x /bin/multisig-upgrade-entrypoint 15 | 16 | COPY contracts/script/multisigTransactionProposals/safeSDK 
./contracts/script/multisigTransactionProposals/safeSDK/ 17 | 18 | RUN /bin/deploy verify-node-js-files 19 | CMD ["/bin/deploy"] 20 | -------------------------------------------------------------------------------- /docker/espresso-dev-node.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/espresso-dev-node /bin/espresso-dev-node 6 | RUN chmod +x /bin/espresso-dev-node 7 | 8 | # Download the anvil binary 9 | RUN curl -L https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_linux_${TARGETARCH}.tar.gz --output -| tar -xzvf - -C /bin/ anvil 10 | 11 | # When running as a Docker service, we always want a healthcheck endpoint, so set a default for the 12 | # port that the HTTP server will run on. This can be overridden in any given deployment environment. 13 | ENV ESPRESSO_SEQUENCER_API_PORT=8770 14 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_SEQUENCER_API_PORT}/status/block-height || exit 1 15 | 16 | # A storage directory is required to run the node. Set one inside the container by default. For 17 | # persistence between runs, the user can optionally set up a volume mounted at this path. 18 | ENV ESPRESSO_SEQUENCER_STORAGE_PATH=/data/espresso 19 | 20 | EXPOSE 8770 21 | EXPOSE 8771 22 | EXPOSE 8772 23 | 24 | CMD [ "/bin/espresso-dev-node" ] 25 | -------------------------------------------------------------------------------- /docker/nasty-client.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/nasty-client /bin/nasty-client 6 | RUN chmod +x /bin/nasty-client 7 | 8 | # Run a web server on this port by default. Port can be overridden by the container orchestrator. 9 | ENV ESPRESSO_NASTY_CLIENT_PORT=80 10 | 11 | CMD [ "/bin/nasty-client"] 12 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_NASTY_CLIENT_PORT}/healthcheck || exit 1 13 | EXPOSE ${ESPRESSO_NASTY_CLIENT_PORT} 14 | -------------------------------------------------------------------------------- /docker/node-validator.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/node-metrics /bin/node-metrics 6 | RUN chmod +x /bin/node-metrics 7 | 8 | # Run a web server on this port by default. Port can be overridden by the container orchestrator. 
9 | ENV ESPRESSO_NODE_VALIDATOR_PORT=80 10 | 11 | CMD [ "/bin/node-metrics"] 12 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_NODE_VALIDATOR_PORT}/healthcheck || exit 1 13 | EXPOSE ${ESPRESSO_NODE_VALIDATOR_PORT} 14 | -------------------------------------------------------------------------------- /docker/prover-service.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | 6 | # copy the binaries 7 | COPY target/$TARGETARCH/release/state-prover /usr/local/bin/state-prover 8 | RUN chmod +x /usr/local/bin/state-prover 9 | 10 | # When running as a Docker service, we always want a healthcheck endpoint, so set a default for the 11 | # port that the HTTP server will run on. This can be overridden in any given deployment environment. 12 | ENV ESPRESSO_PROVER_SERVICE_PORT=80 13 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_PROVER_SERVICE_PORT}/healthcheck || exit 1 14 | 15 | CMD [ "state-prover", "-d" ] 16 | -------------------------------------------------------------------------------- /docker/scripts/sequencer-awssecretsmanager.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eEu -o pipefail 3 | 4 | if [[ -v ESPRESSO_SEQUENCER_GENESIS_SECRET ]]; then 5 | echo "Loading genesis file from AWS secrets manager" 6 | aws secretsmanager get-secret-value --secret-id ${ESPRESSO_SEQUENCER_GENESIS_SECRET} --query SecretString --output text | tee /genesis/injected.toml >/dev/null 7 | fi 8 | 9 | /bin/sequencer "$@" 10 | -------------------------------------------------------------------------------- /docker/staking-cli.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/staking-cli /bin/staking-cli 6 | RUN chmod +x /bin/staking-cli 7 | 8 | CMD [ "staking-cli" ] 9 | -------------------------------------------------------------------------------- /docker/state-relay-server.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/state-relay-server /bin/state-relay-server 6 | RUN chmod +x /bin/state-relay-server 7 | 8 | ENV ESPRESSO_STATE_RELAY_SERVER_PORT=40004 9 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_STATE_RELAY_SERVER_PORT}/healthcheck || exit 1 10 | 11 | EXPOSE ${ESPRESSO_STATE_RELAY_SERVER_PORT} 12 | 13 | CMD [ "/bin/state-relay-server"] 14 | -------------------------------------------------------------------------------- /docker/submit-transactions.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/espressosystems/ubuntu-base:main 2 | 3 | ARG TARGETARCH 4 | 5 | COPY target/$TARGETARCH/release/submit-transactions /bin/submit-transactions 6 | RUN chmod +x /bin/submit-transactions 7 | 8 | # Run a web server on this port by default. Port can be overridden by the container orchestrator. 
9 | ENV ESPRESSO_SUBMIT_TRANSACTIONS_PORT=80 10 | 11 | CMD [ "/bin/submit-transactions"] 12 | HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_SUBMIT_TRANSACTIONS_PORT}/healthcheck || exit 1 13 | EXPOSE ${ESPRESSO_SUBMIT_TRANSACTIONS_PORT} 14 | -------------------------------------------------------------------------------- /geth-config/test-jwt-secret.txt: -------------------------------------------------------------------------------- 1 | 688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a 2 | -------------------------------------------------------------------------------- /hotshot-events-service/.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # MSVC Windows builds of rustc generate these, which store debugging information 13 | *.pdb 14 | 15 | **/target 16 | **/result 17 | **/out*.txt 18 | **/out*.json 19 | **/.idea 20 | /*.pdf 21 | **/target_dirs 22 | /target_dirs 23 | /.vscode/settings.json 24 | **/.DS_Store 25 | 26 | vsc/ 27 | -------------------------------------------------------------------------------- /hotshot-events-service/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hotshot-events-service" 3 | version = "0.1.57" 4 | edition = "2021" 5 | license = "MIT" 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | alloy = { workspace = true } 10 | async-broadcast = { workspace = true } 11 | async-lock = { workspace = true } 12 | async-trait = { workspace = true } 13 | clap = { workspace = true } 14 | derive_more = { workspace = true } 15 | futures = { workspace = true } 16 | hotshot-types = { workspace = true } 17 | rand = { workspace = true } 18 | semver = { workspace = true } 19 | serde = { workspace = true } 20 | snafu = "0.8" 21 | tide-disco = "0.9" 22 | tokio = { workspace = true } 23 | toml = { workspace = true } 24 | tracing = { workspace = true } 25 | tracing-test = "0.2" 26 | vbs = { workspace = true } 27 | 28 | [dev-dependencies] 29 | hotshot-example-types = { workspace = true } 30 | portpicker = "0.1.1" 31 | surf-disco = { workspace = true } 32 | 33 | [lints] 34 | workspace = true 35 | -------------------------------------------------------------------------------- /hotshot-events-service/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Espresso Systems 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /hotshot-events-service/README.md: -------------------------------------------------------------------------------- 1 | # hotshot-events-service 2 | 3 | Shared API definitions, with minimal dependencies, for serving internal HotShot events 4 | 5 | # HotShot Consensus Module 6 | -------------------------------------------------------------------------------- /hotshot-events-service/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod api; 2 | pub mod events; 3 | pub mod events_source; 4 | mod test; 5 | -------------------------------------------------------------------------------- /hotshot-query-service/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 Espresso Systems (espressosys.com) 2 | # This file is part of the HotShot Query Service library. 3 | # 4 | # This program is free software: you can redistribute it and/or modify it under the terms of the GNU 5 | # General Public License as published by the Free Software Foundation, either version 3 of the 6 | # License, or (at your option) any later version. 7 | # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without 8 | # even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 9 | # General Public License for more details. 10 | # You should have received a copy of the GNU General Public License along with this program. If not, 11 | # see <https://www.gnu.org/licenses/>. 12 | 13 | [net] 14 | git-fetch-with-cli = true -------------------------------------------------------------------------------- /hotshot-query-service/.config/nextest.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 Espresso Systems (espressosys.com) 2 | # This file is part of the HotShot Query Service library. 3 | # 4 | # This program is free software: you can redistribute it and/or modify it under the terms of the GNU 5 | # General Public License as published by the Free Software Foundation, either version 3 of the 6 | # License, or (at your option) any later version. 7 | # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without 8 | # even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 9 | # General Public License for more details. 10 | # You should have received a copy of the GNU General Public License along with this program. If not, 11 | # see <https://www.gnu.org/licenses/>.
12 | 13 | [test-groups] 14 | sql = { max-threads = 2 } 15 | 16 | [[profile.default.overrides]] 17 | filter = 'test(sql) | test(query_service)' 18 | test-group = 'sql' 19 | -------------------------------------------------------------------------------- /hotshot-query-service/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | # generated by nix-pre-commit-hooks 4 | /.pre-commit-config.yaml 5 | 6 | # generated by coverage workflow 7 | lcov.info 8 | 9 | /vsc 10 | 11 | /.vscode 12 | 13 | # for sqlite databases created during the tests 14 | /tmp -------------------------------------------------------------------------------- /hotshot-query-service/.license-header.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022 Espresso Systems (espressosys.com) 2 | This file is part of the HotShot Query Service library. 3 | 4 | This program is free software: you can redistribute it and/or modify it under the terms of the GNU 5 | General Public License as published by the Free Software Foundation, either version 3 of the 6 | License, or (at your option) any later version. 7 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without 8 | even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 9 | General Public License for more details. 10 | You should have received a copy of the GNU General Public License along with this program. If not, 11 | see <https://www.gnu.org/licenses/>. 12 | -------------------------------------------------------------------------------- /hotshot-query-service/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in the repo. Unless a 2 | # later match takes precedence, they will be requested for review when someone 3 | # opens a pull request.
4 | 5 | * @nomaxg @sveitser @jbearer @imabdulbasit 6 | 7 | # Dependabot PRs 8 | *.toml @nomaxg @sveitser 9 | *.lock @nomaxg @sveitser 10 | -------------------------------------------------------------------------------- /hotshot-query-service/doc/fetch-block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/hotshot-query-service/doc/fetch-block.png -------------------------------------------------------------------------------- /hotshot-query-service/doc/fetch-leaf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/hotshot-query-service/doc/fetch-leaf.png -------------------------------------------------------------------------------- /hotshot-query-service/doc/fetching-workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EspressoSystems/espresso-network/78d1196d270f707fb241ecdf55f7e3054fb4ec83/hotshot-query-service/doc/fetching-workflow.png -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V1000__aggregate_table_ns.sql: -------------------------------------------------------------------------------- 1 | -- this table is dropped so that we can start building aggregator stats from block height 0 2 | -- for each namespace 3 | -- otherwise aggregator would have namespace stats for future blocks only 4 | DROP TABLE IF EXISTS aggregate; 5 | 6 | CREATE TABLE aggregate ( 7 | height BIGINT, 8 | namespace BIGINT, 9 | num_transactions BIGINT NOT NULL, 10 | payload_size BIGINT NOT NULL, 11 | PRIMARY KEY (height, namespace) 12 | ); 13 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V100__drop_leaf_payload.sql: -------------------------------------------------------------------------------- 1 | -- A previous version of the software erroneously stored leaves in the database with the full 2 | -- payload. This is unnecessary, since we store payloads in their own separate table, and hurts 3 | -- performance. The updated software no longer does this for new leaves. This migration removes the 4 | -- redundant payloads for old leaves.
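-- The UPDATE below overwrites each leaf's embedded `block_payload` field with JSON null; the payload bytes themselves remain available in the separate payload table.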
5 | UPDATE leaf SET leaf = jsonb_set(leaf, '{block_payload}', 'null'); 6 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V1100__latest_qc_chain.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE latest_qc_chain ( 2 | id INT PRIMARY KEY, 3 | qcs JSONB NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V200__create_aggregates_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE aggregate ( 2 | height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, 3 | num_transactions BIGINT NOT NULL, 4 | payload_size BIGINT NOT NULL 5 | ); 6 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V20__payload_hash_index.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX header_payload_hash_idx ON header (payload_hash); 2 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V300__transactions_count.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE payload 2 | ADD COLUMN num_transactions INTEGER; 3 | 4 | -- Initialize the `num_transactions` column by counting transactions for each 5 | -- existing payload. 6 | UPDATE payload AS p 7 | SET (num_transactions) = 8 | (SELECT count(*) FROM transaction AS t where t.block_height = p.height) 9 | -- Don't set `num_transactions` (leave it NULL) for payloads we don't have 10 | -- yet. 11 | WHERE p.data IS NOT NULL; 12 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V30__drop_leaf_block_hash_fkey_constraint.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE leaf 2 | DROP CONSTRAINT leaf_block_hash_fkey; -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V400__rename_transaction_table.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE transaction 2 | RENAME TO transactions; 3 | 4 | ALTER TABLE transactions 5 | RENAME COLUMN index TO idx; -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V500__types_migration.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE leaf2 2 | ( 3 | height BIGINT NOT NULL REFERENCES header (height) ON DELETE CASCADE, 4 | view BIGINT NOT NULL, 5 | hash VARCHAR NOT NULL UNIQUE, 6 | block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, 7 | leaf JSONB NOT NULL, 8 | qc JSONB NOT NULL 9 | ); 10 | ALTER TABLE leaf2 ADD CONSTRAINT leaf2_pk PRIMARY KEY (height, view); 11 | 12 | CREATE TABLE types_migration ( 13 | id SERIAL PRIMARY KEY, 14 | completed bool NOT NULL DEFAULT false 15 | ); 16 | 17 | INSERT INTO types_migration ("completed") VALUES (false); 18 | 19 | 20 | CREATE TABLE vid2 21 | ( 22 | height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, 23 | common BYTEA NOT NULL, 24 | share BYTEA 25 | ); 26 | -------------------------------------------------------------------------------- 
/hotshot-query-service/migrations/postgres/V600__state_cert.sql: -------------------------------------------------------------------------------- 1 | -- This table is used to store the finalized light client state cert 2 | CREATE TABLE finalized_state_cert 3 | ( 4 | epoch BIGINT PRIMARY KEY, 5 | state_cert BYTEA 6 | ); 7 | 8 | -- This table is used for consensus to store the light client state cert indexed by view 9 | CREATE TABLE state_cert 10 | ( 11 | view BIGINT PRIMARY KEY, 12 | state_cert BYTEA 13 | ); 14 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V700__add_migrated_rows_col.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE types_migration 2 | ADD COLUMN migrated_rows BIGINT DEFAULT 0; -------------------------------------------------------------------------------- /hotshot-query-service/migrations/postgres/V800__leaf2_remove_view.sql: -------------------------------------------------------------------------------- 1 | CREATE UNIQUE INDEX IF NOT EXISTS leaf2_height_idx ON leaf2 (height); 2 | 3 | ALTER TABLE leaf2 DROP CONSTRAINT leaf2_pk; 4 | 5 | ALTER TABLE leaf2 ADD CONSTRAINT leaf2_pk PRIMARY KEY USING INDEX leaf2_height_idx; 6 | 7 | ALTER TABLE leaf2 DROP COLUMN view; -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V200__create_aggregates_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE aggregate ( 2 | height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, 3 | num_transactions BIGINT NOT NULL, 4 | payload_size BIGINT NOT NULL 5 | ); -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V300__types_migration.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE leaf2 2 | ( 3 | height BIGINT NOT NULL REFERENCES header (height) ON DELETE CASCADE, 4 | view BIGINT NOT NULL, 5 | hash VARCHAR NOT NULL UNIQUE, 6 | block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, 7 | leaf JSONB NOT NULL, 8 | qc JSONB NOT NULL, 9 | PRIMARY KEY (height, view) 10 | ); 11 | 12 | CREATE TABLE types_migration ( 13 | id INTEGER PRIMARY KEY AUTOINCREMENT, 14 | completed bool NOT NULL DEFAULT false 15 | ); 16 | 17 | INSERT INTO types_migration ("completed") VALUES (false); 18 | 19 | CREATE TABLE vid2 20 | ( 21 | height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, 22 | common BYTEA NOT NULL, 23 | share BYTEA 24 | ); 25 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V400__state_cert.sql: -------------------------------------------------------------------------------- 1 | -- This table is used to store the finalized light client state cert 2 | CREATE TABLE finalized_state_cert 3 | ( 4 | epoch BIGINT PRIMARY KEY, 5 | state_cert BLOB 6 | ); 7 | 8 | -- This table is used for consensus to store the light client state cert indexed by view 9 | CREATE TABLE state_cert 10 | ( 11 | view BIGINT PRIMARY KEY, 12 | state_cert BLOB 13 | ); 14 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V500__add_migrated_rows_col.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE types_migration 2 | ADD 
COLUMN migrated_rows BIGINT DEFAULT 0; -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V600__leaf2_remove_view.sql: -------------------------------------------------------------------------------- 1 | -- SQLite doesn't let you drop PK columns or restructure tables, so we make a temporary table 2 | -- and copy the data from the old leaf2 table. 3 | 4 | CREATE TABLE leaf2_new 5 | ( 6 | height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, 7 | hash VARCHAR NOT NULL UNIQUE, 8 | block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, 9 | leaf JSONB NOT NULL, 10 | qc JSONB NOT NULL 11 | ); 12 | 13 | INSERT INTO leaf2_new (height,hash,block_hash,leaf,qc) SELECT height,hash,block_hash,leaf,qc FROM leaf2; 14 | 15 | DROP TABLE leaf2; 16 | 17 | ALTER TABLE leaf2_new RENAME TO leaf2; 18 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V700__tx_separate_index.sql: -------------------------------------------------------------------------------- 1 | -- In SQLite, we have to create a new table since we are going to be using a different primary key, 2 | -- and then copy the data over. 3 | CREATE TABLE transactions2 ( 4 | hash TEXT NOT NULL, 5 | -- Block containing this transaction. 6 | block_height BIGINT NOT NULL REFERENCES header(height) ON DELETE CASCADE, 7 | -- Index within block of the namespace containing this transaction. 8 | ns_index BIGINT NOT NULL, 9 | -- Namespace containing this transaction. 10 | ns_id BIGINT NOT NULL, 11 | -- Position within the namespace. 12 | position BIGINT NOT NULL, 13 | PRIMARY KEY (block_height, ns_id, position) 14 | ); 15 | 16 | DROP TABLE transactions; 17 | ALTER TABLE transactions2 RENAME TO transactions; 18 | -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V800__aggregate_table_ns.sql: -------------------------------------------------------------------------------- 1 | -- this table is dropped so that we can start building aggregator stats from block height 0 2 | -- for each namespace 3 | -- otherwise aggregator would have namespace stats for future blocks only 4 | 5 | DROP TABLE IF EXISTS aggregate; 6 | 7 | CREATE TABLE aggregate ( 8 | height BIGINT, 9 | namespace BIGINT, 10 | num_transactions BIGINT NOT NULL, 11 | payload_size BIGINT NOT NULL, 12 | PRIMARY KEY (height, namespace) 13 | ); -------------------------------------------------------------------------------- /hotshot-query-service/migrations/sqlite/V900__latest_qc_chain.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE latest_qc_chain ( 2 | id INT PRIMARY KEY, 3 | qcs JSONB NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /hotshot-query-service/pyproject.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 Espresso Systems (espressosys.com) 2 | # This file is part of the HotShot Query Service library. 3 | # 4 | # This program is free software: you can redistribute it and/or modify it under the terms of the GNU 5 | # General Public License as published by the Free Software Foundation, either version 3 of the 6 | # License, or (at your option) any later version. 
7 | # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without 8 | # even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 9 | # General Public License for more details. 10 | # You should have received a copy of the GNU General Public License along with this program. If not, 11 | # see . 12 | 13 | [tool.poetry] 14 | name = "hotshot-query-service" 15 | version = "0.0.1" 16 | description = "" 17 | authors = ["Espresso Systems "] 18 | 19 | [tool.poetry.dependencies] 20 | python = "^3.9" 21 | pre-commit-hooks = {git = "https://github.com/Lucas-C/pre-commit-hooks"} 22 | 23 | [tool.poetry.dev-dependencies] 24 | 25 | [build-system] 26 | requires = ["poetry-core>=1.0.0"] 27 | build-backend = "poetry.core.masonry.api" 28 | -------------------------------------------------------------------------------- /hotshot-query-service/rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | # TODO: Compilation of jf-pcs with rust 1.84 takes a very long time, while this 3 | # is being fixed we use rust 1.83. See 4 | # https://github.com/rust-lang/rust/issues/135457 for upstream issue. 5 | # 6 | # channel = "stable" 7 | channel = "1.83" 8 | components = ["rustfmt", "llvm-tools-preview", "rust-src", "clippy"] 9 | profile = "minimal" 10 | -------------------------------------------------------------------------------- /hotshot-query-service/src/testing.rs: -------------------------------------------------------------------------------- 1 | #![cfg(any(test, feature = "testing"))] 2 | 3 | // Copyright (c) 2022 Espresso Systems (espressosys.com) 4 | // This file is part of the HotShot Query Service library. 5 | // 6 | // This program is free software: you can redistribute it and/or modify it under the terms of the GNU 7 | // General Public License as published by the Free Software Foundation, either version 3 of the 8 | // License, or (at your option) any later version. 9 | // This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without 10 | // even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | // General Public License for more details. 12 | // You should have received a copy of the GNU General Public License along with this program. If not, 13 | // see . 14 | use std::time::Duration; 15 | 16 | pub mod consensus; 17 | pub mod mocks; 18 | 19 | pub async fn sleep(dur: Duration) { 20 | tokio::time::sleep(dur).await; 21 | } 22 | -------------------------------------------------------------------------------- /hotshot-query-service/src/types.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022 Espresso Systems (espressosys.com) 2 | // This file is part of the HotShot Query Service library. 3 | // 4 | // This program is free software: you can redistribute it and/or modify it under the terms of the GNU 5 | // General Public License as published by the Free Software Foundation, either version 3 of the 6 | // License, or (at your option) any later version. 7 | // This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without 8 | // even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 9 | // General Public License for more details. 10 | // You should have received a copy of the GNU General Public License along with this program. If not, 11 | // see . 12 | 13 | //! 
Common functionality provided by types used in this crate. 14 | 15 | /// Types which have a notion of "height" within a chain. 16 | pub trait HeightIndexed { 17 | fn height(&self) -> u64; 18 | } 19 | -------------------------------------------------------------------------------- /hotshot-state-prover/api/prover-service.toml: -------------------------------------------------------------------------------- 1 | [route.getlightclientcontract] 2 | PATH = ["/lightclient_contract"] 3 | METHOD = "GET" 4 | DOC = "Get the address of light client contract on Layer1." -------------------------------------------------------------------------------- /hotshot-state-prover/src/bin/gen-demo-genesis.rs: -------------------------------------------------------------------------------- 1 | use alloy::{hex::ToHexExt, sol_types::SolValue}; 2 | use clap::Parser; 3 | use espresso_contract_deployer::network_config::light_client_genesis; 4 | use hotshot_contract_adapter::sol_types::{LightClientStateSol, StakeTableStateSol}; 5 | use hotshot_types::light_client::DEFAULT_STAKE_TABLE_CAPACITY; 6 | use url::Url; 7 | 8 | #[derive(Parser)] 9 | struct Args { 10 | /// URL of the HotShot orchestrator. 11 | #[clap( 12 | short, 13 | long, 14 | env = "ESPRESSO_SEQUENCER_ORCHESTRATOR_URL", 15 | default_value = "http://localhost:8080" 16 | )] 17 | pub orchestrator_url: Url, 18 | } 19 | 20 | #[tokio::main] 21 | async fn main() { 22 | let args = Args::parse(); 23 | let pi: (LightClientStateSol, StakeTableStateSol) = 24 | light_client_genesis(&args.orchestrator_url, DEFAULT_STAKE_TABLE_CAPACITY) 25 | .await 26 | .unwrap(); 27 | println!("{}", pi.abi_encode_params().encode_hex()); 28 | } 29 | -------------------------------------------------------------------------------- /hotshot-state-prover/src/v1/mod.rs: -------------------------------------------------------------------------------- 1 | //! Light client V1 prover 2 | 3 | /// State verifier circuit builder 4 | pub mod circuit; 5 | /// Utilities for test 6 | pub mod mock_ledger; 7 | /// Prover service related functionalities 8 | pub mod service; 9 | /// SNARK proof generation 10 | pub mod snark; 11 | 12 | pub use snark::*; 13 | -------------------------------------------------------------------------------- /hotshot-state-prover/src/v2/mod.rs: -------------------------------------------------------------------------------- 1 | //! Light client V2 prover 2 | 3 | /// State verifier circuit builder 4 | pub mod circuit; 5 | /// Utilities for test 6 | pub mod mock_ledger; 7 | /// Prover service related functionalities 8 | pub mod service; 9 | /// SNARK proof generation 10 | pub mod snark; 11 | 12 | /// Re-exports 13 | pub use snark::*; 14 | -------------------------------------------------------------------------------- /hotshot-state-prover/src/v3/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Light client V3 prover 2 | 3 | /// State verifier circuit builder 4 | pub mod circuit; 5 | /// Utilities for test 6 | pub mod mock_ledger; 7 | /// Prover service related functionalities 8 | pub mod service; 9 | /// SNARK proof generation 10 | pub mod snark; 11 | 12 | /// Re-exports 13 | pub use snark::*; 14 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | require('dotenv').config(); 2 | 3 | module.exports = { 4 | preset: 'ts-jest', 5 | testEnvironment: 'node', 6 | transform: {'^.+\\.ts?$': 'ts-jest'}, 7 | testRegex: '/tests/.*\\.(test|spec)?\\.(ts|tsx)$', 8 | moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'], 9 | }; 10 | 11 | // Custom configuration for specific tests 12 | -------------------------------------------------------------------------------- /node-metrics/src/api/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod node_validator; 2 | -------------------------------------------------------------------------------- /node-metrics/src/api/node_validator/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod v0; 2 | -------------------------------------------------------------------------------- /node-metrics/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use hotshot::helpers::initialize_logging; 3 | use node_metrics::{run_standalone_service, Options}; 4 | 5 | #[tokio::main] 6 | async fn main() { 7 | initialize_logging(); 8 | 9 | run_standalone_service(Options::parse()).await; 10 | } 11 | -------------------------------------------------------------------------------- /node-metrics/src/service/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client_id; 2 | pub mod client_message; 3 | pub mod client_state; 4 | pub mod data_state; 5 | pub mod node_type; 6 | pub mod server_message; 7 | -------------------------------------------------------------------------------- /node-metrics/src/service/node_type/mod.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "espresso-sequencer", 3 | "version": "0.1.0", 4 | "main": "index.js", 5 | "repository": "https://github.com/EspressoSystems/espresso-sequencer.git", 6 | "author": "Espresso Systems ", 7 | "license": "UNLICENSED", 8 | "devDependencies": { 9 | "@types/jest": "^29.5.12", 10 | "ts-jest": "^29.1.2", 11 | "typescript": "^5.4.5" 12 | }, 13 | "dependencies": { 14 | "@ethers-ext/signer-ledger": "^6.0.0-beta.1", 15 | "@ledgerhq/hw-transport-node-hid": "^6.29.4", 16 | "@safe-global/api-kit": "^2.3.1", 17 | "@safe-global/protocol-kit": "^3.1.0", 18 | "dotenv": "^16.4.5", 19 | "ethers": "^6.12.1", 20 | "jest": "^29.7.0", 21 | "ts-node": "^10.9.2" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /request-response/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "request-response" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | license = "MIT" 6 | 7 | [dependencies] 8 | anyhow = { workspace = true } 9 | 
async-broadcast = { workspace = true } 10 | async-trait = { workspace = true } 11 | bincode = { workspace = true } 12 | blake3 = { workspace = true } 13 | byteorder = { version = "1", default-features = false } 14 | dashmap = { workspace = true } 15 | derive_more = { workspace = true } 16 | hotshot-types = { workspace = true } 17 | parking_lot = { workspace = true } 18 | rand = { workspace = true } 19 | thiserror = { workspace = true } 20 | tokio = { workspace = true } 21 | tokio-util = { workspace = true } 22 | tracing = { workspace = true } 23 | 24 | [dev-dependencies] 25 | serde = { workspace = true } 26 | 27 | [lints] 28 | workspace = true 29 | -------------------------------------------------------------------------------- /request-response/src/data_source.rs: -------------------------------------------------------------------------------- 1 | //! This file contains the [`DataSource`] trait. This trait allows the [`RequestResponseProtocol`] 2 | //! to calculate/derive a response for a specific request. In the confirmation layer the implementer 3 | //! would be something like a [`FeeMerkleTree`] for fee catchup 4 | 5 | use anyhow::Result; 6 | use async_trait::async_trait; 7 | 8 | use super::request::Request; 9 | 10 | /// The trait that allows the [`RequestResponseProtocol`] to calculate/derive a response for a specific request 11 | #[async_trait] 12 | pub trait DataSource: Send + Sync + 'static + Clone { 13 | /// Calculate/derive the response for a specific request 14 | async fn derive_response_for(&self, request: &R) -> Result; 15 | } 16 | -------------------------------------------------------------------------------- /request-response/src/recipient_source.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use async_trait::async_trait; 3 | use hotshot_types::traits::signature_key::SignatureKey; 4 | 5 | use super::request::Request; 6 | 7 | /// A trait that allows the [`RequestResponseProtocol`] to get the recipients that a specific message should 8 | /// expect responses from. 
In `HotShot` this would go on top of the [`Membership`] trait and determine 9 | /// which nodes are able (quorum/DA) to respond to which requests 10 | #[async_trait] 11 | pub trait RecipientSource: Send + Sync + 'static { 12 | /// Get all the recipients that the specific request should expect responses from 13 | async fn get_expected_responders(&self, request: &R) -> Result>; 14 | } 15 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "stable" 3 | components = ["llvm-tools-preview", "rust-src", "clippy"] 4 | profile = "minimal" 5 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | use_try_shorthand = true 3 | match_block_trailing_comma = true 4 | use_field_init_shorthand = true 5 | edition = "2021" 6 | condense_wildcard_suffixes = true 7 | imports_granularity = "Crate" 8 | group_imports = "StdExternalCrate" 9 | 10 | # if lines exceed the desired width, rustfmt often gives up 11 | format_strings = true 12 | 13 | ignore = [ 14 | # the contract bindings are generated files 15 | "contracts/rust/adapter/src/bindings" 16 | ] 17 | -------------------------------------------------------------------------------- /scripts/ci-build-binary: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # usage: 3 | # 4 | # ./ci-build-binary 5 | # 6 | # where is one of: sequencer, sequencer-sqlite, espresso-dev-node, other 7 | # 8 | set -euo pipefail 9 | 10 | case "$1" in 11 | "sequencer") 12 | cargo build --locked --release --bin sequencer 13 | ;; 14 | "sequencer-sqlite") 15 | cargo build --locked --release -p sequencer-sqlite 16 | ;; 17 | "espresso-dev-node") 18 | cargo build --locked --release --features "embedded-db testing" --bin espresso-dev-node 19 | ;; 20 | "other") 21 | BINS="$(cargo metadata --no-deps --format-version 1 \ 22 | | jq -r '.packages[].targets[] | select(.kind[] == "bin") | .name' \ 23 | | grep -v '^sequencer$\|^sequencer-sqlite$\|^espresso-dev-node$' \ 24 | | xargs -I{} echo --bin {} \ 25 | | tr '\n' ' ')" 26 | echo "Building other binaries: $BINS" 27 | cargo build --locked --release $BINS 28 | ;; 29 | *) 30 | echo "Unknown binary: $1" 31 | exit 1 32 | ;; 33 | esac 34 | -------------------------------------------------------------------------------- /scripts/cli: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | docker exec -it espresso-sequencer-example-rollup-1 bin/cli "$@" 4 | -------------------------------------------------------------------------------- /scripts/fmt-pc-logs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # This script takes the logs output by process-compose (formatted as JSON) and writes them in an 4 | # easier-to-read plaintext format with colors. 
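# For example (values illustrative, not taken from a real log), a process-compose JSON line such as
#   {"process": "sequencer0", "message": "starting HTTP server"}
# is printed by this script as
#   sequencer0| starting HTTP server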
5 | # 6 | # Usage (assuming `log_location: /tmp/pc.log` in `process-compose.yaml`: 7 | # 8 | # cat /tmp/pc.log | scripts/fmt-pc-logs > /tmp/formatted.log 9 | # 10 | 11 | import fileinput 12 | import json 13 | 14 | def main(): 15 | for line in fileinput.input(encoding="utf-8"): 16 | obj = json.loads(line) 17 | if 'message' in obj: 18 | print(obj['process'] + '| ' + obj['message']) 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /scripts/multisig-upgrade-entrypoint: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # A script that we can use locally and in docker to run the upgrade proxy script 4 | # with ts-node. The arguments are passed to the upgrade script. 5 | # 6 | # Usage: scripts/multisig-upgrade-entrypoint [args] 7 | # 8 | set -e 9 | 10 | # Default script path 11 | DEFAULT_SCRIPT="upgradeProxy.ts" 12 | SCRIPT_NAME=${1:-$DEFAULT_SCRIPT} 13 | 14 | docker_path=/app/contracts/script/multisigTransactionProposals/safeSDK/$SCRIPT_NAME 15 | 16 | # 1. if the docker path exists use it 17 | # 2. get repo path if the docker path does not exist and use that 18 | # 3. otherwise error 19 | 20 | if [ -f "$docker_path" ]; then 21 | echo "Using docker path: $docker_path" 22 | path=$docker_path 23 | else 24 | REPO_ROOT=$(git rev-parse --show-toplevel) 25 | repo_path=$REPO_ROOT/contracts/script/multisigTransactionProposals/safeSDK/$SCRIPT_NAME 26 | if [ -f "$repo_path" ]; then 27 | path=$repo_path 28 | else 29 | echo "Error: Neither $repo_path nor $docker_path exist." 30 | exit 1 31 | fi 32 | fi 33 | 34 | npx ts-node "$path" "$@" 35 | -------------------------------------------------------------------------------- /scripts/sequencer-entrypoint: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eEu -o pipefail 3 | 4 | export ESPRESSO_SEQUENCER_EMBEDDED_DB=${ESPRESSO_SEQUENCER_EMBEDDED_DB:-false} 5 | 6 | # Trap SIGTERM and SIGINT signals and send them to the process group 7 | trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT 8 | 9 | # Decide which binary to run based on the environment variable 10 | if [ "$ESPRESSO_SEQUENCER_EMBEDDED_DB" = "true" ]; then 11 | echo "Starting sequencer with sqlite..." 12 | /bin/sequencer-sqlite -- storage-sql "$@" 13 | else 14 | echo "Starting sequencer..." 
15 | /bin/sequencer-postgres "$@" 16 | fi 17 | -------------------------------------------------------------------------------- /scripts/show-toolchain-versions: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | which cargo 5 | which rustc 6 | which rustfmt 7 | which forge 8 | which solc 9 | which clippy-driver 10 | 11 | echo 12 | echo 'cargo --version: ' $(cargo --version) 13 | echo 'rustc --version: ' $(rustc --version) 14 | echo 'rustfmt --version: ' $(rustfmt --version) 15 | echo 'cargo fmt --version: ' $(cargo fmt --version) 16 | echo 'cargo clippy --version: ' $(cargo clippy --version) 17 | echo 18 | 19 | forge --version 20 | solc --version 21 | -------------------------------------------------------------------------------- /scripts/test-build-docker-images-native: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euxo pipefail 3 | 4 | images=( 5 | bridge 6 | builder 7 | cdn-broker 8 | cdn-marshal 9 | cdn-whitelist 10 | deploy 11 | espresso-dev-node 12 | nasty-client 13 | node-validator 14 | orchestrator 15 | prover-service 16 | sequencer 17 | state-relay-server 18 | staking-cli 19 | submit-transactions 20 | ) 21 | 22 | for image in "${images[@]}"; do 23 | scripts/build-docker-images-native --image "$image" 24 | done 25 | 26 | # build everything 27 | scripts/build-docker-images-native 28 | 29 | scripts/build-docker-images-native clean 30 | 31 | echo "Ok" 32 | -------------------------------------------------------------------------------- /scripts/ubuntu-install-test-no-nix: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script is to test the installation instructions in doc/ubuntu.md without 3 | # using Nix on ubuntu. It's not recommended to run it outside of a container or 4 | # otherwise ephemeral environment like the CI. 5 | set -e 6 | # 7 | # Execute the ubuntu installation instructions from doc/ubuntu.md 8 | # 9 | # Skip cloning the repo and changing directories, because we want to test with 10 | # the current code and not what's in the main branch. 11 | SCRIPT="$(grep '^ ' doc/ubuntu.md | sed 's/^ //' | sed 's/^cd .*//' | sed 's/^git clone .*//')" 12 | 13 | # Remove sudo if we're already root 14 | if [ "$(whoami)" == root ]; then 15 | SCRIPT="$(echo "$SCRIPT" | sed 's/^sudo //' | sed 's/| sudo /| /')" 16 | fi 17 | 18 | # Workaround: foundryup installs into XDG_CONFIG_HOME if that's set HOME otherwise. 19 | export XDG_CONFIG_HOME=$HOME 20 | 21 | echo "Will execute:" 22 | echo "$SCRIPT" 23 | echo 24 | 25 | echo "$SCRIPT" | bash -exo pipefail 26 | -------------------------------------------------------------------------------- /scripts/ubuntu-install-test-no-nix-docker: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script is to test the ubuntu installation instructions locally on 3 | # non-ubuntu machines by running through the installation and tests inside an 4 | # ubuntu docker container. 5 | set -e 6 | 7 | # Create a temporary copy of the repository to be mounted into the container. 8 | REPO_COPY_DIR="$(mktemp -d)" 9 | 10 | # Remove repo copy on exit, error etc. 11 | trap "exit" INT TERM 12 | trap cleanup EXIT 13 | cleanup(){ 14 | echo "Cleaning up repo copy: $REPO_COPY_DIR, need sudo" 15 | sudo rm -rf "$REPO_COPY_DIR" 16 | } 17 | 18 | git clone --recursive . 
"$REPO_COPY_DIR" 19 | 20 | # Run the installation and tests inside the container. 21 | docker run -it --rm \ 22 | -v "$REPO_COPY_DIR:/code" \ 23 | ubuntu:24.04 \ 24 | bash -c "cd /code && ./scripts/ubuntu-install-test-no-nix" 25 | -------------------------------------------------------------------------------- /sdks/crypto-helper/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | # Set linker flags for macOS dynamic library builds to control the install_name. 2 | # This ensures that the produced .dylib uses @rpath for its install name, making 3 | # it relocatable and easier for consumers to load at runtime, regardless of the 4 | # absolute path where the library is installed. 5 | [target.aarch64-apple-darwin] 6 | rustflags = [ 7 | "-C", 8 | "link-arg=-Wl,-install_name,@rpath/libespresso_crypto_helper-aarch64-apple-darwin.dylib", 9 | ] 10 | 11 | [target.x86_64-apple-darwin] 12 | rustflags = [ 13 | "-C", 14 | "link-arg=-Wl,-install_name,@rpath/libespresso_crypto_helper-x86_64-apple-darwin.dylib", 15 | ] 16 | -------------------------------------------------------------------------------- /sdks/crypto-helper/Cargo.toml: -------------------------------------------------------------------------------- 1 | 2 | [package] 3 | name = "espresso-crypto-helper" 4 | version = "0.1.0" 5 | edition = "2021" 6 | 7 | [lib] 8 | 9 | crate-type = ["cdylib"] 10 | 11 | [dependencies] 12 | jf-rescue = { workspace = true, features = ["std"] } 13 | 14 | ark-ed-on-bn254 = { workspace = true } 15 | ark-ff = { workspace = true } 16 | ark-serialize = { workspace = true } 17 | committable = { workspace = true } 18 | espresso-types = { path = "../../types" } 19 | hotshot-query-service = { workspace = true } 20 | hotshot-types = { workspace = true } 21 | jf-crhf = { workspace = true } 22 | jf-merkle-tree-compat = { workspace = true, features = ["std"] } 23 | primitive-types = { version = "0.13" } 24 | serde = { workspace = true } 25 | serde_json = { workspace = true } 26 | sha2 = { workspace = true } 27 | tagged-base64 = { workspace = true } 28 | 29 | # https://tikv.github.io/doc/openssl/index.html 30 | # We need this for the target `aarch64-unknown-linux-gnu` in CI. 31 | # This can be removed if we figure out this build issue. 32 | openssl = { version = "0.10", features = ["vendored"] } 33 | 34 | [lints] 35 | workspace = true 36 | -------------------------------------------------------------------------------- /sdks/go/README.md: -------------------------------------------------------------------------------- 1 | # Espresso Network Go SDK 2 | 3 | This package provides tools and interfaces for working with the 4 | [Espresso Global Confirmation Layer](https://github.com/EspressoSystems/espresso-network) in Go. It should (eventually) 5 | provide everything needed to integrate a rollup written in Go with the Espresso 6 | 7 | ## How to release 8 | 9 | - Make sure your changes are committed and pushed to the main branch. 10 | - Choose the correct version for your release, following semantic versioning (e.g., `sdks/go/v1.2.3`). 11 | - In the root directory, create a new tag and push it to the remote: 12 | 13 | ```sh 14 | git tag sdks/go/vX.Y.Z 15 | git push origin sdks/go/vX.Y.Z 16 | ``` 17 | 18 | Replace `X.Y.Z` with your desired version number. 19 | 20 | - This will trigger the GitHub Actions workflow to build and release the Go SDK. 
21 | - After the workflow completes, check the 22 | [GitHub Releases page](https://github.com/EspressoSystems/espresso-network/releases) for the published artifacts. 23 | - Verify that the crypto helper library artifacts (e.g., `.so`, `.dylib`, and their `.sha256` files) have been built and 24 | are included in the release assets. 25 | -------------------------------------------------------------------------------- /sdks/go/client-dev-node/types.go: -------------------------------------------------------------------------------- 1 | package clientdevnode 2 | 3 | type DevInfo struct { 4 | BuilderUrl string `json:"builder_url"` 5 | SequencerApiPort uint16 `json:"sequencer_api_port"` 6 | L1Url string `json:"l1_url"` 7 | L1LightClientAddress string `json:"l1_light_client_address"` 8 | } 9 | -------------------------------------------------------------------------------- /sdks/go/client/api.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // This interface represents the full API of clients defined in this package 4 | // It is provided for consumers that wish to use the full scope of the EspressoClient 5 | // while still providing the ability to use mock structures for testing. 6 | 7 | type EspressoClient interface { 8 | QueryService 9 | SubmitAPI 10 | } 11 | -------------------------------------------------------------------------------- /sdks/go/client/submit.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | 6 | common "github.com/EspressoSystems/espresso-network/sdks/go/types/common" 7 | ) 8 | 9 | // Interface to the Espresso Sequencer submit API 10 | type SubmitAPI interface { 11 | // Submit a transaction to the espresso sequencer. 
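	// Illustrative note (not a guarantee of the endpoint's semantics): the returned
	// TaggedBase64 is the hash under which the sequencer acknowledges the transaction,
	// which a caller would typically keep in order to look the transaction up later.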
12 | SubmitTransaction(ctx context.Context, tx common.Transaction) (*common.TaggedBase64, error) 13 | } 14 | -------------------------------------------------------------------------------- /sdks/go/verification/namespace_proof_test_data.json: -------------------------------------------------------------------------------- 1 | { 2 | "ns_proof": { 3 | "V1": { 4 | "ns_index": 1, 5 | "ns_payload": "AwAAAAEAAAADAAAABgAAABQa+m9IKA==", 6 | "ns_proof": "MERKLE_PROOF~AQAAAAAAAAACAAAAAAAAAOiGF-SEiGzWEIFAHfElj12MUBXIWWp32MinBDi2H53ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAO" 7 | } 8 | }, 9 | "vid_commit": "AvidMCommit~WuG_hRf__dciS9qe0uA3xvCupxAe23I5hOIs35J2IkMf", 10 | "vid_common": { 11 | "V1": { 12 | "total_weights": 10, 13 | "recovery_threshold": 5 14 | } 15 | }, 16 | "namespace": 3911702764, 17 | "ns_table": [ 18 | 2, 0, 0, 0, 36, 45, 0, 179, 31, 0, 0, 0, 236, 216, 39, 233, 53, 0, 0, 0 19 | ], 20 | "tx_commit": "fe0505a36be9ad1d8509f7ed28f4eabd96d2c3ce24adcdacaf0d194aa41c1fac" 21 | } 22 | -------------------------------------------------------------------------------- /sdks/go/verification/resp/vid_common.json: -------------------------------------------------------------------------------- 1 | { 2 | "height": 3207986, 3 | "block_hash": "BLOCK~aTme5rw6iy3O2b9EX77inVofN6aurD0NlJ--vY4s2kqj", 4 | "payload_hash": "AvidMCommit~TbqFKauXuC8EGQ_Jek9yK-9X0oyeZrE2C49tdw-AhJwz", 5 | "common": { "V1": { "total_weights": 1100, "recovery_threshold": 367 } } 6 | } 7 | -------------------------------------------------------------------------------- /sequencer-sqlite/Cargo.toml: -------------------------------------------------------------------------------- 1 | # As a workaround for feature unification by cargo this separate crate 2 | # that is **not** a default member of the workspace. 3 | [package] 4 | name = "sequencer-sqlite" 5 | version = "0.1.0" 6 | edition = "2021" 7 | 8 | [features] 9 | fee = ["sequencer/fee"] 10 | pos = ["sequencer/pos"] 11 | drb-and-header = ["sequencer/drb-and-header"] 12 | default = ["fee", "pos", "drb-and-header"] 13 | 14 | [dependencies] 15 | anyhow = { workspace = true } 16 | # disable default features to allow including versions on demand 17 | sequencer = { path = "../sequencer", default-features = false, features = ["embedded-db"] } 18 | tokio = { workspace = true } 19 | -------------------------------------------------------------------------------- /sequencer-sqlite/src/main.rs: -------------------------------------------------------------------------------- 1 | #[tokio::main] 2 | pub async fn main() -> anyhow::Result<()> { 3 | sequencer::main().await 4 | } 5 | -------------------------------------------------------------------------------- /sequencer/api/availability.toml: -------------------------------------------------------------------------------- 1 | [route.getnamespaceproof] 2 | PATH = ["block/:height/namespace/:namespace"] 3 | ":height" = "Integer" 4 | ":namespace" = "Integer" 5 | DOC = "Get the transactions in a namespace of the given block, along with a proof." 6 | 7 | [route.incorrect_encoding_proof] 8 | PATH = ["incorrect-encoding-proof/:block_number"] 9 | ":block_number" = "Integer" 10 | DOC = "Generate a proof of incorrect encoding for the given block number." 
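The routes above only declare paths, parameter types, and doc strings; the handlers are defined elsewhere in the sequencer's Rust code. As a rough, illustrative sketch of how the first route could be consumed, the snippet below builds the URL by hand and fetches it with `reqwest`. The `/availability` mount point, the untyped JSON response, and the use of `reqwest`/`anyhow`/`serde_json` are assumptions made for this example, not something specified by the TOML file.

```rust
use anyhow::Result;

/// Illustrative only: query the namespace-proof route declared above,
/// PATH = ["block/:height/namespace/:namespace"], where both parameters are integers.
async fn fetch_namespace_proof(
    base_url: &str,
    height: u64,
    namespace: u64,
) -> Result<serde_json::Value> {
    let url = format!("{base_url}/availability/block/{height}/namespace/{namespace}");
    // The endpoint returns the namespace's transactions together with a proof against
    // the block's namespace table; this sketch keeps the response as untyped JSON.
    let response = reqwest::get(&url).await?.error_for_status()?;
    Ok(response.json().await?)
}
```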
11 | -------------------------------------------------------------------------------- /sequencer/api/commitment_task.toml: -------------------------------------------------------------------------------- 1 | [route.gethotshotcontract] 2 | PATH = ["/hotshot_contract"] 3 | DOC = "Get the address of HotShot contract on Layer1." 4 | -------------------------------------------------------------------------------- /sequencer/api/config.toml: -------------------------------------------------------------------------------- 1 | [route.hotshot] 2 | PATH = ["/hotshot"] 3 | METHOD = "GET" 4 | DOC = "Get the Hotshot configuration for the current node." 5 | 6 | [route.env] 7 | PATH = ["/env"] 8 | METHOD = "GET" 9 | DOC = "Get all ESPRESSO environment variables set for the current node." -------------------------------------------------------------------------------- /sequencer/api/fee.toml: -------------------------------------------------------------------------------- 1 | [route.getfeebalance] 2 | PATH = ["fee-balance/latest/:address"] 3 | ":address" = "Literal" 4 | DOC = "Get current balance in fee state. Expected parameter is an Ethereum address in hex format." -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V1001__add_drb_params.sql: -------------------------------------------------------------------------------- 1 | -- HotShotConfig was upgraded to include parameters for stake table capacity and DRB difficulty. Configs 2 | -- which were persisted before this upgrade may be missing these parameters. This migration 3 | -- initializes them with a default. We use the `||` operator to merge two JSON objects, one 4 | -- containing default values for the new config parameters and one containing the existing config. 5 | -- When keys are present in both, the rightmost operand (the existing config) will take precedence. 6 | UPDATE network_config SET 7 | config = jsonb_set(config, '{config}', '{ 8 | "stake_table_capacity": 200, 9 | "drb_difficulty": 0, 10 | "drb_upgrade_difficulty": 0 11 | 12 | }' || (config->'config')); 13 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V1002__add_block_reward_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE epoch_drb_and_root 2 | ADD COLUMN block_reward BYTEA, 3 | ADD COLUMN stake_table_hash BYTEA; -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V1003__create_reward_merkle_tree_v2.sql: -------------------------------------------------------------------------------- 1 | 2 | -- The new reward_merkle_tree table corresponds to `RewardMerkleTreeV2` with keccak hashing algorithm, 3 | -- and is used starting from protocol version V4. 
4 | 5 | CREATE TABLE reward_merkle_tree_v2 ( 6 | path JSONB NOT NULL, 7 | created BIGINT NOT NULL, 8 | hash_id INT NOT NULL REFERENCES hash (id), 9 | children JSONB, 10 | children_bitvec BIT(2), 11 | idx JSONB, 12 | entry JSONB 13 | ); 14 | 15 | ALTER TABLE 16 | reward_merkle_tree_v2 17 | ADD 18 | CONSTRAINT reward_merkle_tree_v2_pk PRIMARY KEY (path, created); 19 | 20 | CREATE INDEX reward_merkle_tree_v2_created ON reward_merkle_tree_v2 (created); -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V1004__eqc.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE eqc ( 2 | id bool PRIMARY KEY DEFAULT true, 3 | data BYTEA 4 | ); 5 | REVOKE DELETE, TRUNCATE ON eqc FROM public; 6 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V1005__all_validators.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE stake_table_validators ( 2 | epoch BIGINT NOT NULL, 3 | address TEXT NOT NULL, 4 | validator JSONB NOT NULL, 5 | PRIMARY KEY (epoch, address) 6 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V12__network_config.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE network_config ( 2 | id SERIAL PRIMARY KEY, 3 | config JSONB 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V13__consensus_state.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE anchor_leaf ( 2 | -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or 3 | -- update that there is only a single entry in this table: the latest decided leaf. 4 | id INT PRIMARY KEY, 5 | 6 | height BIGINT, 7 | view BIGINT, 8 | leaf BYTEA, 9 | qc BYTEA 10 | ); 11 | 12 | CREATE TABLE highest_voted_view ( 13 | -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or 14 | -- update that there is only a single entry in this table: the latest known view. 
15 | id INT PRIMARY KEY, 16 | 17 | view BIGINT 18 | ); 19 | 20 | CREATE TABLE da_proposal ( 21 | view BIGINT PRIMARY KEY, 22 | data BYTEA 23 | ); 24 | 25 | CREATE TABLE vid_share ( 26 | view BIGINT PRIMARY KEY, 27 | data BYTEA 28 | ); 29 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V14__state_tables.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS hash ( 2 | id SERIAL PRIMARY KEY, value BYTEA NOT NULL UNIQUE 3 | ); 4 | 5 | CREATE TABLE fee_merkle_tree ( 6 | path INTEGER[] NOT NULL, 7 | created BIGINT NOT NULL, 8 | hash_id INT NOT NULL REFERENCES hash (id), 9 | children INT[], 10 | children_bitvec BIT(256), 11 | index JSONB, 12 | entry JSONB 13 | ); 14 | 15 | ALTER TABLE 16 | fee_merkle_tree 17 | ADD 18 | CONSTRAINT fee_merkle_tree_pk PRIMARY KEY (path, created); 19 | 20 | CREATE INDEX fee_merkle_tree_created ON fee_merkle_tree (created); 21 | 22 | CREATE TABLE block_merkle_tree ( 23 | path INTEGER[] NOT NULL, 24 | created BIGINT NOT NULL, 25 | hash_id INT NOT NULL REFERENCES hash (id), 26 | children INT[], 27 | children_bitvec BIT(3), 28 | index JSONB, 29 | entry JSONB 30 | ); 31 | 32 | ALTER TABLE 33 | block_merkle_tree 34 | ADD 35 | CONSTRAINT block_merkle_tree_pk PRIMARY KEY (path, created); 36 | 37 | CREATE INDEX block_merkle_tree_created ON block_merkle_tree (created); 38 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V15__undecided_state.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE undecided_state ( 2 | -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or 3 | -- update that there is only a single entry in this table: the latest known state. 4 | id INT PRIMARY KEY, 5 | 6 | leaves BYTEA NOT NULL, 7 | state BYTEA NOT NULL 8 | ); 9 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V16__merkle_root_columns.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE header 2 | ADD column block_merkle_tree_root text 3 | GENERATED ALWAYS AS (data->>'block_merkle_tree_root') STORED NOT NULL; 4 | 5 | ALTER TABLE header 6 | ADD column fee_merkle_tree_root text 7 | GENERATED ALWAYS as (data->>'fee_merkle_tree_root') STORED NOT NULL; 8 | 9 | CREATE INDEX header_block_merkle_tree_root_idx ON header (block_merkle_tree_root); 10 | CREATE INDEX header_fee_merkle_tree_root_idx ON header (fee_merkle_tree_root); -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V301__merkle_root_column_indexes.sql: -------------------------------------------------------------------------------- 1 | -- Migration V16 created these columns and indexed them. Then, migration V36 altered the expression 2 | -- use to populate the generated columns. However, it did so by dropping and re-adding the columns 3 | -- with a different expression, which also caused the indexes on the columns to be dropped. They 4 | -- were erroneously not added back. This migration corrects that error by recreating the indexes. 
5 | CREATE INDEX header_block_merkle_tree_root_idx ON header (block_merkle_tree_root); 6 | CREATE INDEX header_fee_merkle_tree_root_idx ON header (fee_merkle_tree_root); 7 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V302__update_state_tables_types.sql: -------------------------------------------------------------------------------- 1 | 2 | ALTER TABLE fee_merkle_tree 3 | ALTER COLUMN path TYPE JSONB USING array_to_json(path), 4 | ALTER COLUMN children TYPE JSONB USING array_to_json(children); 5 | 6 | ALTER TABLE fee_merkle_tree RENAME COLUMN index TO idx; 7 | 8 | 9 | ALTER TABLE block_merkle_tree 10 | ALTER COLUMN path TYPE JSONB USING array_to_json(path), 11 | ALTER COLUMN children TYPE JSONB USING array_to_json(children); 12 | 13 | ALTER TABLE block_merkle_tree RENAME COLUMN index TO idx; -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V31__drop_merklized_state_created_index.sql: -------------------------------------------------------------------------------- 1 | -- Indexes on created block height are not strictly necessary for Merklized state queries. In most 2 | -- cases, we want the queries to use the multi-column index on (path, created), which allows us to 3 | -- seek directly to the desired path down the tree and then take the latest version of that path in 4 | -- a single B-tree traversal. 5 | -- 6 | -- Occasionally, it is marginally faster to use the created index, such as when a Merkle node was 7 | -- modified very recently, and you don't have to scan back very far in the created index before 8 | -- finding a version of that node. However, it is sometimes *much* slower to use `created` over the 9 | -- multi-column index, such as when a node hasn't had a new version in a very long time. For reasons 10 | -- that are not well understood, having these indexes causes the query planner to sometimes use them 11 | -- in the extremely slow cases, but dropping them means we always use the multi-column index. 12 | DROP INDEX fee_merkle_tree_created; 13 | DROP INDEX block_merkle_tree_created; 14 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V32__saved_proposals.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE quorum_proposals ( 2 | view BIGINT PRIMARY KEY, 3 | data BYTEA 4 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V33__chain_config_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE chain_config ( 2 | commitment VARCHAR PRIMARY KEY, 3 | data BYTEA NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V34__builder_urls.sql: -------------------------------------------------------------------------------- 1 | -- When multi-builder support was added, the configuration field `builder_url: Url` was replaced by 2 | -- an array `builder_urls: Vec`. If the saved config has no `builder_urls` field, it is older 3 | -- than this change. Populate `builder_urls` with a singleton array formed from the old value of 4 | -- `builder_url`, and delete the no longer used `builder_url`. 
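-- Illustrative example of the rewrite performed below (URL value made up):
--   before: {"config": {"builder_url": "http://builder:41003", ...}}
--   after:  {"config": {"builder_urls": ["http://builder:41003"], ...}}
-- i.e. the old scalar is wrapped in a one-element JSON array and the old key is removed.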
5 | UPDATE network_config 6 | SET config = 7 | jsonb_set(config, '{config,builder_urls}', jsonb_build_array(config->'config'->>'builder_url')) 8 | #- '{config,builder_url}' 9 | WHERE NOT (config->'config' ? 'builder_urls'); 10 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V35__add_upgrade_params.sql: -------------------------------------------------------------------------------- 1 | -- HotShotConfig was upgraded to include parameters for proposing and voting on upgrades. Configs 2 | -- which were persisted before this upgrade may be missing these parameters. This migration 3 | -- initializes them with a default. We use the `||` operator to merge two JSON objects, one 4 | -- containing default values for the new config parameters and one containing the existing config. 5 | -- When keys are present in both, the rightmost operand (the existing config) will take precedence. 6 | -- 7 | -- For the upgrade settings, we use JS MAX_SAFE_INTEGER for the start parameters so that nodes will 8 | -- never do an upgrade, unless explicitly configured otherwise. 9 | UPDATE network_config SET 10 | config = jsonb_set(config, '{config}', '{ 11 | "start_proposing_view": 9007199254740991, 12 | "stop_proposing_view": 0, 13 | "start_voting_view": 9007199254740991, 14 | "stop_voting_view": 0, 15 | "start_proposing_time": 9007199254740991, 16 | "stop_proposing_time": 0, 17 | "start_voting_time": 9007199254740991, 18 | "stop_voting_time": 0 19 | }' || (config->'config')); 20 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V36__alter_merkle_root_column_expressions.sql: -------------------------------------------------------------------------------- 1 | -- The generated columns for header merkle roots were originally created by extracting fields 2 | -- `block_merkle_tree_root` and `fee_merkle_tree_root` from the header JSON. Post 0.1, though, the 3 | -- header serialization changed so that these fields are now nested one level deeper: 4 | -- `fields.block_merkle_tree_root` and `fields.fee_merkle_tree_root`. This migration alters the 5 | -- generated column expression to use NULL coalescing to extract the value from either of these 6 | -- paths depending on which version of the header we have. 7 | -- 8 | -- Pre 17.x (we target Postgres >= 16.x), there is not explicit instruction for changing the 9 | -- expression of a generated column, so the best we can do is drop and re-add the column with a 10 | -- different expression. 
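-- The two header layouts that the coalesce() below has to handle (field values elided):
--   0.1 headers:   {"block_merkle_tree_root": ..., "fee_merkle_tree_root": ..., ...}
--   later headers: {"fields": {"block_merkle_tree_root": ..., "fee_merkle_tree_root": ...}, ...}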
11 | 12 | ALTER TABLE header 13 | DROP column block_merkle_tree_root, 14 | ADD column block_merkle_tree_root text 15 | GENERATED ALWAYS AS (coalesce(data->'fields'->>'block_merkle_tree_root', data->>'block_merkle_tree_root')) STORED NOT NULL, 16 | DROP column fee_merkle_tree_root, 17 | ADD column fee_merkle_tree_root text 18 | GENERATED ALWAYS AS (coalesce(data->'fields'->>'fee_merkle_tree_root', data->>'fee_merkle_tree_root')) STORED NOT NULL; 19 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V38__add_quorum_proposal_hash.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE quorum_proposals 2 | ADD COLUMN leaf_hash VARCHAR; 3 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V39__upgrade_certificate.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE upgrade_certificate ( 2 | id bool PRIMARY KEY DEFAULT true, 3 | data BYTEA 4 | ); 5 | REVOKE DELETE, TRUNCATE ON upgrade_certificate FROM public; 6 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V401__archive_provider.sql: -------------------------------------------------------------------------------- 1 | -- Add information needed for consensus storage to act as a provider for archive recovery. 2 | 3 | -- Add payload hash to DA proposal, since the query service requests missing payloads by hash. 4 | ALTER TABLE da_proposal 5 | ADD COLUMN payload_hash VARCHAR; 6 | CREATE INDEX da_proposal_payload_hash_idx ON da_proposal (payload_hash); 7 | 8 | -- Add payload hash to VID share, since the query service requests missing VID common by payload 9 | -- hash. 10 | ALTER TABLE vid_share 11 | ADD COLUMN payload_hash VARCHAR; 12 | CREATE INDEX vid_share_payload_hash_idx ON vid_share (payload_hash); 13 | 14 | -- Add QC storage, since the query service requires missing leaves to be fetched alongside a QC with 15 | -- that leaf hash. 
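-- Illustrative lookup that this table and its leaf_hash index are meant to make cheap
-- during archive recovery (shown for example purposes only):
--   SELECT data FROM quorum_certificate WHERE leaf_hash = $1;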
16 | CREATE TABLE quorum_certificate ( 17 | view BIGINT PRIMARY KEY, 18 | leaf_hash VARCHAR NOT NULL, 19 | data BYTEA NOT NULL 20 | ); 21 | CREATE INDEX quorum_certificate_leaf_hash_idx ON quorum_certificate (leaf_hash); 22 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V402__next_epoch_qc.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE next_epoch_quorum_certificate ( 2 | id bool PRIMARY KEY DEFAULT true, 3 | data BYTEA 4 | ); 5 | REVOKE DELETE, TRUNCATE ON next_epoch_quorum_certificate FROM public; 6 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V403__drop_undecided_state.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE undecided_state; 2 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V40__anchor_leaf_chain.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE anchor_leaf 2 | DROP COLUMN id, 3 | DROP COLUMN height, 4 | ADD CONSTRAINT anchor_leaf_pk PRIMARY KEY (view); 5 | 6 | CREATE TABLE event_stream ( 7 | id SERIAL PRIMARY KEY, 8 | last_processed_view BIGINT 9 | ); 10 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V41__epoch_height.sql: -------------------------------------------------------------------------------- 1 | -- HotShotConfig was upgraded to include an `epoch_height` parameter. This migration initializes it 2 | -- with a default. We use the `||` operator to merge two JSON objects, one containing the default 3 | -- value for the new config parameter and one containing the existing config. When keys are present 4 | -- in both, the rightmost operand (the existing config) will take precedence. 
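-- Worked example of the merge below (numbers illustrative):
--   '{"epoch_height": 0}'::jsonb || '{"epoch_height": 3000}'::jsonb  =>  {"epoch_height": 3000}
-- so an epoch_height already present in the stored config is preserved, and the default of 0
-- is only used when the key is missing.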
5 | UPDATE network_config SET 6 | config = jsonb_set(config, '{config}', '{ 7 | "epoch_height": 0 8 | }' || (config->'config')); 9 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V42__index_quorum_proposal_leaf_hash.sql: -------------------------------------------------------------------------------- 1 | CREATE UNIQUE INDEX quorum_proposals_leaf_hash_idx ON quorum_proposals (leaf_hash); 2 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V501__epoch_tables.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE anchor_leaf2 ( 2 | view BIGINT PRIMARY KEY, 3 | leaf BYTEA, 4 | qc BYTEA 5 | ); 6 | 7 | 8 | CREATE TABLE da_proposal2 ( 9 | view BIGINT PRIMARY KEY, 10 | payload_hash VARCHAR, 11 | data BYTEA 12 | ); 13 | 14 | CREATE TABLE vid_share2 ( 15 | view BIGINT PRIMARY KEY, 16 | payload_hash VARCHAR, 17 | data BYTEA 18 | ); 19 | 20 | 21 | CREATE TABLE quorum_proposals2 ( 22 | view BIGINT PRIMARY KEY, 23 | leaf_hash VARCHAR, 24 | data BYTEA 25 | ); 26 | 27 | CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals (leaf_hash); 28 | CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal (payload_hash); 29 | CREATE INDEX vid_share2_payload_hash_idx ON vid_share (payload_hash); 30 | 31 | CREATE TABLE quorum_certificate2 ( 32 | view BIGINT PRIMARY KEY, 33 | leaf_hash VARCHAR NOT NULL, 34 | data BYTEA NOT NULL 35 | ); 36 | 37 | CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate (leaf_hash); 38 | 39 | CREATE TABLE epoch_migration ( 40 | table_name TEXT PRIMARY KEY, 41 | completed bool DEFAULT FALSE 42 | ); 43 | 44 | INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('quorum_proposals'), ('quorum_certificate'); -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V502__epoch_drb_and_root.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE epoch_drb_and_root ( 2 | epoch BIGINT PRIMARY KEY, 3 | drb_result BYTEA, 4 | block_header BYTEA, 5 | stake BYTEA 6 | ); 7 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V504__reward_merkle_tree.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE reward_merkle_tree ( 2 | path JSONB NOT NULL, 3 | created BIGINT NOT NULL, 4 | hash_id INT NOT NULL REFERENCES hash (id), 5 | children JSONB, 6 | children_bitvec BIT(256), 7 | idx JSONB, 8 | entry JSONB 9 | ); 10 | 11 | ALTER TABLE 12 | reward_merkle_tree 13 | ADD 14 | CONSTRAINT reward_merkle_tree_pk PRIMARY KEY (path, created); 15 | 16 | CREATE INDEX reward_merkle_tree_created ON reward_merkle_tree (created); 17 | 18 | 19 | ALTER TABLE header 20 | ADD column reward_merkle_tree_root text 21 | GENERATED ALWAYS AS (data->'fields'->>'reward_merkle_tree_root') STORED; -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V506__migrated_rows_col.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE epoch_migration 2 | ADD COLUMN migrated_rows BIGINT DEFAULT 0; -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V801__stake_table_events_table.sql: 
-------------------------------------------------------------------------------- 1 | CREATE TABLE stake_table_events ( 2 | id INTEGER PRIMARY KEY CHECK (id = 0), 3 | l1_block BIGINT NOT NULL, 4 | data JSONB NOT NULL 5 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V802__redefine_stake_table_events.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE stake_table_events; 2 | 3 | -- Stores the stake table events from the contract. 4 | -- Each event is uniquely identified by a combination of `l1_block` and `log_index` 5 | CREATE TABLE stake_table_events ( 6 | l1_block BIGINT NOT NULL, 7 | log_index BIGINT NOT NULL, 8 | event JSONB NOT NULL, 9 | PRIMARY KEY (l1_block, log_index) 10 | ); 11 | 12 | -- Tracks the last L1 block that has been finalized and whose events have been processed and stored. 13 | -- This tracking is necessary to determine the starting point for fetching new contract events. 14 | CREATE TABLE stake_table_events_l1_block ( 15 | id INTEGER PRIMARY KEY CHECK (id = 0), 16 | last_l1_block BIGINT NOT NULL 17 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V803__drb.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE drb ( 2 | epoch BIGINT PRIMARY KEY, 3 | drb_input BYTEA 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V804__libp2p_dht.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE libp2p_dht ( 2 | id INTEGER PRIMARY KEY, 3 | serialized_records BYTEA NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/postgres/V805__consensus_restart_view.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE restart_view ( 2 | -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or 3 | -- update that there is only a single entry in this table: the view we should restart from. 4 | id INT PRIMARY KEY, 5 | 6 | view BIGINT 7 | ); 8 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V102__network_config.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE network_config ( 2 | id INTEGER PRIMARY KEY AUTOINCREMENT, 3 | config JSONB 4 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V103__consensus_state.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE anchor_leaf ( 2 | view BIGINT PRIMARY KEY, 3 | leaf JSONB, 4 | qc JSONB 5 | ); 6 | 7 | CREATE TABLE highest_voted_view ( 8 | -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or 9 | -- update that there is only a single entry in this table: the latest known view. 
10 | id INT PRIMARY KEY, 11 | view BIGINT 12 | ); 13 | 14 | CREATE TABLE da_proposal ( 15 | view BIGINT PRIMARY KEY, 16 | data JSONB 17 | ); 18 | 19 | CREATE TABLE vid_share ( 20 | view BIGINT PRIMARY KEY, 21 | data JSONB 22 | ); 23 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V104__state_tables.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS hash ( 2 | id INTEGER PRIMARY KEY AUTOINCREMENT, value JSONB NOT NULL UNIQUE 3 | ); 4 | 5 | CREATE TABLE fee_merkle_tree ( 6 | path JSONB NOT NULL, 7 | created BIGINT NOT NULL, 8 | hash_id INT NOT NULL REFERENCES hash (id), 9 | children JSONB, 10 | children_bitvec BLOB, 11 | idx JSONB, 12 | entry JSONB, 13 | PRIMARY KEY (path, created) 14 | ); 15 | 16 | CREATE TABLE block_merkle_tree ( 17 | path JSONB NOT NULL, 18 | created BIGINT NOT NULL, 19 | hash_id INT NOT NULL REFERENCES hash (id), 20 | children JSONB, 21 | children_bitvec BLOB, 22 | idx JSONB, 23 | entry JSONB, 24 | PRIMARY KEY (path, created) 25 | ); 26 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V105__undecided_state.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE undecided_state ( 2 | -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or 3 | -- update that there is only a single entry in this table: the latest known state. 4 | id INT PRIMARY KEY, 5 | leaves BLOB NOT NULL, 6 | state BLOB NOT NULL 7 | ); 8 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V106__merkle_root_columns.sql: -------------------------------------------------------------------------------- 1 | -- Add block_merkle_tree_root column as a generated column 2 | ALTER TABLE header 3 | ADD COLUMN block_merkle_tree_root TEXT 4 | GENERATED ALWAYS AS (coalesce(json_extract(data, '$.block_merkle_tree_root'), json_extract(data, '$.fields.block_merkle_tree_root'))) STORED NOT NULL; 5 | 6 | -- Add fee_merkle_tree_root column as a generated column 7 | ALTER TABLE header 8 | ADD COLUMN fee_merkle_tree_root TEXT 9 | GENERATED ALWAYS AS (coalesce(json_extract(data, '$.fee_merkle_tree_root'), json_extract(data, '$.fields.fee_merkle_tree_root'))) STORED NOT NULL; 10 | 11 | CREATE INDEX header_block_merkle_tree_root_idx ON header (block_merkle_tree_root); 12 | CREATE INDEX header_fee_merkle_tree_root_idx ON header (fee_merkle_tree_root); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V107__saved_proposals.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE quorum_proposals ( 2 | view BIGINT PRIMARY KEY, 3 | data BLOB, 4 | leaf_hash TEXT 5 | ); 6 | 7 | CREATE UNIQUE INDEX quorum_proposals_leaf_hash_idx ON quorum_proposals (leaf_hash); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V108__chain_config_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE chain_config ( 2 | commitment TEXT PRIMARY KEY, 3 | data BLOB NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V109__upgrade_certificate.sql: 
-------------------------------------------------------------------------------- 1 | CREATE TABLE upgrade_certificate ( 2 | id bool PRIMARY KEY DEFAULT true, 3 | data BLOB 4 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V110__event_stream.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE event_stream ( 2 | id INTEGER PRIMARY KEY AUTOINCREMENT, 3 | last_processed_view BIGINT 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V201__archive_provider.sql: -------------------------------------------------------------------------------- 1 | -- Add information needed for consensus storage to act as a provider for archive recovery. 2 | 3 | -- Add payload hash to DA proposal, since the query service requests missing payloads by hash. 4 | ALTER TABLE da_proposal 5 | ADD COLUMN payload_hash VARCHAR; 6 | CREATE INDEX da_proposal_payload_hash_idx ON da_proposal (payload_hash); 7 | 8 | -- Add payload hash to VID share, since the query service requests missing VID common by payload 9 | -- hash. 10 | ALTER TABLE vid_share 11 | ADD COLUMN payload_hash VARCHAR; 12 | CREATE INDEX vid_share_payload_hash_idx ON vid_share (payload_hash); 13 | 14 | -- Add QC storage, since the query service requires missing leaves to be fetched alongside a QC with 15 | -- that leaf hash. 16 | CREATE TABLE quorum_certificate ( 17 | view BIGINT PRIMARY KEY, 18 | leaf_hash VARCHAR NOT NULL, 19 | data BLOB NOT NULL 20 | ); 21 | CREATE INDEX quorum_certificate_leaf_hash_idx ON quorum_certificate (leaf_hash); 22 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V203__next_epoch_qc.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE next_epoch_quorum_certificate ( 2 | id bool PRIMARY KEY DEFAULT true, 3 | data BLOB 4 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V204__drop_undecided_state.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE undecided_state; 2 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V301__epoch_tables.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE anchor_leaf2 ( 2 | view BIGINT PRIMARY KEY, 3 | leaf BLOB, 4 | qc BLOB 5 | ); 6 | 7 | 8 | CREATE TABLE da_proposal2 ( 9 | view BIGINT PRIMARY KEY, 10 | payload_hash VARCHAR, 11 | data BLOB 12 | ); 13 | 14 | CREATE TABLE vid_share2 ( 15 | view BIGINT PRIMARY KEY, 16 | payload_hash VARCHAR, 17 | data BLOB 18 | ); 19 | 20 | 21 | CREATE TABLE quorum_proposals2 ( 22 | view BIGINT PRIMARY KEY, 23 | leaf_hash VARCHAR, 24 | data BLOB 25 | ); 26 | 27 | CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals2 (leaf_hash); 28 | CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal2 (payload_hash); 29 | CREATE INDEX vid_share2_payload_hash_idx ON vid_share2 (payload_hash); 30 | 31 | CREATE TABLE quorum_certificate2 ( 32 | view BIGINT PRIMARY KEY, 33 | leaf_hash VARCHAR NOT NULL, 34 | data BLOB NOT NULL 35 | ); 36 | 37 | CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate2 (leaf_hash); 38 | 39 | CREATE TABLE epoch_migration ( 40 | table_name TEXT PRIMARY KEY, 41 | completed bool NOT NULL DEFAULT
FALSE 42 | ); 43 | 44 | INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('quorum_proposals'), ('quorum_certificate'); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V302__epoch_drb_and_root.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE epoch_drb_and_root ( 2 | epoch BIGINT PRIMARY KEY, 3 | drb_result BLOB, 4 | block_header BLOB, 5 | stake BLOB 6 | ); 7 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V304__reward_merkle_tree.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE reward_merkle_tree ( 2 | path JSONB NOT NULL, 3 | created BIGINT NOT NULL, 4 | hash_id INT NOT NULL REFERENCES hash (id), 5 | children JSONB, 6 | children_bitvec BLOB, 7 | idx JSONB, 8 | entry JSONB, 9 | PRIMARY KEY (path, created) 10 | ); 11 | 12 | ALTER TABLE header 13 | ADD COLUMN reward_merkle_tree_root TEXT 14 | GENERATED ALWAYS AS (json_extract(data, '$.fields.reward_merkle_tree_root')) STORED; -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V306__migrated_rows_col.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE epoch_migration 2 | ADD COLUMN migrated_rows BIGINT DEFAULT 0; -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V601__stake_table_events_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE stake_table_events ( 2 | id INTEGER PRIMARY KEY CHECK (id = 0), 3 | l1_block INTEGER NOT NULL UNIQUE, 4 | data JSONB NOT NULL 5 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V602__redefine_stake_table_events.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE stake_table_events; 2 | 3 | -- Stores the stake table events from the contract. 4 | -- Each event is uniquely identified by a combination of `l1_block` and `log_index` 5 | CREATE TABLE stake_table_events ( 6 | l1_block BIGINT NOT NULL, 7 | log_index BIGINT NOT NULL, 8 | event JSONB NOT NULL, 9 | PRIMARY KEY (l1_block, log_index) 10 | ); 11 | 12 | -- Tracks the last L1 block that has been finalized and whose events have been processed and stored. 13 | -- This tracking is necessary to determine the starting point for fetching new contract events. 
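-- Illustrative usage sketch, not part of this migration: once both tables exist, a consumer
-- can replay stored events in on-chain order and advance the fetch cursor with a single-row
-- upsert, for example:
--   SELECT event FROM stake_table_events ORDER BY l1_block, log_index;
--   INSERT INTO stake_table_events_l1_block (id, last_l1_block) VALUES (0, :block)
--     ON CONFLICT (id) DO UPDATE SET last_l1_block = excluded.last_l1_block;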
14 | CREATE TABLE stake_table_events_l1_block ( 15 | id INTEGER PRIMARY KEY CHECK (id = 0), 16 | last_l1_block BIGINT NOT NULL 17 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V603__drb.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE drb ( 2 | epoch BIGINT PRIMARY KEY, 3 | drb_input BLOB 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V604__libp2p_dht.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE libp2p_dht ( 2 | id INTEGER PRIMARY KEY, 3 | serialized_records BLOB NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V605__consensus_restart_view.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE restart_view ( 2 | -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or 3 | -- update that there is only a single entry in this table: the view we should restart from. 4 | id INT PRIMARY KEY, 5 | view BIGINT 6 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V606__add_block_reward_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE epoch_drb_and_root 2 | ADD COLUMN block_reward BLOB; 3 | 4 | ALTER TABLE epoch_drb_and_root 5 | ADD COLUMN stake_table_hash BLOB; 6 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V607__create_reward_merkle_tree_v2.sql: -------------------------------------------------------------------------------- 1 | 2 | -- The new reward_merkle_tree_v2 table corresponds to `RewardMerkleTreeV2` with the keccak hashing algorithm, 3 | -- and is used starting from protocol version V4. 4 | 5 | CREATE TABLE reward_merkle_tree_v2 ( 6 | path JSONB NOT NULL, 7 | created BIGINT NOT NULL, 8 | hash_id INT NOT NULL REFERENCES hash (id), 9 | children JSONB, 10 | children_bitvec BLOB, 11 | idx JSONB, 12 | entry JSONB, 13 | PRIMARY KEY (path, created) 14 | ); -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V801__eqc.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE eqc ( 2 | id bool PRIMARY KEY DEFAULT true, 3 | data BLOB 4 | ); 5 | -------------------------------------------------------------------------------- /sequencer/api/migrations/sqlite/V802__all_validators.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE stake_table_validators ( 2 | epoch BIGINT NOT NULL, 3 | address TEXT NOT NULL, 4 | validator JSONB NOT NULL, 5 | PRIMARY KEY (epoch, address) 6 | ); -------------------------------------------------------------------------------- /sequencer/api/state_relay_server.toml: -------------------------------------------------------------------------------- 1 | [meta] 2 | NAME = "hotshot_state_relay_server" 3 | DESCRIPTION = "A relay server for storing and serving the light client states and their signatures" 4 | FORMAT_VERSION = "0.1.0" 5 | 6 | [route.poststatesignature] 7 | PATH = ["state"] 8 | METHOD = "POST" 9 | DOC = """ 10 | Post a light client state and its signature for a given block height.
11 | """ 12 | 13 | [route.lateststate] 14 | PATH = ["lateststate"] 15 | METHOD = "GET" 16 | DOC = """ 17 | Fetch the latest light client state who has enough corresponding Schnorr signatures collected, 18 | as well as a list of those signatures. 19 | """ 20 | 21 | [route.getlateststate] 22 | PATH = ["state"] 23 | METHOD = "GET" 24 | DOC = """ 25 | DEPRECATED! 26 | Fetch the latest light client state who has enough corresponding Schnorr signatures collected, 27 | as well as a list of those signatures. 28 | """ 29 | 30 | 31 | [route.postlegacystatesignature] 32 | PATH = ["legacy-state"] 33 | METHOD = "POST" 34 | DOC = """ 35 | DEPRECATED! 36 | Post a state and its signature for the legacy light client. 37 | """ 38 | 39 | [route.getlatestlegacystate] 40 | PATH = ["legacy-state"] 41 | METHOD = "GET" 42 | DOC = """ 43 | DEPRECATED! 44 | Fetch the latest state for the legacy light client. 45 | """ -------------------------------------------------------------------------------- /sequencer/api/state_signature.toml: -------------------------------------------------------------------------------- 1 | [route.get_state_signature] 2 | PATH = ["block/:height"] 3 | ":height" = "Integer" 4 | DOC = "Get the signature for the light client state" 5 | -------------------------------------------------------------------------------- /sequencer/api/submit.toml: -------------------------------------------------------------------------------- 1 | [route.submit] 2 | PATH = ["/submit"] 3 | METHOD = "POST" 4 | DOC = "Submit transaction to HotShot handle." -------------------------------------------------------------------------------- /sequencer/build.rs: -------------------------------------------------------------------------------- 1 | use vergen::EmitBuilder; 2 | 3 | pub fn main() -> anyhow::Result<()> { 4 | // Set an environment variable with git information 5 | EmitBuilder::builder() 6 | .git_sha(false) 7 | .git_describe(true, true, None) 8 | .git_commit_timestamp() 9 | .emit()?; 10 | Ok(()) 11 | } 12 | -------------------------------------------------------------------------------- /sequencer/src/bin/utils/main.rs: -------------------------------------------------------------------------------- 1 | //! 
sequencer utility programs 2 | 3 | use clap::{Parser, Subcommand}; 4 | use sequencer_utils::logging; 5 | mod keygen; 6 | mod ns_aggregator; 7 | mod pubkey; 8 | mod reset_storage; 9 | 10 | #[derive(Debug, Parser)] 11 | struct Options { 12 | #[clap(flatten)] 13 | logging: logging::Config, 14 | 15 | #[command(subcommand)] 16 | command: Command, 17 | } 18 | 19 | #[derive(Debug, Subcommand)] 20 | enum Command { 21 | Keygen(keygen::Options), 22 | Pubkey(pubkey::Options), 23 | #[command(subcommand)] 24 | ResetStorage(reset_storage::Commands), 25 | NsAggregator(ns_aggregator::Options), 26 | } 27 | 28 | #[tokio::main] 29 | async fn main() -> anyhow::Result<()> { 30 | let opt = Options::parse(); 31 | opt.logging.init(); 32 | 33 | match opt.command { 34 | Command::Keygen(opt) => keygen::run(opt), 35 | Command::Pubkey(opt) => { 36 | pubkey::run(opt); 37 | Ok(()) 38 | }, 39 | Command::ResetStorage(opt) => reset_storage::run(opt).await, 40 | Command::NsAggregator(opt) => ns_aggregator::run(opt).await, 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /sequencer/src/network/libp2p.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; 3 | 4 | /// Split off the peer ID from a multiaddress, returning the shortened address and the peer ID. 5 | /// 6 | /// # Errors 7 | /// - If the last protocol in the address is not a peer ID. 8 | pub fn split_off_peer_id(mut address: Multiaddr) -> Result<(PeerId, Multiaddr)> { 9 | let Some(Protocol::P2p(peer_id)) = address.pop() else { 10 | return Err(anyhow::anyhow!("Failed to parse peer ID from address")); 11 | }; 12 | 13 | Ok((peer_id, address)) 14 | } 15 | -------------------------------------------------------------------------------- /sequencer/src/network/mod.rs: -------------------------------------------------------------------------------- 1 | use espresso_types::PubKey; 2 | 3 | use super::*; 4 | 5 | pub mod cdn; 6 | pub mod libp2p; 7 | 8 | pub type Production = CombinedNetworks<SeqTypes>; 9 | 10 | pub type Memory = MemoryNetwork<PubKey>; 11 | -------------------------------------------------------------------------------- /sequencer/src/request_response/catchup/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod state; 2 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | (import 2 | ( 3 | let 4 | lock = builtins.fromJSON (builtins.readFile ./flake.lock); 5 | in 6 | fetchTarball { 7 | url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; 8 | sha256 = lock.nodes.flake-compat.locked.narHash; 9 | } 10 | ) 11 | { 12 | src = ./.; 13 | }).shellNix 14 | -------------------------------------------------------------------------------- /staking-cli/DEVELOPER_DOCS.md: -------------------------------------------------------------------------------- 1 | # staking-cli Developer Docs 2 | 3 | The staking-cli can be used to fund the stake table on L1 for our testnet and demos. 4 | 5 | ``` 6 | cargo run --bin staking-cli -- stake-for-demo --help 7 | 8 | Usage: staking-cli stake-for-demo [OPTIONS] 9 | 10 | Options: 11 | --num-validators 12 | The number of validators to register. 13 | 14 | The default (5) works for the local native and docker demos.
15 | 16 | [default: 5] 17 | 18 | --delegation-config 19 | [default: VariableAmounts] 20 | [possible values: equal-amounts, variable-amounts, multiple-delegators] 21 | 22 | -h, --help 23 | Print help (see a summary with '-h') 24 | ``` 25 | 26 | The following delegation configurations are currently supported: 27 | 28 | 1. Equal amounts: each validator self-delegates an equal amount, leading to uniform staking weights. 29 | 2. Variable amounts: validators delegate 100, 200, ..., 500 ESP tokens in order. This is currently the default because it 30 | used to be the only option. 31 | 3. Multiple delegators: like option 2, but also adds a randomly chosen number of other delegators to each validator. 32 | -------------------------------------------------------------------------------- /staking-cli/config.decaf.toml: -------------------------------------------------------------------------------- 1 | rpc_url = "https://ethereum-sepolia-rpc.publicnode.com" 2 | stake_table_address = "0x40304fbe94d5e7d1492dd90c53a2d63e8506a037" 3 | 4 | [signer] 5 | mnemonic = "test test test test test test test test test test test junk" 6 | account_index = 0 7 | ledger = false 8 | -------------------------------------------------------------------------------- /staking-cli/config.demo.toml: -------------------------------------------------------------------------------- 1 | # currently used for testing 2 | mnemonic = "test test test test test test test test test test test junk" 3 | account_index = 0 4 | rpc_url = "http://127.0.0.1:8545" 5 | stake_table_address = "0x0000000000000000000000000000000000000000" 6 | -------------------------------------------------------------------------------- /staking-cli/config.dev.toml: -------------------------------------------------------------------------------- 1 | # currently used for testing 2 | mnemonic = "test test test test test test test test test test test junk" 3 | account_index = 0 4 | rollup_rpc_url = "http://127.0.0.1:8545" 5 | stake_table_address = "0x0000000000000000000000000000000000000000" 6 | -------------------------------------------------------------------------------- /staking-cli/src/funding.rs: -------------------------------------------------------------------------------- 1 | use alloy::{ 2 | network::{Ethereum, TransactionBuilder as _}, 3 | primitives::{Address, U256}, 4 | providers::{PendingTransactionBuilder, Provider}, 5 | rpc::types::TransactionRequest, 6 | }; 7 | use anyhow::Result; 8 | use hotshot_contract_adapter::{ 9 | evm::DecodeRevert as _, 10 | sol_types::EspToken::{self, EspTokenErrors}, 11 | }; 12 | 13 | pub async fn send_eth( 14 | provider: impl Provider, 15 | to: Address, 16 | amount: U256, 17 | ) -> Result<PendingTransactionBuilder<Ethereum>> { 18 | tracing::info!("fund address {to} with {amount} ETH"); 19 | let tx = TransactionRequest::default().with_to(to).with_value(amount); 20 | Ok(provider.send_transaction(tx).await?)
21 | } 22 | 23 | pub async fn send_esp( 24 | provider: impl Provider, 25 | token_address: Address, 26 | to: Address, 27 | amount: U256, 28 | ) -> Result<PendingTransactionBuilder<Ethereum>> { 29 | tracing::info!("transfer {amount} ESP to {to}"); 30 | let token = EspToken::new(token_address, provider); 31 | token 32 | .transfer(to, amount) 33 | .send() 34 | .await 35 | .maybe_decode_revert::<EspTokenErrors>() 36 | } 37 | -------------------------------------------------------------------------------- /staking-cli/src/l1.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /staking-cli/src/receipt.rs: -------------------------------------------------------------------------------- 1 | use alloy::{ 2 | network::Ethereum, providers::PendingTransactionBuilder, rpc::types::TransactionReceipt, 3 | }; 4 | use anyhow::{bail, Result}; 5 | 6 | pub(crate) trait ReceiptExt { 7 | async fn assert_success(self) -> Result<TransactionReceipt>; 8 | } 9 | 10 | impl ReceiptExt for PendingTransactionBuilder<Ethereum> { 11 | async fn assert_success(self) -> Result<TransactionReceipt> { 12 | let receipt = self.get_receipt().await?; 13 | if !receipt.status() { 14 | bail!("transaction failed: hash={:?}", receipt.transaction_hash); 15 | } 16 | Ok(receipt) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /tests/main.rs: -------------------------------------------------------------------------------- 1 | pub mod common; 2 | mod proof_of_stake; 3 | mod reward_claims_e2e; 4 | mod smoke; 5 | mod upgrades; 6 | -------------------------------------------------------------------------------- /types/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod v0; 2 | 3 | // Re-export the latest major version compatibility types. 4 | pub use v0::*; 5 | 6 | pub mod eth_signature_key; 7 | mod reference_tests; 8 | -------------------------------------------------------------------------------- /types/src/v0/impls/block/full_payload.rs: -------------------------------------------------------------------------------- 1 | mod ns_proof; 2 | mod ns_table; 3 | mod payload; 4 | -------------------------------------------------------------------------------- /types/src/v0/impls/block/full_payload/ns_proof.rs: -------------------------------------------------------------------------------- 1 | //! This module implements the namespace proof for all VID schemes.
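//! The `advz` submodule covers the ADVZ scheme and `avidm` the AVID-M scheme.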
2 | 3 | pub mod advz; 4 | pub mod avidm; 5 | -------------------------------------------------------------------------------- /types/src/v0/impls/block/mod.rs: -------------------------------------------------------------------------------- 1 | mod full_payload; 2 | mod namespace_payload; 3 | mod test; 4 | mod uint_bytes; 5 | 6 | pub use uint_bytes::*; 7 | -------------------------------------------------------------------------------- /types/src/v0/impls/block/namespace_payload.rs: -------------------------------------------------------------------------------- 1 | mod iter; 2 | mod ns_payload; 3 | mod ns_payload_range; 4 | mod tx_proof; 5 | mod types; 6 | -------------------------------------------------------------------------------- /types/src/v0/impls/block/namespace_payload/iter.rs: -------------------------------------------------------------------------------- 1 | use crate::{Index, Iter, NsIter, Payload}; 2 | 3 | impl<'a> Iter<'a> { 4 | pub fn new(block: &'a Payload) -> Self { 5 | Self { 6 | ns_iter: NsIter::new(&block.ns_table().len()).peekable(), 7 | tx_iter: None, 8 | block, 9 | } 10 | } 11 | } 12 | 13 | impl Iterator for Iter<'_> { 14 | type Item = Index; 15 | 16 | fn next(&mut self) -> Option<Self::Item> { 17 | loop { 18 | let Some(ns_index) = self.ns_iter.peek() else { 19 | break None; // ns_iter consumed 20 | }; 21 | 22 | if let Some(tx_index) = self 23 | .tx_iter 24 | .get_or_insert_with(|| self.block.ns_payload(ns_index).iter()) 25 | .next() 26 | { 27 | break Some(Index { 28 | ns_index: ns_index.clone(), 29 | position: tx_index.0 as u32, 30 | }); 31 | } 32 | 33 | self.tx_iter = None; // unset `tx_iter`; it's consumed for this namespace 34 | self.ns_iter.next(); 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /types/src/v0/impls/block/namespace_payload/ns_payload_range.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Range; 2 | 3 | use crate::{v0::traits::NsPayloadBytesRange, NsPayloadByteLen, NsPayloadRange}; 4 | 5 | impl NsPayloadRange { 6 | /// TODO restrict visibility? 7 | pub fn new(start: usize, end: usize) -> Self { 8 | Self(start..end) 9 | } 10 | 11 | /// Access the underlying index range for this namespace inside a block 12 | /// payload. 13 | pub fn as_block_range(&self) -> Range<usize> { 14 | self.0.clone() 15 | } 16 | 17 | /// Return the byte length of this namespace. 18 | pub fn byte_len(&self) -> NsPayloadByteLen { 19 | NsPayloadByteLen::from_usize(self.0.len()) 20 | } 21 | 22 | /// Convert a [`NsPayloadBytesRange`] into a range that's relative to the 23 | /// entire block payload. 24 | pub fn block_range<'a, R>(&self, range: &R) -> Range<usize> 25 | where 26 | R: NsPayloadBytesRange<'a>, 27 | { 28 | let range = range.ns_payload_range(); 29 | range.start + self.0.start..range.end + self.0.start 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /types/src/v0/impls/block/namespace_payload/tx_proof.rs: -------------------------------------------------------------------------------- 1 | //! This module implements the transaction proof for all VID schemes.
2 | 3 | pub mod advz; 4 | pub mod avidm; 5 | -------------------------------------------------------------------------------- /types/src/v0/impls/mod.rs: -------------------------------------------------------------------------------- 1 | pub use super::*; 2 | 3 | mod block; 4 | mod chain_config; 5 | mod fee_info; 6 | mod header; 7 | mod instance_state; 8 | mod l1; 9 | mod reward; 10 | mod stake_table; 11 | mod state; 12 | mod transaction; 13 | 14 | pub use fee_info::{retain_accounts, FeeError}; 15 | #[cfg(any(test, feature = "testing"))] 16 | pub use instance_state::mock; 17 | pub use instance_state::{NodeState, UpgradeMap}; 18 | pub use reward::*; 19 | pub use stake_table::*; 20 | pub use state::{ 21 | get_l1_deposits, BuilderValidationError, ProposalValidationError, StateValidationError, 22 | ValidatedState, 23 | }; 24 | -------------------------------------------------------------------------------- /types/src/v0/v0_1/mod.rs: -------------------------------------------------------------------------------- 1 | use vbs::version::Version; 2 | 3 | pub const VERSION: Version = Version { major: 0, minor: 1 }; 4 | 5 | mod block; 6 | mod chain_config; 7 | mod fee_info; 8 | mod header; 9 | mod instance_state; 10 | mod l1; 11 | mod state; 12 | mod transaction; 13 | 14 | pub use block::*; 15 | pub use chain_config::*; 16 | pub use fee_info::*; 17 | pub use header::Header; 18 | pub use instance_state::*; 19 | pub use l1::*; 20 | pub use state::*; 21 | pub use transaction::*; 22 | pub use crate::eth_signature_key::BuilderSignature; 23 | -------------------------------------------------------------------------------- /types/src/v0/v0_1/state.rs: -------------------------------------------------------------------------------- 1 | use committable::Commitment; 2 | use jf_merkle_tree_compat::{ 3 | prelude::{LightWeightSHA3MerkleTree, Sha3Digest, Sha3Node}, 4 | universal_merkle_tree::UniversalMerkleTree, 5 | MerkleTreeScheme, 6 | }; 7 | 8 | use super::{FeeAccount, FeeAmount}; 9 | use crate::Header; 10 | 11 | pub const BLOCK_MERKLE_TREE_HEIGHT: usize = 32; 12 | pub const FEE_MERKLE_TREE_HEIGHT: usize = 20; 13 | const FEE_MERKLE_TREE_ARITY: usize = 256; 14 | 15 | // The block merkle tree accumulates header commitments. However, since the underlying 16 | // representation of the commitment type remains the same even while the header itself changes, 17 | // using the underlying type `[u8; 32]` allows us to use the same state type across minor versions. 
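// Note on the fee tree defined below (assuming `FeeAccount` is the usual 20-byte Ethereum
// address): with arity 256 and height 20 it has 256^20 = 2^160 leaf slots, exactly one per
// possible account address, so every fee account maps to a unique position in the universal tree.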
18 | pub type BlockMerkleTree = LightWeightSHA3MerkleTree<Commitment<Header>>; 19 | pub type BlockMerkleCommitment = <BlockMerkleTree as MerkleTreeScheme>::Commitment; 20 | 21 | pub type FeeMerkleTree = 22 | UniversalMerkleTree<FeeAccount, Sha3Digest, FeeAmount, FEE_MERKLE_TREE_ARITY, Sha3Node>; 23 | pub type FeeMerkleCommitment = <FeeMerkleTree as MerkleTreeScheme>::Commitment; 24 | -------------------------------------------------------------------------------- /types/src/v0/v0_1/transaction.rs: -------------------------------------------------------------------------------- 1 | use derive_more::{Display, From, Into}; 2 | 3 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive( 7 | Clone, 8 | Serialize, 9 | Deserialize, 10 | Debug, 11 | PartialEq, 12 | Eq, 13 | Hash, 14 | CanonicalSerialize, 15 | CanonicalDeserialize, 16 | )] 17 | pub struct Transaction { 18 | pub(crate) namespace: NamespaceId, 19 | #[serde(with = "base64_bytes")] 20 | pub(crate) payload: Vec<u8>, 21 | } 22 | 23 | #[derive( 24 | Clone, 25 | Copy, 26 | Serialize, 27 | Debug, 28 | Display, 29 | PartialEq, 30 | Eq, 31 | Hash, 32 | Into, 33 | From, 34 | Default, 35 | CanonicalDeserialize, 36 | CanonicalSerialize, 37 | PartialOrd, 38 | Ord, 39 | )] 40 | #[display("{_0}")] 41 | pub struct NamespaceId(pub(crate) u64); 42 | -------------------------------------------------------------------------------- /types/src/v0/v0_2/mod.rs: -------------------------------------------------------------------------------- 1 | use vbs::version::Version; 2 | 3 | // Re-export types which haven't changed since the last minor version. 4 | pub use super::v0_1::{ 5 | ADVZNsProof, ADVZTxProof, AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, 6 | BuilderSignature, ChainConfig, ChainId, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, 7 | FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client, 8 | L1ClientOptions, L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, 9 | NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsTable, NsTableBuilder, 10 | NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, 11 | ResolvableChainConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, 12 | TxPayloadRange, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, 13 | ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, 14 | NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, 15 | }; 16 | 17 | pub const VERSION: Version = Version { major: 0, minor: 2 }; 18 | -------------------------------------------------------------------------------- /types/src/v0/v0_3/txproof.rs: -------------------------------------------------------------------------------- 1 | use super::{AvidMNsProof, TxIndex}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] 5 | pub struct AvidMTxProof { 6 | pub(crate) tx_index: TxIndex, 7 | pub(crate) ns_proof: AvidMNsProof, 8 | } 9 | -------------------------------------------------------------------------------- /utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sequencer-utils" 3 | version = "0.1.0" 4 | authors = ["Espresso Systems "] 5 | edition = "2021" 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [features] 9 | testing = [] 10 | 11 | [dependencies] 12 | alloy = { workspace = true } 13 | anyhow = { workspace = true } 14 | ark-serialize = {
workspace = true, features = ["derive"] } 15 | async-trait = { workspace = true } 16 | clap = { workspace = true } 17 | committable = { workspace = true } 18 | hotshot = { workspace = true } 19 | hotshot-example-types = { workspace = true } 20 | log-panics = { workspace = true } 21 | portpicker = { workspace = true } 22 | serde = { workspace = true } 23 | serde_json = "^1.0.113" 24 | surf = "2.3.2" 25 | tokio = { workspace = true } 26 | toml = { workspace = true } 27 | tracing = "0.1.37" 28 | url = "2.3.1" 29 | 30 | [dev-dependencies] 31 | test-log = { workspace = true } 32 | 33 | [lints] 34 | workspace = true 35 | -------------------------------------------------------------------------------- /utils/src/test_utils.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /vid/src/utils.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod bytes_to_field; 2 | -------------------------------------------------------------------------------- /zkevm-node-additions/init_pool_db.sql: -------------------------------------------------------------------------------- 1 | -- Create the database for the pool. In the combined database, this DB is not 2 | -- created via the `POSTGRES_DB` environment variable passed to the docker 3 | -- container. 4 | CREATE DATABASE pool_db; 5 | --------------------------------------------------------------------------------