├── .github └── workflows │ ├── book.yml │ ├── pr.yml │ └── test.yml ├── .gitignore ├── .gitmodules ├── .vscode └── settings.json ├── Cargo.lock ├── Cargo.toml ├── README.md ├── book.toml ├── book ├── SUMMARY.md ├── architecture.md ├── components.md ├── deployed-contracts.md ├── deployment.md ├── introduction.md ├── mermaid │ ├── mermaid-init.js │ └── mermaid.min.js ├── query-data-root-proofs.md └── reproducible-builds.md ├── chains.example.json ├── contracts ├── .env.example ├── .gitignore ├── README.md ├── foundry.toml ├── remappings.txt ├── script │ ├── Base.s.sol │ ├── Deploy.s.sol │ ├── Guardian.s.sol │ ├── UpdateVkey.s.sol │ ├── UpdateVkeySingle.s.sol │ └── Upgrade.s.sol ├── src │ ├── SP1Vector.sol │ └── interfaces │ │ └── ISP1Vector.sol └── test │ └── SP1Vector.t.sol ├── elf └── vector-elf ├── primitives ├── Cargo.toml └── src │ ├── consts.rs │ ├── header_range.rs │ ├── justification.rs │ ├── lib.rs │ ├── merkle.rs │ ├── rotate.rs │ └── types.rs ├── program ├── Cargo.toml ├── elf │ └── riscv32im-succinct-zkvm-elf └── src │ └── main.rs ├── query ├── .env.example ├── .eslintrc.json ├── .gitignore ├── app │ ├── api │ │ ├── health │ │ │ └── route.ts │ │ ├── justification │ │ │ └── route.ts │ │ ├── range │ │ │ └── route.ts │ │ └── route.ts │ └── utils │ │ ├── abi.ts │ │ ├── avail.ts │ │ ├── deployments.json │ │ └── shared.ts ├── next.config.mjs ├── package-lock.json ├── package.json ├── postcss.config.mjs ├── tailwind.config.ts └── tsconfig.json ├── script ├── .env.example ├── Cargo.toml ├── bin │ ├── costs.rs │ ├── genesis.rs │ ├── operator.rs │ ├── test.rs │ └── vkey.rs ├── build.rs ├── rust-toolchain └── src │ ├── lib.rs │ └── relay.rs └── services ├── .env.example ├── Cargo.toml ├── bin └── indexer.rs ├── src ├── aws.rs ├── input.rs ├── lib.rs └── types.rs └── test_assets ├── ancestry.json ├── ancestry_missing_link_no_majority.json └── ancestry_missing_link_works.json /.github/workflows/book.yml: -------------------------------------------------------------------------------- 1 | # Documentation and mdbook related jobs. 
2 | # Reference: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml 3 | 4 | name: book 5 | 6 | on: 7 | push: 8 | branches: [main] 9 | pull_request: 10 | branches: [main] 11 | paths: 12 | - "book/**" 13 | merge_group: 14 | 15 | jobs: 16 | lint: 17 | runs-on: ubuntu-latest 18 | name: lint 19 | timeout-minutes: 60 20 | 21 | steps: 22 | - uses: actions/checkout@v4 23 | 24 | - name: Install mdbook-linkcheck 25 | run: | 26 | mkdir mdbook-linkcheck 27 | curl -sSL -o mdbook-linkcheck.zip https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/latest/download/mdbook-linkcheck.x86_64-unknown-linux-gnu.zip 28 | unzip mdbook-linkcheck.zip -d ./mdbook-linkcheck 29 | chmod +x $(pwd)/mdbook-linkcheck/mdbook-linkcheck 30 | echo $(pwd)/mdbook-linkcheck >> $GITHUB_PATH 31 | 32 | - name: Run linkcheck 33 | run: mdbook-linkcheck --standalone 34 | 35 | build: 36 | runs-on: ubuntu-latest 37 | timeout-minutes: 60 38 | steps: 39 | - uses: actions/checkout@v4 40 | - uses: dtolnay/rust-toolchain@nightly 41 | - name: Install mdbook 42 | run: | 43 | mkdir mdbook 44 | curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.14/mdbook-v0.4.14-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook 45 | echo $(pwd)/mdbook >> $GITHUB_PATH 46 | 47 | - name: Install mdbook-template 48 | run: | 49 | mkdir mdbook-template 50 | curl -sSL https://github.com/sgoudham/mdbook-template/releases/latest/download/mdbook-template-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook-template 51 | echo $(pwd)/mdbook-template >> $GITHUB_PATH 52 | 53 | - uses: Swatinem/rust-cache@v2 54 | with: 55 | cache-on-failure: true 56 | 57 | - name: Build book 58 | run: mdbook build 59 | 60 | - name: Archive artifact 61 | shell: sh 62 | run: | 63 | chmod -c -R +rX "target/book" | 64 | while read line; do 65 | echo "::warning title=Invalid file permissions automatically fixed::$line" 66 | done 67 | tar \ 68 | --dereference --hard-dereference \ 69 | --directory "target/book" \ 70 | -cvf "$RUNNER_TEMP/artifact.tar" \ 71 | --exclude=.git \ 72 | --exclude=.github \ 73 | . 
74 | 
75 |       - name: Upload artifact
76 |         uses: actions/upload-artifact@v4
77 |         with:
78 |           name: github-pages
79 |           path: ${{ runner.temp }}/artifact.tar
80 |           retention-days: 1
81 |           if-no-files-found: error
82 | 
83 |   deploy:
84 |     # Only deploy on a push to main
85 |     if: github.ref_name == 'main' && github.event_name == 'push'
86 |     runs-on: ubuntu-latest
87 |     needs: [lint, build]
88 | 
89 |     # Grant GITHUB_TOKEN the permissions required to make a Pages deployment
90 |     permissions:
91 |       pages: write
92 |       id-token: write
93 | 
94 |     environment:
95 |       name: github-pages
96 |       url: ${{ steps.deployment.outputs.page_url }}
97 | 
98 |     timeout-minutes: 60
99 | 
100 |     steps:
101 |       - name: Deploy to GitHub Pages
102 |         id: deployment
103 |         uses: actions/deploy-pages@v4
--------------------------------------------------------------------------------
/.github/workflows/pr.yml:
--------------------------------------------------------------------------------
1 | name: PR
2 | 
3 | on:
4 |   push:
5 |     branches: [main]
6 |   pull_request:
7 |     branches:
8 |       - "**"
9 |   merge_group:
10 | 
11 | concurrency:
12 |   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
13 |   cancel-in-progress: true
14 | 
15 | jobs:
16 |   lint:
17 |     name: Formatting & Clippy
18 |     runs-on: [runs-on, runner=16cpu-linux-arm64, "run-id=${{ github.run_id }}"]
19 |     env:
20 |       CARGO_NET_GIT_FETCH_WITH_CLI: "true"
21 |     steps:
22 |       - name: Checkout sources
23 |         uses: actions/checkout@v4
24 | 
25 |       - name: Install Rust
26 |         uses: actions-rs/toolchain@v1
27 |         with:
28 |           profile: minimal
29 |           toolchain: stable
30 |           components: rustfmt
31 | 
32 |       - name: Run rustfmt
33 |         run: cargo fmt --all -- --check
34 |         env:
35 |           CARGO_INCREMENTAL: 1
36 | 
37 |       - name: Run cargo clippy
38 |         run: cargo clippy --all-features --all-targets -- -D warnings -A incomplete-features
39 |         env:
40 |           CARGO_INCREMENTAL: 1
41 | 
42 |   elf:
43 |     runs-on:
44 |       - runs-on
45 |       - runner=16cpu-linux-x64
46 |       - run-id=${{ github.run_id }}
47 |     steps:
48 |       - name: Checkout code
49 |         uses: actions/checkout@v4
50 |         with:
51 |           ref: ${{ github.event.pull_request.head.ref }}
52 |       - name: Install SP1 toolchain
53 |         run: |
54 |           curl -L https://sp1.succinct.xyz | bash
55 |           ~/.sp1/bin/sp1up
56 |           ~/.sp1/bin/cargo-prove prove --version
57 |           source ~/.bashrc
58 |       - name: Setup Docker Buildx
59 |         uses: docker/setup-buildx-action@v3
60 |       - name: Verify the SP1 Vector Binary
61 |         run: |
62 |           # Build the binaries
63 |           cd program
64 |           ~/.sp1/bin/cargo-prove prove build --elf-name vector-elf --docker --tag v4.1.3 --output-directory ../elf
65 |           cd ../
66 |           # Check for any changes in the elf directory
67 |           if [ -n "$(git status --porcelain elf/)" ]; then
68 |             echo "❌ ELF files changed during build!"
69 | git diff elf/ 70 | exit 1 71 | else 72 | echo "✅ ELF files remained unchanged" 73 | fi -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: workflow_dispatch 4 | 5 | env: 6 | FOUNDRY_PROFILE: ci 7 | 8 | jobs: 9 | check: 10 | strategy: 11 | fail-fast: true 12 | 13 | name: Foundry project 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | with: 18 | submodules: recursive 19 | 20 | - name: Install Foundry 21 | uses: foundry-rs/foundry-toolchain@v1 22 | with: 23 | version: nightly 24 | 25 | - name: Run Forge build 26 | run: | 27 | forge --version 28 | forge build --sizes 29 | id: build 30 | 31 | - name: Run Forge tests 32 | run: | 33 | forge test -vvv 34 | id: test 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Cargo build 2 | **/target 3 | 4 | # Cargo config 5 | .cargo 6 | 7 | # Profile-guided optimization 8 | /tmp 9 | pgo-data.profdata 10 | 11 | # MacOS nuisances 12 | .DS_Store 13 | 14 | # Proofs 15 | **/proof-with-pis.json 16 | **/proof-with-io.json 17 | 18 | 19 | # Added by cargo 20 | 21 | /target 22 | 23 | .env 24 | 25 | # vscode debug config 26 | .vscode/launch.json 27 | 28 | # IntelliJ IDEA 29 | .idea 30 | 31 | /etc/secrets 32 | 33 | **/filtered_transactions/** 34 | 35 | chains*.json 36 | !chains.example.json -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "contracts/lib/forge-std"] 2 | path = contracts/lib/forge-std 3 | url = https://github.com/foundry-rs/forge-std 4 | [submodule "contracts/lib/succinctx"] 5 | path = contracts/lib/succinctx 6 | url = https://github.com/succinctlabs/succinctx 7 | [submodule "contracts/lib/sp1-contracts"] 8 | path = contracts/lib/sp1-contracts 9 | url = https://github.com/succinctlabs/sp1-contracts 10 | [submodule "contracts/lib/openzeppelin-contracts"] 11 | path = contracts/lib/openzeppelin-contracts 12 | url = https://github.com/openzeppelin/openzeppelin-contracts 13 | [submodule "contracts/lib/openzeppelin-contracts-upgradeable"] 14 | path = contracts/lib/openzeppelin-contracts-upgradeable 15 | url = https://github.com/openzeppelin/openzeppelin-contracts-upgradeable 16 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.inlineSuggest.enabled": true, 3 | "[rust]": { 4 | "editor.defaultFormatter": "rust-lang.rust-analyzer", 5 | "editor.formatOnSave": true, 6 | "editor.hover.enabled": true 7 | }, 8 | "[solidity]": { 9 | "editor.defaultFormatter": "JuanBlanco.solidity" 10 | }, 11 | "solidity.formatter": "forge", 12 | "editor.rulers": [ 13 | 100 14 | ], 15 | "rust-analyzer.check.overrideCommand": [ 16 | "cargo", 17 | "clippy", 18 | "--workspace", 19 | "--message-format=json", 20 | "--all-features", 21 | "--all-targets", 22 | "--", 23 | "-A", 24 | "incomplete-features" 25 | ], 26 | "rust-analyzer.linkedProjects": [ 27 | "primitives/Cargo.toml", 28 | "program/Cargo.toml", 29 | "script/Cargo.toml", 30 | "services/Cargo.toml" 31 | ], 32 | "rust-analyzer.showUnlinkedFileNotification": false, 33 | } 
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["script", "services", "primitives", "program"] 3 | resolver = "2" 4 | 5 | [workspace.dependencies] 6 | # Avail 7 | avail-subxt = { git = "https://github.com/availproject/avail.git", tag = "v2.2.5.1" } 8 | sp-core = { git = "https://github.com/availproject/polkadot-sdk.git", tag = "polkadot-1.7.1-patch-10", default-features = false } 9 | subxt = "0.34" 10 | codec = { package = "parity-scale-codec", version = "3", default-features = false } 11 | 12 | # Cryptography 13 | ed25519-consensus = { version = "2.1", default-features = false } 14 | sha2 = { version = "0.10.8", default-features = false } 15 | blake2 = "0.10.6" 16 | 17 | # Alloy 18 | alloy = { version = "0.11.1", features = ["full"] } 19 | 20 | # Common 21 | anyhow = "1.0.68" 22 | clap = { version = "4.0", features = ["derive"] } 23 | dotenv = "0.15.0" 24 | env_logger = "0.9.0" 25 | hex = "0.4.3" 26 | log = "0.4.14" 27 | serde_json = "1.0.86" 28 | tokio = { version = "1.2.0", features = ["full"] } 29 | serde = { version = "1", features = ["derive"] } 30 | itertools = "0.10.5" 31 | chrono = "0.4.39" 32 | 33 | # sp1 34 | sp1-sdk = "4.0.0-rc.9" 35 | sp1-build = "4.1.3" 36 | sp1-zkvm = "4.0.0-rc.9" 37 | 38 | reqwest = { version = "0.11.20", features = ["json"] } 39 | futures = "0.3.30" 40 | 41 | aws-config = { version = "1.5.1", features = ["behavior-version-latest"] } 42 | aws-sdk-dynamodb = "1.34.0" 43 | 44 | sp1-vectorx-script = { path = "script" } 45 | sp1-vectorx-program = { path = "program" } 46 | services = { path = "services" } 47 | sp1-vector-primitives = { path = "primitives" } 48 | 49 | # logging 50 | tracing = "0.1.41" 51 | tracing-subscriber = "0.3.19" 52 | 53 | 54 | [profile.release] 55 | opt-level = 3 56 | lto = true 57 | codegen-units = 1 58 | 59 | [profile.dev] 60 | opt-level = 0 61 | debug = true 62 | 63 | [patch.crates-io] 64 | sp-core = { git = "https://github.com/availproject/polkadot-sdk.git", tag = "polkadot-1.7.1-patch-10" } 65 | sp-io = { git = "https://github.com/availproject/polkadot-sdk.git", tag = "polkadot-1.7.1-patch-10" } 66 | sp-runtime = { git = "https://github.com/availproject/polkadot-sdk.git", tag = "polkadot-1.7.1-patch-10" } 67 | sp-std = { git = "https://github.com/availproject/polkadot-sdk.git", tag = "polkadot-1.7.1-patch-10" } 68 | 69 | sha2-v0-9-9 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", tag = "patch-sha2-0.9.9-sp1-4.0.0-rc.3" } 70 | sha2-v0-10-8 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", tag = "patch-sha2-0.10.8-sp1-4.0.0-rc.3" } 71 | curve25519-dalek-ng = { git = "https://github.com/sp1-patches/curve25519-dalek-ng", tag = "patch-4.1.1-sp1-4.0.0-rc.3" } 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SP1 Vector 2 | 3 | Implementation of zero-knowledge proof circuits for [Vector](https://blog.availproject.org/data-attestation-bridge/), Avail's Data Attestation Bridge in SP1. 
4 | 
5 | **[Docs](https://succinctlabs.github.io/sp1-vector)**
6 | 
--------------------------------------------------------------------------------
/book.toml:
--------------------------------------------------------------------------------
1 | [book]
2 | authors = ["Ratan Kaliani"]
3 | language = "en"
4 | multilingual = false
5 | src = "book"
6 | title = "SP1 Vector Documentation"
7 | 
8 | [build]
9 | build-dir = "target/book"
10 | 
11 | [output.html]
12 | git-repository-url = "https://github.com/succinctlabs/sp1-vector"
13 | git-repository-icon = "fa-github"
14 | additional-js = ["book/mermaid/mermaid.min.js", "book/mermaid/mermaid-init.js"]
15 | 
16 | [preprocessor.template]
17 | before = ["links"]
18 | 
19 | [preprocessor.mermaid]
20 | command = "mdbook-mermaid"
--------------------------------------------------------------------------------
/book/SUMMARY.md:
--------------------------------------------------------------------------------
1 | # Summary
2 | 
3 | - [Introduction](./introduction.md)
4 | - [Program Architecture](./architecture.md)
5 | - [Components](./components.md)
6 | - [Deployment](./deployment.md)
7 | - [Query Data Root Proofs](./query-data-root-proofs.md)
8 | - [Deployed Contracts](./deployed-contracts.md)
9 | - [Reproducible Builds](./reproducible-builds.md)
--------------------------------------------------------------------------------
/book/architecture.md:
--------------------------------------------------------------------------------
1 | # Program Architecture
2 | 
3 | ## Header Range
4 | 
5 | The header range program computes the data root and state root commitments for a range of headers. The program verifies the following:
6 | 1. The first header in the range is the trusted header (matching the header hash stored for `latestBlock` on the `SP1Vector.sol` contract when the proof is verified).
7 | 2. There exists a valid justification for the target block, signed by an authority set whose hash is stored in the `SP1Vector.sol` contract and whose authority set id is >= the authority set id of the trusted header.
8 | 3. The intermediate headers are linked in order by block number and parent hash (see the sketch after this list). Note: as long as the justification on the target block is valid and the trusted header is valid, there is no way to produce a valid linked header range that does not match the real range.
9 | 4. The authority set hash of the justification matches the authority set hash of the target block.
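As an illustration, the linking check in step 3 boils down to a pairwise walk over the decoded headers. This is a minimal sketch only: the `Header` fields here are assumptions, and the production checks live in `primitives/src/header_range.rs`.

```rust
/// Hypothetical decoded header; the field names are assumptions for this sketch.
struct Header {
    number: u32,
    hash: [u8; 32],
    parent_hash: [u8; 32],
}

/// Assert that each header extends the previous one by number and parent hash.
/// Inside the zkVM, a failed assertion makes the statement unprovable.
fn verify_header_chain(headers: &[Header]) {
    for pair in headers.windows(2) {
        let (prev, next) = (&pair[0], &pair[1]);
        assert_eq!(next.number, prev.number + 1, "non-contiguous block numbers");
        assert_eq!(next.parent_hash, prev.hash, "parent hash mismatch");
    }
}
```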
10 | 
11 | ```mermaid
12 | graph TD;
13 | subgraph SP1Vector.sol
14 | LatestBlock[Latest Block]
15 | AuthoritySetIdToHash[Authority Set Id to Hash]
16 | DataRootCommitments[Data Root Commitments]
17 | StateRootCommitments[State Root Commitments]
18 | end
19 | 
20 | subgraph Program
21 | LatestBlock -->|latest block| TrustedHeaderHash[Trusted Header Hash]
22 | TrustedHeaderHash[Trusted Header Hash] -->|matches first header in| HeaderData
23 | AuthoritySetIdToHash -->|includes| AuthoritySetHash[Authority Set Hash]
24 | AuthoritySetHash -->|preimage| AuthoritySetPubkeys[Authority Set Pubkeys]
25 | AuthoritySetPubkeys -->|signs| TargetJustification[Target Justification]
26 | TargetJustification -->|includes| TargetBlockHash[Target Block Hash]
27 | TargetBlockHash -->|matches last header in| HeaderData[Header Data]
28 | HeaderData -->|verify header hashes and numbers are linked| ValidatedHeaderData[Validated Header Data]
29 | ValidatedHeaderData -->|hash data and state roots of headers| MerkleCommitments[Data Root and State Root Merkle Commitments]
30 | MerkleCommitments -->|data root commitment| DataRootCommitments[Data Root Commitments]
31 | MerkleCommitments -->|state root commitment| StateRootCommitments[State Root Commitments]
32 | end
33 | 
34 | ```
35 | 
36 | ## Rotate
37 | 
38 | The rotate program computes the next authority set hash from the last header in an epoch, signed by the current authority set. The program does the following:
39 | 1. Verify that the current authority set signed the justification for the target block.
40 | 2. Verify that the preimage of the target block hash (the encoded header used to extract the next validator set) matches the target block hash signed in the justification.
41 | 3. Extract the next validator set from the epoch end header, validating the epoch end header in the process.
42 | 4. Compute the next authority set hash from the next validator set.
43 | 
44 | ```mermaid
45 | graph TD;
46 | subgraph SP1Vector.sol
47 | CurrentAuthoritySet[Current Authority Set Hash]
48 | NextAuthoritySet[Next Authority Set Hash]
49 | end
50 | 
51 | subgraph Program
52 | CurrentAuthoritySet -->|preimage| CurrentAuthoritySetPubkeys[Current Authority Set Pubkeys]
53 | CurrentAuthoritySetPubkeys[Current Authority Set Pubkeys] -->|signs| Justification[Circuit Justification]
54 | Justification -->|includes| TargetBlockHash[Target Block Hash]
55 | TargetBlockHash -->|preimage| EncodedHeaderData[Encoded Header Data]
56 | EncodedHeaderData -->|contains| NewPubkeys[Next Pubkeys]
57 | NewPubkeys -->|hash| NextAuthoritySetHash[Next Authority Set Hash]
58 | NextAuthoritySetHash --> NextAuthoritySet[Next Authority Set Hash]
59 | end
60 | ```
--------------------------------------------------------------------------------
/book/components.md:
--------------------------------------------------------------------------------
1 | # Components
2 | 
3 | An SP1 Vector implementation has a few key components:
4 | - An `SP1Vector` contract. Contains the logic for verifying SP1 Vector proofs and storing the
5 | latest data from the Avail chain, including the headers and data commitments. Matches the interface
6 | of the existing [VectorX](https://github.com/succinctlabs/vectorx/blob/main/contracts/src/VectorX.sol) contract so it can be upgraded in-place.
7 | - An `SP1Verifier` contract. Verifies arbitrary SP1 programs. Most chains will have canonical deployments
8 | upon SP1's mainnet launch. Until then, users can deploy their own `SP1Verifier` contracts to verify
9 | SP1 programs on their chain. The SP1 Vector implementation uses the `SP1Verifier` contract to verify
10 | the proofs of the SP1 Vector programs.
11 | - The SP1 Vector program. An SP1 program that verifies the transition between two Avail
12 | headers and computes the data commitment of the intermediate blocks.
13 | - The operator. A Rust script that fetches the latest data from a deployed `SP1Vector` contract and an Avail chain, determines the block to request, requests or generates a proof, and relays the proof to
14 | the `SP1Vector` contract.
15 | 
--------------------------------------------------------------------------------
/book/deployed-contracts.md:
--------------------------------------------------------------------------------
1 | # Deployed Contracts
2 | 
3 | You can find a list of actively deployed contracts in [deployments.json](https://github.com/succinctlabs/sp1-vector/tree/main/query/app/utils/deployments.json).
--------------------------------------------------------------------------------
/book/deployment.md:
--------------------------------------------------------------------------------
1 | # Deployment
2 | 
3 | ## Overview
4 | 
5 | Here's how to deploy an SP1 Vector contract for an Avail chain.
6 | 
7 | ## Steps
8 | 
9 | 1. Get the genesis parameters for the `SP1Vector` contract.
10 | 
11 | Run the `genesis` binary against the Avail chain you want to track.
12 | 
13 | ```shell
14 | cd script
15 | 
16 | # Example with Avail Turing Testnet.
17 | AVAIL_URL=wss://turing-rpc.avail.so/ws cargo run --bin genesis --release
18 | ```
19 | 
20 | 2. Add the genesis parameters to `/contracts/.env` mirroring `contracts/.env.example`.
21 | 
22 | | Parameter | Description |
23 | |-----------|-------------|
24 | | GENESIS_HEIGHT | The block height of the genesis block for the Avail chain |
25 | | GENESIS_HEADER | The header of the genesis block for the Avail chain |
26 | | GENESIS_AUTHORITY_SET_ID | The ID of the initial authority set for the Avail chain |
27 | | GENESIS_AUTHORITY_SET_HASH | The hash of the initial authority set for the Avail chain |
28 | | SP1_VECTOR_PROGRAM_VKEY | The verification key for the SP1 Vector program |
29 | | HEADER_RANGE_COMMITMENT_TREE_SIZE | The size of the Merkle tree used for header range commitments (default: 1024) |
30 | 
31 | 
32 | 3. Deploy the `SP1Vector` contract with the genesis parameters.
33 | ```shell
34 | cd ../contracts
35 | 
36 | forge install
37 | 
38 | SP1_PROVER={mock, network} CHAINS=sepolia forge script script/Deploy.s.sol --private-key $PRIVATE_KEY --multi --broadcast --verify
39 | ```
40 | 
41 | If you see the following error, add `--legacy` to the command.
42 | ```shell
43 | Error: Failed to get EIP-1559 fees
44 | ```
45 | 4. Your deployed contract address will be printed to the terminal.
46 | 
47 | ```shell
48 | == Return ==
49 | 0: address
50 | ```
51 | 
52 | This will be used when you run the operator in step 6.
53 | 
54 | 5. Export your SP1 Prover Network configuration.
55 | 
56 | ```shell
57 | # Export the PRIVATE_KEY you will use to relay proofs.
58 | export PRIVATE_KEY=
59 | 
60 | # Optional
61 | # If you're generating proofs on the Succinct Network, set NETWORK_PRIVATE_KEY to the private key of the account you want to use.
62 | export NETWORK_PRIVATE_KEY=
63 | # If you're using a custom endpoint, set NETWORK_RPC_URL to the URL of the endpoint you want to use.
64 | export NETWORK_RPC_URL=
65 | # If you're generating proofs in mock mode, set SP1_PROVER to "mock".
66 | export SP1_PROVER={mock}
67 | ```
68 | 
69 | 6. Run the SP1 Vector operator to continuously update the light client.
70 | 
71 | ```shell
72 | cd ../script
73 | 
74 | AVAIL_URL=wss://turing-rpc.avail.so/ws AVAIL_CHAIN_ID=turing CHAIN_ID=11155111 RPC_URL=https://ethereum-sepolia.publicnode.com/ CONTRACT_ADDRESS= VECTORX_QUERY_URL=https://vectorx-query.succinct.xyz \
75 | cargo run --bin operator --release
76 | ```
77 | 
78 | ## Demo Contract
79 | 
80 | An example contract using SP1 Vector can be found on Sepolia [here](https://sepolia.etherscan.io/address/0x04819f50EE813a8f6F6ba28288551c4339fDC881).
--------------------------------------------------------------------------------
/book/introduction.md:
--------------------------------------------------------------------------------
1 | # SP1 Vector
2 | 
3 | ## Overview
4 | 
5 | Implementation of zero-knowledge proof circuits for [Vector](https://blog.availproject.org/data-attestation-bridge/), Avail's Data Attestation Bridge in SP1.
6 | 
7 | - `/program`: The SP1 Vector program.
8 | - `/primitives`: Libraries for types and helper functions used in the program.
9 | - `/script`: Scripts for getting the contract's genesis parameters and deploying the operator to
10 | update the light client.
11 | - `/services`: RPC fetcher for the `script` + the justification indexer.
12 | - `/contracts`: The contract's source code and deployment scripts.
13 | - `/query`: Contains the logic for querying data root proofs from the contracts. Automatically deploys to https://vectorx-query.succinct.xyz.
14 | 
--------------------------------------------------------------------------------
/book/mermaid/mermaid-init.js:
--------------------------------------------------------------------------------
1 | (() => {
2 |     const darkThemes = ['ayu', 'navy', 'coal'];
3 |     const lightThemes = ['light', 'rust'];
4 | 
5 |     const classList = document.getElementsByTagName('html')[0].classList;
6 | 
7 |     let lastThemeWasLight = true;
8 |     for (const cssClass of classList) {
9 |         if (darkThemes.includes(cssClass)) {
10 |             lastThemeWasLight = false;
11 |             break;
12 |         }
13 |     }
14 | 
15 |     const theme = lastThemeWasLight ? 'default' : 'dark';
16 |     mermaid.initialize({ startOnLoad: true, theme });
17 | 
18 |     // Simplest way to make mermaid re-render the diagrams in the new theme is via refreshing the page
19 | 
20 |     for (const darkTheme of darkThemes) {
21 |         document.getElementById(darkTheme).addEventListener('click', () => {
22 |             if (lastThemeWasLight) {
23 |                 window.location.reload();
24 |             }
25 |         });
26 |     }
27 | 
28 |     for (const lightTheme of lightThemes) {
29 |         document.getElementById(lightTheme).addEventListener('click', () => {
30 |             if (!lastThemeWasLight) {
31 |                 window.location.reload();
32 |             }
33 |         });
34 |     }
35 | })();
36 | 
--------------------------------------------------------------------------------
/book/query-data-root-proofs.md:
--------------------------------------------------------------------------------
1 | # Query Data Root Proofs
2 | 
3 | ## Overview
4 | 
5 | Whenever a new data root commitment is stored on-chain, the merkle proofs need to be made available so end-users can prove the data roots of blocks within those data commitments. This service listens for data root commitment events on-chain and stores the merkle proofs for each data root in the range; these proofs are then exposed via a separate endpoint.
6 | 
7 | The indexed contracts are configured in [deployments.json](https://github.com/succinctlabs/sp1-vector/tree/main/query/app/utils/deployments.json).
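Once a proof is fetched from the query service, checking it against the on-chain commitment amounts to recomputing a Merkle root from the data root, its index in the range, and the sibling hashes. The sketch below assumes a binary sha256 tree; the canonical hashing and ordering rules live in `primitives/src/merkle.rs`, so treat the conventions here as assumptions.

```rust
use sha2::{Digest, Sha256};

/// Recompute a Merkle root from a leaf, its index, and a branch of sibling
/// hashes. Minimal sketch: assumes a binary sha256 tree with left/right
/// ordering determined by the index parity at each level.
fn compute_root(leaf: [u8; 32], index: usize, branch: &[[u8; 32]]) -> [u8; 32] {
    let mut node = leaf;
    let mut idx = index;
    for sibling in branch {
        let mut hasher = Sha256::new();
        if idx % 2 == 0 {
            // Node is a left child: hash(node || sibling).
            hasher.update(node);
            hasher.update(sibling);
        } else {
            // Node is a right child: hash(sibling || node).
            hasher.update(sibling);
            hasher.update(node);
        }
        node = hasher.finalize().into();
        idx /= 2;
    }
    node
}
```

A consumer would compare the returned root against the `dataRootCommitments` entry stored in the `SP1Vector` contract for the corresponding block range.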
8 | 9 | ## RPC Queries 10 | 11 | ### Query for `dataRoot` Proof Data 12 | 13 | Querying with a block number. 14 | 15 | ``` 16 | https://vectorx-query.succinct.xyz/api?chainName=hex&contractChainId=11155111&contractAddress=0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75&blockNumber=247230 17 | ``` 18 | 19 | Example response: 20 | 21 | ```json 22 | { 23 | "data": { 24 | "blockNumber": 247230, 25 | "rangeHash": "0xafad54e98bdaebacc1f220dd919dda48b84ed0689906c288a4d93dae1ae9d7c5", 26 | ... 27 | } 28 | } 29 | ``` 30 | 31 | Querying with a block hash. 32 | 33 | ``` 34 | https://vectorx-query.succinct.xyz/api?chainName=hex&contractChainId=11155111&contractAddress=0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75&blockHash=0xad664ed32323c70e9c19333f6d7d6f855719f439bc0cb4cd92d89138c252d560 35 | ``` 36 | 37 | Example response: 38 | 39 | ```json 40 | { 41 | "data": { 42 | "rangeHash": "0xafad54e98bdaebacc1f220dd919dda48b84ed0689906c288a4d93dae1ae9d7c5", 43 | "dataCommitment": "0x7b0f5743191b390b3ba21cdda41b3940b37566a9f336b9e37cf0ad94c937242a", 44 | ... 45 | } 46 | } 47 | ``` 48 | 49 | ### Health of the `VectorX` contract 50 | 51 | Querying for the health of the VectorX contract deployed on Sepolia (chain ID: 11155111) at address 0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75. 52 | 53 | ``` 54 | https://vectorx-query.succinct.xyz/api/health?chainName=hex&contractChainId=11155111&contractAddress=0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75 55 | ``` 56 | 57 | Example response: 58 | 59 | ```json 60 | {"data":{"logEmitted":true,"ethBlocksSinceLastLog":35,"lastLogTimestamp":1717707768,"blocksBehindHead":50}} 61 | ``` 62 | 63 | Note: If `logEmitted` is false, the contract has not emitted a log in at least the last `ethBlocksSinceLastLog` blocks. 64 | 65 | ### Range of the `VectorX` contract 66 | 67 | Querying for the range of the VectorX contract deployed on Sepolia (chain ID: 11155111) at address 0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75. 68 | 69 | ``` 70 | https://vectorx-query.succinct.xyz/api/range?contractChainId=11155111&contractAddress=0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75 71 | ``` 72 | 73 | Example response: 74 | 75 | ```json 76 | {"data":{"start":63091,"end":304710}} 77 | ``` 78 | 79 | ## Launch the Query Service 80 | 81 | Update `query/.env` with the corresponding variables from [.env.example](https://github.com/succinctlabs/sp1-vector/tree/main/query/.env.example). Then launch the service with: 82 | 83 | ``` 84 | npm run dev 85 | ``` 86 | -------------------------------------------------------------------------------- /book/reproducible-builds.md: -------------------------------------------------------------------------------- 1 | # Reproducible Builds 2 | 3 | ## Overview 4 | 5 | When deploying SP1 Vector in production, it's important to ensure that the program used when generating proofs is reproducible. 6 | 7 | ## Prerequisites 8 | 9 | You first need to install the [cargo prove](https://docs.succinct.xyz/docs/getting-started/install) toolchain. 10 | 11 | Ensure that you have the latest version of the toolchain by running: 12 | 13 | ```bash 14 | sp1up 15 | ``` 16 | 17 | Confirm that you have the toolchain installed by running: 18 | 19 | ```bash 20 | cargo prove --version 21 | ``` 22 | 23 | ## Verify the SP1 Vector binary 24 | 25 | To build the SP1 Vector binary, first ensure that Docker is running. 26 | 27 | ```bash 28 | docker ps 29 | ``` 30 | 31 | Then build the binaries: 32 | 33 | ```bash 34 | cd program 35 | 36 | # Builds the SP1 Vector binary using the corresponding Docker tag and ELF name. 
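# The Docker build pins the toolchain, so the resulting ELF (and therefore its vkey) is identical across machines.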
37 | cargo prove build --docker --tag v4.1.3 --elf-name vector-elf 38 | ``` 39 | 40 | Now, verify the binaries by confirming the output of `vkey` matches the vkeys on the contract. The `vkey` program outputs the verification key 41 | based on the ELF in `/elf`. 42 | 43 | ```bash 44 | cargo run --bin vkey --release 45 | ``` 46 | -------------------------------------------------------------------------------- /chains.example.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "rpc_url": "https://rpc.ankr.com/eth", 4 | "vector_address": "0x0000000000000000000000000000000000000000" 5 | } 6 | ] -------------------------------------------------------------------------------- /contracts/.env.example: -------------------------------------------------------------------------------- 1 | ### Salt used to deploy the contracts. Recommended to use the same salt across different chains. 2 | CREATE2_SALT= 3 | 4 | ### The chains to deploy to, specified by chain name (e.g. CHAINS=mainnet,sepolia,arbitrum_sepolia) 5 | CHAINS= 6 | 7 | ### RPCs for each chain ID 8 | RPC_MAINNET= 9 | RPC_BASE_SEPOLIA= 10 | RPC_ARBITRUM_SEPOLIA= 11 | RPC_SEPOLIA= 12 | 13 | ### Etherscan API keys for each chain ID 14 | ETHERSCAN_API_KEY_MAINNET= 15 | ETHERSCAN_API_KEY_BASE_SEPOLIA= 16 | ETHERSCAN_API_KEY_ARBITRUM_SEPOLIA= 17 | ETHERSCAN_API_KEY_SEPOLIA= 18 | 19 | ### Etherscan API URLs for each chain ID 20 | ETHERSCAN_API_URL_MAINNET=https://api.etherscan.io/api 21 | ETHERSCAN_API_URL_HOLESKY=https://api-holesky.etherscan.io/api 22 | ETHERSCAN_API_URL_SEPOLIA=https://api-sepolia.etherscan.io/api 23 | ETHERSCAN_API_URL_ARBITRUM=https://api.arbiscan.io/api 24 | ETHERSCAN_API_URL_ARBITRUM_SEPOLIA=https://api-sepolia.arbiscan.io/api 25 | ETHERSCAN_API_URL_BASE=https://api.basescan.org/api 26 | ETHERSCAN_API_URL_BASE_SEPOLIA=https://api-sepolia.basescan.org/api 27 | ETHERSCAN_API_URL_SCROLL=https://api.scrollscan.com/api 28 | ETHERSCAN_API_URL_SCROLL_SEPOLIA=https://api-sepolia.scrollscan.com/api 29 | 30 | 31 | #### Arguments for SP1 Vector 32 | GENESIS_HEIGHT= 33 | GENESIS_HEADER= 34 | GENESIS_AUTHORITY_SET_ID= 35 | GENESIS_AUTHORITY_SET_HASH= 36 | SP1_VECTOR_PROGRAM_VKEY= 37 | HEADER_RANGE_COMMITMENT_TREE_SIZE= 38 | 39 | #### Guardian Address 40 | GUARDIAN_ADDRESS= 41 | 42 | #### Contract Private Key 43 | PRIVATE_KEY= -------------------------------------------------------------------------------- /contracts/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiler files 2 | cache/ 3 | out/ 4 | zkout/ 5 | 6 | # Ignores development broadcast logs 7 | /broadcast 8 | /broadcast/*/31337/ 9 | /broadcast/**/dry-run/ 10 | 11 | # Docs 12 | docs/ 13 | 14 | # Dotenv file 15 | .env 16 | -------------------------------------------------------------------------------- /contracts/README.md: -------------------------------------------------------------------------------- 1 | # SP1 Vector Contracts 2 | 3 | ## Deploy new contracts 4 | To deploy new contracts, generate genesis parameters, then add the chains to the .env file: 5 | ``` 6 | CHAINS=sepolia,arbitrum_sepolia,... 7 | ``` 8 | 9 | Then run the deploy script: 10 | ``` 11 | forge script script/Deploy.s.sol --private-key $PRIVATE_KEY --verify --verifier etherscan --multi --broadcast 12 | ``` 13 | 14 | ## Updating existing contracts 15 | To update the existing contracts, set the contract addresses on the chains you want to update: 16 | ``` 17 | CHAINS=sepolia,arbitrum_sepolia,... 
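# One CONTRACT_ADDRESS_<CHAIN_ID> entry per chain, e.g. CONTRACT_ADDRESS_11155111 for Sepolia.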
18 | 19 | CONTRACT_ADDRESS_= 20 | ``` 21 | 22 | Then run the upgrade script: 23 | ``` 24 | forge script script/Upgrade.s.sol --verify --verifier etherscan --private-key $PRIVATE_KEY --multi --broadcast 25 | ``` -------------------------------------------------------------------------------- /contracts/foundry.toml: -------------------------------------------------------------------------------- 1 | [profile.default] 2 | src = "src" 3 | out = "out" 4 | libs = ["lib"] 5 | evm_version = 'cancun' 6 | via_ir = true 7 | 8 | # See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options 9 | fs_permissions = [{ access = "read", path = "./" }] 10 | 11 | [profile.deploy] 12 | optimizer = true 13 | optimizer_runs = 200 14 | evm_version = 'cancun' 15 | 16 | [rpc_endpoints] 17 | mainnet = "${RPC_MAINNET}" 18 | sepolia = "${RPC_SEPOLIA}" 19 | holesky = "${RPC_HOLESKY}" 20 | arbitrum = "${RPC_ARBITRUM}" 21 | arbitrum_sepolia = "${RPC_ARBITRUM_SEPOLIA}" 22 | base = "${RPC_BASE}" 23 | base_sepolia = "${RPC_BASE_SEPOLIA}" 24 | scroll = "${RPC_SCROLL}" 25 | scroll_sepolia = "${RPC_SCROLL_SEPOLIA}" 26 | 27 | [etherscan] 28 | mainnet = { key = "${ETHERSCAN_API_KEY_MAINNET}", url = "${ETHERSCAN_API_URL_MAINNET}" } 29 | sepolia = { key = "${ETHERSCAN_API_KEY_SEPOLIA}", url = "${ETHERSCAN_API_URL_SEPOLIA}" } 30 | holesky = { key = "${ETHERSCAN_API_KEY_HOLESKY}", url = "${ETHERSCAN_API_URL_HOLESKY}" } 31 | arbitrum = { key = "${ETHERSCAN_API_KEY_ARBITRUM}", url = "${ETHERSCAN_API_URL_ARBITRUM}" } 32 | arbitrum_sepolia = { key = "${ETHERSCAN_API_KEY_ARBITRUM_SEPOLIA}", url = "${ETHERSCAN_API_URL_ARBITRUM_SEPOLIA}" } 33 | base = { key = "${ETHERSCAN_API_KEY_BASE}", url = "${ETHERSCAN_API_URL_BASE}" } 34 | base_sepolia = { key = "${ETHERSCAN_API_KEY_BASE_SEPOLIA}", url = "${ETHERSCAN_API_URL_BASE_SEPOLIA}", chain = 84532 } 35 | scroll = { key = "${ETHERSCAN_API_KEY_SCROLL}", url = "${ETHERSCAN_API_URL_SCROLL}" } 36 | scroll_sepolia = { key = "${ETHERSCAN_API_KEY_SCROLL_SEPOLIA}", url = "${ETHERSCAN_API_URL_SCROLL_SEPOLIA}" } 37 | -------------------------------------------------------------------------------- /contracts/remappings.txt: -------------------------------------------------------------------------------- 1 | @sp1-contracts/=lib/sp1-contracts/contracts/src/ 2 | @succinctx/=lib/succinctx/contracts/src 3 | @openzeppelin/=lib/openzeppelin-contracts/ 4 | @openzeppelin-upgradeable/=lib/openzeppelin-contracts-upgradeable/ -------------------------------------------------------------------------------- /contracts/script/Base.s.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity ^0.8.26; 3 | 4 | import "forge-std/Vm.sol"; 5 | import "forge-std/console.sol"; 6 | import {stdJson} from "forge-std/StdJson.sol"; 7 | import {Script} from "forge-std/Script.sol"; 8 | 9 | /// @notice Script to inherit from to get access to helper functions for deployments. 10 | abstract contract BaseScript is Script { 11 | using stdJson for string; 12 | 13 | /// @notice Run the command with the `--broadcast` flag to send the transaction to the chain, 14 | /// otherwise just simulate the transaction execution. 15 | modifier broadcaster() { 16 | vm.startBroadcast(msg.sender); 17 | _; 18 | vm.stopBroadcast(); 19 | } 20 | 21 | /// @notice When used, runs the script on the chains specified in the `CHAINS` env variable. 22 | /// Must have a `RPC_${CHAIN}` env variable set for each chain (e.g. RPC_MAINNET). 
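    /// @dev Forks to each chain in turn via vm.createSelectFork(chain) before running the modified body.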
23 | modifier multichain(string memory KEY) { 24 | string[] memory chains = vm.envString("CHAINS", ","); 25 | for (uint256 i = 0; i < chains.length; i++) { 26 | string memory chain = chains[i]; 27 | 28 | // Switch to the chain using the RPC 29 | vm.createSelectFork(chain); 30 | 31 | console.log("Deploying %s to %s", KEY, chain); 32 | 33 | _; 34 | } 35 | } 36 | 37 | /// @notice Returns the directory of the deployments. 38 | function directory() internal view returns (string memory) { 39 | return string.concat(vm.projectRoot(), "/deployments/"); 40 | } 41 | 42 | /// @notice Returns the file name for the current chain. 43 | function file() internal view returns (string memory) { 44 | return string.concat(vm.toString(block.chainid), ".json"); 45 | } 46 | 47 | /// @notice Returns the path to the deployments file for the current chain. 48 | function path() internal view returns (string memory) { 49 | return string.concat(directory(), file()); 50 | } 51 | 52 | /// @notice Returns the deployments file contents for the current chain. 53 | function deployments() internal view returns (string memory) { 54 | return vm.readFile(path()); 55 | } 56 | 57 | /// @notice Ensures that the deployments file exists for the current chain. 58 | function ensureExists() internal { 59 | if (!vm.exists(directory())) { 60 | vm.createDir(directory(), true); 61 | } 62 | 63 | if (!vm.exists(path())) { 64 | vm.writeFile(path(), "{}"); 65 | } 66 | } 67 | 68 | /// @notice Tries to read an address from the env. 69 | function envAddress(string memory key) internal view returns (address) { 70 | return vm.envOr(key, address(0)); 71 | } 72 | 73 | /// @notice Tries to read a bytes32 from the env. 74 | function envBytes32(string memory key) internal view returns (bytes32) { 75 | return vm.envOr(key, bytes32(0)); 76 | } 77 | 78 | /// @notice Tries to read an address from the env first, then from the deployments file for the current chain. 79 | function readAddress(string memory key) internal view returns (address) { 80 | if (envAddress(key) != address(0)) { 81 | return envAddress(key); 82 | } 83 | return deployments().readAddress(string.concat(".", key)); 84 | } 85 | 86 | /// @notice Tries to read a bytes32 from the env first, then from the deployments file for the current chain. 87 | function readBytes32(string memory key) internal view returns (bytes32) { 88 | if (envBytes32(key) != bytes32(0)) { 89 | return envBytes32(key); 90 | } 91 | return deployments().readBytes32(string.concat(".", key)); 92 | } 93 | 94 | /// @notice Writes an address to the deployments file for the current chain. 95 | function writeAddress(string memory key, address value) internal { 96 | ensureExists(); 97 | 98 | if (vm.keyExists(deployments(), string.concat(".", key))) { 99 | vm.writeJson(vm.toString(value), path(), string.concat(".", key)); 100 | } else { 101 | string memory root = "root"; 102 | vm.serializeJson(root, deployments()); 103 | vm.writeJson(vm.serializeAddress(root, key, value), path()); 104 | } 105 | } 106 | 107 | /// @notice Writes a bytes32 to the deployments file for the current chain. 
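    /// @dev Creates the deployments file first if it does not yet exist, mirroring writeAddress above.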
108 | function writeBytes32(string memory key, bytes32 value) internal { 109 | ensureExists(); 110 | 111 | if (vm.keyExists(deployments(), string.concat(".", key))) { 112 | vm.writeJson(vm.toString(value), path(), string.concat(".", key)); 113 | } else { 114 | string memory root = "root"; 115 | vm.serializeJson(root, deployments()); 116 | vm.writeJson(vm.serializeBytes32(root, key, value), path()); 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /contracts/script/Deploy.s.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | pragma solidity ^0.8.26; 3 | 4 | import "forge-std/console.sol"; 5 | import {Vm} from "forge-std/Vm.sol"; 6 | import {StdAssertions} from "forge-std/StdAssertions.sol"; 7 | import {Script} from "forge-std/Script.sol"; 8 | import {stdJson} from "forge-std/StdJson.sol"; 9 | import {SP1MockVerifier} from "@sp1-contracts/SP1MockVerifier.sol"; 10 | import {ISP1Verifier} from "@sp1-contracts/ISP1Verifier.sol"; 11 | import {SP1Vector} from "../src/SP1Vector.sol"; 12 | import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; 13 | 14 | import {BaseScript} from "./Base.s.sol"; 15 | 16 | // Required environment variables: 17 | // - GENESIS_HEIGHT 18 | // - GENESIS_HEADER 19 | // - GENESIS_AUTHORITY_SET_ID 20 | // - GENESIS_AUTHORITY_SET_HASH 21 | // - HEADER_RANGE_COMMITMENT_TREE_SIZE 22 | // - SP1_VECTOR_PROGRAM_VKEY 23 | // - CREATE2_SALT 24 | // - GUARDIAN_ADDRESS 25 | // - SP1_VERIFIER_ADDRESS 26 | 27 | contract DeployScript is BaseScript { 28 | using stdJson for string; 29 | 30 | string internal constant KEY = "SP1_VECTOR"; 31 | 32 | SP1Vector public sp1Vector; 33 | 34 | function setUp() public {} 35 | 36 | function run() external multichain(KEY) returns (address sp1VectorAddress) { 37 | vm.startBroadcast(); 38 | 39 | uint32 genesisHeight = uint32(vm.envUint("GENESIS_HEIGHT")); 40 | bytes32 genesisHeader = vm.envBytes32("GENESIS_HEADER"); 41 | uint64 genesisAuthoritySetId = uint64(vm.envUint("GENESIS_AUTHORITY_SET_ID")); 42 | bytes32 genesisAuthoritySetHash = vm.envBytes32("GENESIS_AUTHORITY_SET_HASH"); 43 | uint32 headerRangeCommitmentTreeSize = uint32(vm.envUint("HEADER_RANGE_COMMITMENT_TREE_SIZE")); 44 | bytes32 vectorProgramVkey = vm.envBytes32("SP1_VECTOR_PROGRAM_VKEY"); 45 | 46 | // Read trusted initialization parameters from environment. 
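        // GUARDIAN_ADDRESS falls back to the deployer and SP1_VERIFIER_ADDRESS to a default verifier deployment when unset.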
47 | address guardian = vm.envOr("GUARDIAN_ADDRESS", msg.sender); 48 | 49 | ISP1Verifier verifier = 50 | ISP1Verifier(vm.envOr("SP1_VERIFIER_ADDRESS", 0x3B6041173B80E77f038f3F2C0f9744f04837185e)); 51 | SP1Vector sp1VectorImpl = new SP1Vector(); 52 | // ERC1967Proxy proxy = new ERC1967Proxy{salt: vm.envBytes32("CREATE2_SALT")}(address(sp1VectorImpl), ""); 53 | ERC1967Proxy proxy = new ERC1967Proxy(address(sp1VectorImpl), ""); 54 | sp1Vector = SP1Vector(address(proxy)); 55 | sp1Vector.initialize( 56 | SP1Vector.InitParameters({ 57 | guardian: guardian, 58 | height: genesisHeight, 59 | header: genesisHeader, 60 | authoritySetId: genesisAuthoritySetId, 61 | authoritySetHash: genesisAuthoritySetHash, 62 | headerRangeCommitmentTreeSize: headerRangeCommitmentTreeSize, 63 | vectorProgramVkey: vectorProgramVkey, 64 | verifier: address(verifier) 65 | }) 66 | ); 67 | 68 | vm.stopBroadcast(); 69 | 70 | return address(sp1Vector); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /contracts/script/Guardian.s.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | pragma solidity ^0.8.26; 3 | 4 | import "forge-std/console.sol"; 5 | import {Vm} from "forge-std/Vm.sol"; 6 | import {StdAssertions} from "forge-std/StdAssertions.sol"; 7 | import {Script} from "forge-std/Script.sol"; 8 | import {stdJson} from "forge-std/StdJson.sol"; 9 | import {SP1MockVerifier} from "@sp1-contracts/SP1MockVerifier.sol"; 10 | import {ISP1Verifier} from "@sp1-contracts/ISP1Verifier.sol"; 11 | import {SP1Vector} from "../src/SP1Vector.sol"; 12 | import {TimelockedUpgradeable} from "@succinctx/upgrades/TimelockedUpgradeable.sol"; 13 | import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; 14 | import {BaseScript} from "./Base.s.sol"; 15 | 16 | // Required environment variables: 17 | // - CHAINS (comma separated list of chain names) 18 | // - CONTRACT_ADDRESS_{CHAIN_ID} 19 | // - GUARDIAN_ADDRESS 20 | 21 | contract UpgradeScript is BaseScript { 22 | using stdJson for string; 23 | 24 | function setUp() public {} 25 | 26 | string internal constant KEY = "GuardianScript"; 27 | 28 | /// Reads CONTRACT_ADDRESS_ from the environment variables and updates the SP1 Verifier and program vkey. 29 | function run() external multichain(KEY) broadcaster { 30 | string memory contractAddressKey = string.concat("CONTRACT_ADDRESS_", vm.toString(block.chainid)); 31 | address existingProxyAddress = vm.envAddress(contractAddressKey); 32 | 33 | // Grant roles to multi-sig. 34 | SP1Vector sp1Vector = SP1Vector(address(existingProxyAddress)); 35 | address guardian = vm.envAddress("GUARDIAN_ADDRESS"); 36 | sp1Vector.grantRole(sp1Vector.DEFAULT_ADMIN_ROLE(), guardian); 37 | sp1Vector.grantRole(sp1Vector.GUARDIAN_ROLE(), guardian); 38 | sp1Vector.grantRole(sp1Vector.TIMELOCK_ROLE(), guardian); 39 | 40 | // // Removes roles from 0xded. 
41 |         // sp1Vector.revokeRole(sp1Vector.DEFAULT_ADMIN_ROLE(), 0xDEd0000E32f8F40414d3ab3a830f735a3553E18e);
42 |         sp1Vector.revokeRole(sp1Vector.GUARDIAN_ROLE(), 0xDEd0000E32f8F40414d3ab3a830f735a3553E18e);
43 |         sp1Vector.revokeRole(sp1Vector.TIMELOCK_ROLE(), 0xDEd0000E32f8F40414d3ab3a830f735a3553E18e);
44 |     }
45 | }
46 | 
--------------------------------------------------------------------------------
/contracts/script/UpdateVkey.s.sol:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: UNLICENSED
2 | pragma solidity ^0.8.26;
3 | 
4 | import {stdJson} from "forge-std/StdJson.sol";
5 | import {SP1Vector} from "../src/SP1Vector.sol";
6 | import {BaseScript} from "./Base.s.sol";
7 | 
8 | // Required environment variables:
9 | // - CHAINS (comma separated list of chain names)
10 | // - CONTRACT_ADDRESS_{CHAIN_ID}
11 | 
12 | contract UpdateVkeyScript is BaseScript {
13 |     using stdJson for string;
14 | 
15 |     function setUp() public {}
16 | 
17 |     string internal constant KEY = "UpdateVkey";
18 | 
19 |     /// Reads CONTRACT_ADDRESS_{CHAIN_ID} from the environment variables and updates the SP1 Vector program vkey.
20 |     function run() external multichain(KEY) broadcaster {
21 |         string memory contractAddressKey = string.concat("CONTRACT_ADDRESS_", vm.toString(block.chainid));
22 |         address existingProxyAddress = vm.envAddress(contractAddressKey);
23 | 
24 |         SP1Vector sp1Vector = SP1Vector(address(existingProxyAddress));
25 | 
26 |         // v4 program vkey
27 |         sp1Vector.updateVectorXProgramVkey(0x00e9bba2a9360f570b9ba99e5186825ac723bedebd486b6a818870c44e3e4d4f);
28 |     }
29 | }
30 | 
--------------------------------------------------------------------------------
/contracts/script/UpdateVkeySingle.s.sol:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: UNLICENSED
2 | pragma solidity ^0.8.0;
3 | 
4 | import {stdJson} from "forge-std/StdJson.sol";
5 | import {SP1Vector} from "../src/SP1Vector.sol";
6 | import "forge-std/Script.sol";
7 | 
8 | // Required environment variables:
9 | // - CONTRACT_ADDRESS_{CHAIN_ID}
10 | 
11 | contract UpdateVkeySingleScript is Script {
12 |     using stdJson for string;
13 | 
14 |     function setUp() public {}
15 | 
16 |     /// Reads CONTRACT_ADDRESS_{CHAIN_ID} from the environment variables and updates the SP1 Vector program vkey.
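    /// @dev Unlike UpdateVkey.s.sol, this script targets only the chain of the active RPC rather than iterating over CHAINS.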
17 |     function run() external {
18 |         vm.startBroadcast();
19 | 
20 |         string memory contractAddressKey = string.concat("CONTRACT_ADDRESS_", vm.toString(block.chainid));
21 |         address existingProxyAddress = vm.envAddress(contractAddressKey);
22 | 
23 |         SP1Vector sp1Vector = SP1Vector(address(existingProxyAddress));
24 | 
25 |         // v4 program vkey
26 |         sp1Vector.updateVectorXProgramVkey(0x00e9bba2a9360f570b9ba99e5186825ac723bedebd486b6a818870c44e3e4d4f);
27 | 
28 |         vm.stopBroadcast();
29 |     }
30 | }
31 | 
--------------------------------------------------------------------------------
/contracts/script/Upgrade.s.sol:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: UNLICENSED
2 | pragma solidity ^0.8.26;
3 | 
4 | import "forge-std/console.sol";
5 | import {Vm} from "forge-std/Vm.sol";
6 | import {StdAssertions} from "forge-std/StdAssertions.sol";
7 | import {Script} from "forge-std/Script.sol";
8 | import {stdJson} from "forge-std/StdJson.sol";
9 | import {SP1MockVerifier} from "@sp1-contracts/SP1MockVerifier.sol";
10 | import {ISP1Verifier} from "@sp1-contracts/ISP1Verifier.sol";
11 | import {SP1Vector} from "../src/SP1Vector.sol";
12 | import {TimelockedUpgradeable} from "@succinctx/upgrades/TimelockedUpgradeable.sol";
13 | import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol";
14 | import {BaseScript} from "./Base.s.sol";
15 | 
16 | // Required environment variables:
17 | // - CHAINS (comma separated list of chain names)
18 | // - CONTRACT_ADDRESS_{CHAIN_ID}
19 | // - SP1_VECTOR_PROGRAM_VKEY
20 | // - RELAYER_ADDRESS
21 | 
22 | contract UpgradeScript is BaseScript {
23 |     using stdJson for string;
24 | 
25 |     function setUp() public {}
26 | 
27 |     string internal constant KEY = "UpgradeScript";
28 | 
29 |     /// Reads CONTRACT_ADDRESS_{CHAIN_ID} from the environment variables, upgrades the proxy to a new
30 |     /// implementation, configures the approved relayer, and updates the program vkey.
31 |     function run() external multichain(KEY) broadcaster {
32 |         string memory contractAddressKey = string.concat("CONTRACT_ADDRESS_", vm.toString(block.chainid));
33 |         address existingProxyAddress = vm.envAddress(contractAddressKey);
34 | 
35 |         SP1Vector sp1Vector = SP1Vector(address(existingProxyAddress));
36 | 
37 |         // Deploy new SP1Vector implementation.
38 |         SP1Vector newImplementation = new SP1Vector();
39 |         sp1Vector.upgradeTo(address(newImplementation));
40 | 
41 |         // Set the approved relayer.
42 |         sp1Vector.setCheckRelayer(true);
43 |         sp1Vector.setRelayerApproval(vm.envAddress("RELAYER_ADDRESS"), true);
44 | 
45 |         sp1Vector.updateVectorXProgramVkey(vm.envBytes32("SP1_VECTOR_PROGRAM_VKEY"));
46 |     }
47 | }
48 | 
--------------------------------------------------------------------------------
/contracts/src/SP1Vector.sol:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: MIT
2 | pragma solidity ^0.8.26;
3 | 
4 | import {ISP1Vector} from "./interfaces/ISP1Vector.sol";
5 | import {TimelockedUpgradeable} from "@succinctx/upgrades/TimelockedUpgradeable.sol";
6 | import {ISP1Verifier} from "@sp1-contracts/ISP1Verifier.sol";
7 | 
8 | /// @notice SP1Vector is a light client for Avail's consensus.
9 | /// @dev The light client tracks both the state of Avail's Grandpa consensus and Vector, Avail's
10 | /// data commitment solution.
11 | /// @dev Ensure that all new storage variables are placed after existing storage variables to avoid
12 | /// storage corruption during upgrades.
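/// @dev Deployed behind an ERC1967 proxy; see Deploy.s.sol and Upgrade.s.sol for the deploy and upgrade flows.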
13 | contract SP1Vector is ISP1Vector, TimelockedUpgradeable { 14 | /// @notice Indicator of if the contract is frozen. 15 | bool public frozen; 16 | 17 | /// @notice The address of the gateway contract. 18 | /// @dev DEPRECATED: Do not use. Compatibility for upgrades from VectorX. 19 | address public gateway_deprecated; 20 | 21 | /// @notice The latest block that has been committed. 22 | uint32 public latestBlock; 23 | 24 | /// @notice The latest authority set id used in commitHeaderRange. 25 | uint64 public latestAuthoritySetId; 26 | 27 | /// @notice The function for requesting a header range. 28 | /// @dev DEPRECATED: Do not use. Compatibility for upgrades from VectorX. 29 | bytes32 public headerRangeFunctionId_deprecated; 30 | 31 | /// @notice The function for requesting a rotate. 32 | /// @dev DEPRECATED: Do not use. Compatibility for upgrades from VectorX. 33 | bytes32 public rotateFunctionId_deprecated; 34 | 35 | /// @notice Maps block height to the header hash of the block. 36 | mapping(uint32 => bytes32) public blockHeightToHeaderHash; 37 | 38 | /// @notice Maps authority set id to the authority set hash. 39 | mapping(uint64 => bytes32) public authoritySetIdToHash; 40 | 41 | /// @notice Maps block ranges to data commitments. Block ranges are stored as 42 | /// keccak256(abi.encode(startBlock, endBlock)). 43 | mapping(bytes32 => bytes32) public dataRootCommitments; 44 | 45 | /// @notice Maps block ranges to state commitments. Block ranges are stored as 46 | /// keccak256(abi.encode(startBlock, endBlock)). 47 | mapping(bytes32 => bytes32) public stateRootCommitments; 48 | 49 | /// @notice Maps range hashes to the start block of the range. This allows us 50 | /// to know the block height of an attestation. 51 | mapping(bytes32 => uint32) public rangeStartBlocks; 52 | 53 | /// @notice The commitment tree size for the header range. 54 | uint32 public headerRangeCommitmentTreeSize; 55 | 56 | /// @notice The verification key for the SP1Vector program. 57 | bytes32 public vectorXProgramVkey; 58 | 59 | /// @notice The deployed SP1 verifier contract. 60 | ISP1Verifier public verifier; 61 | 62 | /// @notice Approved relayers for the contract. 63 | mapping(address => bool) public approvedRelayers; 64 | 65 | /// @notice Check the relayer is approved. 66 | bool public checkRelayer; 67 | 68 | /// @notice The type of proof that is being verified. 69 | enum ProofType { 70 | HeaderRangeProof, 71 | RotateProof 72 | } 73 | 74 | struct InitParameters { 75 | address guardian; 76 | uint32 height; 77 | bytes32 header; 78 | uint64 authoritySetId; 79 | bytes32 authoritySetHash; 80 | uint32 headerRangeCommitmentTreeSize; 81 | bytes32 vectorProgramVkey; 82 | address verifier; 83 | } 84 | 85 | struct HeaderRangeOutputs { 86 | uint32 trusted_block; 87 | bytes32 trusted_header_hash; 88 | uint64 authority_set_id; 89 | bytes32 authority_set_hash; 90 | uint32 target_block; 91 | bytes32 target_header_hash; 92 | bytes32 state_root_commitment; 93 | bytes32 data_root_commitment; 94 | uint32 merkle_tree_size; 95 | } 96 | 97 | struct RotateOutputs { 98 | uint64 current_authority_set_id; 99 | bytes32 current_authority_set_hash; 100 | bytes32 new_authority_set_hash; 101 | } 102 | 103 | struct ProofOutputs { 104 | ProofType proofType; 105 | bytes headerRangeOutputs; 106 | bytes rotateOutputs; 107 | } 108 | 109 | /// @notice If the relayer check is enabled, only approved relayers can call the function. 
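    /// @dev Reverts with RelayerNotApproved when checkRelayer is enabled and msg.sender has not been approved.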
110 | modifier onlyApprovedRelayer() { 111 | if (checkRelayer && !approvedRelayers[msg.sender]) { 112 | revert RelayerNotApproved(); 113 | } 114 | _; 115 | } 116 | 117 | function VERSION() external pure override returns (string memory) { 118 | return "2.0.0"; 119 | } 120 | 121 | /// @dev Initializes the contract. 122 | /// @param _params The initialization parameters for the contract. 123 | function initialize(InitParameters calldata _params) external initializer { 124 | blockHeightToHeaderHash[_params.height] = _params.header; 125 | authoritySetIdToHash[_params.authoritySetId] = _params.authoritySetHash; 126 | latestAuthoritySetId = _params.authoritySetId; 127 | latestBlock = _params.height; 128 | vectorXProgramVkey = _params.vectorProgramVkey; 129 | verifier = ISP1Verifier(_params.verifier); 130 | headerRangeCommitmentTreeSize = _params.headerRangeCommitmentTreeSize; 131 | 132 | __TimelockedUpgradeable_init(_params.guardian, _params.guardian); 133 | } 134 | 135 | /// @notice Update the SP1 verifier. 136 | function updateVerifier(address _verifier) external onlyGuardian { 137 | verifier = ISP1Verifier(_verifier); 138 | } 139 | 140 | /// @notice Update the SP1Vector program verification key. 141 | function updateVectorXProgramVkey(bytes32 _vectorXProgramVkey) external onlyGuardian { 142 | vectorXProgramVkey = _vectorXProgramVkey; 143 | } 144 | 145 | /// @notice Update the freeze parameter. 146 | function updateFreeze(bool _freeze) external onlyGuardian { 147 | frozen = _freeze; 148 | } 149 | 150 | /// @notice Update the commitment tree size for the header range function. 151 | function updateCommitmentTreeSize(uint32 _headerRangeCommitmentTreeSize) external onlyGuardian { 152 | headerRangeCommitmentTreeSize = _headerRangeCommitmentTreeSize; 153 | } 154 | 155 | /// @notice Update the genesis state of the light client. 156 | function updateGenesisState(uint32 _height, bytes32 _header, uint64 _authoritySetId, bytes32 _authoritySetHash) 157 | external 158 | onlyGuardian 159 | { 160 | blockHeightToHeaderHash[_height] = _header; 161 | latestBlock = _height; 162 | 163 | authoritySetIdToHash[_authoritySetId] = _authoritySetHash; 164 | latestAuthoritySetId = _authoritySetId; 165 | } 166 | 167 | /// @notice Force update the data & state commitments for a range of blocks. 
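    /// @dev The ranges must be contiguous: _startBlocks[0] must equal latestBlock, and each subsequent
    /// start block must equal the previous end block.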
168 | function updateBlockRangeData(
169 | uint32[] calldata _startBlocks,
170 | uint32[] calldata _endBlocks,
171 | bytes32[] calldata _headerHashes,
172 | bytes32[] calldata _dataRootCommitments,
173 | bytes32[] calldata _stateRootCommitments,
174 | uint64 _endAuthoritySetId,
175 | bytes32 _endAuthoritySetHash
176 | ) external onlyGuardian {
177 | assert(
178 | _startBlocks.length > 0 && _startBlocks.length == _endBlocks.length
179 | && _endBlocks.length == _headerHashes.length && _headerHashes.length == _dataRootCommitments.length
180 | && _dataRootCommitments.length == _stateRootCommitments.length
181 | );
182 | require(_startBlocks[0] == latestBlock);
183 | for (uint256 i = 0; i < _startBlocks.length; i++) {
184 | if (i < _startBlocks.length - 1) {
185 | require(_endBlocks[i] == _startBlocks[i + 1]);
186 | }
187 | bytes32 key = keccak256(abi.encode(_startBlocks[i], _endBlocks[i]));
188 | dataRootCommitments[key] = _dataRootCommitments[i];
189 | stateRootCommitments[key] = _stateRootCommitments[i];
190 | rangeStartBlocks[key] = _startBlocks[i];
191 | 
192 | blockHeightToHeaderHash[_endBlocks[i]] = _headerHashes[i];
193 | 
194 | emit HeadUpdate(_endBlocks[i], _headerHashes[i]);
195 | 
196 | emit HeaderRangeCommitmentStored(
197 | _startBlocks[i],
198 | _endBlocks[i],
199 | _dataRootCommitments[i],
200 | _stateRootCommitments[i],
201 | headerRangeCommitmentTreeSize
202 | );
203 | }
204 | latestBlock = _endBlocks[_endBlocks.length - 1];
205 | 
206 | authoritySetIdToHash[_endAuthoritySetId] = _endAuthoritySetHash;
207 | latestAuthoritySetId = _endAuthoritySetId;
208 | }
209 | 
210 | /// @notice Set a relayer's approval status.
211 | function setRelayerApproval(address _relayer, bool _approved) external onlyGuardian {
212 | approvedRelayers[_relayer] = _approved;
213 | }
214 | 
215 | /// @notice Set whether to check relayer approval.
216 | function setCheckRelayer(bool _checkRelayer) external onlyGuardian {
217 | checkRelayer = _checkRelayer;
218 | }
219 | 
220 | /// @notice Add target header hash, and data + state commitments for (latestBlock, targetBlock].
221 | /// @param proof The proof bytes for the SP1 proof.
222 | /// @param publicValues The public commitments from the SP1 proof.
223 | /// @dev The trusted block and requested block must have the same authority set id. If the target
224 | /// block is greater than the max batch size of the circuit, the proof will fail to generate.
225 | function commitHeaderRange(bytes calldata proof, bytes calldata publicValues) external onlyApprovedRelayer {
226 | if (frozen) {
227 | revert ContractFrozen();
228 | }
229 | 
230 | // Parse the outputs from the committed public values associated with the proof.
231 | ProofOutputs memory proofOutputs = abi.decode(publicValues, (ProofOutputs));
232 | 
233 | // Assert this is a header range proof. Sanity check to prevent accidental submission of a
234 | // rotate proof.
235 | if (proofOutputs.proofType != ProofType.HeaderRangeProof) {
236 | revert InvalidProofType();
237 | }
238 | 
239 | // Decode the header range outputs from the proof outputs.
240 | HeaderRangeOutputs memory hro = abi.decode(proofOutputs.headerRangeOutputs, (HeaderRangeOutputs));
241 | 
242 | // Verify the merkle tree size in the proof matches the expected size.
243 | if (hro.merkle_tree_size != headerRangeCommitmentTreeSize) {
244 | revert InvalidMerkleTreeSize();
245 | }
246 | 
247 | // Verify the trusted header matches the stored trusted header and latestBlock.
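// Both the trusted block height and the trusted header hash in the proof must match the
// contract's current head; if either differs, the proof was generated against a different
// trusted state and must be rejected.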
248 | bytes32 storedTrustedHeader = blockHeightToHeaderHash[latestBlock];
249 | if (storedTrustedHeader == bytes32(0)) {
250 | revert TrustedHeaderNotFound();
251 | }
252 | if (hro.trusted_header_hash != storedTrustedHeader || hro.trusted_block != latestBlock) {
253 | revert TrustedHeaderMismatch();
254 | }
255 | 
256 | // Verify the authority set hash matches the stored authority set hash.
257 | bytes32 authoritySetHashStored = authoritySetIdToHash[hro.authority_set_id];
258 | if (authoritySetHashStored == bytes32(0)) {
259 | revert AuthoritySetNotFound();
260 | }
261 | if (hro.authority_set_hash != authoritySetHashStored) {
262 | revert AuthoritySetMismatch();
263 | }
264 | 
265 | // Verify the target block is greater than the latest block.
266 | if (hro.target_block <= latestBlock) {
267 | revert InvalidTargetBlock();
268 | }
269 | 
270 | // If the authority set id is less than the latest authority set id proven in the contract,
271 | // the proof is invalid.
272 | if (hro.authority_set_id < latestAuthoritySetId) {
273 | revert OldAuthoritySetId();
274 | }
275 | 
276 | // Verify the proof with the associated public values. This will revert if the proof is invalid.
277 | verifier.verifyProof(vectorXProgramVkey, publicValues, proof);
278 | 
279 | emit HeadUpdate(hro.target_block, hro.target_header_hash);
280 | emit HeaderRangeCommitmentStored(
281 | hro.trusted_block,
282 | hro.target_block,
283 | hro.data_root_commitment,
284 | hro.state_root_commitment,
285 | headerRangeCommitmentTreeSize
286 | );
287 | 
288 | // Update the latest authority set id if the authority set id is greater than the latest
289 | // authority set id.
290 | if (hro.authority_set_id > latestAuthoritySetId) {
291 | latestAuthoritySetId = hro.authority_set_id;
292 | }
293 | 
294 | // Store the data and state commitments for the range (latestBlock, targetBlock].
295 | bytes32 key = keccak256(abi.encode(latestBlock, hro.target_block));
296 | dataRootCommitments[key] = hro.data_root_commitment;
297 | stateRootCommitments[key] = hro.state_root_commitment;
298 | rangeStartBlocks[key] = latestBlock;
299 | 
300 | // Add the target header hash to the contract.
301 | blockHeightToHeaderHash[hro.target_block] = hro.target_header_hash;
302 | 
303 | // Update latest block.
304 | latestBlock = hro.target_block;
305 | }
306 | 
307 | /// @notice Adds the authority set hash for the next authority set id.
308 | /// @param proof The proof bytes for the SP1 proof.
309 | /// @param publicValues The public commitments from the SP1 proof.
310 | function rotate(bytes calldata proof, bytes calldata publicValues) external onlyApprovedRelayer {
311 | if (frozen) {
312 | revert ContractFrozen();
313 | }
314 | 
315 | // Parse the outputs from the committed public values associated with the proof.
316 | ProofOutputs memory proofOutputs = abi.decode(publicValues, (ProofOutputs));
317 | 
318 | // Assert this is a rotate proof.
319 | if (proofOutputs.proofType != ProofType.RotateProof) {
320 | revert InvalidProofType();
321 | }
322 | 
323 | // Decode the rotate outputs from the proof.
324 | RotateOutputs memory ro = abi.decode(proofOutputs.rotateOutputs, (RotateOutputs));
325 | 
326 | // Verify the current authority set hash has already been proven in the contract.
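// The hash must have been stored either at initialization or by a previous rotate proof, so a
// rotation cannot be bootstrapped from an authority set the contract has never seen.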
327 | bytes32 currentAuthoritySetHashStored = authoritySetIdToHash[ro.current_authority_set_id]; 328 | if (currentAuthoritySetHashStored == bytes32(0)) { 329 | revert AuthoritySetNotFound(); 330 | } 331 | if (ro.current_authority_set_hash != currentAuthoritySetHashStored) { 332 | revert AuthoritySetMismatch(); 333 | } 334 | 335 | // Verify the next authority set hash does not exist yet. 336 | bytes32 nextAuthoritySetHash = authoritySetIdToHash[ro.current_authority_set_id + 1]; 337 | if (nextAuthoritySetHash != bytes32(0)) { 338 | revert NextAuthoritySetExists(); 339 | } 340 | 341 | // Verify the proof with the associated public values. 342 | verifier.verifyProof(vectorXProgramVkey, publicValues, proof); 343 | 344 | emit AuthoritySetStored(ro.current_authority_set_id + 1, ro.new_authority_set_hash); 345 | 346 | // Store the authority set hash for the next authority set id. 347 | authoritySetIdToHash[ro.current_authority_set_id + 1] = ro.new_authority_set_hash; 348 | } 349 | } 350 | -------------------------------------------------------------------------------- /contracts/src/interfaces/ISP1Vector.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity ^0.8.0; 3 | 4 | interface ISP1Vector { 5 | /// @notice Emits event with the inputs of a header range request. 6 | /// @param trustedBlock The block height of the trusted block. 7 | /// @param trustedHeader The header hash of the trusted block. 8 | /// @param authoritySetId The authority set id of trusted block + 1. 9 | /// @param authoritySetHash The authority set hash of trusted block + 1. 10 | /// @param targetBlock The block height of the target block. 11 | event HeaderRangeRequested( 12 | uint32 trustedBlock, bytes32 trustedHeader, uint64 authoritySetId, bytes32 authoritySetHash, uint32 targetBlock 13 | ); 14 | 15 | /// @notice Emits event with the inputs of a rotate request. 16 | /// @param currentAuthoritySetId The authority set id of the current authority set. 17 | /// @param currentAuthoritySetHash The authority set hash of the current authority set. 18 | event RotateRequested(uint64 currentAuthoritySetId, bytes32 currentAuthoritySetHash); 19 | 20 | /// @notice Emitted when the light client's head is updated. 21 | event HeadUpdate(uint32 blockNumber, bytes32 headerHash); 22 | 23 | /// @notice Emitted when data + state commitment for range (startBlock, endBlock] are stored. 24 | /// @param headerRangeCommitmentTreeSize The commitment tree size for the header range. 25 | event HeaderRangeCommitmentStored( 26 | uint32 startBlock, 27 | uint32 endBlock, 28 | bytes32 dataCommitment, 29 | bytes32 stateCommitment, 30 | uint32 headerRangeCommitmentTreeSize 31 | ); 32 | 33 | /// @notice Emitted when a new authority set is stored. 34 | event AuthoritySetStored(uint64 authoritySetId, bytes32 authoritySetHash); 35 | 36 | /// @notice If the next authority set already exists. 37 | error NextAuthoritySetExists(); 38 | 39 | /// @notice Contract is frozen. 40 | error ContractFrozen(); 41 | 42 | /// @notice Trusted header not found. 43 | error TrustedHeaderNotFound(); 44 | 45 | /// @notice Stored trusted header does not match proof trusted header. 46 | error TrustedHeaderMismatch(); 47 | 48 | /// @notice Authority set not found. 49 | error AuthoritySetNotFound(); 50 | 51 | /// @notice Stored authority set does not match proof authority set. 
52 | error AuthoritySetMismatch(); 53 | 54 | /// @notice The authority set id is older than the authority set id of the latest commitHeaderRange. 55 | error OldAuthoritySetId(); 56 | 57 | /// @notice The proof type is not HeaderRangeProof or RotateProof. 58 | error InvalidProofType(); 59 | 60 | /// @notice The merkle tree size does not match the expected size. 61 | error InvalidMerkleTreeSize(); 62 | 63 | /// @notice The trusted block inside the proof does not match the trusted block of the contract. 64 | error BlockHeightMismatch(); 65 | 66 | /// @notice Target block is not greater than the latest block. 67 | error InvalidTargetBlock(); 68 | 69 | /// @notice Relayer not approved. 70 | error RelayerNotApproved(); 71 | } 72 | -------------------------------------------------------------------------------- /contracts/test/SP1Vector.t.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | pragma solidity ^0.8.0; 3 | 4 | import "forge-std/Test.sol"; 5 | import {SP1Vector} from "../src/SP1Vector.sol"; 6 | import {TimelockedUpgradeable} from "@succinctx/upgrades/TimelockedUpgradeable.sol"; 7 | import {ISP1Verifier} from "@sp1-contracts/ISP1Verifier.sol"; 8 | import "forge-std/console.sol"; 9 | import {Script} from "forge-std/Script.sol"; 10 | import {stdJson} from "forge-std/StdJson.sol"; 11 | import {SP1MockVerifier} from "@sp1-contracts/SP1MockVerifier.sol"; 12 | import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; 13 | 14 | contract SP1VectorTest is Test { 15 | using stdJson for string; 16 | /// @notice The type of proof that is being verified. 17 | 18 | enum ProofType { 19 | HeaderRangeProof, 20 | RotateProof 21 | } 22 | 23 | SP1Vector public sp1Vector; 24 | 25 | function setUp() public {} 26 | 27 | function test_Deploy() public { 28 | // Read trusted initialization parameters from .env 29 | address guardian = msg.sender; 30 | uint32 height = uint32(vm.envUint("GENESIS_HEIGHT")); 31 | bytes32 header = bytes32(vm.envBytes32("GENESIS_HEADER")); 32 | uint64 authoritySetId = uint64(vm.envUint("GENESIS_AUTHORITY_SET_ID")); 33 | bytes32 authoritySetHash = bytes32(vm.envBytes32("GENESIS_AUTHORITY_SET_HASH")); 34 | uint32 headerRangeCommitmentTreeSize = uint32(vm.envUint("HEADER_RANGE_COMMITMENT_TREE_SIZE")); 35 | bytes32 vectorProgramVkey = bytes32(vm.envBytes32("SP1_VECTOR_PROGRAM_VKEY")); 36 | SP1MockVerifier verifier = new SP1MockVerifier(); 37 | 38 | SP1Vector vectorImpl = new SP1Vector(); 39 | sp1Vector = SP1Vector(address(new ERC1967Proxy(address(vectorImpl), ""))); 40 | sp1Vector.initialize( 41 | SP1Vector.InitParameters({ 42 | guardian: guardian, 43 | height: height, 44 | header: header, 45 | authoritySetId: authoritySetId, 46 | authoritySetHash: authoritySetHash, 47 | headerRangeCommitmentTreeSize: headerRangeCommitmentTreeSize, 48 | vectorProgramVkey: vectorProgramVkey, 49 | verifier: address(verifier) 50 | }) 51 | ); 52 | 53 | console.log("Deployed Address:", address(sp1Vector)); 54 | } 55 | 56 | function test_Rotate() public { 57 | test_Deploy(); 58 | 59 | bytes memory publicValues = 60 | 
hex"00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006100ac0925b3544fd394483fe65261944a57198a269d8048a45102df1cd355bd0a6b3648f7bf29f5e6d8113ddec2e26bbf8705e7459bedb15e0619778312e9fd8b"; 61 | bytes memory proof = ""; 62 | sp1Vector.rotate(proof, publicValues); 63 | } 64 | 65 | function test_HeaderRange() public { 66 | test_Deploy(); 67 | 68 | bytes memory publicValues = 69 | hex"0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000427e34c8dd5e52e2d3a01f0228070d6c6ec557304c1a71b21a8de344ed5f9de8588790000000000000000000000000000000000000000000000000000000000000054ba873a3572cc2e019a5ec10182716aea73325906882194b9d3a19fc0408834e80000000000000000000000000000000000000000000000000000000000042896bc4b14a9759ff3ba227179419129f719ee9ed33894e6a1f1edc300954f63f48b7f48a4428b18e80a47eaf92880dd048a79bd1d4161a3a5b5edb67b97c525972a13ab31250cb9b3890c436541c1fa081622b5117fdca36fe88a8ae8bf6d852bb00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; 70 | bytes memory proof = ""; 71 | sp1Vector.commitHeaderRange(proof, publicValues); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /elf/vector-elf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/availproject/sp1-vector/a1722c11f1662bccb9abfc3bdff0738dfc97dbda/elf/vector-elf -------------------------------------------------------------------------------- /primitives/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | version = "0.1.0" 3 | name = "sp1-vector-primitives" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | serde.workspace = true 8 | sha2.workspace = true 9 | ed25519-consensus.workspace = true 10 | codec.workspace = true 11 | blake2.workspace = true 12 | itertools.workspace = true 13 | alloy-primitives = { version = "0.8", features = ["serde"] } 14 | alloy-sol-types = { version = "0.8" } 15 | 16 | [dev-dependencies] 17 | primitive-types = "0.12.2" 18 | avail-subxt = { git = "https://github.com/availproject/avail.git", tag = "v2.2.2.0-rc1" } 19 | 
--------------------------------------------------------------------------------
/primitives/src/consts.rs:
--------------------------------------------------------------------------------
1 | // Length of an Avail validator (pubkey + weight).
2 | pub const VALIDATOR_LENGTH: usize = PUBKEY_LENGTH + WEIGHT_LENGTH;
3 | 
4 | // Length of an Avail pubkey.
5 | pub const PUBKEY_LENGTH: usize = 32;
6 | 
7 | // Length of the weight of an Avail validator.
8 | pub const WEIGHT_LENGTH: usize = 8;
9 | 
10 | // Blake2b hash size.
11 | pub const HASH_SIZE: usize = 32;
12 | 
13 | // ABI-encoded length of the header range outputs.
14 | pub const HEADER_OUTPUTS_LENGTH: usize = 32 * 9;
15 | 
16 | // ABI-encoded length of the rotate outputs.
17 | pub const ROTATE_OUTPUTS_LENGTH: usize = 32 * 3;
18 | 
19 | // ABI-encoded length of final proof output.
20 | pub const PROOF_OUTPUT_LENGTH: usize = 544;
21 | 
--------------------------------------------------------------------------------
/primitives/src/header_range.rs:
--------------------------------------------------------------------------------
1 | use alloy_primitives::B256;
2 | use alloy_sol_types::SolType;
3 | 
4 | use crate::consts::HEADER_OUTPUTS_LENGTH;
5 | use crate::merkle::get_merkle_root_commitments;
6 | use crate::types::{DecodedHeaderData, HeaderRangeInputs, HeaderRangeOutputs};
7 | use crate::{
8 | compute_authority_set_commitment, decode_scale_compact_int, hash_encoded_header,
9 | verify_justification,
10 | };
11 | 
12 | /// Verify the justification from an authority set on the target block and compute the
13 | /// state and data root commitments over the range [trusted_block + 1, target_block] inclusive.
14 | pub fn verify_header_range(header_range_inputs: HeaderRangeInputs) -> [u8; HEADER_OUTPUTS_LENGTH] {
15 | // 1. Decode the headers using: https://github.com/availproject/avail-core/blob/main/core/src/header/mod.rs#L44-L66.
16 | // 2. Verify the chain of headers is connected from the trusted block to the target block.
17 | // 3. Compute the simple merkle tree commitment for the headers.
18 | // 4. Verify the justification is valid.
19 | // 5. Compute the authority set hash used for the justification. This will be verified to be
20 | // from an authority set id >= the latest authority set id used in the contract. The authority
21 | // set used must have been proven with a previous rotate proof.
22 | // 6. Verify the block hash the justification is signed over matches the last header hash in the
23 | // header chain.
24 | 
25 | // Stage 1: Decode and get the hashes of all of the headers.
26 | let header_data: Vec<DecodedHeaderData> = header_range_inputs
27 | .encoded_headers
28 | .iter()
29 | .map(|header_bytes| decode_header(header_bytes))
30 | .collect();
31 | 
32 | // Stage 2: Verify the chain of all headers is connected from the trusted block to the target block
33 | // by verifying the parent hashes are linked and the block numbers are sequential.
34 | for i in 1..header_data.len() {
35 | // Verify the headers are linked.
36 | assert_eq!(header_data[i - 1].header_hash, header_data[i].parent_hash);
37 | // Verify the block numbers are sequential.
38 | assert_eq!(
39 | header_data[i - 1]
40 | .block_number
41 | .checked_add(1)
42 | .expect("Block number overflow"),
43 | header_data[i].block_number
44 | );
45 | }
46 | 
47 | // Stage 3: Compute the simple Merkle tree commitment for the headers. Note: Does not include
48 | // the trusted header in the commitment.
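// For example, with trusted block 100 and target block 104, the leaves are the state/data
// roots of blocks 101..=104, padded with zero hashes up to merkle_tree_size (see merkle.rs).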
49 | let (state_root_commitment, data_root_commitment) =
50 | get_merkle_root_commitments(&header_data[1..], header_range_inputs.merkle_tree_size);
51 | 
52 | // Stage 4: Verify the justification is valid.
53 | verify_justification(&header_range_inputs.target_justification);
54 | 
55 | // Stage 5: Compute the authority set hash for the justification. This is verified to match
56 | // an authority set hash in the SP1Vector contract when the proof is verified.
57 | let authority_set_hash =
58 | compute_authority_set_commitment(&header_range_inputs.target_justification.valset_pubkeys);
59 | 
60 | // Stage 6: Verify the block hash the justification is signed over matches the last header hash
61 | // in the header chain commitment.
62 | assert_eq!(
63 | header_range_inputs.target_justification.block_hash,
64 | header_data[header_data.len() - 1].header_hash
65 | );
66 | 
67 | HeaderRangeOutputs::abi_encode(&(
68 | // Trusted block.
69 | header_data[0].block_number,
70 | header_data[0].header_hash,
71 | header_range_inputs.target_justification.authority_set_id,
72 | authority_set_hash,
73 | // Target block.
74 | header_data[header_data.len() - 1].block_number,
75 | header_data[header_data.len() - 1].header_hash,
76 | state_root_commitment,
77 | data_root_commitment,
78 | header_range_inputs.merkle_tree_size as u32,
79 | ))
80 | .try_into()
81 | .unwrap()
82 | }
83 | 
84 | /// Decode the header into a DecodedHeaderData struct manually and compute the header hash.
85 | fn decode_header(header_bytes: &[u8]) -> DecodedHeaderData {
86 | // The first 32 bytes are the parent hash.
87 | let mut cursor: usize = 32;
88 | let parent_hash = B256::from_slice(&header_bytes[..cursor]);
89 | 
90 | // The next section is the variable-length encoded block number.
91 | let (block_nb, num_bytes) = decode_scale_compact_int(header_bytes[cursor..cursor + 5].to_vec());
92 | cursor += num_bytes;
93 | 
94 | // After the block number is the state root.
95 | let state_root = B256::from_slice(&header_bytes[cursor..cursor + 32]);
96 | 
97 | // The last 32 bytes are the data root.
98 | let data_root = B256::from_slice(&header_bytes[header_bytes.len() - 32..header_bytes.len()]);
99 | 
100 | // Get the header hash.
101 | let header_hash = hash_encoded_header(header_bytes);
102 | 
103 | DecodedHeaderData {
104 | block_number: block_nb as u32,
105 | parent_hash,
106 | state_root,
107 | data_root,
108 | header_hash,
109 | }
110 | }
111 | 
--------------------------------------------------------------------------------
/primitives/src/justification.rs:
--------------------------------------------------------------------------------
1 | use crate::{hash_encoded_header, types::CircuitJustification};
2 | use codec::Encode;
3 | use ed25519_consensus::{Signature, VerificationKey};
4 | use std::collections::{HashMap, HashSet};
5 | 
6 | use alloy_primitives::B256;
7 | 
8 | /// Verify that an Ed25519 signature is valid. Panics if the signature is not valid.
9 | fn verify_signature(pubkey_bytes: [u8; 32], signed_message: &[u8], signature: [u8; 64]) {
10 | let pubkey: VerificationKey = VerificationKey::try_from(pubkey_bytes).unwrap();
11 | let verified = pubkey.verify(&Signature::from(signature), signed_message);
12 | if verified.is_err() {
13 | panic!("Failed to verify Ed25519 signature.");
14 | }
15 | }
16 | 
17 | /// Confirm ancestry of a child block by traversing the ancestry_map until root_hash is reached.
18 | /// Sourced from https://github.com/availproject/avail-light/blob/main/core/src/finality.rs with some
19 | /// small refactors for readability.
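/// For example, with an ancestry_map of {c -> b, b -> a}, confirm_ancestry(&c, &a, &map)
/// walks c -> b -> a and returns true; any hash missing from the map returns false.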
20 | fn confirm_ancestry(
21 | child_hash: &B256,
22 | root_hash: &B256,
23 | ancestry_map: &HashMap<B256, B256>,
24 | ) -> bool {
25 | let mut current_hash = child_hash;
26 | 
27 | while current_hash != root_hash {
28 | match ancestry_map.get(current_hash) {
29 | Some(parent_hash) => current_hash = parent_hash,
30 | None => return false,
31 | }
32 | }
33 | 
34 | true
35 | }
36 | 
37 | /// Determine if a supermajority is achieved.
38 | fn is_signed_by_supermajority(num_signatures: usize, validator_set_size: usize) -> bool {
39 | num_signatures * 3 > validator_set_size * 2
40 | }
41 | 
42 | /// Verify a justification on a block from the specified authority set. Confirms that a supermajority
43 | /// of the validator set is achieved on the specific block. Sourced from
44 | /// https://github.com/availproject/avail-light/blob/main/core/src/finality.rs with some minor
45 | /// modifications to fit into SP1 Vector, and small refactors for readability.
46 | pub fn verify_justification(justification: &CircuitJustification) {
47 | // 1. Form an ancestry map from votes_ancestries in the justification. This maps header hashes to their parents' hashes.
48 | // Since we only get encoded headers, ensure that the parent is contained in the encoded header, no need to decode it.
49 | let ancestry_map: HashMap<B256, B256> = justification
50 | .ancestries_encoded
51 | .iter()
52 | .map(|encoded_header| {
53 | let parent_hash_array: [u8; 32] = encoded_header[0..32].try_into().unwrap();
54 | let parent_hash = B256::from(parent_hash_array);
55 | let header_hash = hash_encoded_header(encoded_header);
56 | 
57 | (header_hash, parent_hash.to_owned())
58 | })
59 | .collect();
60 | 
61 | // 2. Get the signer addresses of the accounts with valid precommits for the justification. Invalidate
62 | // precommits from the same signer address with a set.
63 | let signer_addresses: HashSet<B256> = justification
64 | .precommits
65 | .iter()
66 | .filter_map(|p| {
67 | // Form the message which is signed in the Justification.
68 | // Combination of the precommit flag, block data, round number and set_id.
69 | let signed_message = Encode::encode(&(
70 | 1u8,
71 | p.target_hash.0,
72 | p.target_number,
73 | &justification.round,
74 | &justification.authority_set_id,
75 | ));
76 | 
77 | // Verify the signature is valid on the precommit, and panic if this is not the case.
78 | verify_signature(p.pubkey.0, &signed_message, p.signature.0);
79 | 
80 | // Confirm the ancestry of the child block.
81 | let ancestry_confirmed =
82 | confirm_ancestry(&p.target_hash, &justification.block_hash, &ancestry_map);
83 | 
84 | if ancestry_confirmed {
85 | Some(p.pubkey)
86 | } else {
87 | None
88 | }
89 | })
90 | .collect();
91 | 
92 | // 3. Count the accounts which are in the validator set of the justification.
93 | let num_matched_addresses = signer_addresses
94 | .iter()
95 | .filter(|x| justification.valset_pubkeys.iter().any(|e| e.0.eq(&x[..])))
96 | .count();
97 | 
98 | // 4. Confirm that the supermajority of the validator set is achieved.
99 | assert!(
100 | is_signed_by_supermajority(num_matched_addresses, justification.valset_pubkeys.len()),
101 | "The justification is not signed by a supermajority (> 2/3) of the validator set!"
102 | );
103 | }
104 | 
--------------------------------------------------------------------------------
/primitives/src/lib.rs:
--------------------------------------------------------------------------------
1 | use alloy_primitives::B256;
2 | use blake2::{
3 | digest::{Update, VariableOutput},
4 | Blake2bVar,
5 | };
6 | use codec::{Compact, Decode, Encode};
7 | use sha2::{Digest as Sha256Digest, Sha256};
8 | 
9 | pub mod consts;
10 | pub mod header_range;
11 | mod justification;
12 | pub mod merkle;
13 | pub mod rotate;
14 | pub mod types;
15 | 
16 | pub use justification::verify_justification;
17 | 
18 | /// Blake2B hash of an encoded header. Note: This is a generic hash fn for any data.
19 | pub(crate) fn hash_encoded_header(encoded_header: &[u8]) -> B256 {
20 | const DIGEST_SIZE: usize = 32;
21 | let mut hasher = Blake2bVar::new(DIGEST_SIZE).unwrap();
22 | hasher.update(encoded_header);
23 | 
24 | let mut digest_bytes = [0u8; DIGEST_SIZE];
25 | let _ = hasher.finalize_variable(&mut digest_bytes);
26 | B256::from(digest_bytes)
27 | }
28 | 
29 | /// Compute the new authority set hash from the encoded pubkeys.
30 | pub fn compute_authority_set_commitment(pubkeys: &[B256]) -> B256 {
31 | let mut commitment_so_far = Sha256::digest(pubkeys[0]).to_vec();
32 | for pubkey in pubkeys.iter().skip(1) {
33 | let mut input_to_hash = Vec::new();
34 | input_to_hash.extend_from_slice(&commitment_so_far);
35 | input_to_hash.extend_from_slice(pubkey.as_slice());
36 | commitment_so_far = Sha256::digest(&input_to_hash).to_vec();
37 | }
38 | B256::from_slice(&commitment_so_far)
39 | }
40 | 
41 | /// Decode a SCALE-encoded compact int and get the value and the number of bytes it took to encode.
42 | pub(crate) fn decode_scale_compact_int(bytes: Vec<u8>) -> (u64, usize) {
43 | let value = Compact::<u64>::decode(&mut bytes.as_slice())
44 | .expect("Failed to decode SCALE-encoded compact int.");
45 | (value.into(), value.encoded_size())
46 | }
47 | 
48 | #[cfg(test)]
49 | mod tests {
50 | use super::*;
51 | use avail_subxt::api::runtime_types::avail_core::header::extension::v3::HeaderExtension;
52 | use avail_subxt::api::runtime_types::avail_core::header::extension::HeaderExtension::V3;
53 | use avail_subxt::config::substrate::Digest;
54 | use avail_subxt::primitives::Header as DaHeader;
55 | use codec::{Compact, Encode};
56 | use primitive_types::H256;
57 | 
58 | #[test]
59 | fn test_decode_scale_compact_int() {
60 | let nums = [
61 | u32::MIN,
62 | 1u32,
63 | 63u32,
64 | 64u32,
65 | 16383u32,
66 | 16384u32,
67 | 1073741823u32,
68 | 1073741824u32,
69 | 4294967295u32,
70 | u32::MAX,
71 | ];
72 | let encoded_nums: Vec<Vec<u8>> = nums.iter().map(|num| Compact(*num).encode()).collect();
73 | let zipped: Vec<(&Vec<u8>, &u32)> = encoded_nums.iter().zip(nums.iter()).collect();
74 | for (encoded_num, num) in zipped {
75 | let (value, _) = decode_scale_compact_int(encoded_num.to_vec());
76 | assert_eq!(value, *num as u64);
77 | }
78 | }
79 | 
80 | #[test]
81 | fn test_header_parent_hash_extracting() {
82 | let hash = H256::random();
83 | let h = DaHeader {
84 | parent_hash: hash,
85 | number: 1,
86 | state_root: H256::zero(),
87 | extrinsics_root: H256::zero(),
88 | extension: V3(HeaderExtension {
89 | ..Default::default()
90 | }),
91 | digest: Digest {
92 | ..Default::default()
93 | },
94 | };
95 | 
96 | let encoded = h.encode();
97 | 
98 | let n: [u8; 32] = encoded[0..32].try_into().unwrap();
99 | let extracted_hash = H256::from(n);
100 | assert_eq!(extracted_hash, hash, "Hashes don't match")
101 | }
102 | }
103 | 
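A minimal sketch of the hash chaining performed by `compute_authority_set_commitment` above: for pubkeys `[k0, k1]` the commitment is `sha256(sha256(k0) || k1)`. The test below is an editor-added illustration (not part of the original crate) that could live in the `tests` module; it uses only items already imported in this file.

#[test]
fn test_authority_set_commitment_chains_sha256() {
    // Hypothetical pubkeys, chosen only for illustration.
    let k0 = B256::from([1u8; 32]);
    let k1 = B256::from([2u8; 32]);

    // Manually chain the hashes the same way compute_authority_set_commitment does.
    let mut input = Sha256::digest(k0).to_vec();
    input.extend_from_slice(k1.as_slice());
    let expected = Sha256::digest(&input).to_vec();

    assert_eq!(
        compute_authority_set_commitment(&[k0, k1]),
        B256::from_slice(&expected)
    );
}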
--------------------------------------------------------------------------------
/primitives/src/merkle.rs:
--------------------------------------------------------------------------------
1 | use sha2::{Digest, Sha256};
2 | 
3 | use crate::types::DecodedHeaderData;
4 | use alloy_primitives::B256;
5 | 
6 | // Computes the simple Merkle root of the leaves. If the number of leaves is not a power of 2, pad
7 | // with empty 32 byte arrays till the next power of 2.
8 | fn get_merkle_root(leaves: Vec<B256>) -> B256 {
9 | // Return empty 32 byte array if there are no leaves.
10 | if leaves.is_empty() {
11 | return B256::from_slice(&[0u8; 32]);
12 | }
13 | 
14 | // Extend leaves to the next power of 2 if needed.
15 | let mut leaves = leaves;
16 | while leaves.len().count_ones() != 1 {
17 | leaves.push(B256::from([0u8; 32]));
18 | }
19 | 
20 | // Note: In SP1 Vector, the leaves are not hashed.
21 | let mut nodes = leaves.clone();
22 | while nodes.len() > 1 {
23 | nodes = (0..nodes.len() / 2)
24 | .map(|i| {
25 | let mut hasher = Sha256::new();
26 | hasher.update(nodes[2 * i]);
27 | hasher.update(nodes[2 * i + 1]);
28 | B256::from_slice(&hasher.finalize())
29 | })
30 | .collect();
31 | }
32 | 
33 | nodes[0]
34 | }
35 | 
36 | /// Computes the simple Merkle root commitments for the state root and data root.
37 | pub fn get_merkle_root_commitments(
38 | decoded_headers: &[DecodedHeaderData],
39 | tree_size: usize,
40 | ) -> (B256, B256) {
41 | let mut state_root_leaves = Vec::new();
42 | let mut data_root_leaves = Vec::new();
43 | 
44 | for header in decoded_headers {
45 | state_root_leaves.push(header.state_root);
46 | data_root_leaves.push(header.data_root);
47 | }
48 | 
49 | // Confirm tree_size is a power of 2.
50 | assert!(tree_size.is_power_of_two());
51 | 
52 | // Confirm that it's at least the number of headers that's passed in.
53 | assert!(tree_size >= decoded_headers.len());
54 | 
55 | // Pad the leaves to a fixed size of tree_size.
56 | while state_root_leaves.len() < tree_size {
57 | state_root_leaves.push(B256::from([0u8; 32]));
58 | data_root_leaves.push(B256::from([0u8; 32]));
59 | }
60 | 
61 | // Compute the Merkle root for state root leaves.
62 | let state_root_commitment = get_merkle_root(state_root_leaves);
63 | 
64 | // Compute the Merkle root for data root leaves.
65 | let data_root_commitment = get_merkle_root(data_root_leaves);
66 | 
67 | (state_root_commitment, data_root_commitment)
68 | }
69 | 
--------------------------------------------------------------------------------
/primitives/src/rotate.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 | compute_authority_set_commitment,
3 | consts::{PUBKEY_LENGTH, ROTATE_OUTPUTS_LENGTH, VALIDATOR_LENGTH},
4 | decode_scale_compact_int, hash_encoded_header,
5 | types::{RotateInputs, RotateOutputs},
6 | verify_justification,
7 | };
8 | use alloy_primitives::B256;
9 | use alloy_sol_types::SolType;
10 | 
11 | /// Verify the justification from the current authority set on the epoch end header and return the next
12 | /// authority set commitment.
13 | pub fn verify_rotate(rotate_inputs: RotateInputs) -> [u8; ROTATE_OUTPUTS_LENGTH] {
14 | // Verify the provided justification is valid.
15 | verify_justification(&rotate_inputs.justification);
16 | 
17 | let expected_block_hash = hash_encoded_header(&rotate_inputs.header_rotate_data.header_bytes);
18 | 
19 | // The header hash should match the block hash signed by the justification.
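// That is, the justification must finalize exactly the epoch-end header supplied in the
// inputs; otherwise the next authority set could be extracted from an unrelated header.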
20 | assert_eq!(expected_block_hash, rotate_inputs.justification.block_hash);
21 | 
22 | // Extract the public keys of the next validator set from the epoch end header.
23 | let next_validator_pubkeys = get_next_validator_pubkeys_from_epoch_end_header(
24 | &rotate_inputs.header_rotate_data.header_bytes,
25 | rotate_inputs.header_rotate_data.consensus_log_position,
26 | );
27 | 
28 | // Compute the current authority set hash from the public keys used in the justification.
29 | let current_authority_set_hash =
30 | compute_authority_set_commitment(&rotate_inputs.justification.valset_pubkeys);
31 | 
32 | // Compute the next authority set hash from the public keys that are encoded in the epoch end header.
33 | let next_authority_set_hash = compute_authority_set_commitment(&next_validator_pubkeys);
34 | 
35 | // Return the ABI encoded RotateOutputs.
36 | RotateOutputs::abi_encode(&(
37 | rotate_inputs.justification.authority_set_id,
38 | current_authority_set_hash,
39 | next_authority_set_hash,
40 | ))
41 | .try_into()
42 | .unwrap()
43 | }
44 | 
45 | /// Extract the public keys of the next validator set from the epoch end header.
46 | ///
47 | /// 1. Verify the epoch end header's consensus log is formatted correctly before the next authority set hash bytes.
48 | /// 2. Extract the public keys from the epoch end header. All validator voting weights are 1. The public
49 | /// keys are encoded as 40 bytes: 32 bytes for the pubkey and 8 bytes for the voting weight.
50 | /// 3. Assert the delay is 0.
51 | pub fn get_next_validator_pubkeys_from_epoch_end_header(
52 | header_bytes: &[u8],
53 | mut cursor: usize,
54 | ) -> Vec<B256> {
55 | // Verify consensus flag is 4.
56 | assert_eq!(header_bytes[cursor + 1], 4u8);
57 | 
58 | // Verify the consensus engine ID: 0x46524e4b [70, 82, 78, 75]
59 | // Consensus Id: https://github.com/availproject/avail/blob/188c20d6a1577670da65e0c6e1c2a38bea8239bb/avail-subxt/examples/download_digest_items.rs#L41-L56
60 | assert_eq!(
61 | header_bytes[cursor + 2..cursor + 6],
62 | [70u8, 82u8, 78u8, 75u8]
63 | );
64 | 
65 | // Move past the consensus engine ID.
66 | cursor += 6;
67 | 
68 | // Decode the encoded scheduled change message length.
69 | let (_, decoded_byte_length) =
70 | decode_scale_compact_int(header_bytes[cursor..cursor + 5].to_vec());
71 | 
72 | // Move past the encoded scheduled change message length.
73 | cursor += decoded_byte_length;
74 | 
75 | // Verify the next byte after encoded scheduled change message is the ScheduledChange enum flag.
76 | assert_eq!(header_bytes[cursor], 1u8);
77 | 
78 | // Move past the ScheduledChange enum flag.
79 | cursor += 1;
80 | 
81 | // Decode the encoded authority set size.
82 | let (authority_set_size, decoded_byte_length) =
83 | decode_scale_compact_int(header_bytes[cursor..cursor + 5].to_vec());
84 | 
85 | // Move past the encoded authority set size.
86 | cursor += decoded_byte_length;
87 | 
88 | // Extract the public keys from the epoch end header.
89 | let extracted_pubkeys: Vec<B256> = (0..authority_set_size as usize)
90 | .map(|i| {
91 | let start = cursor + (i * VALIDATOR_LENGTH);
92 | let pubkey = B256::from_slice(&header_bytes[start..start + PUBKEY_LENGTH]);
93 | 
94 | // All validator voting weights in Avail are 1.
95 | assert_eq!(
96 | &header_bytes[start + PUBKEY_LENGTH..start + VALIDATOR_LENGTH],
97 | &[1u8, 0, 0, 0, 0, 0, 0, 0]
98 | );
99 | 
100 | pubkey
101 | })
102 | .collect();
103 | 
104 | // Assert the delay is 0.
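// The 4-byte delay field follows the validator list; a nonzero delay would mean the
// scheduled authority change does not take effect immediately at this block.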
105 | let delay_start = cursor + (authority_set_size as usize * VALIDATOR_LENGTH);
106 | assert_eq!(
107 | &header_bytes[delay_start..delay_start + 4],
108 | &[0u8, 0u8, 0u8, 0u8]
109 | );
110 | 
111 | extracted_pubkeys
112 | }
113 | 
--------------------------------------------------------------------------------
/primitives/src/types.rs:
--------------------------------------------------------------------------------
1 | use alloy_primitives::{B256, B512};
2 | use alloy_sol_types::sol;
3 | 
4 | use serde::{Deserialize, Serialize};
5 | 
6 | /// uint32 trusted_block;
7 | /// bytes32 trusted_header_hash;
8 | /// uint64 authority_set_id;
9 | /// bytes32 authority_set_hash;
10 | /// uint32 target_block;
11 | /// bytes32 target_header_hash;
12 | /// bytes32 state_root_commitment;
13 | /// bytes32 data_root_commitment;
14 | /// uint32 commitment_tree_size;
15 | pub type HeaderRangeOutputs = sol! {
16 | tuple(uint32, bytes32, uint64, bytes32, uint32, bytes32, bytes32, bytes32, uint32)
17 | };
18 | 
19 | /// uint64 current_authority_set_id;
20 | /// bytes32 current_authority_set_hash;
21 | /// bytes32 new_authority_set_hash;
22 | pub type RotateOutputs = sol! {
23 | tuple(uint64, bytes32, bytes32)
24 | };
25 | 
26 | /// uint8 ProofType (0 = HeaderRangeProof, 1 = RotateProof)
27 | /// bytes HeaderRangeOutputs
28 | /// bytes RotateOutputs
29 | pub type ProofOutput = sol! {
30 | tuple(uint8, bytes, bytes)
31 | };
32 | 
33 | #[derive(Debug, Deserialize, Serialize)]
34 | pub enum ProofType {
35 | HeaderRangeProof = 0,
36 | RotateProof = 1,
37 | }
38 | 
39 | impl ProofType {
40 | pub fn from_uint(value: u8) -> Option<Self> {
41 | match value {
42 | 0 => Some(ProofType::HeaderRangeProof),
43 | 1 => Some(ProofType::RotateProof),
44 | _ => None,
45 | }
46 | }
47 | }
48 | 
49 | #[derive(Debug, Deserialize, Serialize, Clone)]
50 | pub struct RotateInputs {
51 | /// Justification data for the current authority set.
52 | pub justification: CircuitJustification,
53 | /// Data for the next authority set rotation.
54 | pub header_rotate_data: HeaderRotateData,
55 | }
56 | 
57 | #[derive(Debug, Deserialize, Serialize, Clone)]
58 | /// Data for the next set of authorities.
59 | pub struct HeaderRotateData {
60 | /// Encoded header bytes for the epoch end block.
61 | pub header_bytes: Vec<u8>,
62 | /// Index of the new authority set data in the header bytes.
63 | pub consensus_log_position: usize,
64 | }
65 | 
66 | #[derive(Debug, Deserialize, Serialize, Clone)]
67 | /// Signature of a particular validator targeting a specific block
68 | /// (may not be the same as justification's target block)
69 | pub struct Precommit {
70 | /// Target block number
71 | pub target_number: u32,
72 | /// Target block hash
73 | pub target_hash: B256,
74 | /// Signer public key
75 | pub pubkey: B256,
76 | /// Signature of the precommit
77 | pub signature: B512,
78 | }
79 | 
80 | #[derive(Debug, Deserialize, Serialize, Clone)]
81 | /// Justification data for an authority set.
82 | pub struct CircuitJustification {
83 | /// Commit round
84 | pub round: u64,
85 | /// Set ID of authority set
86 | pub authority_set_id: u64,
87 | /// All authority set public keys
88 | pub valset_pubkeys: Vec<B256>,
89 | /// Precommits containing signatures of a subset of authority set
90 | pub precommits: Vec<Precommit>,
91 | /// Hash of the block associated with the justification.
92 | pub block_hash: B256,
93 | /// Vector of encoded headers needed to prove precommit target ancestry.
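/// Each entry is an encoded header whose first 32 bytes are its parent hash, which is all
/// the justification verifier needs to rebuild the ancestry map without fully decoding it.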
94 | pub ancestries_encoded: Vec<Vec<u8>>,
95 | }
96 | 
97 | #[derive(Debug, Deserialize, Serialize)]
98 | pub struct HeaderRangeInputs {
99 | pub merkle_tree_size: usize,
100 | pub encoded_headers: Vec<Vec<u8>>,
101 | pub target_justification: CircuitJustification,
102 | }
103 | 
104 | #[derive(Debug, Deserialize, Serialize)]
105 | pub struct DecodedHeaderData {
106 | /// Block number of the decoded header.
107 | pub block_number: u32,
108 | /// Hash of the parent block.
109 | pub parent_hash: B256,
110 | /// State root of the block.
111 | pub state_root: B256,
112 | /// Data root of the block.
113 | pub data_root: B256,
114 | /// Hash of the header.
115 | pub header_hash: B256,
116 | }
117 | 
--------------------------------------------------------------------------------
/program/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | version = "0.1.0"
3 | name = "sp1-vectorx-program"
4 | edition = "2021"
5 | 
6 | [dependencies]
7 | sp1-vector-primitives.workspace = true
8 | alloy-sol-types = { version = "0.8" }
9 | sp1-zkvm.workspace = true
10 | 
--------------------------------------------------------------------------------
/program/elf/riscv32im-succinct-zkvm-elf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/availproject/sp1-vector/a1722c11f1662bccb9abfc3bdff0738dfc97dbda/program/elf/riscv32im-succinct-zkvm-elf
--------------------------------------------------------------------------------
/program/src/main.rs:
--------------------------------------------------------------------------------
1 | //! A simple program to be proven inside the zkVM.
2 | 
3 | #![no_main]
4 | sp1_zkvm::entrypoint!(main);
5 | 
6 | use alloy_sol_types::SolType;
7 | use sp1_vector_primitives::{
8 | consts::HEADER_OUTPUTS_LENGTH,
9 | consts::ROTATE_OUTPUTS_LENGTH,
10 | header_range::verify_header_range,
11 | rotate::verify_rotate,
12 | types::{HeaderRangeInputs, ProofOutput, ProofType, RotateInputs},
13 | };
14 | 
15 | /// Generate an SP1 Vector proof for a given proof type.
16 | pub fn main() {
17 | // Read the proof type requested from the inputs.
18 | let proof_type: ProofType = sp1_zkvm::io::read::<ProofType>();
19 | 
20 | let mut header_range_outputs = [0u8; HEADER_OUTPUTS_LENGTH];
21 | let mut rotate_outputs = [0u8; ROTATE_OUTPUTS_LENGTH];
22 | 
23 | match proof_type {
24 | ProofType::HeaderRangeProof => {
25 | // Read the header range inputs from the inputs.
26 | let header_range_inputs = sp1_zkvm::io::read::<HeaderRangeInputs>();
27 | header_range_outputs = verify_header_range(header_range_inputs);
28 | }
29 | ProofType::RotateProof => {
30 | // Read the rotate inputs from the inputs.
31 | let rotate_inputs = sp1_zkvm::io::read::<RotateInputs>();
32 | rotate_outputs = verify_rotate(rotate_inputs);
33 | }
34 | }
35 | 
36 | // Commit the proof outputs to the zkVM as an encoded slice.
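// The half of the output that does not match the proof type stays zeroed; the on-chain
// contract decodes the (uint8, bytes, bytes) tuple and only reads the field selected by
// the proof type.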
37 | let output = ProofOutput::abi_encode(&(proof_type as u8, header_range_outputs, rotate_outputs)); 38 | sp1_zkvm::io::commit_slice(&output); 39 | } 40 | -------------------------------------------------------------------------------- /query/.env.example: -------------------------------------------------------------------------------- 1 | RPC_1= 2 | RPC_11155111= 3 | RPC_42161= 4 | RPC_421614= 5 | RPC_8453= 6 | RPC_84532= 7 | AVAIL_WS_HEX= 8 | AVAIL_WS_MAINNET= 9 | AVAIL_WS_TURING= 10 | 11 | # AWS Dynamo DB Read Keys 12 | AWS_REGION= 13 | AWS_ACCESS_KEY_ID= 14 | AWS_SECRET_ACCESS_KEY= -------------------------------------------------------------------------------- /query/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals" 3 | } 4 | -------------------------------------------------------------------------------- /query/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | .yarn/install-state.gz 8 | 9 | # testing 10 | /coverage 11 | 12 | # next.js 13 | /.next/ 14 | /out/ 15 | 16 | # production 17 | /build 18 | 19 | # misc 20 | .DS_Store 21 | *.pem 22 | 23 | # debug 24 | npm-debug.log* 25 | yarn-debug.log* 26 | yarn-error.log* 27 | 28 | # local env files 29 | .env*.local 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | -------------------------------------------------------------------------------- /query/app/api/health/route.ts: -------------------------------------------------------------------------------- 1 | import { CHAIN_TO_WS_ENDPOINT, getHealthStatusAvail } from '@/app/utils/avail'; 2 | import { NextRequest, NextResponse } from 'next/server'; 3 | 4 | /** Request the health of a VectorX light client. Searches for the latest log emitted by the VectorX 5 | * contract and compares it to the latest block on the Avail chain. Also finds the difference between 6 | * the latest block on the Avail chain and the latest block on the VectorX contract. 7 | 8 | * Required query parameters: 9 | * - chainName: The name of the Avail chain to check. 10 | * - contractChainId: The chain ID where the VectorX contract is deployed. 11 | * - contractAddress: The address of the VectorX contract. 12 | * Optional query parameters: 13 | * - maxDelayHours: The number of hours to check for emitted logs. Default is 4 hours. 
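 * Example (illustrative values): GET /api/health?chainName=turing&contractChainId=11155111&contractAddress=0x<contract>&maxDelayHours=4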
14 | */
15 | export async function GET(req: NextRequest) {
16 | const url = new URL(req.url);
17 | 
18 | const chainName = url.searchParams.get('chainName');
19 | const ethereumChainId = Number(url.searchParams.get('contractChainId'));
20 | const address = url.searchParams.get('contractAddress');
21 | const maxDelayHours = Number(url.searchParams.get('maxDelayHours')) || 4;
22 | 
23 | console.log('Avail Chain name: ' + chainName);
24 | console.log('Ethereum Chain ID: ' + ethereumChainId);
25 | console.log('Address: ' + address);
26 | 
27 | // Note: searchParams.get() returns null (never undefined) and Number() never yields
28 | // undefined, so falsy checks are used here to catch missing parameters.
29 | if (!chainName || !address || !ethereumChainId) {
30 | return NextResponse.json({
31 | success: false,
32 | error: 'Missing required parameters'
33 | });
34 | }
35 | 
36 | if (process.env[`RPC_${ethereumChainId}`] === undefined) {
37 | return NextResponse.json({
38 | success: false,
39 | error: `Chain ID ${ethereumChainId} is not supported.`
40 | });
41 | }
42 | 
43 | let chainNameLowercase = chainName?.toLowerCase() as string;
44 | 
45 | if (!CHAIN_TO_WS_ENDPOINT.has(chainNameLowercase)) {
46 | return NextResponse.json({
47 | success: false,
48 | error: `Chain name ${chainNameLowercase} is not supported. Supported chains: ${Array.from(CHAIN_TO_WS_ENDPOINT.keys()).join(', ')}`
49 | });
50 | }
51 | 
52 | // Strip `0x` from address
53 | const addressUint8Array = Buffer.from(address!.substring(2), 'hex');
54 | 
55 | let healthInfo = await getHealthStatusAvail(
56 | addressUint8Array,
57 | ethereumChainId,
58 | chainName?.toUpperCase() as string,
59 | BigInt(maxDelayHours) * 60n * 60n
60 | );
61 | 
62 | return NextResponse.json({
63 | data: healthInfo
64 | });
65 | }
66 | 
--------------------------------------------------------------------------------
/query/app/api/justification/route.ts:
--------------------------------------------------------------------------------
1 | import { DynamoDBClient, QueryCommand } from '@aws-sdk/client-dynamodb';
2 | import { NextRequest, NextResponse } from 'next/server';
3 | 
4 | const tableName = 'justifications-v2';
5 | 
6 | /** Get the justification for a given Avail block.
7 | * - blockNumber: The block number of the Avail block.
8 | * - availChainId: The identifier of the Avail chain the block belongs to.
9 | */
10 | export async function GET(req: NextRequest) {
11 | const url = new URL(req.url);
12 | 
13 | let dynamoClient = new DynamoDBClient({ region: process.env.AWS_REGION });
14 | 
15 | const blockNumber = Number(url.searchParams.get('blockNumber'));
16 | const availChainId = url.searchParams.get('availChainId');
17 | 
18 | console.log(url.searchParams);
19 | 
20 | console.log('Block Number: ' + blockNumber);
21 | console.log('Avail Chain ID: ' + availChainId);
22 | 
23 | if (!blockNumber || !availChainId) {
24 | return NextResponse.json({
25 | success: false
26 | });
27 | }
28 | 
29 | let justificationKey = (availChainId!
+ '-' + blockNumber.toString()).toLowerCase();
30 | 
31 | const command = new QueryCommand({
32 | TableName: tableName,
33 | KeyConditionExpression: 'id = :id',
34 | ExpressionAttributeValues: {
35 | ':id': { S: justificationKey },
36 | },
37 | });
38 | 
39 | const response = await dynamoClient.send(command);
40 | 
41 | if (response.Items === undefined || response.Items.length === 0) {
42 | return NextResponse.json({
43 | success: false,
44 | error: 'No justification found'
45 | });
46 | }
47 | 
48 | return NextResponse.json({
49 | success: true,
50 | justification: response.Items![0].data
51 | });
52 | }
--------------------------------------------------------------------------------
/query/app/api/range/route.ts:
--------------------------------------------------------------------------------
1 | import { getBlockRangeAvail } from '@/app/utils/avail';
2 | import { NextRequest, NextResponse } from 'next/server';
3 | 
4 | /** Get the range of blocks that the VectorX contract has emitted logs for.
5 | * Required query parameters:
6 | * - contractChainId: The chain ID where the VectorX contract is deployed.
7 | * - contractAddress: The address of the VectorX contract.
8 | */
9 | export async function GET(req: NextRequest) {
10 | const url = new URL(req.url);
11 | 
12 | const ethereumChainId = Number(url.searchParams.get('contractChainId'));
13 | const address = url.searchParams.get('contractAddress');
14 | 
15 | console.log('Ethereum Chain ID: ' + ethereumChainId);
16 | console.log('VectorX Address: ' + address);
17 | 
18 | // Falsy checks catch missing parameters: get() returns null, and Number() never
19 | // yields undefined, so the original === undefined comparisons could never fire.
20 | if (!ethereumChainId || !address) {
21 | return NextResponse.json({
22 | success: false
23 | });
24 | }
25 | 
26 | // Parse address from string to Uint8Array.
27 | const contractAddress = Buffer.from(address!.substring(2), 'hex');
28 | 
29 | let range = await getBlockRangeAvail(contractAddress, ethereumChainId);
30 | if (range === undefined) {
31 | return NextResponse.json({
32 | success: false,
33 | error: 'Failed to get block range for requested block! This means that the specified contract is not registered in this service.'
32 | });
33 | } else {
34 | return NextResponse.json({
35 | data: range
36 | });
37 | }
38 | }
--------------------------------------------------------------------------------
/query/app/api/route.ts:
--------------------------------------------------------------------------------
1 | import { NextRequest, NextResponse } from 'next/server';
2 | import assert from 'assert';
3 | import { createHash } from 'crypto';
4 | import { keccak256, encodeAbiParameters, createPublicClient, http } from 'viem';
5 | import { ApiPromise, initialize, disconnect } from 'avail-js-sdk';
6 | import { getChainInfo, queryLogs } from '@/app/utils/shared';
7 | import { VECTORX_DATA_COMMITMENT_EVENT } from '@/app/utils/abi';
8 | import { AbiEvent } from 'abitype';
9 | import { CHAIN_TO_WS_ENDPOINT, getBlockRangeAvail } from '@/app/utils/avail';
10 | 
11 | type DataCommitmentRange = {
12 | startBlockNumber: number;
13 | endBlockNumber: number;
14 | dataCommitment: Uint8Array;
15 | stateCommitment: Uint8Array;
16 | commitmentTreeSize: number;
17 | };
18 | 
19 | async function getBlockHash(blockNumber: number, chainName: string): Promise<string | undefined> {
20 | const api = await initialize(CHAIN_TO_WS_ENDPOINT.get(chainName.toLowerCase()) as string);
21 | const rpc: any = api.rpc;
22 | try {
23 | const blockHash = await rpc.chain.getBlockHash(blockNumber);
24 | await disconnect();
25 | return blockHash.toHex();
26 | } catch (error) {
27 | console.log(error);
28 | }
29 | }
30 | 
31 | function isEqualUint8Array(a: Uint8Array, b: Uint8Array): boolean {
32 | if (a.length != b.length) {
33 | return false;
34 | }
35 | for (let i = 0; i < a.length; i++) {
36 | if (a[i] != b[i]) {
37 | return false;
38 | }
39 | }
40 | return true;
41 | }
42 | 
43 | async function getBlockNumber(blockHash: string, chainName: string): Promise<number | undefined> {
44 | const api = await initialize(CHAIN_TO_WS_ENDPOINT.get(chainName.toLowerCase()) as string);
45 | const rpc: any = api.rpc;
46 | try {
47 | const block = await rpc.chain.getBlock(blockHash);
48 | await disconnect();
49 | return block.block.header.number.toNumber();
50 | } catch (error) {
51 | console.log(error);
52 | }
53 | }
54 | 
55 | /** Fetch the dataRoot of blockNb from the given RPC. */
56 | const fetchDataRoot = async (api: ApiPromise, blockNb: number): Promise<Uint8Array> => {
57 | const blockHash = await api.rpc.chain.getBlockHash(blockNb);
58 | const header = await api.rpc.chain.getHeader(blockHash);
59 | const extension = header.toJSON().extension as {
60 | [version: string]: { commitment: { dataRoot: string } };
61 | };
62 | if (!extension || Object.keys(extension).length === 0) {
63 | throw new Error(`Extension not found for block ${blockNb}`);
64 | }
65 | 
66 | // Resilient to future changes in the extension format. Ex. v3, v4, etc.
67 | let dataRoot = extension[Object.keys(extension)[0]].commitment?.dataRoot;
68 | if (!dataRoot) throw new Error(`Data root not found for block ${blockNb}`);
69 | if (dataRoot.startsWith('0x')) dataRoot = dataRoot.slice(2);
70 | return new Uint8Array(Buffer.from(dataRoot, 'hex'));
71 | };
72 | 
73 | /** Fetch data roots for the range [startBlock, endBlock - 1] inclusive from the RPC.
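 * e.g. startBlock = 100, endBlock = 104 fetches the data roots of blocks 100, 101, 102 and 103.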
*/
74 | const fetchDataRootsForRange = async (
75 | startBlock: number,
76 | endBlock: number,
77 | chainName: string
78 | ): Promise<Uint8Array[]> => {
79 | const api = await initialize(CHAIN_TO_WS_ENDPOINT.get(chainName.toLowerCase()) as string);
80 | 
81 | const blockNumbers = Array.from(
82 | { length: endBlock - startBlock },
83 | (_, i) => startBlock + i
84 | );
85 | 
86 | const dataRoots = await Promise.all(blockNumbers.map(x => fetchDataRoot(api, x)));
87 | return dataRoots;
88 | };
89 | 
90 | /** Compute the Merkle tree branch for the requested block number. */
91 | function computeMerkleLayersAndBranch(commitmentTreeSize: number, dataRoots: Uint8Array[], index: number): Uint8Array[] {
92 | if (dataRoots.length != commitmentTreeSize) {
93 | console.log('Wrong number of leaves');
94 | 
95 | throw new Error('Invalid number of leaves!');
96 | }
97 | 
98 | let nodes = dataRoots;
99 | 
100 | let branch: Uint8Array[] = [];
101 | 
102 | let indexSoFar = index;
103 | 
104 | while (nodes.length > 1) {
105 | let nextLevelNodes: Uint8Array[] = [];
106 | 
107 | for (let i = 0; i < nodes.length; i += 2) {
108 | let leftChild = nodes[i];
109 | let rightChild = nodes[i + 1];
110 | // Append the left and right child and hash them together.
111 | const combinedArray = new Uint8Array(leftChild.length + rightChild.length);
112 | combinedArray.set(leftChild, 0);
113 | combinedArray.set(rightChild, leftChild.length);
114 | const hash = createHash('sha256').update(combinedArray).digest('hex');
115 | nextLevelNodes.push(new Uint8Array(Buffer.from(hash, 'hex')));
116 | 
117 | // This is the index of the node in the next level.
118 | if (indexSoFar - (indexSoFar % 2) == i) {
119 | if (indexSoFar % 2 == 0) {
120 | // If leftChild is the node we are looking for, then the right child is the sibling.
121 | branch.push(rightChild);
122 | } else {
123 | // If rightChild is the node we are looking for, then the left child is the sibling.
124 | branch.push(leftChild);
125 | }
126 | }
127 | }
128 | indexSoFar = Math.floor(indexSoFar / 2);
129 | nodes = nextLevelNodes;
130 | }
131 | 
132 | return branch; // The Merkle branch: one sibling hash per level, from leaf to root.
133 | }
134 | 
135 | /** Parse a log retrieved from eth_getLogs. */
136 | function parseLog(log: any): DataCommitmentRange {
137 | // Parse dataCommitment and stateCommitment which are 0x prefixed hex strings.
138 | let dataCommitment = new Uint8Array(Buffer.from(log.args.dataCommitment.substring(2), 'hex'));
139 | let stateCommitment = new Uint8Array(Buffer.from(log.args.stateCommitment.substring(2), 'hex'));
140 | return {
141 | startBlockNumber: log.args.startBlock,
142 | endBlockNumber: log.args.endBlock,
143 | dataCommitment: dataCommitment,
144 | stateCommitment: stateCommitment,
145 | commitmentTreeSize: log.args.headerRangeCommitmentTreeSize
146 | };
147 | }
148 | 
149 | /** Binary search for the log that contains the target block. */
150 | function binarySearchForLog(logs: any[], targetBlock: number): DataCommitmentRange {
151 | let left = 0;
152 | let right = logs.length - 1;
153 | while (left <= right) {
154 | let mid = Math.floor((left + right) / 2);
155 | let log = parseLog(logs[mid]);
156 | // Check if the targetBlock is contained within startBlock + 1 and endBlock of the log.
157 | if (targetBlock >= log.startBlockNumber + 1 && targetBlock <= log.endBlockNumber) {
158 | return log;
159 | }
160 | if (targetBlock < log.startBlockNumber + 1) {
161 | right = mid - 1;
162 | } else {
163 | left = mid + 1;
164 | }
165 | }
166 | // This should never happen.
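// (unreachable in practice: getDataCommitmentRangeForBlock only calls binarySearchForLog
// after confirming the target block lies within the bounds of the fetched logs)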
167 |   throw new Error('Log not found');
168 | }
169 | 
170 | /** Find the data commitment range in the contract matching the requested block number. */
171 | async function getDataCommitmentRangeForBlock(
172 |   contractChainId: number,
173 |   contractAddress: Uint8Array,
174 |   targetBlock: number
175 | ): Promise<DataCommitmentRange | null> {
176 |   if (process.env[`RPC_${contractChainId}`] == undefined) {
177 |     throw new Error('Missing RPC URL for chain ' + contractChainId);
178 |   }
179 |   let ethereumRpc = process.env[`RPC_${contractChainId}`] as string;
180 | 
181 |   let chainInfo = getChainInfo(contractChainId);
182 |   const client = createPublicClient({
183 |     chain: chainInfo,
184 |     transport: http(ethereumRpc, {
185 |       fetchOptions: { cache: 'no-store' }
186 |     })
187 |   });
188 |   let latestBlock = await client.getBlockNumber();
189 | 
190 |   // Query in batches of 10000 blocks.
191 |   const BATCH_SIZE = 10_000;
192 | 
193 |   // TODO: Implement a more efficient search for the first log, based on a heuristic for the ETH
194 |   // block corresponding to an Avail block.
195 |   let currentBlock = Number(latestBlock);
196 | 
197 |   while (true) {
198 |     let logs = await queryLogs(
199 |       contractChainId,
200 |       contractAddress,
201 |       currentBlock - BATCH_SIZE,
202 |       currentBlock,
203 |       VECTORX_DATA_COMMITMENT_EVENT as AbiEvent,
204 |     );
205 | 
206 |     if (logs.length > 0) {
207 |       let startLog = parseLog(logs[0]);
208 |       let lastLog = parseLog(logs[logs.length - 1]);
209 |       // Check if the targetBlock is contained within startBlock + 1 and endBlock of the last log. If so,
210 |       // binary search for which log contains the targetBlock.
211 |       if (targetBlock >= startLog.startBlockNumber + 1 && targetBlock <= lastLog.endBlockNumber) {
212 |         return binarySearchForLog(logs, targetBlock);
213 |       }
214 |     } else {
215 |       console.log('No ranges found for block ' + currentBlock);
216 |       return null;
217 |     }
218 | 
219 |     currentBlock -= BATCH_SIZE;
220 |   }
221 | 
222 | }
223 | 
224 | /** Get the range hash for the given range. */
225 | function getRangeHash(startBlockNumber: number, endBlockNumber: number): Uint8Array {
226 |   let encodedRange = encodeAbiParameters(
227 |     [
228 |       { name: 'startBlockNumber', type: 'uint32' },
229 |       { name: 'endBlockNumber', type: 'uint32' }
230 |     ],
231 |     [startBlockNumber, endBlockNumber]
232 |   );
233 | 
234 |   // Strip the 0x prefix.
235 |   let encodedRangeStripped = encodedRange.substring(2);
236 | 
237 |   // Convert to bytes and hash with keccak256.
238 |   let rangeHash = keccak256(new Uint8Array(Buffer.from(encodedRangeStripped, 'hex')));
239 |   let rangeHashUint8 = new Uint8Array(Buffer.from(rangeHash.substring(2), 'hex'));
240 |   return rangeHashUint8;
241 | }
242 | 
243 | /** Verify that the merkle tree branch matches the data commitment. */
244 | function verifyMerkleBranch(
245 |   dataRoots: Uint8Array[],
246 |   branch: Uint8Array[],
247 |   index: number,
248 |   dataCommitment: Uint8Array
249 | ) {
250 |   // Verify the branch matches the data commitment.
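  // Fold upward from the leaf: an even index hashes (current || sibling), an odd index
  // hashes (sibling || current), halving the index at each level.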
251 |   let currentHash = dataRoots[index];
252 |   let indexSoFar = index;
253 |   for (let i = 0; i < branch.length; i++) {
254 |     let sibling = branch[i];
255 |     if (indexSoFar % 2 == 0) {
256 |       currentHash = createHash('sha256')
257 |         .update(Buffer.concat([currentHash, sibling]))
258 |         .digest();
259 |     } else {
260 |       currentHash = createHash('sha256')
261 |         .update(Buffer.concat([sibling, currentHash]))
262 |         .digest();
263 |     }
264 |     indexSoFar = Math.floor(indexSoFar / 2);
265 |   }
266 |   assert(
267 |     isEqualUint8Array(currentHash, dataCommitment),
268 |     'Data commitment does not match the root constructed from the Merkle tree branch! This means that the computed data commitment or the Merkle tree branch is incorrect.'
269 |   );
270 | }
271 | 
272 | // Compute the Merkle root from the dataRoots after confirming the leaf count matches the (power-of-two) commitment tree size.
273 | function computeDataCommitment(dataRoots: Uint8Array[], commitmentTreeSize: number): Uint8Array {
274 |   if (dataRoots.length != commitmentTreeSize) {
275 |     throw new Error('Data roots length must equal the commitment tree size!');
276 |   }
277 |   let level = dataRoots;
278 | 
279 |   // Continue combining pairs until we get to the root
280 |   while (level.length > 1) {
281 |     const nextLevel: Uint8Array[] = [];
282 | 
283 |     for (let i = 0; i < level.length; i += 2) {
284 |       let hashStr = createHash('sha256').update(Buffer.concat([level[i], level[i + 1]])).digest('hex');
285 |       nextLevel.push(new Uint8Array(Buffer.from(hashStr, 'hex')));
286 |     }
287 | 
288 |     level = nextLevel;
289 |   }
290 | 
291 |   return level[0];
292 | }
293 | 
294 | /**
295 |  * Get the proof for a data commitment for a specific block number on Avail against the data commitments posted by the VectorX contract.
296 |  * Required query parameters:
297 |  * - chainName: The name of the Avail chain to check.
298 |  * - contractChainId: The chain ID where the VectorX contract is deployed.
299 |  * - contractAddress: The address of the VectorX contract.
300 |  * - blockHash | blockNumber: The block hash/block number of the Avail block for which the proof is requested.
301 |  */
302 | export async function GET(req: NextRequest) {
303 |   const url = new URL(req.url);
304 | 
305 |   const chainName = url.searchParams.get('chainName');
306 |   const ethereumChainId = Number(url.searchParams.get('contractChainId'));
307 |   let address = url.searchParams.get('contractAddress');
308 |   const blockHash = url.searchParams.get('blockHash')
309 |     ? url.searchParams.get('blockHash')
310 |     : undefined;
311 |   const blockNumber = url.searchParams.get('blockNumber')
312 |     ? Number(url.searchParams.get('blockNumber'))
313 |     : undefined;
314 | 
315 |   console.log('Avail Chain name: ' + chainName);
316 |   console.log('Ethereum Chain ID: ' + ethereumChainId);
317 |   console.log('Address: ' + address);
318 |   console.log('Block hash: ' + blockHash);
319 |   console.log('Block number: ' + blockNumber);
320 | 
321 |   let requestedBlock: number;
322 | 
323 |   if (!chainName || !ethereumChainId || !address) {
324 |     return NextResponse.json({
325 |       success: false,
326 |       error: 'Invalid parameters!'
327 |     });
328 |   }
329 | 
330 |   // Strip the 0x prefix from the address (if it exists) and convert it to lowercase then Uint8Array.
331 | address = address!.toLowerCase(); 332 | if (address.startsWith('0x')) { 333 | address = address.slice(2); 334 | } 335 | const addressUint8 = new Uint8Array(Buffer.from(address!, 'hex')); 336 | 337 | try { 338 | if (blockHash === undefined) { 339 | if (blockNumber === undefined) { 340 | return NextResponse.json({ 341 | success: false, 342 | error: 'No block hash or block number provided!' 343 | }); 344 | } 345 | requestedBlock = blockNumber; 346 | } else { 347 | // Get the block number for the given block hash. 348 | let tempRequestedBlock = await getBlockNumber(blockHash!, chainName!); 349 | if (tempRequestedBlock == undefined) { 350 | return NextResponse.json({ 351 | success: false, 352 | error: 'Invalid block hash!' 353 | }); 354 | } 355 | requestedBlock = tempRequestedBlock; 356 | } 357 | } catch (error) { 358 | return NextResponse.json({ 359 | success: false, 360 | error: 'Getting block number failed!' 361 | }); 362 | } 363 | 364 | console.log('Requested block: ' + requestedBlock); 365 | 366 | let blockRange = await getBlockRangeAvail(addressUint8, ethereumChainId); 367 | if (blockRange === undefined) { 368 | return NextResponse.json({ 369 | success: false, 370 | error: 'Getting the block range covered by the VectorX contract failed!' 371 | }); 372 | } 373 | 374 | if (requestedBlock < blockRange.start || requestedBlock > blockRange.end) { 375 | return NextResponse.json({ 376 | success: false, 377 | error: `Requested block ${requestedBlock} is not in the range of blocks [${blockRange.start}, ${blockRange.end}] contained in the VectorX contract.` 378 | }); 379 | } 380 | 381 | try { 382 | let promises = [ 383 | getBlockHash(requestedBlock, chainName!), 384 | // Get the data commitment range data for the requested block number. 385 | getDataCommitmentRangeForBlock(ethereumChainId, addressUint8, requestedBlock) 386 | ]; 387 | 388 | let [requestedBlockHash, dataCommitmentRange] = await Promise.all(promises); 389 | 390 | if (dataCommitmentRange === null) { 391 | return NextResponse.json({ 392 | success: false, 393 | error: 'Requested block is not in the range of blocks contained in the VectorX contract.' 394 | }); 395 | } 396 | 397 | let { startBlockNumber, endBlockNumber, dataCommitment, stateCommitment, commitmentTreeSize } = dataCommitmentRange as DataCommitmentRange; 398 | 399 | // The Avail Merkle tree root is constructed from the data roots of blocks from the range [startBlockNumber + 1, endBlockNumber] inclusive. 400 | // Fetch all headers from the RPC. 401 | let dataRoots = await fetchDataRootsForRange( 402 | startBlockNumber + 1, 403 | endBlockNumber + 1, 404 | chainName! 405 | ); 406 | 407 | // Extend the header array to commitmentTreeSize (fill with empty bytes). 408 | if (dataRoots.length < commitmentTreeSize) { 409 | const additionalRoots = new Array(commitmentTreeSize - dataRoots.length).fill(new Uint8Array(32)); 410 | dataRoots = dataRoots.concat(additionalRoots); 411 | } 412 | 413 | // Get the merkle branch for the requested block number by computing the Merkle tree branch 414 | // of the tree constructed from the data roots. 415 | const index = requestedBlock - startBlockNumber - 1; 416 | let branch = computeMerkleLayersAndBranch(commitmentTreeSize, dataRoots, index); 417 | 418 | // Verify the Merkle tree branch against the data commitment. 
419 |     verifyMerkleBranch(dataRoots, branch, index, dataCommitment);
420 | 
421 |     const res = NextResponse.json({
422 |       data: {
423 |         blockNumber: blockNumber,
424 |         rangeHash:
425 |           '0x' +
426 |           Buffer.from(getRangeHash(startBlockNumber, endBlockNumber)).toString('hex'),
427 |         dataCommitment: '0x' + Buffer.from(dataCommitment).toString('hex'),
428 |         merkleBranch: branch.map(
429 |           (node) => '0x' + Buffer.from(new Uint8Array(node)).toString('hex')
430 |         ),
431 |         index,
432 |         totalLeaves: commitmentTreeSize,
433 |         dataRoot: '0x' + Buffer.from(dataRoots[index]).toString('hex'),
434 |         blockHash: requestedBlockHash as string
435 |       }
436 |     });
437 | 
438 |     // Cache for 24 hours.
439 |     res.headers.set('CDN-Cache-Control', 'public, max-age=86400');
440 | 
441 |     return res;
442 |   } catch (error) {
443 |     console.log(error);
444 |     // TODO: Better logging, come back to this when upgrading to mainnet.
445 |     return NextResponse.json({
446 |       success: false,
447 |       error
448 |     });
449 |   }
450 | }
451 | 
--------------------------------------------------------------------------------
/query/app/utils/avail.ts:
--------------------------------------------------------------------------------
1 | import { VECTORX_ABI, VECTORX_DATA_COMMITMENT_EVENT, VECTORX_HEAD_UPDATE_EVENT, VECTORX_INITIALIZED_EVENT } from '@/app/utils/abi';
2 | import { HealthInfo, getBlocksSinceLastLog, getChainInfo, queryEthereumBlockByTimestamp, queryLogs, queryLogsWithBatches } from '@/app/utils/shared';
3 | import { disconnect, initialize } from 'avail-js-sdk';
4 | import { createPublicClient, http } from 'viem';
5 | 
6 | type RangeInfo = {
7 |   start: number;
8 |   end: number;
9 | };
10 | 
11 | // List of Avail chains.
12 | export const CHAIN_TO_WS_ENDPOINT = new Map([
13 |   ['hex', process.env.AVAIL_WS_HEX as string],
14 |   ['turing', process.env.AVAIL_WS_TURING as string],
15 |   ['mainnet', process.env.AVAIL_WS_MAINNET as string],
16 | ]);
17 | 
18 | import deploymentData from "./deployments.json";
19 | 
20 | export interface DeploymentConfig {
21 |   deployments: {
22 |     contractChainId: number;
23 |     contractAddress: string;
24 |     cursorStartBlock: number;
25 |   }[];
26 | }
27 | 
28 | function readDeploymentConfig(): DeploymentConfig {
29 |   return deploymentData as DeploymentConfig;
30 | }
31 | 
32 | const deploymentConfig = readDeploymentConfig();
33 | 
34 | export async function getHealthStatusAvail(
35 |   contractAddress: Uint8Array,
36 |   ethereumChainId: number,
37 |   sourceChainName: string,
38 |   maxDelaySeconds: bigint
39 | ): Promise<HealthInfo> {
40 |   if (process.env[`RPC_${ethereumChainId}`] == undefined) {
41 |     throw new Error('Missing RPC URL for chain ' + ethereumChainId);
42 |   }
43 |   let ethereumRpc = process.env[`RPC_${ethereumChainId}`] as string;
44 |   if (process.env[`AVAIL_WS_${sourceChainName.toUpperCase()}`] == undefined) {
45 |     throw new Error('Missing Avail WS URL for chain ' + sourceChainName);
46 |   }
47 |   let availRpc = process.env[`AVAIL_WS_${sourceChainName.toUpperCase()}`] as string;
48 | 
49 |   const api = await initialize(availRpc);
50 |   const rpc: any = api.rpc;
51 |   const finalizedHead = await rpc.chain.getFinalizedHead();
52 |   const finalizedHeader = await api.rpc.chain.getHeader(finalizedHead);
53 |   const availHeadBlockNb = finalizedHeader.number.toNumber();
54 | 
55 |   await disconnect();
56 | 
57 |   let chainInfo = getChainInfo(ethereumChainId);
58 | 
59 |   const client = createPublicClient({
60 |     chain: chainInfo,
61 |     transport: http(ethereumRpc, {
62 |       fetchOptions: { cache: 'no-store' }
63 |     })
64 |   });
65 | 
66 |   const ethCurrentBlock = await client.getBlock();
67 |   let ethCurrentBlockTimestamp = ethCurrentBlock.timestamp;
68 |   let ethCurrentBlockNumber = ethCurrentBlock.number;
69 | 
70 |   // Get the number of blocks since the last log and whether a log was emitted in the last 10 * maxDelaySeconds.
71 |   let logData = await getBlocksSinceLastLog(ethereumChainId, ethereumRpc, ethCurrentBlockTimestamp, maxDelaySeconds, contractAddress, ethCurrentBlockNumber, VECTORX_HEAD_UPDATE_EVENT);
72 | 
73 |   let address = Buffer.from(contractAddress).toString('hex');
74 | 
75 |   // Read data from chain.
76 |   const latestVectorBlockNb: number = (await client.readContract({
77 |     address: `0x${address}`,
78 |     abi: VECTORX_ABI,
79 |     functionName: 'latestBlock'
80 |   })) as number;
81 | 
82 |   let lastLogBlock = await client.getBlock({ blockNumber: BigInt(logData.lastLogBlockNumber) });
83 | 
84 |   return {
85 |     logEmitted: logData.logEmitted,
86 |     ethBlocksSinceLastLog: Number(ethCurrentBlockNumber) - logData.lastLogBlockNumber,
87 |     lastLogTimestamp: Number(lastLogBlock.timestamp),
88 |     blocksBehindHead: availHeadBlockNb - latestVectorBlockNb
89 |   };
90 | }
91 | 
92 | // The contract address is passed as raw bytes and converted to a '0x'-prefixed hex string below.
93 | export async function getBlockRangeAvail(contractAddress: Uint8Array, ethereumChainId: number): Promise<RangeInfo | undefined> {
94 |   if (process.env[`RPC_${ethereumChainId}`] == undefined) {
95 |     throw new Error('Missing RPC URL for chain ' + ethereumChainId);
96 |   }
97 |   let ethereumRpc = process.env[`RPC_${ethereumChainId}`] as string;
98 | 
99 |   // Query in batches of 10_000 blocks.
100 |   const BATCH_SIZE = 10_000;
101 | 
102 |   let chainInfo = getChainInfo(ethereumChainId);
103 |   const client = createPublicClient({
104 |     chain: chainInfo,
105 |     transport: http(ethereumRpc, {
106 |       fetchOptions: { cache: 'no-store' }
107 |     })
108 |   });
109 |   let latestBlock = await client.getBlockNumber();
110 | 
111 |   // Convert contract address to a 0x prefixed string.
112 |   let hexPrefixContractAddress = `0x` + Buffer.from(contractAddress).toString('hex');
113 |   console.log('Hex prefix contract address: ' + hexPrefixContractAddress);
114 | 
115 |   // Check if there is a matching deployment config for the given contract address and chain id.
116 |   let deployment = deploymentConfig.deployments.find((deployment) => deployment.contractAddress.toLowerCase() === hexPrefixContractAddress.toLowerCase() && deployment.contractChainId === ethereumChainId);
117 |   if (deployment == undefined) {
118 |     throw new Error('Deployment config not found for contract address ' + hexPrefixContractAddress + ' on chain ' + ethereumChainId);
119 |   }
120 | 
121 |   let contractRangeStartBlock = 0;
122 |   let contractRangeEndBlock = 0;
123 | 
124 |   // Find the first data commitment log after the cursor start block.
125 |   let firstDataCommitmentCursor = Number(deployment.cursorStartBlock);
126 |   while (true) {
127 |     let dataCommitmentLogs: any = await queryLogsWithBatches(ethereumChainId, contractAddress, firstDataCommitmentCursor, firstDataCommitmentCursor + BATCH_SIZE, VECTORX_DATA_COMMITMENT_EVENT, BATCH_SIZE);
128 |     if (dataCommitmentLogs.length == 0) {
129 |       firstDataCommitmentCursor += BATCH_SIZE;
130 |       if (firstDataCommitmentCursor > latestBlock) {
131 |         throw new Error('No data commitment logs found');
132 |       }
133 |       continue;
134 |     }
135 | 
136 |     // The first data commitment log is the oldest one.
137 |     // Note: The +1 is because the start block in Avail is one block ahead of the event start block.
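    // (Mirrors the indexing in query/app/api/route.ts: a committed range covers the data
    // roots of blocks [startBlock + 1, endBlock].)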
138 | contractRangeStartBlock = dataCommitmentLogs[0].args.startBlock + 1; 139 | break; 140 | } 141 | 142 | let mostRecentDataCommitmentCursor = Number(latestBlock); 143 | while (true) { 144 | // Search for data commitment logs starting from the most recent block number. 145 | let dataCommitmentLogs: any = await queryLogsWithBatches(ethereumChainId, contractAddress, mostRecentDataCommitmentCursor - BATCH_SIZE, mostRecentDataCommitmentCursor, VECTORX_DATA_COMMITMENT_EVENT, BATCH_SIZE); 146 | if (dataCommitmentLogs.length == 0) { 147 | mostRecentDataCommitmentCursor -= BATCH_SIZE; 148 | if (mostRecentDataCommitmentCursor < contractRangeStartBlock) { 149 | throw new Error('No data commitment logs found'); 150 | } 151 | continue; 152 | } 153 | 154 | // The last log is the most recent one. 155 | let greatestEndBlockSoFar = dataCommitmentLogs[dataCommitmentLogs.length - 1].args.endBlock; 156 | 157 | contractRangeEndBlock = greatestEndBlockSoFar; 158 | break; 159 | } 160 | 161 | return { 162 | start: contractRangeStartBlock, 163 | end: contractRangeEndBlock 164 | }; 165 | } 166 | -------------------------------------------------------------------------------- /query/app/utils/deployments.json: -------------------------------------------------------------------------------- 1 | { 2 | "deployments": [ 3 | { 4 | "contractChainId": 1, 5 | "contractAddress": "0x02993cdC11213985b9B13224f3aF289F03bf298d", 6 | "cursorStartBlock": 20235805, 7 | "availChainId": "mainnet" 8 | }, 9 | { 10 | "contractChainId": 324, 11 | "contractAddress": "0x72f36fD57e6B4b88107aE6AE702EC1Bf777A1491", 12 | "cursorStartBlock": 56272898, 13 | "availChainId": "mainnet" 14 | }, 15 | { 16 | "contractChainId": 84532, 17 | "contractAddress": "0x13cBE46E168460a101c07efb6Ab7B9ec637F02aA", 18 | "cursorStartBlock": 12169693, 19 | "availChainId": "turing" 20 | }, 21 | { 22 | "contractChainId": 11155111, 23 | "contractAddress": "0xe542db219a7e2b29c7aeaeace242c9a2cd528f96", 24 | "cursorStartBlock": 6204379, 25 | "availChainId": "turing" 26 | }, 27 | { 28 | "contractChainId": 421614, 29 | "contractAddress": "0xA712dfec48AF3a78419A8FF90fE8f97Ae74680F0", 30 | "cursorStartBlock": 59163104, 31 | "availChainId": "turing" 32 | }, 33 | { 34 | "contractChainId": 300, 35 | "contractAddress": "0x72f36fD57e6B4b88107aE6AE702EC1Bf777A1491", 36 | "cursorStartBlock": 4868682, 37 | "availChainId": "turing" 38 | }, 39 | { 40 | "contractChainId": 11155111, 41 | "contractAddress": "0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75", 42 | "cursorStartBlock": 6204379, 43 | "availChainId": "hex" 44 | } 45 | ] 46 | } 47 | -------------------------------------------------------------------------------- /query/app/utils/shared.ts: -------------------------------------------------------------------------------- 1 | import { createPublicClient, http } from 'viem'; 2 | import { mainnet, goerli, gnosis, sepolia, holesky, arbitrumSepolia, arbitrum, scrollSepolia, optimism, optimismGoerli, base, baseSepolia } from 'viem/chains'; 3 | import { AbiEvent } from 'abitype'; 4 | 5 | const CHAINS = [mainnet, goerli, gnosis, sepolia, holesky, arbitrumSepolia, arbitrum, scrollSepolia, optimism, optimismGoerli, base, baseSepolia]; 6 | 7 | export type HealthInfo = { 8 | blocksBehindHead: number; 9 | ethBlocksSinceLastLog: number; 10 | lastLogTimestamp: number; 11 | logEmitted: boolean; 12 | }; 13 | 14 | export function currentUnixTimestamp() { 15 | return BigInt(Math.floor(Date.now() / 1000)); 16 | } 17 | 18 | export function unixTimestampFromSlot(slot: bigint, genesisTime: bigint, 
secondsPerSlot: bigint) { 19 | return genesisTime + slot * secondsPerSlot; 20 | } 21 | 22 | // Gets the most recent block before a given timestamp. 23 | export async function queryEthereumBlockByTimestamp( 24 | ethereumChainId: number, 25 | ethereumRpc: string, 26 | timestamp: number 27 | ) { 28 | let chainInfo = getChainInfo(ethereumChainId); 29 | const client = createPublicClient({ 30 | chain: chainInfo, 31 | transport: http(ethereumRpc, { 32 | fetchOptions: { cache: 'no-store' } 33 | }) 34 | }); 35 | 36 | let high = Number(await client.getBlockNumber()); 37 | let low = high; 38 | let mid; 39 | let found = false; 40 | 41 | // Search to find a block with a lower timestamp 42 | let searchFactor = 5; 43 | for (let i = 1; !found && low > 0; i++) { 44 | low = high - (i ** searchFactor); 45 | if (low < 0) { 46 | low = 0; 47 | } 48 | const block = await client.getBlock({ blockNumber: BigInt(low) }); 49 | if (BigInt(block.timestamp) < BigInt(timestamp)) { 50 | found = true; 51 | } 52 | } 53 | 54 | if (!found) { 55 | throw new Error('No block found before the given timestamp'); 56 | } 57 | 58 | // Binary search between low and high to find the most recent block before the given timestamp 59 | while (low <= high) { 60 | mid = low + Math.floor((high - low) / 2); 61 | const block = await client.getBlock({ blockNumber: BigInt(mid) }); 62 | 63 | if (BigInt(block.timestamp) < timestamp) { 64 | if (mid === high || BigInt((await client.getBlock({ blockNumber: BigInt(mid + 1) })).timestamp) >= timestamp) { 65 | return block; // This is the most recent block before the given timestamp 66 | } 67 | low = mid + 1; 68 | } else { 69 | high = mid - 1; 70 | } 71 | } 72 | 73 | throw new Error('Failed to find the most recent block before the given timestamp'); 74 | } 75 | 76 | export async function queryLogs( 77 | ethereumChainId: number, 78 | contractAddress: Uint8Array, 79 | fromBlock: number, 80 | toBlock: number, 81 | event: AbiEvent, 82 | ) { 83 | if (process.env[`RPC_${ethereumChainId}`] == undefined) { 84 | throw new Error('Missing RPC URL for chain ' + ethereumChainId); 85 | } 86 | let ethereumRpc = process.env[`RPC_${ethereumChainId}`] as string; 87 | let chainInfo = getChainInfo(ethereumChainId); 88 | const client = createPublicClient({ 89 | chain: chainInfo, 90 | transport: http(ethereumRpc, { 91 | fetchOptions: { cache: 'no-store' } 92 | }) 93 | }); 94 | let address = Buffer.from(contractAddress).toString('hex'); 95 | let logs = await client.getLogs({ 96 | address: `0x${address}`, 97 | event, 98 | fromBlock: BigInt(fromBlock), 99 | toBlock: BigInt(toBlock) 100 | }); 101 | return logs; 102 | } 103 | 104 | // Query logs in batches of maxLogsPerQuery. 
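// Note: despite the name, maxLogsPerQuery bounds the block span of each eth_getLogs call;
// many RPC providers reject queries over ranges that are too wide.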
105 | export async function queryLogsWithBatches( 106 | ethereumChainId: number, 107 | contractAddress: Uint8Array, 108 | fromBlock: number, 109 | toBlock: number, 110 | event: AbiEvent, 111 | maxLogsPerQuery: number, 112 | ) { 113 | let logs: any = []; 114 | let currentBlock = fromBlock; 115 | while (currentBlock < toBlock) { 116 | let batchEndBlock = currentBlock + maxLogsPerQuery; 117 | if (batchEndBlock > toBlock) { 118 | batchEndBlock = toBlock; 119 | } 120 | let newLogs = await queryLogs(ethereumChainId, contractAddress, currentBlock, batchEndBlock, event); 121 | logs = logs.concat(newLogs); 122 | currentBlock = batchEndBlock + 1; 123 | } 124 | return logs; 125 | } 126 | 127 | const SLOTS_PER_PERIOD = 8192n; 128 | export function getSyncCommitteePeriod(slot: bigint): bigint { 129 | return slot / SLOTS_PER_PERIOD; 130 | } 131 | 132 | export function getConsensusRpc(chainId?: number) { 133 | if (!chainId) { 134 | const chainIdStr = process.env.CHAIN_ID; 135 | if (!chainIdStr) { 136 | throw new Error('Default CHAIN_ID env not set'); 137 | } 138 | chainId = Number(chainIdStr); 139 | } 140 | switch (chainId) { 141 | case 1: 142 | return process.env.CONSENSUS_RPC_1; 143 | case 5: 144 | return process.env.CONSENSUS_RPC_5; 145 | case 17000: 146 | return process.env.CONSENSUS_RPC_17000; 147 | case 11155111: 148 | return process.env.CONSENSUS_RPC_11155111; 149 | default: 150 | throw new Error('Chain not supported'); 151 | } 152 | } 153 | 154 | export function getChainInfo(chainId: number) { 155 | for (const chain of CHAINS) { 156 | if (chain.id === chainId) { 157 | return chain; 158 | } 159 | } 160 | throw new Error(`No chain found for chainId ${chainId}`); 161 | } 162 | 163 | // Returns the number of blocks since the last log and whether a log was emitted in the last 10 * maxDelaySeconds. 164 | export async function getBlocksSinceLastLog(ethereumChainId: number, ethereumRpc: string, ethCurrentBlockTimestamp: bigint, maxDelaySeconds: bigint, contractAddress: Uint8Array, ethCurrentBlockNumber: bigint, event: AbiEvent): Promise<{ lastLogBlockNumber: number, logEmitted: boolean }> { 165 | let queryBlock = await queryEthereumBlockByTimestamp(ethereumChainId, ethereumRpc, Number(ethCurrentBlockTimestamp - maxDelaySeconds)); 166 | 167 | let diffSeconds = Number(ethCurrentBlockTimestamp) - Number(queryBlock.timestamp); 168 | let diffBlocks = Number(ethCurrentBlockNumber) - Number(queryBlock.number); 169 | 170 | const headUpdateLogs = await queryLogsWithBatches( 171 | ethereumChainId, 172 | contractAddress, 173 | Number(ethCurrentBlockNumber) - (diffBlocks * 10), 174 | Number(ethCurrentBlockNumber), 175 | event, 176 | diffBlocks 177 | ); 178 | 179 | // Sort headUpdateLogs by block number descending. 
180 | headUpdateLogs.sort((a: any, b: any) => Number(b.blockNumber - a.blockNumber)); 181 | let lastLogBlockNumber = Number(ethCurrentBlockNumber) - diffBlocks * 10; 182 | if (headUpdateLogs.length > 0) { 183 | lastLogBlockNumber = Number(headUpdateLogs[0].blockNumber); 184 | } 185 | 186 | return { 187 | lastLogBlockNumber, 188 | logEmitted: headUpdateLogs.length > 0 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /query/next.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = {}; 3 | 4 | export default nextConfig; 5 | -------------------------------------------------------------------------------- /query/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@succinctlabs/vectorx-query", 3 | "version": "0.1.0", 4 | "private": true, 5 | "type": "module", 6 | "scripts": { 7 | "dev": "next dev", 8 | "build": "next build", 9 | "start": "next start", 10 | "lint": "next lint" 11 | }, 12 | "dependencies": { 13 | "@aws-sdk/client-dynamodb": "^3.596.0", 14 | "abitype": "^1.0.2", 15 | "avail-js-sdk": "^0.2.12", 16 | "next": "14.2.3", 17 | "react": "^18", 18 | "react-dom": "^18", 19 | "viem": "^2.13.10" 20 | }, 21 | "devDependencies": { 22 | "@types/node": "^20.14.2", 23 | "@types/react": "^18", 24 | "@types/react-dom": "^18", 25 | "eslint": "^8", 26 | "eslint-config-next": "14.2.3", 27 | "postcss": "^8", 28 | "tailwindcss": "^3.4.1", 29 | "typescript": "^5" 30 | } 31 | } -------------------------------------------------------------------------------- /query/postcss.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('postcss-load-config').Config} */ 2 | const config = { 3 | plugins: { 4 | tailwindcss: {}, 5 | }, 6 | }; 7 | 8 | export default config; 9 | -------------------------------------------------------------------------------- /query/tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from "tailwindcss"; 2 | 3 | const config: Config = { 4 | content: [ 5 | "./pages/**/*.{js,ts,jsx,tsx,mdx}", 6 | "./components/**/*.{js,ts,jsx,tsx,mdx}", 7 | "./app/**/*.{js,ts,jsx,tsx,mdx}", 8 | ], 9 | theme: { 10 | extend: { 11 | backgroundImage: { 12 | "gradient-radial": "radial-gradient(var(--tw-gradient-stops))", 13 | "gradient-conic": 14 | "conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))", 15 | }, 16 | }, 17 | }, 18 | plugins: [], 19 | }; 20 | export default config; 21 | -------------------------------------------------------------------------------- /query/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "lib": [ 5 | "dom", 6 | "dom.iterable", 7 | "esnext" 8 | ], 9 | "allowJs": true, 10 | "skipLibCheck": true, 11 | "strict": true, 12 | "noEmit": true, 13 | "esModuleInterop": true, 14 | "module": "esnext", 15 | "moduleResolution": "bundler", 16 | "resolveJsonModule": true, 17 | "isolatedModules": true, 18 | "jsx": "preserve", 19 | "incremental": true, 20 | "plugins": [ 21 | { 22 | "name": "next" 23 | } 24 | ], 25 | "paths": { 26 | "@/*": [ 27 | "./*" 28 | ] 29 | } 30 | }, 31 | "include": [ 32 | "next-env.d.ts", 33 | "**/*.ts", 34 | "**/*.tsx", 35 | ".next/types/**/*.ts" 36 | ], 37 | "exclude": [ 38 | "node_modules" 39 | ] 40 | } 
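The /api route above returns everything a client needs to check an Avail block's data root against the commitment stored on-chain. As a quick illustration (this is a sketch, not a file in this repository), a client can call the deployed query service and re-run the same Merkle fold locally. The base URL comes from VECTORX_QUERY_URL in script/.env.example and the chain/contract values from query/app/utils/deployments.json; the /api path and the block number are assumptions for the example.

// fetch-and-verify sketch (hypothetical client for query/app/api/route.ts)
import { createHash } from 'crypto';

async function fetchAndVerifyProof(): Promise<void> {
  const params = new URLSearchParams({
    chainName: 'turing',                                           // Avail chain name
    contractChainId: '11155111',                                   // Sepolia
    contractAddress: '0xe542db219a7e2b29c7aeaeace242c9a2cd528f96', // from deployments.json
    blockNumber: '100000',                                         // example Avail block
  });
  // Assumes the Next.js app is served at the VECTORX_QUERY_URL from script/.env.example.
  const res = await fetch(`https://vectorx-query.succinct.xyz/api?${params}`);
  const { data } = await res.json();

  // Re-run the same fold as verifyMerkleBranch(): starting from the leaf's data root,
  // an even index means the current node is a left child.
  let hash = Buffer.from(data.dataRoot.slice(2), 'hex');
  let index: number = data.index;
  for (const node of data.merkleBranch as string[]) {
    const sibling = Buffer.from(node.slice(2), 'hex');
    const pair = index % 2 === 0 ? [hash, sibling] : [sibling, hash];
    hash = createHash('sha256').update(Buffer.concat(pair)).digest();
    index = Math.floor(index / 2);
  }
  console.log('branch matches commitment:', '0x' + hash.toString('hex') === data.dataCommitment);
}

fetchAndVerifyProof().catch(console.error);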
-------------------------------------------------------------------------------- /script/.env.example: -------------------------------------------------------------------------------- 1 | # Avail 2 | AVAIL_URL= 3 | AVAIL_CHAIN_ID={hex, turing, mainnet} 4 | # Querying justifications. 5 | VECTORX_QUERY_URL=https://vectorx-query.succinct.xyz 6 | 7 | # SP1 Config 8 | NETWORK_PRIVATE_KEY= 9 | 10 | # Optional 11 | NETWORK_RPC_URL= 12 | 13 | # If in mock mode, set SP1_PROVER to "mock". 14 | SP1_PROVER= 15 | 16 | ## If set to true, fill out the KMS config. Else, fill out the local relayer config. 17 | USE_KMS_RELAYER= 18 | # KMS Relayer Config 19 | SECURE_RELAYER_ENDPOINT= 20 | SECURE_RELAYER_API_KEY= 21 | # Local Relayer Config 22 | PRIVATE_KEY= 23 | 24 | # VectorX Contract 25 | RPC_URL= 26 | CHAIN_ID= 27 | CONTRACT_ADDRESS= 28 | 29 | # Interval Config [Optional] 30 | LOOP_INTERVAL_MINS= 31 | BLOCK_UPDATE_INTERVAL= -------------------------------------------------------------------------------- /script/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | version = "0.1.0" 3 | name = "sp1-vectorx-script" 4 | edition = "2021" 5 | 6 | [[bin]] 7 | name = "operator" 8 | path = "bin/operator.rs" 9 | 10 | [[bin]] 11 | name = "genesis" 12 | path = "bin/genesis.rs" 13 | 14 | [[bin]] 15 | name = "vkey" 16 | path = "bin/vkey.rs" 17 | 18 | [[bin]] 19 | name = "test" 20 | path = "bin/test.rs" 21 | 22 | [[bin]] 23 | name = "costs" 24 | path = "bin/costs.rs" 25 | 26 | [dependencies] 27 | sp1-vector-primitives = { workspace = true } 28 | services = { workspace = true } 29 | sp1-sdk = { workspace = true } 30 | hex = { workspace = true } 31 | tokio = { workspace = true } 32 | 33 | dotenv = { workspace = true } 34 | avail-subxt = { workspace = true } 35 | anyhow = { workspace = true } 36 | clap = { workspace = true, features = ["derive", "env"] } 37 | reqwest = { workspace = true } 38 | futures = { workspace = true } 39 | 40 | alloy = { workspace = true } 41 | serde_json = { workspace = true } 42 | serde = { workspace = true } 43 | chrono = { workspace = true } 44 | csv = "1.3.1" 45 | rand = "0.8.5" 46 | sp1-build = { workspace = true } 47 | 48 | # Logging 49 | tracing.workspace = true 50 | tracing-subscriber.workspace = true 51 | 52 | [build-dependencies] 53 | sp1-build = { workspace = true } 54 | -------------------------------------------------------------------------------- /script/bin/costs.rs: -------------------------------------------------------------------------------- 1 | use alloy::consensus::BlockHeader; 2 | use alloy::eips::BlockId; 3 | use alloy::network::primitives::HeaderResponse; 4 | use alloy::rpc::types::{BlockTransactionsKind, Filter}; 5 | use alloy::sol; 6 | use alloy::sol_types::SolEvent; 7 | use alloy::{ 8 | network::{BlockResponse, Network}, 9 | primitives::{Address, B256}, 10 | providers::{Provider, ProviderBuilder}, 11 | }; 12 | use anyhow::Result; 13 | use chrono::{TimeZone, Utc}; 14 | use clap::Parser; 15 | use futures::StreamExt; 16 | use reqwest::Url; 17 | use std::cmp::Ordering; 18 | use std::collections::HashMap; 19 | use std::str::FromStr; 20 | use std::{env, fs}; 21 | use tracing_subscriber::EnvFilter; 22 | 23 | #[derive(Parser, Debug, Clone)] 24 | #[command(about = "Get transaction costs for an address in a given month")] 25 | pub struct CostScriptArgs { 26 | #[arg(long)] 27 | pub from_address: String, 28 | #[arg(long)] 29 | pub ethereum_rpc: String, 30 | #[arg(long)] 31 | pub sepolia_rpc: String, 32 | #[arg(long)] 33 | 
pub base_sepolia_rpc: String,
34 |     #[arg(long)]
35 |     pub arbitrum_sepolia_rpc: String,
36 |     #[arg(long)]
37 |     pub holesky_rpc: String,
38 |     #[arg(long)]
39 |     pub month: u32,
40 |     #[arg(long)]
41 |     pub year: i32,
42 | }
43 | 
44 | sol! {
45 |     event HeadUpdate(uint32 blockNumber, bytes32 headerHash);
46 | }
47 | 
48 | pub fn get_contract_address(chain_id: u64) -> Option<Address> {
49 |     match chain_id {
50 |         1 => Some(Address::from_str("0x02993cdC11213985b9B13224f3aF289F03bf298d").unwrap()),
51 |         17000 => Some(Address::from_str("0x8a48b5184dEc29E7276BF74d1C1d30d032F31e19").unwrap()),
52 |         11155111 => Some(Address::from_str("0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75").unwrap()),
53 |         421614 => Some(Address::from_str("0xbc281367e1F2dB1c3e92255AA2F040B1c642ec75").unwrap()),
54 |         84532 => Some(Address::from_str("0x13cBE46E168460a101c07efb6Ab7B9ec637F02aA").unwrap()),
55 |         _ => None,
56 |     }
57 | }
58 | 
59 | #[derive(serde::Serialize, Clone)]
60 | struct RelayTransaction {
61 |     chain_id: u64,
62 |     tx_hash: B256,
63 |     tx_fee_wei: u128,
64 |     from: Address,
65 |     to: Address,
66 | }
67 | 
68 | async fn get_receipts_for_chain(
69 |     from_addr: Address,
70 |     rpc_url: &str,
71 |     month: u32,
72 |     year: i32,
73 | ) -> Result<Vec<RelayTransaction>> {
74 |     let provider = ProviderBuilder::new().on_http(Url::parse(rpc_url).unwrap());
75 |     let chain_id = provider.get_chain_id().await?;
76 | 
77 |     let to_addr = get_contract_address(chain_id).expect("Chain ID not supported");
78 | 
79 |     // Get start and end timestamps for the month
80 |     let start = Utc.with_ymd_and_hms(year, month, 1, 0, 0, 0).unwrap();
81 |     let end = if month == 12 {
82 |         Utc.with_ymd_and_hms(year + 1, 1, 1, 0, 0, 0).unwrap()
83 |     } else {
84 |         Utc.with_ymd_and_hms(year, month + 1, 1, 0, 0, 0).unwrap()
85 |     };
86 | 
87 |     let start_block = find_block_by_timestamp(&provider, start.timestamp() as u64).await?;
88 |     let end_block = find_block_by_timestamp(&provider, end.timestamp() as u64).await?;
89 | 
90 |     let mut tx_hashes = Vec::new();
91 |     // The maximum number of blocks that Alchemy will return logs for in a single request.
92 |     const ALCHEMY_CHUNK_SIZE: u64 = 100_000;
93 | 
94 |     let chunks = (start_block.1..=end_block.1)
95 |         .step_by(ALCHEMY_CHUNK_SIZE as usize)
96 |         .map(|chunk_start| {
97 |             let chunk_end = (chunk_start + ALCHEMY_CHUNK_SIZE - 1).min(end_block.1);
98 |             let provider = provider.clone();
99 | 
100 |             async move {
101 |                 let filter = Filter::new()
102 |                     .from_block(chunk_start)
103 |                     .to_block(chunk_end)
104 |                     .address(to_addr)
105 |                     .event_signature(HeadUpdate::SIGNATURE_HASH);
106 |                 provider.get_logs(&filter).await
107 |             }
108 |         });
109 | 
110 |     let mut stream = futures::stream::iter(chunks).buffer_unordered(3);
111 |     while let Some(result) = stream.next().await {
112 |         for log in result? {
113 |             if let Some(tx_hash) = log.transaction_hash {
114 |                 tx_hashes.push(tx_hash);
115 |             }
116 |         }
117 |     }
118 | 
119 |     println!("Collected all transaction hashes for chain {}.", chain_id);
120 | 
121 |     let mut all_transactions = Vec::new();
122 |     // Get the receipts for the transactions.
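    // buffer_unordered(10) keeps at most ten receipt lookups in flight at a time,
    // bounding load on the RPC endpoint while still fetching concurrently.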
123 |     let mut stream = futures::stream::iter(tx_hashes.into_iter().map(|tx_hash| {
124 |         let provider = provider.clone();
125 |         async move { provider.get_transaction_receipt(tx_hash).await }
126 |     }))
127 |     .buffer_unordered(10);
128 | 
129 |     while let Some(receipt) = stream.next().await {
130 |         if let Ok(Some(receipt)) = receipt {
131 |             all_transactions.push(receipt);
132 |         }
133 |     }
134 | 
135 |     println!("Collected all receipts for chain {}.", chain_id);
136 | 
137 |     Ok(all_transactions
138 |         .into_iter()
139 |         .filter(|receipt| receipt.from == from_addr)
140 |         .map(|receipt| RelayTransaction {
141 |             chain_id,
142 |             tx_hash: receipt.transaction_hash,
143 |             tx_fee_wei: receipt.gas_used as u128 * receipt.effective_gas_price,
144 |             from: receipt.from,
145 |             to: receipt.to.unwrap_or_default(),
146 |         })
147 |         .collect())
148 | }
149 | 
150 | #[tokio::main]
151 | async fn main() -> Result<()> {
152 |     env::set_var("RUST_LOG", "info");
153 |     dotenv::dotenv().ok();
154 |     tracing_subscriber::fmt::fmt()
155 |         .with_env_filter(
156 |             EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::from_env("info")),
157 |         )
158 |         .init();
159 | 
160 |     let args = CostScriptArgs::parse();
161 | 
162 |     let from_addr = Address::from_str(&args.from_address).unwrap();
163 | 
164 |     let chains = [
165 |         (&args.ethereum_rpc, "eth"),
166 |         (&args.sepolia_rpc, "sepolia"),
167 |         (&args.base_sepolia_rpc, "base_sepolia"),
168 |         (&args.arbitrum_sepolia_rpc, "arbitrum_sepolia"),
169 |         (&args.holesky_rpc, "holesky"),
170 |     ];
171 | 
172 |     let futures = chains
173 |         .iter()
174 |         .map(|(rpc, _)| get_receipts_for_chain(from_addr, rpc, args.month, args.year));
175 | 
176 |     let results = futures::future::join_all(futures).await;
177 | 
178 |     let mut transactions = HashMap::new();
179 |     for ((_, name), result) in chains.iter().zip(results) {
180 |         transactions.insert(*name, result?);
181 |     }
182 | 
183 |     let all_transactions = transactions.values().flatten().cloned().collect::<Vec<RelayTransaction>>();
184 | 
185 |     let filename = format!("{}-{}-{}.csv", args.month, args.year, args.from_address);
186 |     fs::create_dir_all("filtered_transactions")?;
187 |     let file = std::fs::File::create(format!("filtered_transactions/{}", filename))?;
188 |     let mut csv_writer = csv::Writer::from_writer(file);
189 | 
190 |     println!(
191 |         "Writing {} transactions to filtered_transactions/{}",
192 |         all_transactions.len(),
193 |         filename
194 |     );
195 | 
196 |     for tx in &all_transactions {
197 |         csv_writer.serialize(tx)?;
198 |     }
199 | 
200 |     let eth_total = all_transactions
201 |         .iter()
202 |         .filter(|tx| tx.chain_id == 1)
203 |         .map(|tx| tx.tx_fee_wei as f64 / 1e18)
204 |         .sum::<f64>();
205 |     let total = eth_total;
206 | 
207 |     println!(
208 |         "\n{} paid the following in SP1 Vector relaying fees in {}/{}:\n  Ethereum: {:.4} ETH\n  Total: {:.4} ETH",
209 |         args.from_address, args.month, args.year, eth_total, total
210 |     );
211 | 
212 |     csv_writer.flush()?;
213 |     Ok(())
214 | }
215 | 
216 | /// Finds the block at the provided timestamp, using the provided provider.
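/// Uses binary search over block numbers by timestamp; on an inexact match it returns a
/// block near the target (note the `low - 10` back-off below).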
217 | async fn find_block_by_timestamp<P, N>(provider: &P, target_timestamp: u64) -> Result<(B256, u64)>
218 | where
219 |     P: Provider<N>,
220 |     N: Network,
221 | {
222 |     let latest_block = provider
223 |         .get_block(BlockId::latest(), BlockTransactionsKind::Hashes)
224 |         .await?;
225 |     let Some(latest_block) = latest_block else {
226 |         return Err(anyhow::anyhow!("No latest block found"));
227 |     };
228 |     let mut low = 0;
229 |     let mut high = latest_block.header().number();
230 | 
231 |     while low <= high {
232 |         let mid = (low + high) / 2;
233 |         let block = provider
234 |             .get_block(mid.into(), BlockTransactionsKind::Hashes)
235 |             .await?;
236 |         let Some(block) = block else {
237 |             return Err(anyhow::anyhow!("No block found"));
238 |         };
239 |         let block_timestamp = block.header().timestamp();
240 | 
241 |         match block_timestamp.cmp(&target_timestamp) {
242 |             Ordering::Equal => {
243 |                 return Ok((block.header().hash(), block.header().number()));
244 |             }
245 |             Ordering::Less => low = mid + 1,
246 |             Ordering::Greater => high = mid - 1,
247 |         }
248 |     }
249 | 
250 |     // Return the block hash of the closest block after the target timestamp
251 |     let block = provider
252 |         .get_block((low - 10).into(), BlockTransactionsKind::Hashes)
253 |         .await?;
254 |     let Some(block) = block else {
255 |         return Err(anyhow::anyhow!("No block found"));
256 |     };
257 |     Ok((block.header().hash(), block.header().number()))
258 | }
--------------------------------------------------------------------------------
/script/bin/genesis.rs:
--------------------------------------------------------------------------------
1 | //! To build the binary:
2 | //!
3 | //! `cargo build --release --bin genesis`
4 | //!
5 | use avail_subxt::config::Header;
6 | use clap::Parser;
7 | use services::input::RpcDataFetcher;
8 | use sp1_sdk::{HashableKey, Prover, ProverClient};
9 | use sp1_vectorx_script::SP1_VECTOR_ELF;
10 | 
11 | #[derive(Parser, Debug, Clone)]
12 | #[command(about = "Get the genesis parameters from a block.")]
13 | pub struct GenesisArgs {
14 |     #[arg(long)]
15 |     pub block: Option<u32>,
16 | }
17 | 
18 | const HEADER_RANGE_COMMITMENT_TREE_SIZE: u32 = 1024;
19 | 
20 | #[tokio::main]
21 | async fn main() -> anyhow::Result<()> {
22 |     let fetcher = RpcDataFetcher::new().await;
23 |     let client = ProverClient::builder().mock().build();
24 |     let (_pk, vk) = client.setup(SP1_VECTOR_ELF);
25 | 
26 |     let args = GenesisArgs::parse();
27 | 
28 |     let header;
29 |     if let Some(block) = args.block {
30 |         header = fetcher.get_header(block).await;
31 |     } else {
32 |         header = fetcher.get_head().await;
33 |     }
34 |     let header_hash = header.hash();
35 |     let authority_set_id = fetcher.get_authority_set_id(header.number).await;
36 |     let authority_set_hash = fetcher
37 |         .compute_authority_set_hash_for_block(header.number)
38 |         .await;
39 | 
40 |     struct GenesisOutput {
41 |         genesis_height: u32,
42 |         genesis_header: String,
43 |         genesis_authority_set_id: u64,
44 |         genesis_authority_set_hash: String,
45 |         sp1_vector_program_vkey: String,
46 |         header_range_commitment_tree_size: u32,
47 |     }
48 | 
49 |     let output = GenesisOutput {
50 |         genesis_height: header.number,
51 |         genesis_header: format!("{:#x}", header_hash),
52 |         genesis_authority_set_id: authority_set_id,
53 |         genesis_authority_set_hash: format!("{:#x}", authority_set_hash),
54 |         sp1_vector_program_vkey: vk.bytes32(),
55 |         header_range_commitment_tree_size: HEADER_RANGE_COMMITMENT_TREE_SIZE,
56 |     };
57 | 
58 | 
println!("GENESIS_HEIGHT={}\nGENESIS_HEADER={}\nGENESIS_AUTHORITY_SET_ID={}\nGENESIS_AUTHORITY_SET_HASH={}\nSP1_VECTOR_PROGRAM_VKEY={}\nHEADER_RANGE_COMMITMENT_TREE_SIZE={}", 59 | output.genesis_height, 60 | output.genesis_header, 61 | output.genesis_authority_set_id, 62 | output.genesis_authority_set_hash, 63 | output.sp1_vector_program_vkey, 64 | output.header_range_commitment_tree_size, 65 | ); 66 | 67 | Ok(()) 68 | } 69 | -------------------------------------------------------------------------------- /script/bin/test.rs: -------------------------------------------------------------------------------- 1 | //! A simple script to test the generation of proofs. 2 | 3 | use alloy::sol_types::SolType; 4 | use clap::Parser; 5 | use services::input::{HeaderRangeRequestData, RpcDataFetcher}; 6 | use sp1_sdk::{utils::setup_logger, ProverClient, SP1Stdin}; 7 | use sp1_vector_primitives::types::{ProofOutput, ProofType}; 8 | use sp1_vectorx_script::SP1_VECTOR_ELF; 9 | 10 | // Requires the following environment variables to be set: 11 | // - AVAIL_URL: The URL of the Avail RPC endpoint. 12 | // - AVAIL_CHAIN_ID: The chain id of the Avail network. 13 | // - VECTORX_QUERY_URL: The URL of the VectorX query service. 14 | 15 | #[derive(Parser, Debug)] 16 | #[clap(author, version, about, long_about = None)] 17 | struct ScriptArgs { 18 | /// Trusted block. 19 | #[clap(long)] 20 | trusted_block: u32, 21 | 22 | /// Target block. 23 | #[clap(long, env)] 24 | target_block: u32, 25 | } 26 | 27 | #[tokio::main] 28 | async fn main() -> anyhow::Result<()> { 29 | setup_logger(); 30 | 31 | let args = ScriptArgs::parse(); 32 | 33 | let trusted_block = args.trusted_block; 34 | let target_block = args.target_block; 35 | 36 | let authority_set_id = 282u64; 37 | let proof_type = ProofType::HeaderRangeProof; 38 | 39 | let fetcher = RpcDataFetcher::new().await; 40 | let mut stdin: SP1Stdin = SP1Stdin::new(); 41 | 42 | // Fetch & write inputs to proof based on the proof type. 
43 |     match proof_type {
44 |         ProofType::HeaderRangeProof => {
45 |             let header_range_inputs = fetcher
46 |                 .get_header_range_inputs(
47 |                     HeaderRangeRequestData {
48 |                         trusted_block,
49 |                         target_block,
50 |                         is_target_epoch_end_block: false,
51 |                     },
52 |                     Some(512),
53 |                 )
54 |                 .await;
55 | 
56 |             stdin.write(&proof_type);
57 |             stdin.write(&header_range_inputs);
58 |         }
59 |         ProofType::RotateProof => {
60 |             let rotate_input = fetcher.get_rotate_inputs(authority_set_id).await;
61 | 
62 |             stdin.write(&proof_type);
63 |             stdin.write(&rotate_input);
64 |         }
65 |     }
66 | 
67 |     let client = ProverClient::from_env();
68 | 
69 |     let (pv, report) = client.execute(SP1_VECTOR_ELF, &stdin).run()?;
70 | 
71 |     let _ = ProofOutput::abi_decode(pv.as_slice(), true)?;
72 | 
73 |     println!("Execution Report: {:?}", report);
74 |     println!("Total instructions: {}", report.total_instruction_count());
75 | 
76 |     Ok(())
77 | }
--------------------------------------------------------------------------------
/script/bin/vkey.rs:
--------------------------------------------------------------------------------
1 | use sp1_sdk::{HashableKey, Prover, ProverClient};
2 | use sp1_vectorx_script::SP1_VECTOR_ELF;
3 | 
4 | #[tokio::main]
5 | async fn main() -> anyhow::Result<()> {
6 |     let client = ProverClient::builder().mock().build();
7 |     let (_pk, vk) = client.setup(SP1_VECTOR_ELF);
8 | 
9 |     println!("VK: {}", vk.bytes32());
10 | 
11 |     Ok(())
12 | }
--------------------------------------------------------------------------------
/script/build.rs:
--------------------------------------------------------------------------------
1 | #[allow(unused_imports)]
2 | use sp1_build::{build_program_with_args, BuildArgs};
3 | 
4 | fn main() {
5 |     // build_program_with_args(
6 |     //     "../program",
7 |     //     BuildArgs {
8 |     //         docker: true,
9 |     //         output_directory: Some("../elf".to_string()),
10 |     //         elf_name: Some("vector-elf".to_string()),
11 |     //         tag: "v4.1.3".to_string(),
12 |     //         ..Default::default()
13 |     //     },
14 |     // );
15 | }
--------------------------------------------------------------------------------
/script/rust-toolchain:
--------------------------------------------------------------------------------
1 | [toolchain]
2 | channel = "1.81"
3 | components = ["llvm-tools", "rustc-dev"]
--------------------------------------------------------------------------------
/script/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod relay;
2 | 
3 | pub const SP1_VECTOR_ELF: &[u8] = include_bytes!("../../elf/vector-elf");
4 | 
5 | #[cfg(test)]
6 | mod tests {
7 |     use anyhow::Result;
8 |     use services::input::RpcDataFetcher;
9 | 
10 |     #[tokio::test]
11 |     async fn test_get_justification_query_service() -> Result<()> {
12 |         let client = RpcDataFetcher::new().await;
13 |         let justification = client.get_justification(337281).await?;
14 |         println!("Justification: {:?}", justification);
15 |         Ok(())
16 |     }
17 | }
--------------------------------------------------------------------------------
/script/src/relay.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::str::FromStr;
3 | use std::time::Duration;
4 | 
5 | use alloy::network::Network;
6 | use alloy::primitives::B256;
7 | use alloy::providers::Provider;
8 | use alloy::transports::http::Client;
9 | use anyhow::Result;
10 | use serde::{Deserialize, Serialize};
11 | use serde_json::json;
12 | use tracing::info;
13 | 
14 | /// Get the gas limit associated with the chain id. Note: These values have been found through
15 | /// trial and error and can be configured.
16 | pub fn get_gas_limit(chain_id: u64) -> u64 {
17 |     if chain_id == 42161 || chain_id == 421614 {
18 |         25_000_000
19 |     } else {
20 |         1_500_000
21 |     }
22 | }
23 | 
24 | /// Get the gas fee cap associated with the chain id, using the provider to get the gas price. Note:
25 | /// These values have been found through trial and error and can be configured.
26 | pub async fn get_fee_cap<P, N>(chain_id: u64, provider: &P) -> u128
27 | where
28 |     P: Provider<N>,
29 |     N: Network,
30 | {
31 |     // Base percentage multiplier for the gas fee.
32 |     let mut multiplier = 20;
33 | 
34 |     // Double the estimated gas fee cap for the testnets.
35 |     if chain_id == 17000 || chain_id == 421614 || chain_id == 11155111 || chain_id == 84532 {
36 |         multiplier = 100
37 |     }
38 | 
39 |     // Get the gas price.
40 |     let gas_price = provider.get_gas_price().await.unwrap();
41 | 
42 |     // Calculate the fee cap.
43 |     (gas_price * (100 + multiplier)) / 100
44 | }
45 | 
46 | #[derive(Serialize, Deserialize)]
47 | pub enum KMSRelayStatus {
48 |     Unknown = 0,
49 |     Relayed = 1,
50 |     PreflightError = 2,
51 |     SimulationFailure = 3,
52 |     RelayFailure = 4,
53 |     InvalidAuthenticationToken = 5,
54 | }
55 | 
56 | /// Relay request arguments for KMS relayer.
57 | #[derive(Debug, Deserialize, Serialize)]
58 | pub struct KMSRelayRequest {
59 |     pub chain_id: u64,
60 |     pub address: String,
61 |     pub calldata: String,
62 |     pub platform_request: bool,
63 | }
64 | 
65 | /// Response from KMS relayer.
66 | #[derive(Debug, Deserialize, Serialize)]
67 | pub struct KMSRelayResponse {
68 |     pub transaction_hash: Option<String>,
69 |     pub message: Option<String>,
70 |     pub status: u32,
71 | }
72 | 
73 | /// Relay a transaction with KMS and return the transaction hash with retries.
74 | /// Requires SECURE_RELAYER_ENDPOINT and SECURE_RELAYER_API_KEY to be set in the environment.
75 | pub async fn relay_with_kms(args: &KMSRelayRequest, num_retries: u32) -> Result<B256> {
76 |     for attempt in 1..=num_retries {
77 |         let response = send_kms_relay_request(args).await?;
78 |         match response.status {
79 |             status if status == KMSRelayStatus::Relayed as u32 => {
80 |                 return Ok(B256::from_str(
81 |                     &response
82 |                         .transaction_hash
83 |                         .ok_or_else(|| anyhow::anyhow!("Missing transaction hash"))?,
84 |                 )?);
85 |             }
86 |             _ => {
87 |                 let error_message = response
88 |                     .message
89 |                     .expect("KMS request always returns a message");
90 |                 tracing::warn!("KMS relay attempt {} failed: {}", attempt, error_message);
91 |                 if attempt == num_retries {
92 |                     return Err(anyhow::anyhow!(
93 |                         "Failed to relay transaction: {}",
94 |                         error_message
95 |                     ));
96 |                 }
97 |             }
98 |         }
99 |     }
100 |     unreachable!("Loop should have returned or thrown an error")
101 | }
102 | 
103 | /// Send a KMS relay request and get the response.
104 | /// Requires SECURE_RELAYER_ENDPOINT and SECURE_RELAYER_API_KEY to be set in the environment.
105 | async fn send_kms_relay_request(args: &KMSRelayRequest) -> Result<KMSRelayResponse> {
106 |     info!("Sending KMS relay request: {:?}", args);
107 |     // Read relayer endpoint from env
108 |     let relayer_endpoint = env::var("SECURE_RELAYER_ENDPOINT").unwrap();
109 |     let api_key = env::var("SECURE_RELAYER_API_KEY").unwrap();
110 | 
111 |     let client = Client::new();
112 |     let response = client
113 |         .post(format!("{}/relay", relayer_endpoint))
114 |         .bearer_auth(api_key)
115 |         .json(&json!(args))
116 |         .timeout(Duration::from_secs(90))
117 |         .send()
118 |         .await?;
119 |     let response_body = response.text().await?;
120 |     let response_json: KMSRelayResponse = serde_json::from_str(&response_body)?;
121 |     Ok(response_json)
122 | }
--------------------------------------------------------------------------------
/services/.env.example:
--------------------------------------------------------------------------------
1 | # Avail Chain Config
2 | AVAIL_URL=
3 | AVAIL_CHAIN_ID=
4 | 
5 | # Justification Indexer Write Keys
6 | AWS_REGION=
7 | AWS_ACCESS_KEY_ID=
8 | AWS_SECRET_ACCESS_KEY=
9 | 
--------------------------------------------------------------------------------
/services/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | version = "0.1.0"
3 | name = "services"
4 | edition = "2021"
5 | 
6 | [[bin]]
7 | name = "indexer"
8 | path = "bin/indexer.rs"
9 | 
10 | [dependencies]
11 | sp1-vector-primitives = { workspace = true }
12 | env_logger = { workspace = true }
13 | hex = { workspace = true }
14 | tracing = { workspace = true }
15 | serde = { workspace = true }
16 | serde_json = { workspace = true }
17 | tokio = { workspace = true }
18 | reqwest = { workspace = true }
19 | 
20 | dotenv = { workspace = true }
21 | avail-subxt = { workspace = true }
22 | subxt = { workspace = true }
23 | sp-core = { workspace = true }
24 | codec = { workspace = true }
25 | alloy = { workspace = true }
26 | anyhow = { workspace = true }
27 | futures = { workspace = true }
28 | 
29 | aws-config = { workspace = true }
30 | aws-sdk-dynamodb = { workspace = true }
31 | 
32 | [dev-dependencies]
33 | test-case = "3.3.1"
--------------------------------------------------------------------------------
/services/bin/indexer.rs:
--------------------------------------------------------------------------------
1 | use avail_subxt::primitives::Header;
2 | use avail_subxt::RpcParams;
3 | use codec::Decode;
4 | use serde::de::Error;
5 | use serde::Deserialize;
6 | use services::aws::AWSClient;
7 | use services::input::RpcDataFetcher;
8 | use services::types::{Commit, GrandpaJustification};
9 | use sp_core::bytes;
10 | use subxt::backend::rpc::RpcSubscription;
11 | use tracing::{debug, error, info};
12 | 
13 | use services::Timeout;
14 | 
15 | /// The justification type that the Avail Subxt client returns for justifications. Needs a custom
16 | /// deserializer, so we can't use the equivalent `GrandpaJustification` type.
17 | #[derive(Clone, Debug, Decode)]
18 | pub struct AvailSubscriptionGrandpaJustification {
19 |     pub round: u64,
20 |     pub commit: Commit,
21 |     #[allow(dead_code)]
22 |     pub votes_ancestries: Vec<Header>,
23 | }
24 | 
25 | impl From<AvailSubscriptionGrandpaJustification> for GrandpaJustification {
26 |     fn from(justification: AvailSubscriptionGrandpaJustification) -> GrandpaJustification {
27 |         GrandpaJustification {
28 |             round: justification.round,
29 |             commit: justification.commit,
30 |             votes_ancestries: justification.votes_ancestries,
31 |         }
32 |     }
33 | }
34 | 
35 | impl<'de> Deserialize<'de> for AvailSubscriptionGrandpaJustification {
36 |     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
37 |     where
38 |         D: serde::Deserializer<'de>,
39 |     {
40 |         let encoded = bytes::deserialize(deserializer)?;
41 |         Self::decode(&mut &encoded[..])
42 |             .map_err(|codec_err| D::Error::custom(format!("Invalid decoding: {:?}", codec_err)))
43 |     }
44 | }
45 | 
46 | /// When the subscription yields events, add them to the indexer DB. If the subscription fails,
47 | /// exit so the outer loop can re-initialize it.
48 | async fn handle_subscription(
49 |     sub: &mut RpcSubscription<AvailSubscriptionGrandpaJustification>,
50 |     aws_client: &AWSClient,
51 |     fetcher: &RpcDataFetcher,
52 |     timeout_duration: std::time::Duration,
53 | ) {
54 |     loop {
55 |         match sub.next().timeout(timeout_duration).await {
56 |             Ok(Some(Ok(justification))) => {
57 |                 debug!(
58 |                     "New justification from block {}",
59 |                     justification.commit.target_number
60 |                 );
61 |                 if let Err(e) = aws_client
62 |                     .add_justification(&fetcher.avail_chain_id, justification.into())
63 |                     .await
64 |                 {
65 |                     error!("Error adding justification to AWS: {:?}", e);
66 |                 }
67 |             }
68 |             Ok(None) => {
69 |                 error!("Subscription ended unexpectedly");
70 |                 return;
71 |             }
72 |             Ok(Some(Err(e))) => {
73 |                 error!("Error in subscription: {:?}", e);
74 |                 return;
75 |             }
76 |             Err(_) => {
77 |                 error!("Timeout reached. No event received in the last minute.");
78 |                 return;
79 |             }
80 |         }
81 |     }
82 | }
83 | 
84 | /// Initialize the subscription for the grandpa justification events.
85 | async fn initialize_subscription(
86 |     fetcher: &RpcDataFetcher,
87 | ) -> Result<RpcSubscription<AvailSubscriptionGrandpaJustification>, subxt::Error> {
88 |     fetcher
89 |         .client
90 |         .rpc()
91 |         .subscribe(
92 |             "grandpa_subscribeJustifications",
93 |             RpcParams::new(),
94 |             "grandpa_unsubscribeJustifications",
95 |         )
96 |         .await
97 | }
98 | 
99 | /// Listen for justifications. If the subscription fails to yield a justification within the timeout
100 | /// or errors, it will re-initialize the subscription.
101 | async fn listen_for_justifications() {
102 |     // Avail's block time is 20 seconds; as long as this is greater than that, we should be fine.
103 |     let timeout_duration = std::time::Duration::from_secs(60);
104 |     // Time to wait before retrying the subscription.
105 |     let retry_delay = std::time::Duration::from_secs(5);
106 | 
107 |     loop {
108 |         info!("Initializing fetcher and subscription...");
109 | 
110 |         let Ok(fetcher) = RpcDataFetcher::new().timeout(timeout_duration).await else {
111 |             error!("Failed to initialize fetcher after timeout");
112 |             continue;
113 |         };
114 | 
115 |         // Initialize the AWS client.
116 | let Ok(aws_client) = AWSClient::new().timeout(timeout_duration).await else { 117 | error!("Failed to initialize AWS client after timeout"); 118 | continue; 119 | }; 120 | 121 | match initialize_subscription(&fetcher).await { 122 | Ok(mut sub) => { 123 | debug!("Subscription initialized successfully"); 124 | handle_subscription(&mut sub, &aws_client, &fetcher, timeout_duration).await; 125 | } 126 | Err(e) => { 127 | debug!("Failed to initialize subscription: {:?}", e); 128 | } 129 | } 130 | 131 | debug!("Retrying subscription in {} seconds", retry_delay.as_secs()); 132 | tokio::time::sleep(retry_delay).await; 133 | } 134 | } 135 | 136 | #[tokio::main] 137 | pub async fn main() { 138 | dotenv::dotenv().ok(); 139 | env_logger::init(); 140 | 141 | listen_for_justifications().await; 142 | } 143 | -------------------------------------------------------------------------------- /services/src/aws.rs: -------------------------------------------------------------------------------- 1 | use aws_sdk_dynamodb::types::AttributeValue; 2 | use aws_sdk_dynamodb::Client; 3 | 4 | use anyhow::Result; 5 | use serde_json::{from_str, to_string}; 6 | use std::collections::HashMap; 7 | use tracing::info; 8 | 9 | use crate::types::GrandpaJustification; 10 | 11 | pub struct AWSClient { 12 | client: Client, 13 | } 14 | 15 | const JUSTIFICATION_TABLE: &str = "justifications-v2"; 16 | 17 | impl AWSClient { 18 | pub async fn new() -> Self { 19 | let shared_config = aws_config::load_from_env().await; 20 | let client = Client::new(&shared_config); 21 | AWSClient { client } 22 | } 23 | 24 | /// Add a justification to the AWS DynamoDB table. 25 | pub async fn add_justification( 26 | &self, 27 | avail_chain_id: &str, 28 | justification: GrandpaJustification, 29 | ) -> Result<()> { 30 | let json_data = to_string(&justification)?; 31 | 32 | let block_nb = justification.commit.target_number; 33 | let key = format!("{}-{}", avail_chain_id, block_nb).to_lowercase(); 34 | 35 | let item = HashMap::from([ 36 | ("id".to_string(), AttributeValue::S(key.to_string())), 37 | ("data".to_string(), AttributeValue::S(json_data.to_string())), 38 | ]); 39 | 40 | info!( 41 | "Adding justification for chain: {} for block number: {:?}", 42 | avail_chain_id, block_nb 43 | ); 44 | 45 | self.client 46 | .put_item() 47 | .table_name(JUSTIFICATION_TABLE) 48 | .set_item(Some(item)) 49 | .send() 50 | .await?; 51 | Ok(()) 52 | } 53 | 54 | /// Get a justification from the AWS DynamoDB table. 
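    /// Looks up the item under the same `{avail_chain_id}-{block_number}` (lowercased)
    /// key that `add_justification` writes.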
55 |     pub async fn get_justification(
56 |         &self,
57 |         avail_chain_id: &str,
58 |         block_number: u32,
59 |     ) -> Result<GrandpaJustification> {
60 |         let key = format!("{}-{}", avail_chain_id, block_number).to_lowercase();
61 | 
62 |         let resp = self
63 |             .client
64 |             .get_item()
65 |             .table_name(JUSTIFICATION_TABLE)
66 |             .key("id", AttributeValue::S(key))
67 |             .send()
68 |             .await?;
69 | 
70 |         if let Some(item) = resp.item {
71 |             if let Some(data_attr) = item.get("data") {
72 |                 if let Ok(data_json) = data_attr.as_s() {
73 |                     let data: GrandpaJustification = from_str(data_json)?;
74 |                     return Ok(data);
75 |                 }
76 |             }
77 |         }
78 |         Err(anyhow::anyhow!("Justification not found"))
79 |     }
80 | }
81 | 
--------------------------------------------------------------------------------
/services/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod aws;
2 | pub mod input;
3 | pub mod types;
4 | 
5 | pub use timeout::Timeout;
6 | 
7 | mod timeout {
8 |     use std::future::Future;
9 |     use std::time::Duration;
10 |     use tokio::time::{timeout, Timeout as TimeoutFuture};
11 | 
12 |     pub trait Timeout: Sized {
13 |         fn timeout(self, duration: Duration) -> TimeoutFuture<Self>;
14 |     }
15 | 
16 |     impl<T: Future> Timeout for T {
17 |         fn timeout(self, duration: Duration) -> TimeoutFuture<Self> {
18 |             timeout(duration, self)
19 |         }
20 |     }
21 | }
22 | 
--------------------------------------------------------------------------------
/services/src/types.rs:
--------------------------------------------------------------------------------
1 | use avail_subxt::primitives::Header;
2 | use codec::{Decode, Encode};
3 | use serde::{Deserialize, Serialize};
4 | use sp_core::ed25519::{Public as EdPublic, Signature};
5 | use sp_core::Bytes;
6 | use sp_core::H256;
7 | 
8 | #[derive(Clone, Debug, Decode, Encode, Serialize, Deserialize)]
9 | pub struct Precommit {
10 |     pub target_hash: H256,
11 |     /// The target block's number.
12 |     pub target_number: u32,
13 | }
14 | 
15 | #[derive(Clone, Debug, Decode, Serialize, Deserialize)]
16 | pub struct SignedPrecommit {
17 |     pub precommit: Precommit,
18 |     /// The signature on the message.
19 |     pub signature: Signature,
20 |     /// The Id of the signer.
21 |     pub id: EdPublic,
22 | }
23 | 
24 | #[derive(Clone, Debug, Decode, Serialize, Deserialize)]
25 | pub struct Commit {
26 |     pub target_hash: H256,
27 |     /// The target block's number.
28 |     pub target_number: u32,
29 |     /// Precommits for the target block, or any block after it, that justify this commit.
30 |     pub precommits: Vec<SignedPrecommit>,
31 | }
32 | 
33 | #[derive(Clone, Debug, Decode, Serialize, Deserialize)]
34 | pub struct GrandpaJustification {
35 |     pub round: u64,
36 |     pub commit: Commit,
37 |     pub votes_ancestries: Vec<Header>,
38 | }
39 | 
40 | #[derive(Debug, Encode)]
41 | pub enum SignerMessage {
42 |     #[allow(dead_code)]
43 |     DummyMessage(u32),
44 |     PrecommitMessage(Precommit),
45 | }
46 | 
47 | #[derive(Serialize, Deserialize, Debug)]
48 | pub struct EncodedFinalityProof(pub Bytes);
49 | 
50 | #[derive(Debug, PartialEq, Encode, Decode, Clone, Deserialize)]
51 | pub struct FinalityProof {
52 |     /// The hash of block F for which justification is provided.
53 |     pub block: H256,
54 |     /// Justification of the block F.
55 |     pub justification: Vec<u8>,
56 |     /// The set of headers in the range (B; F] that are unknown to the caller, ordered by block number.
57 |     pub unknown_headers: Vec<Header>
, 58 | } 59 | -------------------------------------------------------------------------------- /services/test_assets/ancestry.json: -------------------------------------------------------------------------------- 1 | { 2 | "validator_set": { 3 | "set_id": 1, 4 | "validator_set": [ 5 | "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", 6 | "5GoNkf6WdbxCFnPdAnYYQyCjAKPJgLNxXwPjwTh6DGg6gN3E", 7 | "5DbKjhNLpqX3zqZdNBc9BGb4fHU1cRBaDhJUskrvkwfraDi6", 8 | "5ECTwv6cZ5nJQPk6tWfaTrEk8YH2L7X1VT4EL5Tx2ikfFwb7" 9 | ] 10 | }, 11 | "justification": { 12 | "round": 1, 13 | "commit": { 14 | "target_hash": "0xb4ab92948e78b5e3115d2ce5ff2207e7d713a7fb33f4a9240e413c00954f244b", 15 | "target_number": 1, 16 | "precommits": [ 17 | { 18 | "precommit": { 19 | "target_hash": "0xdd4e3b7be002b78b5acc41476c17c2c25e1c0351968f5a108baf96d5548b6e9b", 20 | "target_number": 3 21 | }, 22 | "signature": "efa2fa0c41d25d0d9f2ffb4a11195a5f51f14ff45a7617a7d9558d19265ca7f98eb16c537b8d2597f4398607d20040bc78fff4551fc0c815c4c5565e2dcffd04", 23 | "id": "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu" 24 | }, 25 | { 26 | "precommit": { 27 | "target_hash": "0xea93a9c49947df0f821da58f5bf420010a5b885cca5d09a52680ae72a7f2354b", 28 | "target_number": 4 29 | }, 30 | "signature": "12c5d0e19eea1047228d09e47c286b781d955d2d60bc33efcb09bf92d0e1ce76ad2a158d7b1c6f9797ac420379f8a346dc10a4f5b1d0d5e81332c3704c13ef0d", 31 | "id": "5DbKjhNLpqX3zqZdNBc9BGb4fHU1cRBaDhJUskrvkwfraDi6" 32 | }, 33 | { 34 | "precommit": { 35 | "target_hash": "0x6fc89525db02b31c2509a900edbfb2cd949f71a9d63bb66c7a3dac9e4fce3b18", 36 | "target_number": 3 37 | }, 38 | "signature": "1269595fcfe8dd487808367e1157e465911eb6d224a5e617d71607496ffc24438da3d987dc86ddab0e7e89193014508bcc288b8c78c6aa97242101befc79140a", 39 | "id": "5ECTwv6cZ5nJQPk6tWfaTrEk8YH2L7X1VT4EL5Tx2ikfFwb7" 40 | } 41 | ] 42 | }, 43 | "votes_ancestries": [ 44 | { 45 | "parentHash": "0xb4ab92948e78b5e3115d2ce5ff2207e7d713a7fb33f4a9240e413c00954f244b", 46 | "number": "2", 47 | "stateRoot": "0x5b0f9b4e91f82bd9f256cf39568bdbdf1212e6a5cba0b8b4451233e8aa669466", 48 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 49 | "digest": { 50 | "logs": [] 51 | }, 52 | "extension": { 53 | "V3": { 54 | "appLookup": { 55 | "size": 0, 56 | "index": [] 57 | }, 58 | "commitment": { 59 | "rows": 0, 60 | "cols": 0, 61 | "commitment": [], 62 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 63 | } 64 | } 65 | } 66 | }, 67 | { 68 | "parentHash": "0x745609767b544320850c9d8e7a34834227658fd78287394c8c7c501c2c53c7a2", 69 | "number": "3", 70 | "stateRoot": "0xe35b3502b538d0ee15cf1787da1e9db973c7e470e843c5656e1794cda6394c32", 71 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 72 | "digest": { 73 | "logs": [] 74 | }, 75 | "extension": { 76 | "V3": { 77 | "appLookup": { 78 | "size": 0, 79 | "index": [] 80 | }, 81 | "commitment": { 82 | "rows": 0, 83 | "cols": 0, 84 | "commitment": [], 85 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 86 | } 87 | } 88 | } 89 | }, 90 | { 91 | "parentHash": "0x745609767b544320850c9d8e7a34834227658fd78287394c8c7c501c2c53c7a2", 92 | "number": "3", 93 | "stateRoot": "0x9a12a18aef5d4a4cf9b24cd4d81a65f01d1ade01c83bc94f2b1c3d6e30281ae1", 94 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 95 | "digest": { 96 | "logs": [] 97 | }, 98 | "extension": { 99 | "V3": { 100 | "appLookup": { 101 | "size": 0, 102 | "index": [] 103 | }, 104 | 
"commitment": { 105 | "rows": 0, 106 | "cols": 0, 107 | "commitment": [], 108 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 109 | } 110 | } 111 | } 112 | }, 113 | { 114 | "parentHash": "0xb4ab92948e78b5e3115d2ce5ff2207e7d713a7fb33f4a9240e413c00954f244b", 115 | "number": "2", 116 | "stateRoot": "0x131e87ed38f468a1d3fb302cb0f882d5de2c62866af17f3ba4cd7ef0be9b7095", 117 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 118 | "digest": { 119 | "logs": [] 120 | }, 121 | "extension": { 122 | "V3": { 123 | "appLookup": { 124 | "size": 0, 125 | "index": [] 126 | }, 127 | "commitment": { 128 | "rows": 0, 129 | "cols": 0, 130 | "commitment": [], 131 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 132 | } 133 | } 134 | } 135 | }, 136 | { 137 | "parentHash": "0x79a93341fdb1b0863ebd75c07c3513d0e59892850542374a25e06577cb2c6c8e", 138 | "number": "3", 139 | "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 140 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 141 | "digest": { 142 | "logs": [] 143 | }, 144 | "extension": { 145 | "V3": { 146 | "appLookup": { 147 | "size": 0, 148 | "index": [] 149 | }, 150 | "commitment": { 151 | "rows": 0, 152 | "cols": 0, 153 | "commitment": [], 154 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 155 | } 156 | } 157 | } 158 | }, 159 | { 160 | "parentHash": "0x26661ea30e0c462c13f000cfd74bf51463e1fc2befb3efd98ea7182c010cb4fd", 161 | "number": "4", 162 | "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 163 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 164 | "digest": { 165 | "logs": [] 166 | }, 167 | "extension": { 168 | "V3": { 169 | "appLookup": { 170 | "size": 0, 171 | "index": [] 172 | }, 173 | "commitment": { 174 | "rows": 0, 175 | "cols": 0, 176 | "commitment": [], 177 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 178 | } 179 | } 180 | } 181 | } 182 | ] 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /services/test_assets/ancestry_missing_link_no_majority.json: -------------------------------------------------------------------------------- 1 | { 2 | "validator_set": { 3 | "set_id": 1, 4 | "validator_set": [ 5 | "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", 6 | "5GoNkf6WdbxCFnPdAnYYQyCjAKPJgLNxXwPjwTh6DGg6gN3E", 7 | "5DbKjhNLpqX3zqZdNBc9BGb4fHU1cRBaDhJUskrvkwfraDi6", 8 | "5ECTwv6cZ5nJQPk6tWfaTrEk8YH2L7X1VT4EL5Tx2ikfFwb7" 9 | ] 10 | }, 11 | "justification": { 12 | "round": 1, 13 | "commit": { 14 | "target_hash": "0xb4ab92948e78b5e3115d2ce5ff2207e7d713a7fb33f4a9240e413c00954f244b", 15 | "target_number": 1, 16 | "precommits": [ 17 | { 18 | "precommit": { 19 | "target_hash": "0xb6d914e630c33071cd8f906c6a9c7475280d8d78b3334a5bf32cacb86d6ab091", 20 | "target_number": 3 21 | }, 22 | "signature": "e1c95a7b2065df2522156da7a93ea65568f24a6140494f7fbd756798ddb3f2b221bcf18eb0fe863168b1cb95faf93850ec15528f2eaf013c3b0c97118d405207", 23 | "id": "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu" 24 | }, 25 | { 26 | "precommit": { 27 | "target_hash": "0xcc31fde82d6ba620fdc24ef71e35617192231e2d51121e75b29fa7f7e2d8be17", 28 | "target_number": 4 29 | }, 30 | "signature": "57e11cc1dfa99d718028ed2c5218f3237a3ec7c41e2f1c93eac68605ea6f99f9e9a91b2379b686cf2a7640f890191ed49e6037fca3366d91fbd345a76255b60e", 
31 | "id": "5DbKjhNLpqX3zqZdNBc9BGb4fHU1cRBaDhJUskrvkwfraDi6" 32 | }, 33 | { 34 | "precommit": { 35 | "target_hash": "0x6ded954ee4a2cc6c89051702f83555662169d1d29c01b03425ed6925cfc7853a", 36 | "target_number": 3 37 | }, 38 | "signature": "569029c39fefc7eeaca31c8bcada086078a415ae817d08a5990ce1e8311fdac0766b29e577c0d0a61ba44dd0b262f5492fb56740af04621a60190f93f4d18b0a", 39 | "id": "5ECTwv6cZ5nJQPk6tWfaTrEk8YH2L7X1VT4EL5Tx2ikfFwb7" 40 | } 41 | ] 42 | }, 43 | "votes_ancestries": [ 44 | { 45 | "parentHash": "0x12947f6f2cd0122b8838e77bd6d9fd9d753cba6f3a3defecf1065477943fd478", 46 | "number": "3", 47 | "stateRoot": "0xfead8417044680ba65e6945a8f1f607d08b54109bdd55d45988c931af960667b", 48 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 49 | "digest": { 50 | "logs": [] 51 | }, 52 | "extension": { 53 | "V3": { 54 | "appLookup": { 55 | "size": 0, 56 | "index": [] 57 | }, 58 | "commitment": { 59 | "rows": 0, 60 | "cols": 0, 61 | "commitment": [], 62 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 63 | } 64 | } 65 | } 66 | }, 67 | { 68 | "parentHash": "0x12947f6f2cd0122b8838e77bd6d9fd9d753cba6f3a3defecf1065477943fd478", 69 | "number": "3", 70 | "stateRoot": "0x741f230ad66dbb6e3281a2381a77632d0d8e72fdad68532605d871d48e8a48e1", 71 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 72 | "digest": { 73 | "logs": [] 74 | }, 75 | "extension": { 76 | "V3": { 77 | "appLookup": { 78 | "size": 0, 79 | "index": [] 80 | }, 81 | "commitment": { 82 | "rows": 0, 83 | "cols": 0, 84 | "commitment": [], 85 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 86 | } 87 | } 88 | } 89 | }, 90 | { 91 | "parentHash": "0xb4ab92948e78b5e3115d2ce5ff2207e7d713a7fb33f4a9240e413c00954f244b", 92 | "number": "2", 93 | "stateRoot": "0x0c02255e78baa9cd1044c54895b22ff4cf5fb040bf1f787d907e946c88c118e1", 94 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 95 | "digest": { 96 | "logs": [] 97 | }, 98 | "extension": { 99 | "V3": { 100 | "appLookup": { 101 | "size": 0, 102 | "index": [] 103 | }, 104 | "commitment": { 105 | "rows": 0, 106 | "cols": 0, 107 | "commitment": [], 108 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 109 | } 110 | } 111 | } 112 | }, 113 | { 114 | "parentHash": "0xa8ddc2792663c61e9fa88b66e520beaa202c281d73cd1fa00b547dd07cbfc68d", 115 | "number": "3", 116 | "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 117 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 118 | "digest": { 119 | "logs": [] 120 | }, 121 | "extension": { 122 | "V3": { 123 | "appLookup": { 124 | "size": 0, 125 | "index": [] 126 | }, 127 | "commitment": { 128 | "rows": 0, 129 | "cols": 0, 130 | "commitment": [], 131 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 132 | } 133 | } 134 | } 135 | }, 136 | { 137 | "parentHash": "0xc2c4759dbf708253c1a6619794f336efc5fbde013d559a510900017a14583170", 138 | "number": "4", 139 | "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 140 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 141 | "digest": { 142 | "logs": [] 143 | }, 144 | "extension": { 145 | "V3": { 146 | "appLookup": { 147 | "size": 0, 148 | "index": [] 149 | }, 150 | "commitment": { 151 | "rows": 0, 152 | "cols": 0, 153 | "commitment": [], 154 | "dataRoot": 
"0x0000000000000000000000000000000000000000000000000000000000000000" 155 | } 156 | } 157 | } 158 | } 159 | ] 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /services/test_assets/ancestry_missing_link_works.json: -------------------------------------------------------------------------------- 1 | { 2 | "validator_set": { 3 | "set_id": 1, 4 | "validator_set": [ 5 | "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", 6 | "5GoNkf6WdbxCFnPdAnYYQyCjAKPJgLNxXwPjwTh6DGg6gN3E", 7 | "5DbKjhNLpqX3zqZdNBc9BGb4fHU1cRBaDhJUskrvkwfraDi6", 8 | "5ECTwv6cZ5nJQPk6tWfaTrEk8YH2L7X1VT4EL5Tx2ikfFwb7" 9 | ] 10 | }, 11 | "justification": { 12 | "round": 1, 13 | "commit": { 14 | "target_hash": "0xb4ab92948e78b5e3115d2ce5ff2207e7d713a7fb33f4a9240e413c00954f244b", 15 | "target_number": 1, 16 | "precommits": [ 17 | { 18 | "precommit": { 19 | "target_hash": "0x11fbf502f2ae671e85f02e34cd2da8840432367408d52b600b413f5008978db7", 20 | "target_number": 3 21 | }, 22 | "signature": "61b3c924c53c688a34334c45ca721e170900d1c307906f78a1b5847915261ea08de3cf7247e0e164bd1d03845becdad7bac0148035897bb92ba67adae591170f", 23 | "id": "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu" 24 | }, 25 | { 26 | "precommit": { 27 | "target_hash": "0x810d68cebc8729ad9fa07ad4304238249ae698db495d8362e7d9d72a57e7e9c1", 28 | "target_number": 3 29 | }, 30 | "signature": "8ac2f7e571ad125d8ccdc4593976617b47245fe4017923351716e6043c589493eec215d36d89f17a7ebec79f6f4aeb2d8fcfb8c1a2c9d64a1fc828e748f0430f", 31 | "id": "5GoNkf6WdbxCFnPdAnYYQyCjAKPJgLNxXwPjwTh6DGg6gN3E" 32 | }, 33 | { 34 | "precommit": { 35 | "target_hash": "0xcb94f3b508f9f297abde2d00435669b3029cc6b7af6d420d7fb1e80f5d3df410", 36 | "target_number": 4 37 | }, 38 | "signature": "ca6e0e506885121d537037b9e7638740b4bdc241f6d4c96e78acae5c703581bcacbe69d92a94d599cb25fa8a95d55f0018dab6227550975cd974f55b7f534d06", 39 | "id": "5DbKjhNLpqX3zqZdNBc9BGb4fHU1cRBaDhJUskrvkwfraDi6" 40 | }, 41 | { 42 | "precommit": { 43 | "target_hash": "0xc7c778213b2c9b4843937afd3d30bc7a3dbf8f33f62b37deb90498dfe8c4778c", 44 | "target_number": 3 45 | }, 46 | "signature": "981b5093a32a35b0ca433381379448042d7c0e7ee71ab542cc27f08c0cba06613068d1c4bd638759dd73984ecbd0726ebcb99fab89b8a605df83e0e53ba56808", 47 | "id": "5ECTwv6cZ5nJQPk6tWfaTrEk8YH2L7X1VT4EL5Tx2ikfFwb7" 48 | } 49 | ] 50 | }, 51 | "votes_ancestries": [ 52 | { 53 | "parentHash": "0xb4ab92948e78b5e3115d2ce5ff2207e7d713a7fb33f4a9240e413c00954f244b", 54 | "number": "2", 55 | "stateRoot": "0xfc6bcc63e4adab4a8b7c18e57903cf68e238401026378388f79800ca6d2137a9", 56 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 57 | "digest": { 58 | "logs": [] 59 | }, 60 | "extension": { 61 | "V3": { 62 | "appLookup": { 63 | "size": 0, 64 | "index": [] 65 | }, 66 | "commitment": { 67 | "rows": 0, 68 | "cols": 0, 69 | "commitment": [], 70 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 71 | } 72 | } 73 | } 74 | }, 75 | { 76 | "parentHash": "0x562d0d23f699138437a7ae3070715e1c4e0dce4b45ac79f66c98211a2945ef82", 77 | "number": "3", 78 | "stateRoot": "0x2c18164d44375a5deec96aea2d9e3aab87654c4f926197a5f9570c2c5d006590", 79 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 80 | "digest": { 81 | "logs": [] 82 | }, 83 | "extension": { 84 | "V3": { 85 | "appLookup": { 86 | "size": 0, 87 | "index": [] 88 | }, 89 | "commitment": { 90 | "rows": 0, 91 | "cols": 0, 92 | "commitment": [], 93 | "dataRoot": 
"0x0000000000000000000000000000000000000000000000000000000000000000" 94 | } 95 | } 96 | } 97 | }, 98 | { 99 | "parentHash": "0x562d0d23f699138437a7ae3070715e1c4e0dce4b45ac79f66c98211a2945ef82", 100 | "number": "3", 101 | "stateRoot": "0x3eea87baa82c1d624611fdf832eaa7e28dfb2855ae73b327f22e76aaf3ac0fd2", 102 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 103 | "digest": { 104 | "logs": [] 105 | }, 106 | "extension": { 107 | "V3": { 108 | "appLookup": { 109 | "size": 0, 110 | "index": [] 111 | }, 112 | "commitment": { 113 | "rows": 0, 114 | "cols": 0, 115 | "commitment": [], 116 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 117 | } 118 | } 119 | } 120 | }, 121 | { 122 | "parentHash": "0x7a8c2060754252234eb783787770c66a7b76af1bc983128388247e0aad76fa94", 123 | "number": "3", 124 | "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 125 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 126 | "digest": { 127 | "logs": [] 128 | }, 129 | "extension": { 130 | "V3": { 131 | "appLookup": { 132 | "size": 0, 133 | "index": [] 134 | }, 135 | "commitment": { 136 | "rows": 0, 137 | "cols": 0, 138 | "commitment": [], 139 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 140 | } 141 | } 142 | } 143 | }, 144 | { 145 | "parentHash": "0x810d68cebc8729ad9fa07ad4304238249ae698db495d8362e7d9d72a57e7e9c1", 146 | "number": "4", 147 | "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 148 | "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", 149 | "digest": { 150 | "logs": [] 151 | }, 152 | "extension": { 153 | "V3": { 154 | "appLookup": { 155 | "size": 0, 156 | "index": [] 157 | }, 158 | "commitment": { 159 | "rows": 0, 160 | "cols": 0, 161 | "commitment": [], 162 | "dataRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" 163 | } 164 | } 165 | } 166 | } 167 | ] 168 | } 169 | } 170 | --------------------------------------------------------------------------------