├── .github ├── actions │ └── setup │ │ └── action.yml └── workflows │ ├── pr.yml │ └── pr_lint.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── README.md ├── cli ├── Cargo.toml └── src │ └── main.rs ├── crates ├── algebra │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── univariate.rs ├── alloc │ ├── Cargo.toml │ └── src │ │ ├── allocator.rs │ │ ├── backend │ │ ├── cpu.rs │ │ ├── io.rs │ │ └── mod.rs │ │ ├── buffer.rs │ │ ├── init.rs │ │ ├── lib.rs │ │ ├── mem.rs │ │ ├── raw_buffer.rs │ │ └── slice.rs ├── basefold │ ├── Cargo.toml │ └── src │ │ ├── code.rs │ │ ├── config.rs │ │ ├── lib.rs │ │ └── verifier.rs ├── commit │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ ├── message.rs │ │ ├── rounds.rs │ │ └── tensor_cs.rs ├── derive │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── jagged │ ├── Cargo.toml │ └── src │ │ ├── basefold.rs │ │ ├── config.rs │ │ ├── jagged_eval │ │ ├── mod.rs │ │ └── sumcheck_eval.rs │ │ ├── lib.rs │ │ ├── poly.rs │ │ └── verifier.rs ├── merkle-tree │ ├── Cargo.toml │ └── src │ │ ├── baby_bear_poseidon2.rs │ │ ├── lib.rs │ │ └── tcs.rs ├── multilinear │ ├── Cargo.toml │ └── src │ │ ├── base.rs │ │ ├── eval.rs │ │ ├── lagrange.rs │ │ ├── lib.rs │ │ ├── mle.rs │ │ ├── pcs.rs │ │ └── point.rs ├── primitives │ ├── CHANGELOG.md │ ├── Cargo.toml │ └── src │ │ ├── consts.rs │ │ ├── io.rs │ │ ├── lib.rs │ │ └── types.rs ├── recursion │ ├── README.md │ ├── derive │ │ ├── CHANGELOG.md │ │ ├── Cargo.toml │ │ └── src │ │ │ └── lib.rs │ ├── executor │ │ ├── Cargo.toml │ │ └── src │ │ │ ├── block.rs │ │ │ ├── lib.rs │ │ │ ├── opcode.rs │ │ │ ├── program.rs │ │ │ ├── public_values.rs │ │ │ ├── record.rs │ │ │ └── shape.rs │ └── machine │ │ ├── Cargo.toml │ │ └── src │ │ ├── builder.rs │ │ ├── chips │ │ ├── alu_base.rs │ │ ├── alu_ext.rs │ │ ├── mem │ │ │ ├── constant.rs │ │ │ ├── mod.rs │ │ │ └── variable.rs │ │ ├── mod.rs │ │ ├── poseidon2_wide │ │ │ ├── air.rs │ │ │ ├── columns │ │ │ │ ├── mod.rs │ │ │ │ └── preprocessed.rs │ │ │ ├── mod.rs │ │ │ ├── permutation.rs │ │ │ └── trace.rs │ │ ├── prefix_sum_checks.rs │ │ ├── public_values.rs │ │ └── select.rs │ │ ├── lib.rs │ │ ├── machine.rs │ │ ├── sys.rs │ │ └── verify_compress.rs ├── stacked │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── verifier.rs ├── stark │ ├── CHANGELOG.md │ ├── Cargo.toml │ └── src │ │ ├── air │ │ ├── builder.rs │ │ ├── extension.rs │ │ ├── interaction.rs │ │ ├── machine.rs │ │ ├── mod.rs │ │ └── sub_builder.rs │ │ ├── chip.rs │ │ ├── folder.rs │ │ ├── lib.rs │ │ ├── logup_gkr │ │ ├── mod.rs │ │ ├── proof.rs │ │ └── verifier.rs │ │ ├── lookup │ │ ├── builder.rs │ │ ├── debug.rs │ │ ├── interaction.rs │ │ └── mod.rs │ │ ├── machine.rs │ │ ├── public_values.rs │ │ ├── septic_curve.rs │ │ ├── septic_digest.rs │ │ ├── septic_extension.rs │ │ ├── util.rs │ │ ├── verifier │ │ ├── config.rs │ │ ├── mod.rs │ │ ├── proof.rs │ │ └── shard.rs │ │ └── word.rs ├── sumcheck │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ ├── proof.rs │ │ └── verifier.rs ├── tensor │ ├── Cargo.toml │ └── src │ │ ├── dimensions.rs │ │ ├── inner.rs │ │ └── lib.rs └── utils │ ├── Cargo.toml │ └── src │ ├── lib.rs │ └── logger.rs ├── jagged-polynomial-commitments.pdf ├── message.bin ├── proof.bin ├── rust-toolchain ├── rustfmt.toml └── vk.bin /.github/actions/setup/action.yml: -------------------------------------------------------------------------------- 1 | name: Test setup 2 | inputs: 3 | pull_token: 4 | description: "Token to use for private repo access" 5 | required: true 6 | runs: 7 | using: "composite" 8 | steps: 9 | - name: Set up 
git private repo access 10 | shell: bash 11 | run: | 12 | git config --global url."https://${{ inputs.pull_token }}@github.com/".insteadOf ssh://git@github.com 13 | git config --global url."https://${{ inputs.pull_token }}@github.com".insteadOf https://github.com 14 | 15 | - name: rust-cache 16 | uses: actions/cache@v3 17 | with: 18 | path: | 19 | ~/.cargo/bin/ 20 | ~/.cargo/registry/index/ 21 | ~/.cargo/registry/cache/ 22 | ~/.cargo/git/db/ 23 | target/ 24 | ~/.rustup/ 25 | key: rust-1.81.0-${{ hashFiles('**/Cargo.toml') }} 26 | restore-keys: rust-1.81.0- 27 | 28 | - name: Setup toolchain 29 | id: rustc-toolchain 30 | shell: bash 31 | run: | 32 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain 1.81.0 -y 33 | . "$HOME/.cargo/env" 34 | echo "$HOME/.cargo/bin" >> $GITHUB_PATH -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | name: PR 2 | 3 | on: 4 | push: 5 | branches: [main, dev] 6 | pull_request: 7 | branches: 8 | - "**" 9 | paths: 10 | - "crates/**" 11 | - "examples/**" 12 | - "Cargo.toml" 13 | - ".github/workflows/**" 14 | merge_group: 15 | 16 | concurrency: 17 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 18 | cancel-in-progress: true 19 | 20 | jobs: 21 | test-x86: 22 | name: Test (x86-64) 23 | runs-on: 24 | [ 25 | runs-on, 26 | runner=64cpu-linux-x64, 27 | spot=false, 28 | "run-id=${{ github.run_id }}", 29 | ] 30 | env: 31 | CARGO_NET_GIT_FETCH_WITH_CLI: "true" 32 | steps: 33 | - name: Checkout sources 34 | uses: actions/checkout@v4 35 | 36 | - name: Setup CI 37 | uses: ./.github/actions/setup 38 | with: 39 | pull_token: ${{ secrets.PRIVATE_PULL_TOKEN }} 40 | 41 | - name: Run cargo check 42 | uses: actions-rs/cargo@v1 43 | with: 44 | command: check 45 | toolchain: 1.81.0 46 | args: --all-targets --all-features 47 | 48 | - name: Run cargo test 49 | uses: actions-rs/cargo@v1 50 | with: 51 | command: test 52 | toolchain: 1.81.0 53 | args: --release --workspace 54 | env: 55 | RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -C target-cpu=native 56 | RUST_BACKTRACE: 1 57 | 58 | test-arm: 59 | name: Test (ARM) 60 | runs-on: 61 | [ 62 | runs-on, 63 | runner=64cpu-linux-arm64, 64 | spot=false, 65 | "run-id=${{ github.run_id }}", 66 | ] 67 | env: 68 | CARGO_NET_GIT_FETCH_WITH_CLI: "true" 69 | steps: 70 | - name: Checkout sources 71 | uses: actions/checkout@v4 72 | 73 | - name: Setup CI 74 | uses: ./.github/actions/setup 75 | with: 76 | pull_token: ${{ secrets.PRIVATE_PULL_TOKEN }} 77 | 78 | - name: Run cargo check 79 | uses: actions-rs/cargo@v1 80 | with: 81 | command: check 82 | toolchain: 1.81.0 83 | args: --all-targets --all-features 84 | 85 | - name: Run cargo test 86 | uses: actions-rs/cargo@v1 87 | with: 88 | command: test 89 | toolchain: 1.81.0 90 | args: --release --workspace 91 | env: 92 | RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -C target-cpu=native 93 | RUST_BACKTRACE: 1 94 | 95 | lint: 96 | name: Formatting & Clippy 97 | runs-on: [runs-on, runner=8cpu-linux-x64, "run-id=${{ github.run_id }}"] 98 | steps: 99 | - name: Checkout sources 100 | uses: actions/checkout@v4 101 | 102 | - name: Setup CI 103 | uses: ./.github/actions/setup 104 | with: 105 | pull_token: ${{ secrets.PRIVATE_PULL_TOKEN }} 106 | 107 | - name: Run cargo fmt 108 | uses: actions-rs/cargo@v1 109 | with: 110 | command: fmt 111 | args: 
--all -- --check 112 | env: 113 | CARGO_INCREMENTAL: 1 114 | 115 | - name: Run cargo clippy 116 | uses: actions-rs/cargo@v1 117 | with: 118 | command: clippy 119 | args: --all-features --all-targets -- -D warnings -A incomplete-features 120 | env: 121 | CARGO_INCREMENTAL: 1 -------------------------------------------------------------------------------- /.github/workflows/pr_lint.yml: -------------------------------------------------------------------------------- 1 | name: PR Lint 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - opened 7 | - edited 8 | 9 | permissions: 10 | pull-requests: read 11 | 12 | jobs: 13 | main: 14 | name: Title 15 | runs-on: warp-ubuntu-latest-arm64-4x 16 | steps: 17 | - uses: amannn/action-semantic-pull-request@v5 18 | env: 19 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Cargo build 2 | **/target 3 | 4 | # Cargo config 5 | .cargo 6 | 7 | # Profile-guided optimization 8 | /tmp 9 | pgo-data.profdata 10 | 11 | # MacOS nuisances 12 | .DS_Store 13 | 14 | # Proofs 15 | **/proof-with-pis.bin 16 | **/proof-with-io.bin 17 | 18 | # Benchmark 19 | benchmark.csv 20 | 21 | # Environment 22 | .env 23 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace.package] 2 | version = "1.2.0" 3 | edition = "2021" 4 | repository = "https://github.com/succinctlabs/hypercube-verifier.git" 5 | keywords = ["sp1-hypercube", "succinct", "zero-knowledge"] 6 | categories = ["cryptography"] 7 | 8 | [workspace.lints.clippy] 9 | missing_docs = "warn" 10 | 11 | [workspace] 12 | members = [ 13 | "crates/basefold", 14 | "crates/algebra", 15 | "crates/sumcheck", 16 | "crates/multilinear", 17 | "crates/utils", 18 | "crates/merkle-tree", 19 | "crates/commit", 20 | "crates/tensor", 21 | "crates/alloc", 22 | "crates/jagged", 23 | "crates/stacked", 24 | "crates/stark", 25 | "crates/primitives", 26 | "crates/derive", 27 | "crates/recursion/machine", 28 | "crates/recursion/derive", 29 | "crates/recursion/executor", "cli", 30 | ] 31 | resolver = "2" 32 | 33 | [profile.release] 34 | opt-level = 3 35 | 36 | [profile.bench] 37 | opt-level = 3 38 | 39 | [profile.fast] 40 | inherits = "release" 41 | debug = true 42 | debug-assertions = true 43 | 44 | [workspace.dependencies] 45 | p3-air = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 46 | p3-field = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 47 | p3-commit = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 48 | p3-matrix = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 49 | p3-baby-bear = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config", features = [ 50 | "nightly-features", 51 | ] } 52 | p3-util = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 53 | p3-challenger = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 54 | p3-merkle-tree = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 55 | p3-poseidon2 = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 56 | p3-symmetric = { git = "https://github.com/erabinov/Plonky3/", branch = "clone_config" } 57 | p3-uni-stark = { git = "https://github.com/erabinov/Plonky3/", 
branch = "clone_config" } 58 | 59 | 60 | #hypercube verifier 61 | hypercube-basefold = { path = "crates/basefold" } 62 | hypercube-algebra = { path = "crates/algebra" } 63 | hypercube-sumcheck = { path = "crates/sumcheck" } 64 | hypercube-multilinear = { path = "crates/multilinear" } 65 | hypercube-utils = { path = "crates/utils" } 66 | hypercube-merkle-tree = { path = "crates/merkle-tree" } 67 | hypercube-commit = { path = "crates/commit" } 68 | hypercube-tensor = { path = "crates/tensor" } 69 | hypercube-alloc = { path = "crates/alloc" } 70 | hypercube-jagged = { path = "crates/jagged" } 71 | hypercube-stacked = { path = "crates/stacked" } 72 | sp1-primitives = { path = "crates/primitives" } 73 | sp1-derive = { path = "crates/derive" } 74 | hypercube-stark = { path = "crates/stark" } 75 | hypercube-recursion-executor = { path = "crates/recursion/executor" } 76 | hypercube-recursion-machine = { path = "crates/recursion/machine" } 77 | 78 | # rayon 79 | rayon = "1.10.0" 80 | 81 | # misc 82 | thiserror = "1.0" 83 | itertools = "0.14.0" 84 | derive-where = "1.2.7" 85 | rand = "0.8" 86 | serde = { version = "1.0.217", features = ["derive"] } 87 | tracing = "0.1.40" 88 | tracing-subscriber = "0.3.18" 89 | strum_macros = "0.27.1" 90 | strum = "0.27.1" 91 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # The SP1 Hypercube Verifier 2 | 3 | This is the verifier code for the SP1 Hypercube proof system. 4 | 5 | > [!CAUTION] 6 | > 7 | > As of May 20th, 2025, the SP1 Hypercube proof system is still a research prototype. 8 | > Do not use this code in production. 9 | 10 | To give it a whirl, run 11 | 12 | ```sh 13 | cargo run -- --proof-dir --vk-dir . 14 | ``` 15 | 16 | To get started, you can run the command for the provided `proof.bin` and `vk.bin` files like so: 17 | 18 | ```sh 19 | cargo run -- --proof-dir proof.bin --vk-dir vk.bin 20 | ``` 21 | 22 | 23 | SP1 Hypercube employs a novel protocol: the *jagged polynomial commitment scheme*, details on which 24 | can be found in `jagged-polynomial-commitments.pdf`. 
25 | -------------------------------------------------------------------------------- /cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cli" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | p3-field = { workspace = true } 12 | hypercube-recursion-machine = {workspace = true} 13 | hypercube-stark = {workspace = true} 14 | bincode = "1.3.3" 15 | clap = { version = "4.5.9", features = ["derive", "env"] } 16 | itertools = {workspace = true} 17 | [lints] 18 | workspace = true 19 | -------------------------------------------------------------------------------- /cli/src/main.rs: -------------------------------------------------------------------------------- 1 | use core::panic; 2 | use std::{borrow::Borrow, fs::File, path::PathBuf}; 3 | 4 | use itertools::Itertools; 5 | use p3_field::PrimeField32; 6 | 7 | use clap::Parser; 8 | use hypercube_recursion_machine::{verify_compressed, SP1Proof, SP1ProofWithPublicValues}; 9 | use hypercube_stark::{blake3_hash, sha256_hash, MachineVerifyingKey, PublicValues, Word}; 10 | 11 | #[derive(Parser, Debug)] 12 | #[clap(author, version, about, long_about = None)] 13 | struct Args { 14 | #[clap(short, long)] 15 | proof_dir: PathBuf, 16 | #[clap(short, long)] 17 | vk_dir: PathBuf, 18 | } 19 | 20 | fn main() { 21 | let args = Args::parse(); 22 | 23 | let path = args.proof_dir; 24 | 25 | let mut file = File::open(path).unwrap(); 26 | 27 | let proof: SP1ProofWithPublicValues = bincode::deserialize_from(&mut file).unwrap(); 28 | 29 | let mut file = File::open(args.vk_dir).unwrap(); 30 | let vk: MachineVerifyingKey<_> = bincode::deserialize_from(&mut file).unwrap(); 31 | 32 | if let SP1Proof::Compressed(inner_proof) = &proof.proof { 33 | let public_values: &PublicValues<[_; 4], Word<_>, _> = 34 | inner_proof.proof.public_values.as_slice().borrow(); 35 | 36 | // Get the committed value digest bytes. 37 | let committed_value_digest_bytes = public_values 38 | .committed_value_digest 39 | .iter() 40 | .flat_map(|w| w.iter().map(|x| x.as_canonical_u32() as u8)) 41 | .collect_vec(); 42 | 43 | // Make sure the committed value digest matches the public values hash. 44 | // It is computationally infeasible to find two distinct inputs, one processed with 45 | // SHA256 and the other with Blake3, that yield the same hash value. 
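// Hence checking the digest against both hashes is sound: a match under either
// function binds the committed digest to `proof.public_values`.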
46 | if committed_value_digest_bytes != sha256_hash(proof.public_values.as_byte_slice()) 47 | && committed_value_digest_bytes != blake3_hash(proof.public_values.as_byte_slice()) 48 | { 49 | panic!("Committed value digest does not match public values hash"); 50 | } 51 | } else { 52 | panic!("not a compressed proof"); 53 | } 54 | 55 | let proof = proof.proof; 56 | let proof = match proof { 57 | SP1Proof::Compressed(proof) => proof, 58 | _ => panic!("not a compressed proof"), 59 | }; 60 | 61 | let result = verify_compressed(&proof, &vk); 62 | 63 | assert!(result.is_ok(), "Failed to verify compressed proof"); 64 | 65 | let mut file = File::open("message.bin").unwrap(); 66 | 67 | let message: String = bincode::deserialize_from(&mut file).unwrap(); 68 | 69 | println!("{}", message); 70 | } 71 | -------------------------------------------------------------------------------- /crates/algebra/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-algebra" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | p3-field = { workspace = true } 12 | serde = "1.0.210" 13 | 14 | [dev-dependencies] 15 | -------------------------------------------------------------------------------- /crates/algebra/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod univariate; 2 | 3 | pub use univariate::*; 4 | -------------------------------------------------------------------------------- /crates/algebra/src/univariate.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Add, Mul}; 2 | 3 | use p3_field::{AbstractField, Field}; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] 7 | pub struct UnivariatePolynomial { 8 | pub coefficients: Vec, 9 | } 10 | 11 | impl UnivariatePolynomial { 12 | pub fn new(coefficients: Vec) -> Self { 13 | Self { coefficients } 14 | } 15 | 16 | pub fn mul_by_x(&self) -> Self { 17 | let mut result = Vec::with_capacity(self.coefficients.len() + 1); 18 | result.push(K::zero()); 19 | result.extend(&self.coefficients[..]); 20 | Self::new(result) 21 | } 22 | } 23 | 24 | /// Basic univariate polynomial operations. 25 | impl UnivariatePolynomial { 26 | pub fn zero(degree: usize) -> Self { 27 | Self { coefficients: vec![K::zero(); degree + 1] } 28 | } 29 | 30 | pub fn one(degree: usize) -> Self { 31 | let zeros = vec![K::zero(); degree]; 32 | let mut coefficients = vec![K::one()]; 33 | coefficients.extend(zeros); 34 | Self { coefficients } 35 | } 36 | 37 | pub fn eval_at_point(&self, point: K) -> K { 38 | // Horner's method. 39 | self.coefficients.iter().rev().fold(K::zero(), |acc, x| acc * point.clone() + x.clone()) 40 | } 41 | 42 | pub fn eval_one_plus_eval_zero(&self) -> K { 43 | if self.coefficients.is_empty() { 44 | K::zero() 45 | } else { 46 | self.coefficients[0].clone() + self.coefficients.iter().cloned().sum::() 47 | } 48 | } 49 | } 50 | 51 | impl IntoIterator for UnivariatePolynomial { 52 | type Item = K; 53 | type IntoIter = std::vec::IntoIter; 54 | 55 | fn into_iter(self) -> Self::IntoIter { 56 | self.coefficients.into_iter() 57 | } 58 | } 59 | /// Scalar multiplication for univariate polynomials. 
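/// For example, scaling `1 + 2x + 3x^2` by `2` gives `2 + 4x + 6x^2`, i.e. the coefficient
/// vector `[1, 2, 3]` becomes `[2, 4, 6]`.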
60 | impl Mul for UnivariatePolynomial { 61 | type Output = Self; 62 | 63 | fn mul(self, rhs: K) -> Self::Output { 64 | Self { coefficients: self.coefficients.into_iter().map(|x| x * rhs).collect() } 65 | } 66 | } 67 | 68 | /// Sum of two univariate polynomials. 69 | impl Add for UnivariatePolynomial { 70 | type Output = Self; 71 | 72 | fn add(self, rhs: Self) -> Self::Output { 73 | let mut new_coeffs = vec![K::zero(); self.coefficients.len().max(rhs.coefficients.len())]; 74 | for (i, x) in new_coeffs.iter_mut().enumerate() { 75 | *x = *self.coefficients.get(i).unwrap_or(&K::zero()) 76 | + *rhs.coefficients.get(i).unwrap_or(&K::zero()); 77 | } 78 | UnivariatePolynomial::new(new_coeffs) 79 | } 80 | } 81 | 82 | pub fn interpolate_univariate_polynomial(xs: &[K], ys: &[K]) -> UnivariatePolynomial { 83 | let mut result = UnivariatePolynomial::new(vec![K::zero()]); 84 | for (i, (x, y)) in xs.iter().zip(ys).enumerate() { 85 | let (denominator, numerator) = xs.iter().enumerate().filter(|(j, _)| *j != i).fold( 86 | (K::one(), UnivariatePolynomial::new(vec![*y])), 87 | |(denominator, numerator), (_, xj)| { 88 | (denominator * (*x - *xj), numerator.mul_by_x() + numerator * (-*xj)) 89 | }, 90 | ); 91 | result = result + numerator * denominator.inverse(); 92 | } 93 | result 94 | } 95 | 96 | pub fn rlc_univariate_polynomials( 97 | polys: &[UnivariatePolynomial], 98 | lambda: K, 99 | ) -> UnivariatePolynomial { 100 | let mut result = UnivariatePolynomial::new(vec![K::zero()]); 101 | for poly in polys { 102 | result = result * lambda + poly.clone(); 103 | } 104 | result 105 | } 106 | -------------------------------------------------------------------------------- /crates/alloc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-alloc" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | p3-field = { workspace = true } 12 | 13 | thiserror = { workspace = true } 14 | serde = { workspace = true } 15 | 16 | [lints] 17 | workspace = true 18 | -------------------------------------------------------------------------------- /crates/alloc/src/allocator.rs: -------------------------------------------------------------------------------- 1 | use core::{alloc::Layout, ptr::NonNull}; 2 | use std::{rc::Rc, sync::Arc}; 3 | 4 | use thiserror::Error; 5 | 6 | #[derive(Copy, Clone, PartialEq, Eq, Debug, Error)] 7 | #[error("allocation error")] 8 | pub struct AllocError; 9 | 10 | /// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of 11 | /// data described via [`Layout`][] 12 | /// 13 | /// # Safety 14 | /// 15 | /// * Memory blocks returned from an allocator that are [*currently allocated*] must point to 16 | /// valid memory and retain their validity while they are [*currently allocated*] and the shorter 17 | /// of: 18 | /// - the borrow-checker lifetime of the allocator type itself. 19 | /// - as long as at least one of the instance and all of its clones has not been dropped. 20 | /// 21 | /// * copying, cloning, or moving the allocator must not invalidate memory blocks returned from this 22 | /// allocator. A copied or cloned allocator must behave like the same allocator, and 23 | /// 24 | /// * any pointer to a memory block which is [*currently allocated*] may be passed to any other 25 | /// method of the allocator. 
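///
/// In short: a block returned by `allocate` stays valid until it is passed to `deallocate`
/// with the same layout, and copying, cloning, or moving the allocator value must not
/// invalidate it.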
26 | pub unsafe trait Allocator { 27 | /// Attempts to allocate a block of memory. 28 | /// 29 | /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`. 30 | /// 31 | /// The returned block may have a larger size than specified by `layout.size()`, and may or may 32 | /// not have its contents initialized. 33 | /// 34 | /// The returned block of memory remains valid as long as it is [*currently allocated*] and the shorter of: 35 | /// - the borrow-checker lifetime of the allocator type itself. 36 | /// - as long as at the allocator and all its clones has not been dropped. 37 | /// 38 | /// # Errors 39 | /// 40 | /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet 41 | /// allocator's size or alignment constraints. 42 | /// 43 | /// # Safety 44 | /// 45 | /// The memory is not necessarily available upon return from this function. The caller must 46 | /// ensure that the proper synchronization is performed before using the memory, if necessary. 47 | unsafe fn allocate(&self, layout: Layout) -> Result, AllocError>; 48 | 49 | /// Deallocates the memory referenced by `ptr`. 50 | /// 51 | /// # Safety 52 | /// 53 | /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and 54 | /// * `layout` must [*fit*] that block of memory. 55 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); 56 | 57 | fn by_ref(&self) -> &Self 58 | where 59 | Self: Sized, 60 | { 61 | self 62 | } 63 | } 64 | 65 | unsafe impl Allocator for &A 66 | where 67 | A: Allocator + ?Sized, 68 | { 69 | #[inline] 70 | unsafe fn allocate(&self, layout: Layout) -> Result, AllocError> { 71 | (**self).allocate(layout) 72 | } 73 | 74 | #[inline] 75 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 76 | // SAFETY: the safety contract must be upheld by the caller 77 | unsafe { (**self).deallocate(ptr, layout) } 78 | } 79 | } 80 | 81 | unsafe impl Allocator for Rc 82 | where 83 | A: Allocator + ?Sized, 84 | { 85 | #[inline] 86 | unsafe fn allocate(&self, layout: Layout) -> Result, AllocError> { 87 | (**self).allocate(layout) 88 | } 89 | 90 | #[inline] 91 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 92 | // SAFETY: the safety contract must be upheld by the caller 93 | unsafe { (**self).deallocate(ptr, layout) } 94 | } 95 | } 96 | 97 | unsafe impl Allocator for Arc 98 | where 99 | A: Allocator + ?Sized, 100 | { 101 | #[inline] 102 | unsafe fn allocate(&self, layout: Layout) -> Result, AllocError> { 103 | (**self).allocate(layout) 104 | } 105 | 106 | #[inline] 107 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 108 | // SAFETY: the safety contract must be upheld by the caller 109 | unsafe { (**self).deallocate(ptr, layout) } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /crates/alloc/src/backend/cpu.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | alloc::Layout, 3 | ptr::{self, NonNull}, 4 | }; 5 | 6 | use crate::{ 7 | mem::{CopyDirection, CopyError, DeviceMemory}, 8 | AllocError, Allocator, 9 | }; 10 | 11 | use super::{Backend, GlobalBackend}; 12 | 13 | pub const GLOBAL_CPU_BACKEND: CpuBackend = CpuBackend; 14 | 15 | #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] 16 | pub struct CpuBackend; 17 | 18 | impl GlobalBackend for CpuBackend { 19 | fn global() -> &'static Self { 20 | &GLOBAL_CPU_BACKEND 21 | } 22 | } 23 | 24 | unsafe impl 
Allocator for CpuBackend { 25 | #[inline] 26 | unsafe fn allocate(&self, layout: Layout) -> Result, AllocError> { 27 | let ptr = std::alloc::alloc(layout); 28 | Ok(NonNull::slice_from_raw_parts(NonNull::new_unchecked(ptr), layout.size())) 29 | } 30 | 31 | #[inline] 32 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 33 | std::alloc::dealloc(ptr.as_ptr(), layout); 34 | } 35 | } 36 | 37 | impl DeviceMemory for CpuBackend { 38 | #[inline] 39 | unsafe fn copy_nonoverlapping( 40 | &self, 41 | src: *const u8, 42 | dst: *mut u8, 43 | size: usize, 44 | _direction: CopyDirection, 45 | ) -> Result<(), CopyError> { 46 | src.copy_to_nonoverlapping(dst, size); 47 | Ok(()) 48 | } 49 | 50 | #[inline] 51 | unsafe fn write_bytes(&self, dst: *mut u8, value: u8, size: usize) -> Result<(), CopyError> { 52 | dst.write_bytes(value, size); 53 | Ok(()) 54 | } 55 | } 56 | 57 | unsafe impl Backend for CpuBackend {} 58 | -------------------------------------------------------------------------------- /crates/alloc/src/backend/io.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | 3 | use crate::mem::CopyError; 4 | 5 | use super::{Backend, CpuBackend, GlobalBackend, HasBackend}; 6 | 7 | /// Copy data between different backends 8 | pub trait CopyIntoBackend: HasBackend { 9 | type Output: HasBackend; 10 | fn copy_into_backend( 11 | self, 12 | backend: &Dst, 13 | ) -> impl Future> + Send + Sync; 14 | } 15 | 16 | impl CopyIntoBackend for T 17 | where 18 | A: Backend, 19 | T: HasBackend + Send + Sync, 20 | { 21 | type Output = T; 22 | async fn copy_into_backend(self, _backend: &A) -> Result { 23 | Ok(self) 24 | } 25 | } 26 | 27 | pub trait CanCopyFrom: Backend 28 | where 29 | Src: Backend, 30 | { 31 | type Output; 32 | fn copy_into( 33 | &self, 34 | value: T, 35 | ) -> impl Future> + Send + Sync; 36 | } 37 | 38 | pub trait CanCopyInto: Backend 39 | where 40 | Dst: Backend, 41 | { 42 | type Output; 43 | fn copy_to_dst( 44 | dst: &Dst, 45 | value: T, 46 | ) -> impl Future> + Send + Sync; 47 | } 48 | 49 | impl CanCopyInto for A 50 | where 51 | A: Backend, 52 | Dst: Backend, 53 | T: HasBackend, 54 | Dst: CanCopyFrom, 55 | { 56 | type Output = >::Output; 57 | fn copy_to_dst( 58 | dst: &Dst, 59 | value: T, 60 | ) -> impl Future> + Send + Sync { 61 | dst.copy_into(value) 62 | } 63 | } 64 | 65 | impl CanCopyFrom for A 66 | where 67 | A: Backend, 68 | Src: Backend, 69 | T: CopyIntoBackend, 70 | { 71 | type Output = T::Output; 72 | fn copy_into( 73 | &self, 74 | value: T, 75 | ) -> impl Future> + Send + Sync { 76 | value.copy_into_backend(self) 77 | } 78 | } 79 | 80 | pub trait CanCopyFromRef: Backend 81 | where 82 | Src: Backend, 83 | { 84 | type Output; 85 | fn copy_to( 86 | &self, 87 | value: &T, 88 | ) -> impl Future> + Send + Sync; 89 | } 90 | 91 | pub trait CanCopyIntoRef: Backend 92 | where 93 | Dst: Backend, 94 | { 95 | type Output; 96 | fn copy_to_dst( 97 | dst: &Dst, 98 | value: &T, 99 | ) -> impl Future> + Send + Sync; 100 | } 101 | 102 | impl CanCopyIntoRef for Src 103 | where 104 | Src: Backend, 105 | Dst: Backend, 106 | T: HasBackend, 107 | Dst: CanCopyFromRef, 108 | { 109 | type Output = >::Output; 110 | fn copy_to_dst( 111 | dst: &Dst, 112 | value: &T, 113 | ) -> impl Future> + Send + Sync { 114 | dst.copy_to(value) 115 | } 116 | } 117 | 118 | impl CanCopyFromRef for A 119 | where 120 | A: Backend, 121 | Src: Backend, 122 | T: CopyToBackend, 123 | { 124 | type Output = >::Output; 125 | fn copy_to( 126 | &self, 127 | value: &T, 128 | ) -> 
impl Future> + Send + Sync { 129 | value.copy_to_backend(self) 130 | } 131 | } 132 | 133 | pub trait CopyToBackend: HasBackend { 134 | type Output: HasBackend; 135 | fn copy_to_backend( 136 | &self, 137 | backend: &Dst, 138 | ) -> impl Future> + Send + Sync; 139 | } 140 | 141 | impl + Clone + Sync, A: Backend> CopyToBackend for T { 142 | type Output = T; 143 | async fn copy_to_backend(&self, _backend: &A) -> Result { 144 | Ok(self.clone()) 145 | } 146 | } 147 | 148 | pub trait IntoGlobal: HasBackend { 149 | type Output; 150 | fn into_global(self) -> impl Future> + Send; 151 | } 152 | 153 | impl IntoGlobal for T 154 | where 155 | T: HasBackend, 156 | T::Backend: CanCopyInto, 157 | { 158 | type Output = >::Output; 159 | #[inline] 160 | fn into_global(self) -> impl Future> + Send { 161 | >::copy_to_dst(Dst::global(), self) 162 | } 163 | } 164 | 165 | pub trait ToGlobal: HasBackend { 166 | type Output; 167 | fn to_global(&self) -> impl Future> + Send; 168 | } 169 | 170 | impl ToGlobal for T 171 | where 172 | T: HasBackend, 173 | T::Backend: CanCopyIntoRef, 174 | { 175 | type Output = >::Output; 176 | #[inline] 177 | fn to_global(&self) -> impl Future> + Send { 178 | >::copy_to_dst(Dst::global(), self) 179 | } 180 | } 181 | 182 | pub trait IntoHost: IntoGlobal + Sized { 183 | #[inline] 184 | fn into_host(self) -> impl Future> + Send { 185 | self.into_global() 186 | } 187 | } 188 | 189 | impl IntoHost for T where T: IntoGlobal {} 190 | 191 | pub trait ToHost: ToGlobal { 192 | #[inline] 193 | fn to_host(&self) -> impl Future> + Send { 194 | self.to_global() 195 | } 196 | } 197 | 198 | impl ToHost for T where T: ToGlobal {} 199 | -------------------------------------------------------------------------------- /crates/alloc/src/backend/mod.rs: -------------------------------------------------------------------------------- 1 | mod cpu; 2 | mod io; 3 | 4 | use std::{borrow::Cow, fmt::Debug, future::Future, rc::Rc, sync::Arc}; 5 | 6 | pub use cpu::*; 7 | pub use io::*; 8 | 9 | use crate::{ 10 | mem::{CopyError, DeviceMemory}, 11 | Allocator, 12 | }; 13 | 14 | /// # Safety 15 | /// 16 | /// TODO 17 | pub unsafe trait Backend: 18 | Sized + Allocator + DeviceMemory + Clone + Debug + Send + Sync + 'static 19 | { 20 | fn copy_from(&self, data: T) -> impl Future> + Send 21 | where 22 | B: Backend, 23 | T: HasBackend + CopyIntoBackend, 24 | { 25 | data.copy_into_backend(self) 26 | } 27 | } 28 | 29 | pub trait GlobalBackend: Backend + 'static { 30 | fn global() -> &'static Self; 31 | } 32 | 33 | pub trait HasBackend { 34 | type Backend: Backend; 35 | 36 | fn backend(&self) -> &Self::Backend; 37 | } 38 | 39 | impl<'a, T> HasBackend for &'a T 40 | where 41 | T: HasBackend, 42 | { 43 | type Backend = T::Backend; 44 | 45 | fn backend(&self) -> &Self::Backend { 46 | (**self).backend() 47 | } 48 | } 49 | 50 | impl<'a, T> HasBackend for &'a mut T 51 | where 52 | T: HasBackend, 53 | { 54 | type Backend = T::Backend; 55 | 56 | fn backend(&self) -> &Self::Backend { 57 | (**self).backend() 58 | } 59 | } 60 | 61 | impl<'a, T> HasBackend for Cow<'a, T> 62 | where 63 | T: HasBackend + Clone, 64 | { 65 | type Backend = T::Backend; 66 | 67 | fn backend(&self) -> &Self::Backend { 68 | self.as_ref().backend() 69 | } 70 | } 71 | 72 | impl HasBackend for Box 73 | where 74 | T: HasBackend, 75 | { 76 | type Backend = T::Backend; 77 | 78 | fn backend(&self) -> &Self::Backend { 79 | self.as_ref().backend() 80 | } 81 | } 82 | 83 | impl HasBackend for Arc 84 | where 85 | T: HasBackend, 86 | { 87 | type Backend = T::Backend; 88 | 89 | 
fn backend(&self) -> &Self::Backend { 90 | self.as_ref().backend() 91 | } 92 | } 93 | 94 | impl HasBackend for Rc 95 | where 96 | T: HasBackend, 97 | { 98 | type Backend = T::Backend; 99 | 100 | fn backend(&self) -> &Self::Backend { 101 | self.as_ref().backend() 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /crates/alloc/src/init.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | alloc::Layout, 3 | marker::PhantomData, 4 | mem::MaybeUninit, 5 | ops::{Deref, DerefMut}, 6 | }; 7 | 8 | use crate::{backend::CpuBackend, mem::CopyDirection, Allocator, Backend}; 9 | 10 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] 11 | #[repr(transparent)] 12 | pub struct Init { 13 | inner: T, 14 | _marker: PhantomData, 15 | } 16 | 17 | impl Init { 18 | #[inline] 19 | pub const fn as_ptr(&self) -> *const T { 20 | &self.inner 21 | } 22 | 23 | #[inline] 24 | pub fn as_mut_ptr(&mut self) -> *mut T { 25 | &mut self.inner 26 | } 27 | 28 | pub fn copy_into_host(&self, alloc: &A) -> T 29 | where 30 | A: Backend, 31 | T: Copy, 32 | { 33 | let mut value = MaybeUninit::::uninit(); 34 | let layout = Layout::new::(); 35 | unsafe { 36 | alloc 37 | .copy_nonoverlapping( 38 | self.as_ptr() as *const u8, 39 | value.as_mut_ptr() as *mut u8, 40 | layout.size(), 41 | CopyDirection::DeviceToHost, 42 | ) 43 | .unwrap(); 44 | 45 | value.assume_init() 46 | } 47 | } 48 | } 49 | 50 | impl Deref for Init { 51 | type Target = T; 52 | 53 | #[inline] 54 | fn deref(&self) -> &Self::Target { 55 | &self.inner 56 | } 57 | } 58 | 59 | impl DerefMut for Init { 60 | #[inline] 61 | fn deref_mut(&mut self) -> &mut Self::Target { 62 | &mut self.inner 63 | } 64 | } 65 | 66 | impl Clone for Init { 67 | fn clone(&self) -> Self { 68 | Self { inner: self.inner.clone(), _marker: PhantomData } 69 | } 70 | } 71 | 72 | impl Copy for Init {} 73 | -------------------------------------------------------------------------------- /crates/alloc/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod allocator; 2 | mod backend; 3 | mod buffer; 4 | mod init; 5 | pub mod mem; 6 | mod raw_buffer; 7 | mod slice; 8 | 9 | pub use allocator::*; 10 | pub use buffer::Buffer; 11 | pub use init::Init; 12 | pub use slice::Slice; 13 | 14 | pub use backend::*; 15 | pub use raw_buffer::{RawBuffer, TryReserveError}; 16 | -------------------------------------------------------------------------------- /crates/alloc/src/mem.rs: -------------------------------------------------------------------------------- 1 | // use std::{rc::Rc, sync::Arc}; 2 | 3 | use thiserror::Error; 4 | 5 | // /// The [AllocError] error indicates an allocation failure that may be due to resource exhaustion 6 | // /// or to something wrong when combining the given input arguments with this allocator. 7 | // #[derive(Copy, Clone, PartialEq, Eq, Debug, Error)] 8 | // #[error("allocation error")] 9 | // pub struct AllocError; 10 | 11 | #[derive(Copy, Clone, PartialEq, Eq, Debug, Error)] 12 | #[error("copy error")] 13 | pub struct CopyError; 14 | 15 | /// The [CopyDirection] enum represents the direction of a memory copy operation. 16 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 17 | pub enum CopyDirection { 18 | HostToDevice, 19 | DeviceToHost, 20 | DeviceToDevice, 21 | } 22 | 23 | /// A trait that defines memory operations for a device. 
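///
/// Its two methods mirror `ptr::copy_nonoverlapping` and `ptr::write_bytes`; the extra
/// [CopyDirection] argument lets a backend dispatch host-to-device, device-to-host, and
/// device-to-device transfers (the CPU backend treats all of them as plain memory copies).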
24 | pub trait DeviceMemory { 25 | /// # Safety 26 | /// 27 | unsafe fn copy_nonoverlapping( 28 | &self, 29 | src: *const u8, 30 | dst: *mut u8, 31 | size: usize, 32 | direction: CopyDirection, 33 | ) -> Result<(), CopyError>; 34 | 35 | /// TODO 36 | /// 37 | /// # Safety 38 | unsafe fn write_bytes(&self, dst: *mut u8, value: u8, size: usize) -> Result<(), CopyError>; 39 | } 40 | 41 | // impl<'a, T: DeviceMemory> DeviceMemory for &'a T { 42 | // #[inline] 43 | // unsafe fn copy_nonoverlapping( 44 | // &self, 45 | // src: *const u8, 46 | // dst: *mut u8, 47 | // size: usize, 48 | // direction: CopyDirection, 49 | // ) -> Result<(), CopyError> { 50 | // (**self).copy_nonoverlapping(src, dst, size, direction) 51 | // } 52 | 53 | // #[inline] 54 | // unsafe fn write_bytes(&self, dst: *mut u8, value: u8, size: usize) -> Result<(), CopyError> { 55 | // (**self).write_bytes(dst, value, size) 56 | // } 57 | // } 58 | 59 | // impl DeviceMemory for Rc { 60 | // #[inline] 61 | // unsafe fn copy_nonoverlapping( 62 | // &self, 63 | // src: *const u8, 64 | // dst: *mut u8, 65 | // size: usize, 66 | // direction: CopyDirection, 67 | // ) -> Result<(), CopyError> { 68 | // (**self).copy_nonoverlapping(src, dst, size, direction) 69 | // } 70 | 71 | // #[inline] 72 | // unsafe fn write_bytes(&self, dst: *mut u8, value: u8, size: usize) -> Result<(), CopyError> { 73 | // (**self).write_bytes(dst, value, size) 74 | // } 75 | // } 76 | 77 | // impl DeviceMemory for Arc { 78 | // #[inline] 79 | // unsafe fn copy_nonoverlapping( 80 | // &self, 81 | // src: *const u8, 82 | // dst: *mut u8, 83 | // size: usize, 84 | // direction: CopyDirection, 85 | // ) -> Result<(), CopyError> { 86 | // (**self).copy_nonoverlapping(src, dst, size, direction) 87 | // } 88 | 89 | // #[inline] 90 | // unsafe fn write_bytes(&self, dst: *mut u8, value: u8, size: usize) -> Result<(), CopyError> { 91 | // (**self).write_bytes(dst, value, size) 92 | // } 93 | // } 94 | -------------------------------------------------------------------------------- /crates/alloc/src/slice.rs: -------------------------------------------------------------------------------- 1 | use std::alloc::Layout; 2 | use std::marker::PhantomData; 3 | use std::ops::{ 4 | Deref, DerefMut, Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, 5 | RangeToInclusive, 6 | }; 7 | 8 | use crate::backend::CpuBackend; 9 | use crate::mem::{CopyDirection, CopyError, DeviceMemory}; 10 | use crate::{Allocator, Init}; 11 | 12 | /// A slice of data associated with a specific allocator type. 
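/// For example, a slice living in host memory is tagged with [CpuBackend], while a slice in
/// device memory is tagged with that device's backend type.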
13 | /// 14 | /// This type is essentially a wrapper around a slice, with a marker for the allocator type 15 | /// to indicate where the memory resides. 16 | #[repr(transparent)] 17 | pub struct Slice { 18 | allocator: PhantomData, 19 | slice: [T], 20 | } 21 | 22 | impl Slice { 23 | #[inline] 24 | pub const fn len(&self) -> usize { 25 | self.slice.len() 26 | } 27 | 28 | #[inline] 29 | pub const fn is_empty(&self) -> bool { 30 | self.slice.is_empty() 31 | } 32 | 33 | #[inline] 34 | pub fn as_ptr(&self) -> *const T { 35 | self.slice.as_ptr() 36 | } 37 | 38 | #[inline] 39 | pub fn as_mut_ptr(&mut self) -> *mut T { 40 | self.slice.as_mut_ptr() 41 | } 42 | 43 | #[inline(always)] 44 | pub(crate) unsafe fn from_slice(src: &[T]) -> &Self { 45 | &*(src as *const [T] as *const Self) 46 | } 47 | 48 | /// # Safety 49 | #[inline] 50 | pub unsafe fn from_raw_parts<'a>(data: *const T, len: usize) -> &'a Self { 51 | Self::from_slice(std::slice::from_raw_parts(data, len)) 52 | } 53 | 54 | #[inline(always)] 55 | pub(crate) unsafe fn from_slice_mut(src: &mut [T]) -> &mut Self { 56 | &mut *(src as *mut [T] as *mut Self) 57 | } 58 | 59 | /// # Safety 60 | pub unsafe fn from_raw_parts_mut<'a>(data: *mut T, len: usize) -> &'a mut Self { 61 | Self::from_slice_mut(std::slice::from_raw_parts_mut(data, len)) 62 | } 63 | 64 | #[inline] 65 | pub fn split_at_mut(&mut self, mid: usize) -> (&mut Self, &mut Self) { 66 | let (left, right) = self.slice.split_at_mut(mid); 67 | unsafe { (Self::from_slice_mut(left), Self::from_slice_mut(right)) } 68 | } 69 | 70 | #[inline] 71 | pub fn split_at(&self, mid: usize) -> (&Self, &Self) { 72 | let (left, right) = self.slice.split_at(mid); 73 | unsafe { (Self::from_slice(left), Self::from_slice(right)) } 74 | } 75 | 76 | /// Copies all elements from `src` into `self`, using `copy_nonoverlapping`. 77 | /// 78 | /// The length of `src` must be the same as `self`. 79 | /// 80 | /// # Panics 81 | /// 82 | /// This function will panic if the two slices have different lengths; a failed copy is 83 | /// reported through the returned [CopyError]. 84 | /// 85 | /// # Safety 86 | /// This operation is potentially asynchronous. The caller must ensure the memory of the source 87 | /// is valid for the duration of the operation. 88 | #[inline] 89 | #[track_caller] 90 | pub unsafe fn copy_from_slice( 91 | &mut self, 92 | src: &Slice, 93 | allocator: &A, 94 | ) -> Result<(), CopyError> 95 | where 96 | A: DeviceMemory, 97 | { 98 | // The panic code path was put into a cold function to not bloat the 99 | // call site. 100 | #[inline(never)] 101 | #[cold] 102 | #[track_caller] 103 | fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! { 104 | panic!( 105 | "source slice length ({}) does not match destination slice length ({})", 106 | src_len, dst_len, 107 | ); 108 | } 109 | 110 | if self.len() != src.len() { 111 | len_mismatch_fail(self.len(), src.len()); 112 | } 113 | 114 | let layout = Layout::array::<T>(src.len()).unwrap(); 115 | 116 | unsafe { 117 | allocator.copy_nonoverlapping( 118 | src.as_ptr() as *const u8, 119 | self.as_mut_ptr() as *mut u8, 120 | layout.size(), 121 | CopyDirection::DeviceToDevice, 122 | ) 123 | } 124 | } 125 | } 126 | 127 | macro_rules! 
impl_index { 128 | ($($t:ty)*) => { 129 | $( 130 | impl Index<$t> for Slice 131 | { 132 | type Output = Slice; 133 | 134 | fn index(&self, index: $t) -> &Self { 135 | unsafe { Slice::from_slice(self.slice.index(index)) } 136 | } 137 | } 138 | 139 | impl IndexMut<$t> for Slice 140 | { 141 | fn index_mut(&mut self, index: $t) -> &mut Self { 142 | unsafe { Slice::from_slice_mut( self.slice.index_mut(index)) } 143 | } 144 | } 145 | )* 146 | } 147 | } 148 | 149 | impl_index! { 150 | Range 151 | RangeFull 152 | RangeFrom 153 | RangeInclusive 154 | RangeTo 155 | RangeToInclusive 156 | } 157 | 158 | impl Index for Slice { 159 | type Output = Init; 160 | 161 | #[inline] 162 | fn index(&self, index: usize) -> &Self::Output { 163 | let ptr = self.slice.index(index) as *const T as *const Init; 164 | unsafe { ptr.as_ref().unwrap() } 165 | } 166 | } 167 | 168 | impl IndexMut for Slice { 169 | #[inline] 170 | fn index_mut(&mut self, index: usize) -> &mut Self::Output { 171 | let ptr = self.slice.index_mut(index) as *mut T as *mut Init; 172 | unsafe { ptr.as_mut().unwrap() } 173 | } 174 | } 175 | 176 | impl Slice { 177 | #[inline] 178 | pub fn to_vec(&self) -> Vec 179 | where 180 | T: Clone, 181 | { 182 | self.slice.to_vec() 183 | } 184 | } 185 | 186 | impl Deref for Slice { 187 | type Target = [T]; 188 | 189 | fn deref(&self) -> &Self::Target { 190 | unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) } 191 | } 192 | } 193 | 194 | impl DerefMut for Slice { 195 | fn deref_mut(&mut self) -> &mut Self::Target { 196 | unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) } 197 | } 198 | } 199 | 200 | impl PartialEq for Slice { 201 | fn eq(&self, other: &Self) -> bool { 202 | self.slice == other.slice 203 | } 204 | } 205 | 206 | impl Eq for Slice {} 207 | -------------------------------------------------------------------------------- /crates/basefold/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-basefold" 3 | version.workspace = true 4 | edition.workspace = true 5 | repository.workspace = true 6 | keywords.workspace = true 7 | categories.workspace = true 8 | 9 | [dependencies] 10 | p3-field = { workspace = true } 11 | hypercube-multilinear = { workspace = true } 12 | p3-challenger = { workspace = true } 13 | hypercube-utils = { workspace = true } 14 | hypercube-commit = { workspace = true } 15 | hypercube-tensor = { workspace = true } 16 | hypercube-alloc = { workspace = true } 17 | p3-baby-bear = { workspace = true } 18 | hypercube-merkle-tree = { workspace = true } 19 | 20 | thiserror = "1.0" 21 | itertools = "0.12.0" 22 | serde = { version = "1.0.207", features = ["derive"] } 23 | derive-where = { workspace = true } 24 | 25 | [dev-dependencies] 26 | p3-baby-bear = { workspace = true } 27 | -------------------------------------------------------------------------------- /crates/basefold/src/code.rs: -------------------------------------------------------------------------------- 1 | use derive_where::derive_where; 2 | use hypercube_alloc::{Backend, CpuBackend, HasBackend}; 3 | use hypercube_tensor::Tensor; 4 | use p3_field::{AbstractField, TwoAdicField}; 5 | use serde::{Deserialize, Serialize}; 6 | use std::{ 7 | borrow::{Borrow, BorrowMut}, 8 | marker::PhantomData, 9 | }; 10 | 11 | #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] 12 | pub struct FriConfig { 13 | pub log_blowup: usize, 14 | pub num_queries: usize, 15 | pub proof_of_work_bits: usize, 16 | 
_marker: PhantomData, 17 | } 18 | 19 | impl FriConfig { 20 | #[inline] 21 | pub const fn new(log_blowup: usize, num_queries: usize, proof_of_work_bits: usize) -> Self { 22 | Self { log_blowup, num_queries, proof_of_work_bits, _marker: PhantomData } 23 | } 24 | 25 | pub fn auto(log_blowup: usize, bits_of_security: usize) -> Self { 26 | assert_eq!(bits_of_security, 100); 27 | assert_eq!(log_blowup, 1); 28 | let num_queries = 100; 29 | let proof_of_work_bits = 16; 30 | Self::new(log_blowup, num_queries, proof_of_work_bits) 31 | } 32 | 33 | #[inline] 34 | pub const fn log_blowup(&self) -> usize { 35 | self.log_blowup 36 | } 37 | 38 | #[inline] 39 | pub const fn num_queries(&self) -> usize { 40 | self.num_queries 41 | } 42 | 43 | #[inline] 44 | pub const fn proof_of_work_bits(&self) -> usize { 45 | self.proof_of_work_bits 46 | } 47 | } 48 | 49 | #[derive(Debug, Clone, Serialize, Deserialize)] 50 | #[derive_where(PartialEq, Eq; Tensor)] 51 | #[serde(bound( 52 | serialize = "Tensor: Serialize", 53 | deserialize = "Tensor: Deserialize<'de>" 54 | ))] 55 | pub struct RsCodeWord { 56 | pub data: Tensor, 57 | } 58 | 59 | impl RsCodeWord { 60 | pub const fn new(data: Tensor) -> Self { 61 | Self { data } 62 | } 63 | } 64 | 65 | impl Borrow> for RsCodeWord { 66 | fn borrow(&self) -> &Tensor { 67 | &self.data 68 | } 69 | } 70 | 71 | impl BorrowMut> for RsCodeWord { 72 | fn borrow_mut(&mut self) -> &mut Tensor { 73 | &mut self.data 74 | } 75 | } 76 | 77 | impl HasBackend for RsCodeWord { 78 | type Backend = A; 79 | 80 | #[inline] 81 | fn backend(&self) -> &Self::Backend { 82 | self.data.backend() 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /crates/basefold/src/config.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | use std::marker::PhantomData; 3 | 4 | use hypercube_commit::TensorCs; 5 | use hypercube_merkle_tree::{my_bb_16_perm, MerkleTreeTcs, Perm, Poseidon2BabyBearConfig}; 6 | use p3_baby_bear::BabyBear; 7 | use p3_challenger::{CanObserve, DuplexChallenger, FieldChallenger, GrindingChallenger}; 8 | use p3_field::{extension::BinomialExtensionField, ExtensionField, TwoAdicField}; 9 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; 10 | 11 | use crate::{BasefoldVerifier, FriConfig}; 12 | 13 | /// The configuration required for a Reed-Solomon-based Basefold. 14 | pub trait BasefoldConfig: 15 | 'static + Clone + Debug + Send + Sync + Serialize + DeserializeOwned 16 | { 17 | /// The base field. 18 | /// 19 | /// This is the field over which the committed MLEs are defined. 20 | type F: TwoAdicField; 21 | /// The field of random elements. 22 | /// 23 | /// This is an extension field of the base field which is of cryptographically secure size. The 24 | /// random evaluation points of the protocol are drawn from `EF`. 25 | type EF: ExtensionField; 26 | 27 | type Commitment: 'static + Clone + Send + Sync + Serialize + DeserializeOwned; 28 | 29 | /// The tensor commitment scheme. 30 | /// 31 | /// The tensor commitment scheme is used to send long messages in the protocol by converting 32 | /// them to a tensor commitment providing oracle access. 33 | type Tcs: TensorCs; 34 | /// The challenger type that creates the random challenges via Fiat-Shamir. 35 | /// 36 | /// The challenger observes all the messages sent throughout the protocol and uses them 37 | /// to create the verifier messages of the IOP. 
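/// (In the BabyBear instantiation below, this is a Poseidon2-based [DuplexChallenger].)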
38 | type Challenger: FieldChallenger 39 | + GrindingChallenger 40 | + CanObserve 41 | + 'static 42 | + Send 43 | + Sync 44 | + Clone; 45 | 46 | fn default_challenger(_verifier: &BasefoldVerifier) -> Self::Challenger; 47 | } 48 | 49 | pub trait DefaultBasefoldConfig: BasefoldConfig + Sized { 50 | fn default_verifier(log_blowup: usize) -> BasefoldVerifier; 51 | } 52 | 53 | #[derive(Clone, Serialize, Deserialize)] 54 | pub struct BasefoldConfigImpl(PhantomData<(F, EF, Tcs, Challenger)>); 55 | 56 | impl std::fmt::Debug for BasefoldConfigImpl { 57 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 58 | write!(f, "BasefoldConfigImpl") 59 | } 60 | } 61 | 62 | impl Default for BasefoldConfigImpl { 63 | fn default() -> Self { 64 | Self(PhantomData) 65 | } 66 | } 67 | 68 | pub type Poseidon2BabyBear16BasefoldConfig = BasefoldConfigImpl< 69 | BabyBear, 70 | BinomialExtensionField, 71 | MerkleTreeTcs, 72 | DuplexChallenger, 73 | >; 74 | 75 | impl BasefoldConfig for Poseidon2BabyBear16BasefoldConfig { 76 | type F = BabyBear; 77 | type EF = BinomialExtensionField; 78 | type Commitment = as TensorCs>::Commitment; 79 | type Tcs = MerkleTreeTcs; 80 | type Challenger = DuplexChallenger; 81 | 82 | fn default_challenger( 83 | _verifier: &BasefoldVerifier, 84 | ) -> DuplexChallenger { 85 | let default_perm = my_bb_16_perm(); 86 | DuplexChallenger::::new(default_perm) 87 | } 88 | } 89 | 90 | impl DefaultBasefoldConfig for Poseidon2BabyBear16BasefoldConfig { 91 | fn default_verifier(log_blowup: usize) -> BasefoldVerifier { 92 | let fri_config = FriConfig::::auto(log_blowup, 100); 93 | let tcs = MerkleTreeTcs::::default(); 94 | BasefoldVerifier:: { fri_config, tcs } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /crates/basefold/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod code; 2 | mod config; 3 | mod verifier; 4 | 5 | pub use code::*; 6 | pub use config::*; 7 | pub use verifier::*; 8 | -------------------------------------------------------------------------------- /crates/commit/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-commit" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | p3-commit = { workspace = true } 12 | 13 | hypercube-tensor = { workspace = true } 14 | hypercube-alloc = { workspace = true } 15 | serde = { workspace = true, features = ["rc"] } 16 | -------------------------------------------------------------------------------- /crates/commit/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub use p3_commit::*; 2 | 3 | mod message; 4 | mod rounds; 5 | mod tensor_cs; 6 | 7 | pub use message::*; 8 | pub use rounds::*; 9 | pub use tensor_cs::*; 10 | -------------------------------------------------------------------------------- /crates/commit/src/message.rs: -------------------------------------------------------------------------------- 1 | use std::{ops::Deref, sync::Arc}; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | /// A message sent to a prover. 6 | /// 7 | /// In a commitment scheme, the prover can send messages to the verifier and later make structural 8 | /// claims about them. The [Message] struct is used as input to the prover when sending the actual 9 | /// data. 
The main usefulness of this struct is that it is cheap to clone if the number of different 10 | /// message batches sent is small. 11 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] 12 | pub struct Message { 13 | values: Vec>, 14 | } 15 | 16 | impl Default for Message { 17 | fn default() -> Self { 18 | Self { values: vec![] } 19 | } 20 | } 21 | 22 | impl From> for Message { 23 | fn from(value: Vec) -> Self { 24 | let values = value.into_iter().map(|t| Arc::new(t)).collect(); 25 | Self { values } 26 | } 27 | } 28 | 29 | impl From>> for Message { 30 | fn from(value: Vec>) -> Self { 31 | Self { values: value } 32 | } 33 | } 34 | 35 | impl FromIterator for Message { 36 | fn from_iter>(iter: I) -> Self { 37 | let values = iter.into_iter().map(|t| Arc::new(t)).collect::>(); 38 | Self { values } 39 | } 40 | } 41 | 42 | impl FromIterator> for Message { 43 | fn from_iter>>(iter: I) -> Self { 44 | let values = iter.into_iter().collect::>(); 45 | Self { values } 46 | } 47 | } 48 | 49 | impl IntoIterator for Message { 50 | type Item = Arc; 51 | type IntoIter = > as IntoIterator>::IntoIter; 52 | fn into_iter(self) -> Self::IntoIter { 53 | self.values.into_iter() 54 | } 55 | } 56 | 57 | impl Deref for Message { 58 | type Target = Vec>; 59 | fn deref(&self) -> &Self::Target { 60 | &self.values 61 | } 62 | } 63 | 64 | impl From> for Message { 65 | #[inline] 66 | fn from(value: Arc) -> Self { 67 | Self { values: vec![value] } 68 | } 69 | } 70 | 71 | impl From for Message { 72 | #[inline] 73 | fn from(value: T) -> Self { 74 | Self::from(Arc::new(value)) 75 | } 76 | } 77 | 78 | impl Extend for Message { 79 | fn extend>(&mut self, iter: I) { 80 | self.values.extend(iter.into_iter().map(|t| Arc::new(t))); 81 | } 82 | } 83 | 84 | impl Extend> for Message { 85 | fn extend>>(&mut self, iter: I) { 86 | self.values.extend(iter); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /crates/commit/src/rounds.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Deref, DerefMut}; 2 | 3 | use hypercube_alloc::HasBackend; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Debug, Clone, Serialize, Deserialize)] 7 | pub struct Rounds { 8 | pub rounds: Vec, 9 | } 10 | 11 | impl Rounds { 12 | #[inline] 13 | pub const fn new() -> Self { 14 | Self { rounds: vec![] } 15 | } 16 | } 17 | 18 | impl Default for Rounds { 19 | #[inline] 20 | fn default() -> Self { 21 | Self::new() 22 | } 23 | } 24 | 25 | impl Deref for Rounds { 26 | type Target = Vec; 27 | 28 | fn deref(&self) -> &Self::Target { 29 | &self.rounds 30 | } 31 | } 32 | 33 | impl DerefMut for Rounds { 34 | fn deref_mut(&mut self) -> &mut Self::Target { 35 | &mut self.rounds 36 | } 37 | } 38 | 39 | impl FromIterator for Rounds { 40 | fn from_iter>(iter: T) -> Self { 41 | Rounds { rounds: iter.into_iter().collect() } 42 | } 43 | } 44 | 45 | impl IntoIterator for Rounds { 46 | type Item = M; 47 | type IntoIter = as IntoIterator>::IntoIter; 48 | 49 | fn into_iter(self) -> Self::IntoIter { 50 | self.rounds.into_iter() 51 | } 52 | } 53 | 54 | impl Extend for Rounds { 55 | fn extend>(&mut self, iter: T) { 56 | self.rounds.extend(iter); 57 | } 58 | } 59 | 60 | impl HasBackend for Rounds 61 | where 62 | M: HasBackend, 63 | { 64 | type Backend = M::Backend; 65 | 66 | fn backend(&self) -> &Self::Backend { 67 | assert!(!self.rounds.is_empty(), "Rounds must not be empty"); 68 | self.rounds.first().unwrap().backend() 69 | } 70 | } 71 | 
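// Illustrative sketch (not part of the original file): `Rounds` is a thin `Vec` wrapper,
// so per-round message batches can be collected and extended directly.
#[cfg(test)]
mod rounds_usage_sketch {
    use super::*;

    #[test]
    fn collect_and_extend_rounds() {
        // One message batch per commitment round.
        let mut rounds: Rounds<Vec<u8>> = (0u8..2).map(|i| vec![i]).collect();
        rounds.extend([vec![9u8]]);
        // `Deref` to `Vec` provides `len`, indexing, and iteration.
        assert_eq!(rounds.len(), 3);
    }
}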
-------------------------------------------------------------------------------- /crates/commit/src/tensor_cs.rs: -------------------------------------------------------------------------------- 1 | use std::{error::Error, fmt::Debug}; 2 | 3 | use hypercube_tensor::Tensor; 4 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; 5 | 6 | /// An opening of a tensor commitment scheme. 7 | #[derive(Debug, Clone, Serialize, Deserialize)] 8 | #[serde(bound(serialize = "", deserialize = ""))] 9 | pub struct TensorCsOpening { 10 | /// The claimed values of the opening. 11 | pub values: Tensor, 12 | /// The proof of the opening. 13 | pub proof: ::Proof, 14 | } 15 | 16 | /// Tensor commitment scheme. 17 | /// 18 | /// A tensor commitment scheme is essentially a batch vector commitment scheme, where the latter 19 | /// allows you to commit to a list of elements of type [Self::Data] and later provide a verifier 20 | /// oracle access to a specific element at a specific index. In a Tensor commitment scheme, the 21 | /// verifier oracle access is to a specific slice of the input tensor t[[.., i, ...]]. The prover 22 | /// is free to choose the dimension along which the commitment is made. 23 | /// 24 | /// As tensors are stored contiguously in memory, it is not always desirable to have all committed 25 | /// data in a single tensor. Hence, a tensor commitment scheme assumes the prover commits as above 26 | /// to a list of tensors of the same shape at a given order. 27 | pub trait TensorCs: 'static + Clone + Send + Sync { 28 | type Data: Clone + Send + Sync + Serialize + DeserializeOwned; 29 | type Commitment: 'static + Clone + Send + Sync + Serialize + DeserializeOwned; 30 | type Proof: Debug + Clone + Send + Sync + Serialize + DeserializeOwned; 31 | type VerifierError: Error; 32 | 33 | /// Verify a batch of openings. 34 | /// 35 | /// The claimed valued tensors are assumed to be of shape [indices.len(), ..]. For each index, 36 | /// the collection of claimed values indexed at [index,...] is the data of the corresponding 37 | /// committed tensors at the given index. 38 | fn verify_tensor_openings( 39 | &self, 40 | commit: &Self::Commitment, 41 | indices: &[usize], 42 | opening: &TensorCsOpening, 43 | ) -> Result<(), Self::VerifierError>; 44 | } 45 | 46 | impl TensorCsOpening { 47 | #[inline] 48 | pub const fn new(values: Tensor, proof: ::Proof) -> Self { 49 | Self { values, proof } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /crates/derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sp1-derive" 3 | description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM." 
4 | readme = "../../README.md" 5 | version = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | repository = { workspace = true } 9 | keywords = { workspace = true } 10 | categories = { workspace = true } 11 | 12 | [lib] 13 | proc-macro = true 14 | 15 | [dependencies] 16 | quote = "1.0" 17 | syn = { version = "1.0", features = ["full"] } 18 | 19 | [lints] 20 | workspace = true 21 | -------------------------------------------------------------------------------- /crates/jagged/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-jagged" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | hypercube-multilinear = { workspace = true } 12 | p3-baby-bear = { workspace = true } 13 | hypercube-sumcheck = { workspace = true } 14 | p3-field = { workspace = true } 15 | p3-challenger = { workspace = true } 16 | hypercube-stacked = { workspace = true } 17 | hypercube-basefold = { workspace = true } 18 | rayon = "1.5.0" 19 | serde = { workspace = true, features = ["rc"] } 20 | thiserror = { workspace = true } 21 | 22 | [dev-dependencies] 23 | p3-baby-bear = { workspace = true } 24 | -------------------------------------------------------------------------------- /crates/jagged/src/basefold.rs: -------------------------------------------------------------------------------- 1 | use hypercube_basefold::{ 2 | BasefoldConfig, BasefoldProof, BasefoldVerifier, DefaultBasefoldConfig, 3 | Poseidon2BabyBear16BasefoldConfig, 4 | }; 5 | use hypercube_stacked::StackedPcsVerifier; 6 | use p3_baby_bear::BabyBear; 7 | use serde::{Deserialize, Serialize}; 8 | use std::fmt::Debug; 9 | 10 | use crate::{JaggedConfig, JaggedEvalConfig, JaggedEvalSumcheckConfig, JaggedPcsVerifier}; 11 | 12 | pub type BabyBearPoseidon2 = 13 | JaggedBasefoldConfig<Poseidon2BabyBear16BasefoldConfig, JaggedEvalSumcheckConfig<BabyBear>>; 14 | 15 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 16 | pub struct JaggedBasefoldConfig<BC, E>(BC, E); 17 | 18 | impl<BC, E> JaggedConfig for JaggedBasefoldConfig<BC, E> 19 | where 20 | BC: BasefoldConfig, 21 | E: JaggedEvalConfig<BC::F, BC::EF, BC::Challenger> + Clone, 22 | BC::Commitment: Debug, 23 | { 24 | type F = BC::F; 25 | type EF = BC::EF; 26 | type Commitment = BC::Commitment; 27 | type BatchPcsProof = BasefoldProof<BC>; 28 | type Challenger = BC::Challenger; 29 | type BatchPcsVerifier = BasefoldVerifier<BC>; 30 | type JaggedEvaluator = E; 31 | } 32 | 33 | impl<BC, E> JaggedPcsVerifier<JaggedBasefoldConfig<BC, E>> 34 | where 35 | BC: DefaultBasefoldConfig, 36 | BC::Commitment: Debug, 37 | E: JaggedEvalConfig<BC::F, BC::EF, BC::Challenger> + Default, 38 | { 39 | pub fn new(log_blowup: usize, log_stacking_height: u32, max_log_row_count: usize) -> Self { 40 | let basefold_verifier = BasefoldVerifier::<BC>::new(log_blowup); 41 | let stacked_pcs_verifier = StackedPcsVerifier::new(basefold_verifier, log_stacking_height); 42 | Self { stacked_pcs_verifier, max_log_row_count, jagged_evaluator: E::default() } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /crates/jagged/src/config.rs: -------------------------------------------------------------------------------- 1 | use hypercube_multilinear::MultilinearPcsVerifier; 2 | use p3_challenger::{CanObserve, FieldChallenger}; 3 | use p3_field::{ExtensionField, Field}; 4 | use serde::{de::DeserializeOwned, Serialize}; 5 | use std::fmt::Debug; 6 | 7 | use crate::JaggedEvalConfig; 8 | 9 | pub trait JaggedConfig: 'static + Clone + Send + Sync + Serialize + DeserializeOwned { 10
| type F: Field; 11 | type EF: ExtensionField<Self::F>; 12 | 13 | type Commitment: 'static + Clone + Send + Sync + Serialize + DeserializeOwned + Debug; 14 | 15 | /// The challenger type that creates the random challenges via Fiat-Shamir. 16 | /// 17 | /// The challenger observes all the messages sent throughout the protocol and uses them 18 | /// to create the verifier messages of the IOP. 19 | type Challenger: FieldChallenger<Self::F> 20 | + CanObserve<Self::Commitment> 21 | + 'static 22 | + Send 23 | + Sync 24 | + Clone; 25 | 26 | type BatchPcsProof: 'static + Clone + Send + Sync + Serialize + DeserializeOwned; 27 | 28 | type BatchPcsVerifier: MultilinearPcsVerifier< 29 | F = Self::F, 30 | EF = Self::EF, 31 | Challenger = Self::Challenger, 32 | Proof = Self::BatchPcsProof, 33 | Commitment = Self::Commitment, 34 | >; 35 | 36 | type JaggedEvaluator: JaggedEvalConfig<Self::F, Self::EF, Self::Challenger> 37 | + 'static 38 | + Clone 39 | + Send 40 | + Sync 41 | + Serialize 42 | + DeserializeOwned; 43 | } 44 | -------------------------------------------------------------------------------- /crates/jagged/src/jagged_eval/mod.rs: -------------------------------------------------------------------------------- 1 | mod sumcheck_eval; 2 | 3 | pub use sumcheck_eval::*; 4 | 5 | use std::{error::Error, fmt::Debug}; 6 | 7 | use hypercube_multilinear::Point; 8 | use p3_field::{ExtensionField, Field}; 9 | use serde::{de::DeserializeOwned, Serialize}; 10 | 11 | use crate::JaggedLittlePolynomialVerifierParams; 12 | 13 | pub trait JaggedEvalConfig<F: Field, EF: ExtensionField<F>, Challenger>: 14 | 'static + Send + Sync + Serialize + DeserializeOwned + std::fmt::Debug + Clone 15 | { 16 | type JaggedEvalProof: 'static + Debug + Clone + Send + Sync + Serialize + DeserializeOwned; 17 | 18 | type JaggedEvalError: Error + 'static + Send + Sync; 19 | 20 | fn jagged_evaluation( 21 | &self, 22 | params: &JaggedLittlePolynomialVerifierParams<F>, 23 | z_row: &Point<EF>, 24 | z_col: &Point<EF>, 25 | z_trace: &Point<EF>, 26 | proof: &Self::JaggedEvalProof, 27 | challenger: &mut Challenger, 28 | ) -> Result<EF, Self::JaggedEvalError>; 29 | } 30 | -------------------------------------------------------------------------------- /crates/jagged/src/jagged_eval/sumcheck_eval.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Debug, marker::PhantomData}; 2 | 3 | use hypercube_multilinear::{Mle, Point}; 4 | use hypercube_sumcheck::{partially_verify_sumcheck_proof, PartialSumcheckProof, SumcheckError}; 5 | use p3_challenger::FieldChallenger; 6 | use p3_field::{ExtensionField, Field}; 7 | use serde::{Deserialize, Serialize}; 8 | use thiserror::Error; 9 | 10 | use crate::{poly::BranchingProgram, JaggedLittlePolynomialVerifierParams}; 11 | 12 | use super::JaggedEvalConfig; 13 | 14 | #[derive(Debug, Clone, Serialize, Deserialize)] 15 | pub struct JaggedSumcheckEvalProof<F> { 16 | pub branching_program_evals: Vec<F>, 17 | pub partial_sumcheck_proof: PartialSumcheckProof<F>, 18 | } 19 | 20 | #[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] 21 | pub struct JaggedEvalSumcheckConfig<F>(PhantomData<F>); 22 | 23 | #[derive(Debug, Error)] 24 | pub enum JaggedEvalSumcheckError<F> { 25 | #[error("sumcheck error: {0}")] 26 | SumcheckError(SumcheckError), 27 | #[error("jagged evaluation proof verification failed, expected: {0}, got: {1}")] 28 | JaggedEvaluationFailed(F, F), 29 | } 30 | 31 | impl<F, EF, Challenger> JaggedEvalConfig<F, EF, Challenger> for JaggedEvalSumcheckConfig<F> 32 | where 33 | F: Field, 34 | EF: ExtensionField<F>, 35 | Challenger: FieldChallenger<F>, 36 | { 37 | type JaggedEvalProof = JaggedSumcheckEvalProof<EF>; 38 | type JaggedEvalError = 
JaggedEvalSumcheckError<EF>; 39 | 40 | fn jagged_evaluation( 41 | &self, 42 | params: &JaggedLittlePolynomialVerifierParams<F>, 43 | z_row: &Point<EF>, 44 | z_col: &Point<EF>, 45 | z_trace: &Point<EF>, 46 | proof: &Self::JaggedEvalProof, 47 | challenger: &mut Challenger, 48 | ) -> Result<EF, Self::JaggedEvalError> { 49 | let JaggedSumcheckEvalProof { branching_program_evals, partial_sumcheck_proof } = proof; 50 | // Calculate the partial Lagrange evaluations from the z_col point. 51 | let z_col_partial_lagrange = Mle::blocking_partial_lagrange(z_col); 52 | let z_col_partial_lagrange = z_col_partial_lagrange.guts().as_slice(); 53 | 54 | // Calculate the jagged eval from the branching program eval claims. 55 | let jagged_eval = z_col_partial_lagrange 56 | .iter() 57 | .zip(branching_program_evals.iter()) 58 | .map(|(partial_lagrange, branching_program_eval)| { 59 | *partial_lagrange * *branching_program_eval 60 | }) 61 | .sum::<EF>(); 62 | 63 | // Verify the jagged eval proof. 64 | let result = partially_verify_sumcheck_proof(partial_sumcheck_proof, challenger); 65 | if let Err(result) = result { 66 | return Err(JaggedEvalSumcheckError::SumcheckError(result)); 67 | } 68 | let (first_half_z_index, second_half_z_index) = partial_sumcheck_proof 69 | .point_and_eval 70 | .0 71 | .split_at(partial_sumcheck_proof.point_and_eval.0.dimension() / 2); 72 | assert!(first_half_z_index.len() == second_half_z_index.len()); 73 | 74 | // Compute the expected evaluation of the jagged eval sumcheck and assert that it matches the proof's eval. 75 | let current_column_prefix_sums = params.col_prefix_sums.iter(); 76 | let next_column_prefix_sums = params.col_prefix_sums.iter().skip(1); 77 | let mut is_first_column = true; 78 | let mut prev_merged_prefix_sum = Point::<F>::default(); 79 | let mut prev_full_lagrange_eval = EF::zero(); 80 | let mut jagged_eval_sc_expected_eval = current_column_prefix_sums 81 | .zip(next_column_prefix_sums) 82 | .zip(z_col_partial_lagrange.iter()) 83 | .map(|((current_column_prefix_sum, next_column_prefix_sum), z_col_eq_val)| { 84 | let mut merged_prefix_sum = current_column_prefix_sum.clone(); 85 | merged_prefix_sum.extend(next_column_prefix_sum); 86 | 87 | let full_lagrange_eval = 88 | if prev_merged_prefix_sum == merged_prefix_sum && !is_first_column { 89 | prev_full_lagrange_eval 90 | } else { 91 | let full_lagrange_eval = Mle::full_lagrange_eval( 92 | &merged_prefix_sum, 93 | &partial_sumcheck_proof.point_and_eval.0, 94 | ); 95 | prev_full_lagrange_eval = full_lagrange_eval; 96 | full_lagrange_eval 97 | }; 98 | 99 | prev_merged_prefix_sum = merged_prefix_sum; 100 | is_first_column = false; 101 | 102 | *z_col_eq_val * full_lagrange_eval 103 | }) 104 | .sum::<EF>(); 105 | 106 | let branching_program = BranchingProgram::new(z_row.clone(), z_trace.clone()); 107 | jagged_eval_sc_expected_eval *= 108 | branching_program.eval(&first_half_z_index, &second_half_z_index); 109 | 110 | if jagged_eval_sc_expected_eval != partial_sumcheck_proof.point_and_eval.1 { 111 | Err(JaggedEvalSumcheckError::JaggedEvaluationFailed( 112 | jagged_eval_sc_expected_eval, 113 | partial_sumcheck_proof.point_and_eval.1, 114 | )) 115 | } else { 116 | Ok(jagged_eval) 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /crates/jagged/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod basefold; 2 | mod config; 3 | mod jagged_eval; 4 | mod poly; 5 | mod verifier; 6 | 7 | pub use basefold::*; 8 | pub use config::*; 9 | pub use jagged_eval::*; 10 | pub use poly::*; 11 | pub use verifier::*; 12 | 
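For intuition, the verifier's first step in `jagged_evaluation` above is a dot product between the expansion of `eq(z_col, ·)` over the boolean hypercube and the claimed branching-program evaluations. The sketch below illustrates this under stated assumptions: plain `u64` arithmetic modulo the BabyBear prime stands in for the field types, and `partial_lagrange` mirrors the round-by-round expansion used by `Mle::blocking_partial_lagrange` (shown later in `multilinear/src/lagrange.rs`). It is illustrative only, not the crate's implementation.

```rust
// Sketch (not part of the crate): the dot-product step of the jagged evaluation.
const P: u64 = 0x7800_0001; // the BabyBear prime, as in sp1-primitives

fn mul(a: u64, b: u64) -> u64 {
    ((a as u128 * b as u128) % P as u128) as u64
}

// Expand a point z into the 2^n equality-polynomial evaluations eq(z, b)
// over all boolean b, doubling the table once per coordinate.
fn partial_lagrange(point: &[u64]) -> Vec<u64> {
    let mut evals = vec![1u64];
    for &c in point {
        evals = evals
            .iter()
            .flat_map(|&v| {
                let prod = mul(v, c);
                [(v + P - prod) % P, prod] // v * (1 - c) and v * c
            })
            .collect();
    }
    evals
}

fn main() {
    let z_col = [5u64, 7, 11];
    let branching_program_evals: Vec<u64> = (1..=8).collect();

    // jagged_eval = sum_i eq(z_col, i) * branching_program_evals[i]
    let jagged_eval = partial_lagrange(&z_col)
        .iter()
        .zip(&branching_program_evals)
        .fold(0, |acc, (&eq, &bp)| (acc + mul(eq, bp)) % P);

    // Sanity check: the eq evaluations sum to 1 over the cube, so constant
    // claims collapse the dot product to that constant.
    let constant = partial_lagrange(&z_col).iter().fold(0, |acc, &eq| (acc + mul(eq, 3)) % P);
    assert_eq!(constant, 3);
    println!("jagged_eval = {jagged_eval}");
}
```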
-------------------------------------------------------------------------------- /crates/merkle-tree/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-merkle-tree" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | p3-merkle-tree = { workspace = true } 12 | 13 | hypercube-commit = { workspace = true } 14 | p3-symmetric = { workspace = true } 15 | p3-poseidon2 = { workspace = true } 16 | p3-baby-bear = { workspace = true } 17 | p3-field = { workspace = true } 18 | hypercube-tensor = { workspace = true } 19 | 20 | thiserror = { workspace = true } 21 | serde = { workspace = true } 22 | lazy_static = "1.5.0" 23 | itertools = { workspace = true } 24 | 25 | 26 | [dev-dependencies] 27 | -------------------------------------------------------------------------------- /crates/merkle-tree/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub use p3_merkle_tree::*; 2 | 3 | mod baby_bear_poseidon2; 4 | mod tcs; 5 | 6 | pub use baby_bear_poseidon2::*; 7 | pub use tcs::*; 8 | -------------------------------------------------------------------------------- /crates/merkle-tree/src/tcs.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use hypercube_commit::{TensorCs, TensorCsOpening}; 4 | use hypercube_tensor::Tensor; 5 | use itertools::Itertools; 6 | use p3_symmetric::{CryptographicHasher, PseudoCompressionFunction}; 7 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; 8 | use thiserror::Error; 9 | 10 | /// An interface defining a Merkle tree. 11 | pub trait MerkleTreeConfig: 'static + Clone + Send + Sync { 12 | type Data: 'static + Clone + Send + Sync + Serialize + DeserializeOwned; 13 | type Digest: 'static 14 | + Debug 15 | + Clone 16 | + Send 17 | + Sync 18 | + PartialEq 19 | + Eq 20 | + Serialize 21 | + DeserializeOwned; 22 | type Hasher: CryptographicHasher<Self::Data, Self::Digest> + Send + Sync + Clone; 23 | type Compressor: PseudoCompressionFunction<Self::Digest, 2> + Send + Sync + Clone; 24 | } 25 | 26 | pub trait DefaultMerkleTreeConfig: MerkleTreeConfig { 27 | fn default_hasher_and_compressor() -> (Self::Hasher, Self::Compressor); 28 | } 29 | 30 | /// A Merkle tree tensor commitment scheme. 31 | /// 32 | /// A tensor commitment scheme based on Merkleizing the committed tensors along a given dimension, 33 | /// which the prover is free to choose. 
34 | #[derive(Debug, Clone, Copy)] 35 | pub struct MerkleTreeTcs<M: MerkleTreeConfig> { 36 | pub hasher: M::Hasher, 37 | pub compressor: M::Compressor, 38 | } 39 | 40 | #[derive(Debug, Clone, Copy, Error)] 41 | pub enum MerkleTreeTcsError { 42 | #[error("root mismatch")] 43 | RootMismatch, 44 | } 45 | 46 | #[derive(Debug, Clone, Serialize, Deserialize)] 47 | pub struct MerkleTreeTcsProof<D> { 48 | pub paths: Tensor<D>, 49 | } 50 | 51 | impl<M: DefaultMerkleTreeConfig> Default for MerkleTreeTcs<M> { 52 | #[inline] 53 | fn default() -> Self { 54 | let (hasher, compressor) = M::default_hasher_and_compressor(); 55 | Self { hasher, compressor } 56 | } 57 | } 58 | 59 | impl<M: MerkleTreeConfig> TensorCs for MerkleTreeTcs<M> { 60 | type Data = M::Data; 61 | type Commitment = M::Digest; 62 | type Proof = MerkleTreeTcsProof<M::Digest>; 63 | type VerifierError = MerkleTreeTcsError; 64 | 65 | fn verify_tensor_openings( 66 | &self, 67 | commit: &Self::Commitment, 68 | indices: &[usize], 69 | opening: &TensorCsOpening<Self>, 70 | ) -> Result<(), Self::VerifierError> { 71 | for (i, (index, path)) in indices.iter().zip_eq(opening.proof.paths.split()).enumerate() { 72 | // Collect the leaf slices of the claimed values. 73 | let claimed_values_slices = opening.values.get(i).unwrap().as_slice(); 74 | 75 | let path = path.as_slice(); 76 | 77 | // Hash the claimed values into a leaf digest, then iterate the path to compute the root. 78 | let digest = self.hasher.hash_iter_slices(vec![claimed_values_slices]); 79 | 80 | let mut root = digest; 81 | let mut index = *index; 82 | for sibling in path.iter().cloned() { 83 | let (left, right) = if index & 1 == 0 { (root, sibling) } else { (sibling, root) }; 84 | root = self.compressor.compress([left, right]); 85 | index >>= 1; 86 | } 87 | 88 | if root != *commit { 89 | return Err(Self::VerifierError::RootMismatch); 90 | } 91 | } 92 | 93 | Ok(()) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /crates/multilinear/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-multilinear" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | hypercube-alloc = { workspace = true } 12 | hypercube-tensor = { workspace = true } 13 | p3-field = { workspace = true } 14 | p3-matrix = { workspace = true } 15 | p3-challenger = { workspace = true } 16 | 17 | serde = { workspace = true } 18 | rayon = { workspace = true } 19 | derive-where = { workspace = true } 20 | rand = { workspace = true } 21 | 22 | [dev-dependencies] 23 | rand = { workspace = true } 24 | -------------------------------------------------------------------------------- /crates/multilinear/src/base.rs: -------------------------------------------------------------------------------- 1 | use hypercube_alloc::{Backend, CpuBackend}; 2 | use hypercube_tensor::Tensor; 3 | use p3_field::AbstractField; 4 | 5 | pub trait MleBaseBackend<T: AbstractField>: Backend { 6 | /// Returns the number of polynomials in the batch. 7 | fn num_polynomials(guts: &Tensor<T, Self>) -> usize; 8 | 9 | /// Returns the number of variables in the polynomials. 10 | fn num_variables(guts: &Tensor<T, Self>) -> u32; 11 | 12 | /// Returns the number of non-zero entries in the MLE. 
13 | fn num_non_zero_entries(guts: &Tensor<T, Self>) -> usize; 14 | 15 | fn uninit_mle(&self, num_polynomials: usize, num_non_zero_entries: usize) -> Tensor<T, Self>; 16 | } 17 | 18 | impl<T: AbstractField> MleBaseBackend<T> for CpuBackend { 19 | fn num_polynomials(guts: &Tensor<T>) -> usize { 20 | guts.sizes()[1] 21 | } 22 | 23 | fn num_variables(guts: &Tensor<T>) -> u32 { 24 | guts.sizes()[0].next_power_of_two().ilog2() 25 | } 26 | 27 | fn num_non_zero_entries(guts: &Tensor<T>) -> usize { 28 | guts.sizes()[0] 29 | } 30 | 31 | fn uninit_mle(&self, num_polynomials: usize, num_non_zero_entries: usize) -> Tensor<T> { 32 | Tensor::with_sizes_in([num_non_zero_entries, num_polynomials], *self) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /crates/multilinear/src/eval.rs: -------------------------------------------------------------------------------- 1 | use hypercube_alloc::{buffer, Buffer, CpuBackend}; 2 | use hypercube_tensor::{Dimensions, Tensor}; 3 | use p3_field::{AbstractExtensionField, AbstractField}; 4 | use rayon::prelude::*; 5 | 6 | use crate::{partial_lagrange_blocking, Point}; 7 | 8 | pub(crate) fn eval_mle_at_point_blocking< 9 | F: AbstractField + Sync, 10 | EF: AbstractExtensionField<F> + Send + Sync, 11 | >( 12 | mle: &Tensor<F>, 13 | point: &Point<EF>, 14 | ) -> Tensor<EF> { 15 | let partial_lagrange = partial_lagrange_blocking(point); 16 | let mut sizes = mle.sizes().to_vec(); 17 | sizes.remove(0); 18 | let dimensions = Dimensions::try_from(sizes).unwrap(); 19 | let mut dst = Tensor { storage: buffer![], dimensions }; 20 | let total_len = dst.total_len(); 21 | let dot_products = mle 22 | .as_buffer() 23 | .par_chunks_exact(mle.strides()[0]) 24 | .zip(partial_lagrange.as_buffer().par_iter()) 25 | .map(|(chunk, scalar)| chunk.iter().map(|a| scalar.clone() * a.clone()).collect()) 26 | .reduce( 27 | || vec![EF::zero(); total_len], 28 | |mut a, b| { 29 | a.iter_mut().zip(b.iter()).for_each(|(a, b)| *a += b.clone()); 30 | a 31 | }, 32 | ); 33 | 34 | let dot_products = Buffer::from(dot_products); 35 | dst.storage = dot_products; 36 | dst 37 | } 38 | -------------------------------------------------------------------------------- /crates/multilinear/src/lagrange.rs: -------------------------------------------------------------------------------- 1 | use hypercube_alloc::CpuBackend; 2 | use hypercube_tensor::Tensor; 3 | use p3_field::AbstractField; 4 | 5 | use crate::Point; 6 | 7 | pub fn partial_lagrange_blocking<F: AbstractField>( 8 | point: &Point<F>, 9 | ) -> Tensor<F, CpuBackend> { 10 | let one = F::one(); 11 | let mut evals = Vec::with_capacity(1 << point.dimension()); 12 | evals.push(one); 13 | 14 | // Build evals in num_variables rounds. In each round, we consider one more entry of `point`, 15 | // doubling the number of collected evaluations. 16 | point.iter().for_each(|coordinate| { 17 | evals = evals 18 | .iter() 19 | // For each value in the previous round, multiply by (1 - coordinate) and by coordinate, 20 | // and collect all these values into a new vec. 
21 | .flat_map(|val| { 22 | let prod = val.clone() * coordinate.clone(); 23 | [val.clone() - prod.clone(), prod] 24 | }) 25 | .collect(); 26 | }); 27 | Tensor::from(evals).reshape([1 << point.dimension(), 1]) 28 | } 29 | -------------------------------------------------------------------------------- /crates/multilinear/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod base; 2 | mod eval; 3 | mod lagrange; 4 | mod mle; 5 | mod pcs; 6 | mod point; 7 | 8 | pub use base::*; 9 | pub use lagrange::*; 10 | pub use mle::*; 11 | pub use pcs::*; 12 | pub use point::*; 13 | -------------------------------------------------------------------------------- /crates/primitives/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [1.1.0](https://github.com/succinctlabs/sp1/compare/sp1-primitives-v1.0.1...sp1-primitives-v1.1.0) - 2024-08-02 11 | 12 | ### Added 13 | - update tg ([#1214](https://github.com/succinctlabs/sp1/pull/1214)) 14 | 15 | ## [1.0.0-rc1](https://github.com/succinctlabs/sp1/compare/sp1-primitives-v1.0.0-rc1...sp1-primitives-v1.0.0-rc1) - 2024-07-19 16 | 17 | ### Added 18 | 19 | - publish sp1 to crates.io ([#1052](https://github.com/succinctlabs/sp1/pull/1052)) 20 | - plonk circuit optimizations ([#972](https://github.com/succinctlabs/sp1/pull/972)) 21 | - enable arbitrary constraint degree ([#593](https://github.com/succinctlabs/sp1/pull/593)) 22 | - complete reduce program ([#565](https://github.com/succinctlabs/sp1/pull/565)) 23 | - nested sp1 proof verification ([#494](https://github.com/succinctlabs/sp1/pull/494)) 24 | - new README img ([#226](https://github.com/succinctlabs/sp1/pull/226)) 25 | - readme updates ([#205](https://github.com/succinctlabs/sp1/pull/205)) 26 | - more final touches ([#194](https://github.com/succinctlabs/sp1/pull/194)) 27 | - curtaup + release system + cargo prove CLI updates ([#178](https://github.com/succinctlabs/sp1/pull/178)) 28 | - (perf) updates from Plonky3 and verifier refactor ([#156](https://github.com/succinctlabs/sp1/pull/156)) 29 | - developer experience improvements ([#145](https://github.com/succinctlabs/sp1/pull/145)) 30 | - toolchain build from source & install ([#113](https://github.com/succinctlabs/sp1/pull/113)) 31 | - io::read io::write ([#126](https://github.com/succinctlabs/sp1/pull/126)) 32 | - tracing, profiling, benchmarking ([#99](https://github.com/succinctlabs/sp1/pull/99)) 33 | 34 | ### Fixed 35 | 36 | - deferred proofs + cleanup hash_vkey ([#615](https://github.com/succinctlabs/sp1/pull/615)) 37 | 38 | ### Other 39 | 40 | - use global workspace version ([#1102](https://github.com/succinctlabs/sp1/pull/1102)) 41 | - fix release-plz ([#1088](https://github.com/succinctlabs/sp1/pull/1088)) 42 | - add release-plz ([#1086](https://github.com/succinctlabs/sp1/pull/1086)) 43 | - _(deps)_ bump lazy_static from 1.4.0 to 1.5.0 44 | - _(deps)_ bump itertools from 0.12.1 to 0.13.0 45 | - Clean up TOML files ([#796](https://github.com/succinctlabs/sp1/pull/796)) 46 | - prover tweaks ([#610](https://github.com/succinctlabs/sp1/pull/610)) 47 | - final touches for public release ([#239](https://github.com/succinctlabs/sp1/pull/239)) 48 | - update 
docs with slight nits ([#224](https://github.com/succinctlabs/sp1/pull/224)) 49 | - sp1 rename ([#212](https://github.com/succinctlabs/sp1/pull/212)) 50 | - enshrine AlignedBorrow macro ([#209](https://github.com/succinctlabs/sp1/pull/209)) 51 | - readme cleanup ([#196](https://github.com/succinctlabs/sp1/pull/196)) 52 | - rename succinct to curta ([#192](https://github.com/succinctlabs/sp1/pull/192)) 53 | - better curta graphic ([#184](https://github.com/succinctlabs/sp1/pull/184)) 54 | - Initial commit 55 | -------------------------------------------------------------------------------- /crates/primitives/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sp1-primitives" 3 | description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM." 4 | readme = "../../README.md" 5 | version = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | repository = { workspace = true } 9 | keywords = { workspace = true } 10 | categories = { workspace = true } 11 | 12 | [dependencies] 13 | lazy_static = "1.5.0" 14 | p3-field = { workspace = true } 15 | p3-baby-bear = { workspace = true } 16 | p3-poseidon2 = { workspace = true } 17 | p3-symmetric = { workspace = true } 18 | serde = { workspace = true, features = ["derive"] } 19 | 20 | [lints] 21 | workspace = true 22 | -------------------------------------------------------------------------------- /crates/primitives/src/consts.rs: -------------------------------------------------------------------------------- 1 | /// The maximum size of the memory in bytes. 2 | pub const MAXIMUM_MEMORY_SIZE: u32 = u32::MAX; 3 | 4 | /// The number of bits in a byte. 5 | pub const BYTE_SIZE: usize = 8; 6 | 7 | /// The size of a word in limbs. 8 | pub const WORD_SIZE: usize = 2; 9 | 10 | /// The size of a word in bytes. 11 | pub const WORD_BYTE_SIZE: usize = 4; 12 | 13 | /// The number of bytes necessary to represent a 64-bit integer. 14 | pub const LONG_WORD_BYTE_SIZE: usize = 2 * WORD_BYTE_SIZE; 15 | 16 | /// The Baby Bear prime. 17 | pub const BABYBEAR_PRIME: u32 = 0x78000001; 18 | -------------------------------------------------------------------------------- /crates/primitives/src/io.rs: -------------------------------------------------------------------------------- 1 | use crate::types::Buffer; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | /// The number of 32 bit words in the SP1 proof's committed value digest. 5 | pub const PV_DIGEST_NUM_WORDS: usize = 8; 6 | 7 | /// The number of field elements in the poseidon2 digest. 8 | pub const POSEIDON_NUM_WORDS: usize = 8; 9 | 10 | /// Public values for the prover. 11 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 12 | pub struct SP1PublicValues { 13 | buffer: Buffer, 14 | } 15 | 16 | impl SP1PublicValues { 17 | pub fn as_byte_slice(&self) -> &[u8] { 18 | &self.buffer.data 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /crates/primitives/src/types.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Clone, Copy)] 4 | pub enum RecursionProgramType { 5 | Core, 6 | Deferred, 7 | Compress, 8 | Shrink, 9 | Wrap, 10 | } 11 | 12 | /// A buffer of serializable/deserializable objects. 
13 | #[derive(Debug, Clone, Serialize, Deserialize)] 14 | pub struct Buffer { 15 | pub data: Vec, 16 | #[serde(skip)] 17 | pub ptr: usize, 18 | } 19 | 20 | impl Buffer { 21 | pub const fn new() -> Self { 22 | Self { data: Vec::new(), ptr: 0 } 23 | } 24 | } 25 | 26 | impl Default for Buffer { 27 | fn default() -> Self { 28 | Self::new() 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /crates/recursion/README.md: -------------------------------------------------------------------------------- 1 | # SP1 Recursion 2 | 3 | 4 | ## Debugging recursion programs 5 | The recursion programs are executed in the recursion runtime. In case of a panic in the recursion 6 | runtime, rust will panic with a `TRAP` error. In order to get detailed information about the panic, 7 | with a backtrace, compile the test with the environment variables: 8 | ```bash 9 | RUST_BACKTRACE=1 RUSTFLAGS="-g" SP1_DEBUG=true 10 | ``` 11 | -------------------------------------------------------------------------------- /crates/recursion/derive/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [1.1.0](https://github.com/succinctlabs/sp1/compare/sp1-recursion-derive-v1.0.1...sp1-recursion-derive-v1.1.0) - 2024-08-02 11 | 12 | ### Added 13 | - update tg ([#1214](https://github.com/succinctlabs/sp1/pull/1214)) 14 | 15 | ## [1.0.0-rc1](https://github.com/succinctlabs/sp1/compare/sp1-recursion-derive-v1.0.0-rc1...sp1-recursion-derive-v1.0.0-rc1) - 2024-07-19 16 | 17 | ### Other 18 | 19 | - use global workspace version ([#1102](https://github.com/succinctlabs/sp1/pull/1102)) 20 | -------------------------------------------------------------------------------- /crates/recursion/derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-recursion-derive" 3 | description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM." 4 | readme = "../../../README.md" 5 | version = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | repository = { workspace = true } 9 | keywords = { workspace = true } 10 | categories = { workspace = true } 11 | 12 | [lib] 13 | proc-macro = true 14 | 15 | [dependencies] 16 | quote = "1.0" 17 | syn = { version = "1.0", features = ["full"] } 18 | 19 | [lints] 20 | workspace = true 21 | -------------------------------------------------------------------------------- /crates/recursion/derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | 3 | use proc_macro::TokenStream; 4 | use quote::quote; 5 | use syn::{parse_macro_input, Data, DeriveInput, Fields}; 6 | 7 | #[proc_macro_derive(DslVariable)] 8 | pub fn derive_variable(input: TokenStream) -> TokenStream { 9 | let input = parse_macro_input!(input as DeriveInput); 10 | let name = input.ident; // Struct name 11 | 12 | let gen = match input.data { 13 | Data::Struct(data) => match data.fields { 14 | Fields::Named(fields) => { 15 | let fields_init = fields.named.iter().map(|f| { 16 | let fname = &f.ident; 17 | let ftype = &f.ty; 18 | let ftype_str = quote! 
{ #ftype }.to_string(); 19 | if ftype_str.contains("Array") { 20 | quote! { 21 | #fname: Array::Dyn(builder.uninit(), builder.uninit()), 22 | } 23 | } else { 24 | quote! { 25 | #fname: <#ftype as Variable>::uninit(builder), 26 | } 27 | } 28 | }); 29 | 30 | let fields_assign = fields.named.iter().map(|f| { 31 | let fname = &f.ident; 32 | quote! { 33 | self.#fname.assign(src.#fname.into(), builder); 34 | } 35 | }); 36 | 37 | let fields_assert_eq = fields.named.iter().map(|f| { 38 | let fname = &f.ident; 39 | let ftype = &f.ty; 40 | quote! { 41 | <#ftype as Variable>::assert_eq(lhs.#fname, rhs.#fname, builder); 42 | } 43 | }); 44 | 45 | let fields_assert_ne = fields.named.iter().map(|f| { 46 | let fname = &f.ident; 47 | let ftype = &f.ty; 48 | quote! { 49 | <#ftype as Variable>::assert_ne(lhs.#fname, rhs.#fname, builder); 50 | } 51 | }); 52 | 53 | let field_sizes = fields.named.iter().map(|f| { 54 | let ftype = &f.ty; 55 | quote! { 56 | <#ftype as MemVariable>::size_of() 57 | } 58 | }); 59 | 60 | let field_loads = fields.named.iter().map(|f| { 61 | let fname = &f.ident; 62 | let ftype = &f.ty; 63 | quote! { 64 | { 65 | // let address = builder.eval(ptr + Usize::Const(offset)); 66 | self.#fname.load(ptr, index, builder); 67 | index.offset += <#ftype as MemVariable>::size_of(); 68 | } 69 | } 70 | }); 71 | 72 | let field_stores = fields.named.iter().map(|f| { 73 | let fname = &f.ident; 74 | let ftype = &f.ty; 75 | quote! { 76 | { 77 | // let address = builder.eval(ptr + Usize::Const(offset)); 78 | self.#fname.store(ptr, index, builder); 79 | index.offset += <#ftype as MemVariable>::size_of(); 80 | } 81 | } 82 | }); 83 | 84 | quote! { 85 | impl Variable for #name { 86 | type Expression = Self; 87 | 88 | fn uninit(builder: &mut Builder) -> Self { 89 | Self { 90 | #(#fields_init)* 91 | } 92 | } 93 | 94 | fn assign(&self, src: Self::Expression, builder: &mut Builder) { 95 | #(#fields_assign)* 96 | } 97 | 98 | fn assert_eq( 99 | lhs: impl Into, 100 | rhs: impl Into, 101 | builder: &mut Builder, 102 | ) { 103 | let lhs = lhs.into(); 104 | let rhs = rhs.into(); 105 | #(#fields_assert_eq)* 106 | } 107 | 108 | fn assert_ne( 109 | lhs: impl Into, 110 | rhs: impl Into, 111 | builder: &mut Builder, 112 | ) { 113 | let lhs = lhs.into(); 114 | let rhs = rhs.into(); 115 | #(#fields_assert_ne)* 116 | } 117 | } 118 | 119 | impl MemVariable for #name { 120 | fn size_of() -> usize { 121 | let mut size = 0; 122 | #(size += #field_sizes;)* 123 | size 124 | } 125 | 126 | fn load(&self, ptr: Ptr<::N>, 127 | index: MemIndex<::N>, 128 | builder: &mut Builder) { 129 | let mut index = index; 130 | #(#field_loads)* 131 | } 132 | 133 | fn store(&self, ptr: Ptr<::N>, 134 | index: MemIndex<::N>, 135 | builder: &mut Builder) { 136 | let mut index = index; 137 | #(#field_stores)* 138 | } 139 | } 140 | } 141 | } 142 | _ => unimplemented!(), 143 | }, 144 | _ => unimplemented!(), 145 | }; 146 | 147 | gen.into() 148 | } 149 | -------------------------------------------------------------------------------- /crates/recursion/executor/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-recursion-executor" 3 | description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM." 
4 | readme = "../../../README.md" 5 | version = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | repository = { workspace = true } 9 | keywords = { workspace = true } 10 | categories = { workspace = true } 11 | 12 | [dependencies] 13 | p3-field = { workspace = true } 14 | p3-symmetric = { workspace = true } 15 | sp1-derive = { workspace = true } 16 | hypercube-stark = { workspace = true } 17 | p3-challenger = { workspace = true } 18 | hypercube-utils = { workspace = true } 19 | serde = { workspace = true, features = ["derive", "rc"] } 20 | static_assertions = "1.1.0" 21 | -------------------------------------------------------------------------------- /crates/recursion/executor/src/block.rs: -------------------------------------------------------------------------------- 1 | use hypercube_stark::air::{BinomialExtension, ExtensionAirBuilder, SP1AirBuilder}; 2 | use p3_field::{AbstractField, ExtensionField, Field}; 3 | use serde::{Deserialize, Serialize}; 4 | use sp1_derive::AlignedBorrow; 5 | 6 | use std::ops::{Index, IndexMut}; 7 | 8 | use crate::D; 9 | 10 | /// The smallest unit of memory that can be read and written to. 11 | #[derive( 12 | AlignedBorrow, Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize, 13 | )] 14 | #[repr(C)] 15 | pub struct Block(pub [T; D]); 16 | 17 | impl Block { 18 | pub fn map(self, f: F) -> Block 19 | where 20 | F: FnMut(T) -> U, 21 | { 22 | Block(self.0.map(f)) 23 | } 24 | 25 | pub fn ext(&self) -> E 26 | where 27 | T: Field, 28 | E: ExtensionField, 29 | { 30 | E::from_base_fn(|i| self.0[i]) 31 | } 32 | } 33 | 34 | impl Block { 35 | pub fn as_extension>(&self) -> BinomialExtension { 36 | let arr: [AB::Expr; 4] = self.0.clone().map(|x| AB::Expr::zero() + x); 37 | BinomialExtension(arr) 38 | } 39 | 40 | pub fn as_extension_from_base>( 41 | &self, 42 | base: AB::Expr, 43 | ) -> BinomialExtension { 44 | let mut arr: [AB::Expr; 4] = self.0.clone().map(|_| AB::Expr::zero()); 45 | arr[0] = base; 46 | 47 | BinomialExtension(arr) 48 | } 49 | } 50 | 51 | impl From<[T; D]> for Block { 52 | fn from(arr: [T; D]) -> Self { 53 | Self(arr) 54 | } 55 | } 56 | 57 | impl From for Block { 58 | fn from(value: T) -> Self { 59 | Self([value, T::zero(), T::zero(), T::zero()]) 60 | } 61 | } 62 | 63 | impl From<&[T]> for Block { 64 | fn from(slice: &[T]) -> Self { 65 | let arr: [T; D] = slice.try_into().unwrap(); 66 | Self(arr) 67 | } 68 | } 69 | 70 | impl Index for Block 71 | where 72 | [T]: Index, 73 | { 74 | type Output = <[T] as Index>::Output; 75 | 76 | #[inline] 77 | fn index(&self, index: I) -> &Self::Output { 78 | Index::index(&self.0, index) 79 | } 80 | } 81 | 82 | impl IndexMut for Block 83 | where 84 | [T]: IndexMut, 85 | { 86 | #[inline] 87 | fn index_mut(&mut self, index: I) -> &mut Self::Output { 88 | IndexMut::index_mut(&mut self.0, index) 89 | } 90 | } 91 | 92 | impl IntoIterator for Block { 93 | type Item = T; 94 | type IntoIter = std::array::IntoIter; 95 | 96 | fn into_iter(self) -> Self::IntoIter { 97 | self.0.into_iter() 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /crates/recursion/executor/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod block; 2 | mod opcode; 3 | mod program; 4 | mod public_values; 5 | mod record; 6 | pub mod shape; 7 | 8 | pub use public_values::PV_DIGEST_NUM_WORDS; 9 | 10 | // Avoid triggering annoying branch of thiserror derive macro. 
11 | pub use block::Block; 12 | pub use opcode::*; 13 | use p3_field::PrimeField64; 14 | pub use public_values::{ 15 | RecursionPublicValues, NUM_PV_ELMS_TO_HASH, POSEIDON_NUM_WORDS, RECURSIVE_PROOF_NUM_PV_ELTS, 16 | }; 17 | use serde::{Deserialize, Serialize}; 18 | use sp1_derive::AlignedBorrow; 19 | use std::fmt::Debug; 20 | 21 | /// The width of the Poseidon2 permutation. 22 | pub const PERMUTATION_WIDTH: usize = 16; 23 | pub const POSEIDON2_SBOX_DEGREE: u64 = 7; 24 | pub const HASH_RATE: usize = 8; 25 | 26 | /// The current verifier implementation assumes that we are using a 256-bit hash with 32-bit 27 | /// elements. 28 | pub const DIGEST_SIZE: usize = 8; 29 | 30 | pub const NUM_BITS: usize = 31; 31 | 32 | pub const D: usize = 4; 33 | 34 | #[derive( 35 | AlignedBorrow, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, Default, 36 | )] 37 | #[repr(transparent)] 38 | pub struct Address(pub F); 39 | 40 | impl Address { 41 | #[inline] 42 | pub fn as_usize(&self) -> usize { 43 | self.0.as_canonical_u64() as usize 44 | } 45 | } 46 | 47 | // ------------------------------------------------------------------------------------------------- 48 | 49 | /// The inputs and outputs to an operation of the base field ALU. 50 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 51 | #[repr(C)] 52 | pub struct BaseAluIo { 53 | pub out: V, 54 | pub in1: V, 55 | pub in2: V, 56 | } 57 | 58 | // ------------------------------------------------------------------------------------------------- 59 | 60 | /// The inputs and outputs to an operation of the extension field ALU. 61 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 62 | #[repr(C)] 63 | pub struct ExtAluIo { 64 | pub out: V, 65 | pub in1: V, 66 | pub in2: V, 67 | } 68 | 69 | // ------------------------------------------------------------------------------------------------- 70 | 71 | /// The inputs and outputs to the manual memory management/memory initialization table. 72 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 73 | #[repr(C)] 74 | pub struct MemIo { 75 | pub inner: V, 76 | } 77 | 78 | // ------------------------------------------------------------------------------------------------- 79 | 80 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 81 | pub enum MemAccessKind { 82 | Read, 83 | Write, 84 | } 85 | 86 | /// The inputs and outputs to a Poseidon2 permutation. 87 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 88 | #[repr(C)] 89 | pub struct Poseidon2Io { 90 | pub input: [V; PERMUTATION_WIDTH], 91 | pub output: [V; PERMUTATION_WIDTH], 92 | } 93 | 94 | /// The inputs and outputs to a select operation. 95 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 96 | #[repr(C)] 97 | pub struct SelectIo { 98 | pub bit: V, 99 | pub out1: V, 100 | pub out2: V, 101 | pub in1: V, 102 | pub in2: V, 103 | } 104 | 105 | /// The inputs and outputs to the operations for prefix sum checks. 
106 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 107 | pub struct PrefixSumChecksIo { 108 | pub zero: V, 109 | pub one: V, 110 | pub x1: Vec, 111 | pub x2: Vec, 112 | pub accs: Vec, 113 | pub field_accs: Vec, 114 | } 115 | 116 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 117 | pub struct BatchFRIIo { 118 | pub ext_single: BatchFRIExtSingleIo>, 119 | pub ext_vec: BatchFRIExtVecIo>>, 120 | pub base_vec: BatchFRIBaseVecIo, 121 | } 122 | 123 | /// The extension-field-valued single inputs to the batch FRI operation. 124 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 125 | #[repr(C)] 126 | pub struct BatchFRIExtSingleIo { 127 | pub acc: V, 128 | } 129 | 130 | /// The extension-field-valued vector inputs to the batch FRI operation. 131 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 132 | #[repr(C)] 133 | pub struct BatchFRIExtVecIo { 134 | pub p_at_z: V, 135 | pub alpha_pow: V, 136 | } 137 | 138 | /// The base-field-valued vector inputs to the batch FRI operation. 139 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] 140 | #[repr(C)] 141 | pub struct BatchFRIBaseVecIo { 142 | pub p_at_x: V, 143 | } 144 | -------------------------------------------------------------------------------- /crates/recursion/executor/src/opcode.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] 4 | #[repr(C)] 5 | pub enum BaseAluOpcode { 6 | AddF, 7 | SubF, 8 | MulF, 9 | DivF, 10 | } 11 | 12 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] 13 | #[repr(C)] 14 | pub enum ExtAluOpcode { 15 | AddE, 16 | SubE, 17 | MulE, 18 | DivE, 19 | } 20 | -------------------------------------------------------------------------------- /crates/recursion/executor/src/public_values.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Debug; 2 | use hypercube_stark::{septic_digest::SepticDigest, Word, PROOF_MAX_NUM_PVS}; 3 | use hypercube_utils::indices_arr; 4 | use p3_challenger::DuplexChallenger; 5 | use p3_field::PrimeField32; 6 | use p3_symmetric::CryptographicPermutation; 7 | use serde::{Deserialize, Serialize}; 8 | use sp1_derive::AlignedBorrow; 9 | use static_assertions::const_assert_eq; 10 | use std::{ 11 | borrow::BorrowMut, 12 | mem::{size_of, transmute, MaybeUninit}, 13 | }; 14 | 15 | use crate::{DIGEST_SIZE, HASH_RATE, PERMUTATION_WIDTH}; 16 | 17 | pub const PV_DIGEST_NUM_WORDS: usize = 8; 18 | 19 | pub const CHALLENGER_STATE_NUM_ELTS: usize = size_of::>(); 20 | 21 | pub const RECURSIVE_PROOF_NUM_PV_ELTS: usize = size_of::>(); 22 | 23 | const fn make_col_map() -> RecursionPublicValues { 24 | let indices_arr = indices_arr::(); 25 | unsafe { 26 | transmute::<[usize; RECURSIVE_PROOF_NUM_PV_ELTS], RecursionPublicValues>(indices_arr) 27 | } 28 | } 29 | 30 | pub const RECURSION_PUBLIC_VALUES_COL_MAP: RecursionPublicValues = make_col_map(); 31 | pub const POSEIDON_NUM_WORDS: usize = 8; 32 | 33 | // All the fields before `digest` are hashed to produce the digest. 34 | pub const NUM_PV_ELMS_TO_HASH: usize = RECURSION_PUBLIC_VALUES_COL_MAP.digest[0]; 35 | 36 | // Recursive proof has more public values than core proof, so the max number constant defined in 37 | // sp1_core should be set to `RECURSIVE_PROOF_NUM_PV_ELTS`. 
38 | const_assert_eq!(RECURSIVE_PROOF_NUM_PV_ELTS, PROOF_MAX_NUM_PVS); 39 | 40 | #[derive(AlignedBorrow, Serialize, Deserialize, Clone, Copy, Default, Debug)] 41 | #[repr(C)] 42 | pub struct ChallengerPublicValues { 43 | pub sponge_state: [T; PERMUTATION_WIDTH], 44 | pub num_inputs: T, 45 | pub input_buffer: [T; PERMUTATION_WIDTH], 46 | pub num_outputs: T, 47 | pub output_buffer: [T; PERMUTATION_WIDTH], 48 | } 49 | 50 | impl ChallengerPublicValues { 51 | pub fn set_challenger>( 52 | &self, 53 | challenger: &mut DuplexChallenger, 54 | ) where 55 | T: PrimeField32, 56 | { 57 | challenger.sponge_state = self.sponge_state; 58 | let num_inputs = self.num_inputs.as_canonical_u32() as usize; 59 | challenger.input_buffer = self.input_buffer[..num_inputs].to_vec(); 60 | let num_outputs = self.num_outputs.as_canonical_u32() as usize; 61 | challenger.output_buffer = self.output_buffer[..num_outputs].to_vec(); 62 | } 63 | 64 | pub fn as_array(&self) -> [T; CHALLENGER_STATE_NUM_ELTS] 65 | where 66 | T: Copy, 67 | { 68 | unsafe { 69 | let mut ret = [MaybeUninit::::zeroed().assume_init(); CHALLENGER_STATE_NUM_ELTS]; 70 | let pv: &mut ChallengerPublicValues = ret.as_mut_slice().borrow_mut(); 71 | *pv = *self; 72 | ret 73 | } 74 | } 75 | } 76 | 77 | /// The PublicValues struct is used to store all of a reduce proof's public values. 78 | #[derive(AlignedBorrow, Serialize, Deserialize, Clone, Copy, Default, Debug)] 79 | #[repr(C)] 80 | pub struct RecursionPublicValues { 81 | /// The hash of all the bytes that the program has written to public values. 82 | pub committed_value_digest: [[T; 4]; PV_DIGEST_NUM_WORDS], 83 | 84 | /// The hash of all deferred proofs that have been witnessed in the VM. 85 | pub deferred_proofs_digest: [T; POSEIDON_NUM_WORDS], 86 | 87 | /// The start pc of shards being proven. 88 | pub start_pc: T, 89 | 90 | /// The expected start pc for the next shard. 91 | pub next_pc: T, 92 | 93 | /// First shard being proven. 94 | pub start_shard: T, 95 | 96 | /// Next shard that should be proven. 97 | pub next_shard: T, 98 | 99 | /// First execution shard being proven. 100 | pub start_execution_shard: T, 101 | 102 | /// Next execution shard that should be proven. 103 | pub next_execution_shard: T, 104 | 105 | /// Previous MemoryInit address word. 106 | pub previous_init_addr_word: Word, 107 | 108 | /// Last MemoryInit address word. 109 | pub last_init_addr_word: Word, 110 | 111 | /// Previous MemoryFinalize address word. 112 | pub previous_finalize_addr_word: Word, 113 | 114 | /// Last MemoryFinalize address word. 115 | pub last_finalize_addr_word: Word, 116 | 117 | /// Start state of reconstruct_deferred_digest. 118 | pub start_reconstruct_deferred_digest: [T; POSEIDON_NUM_WORDS], 119 | 120 | /// End state of reconstruct_deferred_digest. 121 | pub end_reconstruct_deferred_digest: [T; POSEIDON_NUM_WORDS], 122 | 123 | /// The commitment to the sp1 program being proven. 124 | pub sp1_vk_digest: [T; DIGEST_SIZE], 125 | 126 | /// The root of the vk merkle tree. 127 | pub vk_root: [T; DIGEST_SIZE], 128 | 129 | /// Current cumulative sum of lookup bus. Note that for recursive proofs for core proofs, this 130 | /// contains the global cumulative sum. 131 | pub global_cumulative_sum: SepticDigest, 132 | 133 | /// Whether the proof completely proves the program execution. 134 | pub is_complete: T, 135 | 136 | /// The exit code of the program. 137 | pub exit_code: T, 138 | 139 | /// The digest of all the previous public values elements. 
140 | pub digest: [T; DIGEST_SIZE], 141 | } 142 | 143 | /// Converts the public values to an array of elements. 144 | impl RecursionPublicValues { 145 | pub fn as_array(&self) -> [F; RECURSIVE_PROOF_NUM_PV_ELTS] { 146 | unsafe { 147 | let mut ret = [MaybeUninit::::zeroed().assume_init(); RECURSIVE_PROOF_NUM_PV_ELTS]; 148 | let pv: &mut RecursionPublicValues = ret.as_mut_slice().borrow_mut(); 149 | *pv = *self; 150 | ret 151 | } 152 | } 153 | } 154 | 155 | impl IntoIterator for RecursionPublicValues { 156 | type Item = T; 157 | type IntoIter = std::array::IntoIter; 158 | 159 | fn into_iter(self) -> Self::IntoIter { 160 | self.as_array().into_iter() 161 | } 162 | } 163 | 164 | impl IntoIterator for ChallengerPublicValues { 165 | type Item = T; 166 | type IntoIter = std::array::IntoIter; 167 | 168 | fn into_iter(self) -> Self::IntoIter { 169 | self.as_array().into_iter() 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /crates/recursion/executor/src/shape.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{BTreeMap, BTreeSet}, 3 | marker::PhantomData, 4 | }; 5 | 6 | use hypercube_stark::{air::MachineAir, ChipDimensions}; 7 | use p3_field::Field; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] 11 | pub struct RecursionShape { 12 | heights: BTreeMap, 13 | _marker: PhantomData, 14 | } 15 | 16 | impl RecursionShape { 17 | pub const fn new(heights: BTreeMap) -> Self { 18 | Self { heights, _marker: PhantomData } 19 | } 20 | 21 | pub fn height(&self, air: &A) -> Option 22 | where 23 | F: Field, 24 | A: MachineAir, 25 | { 26 | self.heights.get(&air.name()).copied() 27 | } 28 | 29 | pub fn height_of_name(&self, name: &str) -> Option { 30 | self.heights.get(name).copied() 31 | } 32 | 33 | pub fn insert(&mut self, air: &A, height: usize) 34 | where 35 | F: Field, 36 | A: MachineAir, 37 | { 38 | self.heights.insert(air.name(), height); 39 | } 40 | 41 | pub const fn empty() -> Self { 42 | Self { heights: BTreeMap::new(), _marker: PhantomData } 43 | } 44 | 45 | pub fn preprocessed_chip_information( 46 | &self, 47 | chips: &BTreeSet, 48 | ) -> BTreeMap 49 | where 50 | F: Field, 51 | A: MachineAir, 52 | { 53 | chips 54 | .iter() 55 | .filter_map(|chip| { 56 | self.height(chip).map(|height| { 57 | ( 58 | chip.name(), 59 | ChipDimensions { height, num_polynomials: chip.preprocessed_width() }, 60 | ) 61 | }) 62 | }) 63 | .collect() 64 | } 65 | } 66 | 67 | impl> FromIterator<(A, usize)> for RecursionShape { 68 | fn from_iter>(iter: T) -> Self { 69 | RecursionShape { 70 | heights: iter.into_iter().map(|(air, height)| (air.name(), height)).collect(), 71 | _marker: PhantomData, 72 | } 73 | } 74 | } 75 | 76 | impl IntoIterator for RecursionShape { 77 | type Item = (String, usize); 78 | type IntoIter = as IntoIterator>::IntoIter; 79 | 80 | fn into_iter(self) -> Self::IntoIter { 81 | self.heights.into_iter() 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /crates/recursion/machine/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-recursion-machine" 3 | description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM." 
4 | readme = "../../../README.md" 5 | version = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | repository = { workspace = true } 9 | keywords = { workspace = true } 10 | categories = { workspace = true } 11 | 12 | [dependencies] 13 | sp1-derive = { workspace = true } 14 | sp1-primitives = { workspace = true } 15 | hypercube-stark = { workspace = true } 16 | hypercube-recursion-executor = { workspace = true } 17 | 18 | p3-air = { workspace = true } 19 | p3-matrix = { workspace = true } 20 | hypercube-utils = { workspace = true } 21 | p3-baby-bear = { workspace = true } 22 | hypercube-jagged = { workspace = true } 23 | p3-poseidon2 = { workspace = true } 24 | p3-field = { workspace = true } 25 | itertools = { workspace = true } 26 | serde = { workspace = true, features = ["derive", "rc"] } 27 | strum_macros = { workspace = true } 28 | strum = {workspace = true } 29 | thiserror = { workspace = true } 30 | 31 | 32 | [lints] 33 | workspace = true 34 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/builder.rs: -------------------------------------------------------------------------------- 1 | use std::iter::once; 2 | 3 | use hypercube_recursion_executor::{Address, Block}; 4 | use hypercube_stark::{ 5 | air::{AirInteraction, BaseAirBuilder, InteractionScope, MachineAirBuilder}, 6 | InteractionKind, 7 | }; 8 | use p3_air::AirBuilderWithPublicValues; 9 | use p3_field::AbstractField; 10 | 11 | /// A trait which contains all helper methods for building SP1 recursion machine AIRs. 12 | pub trait SP1RecursionAirBuilder: MachineAirBuilder + RecursionAirBuilder {} 13 | 14 | impl SP1RecursionAirBuilder for AB {} 15 | impl RecursionAirBuilder for AB {} 16 | 17 | pub trait RecursionAirBuilder: BaseAirBuilder { 18 | fn send_single>( 19 | &mut self, 20 | addr: Address, 21 | val: E, 22 | mult: impl Into, 23 | ) { 24 | let mut padded_value = core::array::from_fn(|_| Self::Expr::zero()); 25 | padded_value[0] = val.into(); 26 | self.send_block(Address(addr.0.into()), Block(padded_value), mult) 27 | } 28 | 29 | fn send_block>( 30 | &mut self, 31 | addr: Address, 32 | val: Block, 33 | mult: impl Into, 34 | ) { 35 | self.send( 36 | AirInteraction::new( 37 | once(addr.0).chain(val).map(Into::into).collect(), 38 | mult.into(), 39 | InteractionKind::Memory, 40 | ), 41 | InteractionScope::Local, 42 | ); 43 | } 44 | 45 | fn receive_single>( 46 | &mut self, 47 | addr: Address, 48 | val: E, 49 | mult: impl Into, 50 | ) { 51 | let mut padded_value = core::array::from_fn(|_| Self::Expr::zero()); 52 | padded_value[0] = val.into(); 53 | self.receive_block(Address(addr.0.into()), Block(padded_value), mult) 54 | } 55 | 56 | fn receive_block>( 57 | &mut self, 58 | addr: Address, 59 | val: Block, 60 | mult: impl Into, 61 | ) { 62 | self.receive( 63 | AirInteraction::new( 64 | once(addr.0).chain(val).map(Into::into).collect(), 65 | mult.into(), 66 | InteractionKind::Memory, 67 | ), 68 | InteractionScope::Local, 69 | ); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/alu_ext.rs: -------------------------------------------------------------------------------- 1 | use core::borrow::Borrow; 2 | use hypercube_recursion_executor::{Address, Block, ExtAluIo, D}; 3 | use hypercube_stark::air::{ExtensionAirBuilder, MachineAir}; 4 | use p3_air::{Air, BaseAir, PairBuilder}; 5 | use p3_field::{extension::BinomiallyExtendable, Field, PrimeField32}; 6 | use p3_matrix::Matrix; 7 | 
use sp1_derive::AlignedBorrow; 8 | use std::iter::zip; 9 | 10 | use crate::builder::SP1RecursionAirBuilder; 11 | 12 | pub const NUM_EXT_ALU_ENTRIES_PER_ROW: usize = 4; 13 | 14 | #[derive(Default)] 15 | pub struct ExtAluChip; 16 | 17 | pub const NUM_EXT_ALU_COLS: usize = core::mem::size_of::>(); 18 | 19 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 20 | #[repr(C)] 21 | pub struct ExtAluCols { 22 | pub values: [ExtAluValueCols; NUM_EXT_ALU_ENTRIES_PER_ROW], 23 | } 24 | 25 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 26 | #[repr(C)] 27 | pub struct ExtAluValueCols { 28 | pub vals: ExtAluIo>, 29 | } 30 | 31 | pub const NUM_EXT_ALU_PREPROCESSED_COLS: usize = core::mem::size_of::>(); 32 | 33 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 34 | #[repr(C)] 35 | pub struct ExtAluPreprocessedCols { 36 | pub accesses: [ExtAluAccessCols; NUM_EXT_ALU_ENTRIES_PER_ROW], 37 | } 38 | 39 | pub const NUM_EXT_ALU_ACCESS_COLS: usize = core::mem::size_of::>(); 40 | 41 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 42 | #[repr(C)] 43 | pub struct ExtAluAccessCols { 44 | pub addrs: ExtAluIo>, 45 | pub is_add: F, 46 | pub is_sub: F, 47 | pub is_mul: F, 48 | pub is_div: F, 49 | pub mult: F, 50 | } 51 | 52 | impl BaseAir for ExtAluChip { 53 | fn width(&self) -> usize { 54 | NUM_EXT_ALU_COLS 55 | } 56 | } 57 | 58 | impl Air for ExtAluChip 59 | where 60 | AB: SP1RecursionAirBuilder + PairBuilder, 61 | { 62 | fn eval(&self, builder: &mut AB) { 63 | let main = builder.main(); 64 | let local = main.row_slice(0); 65 | let local: &ExtAluCols = (*local).borrow(); 66 | let prep = builder.preprocessed(); 67 | let prep_local = prep.row_slice(0); 68 | let prep_local: &ExtAluPreprocessedCols = (*prep_local).borrow(); 69 | 70 | for ( 71 | ExtAluValueCols { vals }, 72 | ExtAluAccessCols { addrs, is_add, is_sub, is_mul, is_div, mult }, 73 | ) in zip(local.values, prep_local.accesses) 74 | { 75 | let in1 = vals.in1.as_extension::(); 76 | let in2 = vals.in2.as_extension::(); 77 | let out = vals.out.as_extension::(); 78 | 79 | // Check exactly one flag is enabled. 80 | let is_real = is_add + is_sub + is_mul + is_div; 81 | builder.assert_bool(is_real.clone()); 82 | 83 | builder.when(is_add).assert_ext_eq(in1.clone() + in2.clone(), out.clone()); 84 | builder.when(is_sub).assert_ext_eq(in1.clone(), in2.clone() + out.clone()); 85 | builder.when(is_mul).assert_ext_eq(in1.clone() * in2.clone(), out.clone()); 86 | builder.when(is_div).assert_ext_eq(in1, in2 * out); 87 | 88 | // Read the inputs from memory. 89 | builder.receive_block(addrs.in1, vals.in1, is_real.clone()); 90 | 91 | builder.receive_block(addrs.in2, vals.in2, is_real); 92 | 93 | // Write the output to memory. 
94 | builder.send_block(addrs.out, vals.out, mult); 95 | } 96 | } 97 | } 98 | impl> MachineAir for ExtAluChip { 99 | fn name(&self) -> String { 100 | "ExtAlu".to_string() 101 | } 102 | 103 | fn preprocessed_width(&self) -> usize { 104 | NUM_EXT_ALU_PREPROCESSED_COLS 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/mem/constant.rs: -------------------------------------------------------------------------------- 1 | use core::borrow::Borrow; 2 | use hypercube_recursion_executor::Block; 3 | use hypercube_stark::air::MachineAir; 4 | use p3_air::{Air, BaseAir, PairBuilder}; 5 | use p3_field::PrimeField32; 6 | use p3_matrix::Matrix; 7 | use sp1_derive::AlignedBorrow; 8 | use std::marker::PhantomData; 9 | 10 | use crate::builder::SP1RecursionAirBuilder; 11 | 12 | use super::MemoryAccessCols; 13 | 14 | pub const NUM_CONST_MEM_ENTRIES_PER_ROW: usize = 2; 15 | 16 | #[derive(Default)] 17 | pub struct MemoryConstChip { 18 | _marker: PhantomData, 19 | } 20 | 21 | pub const NUM_MEM_INIT_COLS: usize = core::mem::size_of::>(); 22 | 23 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 24 | #[repr(C)] 25 | pub struct MemoryConstCols { 26 | // At least one column is required, otherwise a bunch of things break. 27 | _nothing: F, 28 | } 29 | 30 | pub const NUM_MEM_PREPROCESSED_INIT_COLS: usize = 31 | core::mem::size_of::>(); 32 | 33 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 34 | #[repr(C)] 35 | pub struct MemoryConstPreprocessedCols { 36 | values_and_accesses: [(Block, MemoryAccessCols); NUM_CONST_MEM_ENTRIES_PER_ROW], 37 | } 38 | impl BaseAir for MemoryConstChip { 39 | fn width(&self) -> usize { 40 | NUM_MEM_INIT_COLS 41 | } 42 | } 43 | 44 | impl Air for MemoryConstChip 45 | where 46 | AB: SP1RecursionAirBuilder + PairBuilder, 47 | { 48 | fn eval(&self, builder: &mut AB) { 49 | let prep = builder.preprocessed(); 50 | let prep_local = prep.row_slice(0); 51 | let prep_local: &MemoryConstPreprocessedCols = (*prep_local).borrow(); 52 | 53 | for (value, access) in prep_local.values_and_accesses { 54 | builder.send_block(access.addr, value, access.mult); 55 | } 56 | } 57 | } 58 | 59 | impl MachineAir for MemoryConstChip { 60 | fn name(&self) -> String { 61 | "MemoryConst".to_string() 62 | } 63 | fn preprocessed_width(&self) -> usize { 64 | NUM_MEM_PREPROCESSED_INIT_COLS 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/mem/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod constant; 2 | pub mod variable; 3 | 4 | pub use constant::MemoryConstChip; 5 | use hypercube_recursion_executor::Address; 6 | pub use variable::MemoryVarChip; 7 | 8 | use sp1_derive::AlignedBorrow; 9 | 10 | pub const NUM_MEM_ACCESS_COLS: usize = core::mem::size_of::>(); 11 | 12 | /// Data describing in what manner to access a particular memory block. 13 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 14 | #[repr(C)] 15 | pub struct MemoryAccessColsChips { 16 | /// The address to access. 17 | pub addr: Address, 18 | /// The multiplicity which to read/write. 19 | /// "Positive" values indicate a write, and "negative" values indicate a read. 20 | pub mult: F, 21 | } 22 | 23 | /// Avoids cbindgen naming collisions. 
24 | pub type MemoryAccessCols = MemoryAccessColsChips; 25 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/mem/variable.rs: -------------------------------------------------------------------------------- 1 | use core::borrow::Borrow; 2 | use hypercube_recursion_executor::Block; 3 | use hypercube_stark::air::MachineAir; 4 | use p3_air::{Air, BaseAir, PairBuilder}; 5 | use p3_field::PrimeField32; 6 | use p3_matrix::Matrix; 7 | use sp1_derive::AlignedBorrow; 8 | use std::{iter::zip, marker::PhantomData}; 9 | 10 | use crate::builder::SP1RecursionAirBuilder; 11 | 12 | use super::MemoryAccessCols; 13 | 14 | pub const NUM_VAR_MEM_ENTRIES_PER_ROW: usize = 2; 15 | 16 | #[derive(Default)] 17 | pub struct MemoryVarChip { 18 | _marker: PhantomData, 19 | } 20 | 21 | pub const NUM_MEM_INIT_COLS: usize = core::mem::size_of::>(); 22 | 23 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 24 | #[repr(C)] 25 | pub struct MemoryVarCols { 26 | values: [Block; NUM_VAR_MEM_ENTRIES_PER_ROW], 27 | } 28 | 29 | pub const NUM_MEM_PREPROCESSED_INIT_COLS: usize = 30 | core::mem::size_of::>(); 31 | 32 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 33 | #[repr(C)] 34 | pub struct MemoryVarPreprocessedCols { 35 | accesses: [MemoryAccessCols; NUM_VAR_MEM_ENTRIES_PER_ROW], 36 | } 37 | 38 | impl BaseAir for MemoryVarChip { 39 | fn width(&self) -> usize { 40 | NUM_MEM_INIT_COLS 41 | } 42 | } 43 | 44 | impl Air for MemoryVarChip 45 | where 46 | AB: SP1RecursionAirBuilder + PairBuilder, 47 | { 48 | fn eval(&self, builder: &mut AB) { 49 | let main = builder.main(); 50 | let local = main.row_slice(0); 51 | let local: &MemoryVarCols = (*local).borrow(); 52 | let prep = builder.preprocessed(); 53 | let prep_local = prep.row_slice(0); 54 | let prep_local: &MemoryVarPreprocessedCols = (*prep_local).borrow(); 55 | 56 | for (value, access) in zip(local.values, prep_local.accesses) { 57 | builder.send_block(access.addr, value, access.mult); 58 | } 59 | } 60 | } 61 | 62 | impl MachineAir for MemoryVarChip { 63 | fn name(&self) -> String { 64 | "MemoryVar".to_string() 65 | } 66 | fn preprocessed_width(&self) -> usize { 67 | NUM_MEM_PREPROCESSED_INIT_COLS 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod alu_base; 2 | pub mod alu_ext; 3 | pub mod mem; 4 | // pub mod poseidon2_skinny; 5 | pub mod poseidon2_wide; 6 | pub mod prefix_sum_checks; 7 | pub mod public_values; 8 | pub mod select; 9 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/poseidon2_wide/columns/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod preprocessed; 2 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/poseidon2_wide/columns/preprocessed.rs: -------------------------------------------------------------------------------- 1 | use hypercube_recursion_executor::Address; 2 | use sp1_core_machine::operations::poseidon2::WIDTH; 3 | use sp1_derive::AlignedBorrow; 4 | 5 | use crate::chips::mem::MemoryAccessColsChips; 6 | 7 | /// A column layout for the preprocessed Poseidon2 AIR. 
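/// `input` holds the addresses of the 16 state elements to read, and `output` holds the memory accesses for writing the permuted state back. By its name, `is_real_neg` is the negation of an `is_real` flag, letting padding rows contribute zero multiplicity to the memory argument.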
8 | #[derive(AlignedBorrow, Clone, Copy, Debug)] 9 | #[repr(C)] 10 | pub struct Poseidon2PreprocessedColsWide { 11 | pub input: [Address; WIDTH], 12 | pub output: [MemoryAccessColsChips; WIDTH], 13 | pub is_real_neg: T, 14 | } 15 | 16 | impl MachineAir for Poseidon2WideChip { 17 | type Record = ExecutionRecord; 18 | 19 | type Program = RecursionProgram; 20 | 21 | fn name(&self) -> String { 22 | format!("Poseidon2WideDeg{}", DEGREE) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/poseidon2_wide/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{borrow::Borrow, ops::Deref}; 2 | 3 | use hypercube_recursion_executor::Address; 4 | use hypercube_stark::air::MachineAir; 5 | use p3_air::BaseAir; 6 | use p3_field::PrimeField32; 7 | use permutation::{Poseidon2Cols, Poseidon2Degree3Cols, NUM_POSEIDON2_DEGREE3_COLS}; 8 | use sp1_derive::AlignedBorrow; 9 | 10 | use super::mem::MemoryAccessColsChips; 11 | 12 | pub mod air; 13 | pub mod permutation; 14 | pub mod trace; 15 | 16 | /// The width of the permutation. 17 | pub const WIDTH: usize = 16; 18 | 19 | /// The rate of the permutation. 20 | pub const RATE: usize = WIDTH / 2; 21 | 22 | /// The number of external rounds. 23 | pub const NUM_EXTERNAL_ROUNDS: usize = 8; 24 | 25 | /// The number of internal rounds. 26 | pub const NUM_INTERNAL_ROUNDS: usize = 13; 27 | 28 | /// The total number of rounds. 29 | pub const NUM_ROUNDS: usize = NUM_EXTERNAL_ROUNDS + NUM_INTERNAL_ROUNDS; 30 | 31 | /// The number of columns in the Poseidon2 operation. 32 | pub const NUM_POSEIDON2_OPERATION_COLUMNS: usize = std::mem::size_of::>(); 33 | 34 | /// A column layout for the preprocessed Poseidon2 AIR. 35 | #[derive(AlignedBorrow, Clone, Copy, Debug)] 36 | #[repr(C)] 37 | pub struct Poseidon2PreprocessedColsWide { 38 | pub input: [Address; WIDTH], 39 | pub output: [MemoryAccessColsChips; WIDTH], 40 | pub is_real_neg: T, 41 | } 42 | 43 | const PREPROCESSED_POSEIDON2_WIDTH: usize = size_of::>(); 44 | 45 | /// A chip that implements addition for the opcode Poseidon2Wide. 46 | #[derive(Default, Debug, Clone, Copy)] 47 | pub struct Poseidon2WideChip; 48 | 49 | impl<'a, const DEGREE: usize> Poseidon2WideChip { 50 | /// Transmute a row it to an immutable [`Poseidon2Cols`] instance. 51 | pub fn convert(row: impl Deref) -> Box + 'a> 52 | where 53 | T: Copy + 'a, 54 | { 55 | if DEGREE == 3 { 56 | let convert: &Poseidon2Degree3Cols = (*row).borrow(); 57 | Box::new(*convert) 58 | } else { 59 | panic!("Unsupported degree"); 60 | } 61 | } 62 | } 63 | 64 | /// A set of columns needed to compute the Poseidon2 operation. 65 | #[derive(AlignedBorrow, Clone, Copy)] 66 | #[repr(C)] 67 | pub struct Poseidon2Operation { 68 | /// The permutation. 
69 | pub permutation: Poseidon2Degree3Cols, 70 | } 71 | 72 | impl BaseAir for Poseidon2WideChip { 73 | fn width(&self) -> usize { 74 | if DEGREE == 3 { 75 | NUM_POSEIDON2_DEGREE3_COLS 76 | } else { 77 | panic!("Unsupported degree: {}", DEGREE); 78 | } 79 | } 80 | } 81 | 82 | impl MachineAir for Poseidon2WideChip { 83 | fn name(&self) -> String { 84 | format!("Poseidon2WideDeg{}", DEGREE) 85 | } 86 | 87 | fn preprocessed_width(&self) -> usize { 88 | PREPROCESSED_POSEIDON2_WIDTH 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/poseidon2_wide/trace.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | 3 | use p3_field::PrimeField32; 4 | use sp1_primitives::RC_16_30_U32; 5 | 6 | use super::{ 7 | air::{external_linear_layer, external_linear_layer_mut, internal_linear_layer_mut}, 8 | permutation::permutation_mut, 9 | Poseidon2Operation, NUM_EXTERNAL_ROUNDS, NUM_INTERNAL_ROUNDS, NUM_POSEIDON2_OPERATION_COLUMNS, 10 | WIDTH, 11 | }; 12 | 13 | pub fn populate_perm_deg3( 14 | input: [F; WIDTH], 15 | expected_output: Option<[F; WIDTH]>, 16 | ) -> Poseidon2Operation { 17 | let mut row: Vec = vec![F::zero(); NUM_POSEIDON2_OPERATION_COLUMNS]; 18 | populate_perm::(input, expected_output, row.as_mut_slice()); 19 | let op: &Poseidon2Operation = row.as_slice().borrow(); 20 | *op 21 | } 22 | 23 | pub fn populate_perm( 24 | input: [F; WIDTH], 25 | expected_output: Option<[F; WIDTH]>, 26 | input_row: &mut [F], 27 | ) { 28 | { 29 | let permutation = permutation_mut::(input_row); 30 | 31 | let ( 32 | external_rounds_state, 33 | internal_rounds_state, 34 | internal_rounds_s0, 35 | mut external_sbox, 36 | mut internal_sbox, 37 | output_state, 38 | ) = permutation.get_cols_mut(); 39 | 40 | external_rounds_state[0] = input; 41 | 42 | // Apply the first half of external rounds. 43 | for r in 0..NUM_EXTERNAL_ROUNDS / 2 { 44 | let next_state = 45 | populate_external_round::(external_rounds_state, &mut external_sbox, r); 46 | if r == NUM_EXTERNAL_ROUNDS / 2 - 1 { 47 | *internal_rounds_state = next_state; 48 | } else { 49 | external_rounds_state[r + 1] = next_state; 50 | } 51 | } 52 | 53 | // Apply the internal rounds. 54 | external_rounds_state[NUM_EXTERNAL_ROUNDS / 2] = 55 | populate_internal_rounds(internal_rounds_state, internal_rounds_s0, &mut internal_sbox); 56 | 57 | // Apply the second half of external rounds. 58 | for r in NUM_EXTERNAL_ROUNDS / 2..NUM_EXTERNAL_ROUNDS { 59 | let next_state = 60 | populate_external_round::(external_rounds_state, &mut external_sbox, r); 61 | if r == NUM_EXTERNAL_ROUNDS - 1 { 62 | for i in 0..WIDTH { 63 | output_state[i] = next_state[i]; 64 | if let Some(expected_output) = expected_output { 65 | assert_eq!(expected_output[i], next_state[i]); 66 | } 67 | } 68 | } else { 69 | external_rounds_state[r + 1] = next_state; 70 | } 71 | } 72 | } 73 | } 74 | 75 | pub fn populate_external_round( 76 | external_rounds_state: &[[F; WIDTH]], 77 | sbox: &mut Option<&mut [[F; WIDTH]; NUM_EXTERNAL_ROUNDS]>, 78 | r: usize, 79 | ) -> [F; WIDTH] { 80 | let mut state = { 81 | // For the first round, apply the linear layer. 82 | let round_state: &[F; WIDTH] = if r == 0 { 83 | &external_linear_layer(&external_rounds_state[r]) 84 | } else { 85 | &external_rounds_state[r] 86 | }; 87 | 88 | // Add round constants. 
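// (The 8 external rounds are split 4 + 4 around the 13 internal rounds, so rows 0..4 of the round-constant table feed the first external half, rows 4..17 the internal rounds, and rows 17..21 the second external half; hence the `r + NUM_INTERNAL_ROUNDS` offset in `round` below.)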
89 | // 90 | // Optimization: Since adding a constant is a degree 1 operation, we can avoid adding 91 | // columns for it, and instead include it in the constraint for the x^3 part of the 92 | // sbox. 93 | let round = if r < NUM_EXTERNAL_ROUNDS / 2 { r } else { r + NUM_INTERNAL_ROUNDS }; 94 | let mut add_rc = *round_state; 95 | for (i, add_rc_elem) in add_rc.iter_mut().enumerate().take(WIDTH) { 96 | *add_rc_elem += F::from_wrapped_u32(RC_16_30_U32[round][i]); 97 | } 98 | 99 | // Apply the sboxes. 100 | // Optimization: since the linear layer that comes after the sbox is degree 1, we can 101 | // avoid adding columns for the result of the sbox, and instead include the x^3 -> x^7 102 | // part of the sbox in the constraint for the linear layer 103 | let mut sbox_deg_7: [F; 16] = [F::zero(); WIDTH]; 104 | let mut sbox_deg_3: [F; 16] = [F::zero(); WIDTH]; 105 | for i in 0..WIDTH { 106 | sbox_deg_3[i] = add_rc[i] * add_rc[i] * add_rc[i]; 107 | sbox_deg_7[i] = sbox_deg_3[i] * sbox_deg_3[i] * add_rc[i]; 108 | } 109 | 110 | if let Some(sbox) = sbox.as_deref_mut() { 111 | sbox[r] = sbox_deg_3; 112 | } 113 | 114 | sbox_deg_7 115 | }; 116 | 117 | // Apply the linear layer. 118 | external_linear_layer_mut(&mut state); 119 | state 120 | } 121 | 122 | pub fn populate_internal_rounds( 123 | internal_rounds_state: &[F; WIDTH], 124 | internal_rounds_s0: &mut [F; NUM_INTERNAL_ROUNDS - 1], 125 | sbox: &mut Option<&mut [F; NUM_INTERNAL_ROUNDS]>, 126 | ) -> [F; WIDTH] { 127 | let mut state: [F; WIDTH] = *internal_rounds_state; 128 | let mut sbox_deg_3: [F; NUM_INTERNAL_ROUNDS] = [F::zero(); NUM_INTERNAL_ROUNDS]; 129 | for r in 0..NUM_INTERNAL_ROUNDS { 130 | // Add the round constant to the 0th state element. 131 | // Optimization: Since adding a constant is a degree 1 operation, we can avoid adding 132 | // columns for it, just like for external rounds. 133 | let round = r + NUM_EXTERNAL_ROUNDS / 2; 134 | let add_rc = state[0] + F::from_wrapped_u32(RC_16_30_U32[round][0]); 135 | 136 | // Apply the sboxes. 137 | // Optimization: since the linear layer that comes after the sbox is degree 1, we can 138 | // avoid adding columns for the result of the sbox, just like for external rounds. 139 | sbox_deg_3[r] = add_rc * add_rc * add_rc; 140 | let sbox_deg_7 = sbox_deg_3[r] * sbox_deg_3[r] * add_rc; 141 | 142 | // Apply the linear layer. 143 | state[0] = sbox_deg_7; 144 | internal_linear_layer_mut(&mut state); 145 | 146 | // Optimization: since we're only applying the sbox to the 0th state element, we only 147 | // need to have columns for the 0th state element at every step. 
This is because the 148 | // linear layer is degree 1, so all state elements at the end can be expressed as a 149 | // degree-3 polynomial of the state at the beginning of the internal rounds and the 0th 150 | // state element at rounds prior to the current round 151 | if r < NUM_INTERNAL_ROUNDS - 1 { 152 | internal_rounds_s0[r] = state[0]; 153 | } 154 | } 155 | 156 | let ret_state = state; 157 | 158 | if let Some(sbox) = sbox.as_deref_mut() { 159 | *sbox = sbox_deg_3; 160 | } 161 | 162 | ret_state 163 | } 164 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/chips/select.rs: -------------------------------------------------------------------------------- 1 | use core::borrow::Borrow; 2 | use hypercube_recursion_executor::{Address, SelectIo}; 3 | use hypercube_stark::air::MachineAir; 4 | use p3_air::{Air, BaseAir, PairBuilder}; 5 | use p3_field::{AbstractField, Field, PrimeField32}; 6 | use p3_matrix::Matrix; 7 | use sp1_derive::AlignedBorrow; 8 | 9 | use crate::builder::SP1RecursionAirBuilder; 10 | 11 | #[derive(Default)] 12 | pub struct SelectChip; 13 | 14 | pub const SELECT_COLS: usize = core::mem::size_of::>(); 15 | 16 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 17 | #[repr(C)] 18 | pub struct SelectCols { 19 | pub vals: SelectIo, 20 | } 21 | 22 | pub const SELECT_PREPROCESSED_COLS: usize = core::mem::size_of::>(); 23 | 24 | #[derive(AlignedBorrow, Debug, Clone, Copy)] 25 | #[repr(C)] 26 | pub struct SelectPreprocessedCols { 27 | pub is_real: F, 28 | pub addrs: SelectIo>, 29 | pub mult1: F, 30 | pub mult2: F, 31 | } 32 | 33 | impl BaseAir for SelectChip { 34 | fn width(&self) -> usize { 35 | SELECT_COLS 36 | } 37 | } 38 | 39 | impl Air for SelectChip 40 | where 41 | AB: SP1RecursionAirBuilder + PairBuilder, 42 | { 43 | fn eval(&self, builder: &mut AB) { 44 | let main = builder.main(); 45 | let local = main.row_slice(0); 46 | let local: &SelectCols = (*local).borrow(); 47 | let prep = builder.preprocessed(); 48 | let prep_local = prep.row_slice(0); 49 | let prep_local: &SelectPreprocessedCols = (*prep_local).borrow(); 50 | 51 | builder.receive_single(prep_local.addrs.bit, local.vals.bit, prep_local.is_real); 52 | builder.receive_single(prep_local.addrs.in1, local.vals.in1, prep_local.is_real); 53 | builder.receive_single(prep_local.addrs.in2, local.vals.in2, prep_local.is_real); 54 | builder.send_single(prep_local.addrs.out1, local.vals.out1, prep_local.mult1); 55 | builder.send_single(prep_local.addrs.out2, local.vals.out2, prep_local.mult2); 56 | builder.assert_eq( 57 | local.vals.out1, 58 | local.vals.bit * local.vals.in2 + (AB::Expr::one() - local.vals.bit) * local.vals.in1, 59 | ); 60 | builder.assert_eq( 61 | local.vals.out2, 62 | local.vals.bit * local.vals.in1 + (AB::Expr::one() - local.vals.bit) * local.vals.in2, 63 | ); 64 | } 65 | } 66 | 67 | impl MachineAir for SelectChip { 68 | fn name(&self) -> String { 69 | "Select".to_string() 70 | } 71 | 72 | fn preprocessed_width(&self) -> usize { 73 | SELECT_PREPROCESSED_COLS 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod builder; 2 | pub mod chips; 3 | mod machine; 4 | mod verify_compress; 5 | 6 | pub use machine::RecursionAir; 7 | pub use verify_compress::*; 8 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/machine.rs: 
-------------------------------------------------------------------------------- 1 | use hypercube_recursion_executor::D; 2 | use hypercube_stark::{Chip, Machine, PROOF_MAX_NUM_PVS}; 3 | use p3_field::{extension::BinomiallyExtendable, PrimeField32}; 4 | 5 | use strum_macros::EnumDiscriminants; 6 | 7 | use crate::chips::{ 8 | alu_base::BaseAluChip, 9 | alu_ext::ExtAluChip, 10 | mem::{MemoryConstChip, MemoryVarChip}, 11 | // poseidon2_skinny::Poseidon2SkinnyChip, 12 | poseidon2_wide::Poseidon2WideChip, 13 | prefix_sum_checks::PrefixSumChecksChip, 14 | public_values::PublicValuesChip, 15 | select::SelectChip, 16 | }; 17 | 18 | #[derive(sp1_derive::MachineAir, EnumDiscriminants)] 19 | #[sp1_core_path = "hypercube_core_machine"] 20 | #[builder_path = "crate::builder::SP1RecursionAirBuilder"] 21 | #[eval_trait_bound = "AB::Var: 'static"] 22 | #[allow(dead_code)] 23 | pub enum RecursionAir, const DEGREE: usize> { 24 | MemoryConst(MemoryConstChip), 25 | MemoryVar(MemoryVarChip), 26 | BaseAlu(BaseAluChip), 27 | ExtAlu(ExtAluChip), 28 | Poseidon2Wide(Poseidon2WideChip), 29 | Select(SelectChip), 30 | PrefixSumChecks(PrefixSumChecksChip), 31 | PublicValues(PublicValuesChip), 32 | } 33 | 34 | #[allow(dead_code)] 35 | impl, const DEGREE: usize> RecursionAir { 36 | /// Get a machine with all chips, except the dummy chip. 37 | pub fn machine_wide_with_all_chips() -> Machine { 38 | let chips = [ 39 | RecursionAir::MemoryConst(MemoryConstChip::default()), 40 | RecursionAir::MemoryVar(MemoryVarChip::default()), 41 | RecursionAir::BaseAlu(BaseAluChip), 42 | RecursionAir::ExtAlu(ExtAluChip), 43 | RecursionAir::Poseidon2Wide(Poseidon2WideChip::), 44 | RecursionAir::PrefixSumChecks(PrefixSumChecksChip), 45 | RecursionAir::Select(SelectChip), 46 | RecursionAir::PublicValues(PublicValuesChip), 47 | ] 48 | .map(Chip::new) 49 | .into_iter() 50 | .collect::>(); 51 | 52 | Machine::new(chips, PROOF_MAX_NUM_PVS) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/sys.rs: -------------------------------------------------------------------------------- 1 | // use hypercube_recursion_executor::{ 2 | // BaseAluInstr, BaseAluIo, Block, CommitPublicValuesEvent, CommitPublicValuesInstr, ExtAluInstr, 3 | // ExtAluIo, Poseidon2Event, Poseidon2Instr, SelectEvent, SelectInstr, 4 | // }; 5 | // use p3_baby_bear::BabyBear; 6 | 7 | // use crate::chips::{ 8 | // alu_base::{BaseAluAccessCols, BaseAluValueCols}, 9 | // alu_ext::{ExtAluAccessCols, ExtAluValueCols}, 10 | // poseidon2_skinny::columns::{preprocessed::Poseidon2PreprocessedColsSkinny, Poseidon2}, 11 | // poseidon2_wide::columns::preprocessed::Poseidon2PreprocessedColsWide, 12 | // public_values::{PublicValuesCols, PublicValuesPreprocessedCols}, 13 | // select::{SelectCols, SelectPreprocessedCols}, 14 | // }; 15 | 16 | // #[link(name = "sp1-recursion-machine-sys", kind = "static")] 17 | // extern "C-unwind" { 18 | // pub fn alu_base_event_to_row_babybear( 19 | // io: &BaseAluIo, 20 | // cols: &mut BaseAluValueCols, 21 | // ); 22 | // pub fn alu_base_instr_to_row_babybear( 23 | // instr: &BaseAluInstr, 24 | // cols: &mut BaseAluAccessCols, 25 | // ); 26 | 27 | // pub fn alu_ext_event_to_row_babybear( 28 | // io: &ExtAluIo>, 29 | // cols: &mut ExtAluValueCols, 30 | // ); 31 | // pub fn alu_ext_instr_to_row_babybear( 32 | // instr: &ExtAluInstr, 33 | // cols: &mut ExtAluAccessCols, 34 | // ); 35 | 36 | // pub fn public_values_event_to_row_babybear( 37 | // io: &CommitPublicValuesEvent, 38 | // 
digest_idx: usize, 39 | // cols: &mut PublicValuesCols, 40 | // ); 41 | // pub fn public_values_instr_to_row_babybear( 42 | // instr: &CommitPublicValuesInstr, 43 | // digest_idx: usize, 44 | // cols: &mut PublicValuesPreprocessedCols, 45 | // ); 46 | 47 | // pub fn select_event_to_row_babybear( 48 | // io: &SelectEvent, 49 | // cols: &mut SelectCols, 50 | // ); 51 | // pub fn select_instr_to_row_babybear( 52 | // instr: &SelectInstr, 53 | // cols: &mut SelectPreprocessedCols, 54 | // ); 55 | 56 | // pub fn poseidon2_skinny_event_to_row_babybear( 57 | // io: &Poseidon2Event, 58 | // cols: *mut Poseidon2, 59 | // ); 60 | // pub fn poseidon2_skinny_instr_to_row_babybear( 61 | // instr: &Poseidon2Instr, 62 | // i: usize, 63 | // cols: &mut Poseidon2PreprocessedColsSkinny, 64 | // ); 65 | 66 | // pub fn poseidon2_wide_event_to_row_babybear( 67 | // input: *const BabyBear, 68 | // input_row: *mut BabyBear, 69 | // sbox_state: bool, 70 | // ); 71 | // pub fn poseidon2_wide_instr_to_row_babybear( 72 | // instr: &Poseidon2Instr, 73 | // cols: &mut Poseidon2PreprocessedColsWide, 74 | // ); 75 | // } 76 | -------------------------------------------------------------------------------- /crates/recursion/machine/src/verify_compress.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | 3 | use hypercube_jagged::BabyBearPoseidon2; 4 | use hypercube_recursion_executor::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH}; 5 | use hypercube_stark::{ 6 | MachineConfig, MachineVerifyingKey, ShardProof, ShardVerifier, ShardVerifierError, 7 | }; 8 | use itertools::Itertools; 9 | use p3_baby_bear::BabyBear; 10 | use p3_field::AbstractField; 11 | use serde::{Deserialize, Serialize}; 12 | use sp1_primitives::{io::SP1PublicValues, poseidon2_hash}; 13 | use strum_macros::{EnumDiscriminants, EnumTryAs}; 14 | use thiserror::Error; 15 | 16 | use crate::RecursionAir; 17 | 18 | pub type CompressAir = RecursionAir; 19 | 20 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 21 | pub struct PlonkBn254Proof { 22 | pub public_inputs: [String; 2], 23 | pub encoded_proof: String, 24 | pub raw_proof: String, 25 | pub plonk_vkey_hash: [u8; 32], 26 | } 27 | 28 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 29 | pub struct Groth16Bn254Proof { 30 | pub public_inputs: [String; 2], 31 | pub encoded_proof: String, 32 | pub raw_proof: String, 33 | pub groth16_vkey_hash: [u8; 32], 34 | } 35 | 36 | #[derive(Serialize, Deserialize, Clone)] 37 | #[serde(bound( 38 | serialize = "C: MachineConfig, C::Challenger: Serialize", 39 | deserialize = "C: MachineConfig, C::Challenger: Deserialize<'de>" 40 | ))] 41 | pub struct SP1ReduceProof { 42 | /// The compress verifying key associated with the proof. 43 | pub vk: MachineVerifyingKey, 44 | /// The shard proof representing the compressed proof. 45 | pub proof: ShardProof, 46 | } 47 | 48 | /// The information necessary to verify a proof for a given RISC-V program. 49 | #[derive(Clone, Serialize, Deserialize)] 50 | pub struct SP1VerifyingKey { 51 | pub vk: MachineVerifyingKey, 52 | } 53 | 54 | /// A proof generated by the SP1 RISC-V zkVM. 55 | #[derive(Clone, Serialize, Deserialize, EnumDiscriminants, EnumTryAs)] 56 | #[strum_discriminants(derive(Default, Hash, PartialOrd, Ord))] 57 | #[strum_discriminants(name(SP1ProofMode))] 58 | pub enum SP1Proof { 59 | /// A proof generated by the core proof mode. 60 | /// 61 | /// The proof size scales linearly with the number of cycles. 
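/// (The execution is split into shards, and the vector below contains one shard proof per shard.)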
62 | #[strum_discriminants(default)] 63 | Core(Vec>), 64 | /// A proof generated by the compress proof mode. 65 | /// 66 | /// The proof size is constant, regardless of the number of cycles. 67 | Compressed(Box>), 68 | /// A proof generated by the Plonk proof mode. 69 | Plonk(PlonkBn254Proof), 70 | /// A proof generated by the Groth16 proof mode. 71 | Groth16(Groth16Bn254Proof), 72 | } 73 | 74 | /// A proof generated by the SP1 RISC-V zkVM bundled together with the public values and the 75 | /// version. 76 | #[derive(Clone, Serialize, Deserialize)] 77 | pub struct SP1ProofWithPublicValues { 78 | /// The raw proof generated by the SP1 RISC-V zkVM. 79 | pub proof: SP1Proof, 80 | /// The public values generated by the SP1 RISC-V zkVM. 81 | pub public_values: SP1PublicValues, 82 | /// The version of the SP1 RISC-V zkVM (not necessary but useful for detecting version 83 | /// mismatches). 84 | pub sp1_version: String, 85 | /// The integrity proof generated by the TEE server. 86 | pub tee_proof: Option>, 87 | } 88 | 89 | #[derive(Debug, Error)] 90 | pub enum MachineVerifierError { 91 | /// An error that occurs during the verification of a shard proof. 92 | #[error("invalid shard proof: {0}")] 93 | InvalidShardProof(ShardVerifierError), 94 | /// The public values are invalid 95 | #[error("invalid public values")] 96 | InvalidPublicValues(&'static str), 97 | /// There are too many shards. 98 | #[error("too many shards")] 99 | TooManyShards, 100 | } 101 | 102 | /// Compute the digest of the public values. 103 | pub fn recursion_public_values_digest( 104 | public_values: &RecursionPublicValues, 105 | ) -> [BabyBear; 8] { 106 | let pv_array = public_values.as_array(); 107 | poseidon2_hash(pv_array[0..NUM_PV_ELMS_TO_HASH].to_vec()) 108 | } 109 | 110 | /// Assert that the digest of the public values is correct. 111 | pub fn assert_recursion_public_values_valid(public_values: &RecursionPublicValues) { 112 | let expected_digest = recursion_public_values_digest(public_values); 113 | for (value, expected) in public_values.digest.iter().copied().zip_eq(expected_digest) { 114 | assert_eq!(value, expected); 115 | } 116 | } 117 | 118 | fn verifier() -> ShardVerifier> { 119 | let compress_log_blowup = 1; 120 | let compress_log_stacking_height = 20; 121 | let compress_max_log_row_count = 20; 122 | 123 | let machine = CompressAir::::machine_wide_with_all_chips(); 124 | ShardVerifier::from_basefold_parameters( 125 | compress_log_blowup, 126 | compress_log_stacking_height, 127 | compress_max_log_row_count, 128 | machine.clone(), 129 | ) 130 | } 131 | 132 | /// Verify a compressed proof. 133 | pub fn verify_compressed( 134 | proof: &SP1ReduceProof, 135 | vk: &MachineVerifyingKey, 136 | ) -> Result<(), MachineVerifierError> { 137 | let SP1ReduceProof { vk: compress_vk, proof } = proof; 138 | let verifier = verifier(); 139 | let mut challenger = verifier.challenger(); 140 | compress_vk.observe_into(&mut challenger); 141 | verifier 142 | .verify_shard(compress_vk, proof, &mut challenger) 143 | .map_err(MachineVerifierError::InvalidShardProof)?; 144 | 145 | // Validate public values 146 | let public_values: &RecursionPublicValues<_> = proof.public_values.as_slice().borrow(); 147 | assert_recursion_public_values_valid(public_values); 148 | 149 | if public_values.is_complete != BabyBear::one() { 150 | return Err(MachineVerifierError::InvalidPublicValues("is_complete is not 1")); 151 | } 152 | 153 | // Verify that the proof is for the sp1 vkey we are expecting. 
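// Namely, the SP1 vkey digest committed in the recursion public values must equal the hash of the verifying key supplied by the caller.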
154 | let vkey_hash = vk.hash_babybear(); 155 | if public_values.sp1_vk_digest != vkey_hash { 156 | return Err(MachineVerifierError::InvalidPublicValues("sp1 vk hash mismatch")); 157 | } 158 | 159 | Ok(()) 160 | } 161 | -------------------------------------------------------------------------------- /crates/stacked/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-stacked" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | repository.workspace = true 7 | keywords.workspace = true 8 | categories.workspace = true 9 | 10 | [dependencies] 11 | hypercube-multilinear = { workspace = true } 12 | hypercube-commit = { workspace = true } 13 | 14 | thiserror = { workspace = true } 15 | serde = { workspace = true } 16 | 17 | [dev-dependencies] 18 | 19 | [lints] 20 | workspace = true 21 | -------------------------------------------------------------------------------- /crates/stacked/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! API for stacked multilinear polynomial commitment schemes. 2 | //! 3 | //! 4 | //! For any multilinear PCS that can commit to batches of matrices of the same height (considering 5 | //! the columns of those matrices as evaluations of multilinear polynomials on the Boolean hypercube), 6 | //! and then prove joint evaluations of those multilinear polynomials at the same point, this module 7 | //! provides functionality that can commit to heterogeneous batches of matrices (considering that 8 | //! batch as a single multilinear polynomial in many variables), and then prove evaluations of that 9 | //! multilinear polynomial at a point. 10 | //! 11 | //! This is implemented by making a virtual vector consisting of the concatenation of all of the 12 | //! data in the matrices in the batch, splitting that vector up into vectors of a prescribed size, 13 | //! and then using the underlying PCS to commit to and prove evaluations of those vectors. The 14 | //! verifier then computes the expected multilinear evaluation of the larger vector by using a 15 | //! multilinear evaluation algorithm in a smaller number of variables. Concretely, an evaluation 16 | //! point for the stacked polynomial splits into a `batch_point`, which recombines the per-column claims, and a `stack_point`, which is passed to the underlying PCS (see `StackedPcsVerifier::verify_trusted_evaluation`). This is essentially the interleaving algorithm of `Ligero` (https://eprint.iacr.org/2022/1608). 17 | 18 | mod verifier; 19 | 20 | pub use verifier::*; 21 | -------------------------------------------------------------------------------- /crates/stacked/src/verifier.rs: -------------------------------------------------------------------------------- 1 | use hypercube_commit::Rounds; 2 | use hypercube_multilinear::{Evaluations, Mle, MultilinearPcsVerifier, Point}; 3 | use serde::{Deserialize, Serialize}; 4 | use thiserror::Error; 5 | 6 | #[derive(Debug, Clone)] 7 | pub struct StackedPcsVerifier<P: MultilinearPcsVerifier>
{ 8 | pub pcs_verifier: P, 9 | pub log_stacking_height: u32, 10 | } 11 | 12 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Error)] 13 | pub enum StackedVerifierError<PcsError> { 14 | #[error("PCS error: {0}")] 15 | PcsError(PcsError), 16 | #[error("Batch evaluations do not match the claimed evaluations")] 17 | StackingError, 18 | } 19 | 20 | #[derive(Debug, Clone, Serialize, Deserialize)] 21 | pub struct StackedPcsProof<PcsProof, EF> { 22 | pub pcs_proof: PcsProof, 23 | pub batch_evaluations: Rounds<Evaluations<EF>>, 24 | } 25 | 26 | impl<P: MultilinearPcsVerifier> StackedPcsVerifier<P>
{ 27 | pub fn challenger(&self) -> P::Challenger { 28 | self.pcs_verifier.default_challenger() 29 | } 30 | 31 | #[inline] 32 | pub const fn new(pcs_verifier: P, log_stacking_height: u32) -> Self { 33 | Self { pcs_verifier, log_stacking_height } 34 | } 35 | 36 | pub fn verify_trusted_evaluation( 37 | &self, 38 | commitments: &[P::Commitment], 39 | point: &Point<P::EF>, 40 | proof: &StackedPcsProof<P::Proof, P::EF>, 41 | evaluation_claim: P::EF, 42 | challenger: &mut P::Challenger, 43 | ) -> Result<(), StackedVerifierError<P::VerifierError>> { 44 | // Split the point into the interleaved and batched parts. 45 | let (batch_point, stack_point) = 46 | point.split_at(point.dimension() - self.log_stacking_height as usize); 47 | 48 | // Interpolate the batch evaluations as a multilinear polynomial. 49 | let batch_evaluations = 50 | proof.batch_evaluations.iter().flatten().flatten().cloned().collect::<Mle<_>>(); 51 | // Verify that the claimed evaluation matches the interpolated evaluation. 52 | let expected_evaluation = batch_evaluations.blocking_eval_at(&batch_point)[0]; 53 | if evaluation_claim != expected_evaluation { 54 | return Err(StackedVerifierError::StackingError); 55 | } 56 | 57 | // Verify the PCS proof with respect to the claimed evaluations. 58 | self.pcs_verifier 59 | .verify_untrusted_evaluations( 60 | commitments, 61 | stack_point, 62 | &proof.batch_evaluations, 63 | &proof.pcs_proof, 64 | challenger, 65 | ) 66 | .map_err(StackedVerifierError::PcsError) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /crates/stark/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [1.2.0-rc1](https://github.com/succinctlabs/sp1/releases/tag/hypercube-stark-v1.2.0-rc1) - 2024-08-23 11 | 12 | ### Added 13 | 14 | - gas ([#1354](https://github.com/succinctlabs/sp1/pull/1354)) 15 | 16 | ### Fixed 17 | 18 | - fix fptower tests 19 | - fix imports 20 | 21 | ### Other 22 | 23 | - resolve merge conflicts between dev and experimental 24 | - refactor + cleanup core crates 25 | -------------------------------------------------------------------------------- /crates/stark/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypercube-stark" 3 | description = "SP1 is a performant, 100% open-source, contributor-friendly zkVM."
4 | readme = "../../README.md" 5 | version = { workspace = true } 6 | edition = { workspace = true } 7 | 8 | repository = { workspace = true } 9 | keywords = { workspace = true } 10 | categories = { workspace = true } 11 | 12 | [dependencies] 13 | # sp1 14 | sp1-derive = { workspace = true } 15 | sp1-primitives = { workspace = true } 16 | 17 | hypercube-alloc = { workspace = true } 18 | hypercube-basefold = { workspace = true } 19 | hypercube-commit = { workspace = true } 20 | hypercube-jagged = { workspace = true } 21 | hypercube-sumcheck = { workspace = true } 22 | hypercube-multilinear = { workspace = true } 23 | hypercube-merkle-tree = { workspace = true } 24 | 25 | # p3 26 | p3-uni-stark = { workspace = true } 27 | p3-air = { workspace = true } 28 | p3-field = { workspace = true } 29 | p3-matrix = { workspace = true } 30 | p3-challenger = { workspace = true } 31 | p3-baby-bear = { workspace = true } 32 | p3-symmetric = { workspace = true } 33 | serde = { workspace = true, features = ["derive", "rc"] } 34 | itertools = { workspace = true } 35 | tracing = { workspace = true } 36 | thiserror = "1.0" 37 | derive-where = { workspace = true } 38 | 39 | arrayref = "0.3.8" 40 | num-bigint = { version = "0.4.3", default-features = false } 41 | strum_macros = { workspace = true } 42 | strum = {workspace = true } 43 | num-traits = "0.2.19" 44 | blake3 = { version = "1.6.1", default-features = false } 45 | sha2 = "0.10.8" 46 | -------------------------------------------------------------------------------- /crates/stark/src/air/extension.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Add, Div, Mul, Neg, Sub}; 2 | 3 | use p3_field::{ 4 | extension::{BinomialExtensionField, BinomiallyExtendable}, 5 | AbstractExtensionField, AbstractField, Field, 6 | }; 7 | use sp1_derive::AlignedBorrow; 8 | 9 | const D: usize = 4; 10 | 11 | /// A binomial extension element represented over a generic type `T`. 12 | #[derive(AlignedBorrow, Clone, Copy, Debug, Default, PartialEq, Eq, Hash)] 13 | #[repr(C)] 14 | pub struct BinomialExtension(pub [T; D]); 15 | 16 | impl BinomialExtension { 17 | /// Creates a new binomial extension element from a base element. 18 | pub fn from_base(b: T) -> Self 19 | where 20 | T: AbstractField, 21 | { 22 | let mut arr: [T; D] = core::array::from_fn(|_| T::zero()); 23 | arr[0] = b; 24 | Self(arr) 25 | } 26 | 27 | /// Returns a reference to the underlying slice. 28 | pub const fn as_base_slice(&self) -> &[T] { 29 | &self.0 30 | } 31 | 32 | /// Creates a new binomial extension element from a binomial extension element. 
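/// (That is, it converts the coordinates from one representation `S` into another `T` element-wise via `Into`.)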
33 | #[allow(clippy::needless_pass_by_value)] 34 | pub fn from + Clone>(from: BinomialExtension) -> Self { 35 | BinomialExtension(core::array::from_fn(|i| from.0[i].clone().into())) 36 | } 37 | } 38 | 39 | impl + Clone> Add for BinomialExtension { 40 | type Output = Self; 41 | 42 | fn add(self, rhs: Self) -> Self::Output { 43 | Self(core::array::from_fn(|i| self.0[i].clone() + rhs.0[i].clone())) 44 | } 45 | } 46 | 47 | impl + Clone> Sub for BinomialExtension { 48 | type Output = Self; 49 | 50 | fn sub(self, rhs: Self) -> Self::Output { 51 | Self(core::array::from_fn(|i| self.0[i].clone() - rhs.0[i].clone())) 52 | } 53 | } 54 | 55 | impl + Mul + AbstractField> Mul for BinomialExtension { 56 | type Output = Self; 57 | 58 | fn mul(self, rhs: Self) -> Self::Output { 59 | let mut result = [T::zero(), T::zero(), T::zero(), T::zero()]; 60 | let w = T::from_canonical_u32(11); 61 | 62 | for i in 0..D { 63 | for j in 0..D { 64 | if i + j >= D { 65 | result[i + j - D] = result[i + j - D].clone() 66 | + w.clone() * self.0[i].clone() * rhs.0[j].clone(); 67 | } else { 68 | result[i + j] = result[i + j].clone() + self.0[i].clone() * rhs.0[j].clone(); 69 | } 70 | } 71 | } 72 | 73 | Self(result) 74 | } 75 | } 76 | 77 | impl Div for BinomialExtension 78 | where 79 | F: BinomiallyExtendable, 80 | { 81 | type Output = Self; 82 | 83 | fn div(self, rhs: Self) -> Self::Output { 84 | let p3_ef_lhs = BinomialExtensionField::from_base_slice(&self.0); 85 | let p3_ef_rhs = BinomialExtensionField::from_base_slice(&rhs.0); 86 | let p3_ef_result = p3_ef_lhs / p3_ef_rhs; 87 | Self(p3_ef_result.as_base_slice().try_into().unwrap()) 88 | } 89 | } 90 | 91 | impl BinomialExtension 92 | where 93 | F: BinomiallyExtendable<4>, 94 | { 95 | /// Returns the multiplicative inverse of the element. 96 | #[must_use] 97 | pub fn inverse(&self) -> Self { 98 | let p3_ef = BinomialExtensionField::from_base_slice(&self.0); 99 | let p3_ef_inverse = p3_ef.inverse(); 100 | Self(p3_ef_inverse.as_base_slice().try_into().unwrap()) 101 | } 102 | 103 | /// Returns the multiplicative inverse of the element, if it exists. 
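/// Returns `None` when the element is zero, the only non-invertible element of the field.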
104 | #[must_use] 105 | pub fn try_inverse(&self) -> Option { 106 | let p3_ef = BinomialExtensionField::from_base_slice(&self.0); 107 | let p3_ef_inverse = p3_ef.try_inverse()?; 108 | Some(Self(p3_ef_inverse.as_base_slice().try_into().unwrap())) 109 | } 110 | } 111 | 112 | impl Neg for BinomialExtension { 113 | type Output = Self; 114 | 115 | fn neg(self) -> Self::Output { 116 | Self([-self.0[0], -self.0[1], -self.0[2], -self.0[3]]) 117 | } 118 | } 119 | 120 | impl From> for BinomialExtension 121 | where 122 | AF: AbstractField + Copy, 123 | AF::F: BinomiallyExtendable, 124 | { 125 | fn from(value: BinomialExtensionField) -> Self { 126 | let arr: [AF; D] = value.as_base_slice().try_into().unwrap(); 127 | Self(arr) 128 | } 129 | } 130 | 131 | impl From> for BinomialExtensionField 132 | where 133 | AF: AbstractField + Copy, 134 | AF::F: BinomiallyExtendable, 135 | { 136 | fn from(value: BinomialExtension) -> Self { 137 | BinomialExtensionField::from_base_slice(&value.0) 138 | } 139 | } 140 | 141 | impl IntoIterator for BinomialExtension { 142 | type Item = T; 143 | type IntoIter = core::array::IntoIter; 144 | 145 | fn into_iter(self) -> Self::IntoIter { 146 | self.0.into_iter() 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /crates/stark/src/air/interaction.rs: -------------------------------------------------------------------------------- 1 | use crate::lookup::InteractionKind; 2 | 3 | /// An interaction is a cross-table lookup. 4 | pub struct AirInteraction { 5 | /// The values of the interaction. 6 | pub values: Vec, 7 | /// The multiplicity of the interaction. 8 | pub multiplicity: E, 9 | /// The kind of interaction. 10 | pub kind: InteractionKind, 11 | } 12 | 13 | impl AirInteraction { 14 | /// Create a new [`AirInteraction`]. 15 | pub const fn new(values: Vec, multiplicity: E, kind: InteractionKind) -> Self { 16 | Self { values, multiplicity, kind } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /crates/stark/src/air/machine.rs: -------------------------------------------------------------------------------- 1 | use p3_air::BaseAir; 2 | use p3_field::Field; 3 | 4 | // TODO: add Id type and also fn id() 5 | 6 | #[macro_export] 7 | /// Macro to get the name of a chip. 8 | macro_rules! chip_name { 9 | ($chip:ident, $field:ty) => { 10 | <$chip as MachineAir<$field>>::name(&$chip {}) 11 | }; 12 | } 13 | 14 | /// An AIR that is part of a multi table AIR arithmetization. 15 | pub trait MachineAir: BaseAir + 'static + Send + Sync { 16 | /// A unique identifier for this AIR as part of a machine. 17 | fn name(&self) -> String; 18 | 19 | /// The width of the preprocessed trace. 20 | fn preprocessed_width(&self) -> usize { 21 | 0 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /crates/stark/src/air/mod.rs: -------------------------------------------------------------------------------- 1 | //! Building blocks for defining AIRs. 
2 | 3 | mod builder; 4 | mod extension; 5 | mod interaction; 6 | mod machine; 7 | 8 | pub use builder::*; 9 | pub use extension::*; 10 | pub use interaction::*; 11 | pub use machine::*; 12 | -------------------------------------------------------------------------------- /crates/stark/src/air/sub_builder.rs: -------------------------------------------------------------------------------- 1 | // use std::{ 2 | // iter::{Skip, Take}, 3 | // ops::{Deref, Range}, 4 | // }; 5 | 6 | // use p3_air::{AirBuilder, BaseAir}; 7 | // use p3_matrix::Matrix; 8 | 9 | // /// A submatrix of a matrix. The matrix will contain a subset of the columns of `self.inner`. 10 | // pub struct SubMatrixRowSlices, T: Send + Sync> { 11 | // inner: M, 12 | // column_range: Range, 13 | // _phantom: std::marker::PhantomData, 14 | // } 15 | 16 | // impl, T: Send + Sync> SubMatrixRowSlices { 17 | // /// Creates a new [`SubMatrixRowSlices`]. 18 | // #[must_use] 19 | // pub const fn new(inner: M, column_range: Range) -> Self { 20 | // Self { inner, column_range, _phantom: std::marker::PhantomData } 21 | // } 22 | // } 23 | 24 | // /// Implement `Matrix` for `SubMatrixRowSlices`. 25 | // impl, T: Send + Sync> Matrix for SubMatrixRowSlices { 26 | // type Row<'a> 27 | // = Skip>> 28 | // where 29 | // Self: 'a; 30 | 31 | // #[inline] 32 | // fn row(&self, r: usize) -> Self::Row<'_> { 33 | // self.inner.row(r).take(self.column_range.end).skip(self.column_range.start) 34 | // } 35 | 36 | // #[inline] 37 | // fn row_slice(&self, r: usize) -> impl Deref { 38 | // self.row(r).collect::>() 39 | // } 40 | 41 | // #[inline] 42 | // fn width(&self) -> usize { 43 | // self.column_range.len() 44 | // } 45 | 46 | // #[inline] 47 | // fn height(&self) -> usize { 48 | // self.inner.height() 49 | // } 50 | // } 51 | 52 | // /// A builder used to eval a sub-air. This will handle enforcing constraints for a subset of a 53 | // /// trace matrix. E.g. if a particular air needs to be enforced for a subset of the columns of 54 | // /// the trace, then the [`SubAirBuilder`] can be used. 55 | // pub struct SubAirBuilder<'a, AB: AirBuilder, SubAir: BaseAir, T> { 56 | // inner: &'a mut AB, 57 | // column_range: Range, 58 | // _phantom: std::marker::PhantomData<(SubAir, T)>, 59 | // } 60 | 61 | // impl<'a, AB: AirBuilder, SubAir: BaseAir, T> SubAirBuilder<'a, AB, SubAir, T> { 62 | // /// Creates a new [`SubAirBuilder`]. 63 | // #[must_use] 64 | // pub fn new(inner: &'a mut AB, column_range: Range) -> Self { 65 | // Self { inner, column_range, _phantom: std::marker::PhantomData } 66 | // } 67 | // } 68 | 69 | // /// Implement `AirBuilder` for `SubAirBuilder`. 
70 | // impl, F> AirBuilder for SubAirBuilder<'_, AB, SubAir, F> { 71 | // type F = AB::F; 72 | // type Expr = AB::Expr; 73 | // type Var = AB::Var; 74 | // type M = SubMatrixRowSlices; 75 | 76 | // fn main(&self) -> Self::M { 77 | // let matrix = self.inner.main(); 78 | 79 | // SubMatrixRowSlices::new(matrix, self.column_range.clone()) 80 | // } 81 | 82 | // fn is_first_row(&self) -> Self::Expr { 83 | // self.inner.is_first_row() 84 | // } 85 | 86 | // fn is_last_row(&self) -> Self::Expr { 87 | // self.inner.is_last_row() 88 | // } 89 | 90 | // fn is_transition_window(&self, size: usize) -> Self::Expr { 91 | // self.inner.is_transition_window(size) 92 | // } 93 | 94 | // fn assert_zero>(&mut self, x: I) { 95 | // self.inner.assert_zero(x.into()); 96 | // } 97 | // } 98 | -------------------------------------------------------------------------------- /crates/stark/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! STARK-based primitives for proof generation and verification over AIRs. 2 | 3 | #![warn(clippy::pedantic)] 4 | #![allow(clippy::similar_names)] 5 | #![allow(clippy::cast_possible_wrap)] 6 | #![allow(clippy::cast_possible_truncation)] 7 | #![allow(clippy::cast_sign_loss)] 8 | #![allow(clippy::module_name_repetitions)] 9 | #![allow(clippy::needless_range_loop)] 10 | #![allow(clippy::cast_lossless)] 11 | #![allow(clippy::bool_to_int_with_if)] 12 | #![allow(clippy::should_panic_without_expect)] 13 | #![allow(clippy::field_reassign_with_default)] 14 | #![allow(clippy::manual_assert)] 15 | #![allow(clippy::unreadable_literal)] 16 | #![allow(clippy::match_wildcard_for_single_variants)] 17 | #![allow(clippy::missing_panics_doc)] 18 | #![allow(clippy::missing_errors_doc)] 19 | #![allow(clippy::explicit_iter_loop)] 20 | #![allow(clippy::if_not_else)] 21 | #![warn(missing_docs)] 22 | 23 | pub mod air; 24 | mod chip; 25 | mod folder; 26 | mod logup_gkr; 27 | mod lookup; 28 | mod machine; 29 | mod public_values; 30 | mod util; 31 | mod verifier; 32 | pub use chip::*; 33 | pub use folder::*; 34 | pub use logup_gkr::*; 35 | pub mod septic_curve; 36 | pub mod septic_digest; 37 | pub mod septic_extension; 38 | pub use lookup::*; 39 | mod word; 40 | pub use machine::*; 41 | pub use public_values::*; 42 | pub use util::*; 43 | pub use verifier::*; 44 | pub use word::*; 45 | -------------------------------------------------------------------------------- /crates/stark/src/logup_gkr/mod.rs: -------------------------------------------------------------------------------- 1 | mod proof; 2 | mod verifier; 3 | 4 | pub use proof::*; 5 | pub use verifier::*; 6 | -------------------------------------------------------------------------------- /crates/stark/src/logup_gkr/proof.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use hypercube_alloc::{Backend, CpuBackend}; 4 | use hypercube_multilinear::{Mle, MleEval, Point}; 5 | use hypercube_sumcheck::PartialSumcheckProof; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | /// The output of the log-up GKR circuit. 9 | #[derive(Debug, Serialize, Deserialize, Clone)] 10 | #[serde(bound(serialize = "Mle: Serialize", deserialize = "Mle: Deserialize<'de>"))] 11 | pub struct LogUpGkrOutput { 12 | /// Numerator 13 | pub numerator: Mle, 14 | /// Denominator 15 | pub denominator: Mle, 16 | } 17 | 18 | /// The proof for a single round of the log-up GKR circuit. 
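/// Each round reduces a claim about one GKR layer to the next: the four values below are the claimed evaluations of the next layer's numerator and denominator with the last coordinate fixed to 0 and 1, recombined by fraction addition (`n = n_0 * d_1 + n_1 * d_0`, `d = d_0 * d_1`), and the sumcheck proof reduces the recombined claim to a single evaluation of that layer.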
19 | #[derive(Debug, Serialize, Deserialize, Clone)] 20 | pub struct LogupGkrRoundProof<EF> { 21 | /// The claimed evaluation of the numerator with last coordinate equal to 0. 22 | pub numerator_0: EF, 23 | /// The claimed evaluation of the numerator with last coordinate equal to 1. 24 | pub numerator_1: EF, 25 | /// The claimed evaluation of the denominator with last coordinate equal to 0. 26 | pub denominator_0: EF, 27 | /// The claimed evaluation of the denominator with last coordinate equal to 1. 28 | pub denominator_1: EF, 29 | /// The sumcheck proof for the round. 30 | pub sumcheck_proof: PartialSumcheckProof<EF>, 31 | } 32 | 33 | /// The proof for the log-up GKR circuit. 34 | #[derive(Debug, Serialize, Deserialize, Clone)] 35 | pub struct LogupGkrProof<EF> { 36 | /// The output of the circuit. 37 | pub circuit_output: LogUpGkrOutput<EF>, 38 | /// The proof for each round. 39 | pub round_proofs: Vec<LogupGkrRoundProof<EF>>, 40 | /// The evaluations for each chip. 41 | pub logup_evaluations: LogUpEvaluations<EF>, 42 | } 43 | 44 | /// The evaluations for a chip. 45 | #[derive(Debug, Serialize, Deserialize, Clone)] 46 | pub struct ChipEvaluation<EF> { 47 | /// The evaluations of the main trace. 48 | pub main_trace_evaluations: MleEval<EF>, 49 | /// The evaluations of the preprocessed trace. 50 | pub preprocessed_trace_evaluations: Option<MleEval<EF>>, 51 | } 52 | 53 | #[derive(Debug, Serialize, Deserialize, Clone)] 54 | /// The data passed from the GKR prover to the zerocheck prover. 55 | pub struct LogUpEvaluations<EF> { 56 | /// The point at which the evaluations are made. 57 | pub point: Point<EF>, 58 | /// The evaluations for each chip. 59 | pub chip_openings: BTreeMap<String, ChipEvaluation<EF>>, 60 | } 61 | -------------------------------------------------------------------------------- /crates/stark/src/lookup/debug.rs: -------------------------------------------------------------------------------- 1 | use p3_field::Field; 2 | 3 | use super::InteractionKind; 4 | 5 | /// The data for an interaction. 6 | #[derive(Debug)] 7 | pub struct InteractionData<F: Field> { 8 | /// The chip name. 9 | pub chip_name: String, 10 | /// The kind of interaction. 11 | pub kind: InteractionKind, 12 | /// The row of the interaction. 13 | pub row: usize, 14 | /// The interaction number. 15 | pub interaction_number: usize, 16 | /// Whether the interaction is a send. 17 | pub is_send: bool, 18 | /// The multiplicity of the interaction. 19 | pub multiplicity: F, 20 | } 21 | 22 | /// Converts a vector of field elements to a string. 23 | #[allow(clippy::needless_pass_by_value)] 24 | #[must_use] 25 | pub fn vec_to_string<F: Field>(vec: Vec<F>) -> String { 26 | let mut result = String::from("("); 27 | for (i, value) in vec.iter().enumerate() { 28 | if i != 0 { 29 | result.push_str(", "); 30 | } 31 | result.push_str(&value.to_string()); 32 | } 33 | result.push(')'); 34 | result 35 | } 36 | -------------------------------------------------------------------------------- /crates/stark/src/lookup/interaction.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::{Debug, Display}; 2 | use std::ops::Mul; 3 | 4 | use hypercube_multilinear::MleEval; 5 | use p3_air::{PairCol, VirtualPairCol}; 6 | use p3_field::{AbstractField, Field}; 7 | 8 | use crate::air::InteractionScope; 9 | 10 | /// An interaction for a lookup or a permutation argument. 11 | #[derive(Clone)] 12 | pub struct Interaction<F: Field> { 13 | /// The values of the interaction. 14 | pub values: Vec<VirtualPairCol<F>>, 15 | /// The multiplicity of the interaction. 16 | pub multiplicity: VirtualPairCol<F>, 17 | /// The kind of interaction.
18 | pub kind: InteractionKind, 19 | /// The scope of the interaction. 20 | pub scope: InteractionScope, 21 | } 22 | 23 | /// The type of interaction for a lookup argument. 24 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] 25 | pub enum InteractionKind { 26 | /// Interaction with the memory table, such as read and write. 27 | Memory = 1, 28 | 29 | /// Interaction with the program table, loading an instruction at a given pc address. 30 | Program = 2, 31 | 32 | /// Interaction with instruction oracle. 33 | Instruction = 3, 34 | 35 | /// Interaction with the ALU operations. 36 | Alu = 4, 37 | 38 | /// Interaction with the byte lookup table for byte operations. 39 | Byte = 5, 40 | 41 | /// Requesting a range check for a given value and range. 42 | Range = 6, 43 | 44 | /// Interaction with the current CPU state. 45 | State = 7, 46 | 47 | /// Interaction with a syscall. 48 | Syscall = 8, 49 | 50 | /// Interaction with the global table. 51 | Global = 9, 52 | 53 | /// Interaction with the `ShaExtend` chip. 54 | ShaExtend = 10, 55 | 56 | /// Interaction with the `ShaCompress` chip. 57 | ShaCompress = 11, 58 | 59 | /// Interaction with the `Keccak` chip. 60 | Keccak = 12, 61 | 62 | /// Interaction to accumulate the global interaction digests. 63 | GlobalAccumulation = 13, 64 | 65 | /// Interaction with the `MemoryGlobalInit` chip. 66 | MemoryGlobalInitControl = 14, 67 | 68 | /// Interaction with the `MemoryGlobalFinalize` chip. 69 | MemoryGlobalFinalizeControl = 15, 70 | } 71 | 72 | impl InteractionKind { 73 | /// Returns all kinds of interactions. 74 | #[must_use] 75 | pub fn all_kinds() -> Vec { 76 | vec![ 77 | InteractionKind::Memory, 78 | InteractionKind::Program, 79 | InteractionKind::Instruction, 80 | InteractionKind::Alu, 81 | InteractionKind::Byte, 82 | InteractionKind::Range, 83 | InteractionKind::State, 84 | InteractionKind::Syscall, 85 | InteractionKind::Global, 86 | InteractionKind::ShaExtend, 87 | InteractionKind::ShaCompress, 88 | InteractionKind::Keccak, 89 | InteractionKind::GlobalAccumulation, 90 | InteractionKind::MemoryGlobalInitControl, 91 | InteractionKind::MemoryGlobalFinalizeControl, 92 | ] 93 | } 94 | } 95 | 96 | impl Interaction { 97 | /// Create a new interaction. 98 | pub const fn new( 99 | values: Vec>, 100 | multiplicity: VirtualPairCol, 101 | kind: InteractionKind, 102 | scope: InteractionScope, 103 | ) -> Self { 104 | Self { values, multiplicity, kind, scope } 105 | } 106 | 107 | /// The index of the argument in the lookup table. 108 | pub const fn argument_index(&self) -> usize { 109 | self.kind as usize 110 | } 111 | 112 | /// Calculate the interactions evaluation. 
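/// Returns the pair `(multiplicity, fingerprint)`: the multiplicity is the affine combination of preprocessed and main openings described by `self.multiplicity`, and the fingerprint is the random linear combination `alpha + argument_index + values[0] * beta + values[1] * beta^2 + ...` that identifies this interaction's tuple in the lookup argument.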
113 | pub fn eval( 114 | &self, 115 | preprocessed: Option<&MleEval>, 116 | main: &MleEval, 117 | alpha: Expr, 118 | beta: &Expr, 119 | ) -> (Expr, Expr) 120 | where 121 | F: Into, 122 | Expr: AbstractField + Mul, 123 | Var: Into + Copy, 124 | { 125 | let mut multiplicity_eval = self.multiplicity.constant.into(); 126 | // let mut mult_value = self.multiplicity.constant.into(); 127 | let mut betas = beta.powers(); 128 | for (column, weight) in self.multiplicity.column_weights.iter() { 129 | let weight: Expr = (*weight).into(); 130 | match column { 131 | PairCol::Preprocessed(i) => { 132 | multiplicity_eval += preprocessed.as_ref().unwrap()[*i].into() * weight; 133 | } 134 | PairCol::Main(i) => multiplicity_eval += main[*i].into() * weight, 135 | } 136 | } 137 | 138 | let mut fingerprint_eval = 139 | alpha + betas.next().unwrap() * Expr::from_canonical_usize(self.argument_index()); 140 | for (element, beta_pow) in self.values.iter().zip(betas) { 141 | let evaluation = if let Some(preprocessed) = preprocessed { 142 | element.apply::(preprocessed, main) 143 | } else { 144 | element.apply::(&[], main) 145 | }; 146 | fingerprint_eval += evaluation * beta_pow; 147 | } 148 | 149 | (multiplicity_eval, fingerprint_eval) 150 | } 151 | } 152 | 153 | impl Debug for Interaction { 154 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 155 | f.debug_struct("Interaction") 156 | .field("kind", &self.kind) 157 | .field("scope", &self.scope) 158 | .finish_non_exhaustive() 159 | } 160 | } 161 | 162 | impl Display for InteractionKind { 163 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 164 | match self { 165 | InteractionKind::Memory => write!(f, "Memory"), 166 | InteractionKind::Program => write!(f, "Program"), 167 | InteractionKind::Instruction => write!(f, "Instruction"), 168 | InteractionKind::Alu => write!(f, "Alu"), 169 | InteractionKind::Byte => write!(f, "Byte"), 170 | InteractionKind::Range => write!(f, "Range"), 171 | InteractionKind::State => write!(f, "State"), 172 | InteractionKind::Syscall => write!(f, "Syscall"), 173 | InteractionKind::Global => write!(f, "Global"), 174 | InteractionKind::ShaExtend => write!(f, "ShaExtend"), 175 | InteractionKind::ShaCompress => write!(f, "ShaCompress"), 176 | InteractionKind::Keccak => write!(f, "Keccak"), 177 | InteractionKind::GlobalAccumulation => write!(f, "GlobalAccumulation"), 178 | InteractionKind::MemoryGlobalInitControl => write!(f, "MemoryGlobalInitControl"), 179 | InteractionKind::MemoryGlobalFinalizeControl => { 180 | write!(f, "MemoryGlobalFinalizeControl") 181 | } 182 | } 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /crates/stark/src/lookup/mod.rs: -------------------------------------------------------------------------------- 1 | mod builder; 2 | mod debug; 3 | mod interaction; 4 | 5 | pub use builder::InteractionBuilder; 6 | pub use debug::*; 7 | pub use interaction::*; 8 | -------------------------------------------------------------------------------- /crates/stark/src/machine.rs: -------------------------------------------------------------------------------- 1 | use derive_where::derive_where; 2 | use p3_field::Field; 3 | use std::collections::BTreeSet; 4 | 5 | use crate::{air::MachineAir, Chip}; 6 | 7 | /// A shape for a machine. 8 | #[derive(Debug)] 9 | #[derive_where(Clone)] 10 | pub struct MachineShape { 11 | chip_clusters: Vec>>, 12 | } 13 | 14 | impl> MachineShape { 15 | /// Create a single shape that always includes all the chips. 
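/// With a single all-inclusive cluster, `smallest_cluster` succeeds for every possible chip subset.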
16 | #[must_use] 17 | pub fn all(chips: &[Chip]) -> Self { 18 | let chip_clusters = vec![chips.iter().cloned().collect()]; 19 | Self { chip_clusters } 20 | } 21 | 22 | /// Create a new shape from a list of chip clusters. 23 | #[must_use] 24 | pub const fn new(chip_clusters: Vec>>) -> Self { 25 | Self { chip_clusters } 26 | } 27 | 28 | /// Returns the smallest shape cluster that contains all the chips with given names. 29 | #[must_use] 30 | pub fn smallest_cluster(&self, chips: &BTreeSet>) -> Option<&BTreeSet>> { 31 | self.chip_clusters 32 | .iter() 33 | .filter(|cluster| chips.is_subset(cluster)) 34 | .min_by_key(|cluster| cluster.len()) 35 | } 36 | } 37 | 38 | /// A STARK for proving RISC-V execution. 39 | #[derive(Debug)] 40 | #[derive_where(Clone)] 41 | pub struct Machine { 42 | /// The chips that make up the RISC-V STARK machine, in order of their execution. 43 | chips: Vec>, 44 | /// The number of public values elements that the machine uses 45 | num_pv_elts: usize, 46 | } 47 | 48 | impl Machine 49 | where 50 | F: Field, 51 | A: MachineAir, 52 | { 53 | /// Creates a new [`StarkMachine`]. 54 | #[must_use] 55 | pub const fn new(chips: Vec>, num_pv_elts: usize) -> Self { 56 | Self { chips, num_pv_elts } 57 | } 58 | 59 | /// Returns the chips in the machine. 60 | #[must_use] 61 | pub fn chips(&self) -> &[Chip] { 62 | &self.chips 63 | } 64 | 65 | /// Returns the number of public values elements. 66 | #[must_use] 67 | pub const fn num_pv_elts(&self) -> usize { 68 | self.num_pv_elts 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /crates/stark/src/public_values.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use sha2::{Digest, Sha256}; 5 | use sp1_primitives::io::{POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS}; 6 | 7 | use crate::Word; 8 | /// A septic extension with an irreducible polynomial `z^7 - 2z - 5`. 9 | /// 10 | /// The field can be constructed as `F_{p^7} = F_p[z]/(z^7 - 2z - 5)`. 11 | #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] 12 | #[repr(C)] 13 | pub struct SepticExtension(pub [F; 7]); 14 | 15 | /// A septic elliptic curve point on y^2 = x^3 + 2x + 26z^5 over field `F_{p^7} = F_p[z]/(z^7 - 2z - 16 | /// 5)`. 17 | #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] 18 | #[repr(C)] 19 | pub struct SepticCurve { 20 | /// The x-coordinate of an elliptic curve point. 21 | pub x: SepticExtension, 22 | /// The y-coordinate of an elliptic curve point. 23 | pub y: SepticExtension, 24 | } 25 | 26 | /// A global cumulative sum digest, a point on the elliptic curve that `SepticCurve` represents. 27 | /// As these digests start with the `CURVE_CUMULATIVE_SUM_START` point, they require special summing 28 | /// logic. 29 | #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] 30 | #[repr(C)] 31 | pub struct SepticDigest(pub SepticCurve); 32 | 33 | /// Stores all of a shard proof's public values. 34 | #[derive(Serialize, Deserialize, Clone, Copy, Default, Debug)] 35 | #[repr(C)] 36 | pub struct PublicValues { 37 | /// The hash of all the bytes that the guest program has written to public values. 38 | pub committed_value_digest: [W1; PV_DIGEST_NUM_WORDS], 39 | 40 | /// The hash of all deferred proofs that have been witnessed in the VM. It will be rebuilt in 41 | /// recursive verification as the proofs get verified. 
    /// The hash itself is a rolling Poseidon2 hash of each proof + vkey hash and the previous
    /// hash, which is initially zero.
    pub deferred_proofs_digest: [T; POSEIDON_NUM_WORDS],

    /// The shard's start program counter.
    pub start_pc: T,

    /// The expected start program counter for the next shard.
    pub next_pc: T,

    /// The exit code of the program. Only valid if halt has been executed.
    pub exit_code: T,

    /// The shard number.
    pub shard: T,

    /// The execution shard number.
    pub execution_shard: T,

    /// The next execution shard number.
    pub next_execution_shard: T,

    /// The largest address that is witnessed for initialization in the previous shard.
    pub previous_init_addr_word: W2,

    /// The largest address that is witnessed for initialization in the current shard.
    pub last_init_addr_word: W2,

    /// The largest address that is witnessed for finalization in the previous shard.
    pub previous_finalize_addr_word: W2,

    /// The largest address that is witnessed for finalization in the current shard.
    pub last_finalize_addr_word: W2,

    /// The last timestamp of the shard.
    pub last_timestamp: T,

    /// The inverse of the last timestamp of the shard.
    pub last_timestamp_inv: T,

    /// The number of global memory initializations in the shard.
    pub global_init_count: T,

    /// The number of global memory finalizations in the shard.
    pub global_finalize_count: T,

    /// The number of global interactions in the shard.
    pub global_count: T,

    /// The global cumulative sum of the shard.
    pub global_cumulative_sum: SepticDigest<T>,

    /// The empty values to ensure the size of the public values struct is a multiple of 8.
    pub empty: [T; 7],
}

impl<T> Borrow<PublicValues<[T; 4], Word<T>, T>> for [T] {
    fn borrow(&self) -> &PublicValues<[T; 4], Word<T>, T> {
        let size = std::mem::size_of::<PublicValues<[u8; 4], Word<u8>, u8>>();
        debug_assert!(self.len() >= size);
        let slice = &self[0..size];
        let (prefix, shorts, _suffix) =
            unsafe { slice.align_to::<PublicValues<[T; 4], Word<T>, T>>() };
        debug_assert!(prefix.is_empty(), "Alignment should match");
        debug_assert_eq!(shorts.len(), 1);
        &shorts[0]
    }
}

/// Hash the input using SHA256.
#[must_use]
pub fn sha256_hash(input: &[u8]) -> Vec<u8> {
    let mut hasher = Sha256::new();
    hasher.update(input);
    hasher.finalize().to_vec()
}

/// Hash the input using Blake3.
#[must_use]
pub fn blake3_hash(input: &[u8]) -> Vec<u8> {
    blake3::hash(input).as_bytes().to_vec()
}

--------------------------------------------------------------------------------
/crates/stark/src/septic_digest.rs:
--------------------------------------------------------------------------------
//! Elliptic curve digests with a starting point to avoid Weierstrass addition exceptions.
use crate::{septic_curve::SepticCurve, septic_extension::SepticExtension};
use p3_field::{AbstractExtensionField, AbstractField, Field};
use serde::{Deserialize, Serialize};
use std::{iter::Sum, ops::Add};

/// The x-coordinate for a curve point used as a starting cumulative sum for global permutation
/// trace generation, derived from `sqrt(2)`.
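///
/// The test `test_const_points` at the bottom of this file checks that this point, and the
/// digest-accumulation start point below, actually lie on the curve.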
pub const CURVE_CUMULATIVE_SUM_START_X: [u32; 7] =
    [0x1434213, 0x5623730, 0x9504880, 0x1688724, 0x2096980, 0x7856967, 0x1875376];

/// The y-coordinate for a curve point used as a starting cumulative sum for global permutation
/// trace generation, derived from `sqrt(2)`.
pub const CURVE_CUMULATIVE_SUM_START_Y: [u32; 7] =
    [885797405, 1130275556, 567836311, 52700240, 239639200, 442612155, 1839439733];

/// The x-coordinate for a curve point used as a starting random point for digest accumulation,
/// derived from `sqrt(3)`.
pub const DIGEST_SUM_START_X: [u32; 7] =
    [0x1742050, 0x8075688, 0x7729352, 0x7446341, 0x5058723, 0x6694280, 0x5253810];

/// The y-coordinate for a curve point used as a starting random point for digest accumulation,
/// derived from `sqrt(3)`.
pub const DIGEST_SUM_START_Y: [u32; 7] =
    [462194069, 1842131493, 281651264, 1684885851, 483907222, 1097389352, 1648978901];

/// A global cumulative sum digest, a point on the elliptic curve that `SepticCurve` represents.
/// As these digests start with the `CURVE_CUMULATIVE_SUM_START` point, they require special
/// summing logic.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct SepticDigest<F>(pub SepticCurve<F>);

impl<F: AbstractField> SepticDigest<F> {
    #[must_use]
    /// The zero digest, the starting point of the accumulation of curve points derived from the
    /// scheme.
    pub fn zero() -> Self {
        SepticDigest(SepticCurve {
            x: SepticExtension::<F>::from_base_fn(|i| {
                F::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_X[i])
            }),
            y: SepticExtension::<F>::from_base_fn(|i| {
                F::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_Y[i])
            }),
        })
    }

    #[must_use]
    /// The digest used for starting the accumulation of digests.
    pub fn starting_digest() -> Self {
        SepticDigest(SepticCurve {
            x: SepticExtension::<F>::from_base_fn(|i| F::from_canonical_u32(DIGEST_SUM_START_X[i])),
            y: SepticExtension::<F>::from_base_fn(|i| F::from_canonical_u32(DIGEST_SUM_START_Y[i])),
        })
    }
}

impl<F: Field> SepticDigest<F> {
    /// Checks that the digest is zero, the starting point of the accumulation.
    pub fn is_zero(&self) -> bool {
        *self == SepticDigest::<F>::zero()
    }
}

impl<F: Field> Add for SepticDigest<F> {
    type Output = Self;

    fn add(self, rhs: Self) -> Self {
        let start = Self::starting_digest().0;

        let sum_a = start.add_incomplete(self.0).sub_incomplete(Self::zero().0);
        let sum_b = sum_a.add_incomplete(rhs.0).sub_incomplete(Self::zero().0);

        let mut result = sum_b;
        result.add_assign(SepticDigest::<F>::zero().0);
        result.sub_assign(start);

        SepticDigest(result)
    }
}

impl<F: Field> Sum for SepticDigest<F> {
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        let start = SepticDigest::<F>::starting_digest().0;

        // Computation order is start + (digest1 - offset) + (digest2 - offset) + ... +
        // (digestN - offset) + offset - start.
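        // The incomplete Weierstrass formulas behind `add_incomplete`/`sub_incomplete` are
        // undefined when the operands share an x-coordinate (doubling, or adding an inverse).
        // Folding from the fixed random `start` point keeps each partial sum shifted, so such
        // collisions are not expected for honestly generated digests; the final
        // `+ zero() - start` below undoes the shift.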
        let mut ret = iter.fold(start, |acc, x| {
            let sum_offset = acc.add_incomplete(x.0);
            sum_offset.sub_incomplete(SepticDigest::<F>::zero().0)
        });

        ret.add_assign(SepticDigest::<F>::zero().0);
        ret.sub_assign(start);
        SepticDigest(ret)
    }
}

#[cfg(test)]
mod test {
    use crate::septic_curve::{CURVE_WITNESS_DUMMY_POINT_X, CURVE_WITNESS_DUMMY_POINT_Y};

    use super::*;
    use p3_baby_bear::BabyBear;

    #[test]
    fn test_const_points() {
        let x: SepticExtension<BabyBear> = SepticExtension::from_base_fn(|i| {
            BabyBear::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_X[i])
        });
        let y: SepticExtension<BabyBear> = SepticExtension::from_base_fn(|i| {
            BabyBear::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_Y[i])
        });
        let point = SepticCurve { x, y };
        assert!(point.check_on_point());
        let x: SepticExtension<BabyBear> =
            SepticExtension::from_base_fn(|i| BabyBear::from_canonical_u32(DIGEST_SUM_START_X[i]));
        let y: SepticExtension<BabyBear> =
            SepticExtension::from_base_fn(|i| BabyBear::from_canonical_u32(DIGEST_SUM_START_Y[i]));
        let point = SepticCurve { x, y };
        assert!(point.check_on_point());
        let x: SepticExtension<BabyBear> = SepticExtension::from_base_fn(|i| {
            BabyBear::from_canonical_u32(CURVE_WITNESS_DUMMY_POINT_X[i])
        });
        let y: SepticExtension<BabyBear> = SepticExtension::from_base_fn(|i| {
            BabyBear::from_canonical_u32(CURVE_WITNESS_DUMMY_POINT_Y[i])
        });
        let point = SepticCurve { x, y };
        assert!(point.check_on_point());
    }
}

--------------------------------------------------------------------------------
/crates/stark/src/util.rs:
--------------------------------------------------------------------------------
use hypercube_merkle_tree::my_bb_16_perm;
use p3_field::Field;

/// The digest size.
pub const DIGEST_SIZE: usize = 8;

/// An implementation of `batch_multiplicative_inverse` that operates in place.
#[allow(dead_code)]
pub fn batch_multiplicative_inverse_inplace<F: Field>(values: &mut [F]) {
    // Check if values are zero and construct a new vector with only nonzero values.
    let mut nonzero_values = Vec::with_capacity(values.len());
    let mut indices = Vec::with_capacity(values.len());
    for (i, value) in values.iter().copied().enumerate() {
        if value.is_zero() {
            continue;
        }
        nonzero_values.push(value);
        indices.push(i);
    }

    // Compute the multiplicative inverse of nonzero values.
    let inverse_nonzero_values = p3_field::batch_multiplicative_inverse(&nonzero_values);

    // Reconstruct the original vector.
    for (i, index) in indices.into_iter().enumerate() {
        values[index] = inverse_nonzero_values[i];
    }
}

/// Compute the ceiling of the base-2 logarithm of a number.
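///
/// Illustrative doc example (not from the original source):
/// ```ignore
/// assert_eq!(log2_ceil_usize(5), 3); // 5 rounds up to 8 = 2^3.
/// assert_eq!(log2_ceil_usize(8), 3); // Exact powers of two are left as-is.
/// ```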
#[must_use]
pub fn log2_ceil_usize(n: usize) -> usize {
    n.next_power_of_two().ilog2() as usize
}

/// Get the inner perm.
#[must_use]
pub fn inner_perm() -> hypercube_merkle_tree::Perm {
    my_bb_16_perm()
}

--------------------------------------------------------------------------------
/crates/stark/src/verifier/config.rs:
--------------------------------------------------------------------------------
use std::{borrow::Borrow, collections::BTreeMap};

use hypercube_jagged::JaggedConfig;
use p3_baby_bear::BabyBear;
use p3_challenger::CanObserve;
use p3_field::AbstractField;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use sp1_primitives::poseidon2_hash;

use crate::{septic_digest::SepticDigest, DIGEST_SIZE};

/// A configuration for a machine.
pub trait MachineConfig:
    JaggedConfig + 'static + Send + Sync + Serialize + DeserializeOwned
{
}

impl<C> MachineConfig for C where
    C: JaggedConfig + 'static + Send + Sync + Serialize + DeserializeOwned
{
}

pub use hypercube_jagged::BabyBearPoseidon2;

/// A specification of preprocessed polynomial batch dimensions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ChipDimensions {
    /// The height of the preprocessed polynomial.
    pub height: usize,
    /// The number of polynomials in the preprocessed batch.
    pub num_polynomials: usize,
}

/// A verifying key.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MachineVerifyingKey<C: MachineConfig> {
    /// The start pc of the program.
    pub pc_start: C::F,
    /// The starting global digest of the program, after incorporating the initial memory.
    pub initial_global_cumulative_sum: SepticDigest<C::F>,
    /// The preprocessed commitments.
    pub preprocessed_commit: Option<C::Commitment>,
    /// The dimensions of the preprocessed polynomials.
    pub preprocessed_chip_information: BTreeMap<String, ChipDimensions>,
}

impl<C: MachineConfig<F = BabyBear>> MachineVerifyingKey<C>
where
    C::Commitment: Borrow<[BabyBear; DIGEST_SIZE]>,
{
    /// Observes the values of the proving key into the challenger.
    pub fn observe_into(&self, challenger: &mut C::Challenger) {
        if let Some(preprocessed_commit) = self.preprocessed_commit.as_ref() {
            challenger.observe(preprocessed_commit.clone());
        }
        challenger.observe(self.pc_start);
        challenger.observe_slice(&self.initial_global_cumulative_sum.0.x.0);
        challenger.observe_slice(&self.initial_global_cumulative_sum.0.y.0);
        // Observe the padding.
        challenger.observe(C::F::zero());
    }

    /// Hash the verifying key into an array of `BabyBear` elements.
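    ///
    /// The input vector is laid out as: the preprocessed commitment digest (if any), then
    /// `pc_start`, then the x- and y-coordinates of the initial global cumulative sum, and
    /// finally the height of each preprocessed chip; the whole vector is fed to
    /// `poseidon2_hash`.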
    pub fn hash_babybear(&self) -> [BabyBear; DIGEST_SIZE] {
        let num_inputs = DIGEST_SIZE + 1 + 14 + (4 * self.preprocessed_chip_information.len());
        let mut inputs = Vec::with_capacity(num_inputs);
        inputs.extend(
            self.preprocessed_commit
                .as_ref()
                .map(Borrow::borrow)
                .map(IntoIterator::into_iter)
                .unwrap_or_default()
                .copied(),
        );
        inputs.push(self.pc_start);
        inputs.extend(self.initial_global_cumulative_sum.0.x.0);
        inputs.extend(self.initial_global_cumulative_sum.0.y.0);
        for ChipDimensions { height, num_polynomials: _ } in
            self.preprocessed_chip_information.values()
        {
            inputs.push(BabyBear::from_canonical_usize(*height));
        }

        poseidon2_hash(inputs)
    }
}

--------------------------------------------------------------------------------
/crates/stark/src/verifier/mod.rs:
--------------------------------------------------------------------------------
mod config;
mod proof;
mod shard;

pub use config::*;
pub use proof::*;
pub use shard::*;

--------------------------------------------------------------------------------
/crates/stark/src/verifier/proof.rs:
--------------------------------------------------------------------------------
use std::collections::BTreeSet;

use hypercube_jagged::JaggedPcsProof;
use hypercube_multilinear::Point;
use hypercube_sumcheck::PartialSumcheckProof;
use p3_matrix::{dense::RowMajorMatrixView, stack::VerticalPair};
use serde::{Deserialize, Serialize};

use crate::LogupGkrProof;

use super::MachineConfig;

/// The maximum number of elements that can be stored in the public values vec. Both SP1 and
/// recursive proofs need to pad their public values vec to this length. This is required since the
/// recursion verification program expects the public values vec to be fixed length.
pub const PROOF_MAX_NUM_PVS: usize = 110;

/// Data required for testing.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "C: MachineConfig, C::Challenger: Serialize",
    deserialize = "C: MachineConfig, C::Challenger: Deserialize<'de>"
))]
// #[cfg(any(test, feature = "test-proof"))]
pub struct TestingData<C: MachineConfig> {
    /// The gkr points.
    pub gkr_points: Vec<Point<C::EF>>,
    /// The challenger state just before the zerocheck.
    pub challenger_state: C::Challenger,
}

/// A proof for a shard.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "C: MachineConfig, C::Challenger: Serialize",
    deserialize = "C: MachineConfig, C::Challenger: Deserialize<'de>"
))]
pub struct ShardProof<C: MachineConfig> {
    /// The public values.
    pub public_values: Vec<C::F>,
    /// The commitments to main traces.
    pub main_commitment: C::Commitment,
    /// The Logup GKR IOP proof.
    pub logup_gkr_proof: LogupGkrProof<C>,
    /// The zerocheck IOP proof.
    pub zerocheck_proof: PartialSumcheckProof<C::EF>,
    /// The values of the traces at the final random point.
    pub opened_values: ShardOpenedValues<C::F, C::EF>,
    /// The evaluation proof.
    pub evaluation_proof: JaggedPcsProof<C>,
    /// The chips participating in the shard.
    pub shard_chips: BTreeSet<String>,
}

/// The values of the chips in the shard at a random point.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShardOpenedValues<F, EF> {
    /// For each chip with respect to the canonical ordering, the values of the chip at the random
    /// point.
    pub chips: Vec<ChipOpenedValues<F, EF>>,
}

/// The opening values for a given chip at a random point.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound(serialize = "F: Serialize, EF: Serialize"))]
#[serde(bound(deserialize = "F: Deserialize<'de>, EF: Deserialize<'de>"))]
pub struct ChipOpenedValues<F, EF> {
    /// The opening of the preprocessed trace.
    pub preprocessed: AirOpenedValues<EF>,
    /// The opening of the main trace.
    pub main: AirOpenedValues<EF>,
    /// The local cumulative sum.
    pub local_cumulative_sum: EF,
    /// The big-endian bit representation of the degree of the chip.
    pub degree: Point<F>,
}

/// The opening values for a given table section at a random point.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound(serialize = "T: Serialize"))]
#[serde(bound(deserialize = "T: Deserialize<'de>"))]
pub struct AirOpenedValues<T> {
    /// The opening of the local trace.
    pub local: Vec<T>,
    /// The opening of the next trace.
    pub next: Vec<T>,
}

impl<T> AirOpenedValues<T> {
    /// Organize the opening values into a vertical pair.
    #[must_use]
    pub fn view(&self) -> VerticalPair<RowMajorMatrixView<'_, T>, RowMajorMatrixView<'_, T>>
    where
        T: Clone + Send + Sync,
    {
        let a = RowMajorMatrixView::new_row(&self.local);
        let b = RowMajorMatrixView::new_row(&self.next);
        VerticalPair::new(a, b)
    }
}

--------------------------------------------------------------------------------
/crates/stark/src/word.rs:
--------------------------------------------------------------------------------
use std::ops::{Index, IndexMut};

use crate::air::SP1AirBuilder;
use arrayref::array_ref;
use itertools::Itertools;
use p3_air::AirBuilder;
use p3_field::{AbstractField, Field};
use serde::{Deserialize, Serialize};
use sp1_derive::AlignedBorrow;
use sp1_primitives::consts::WORD_SIZE;
use std::array::IntoIter;

/// An array of two 16-bit limbs to represent a 32-bit value.
///
/// We use the generic type `T` to represent the different representations of a limb, ranging from
/// a `u8` to an `AB::Var` or `AB::Expr`.
#[derive(
    AlignedBorrow, Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize,
)]
#[repr(C)]
pub struct Word<T>(pub [T; WORD_SIZE]);

impl<T> Word<T> {
    /// Applies `f` to each element of the word.
    pub fn map<F, S>(self, f: F) -> Word<S>
    where
        F: FnMut(T) -> S,
    {
        Word(self.0.map(f))
    }

    /// Extends a variable to a word.
    pub fn extend_var<AB: SP1AirBuilder<Var = T>>(var: T) -> Word<AB::Expr> {
        Word([AB::Expr::zero() + var, AB::Expr::zero()])
    }
}

impl<T: AbstractField> Word<T> {
    /// Extends an expression to a word.
    pub fn extend_expr<AB: SP1AirBuilder<Expr = T>>(expr: T) -> Word<AB::Expr> {
        Word([AB::Expr::zero() + expr, AB::Expr::zero()])
    }

    /// Returns a word with all zero expressions.
    #[must_use]
    pub fn zero<AB: SP1AirBuilder<Expr = T>>() -> Word<T> {
        Word([AB::Expr::zero(), AB::Expr::zero()])
    }
}

impl<F: Field> Word<F> {
    /// Converts a word to a u32.
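    ///
    /// Hypothetical doc example (not from the original source), assuming the two-limb layout
    /// used by `From<u32>` below:
    /// ```ignore
    /// let w = Word::<BabyBear>::from(0x1234_5678);
    /// assert_eq!(w.to_u32(), 0x1234_5678);
    /// ```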
    pub fn to_u32(&self) -> u32 {
        let low = self.0[0].to_string().parse::<u16>().unwrap();
        let high = self.0[1].to_string().parse::<u16>().unwrap();
        ((high as u32) << 16) | (low as u32)
    }
}

impl<V: Copy> Word<V> {
    /// Reduces a word to a single expression.
    pub fn reduce<AB: AirBuilder<Var = V>>(&self) -> AB::Expr {
        let base = [1, 1 << 16].map(AB::Expr::from_canonical_u32);
        self.0.iter().enumerate().map(|(i, x)| base[i].clone() * *x).sum()
    }
}

impl<T> Index<usize> for Word<T> {
    type Output = T;

    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}

impl<T> IndexMut<usize> for Word<T> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.0[index]
    }
}

impl<F: AbstractField> From<u32> for Word<F> {
    fn from(value: u32) -> Self {
        Word([
            F::from_canonical_u16((value & 0xFFFF) as u16),
            F::from_canonical_u16((value >> 16) as u16),
        ])
    }
}

impl<T> IntoIterator for Word<T> {
    type Item = T;
    type IntoIter = IntoIter<T, WORD_SIZE>;

    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}

impl<T: Clone> FromIterator<T> for Word<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let elements = iter.into_iter().take(WORD_SIZE).collect_vec();

        Word(array_ref![elements, 0, WORD_SIZE].clone())
    }
}

--------------------------------------------------------------------------------
/crates/sumcheck/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "hypercube-sumcheck"
version.workspace = true
edition.workspace = true

repository.workspace = true
keywords.workspace = true
categories.workspace = true

[dependencies]
p3-challenger = { workspace = true }

hypercube-algebra = { workspace = true }
p3-field = { workspace = true }
hypercube-multilinear = { workspace = true }
serde = { workspace = true }
thiserror = { workspace = true }

--------------------------------------------------------------------------------
/crates/sumcheck/src/lib.rs:
--------------------------------------------------------------------------------
mod proof;
mod verifier;

pub use proof::*;
pub use verifier::*;

--------------------------------------------------------------------------------
/crates/sumcheck/src/proof.rs:
--------------------------------------------------------------------------------
use hypercube_algebra::UnivariatePolynomial;
use hypercube_multilinear::Point;
use serde::{Deserialize, Serialize};

/// A sumcheck proof that does not include the evaluation proofs.
///
/// Verifying a partial sumcheck proof is equivalent to verifying the sumcheck claim on the
/// condition of having evaluation proofs for the given component polynomials at the given points.
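///
/// On the round structure (see `verifier.rs`): round `i` is represented by
/// `univariate_polys[i]`. The verifier checks `p_0(0) + p_0(1)` against `claimed_sum`, checks
/// `p_i(0) + p_i(1)` against `p_{i-1}(alpha_{i-1})` for each subsequent round, and is left with
/// the evaluation claim recorded in `point_and_eval`.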
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PartialSumcheckProof<K> {
    pub univariate_polys: Vec<UnivariatePolynomial<K>>,
    pub claimed_sum: K,
    pub point_and_eval: (Point<K>, K),
}

--------------------------------------------------------------------------------
/crates/sumcheck/src/verifier.rs:
--------------------------------------------------------------------------------
use thiserror::Error;

use hypercube_multilinear::Point;
use p3_challenger::FieldChallenger;
use p3_field::{ExtensionField, Field};

use crate::PartialSumcheckProof;

#[derive(Debug, Eq, PartialEq, Error)]
pub enum SumcheckError {
    #[error("invalid proof shape")]
    InvalidProofShape,
    #[error("sumcheck round inconsistency")]
    SumcheckRoundInconsistency,
    #[error("inconsistency of prover message with claimed sum")]
    InconsistencyWithClaimedSum,
    #[error("inconsistency of proof with evaluation claim")]
    InconsistencyWithEval,
}

/// Verifies that a `PartialSumcheckProof` is correct up until the evaluation claim.
pub fn partially_verify_sumcheck_proof<
    F: Field,
    EF: ExtensionField<F>,
    Challenger: FieldChallenger<F>,
>(
    proof: &PartialSumcheckProof<EF>,
    challenger: &mut Challenger,
) -> Result<(), SumcheckError> {
    let num_variables = proof.univariate_polys.len();
    let mut alpha_point = Point::default();

    // Checks for the correct proof shape.
    if num_variables != proof.point_and_eval.0.dimension() {
        return Err(SumcheckError::InvalidProofShape);
    }

    // There is a way to structure a sumcheck proof so that this check is not needed, but it
    // doesn't actually save the verifier work.
    let first_poly = &proof.univariate_polys[0];
    if first_poly.eval_one_plus_eval_zero() != proof.claimed_sum {
        return Err(SumcheckError::InconsistencyWithClaimedSum);
    }

    challenger.observe_slice(
        &first_poly
            .coefficients
            .iter()
            .flat_map(|x| x.as_base_slice())
            .copied()
            .collect::<Vec<F>>(),
    );
    let mut previous_poly = first_poly;

    for poly in proof.univariate_polys.iter().skip(1) {
        let alpha = challenger.sample_ext_element();
        alpha_point.add_dimension(alpha);
        let expected_eval = previous_poly.eval_at_point(alpha);
        if expected_eval != poly.eval_one_plus_eval_zero() {
            return Err(SumcheckError::SumcheckRoundInconsistency);
        }
        challenger.observe_slice(
            &poly.coefficients.iter().flat_map(|x| x.as_base_slice()).copied().collect::<Vec<F>>(),
        );
        previous_poly = poly;
    }

    let alpha = challenger.sample_ext_element();
    alpha_point.add_dimension(alpha);

    // Check that the randomness generated for the prover is the same as the one obtained by the
    // verifier. There is a way to structure a sumcheck proof so that this check is not needed,
    // but it doesn't actually save the verifier work.
    if alpha_point != proof.point_and_eval.0 {
        return Err(SumcheckError::InvalidProofShape);
    }

    // Check that the evaluation claim implied by the last univariate polynomial matches the
    // evaluation claim in the proof struct. There is a way to structure a sumcheck proof so that
    // this check is not needed, but it doesn't actually save the verifier work.
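    // What remains after this check is exactly `point_and_eval`: the caller must supply an
    // evaluation proof (e.g. a polynomial-commitment opening) showing that the component
    // polynomial evaluates to `proof.point_and_eval.1` at `proof.point_and_eval.0`.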
    if previous_poly.eval_at_point(alpha) != proof.point_and_eval.1 {
        return Err(SumcheckError::InconsistencyWithEval);
    }

    Ok(())
}

--------------------------------------------------------------------------------
/crates/tensor/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "hypercube-tensor"
version.workspace = true
edition.workspace = true

repository.workspace = true
keywords.workspace = true
categories.workspace = true

[dependencies]
hypercube-alloc = { workspace = true }
p3-matrix = { workspace = true }
arrayvec = "0.7.6"
rand = { workspace = true }
serde = { workspace = true }
thiserror = { workspace = true }
itertools = { workspace = true }
derive-where = { workspace = true }

[dev-dependencies]

[lints]
workspace = true

--------------------------------------------------------------------------------
/crates/tensor/src/dimensions.rs:
--------------------------------------------------------------------------------
use arrayvec::ArrayVec;
use itertools::Itertools;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use thiserror::Error;

const MAX_DIMENSIONS: usize = 3;

#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[repr(C)]
pub struct Dimensions {
    sizes: ArrayVec<usize, MAX_DIMENSIONS>,
    strides: ArrayVec<usize, MAX_DIMENSIONS>,
}

#[derive(Debug, Clone, Copy, Error)]
pub enum DimensionsError {
    #[error("Too many dimensions {0}, maximum number allowed is {MAX_DIMENSIONS}")]
    TooManyDimensions(usize),
    #[error("total number of elements must match, expected {0}, got {1}")]
    NumElementsMismatch(usize, usize),
}

impl Dimensions {
    fn new(sizes: ArrayVec<usize, MAX_DIMENSIONS>) -> Self {
        let mut strides = ArrayVec::new();
        let mut stride = 1;
        for size in sizes.iter().rev() {
            strides.push(stride);
            stride *= size;
        }
        strides.reverse();
        Self { sizes, strides }
    }

    #[inline]
    pub fn total_len(&self) -> usize {
        self.sizes.iter().product()
    }

    #[inline]
    pub(crate) fn compatible(&self, other: &Dimensions) -> Result<(), DimensionsError> {
        if self.total_len() != other.total_len() {
            return Err(DimensionsError::NumElementsMismatch(self.total_len(), other.total_len()));
        }
        Ok(())
    }

    #[inline]
    pub fn sizes(&self) -> &[usize] {
        &self.sizes
    }

    pub(crate) fn sizes_mut(&mut self) -> &mut ArrayVec<usize, MAX_DIMENSIONS> {
        &mut self.sizes
    }

    pub(crate) fn strides_mut(&mut self) -> &mut ArrayVec<usize, MAX_DIMENSIONS> {
        &mut self.strides
    }

    #[inline]
    pub fn strides(&self) -> &[usize] {
        &self.strides
    }

    #[inline]
    pub(crate) fn index_map(&self, index: impl AsRef<[usize]>) -> usize {
        index.as_ref().iter().zip_eq(self.strides.iter()).map(|(i, s)| i * s).sum()
    }
}

impl TryFrom<&[usize]> for Dimensions {
    type Error = DimensionsError;

    fn try_from(value: &[usize]) -> Result<Self, Self::Error> {
        let sizes = ArrayVec::try_from(value)
            .map_err(|_| DimensionsError::TooManyDimensions(value.len()))?;
        Ok(Self::new(sizes))
    }
}

impl TryFrom<Vec<usize>> for Dimensions {
    type Error = DimensionsError;

    fn try_from(value: Vec<usize>) -> Result<Self, Self::Error> {
        let sizes = ArrayVec::try_from(value.as_slice())
            .map_err(|_| DimensionsError::TooManyDimensions(value.len()))?;
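        // `Dimensions::new` fills in row-major strides for the validated sizes (the trailing
        // dimension has stride 1), as computed in the constructor above.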
        Ok(Self::new(sizes))
    }
}

impl<const N: usize> TryFrom<[usize; N]> for Dimensions {
    type Error = DimensionsError;

    fn try_from(value: [usize; N]) -> Result<Self, Self::Error> {
        let sizes = ArrayVec::try_from(value.as_slice())
            .map_err(|_| DimensionsError::TooManyDimensions(value.len()))?;
        Ok(Self::new(sizes))
    }
}

impl FromIterator<usize> for Dimensions {
    #[inline]
    fn from_iter<T: IntoIterator<Item = usize>>(iter: T) -> Self {
        let sizes = ArrayVec::from_iter(iter);
        Self::new(sizes)
    }
}

impl Serialize for Dimensions {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        self.sizes.serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for Dimensions {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let sizes = Vec::deserialize(deserializer)?;
        Ok(Self::try_from(sizes).expect("invalid dimension length"))
    }
}

--------------------------------------------------------------------------------
/crates/tensor/src/lib.rs:
--------------------------------------------------------------------------------
mod dimensions;
mod inner;

pub use dimensions::*;
pub use inner::*;

--------------------------------------------------------------------------------
/crates/utils/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "hypercube-utils"
version.workspace = true
edition.workspace = true

repository.workspace = true
keywords.workspace = true
categories.workspace = true

[dependencies]
serde = "1.0"
tracing-forest = { version = "0.1.6", features = ["ansi", "smallvec"] }
tracing-subscriber = { version = "0.3.18", features = ["std", "env-filter"] }
p3-util = { workspace = true }

--------------------------------------------------------------------------------
/crates/utils/src/lib.rs:
--------------------------------------------------------------------------------
mod logger;

pub use logger::setup_logger;

pub use p3_util::log2_ceil_usize;
pub use p3_util::log2_strict_usize;
pub use p3_util::reverse_bits_len;

pub const fn indices_arr<const N: usize>() -> [usize; N] {
    let mut indices_arr = [0; N];
    let mut i = 0;
    while i < N {
        indices_arr[i] = i;
        i += 1;
    }
    indices_arr
}

/// Returns the internal value of the option if it is set, otherwise returns the next multiple of
/// 32 (with a minimum of 16).
#[track_caller]
#[inline]
pub fn next_multiple_of_32(n: usize, fixed_height: Option<usize>) -> usize {
    match fixed_height {
        Some(height) => {
            if n > height {
                panic!("fixed height is too small: got height {} for number of rows {}", height, n);
            }
            height
        }
        None => {
            let mut padded_nb_rows = n.next_multiple_of(32);
            if padded_nb_rows < 16 {
                padded_nb_rows = 16;
            }
            padded_nb_rows
        }
    }
}

--------------------------------------------------------------------------------
/crates/utils/src/logger.rs:
--------------------------------------------------------------------------------
use std::sync::Once;

use tracing_forest::ForestLayer;
use tracing_subscriber::{
    fmt::format::FmtSpan, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Registry,
};

static INIT: Once = Once::new();

/// A simple logger.
///
/// Set the `RUST_LOG` environment variable to `info` or `debug` to control verbosity.
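///
/// A hypothetical usage sketch (not from the original source):
/// ```ignore
/// std::env::set_var("RUST_LOG", "info");
/// std::env::set_var("RUST_LOGGER", "flat"); // or "forest"
/// hypercube_utils::setup_logger();
/// ```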
pub fn setup_logger() {
    INIT.call_once(|| {
        let default_filter = "off";
        let env_filter = EnvFilter::try_from_default_env()
            .unwrap_or_else(|_| EnvFilter::new(default_filter))
            .add_directive("hyper=off".parse().unwrap())
            .add_directive("p3_keccak_air=off".parse().unwrap())
            .add_directive("p3_fri=off".parse().unwrap())
            .add_directive("p3_dft=off".parse().unwrap())
            .add_directive("p3_challenger=off".parse().unwrap());

        // If the RUST_LOGGER environment variable is set, use it to determine which logger to
        // configure (tracing_forest or tracing_subscriber); otherwise, default to 'flat'.
        let logger_type = std::env::var("RUST_LOGGER").unwrap_or_else(|_| "flat".to_string());
        match logger_type.as_str() {
            "forest" => {
                Registry::default().with(env_filter).with(ForestLayer::default()).init();
            }
            "flat" => {
                tracing_subscriber::fmt::Subscriber::builder()
                    .compact()
                    .with_file(false)
                    .with_target(false)
                    .with_thread_names(false)
                    .with_env_filter(env_filter)
                    .with_span_events(FmtSpan::CLOSE)
                    .finish()
                    .init();
            }
            _ => {
                panic!("Invalid logger type: {}", logger_type);
            }
        }
    });
}

--------------------------------------------------------------------------------
/jagged-polynomial-commitments.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/succinctlabs/hypercube-verifier/fc1d1a3a98a6241afeb27fda4b703a691baf180a/jagged-polynomial-commitments.pdf

--------------------------------------------------------------------------------
/message.bin:
--------------------------------------------------------------------------------

 _______ _______ __ _______ _______ _______ _______ ______ _______
( ____ \( ____ )/ \ |\ /||\ /|( ____ )( ____ \( ____ )( ____ \|\ /|( ___ \ ( ____ \
| ( \/| ( )|\/) ) | ) ( |( \ / )| ( )|| ( \/| ( )|| ( \/| ) ( || ( ) )| ( \/
| (_____ | (____)| | | | (___) | \ (_) / | (____)|| (__ | (____)|| | | | | || (__/ / | (__
(_____ )| _____) | | | ___ | \ / | _____)| __) | __)| | | | | || __ ( | __)
 ) || ( | | | ( ) | ) ( | ( | ( | (\ ( | | | | | || ( \ \ | (
/\____) || ) __) (_ | ) ( | | | | ) | (____/\| ) \ \__| (____/\| (___) || )___) )| (____/\
\_______)|/ \____/ |/ \| \_/ |/ (_______/|/ \__/(_______/(_______)|/ \___/ (_______/



 .+------+ +------+ +------+ +------+ +------+.
 .' | .'| /| /| | | |\ |\ |`. | `.
+---+--+' | +-+----+ | +------+ | +----+-+ | `+--+---+
| | | | | | | | | | | | | | | | | |
| ,+--+---+ | +----+-+ +------+ +-+----+ | +---+--+ |
|.' | .' |/ |/ | | \| \| `. | `. |
+------+' +------+ +------+ +------+ `+------+



 _______ _______ _______ _______ _______ _______ _______ _________ _______ _________ _______ ______
( ____ )( ____ )( ___ )( ___ )( ____ \ |\ /|( ____ \( ____ )\__ __/( ____ \\__ __/( ____ \( __ \
| ( )|| ( )|| ( ) || ( ) || ( \/ | ) ( || ( \/| ( )| ) ( | ( \/ ) ( | ( \/| ( \ )
| (____)|| (____)|| | | || | | || (__ | | | || (__ | (____)| | | | (__ | | | (__ | | ) |
| _____)| __)| | | || | | || __) ( ( ) )| __) | __) | | | __) | | | __) | | | |
| ( | (\ ( | | | || | | || ( \ \_/ / | ( | (\ ( | | | ( | | | ( | | ) |
| ) | ) \ \__| (___) || (___) || ) \ / | (____/\| ) \ \_____) (___| ) ___) (___| (____/\| (__/ )
|/ |/ \__/(_______)(_______)|/ \_/ (_______/|/ \__/\_______/|/ \_______/(_______/(______/

--------------------------------------------------------------------------------
/proof.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/succinctlabs/hypercube-verifier/fc1d1a3a98a6241afeb27fda4b703a691baf180a/proof.bin

--------------------------------------------------------------------------------
/rust-toolchain:
--------------------------------------------------------------------------------
[toolchain]
channel = "1.82.0"

--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
reorder_imports = true
use_small_heuristics = "Max"
use_field_init_shorthand = true

--------------------------------------------------------------------------------
/vk.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/succinctlabs/hypercube-verifier/fc1d1a3a98a6241afeb27fda4b703a691baf180a/vk.bin
--------------------------------------------------------------------------------