├── .gitignore ├── .cargo └── config.toml ├── tests ├── spec │ ├── mod.rs │ ├── random.rs │ ├── rng.rs │ ├── bls12_381.rs │ ├── custom_schnorr_protocol.rs │ └── vectors │ │ ├── testSigmaProtocols.json │ │ └── duplexSpongeVectors.json ├── spec_duplex_sponge.rs ├── test_relations.rs ├── test_composition.rs ├── spec_vectors.rs ├── test_validation_criteria.rs └── relations │ └── mod.rs ├── CHANGELOG.md ├── rust-toolchain.toml ├── src ├── group │ ├── mod.rs │ ├── serialization.rs │ └── msm.rs ├── duplex_sponge │ ├── shake.rs │ ├── mod.rs │ └── keccak.rs ├── errors.rs ├── lib.rs ├── linear_relation │ ├── convert.rs │ ├── canonical.rs │ └── mod.rs ├── codec.rs ├── traits.rs ├── fiat_shamir.rs └── schnorr_protocol.rs ├── .github └── workflows │ ├── typos.yml │ ├── security.yml │ ├── test-vectors.yml │ ├── docs.yml │ ├── lint-fmt.yml │ └── rust.yml ├── LICENSE ├── examples ├── schnorr.rs └── simple_composition.rs ├── Cargo.toml ├── README.md └── benches └── msm.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | **/*.rs.bk 3 | Cargo.lock 4 | *.DS_Store -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.wasm32-wasip1] 2 | runner = "wasmtime" 3 | -------------------------------------------------------------------------------- /tests/spec/mod.rs: -------------------------------------------------------------------------------- 1 | mod bls12_381; 2 | pub(crate) mod custom_schnorr_protocol; 3 | mod random; 4 | pub(crate) mod rng; 5 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | Entries are listed in reverse chronological order. 
4 | 5 | 6 | ## 0.1 7 | 8 | Initial fork from `lox-zkp` and compatibility with the sigma protocol IETF draft. -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.91" 3 | components = ["rustfmt", "clippy"] 4 | # Add the wasm32-wasip1 target, used for running tests. 5 | targets = ["wasm32-wasip1"] 6 | profile = "minimal" 7 | 8 | -------------------------------------------------------------------------------- /src/group/mod.rs: -------------------------------------------------------------------------------- 1 | /// Implementation of multi-scalar multiplication (MSM) over scalars and points. 2 | pub mod msm; 3 | 4 | /// Implementation of batch serialization functions for scalars and points. 5 | pub mod serialization; 6 | -------------------------------------------------------------------------------- /.github/workflows/typos.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Spell checker 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | typos: 7 | name: Spell Check with typos 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout Actions Repository 11 | uses: actions/checkout@v6 12 | - name: Check spelling 13 | uses: crate-ci/typos@master 14 | -------------------------------------------------------------------------------- /.github/workflows/security.yml: -------------------------------------------------------------------------------- 1 | name: Security audit 2 | on: 3 | schedule: 4 | - cron: '0 0 * * 1' 5 | pull_request: 6 | branches: 7 | - main 8 | jobs: 9 | audit: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v6 13 | - uses: actions-rs/audit-check@v1 14 | with: 15 | token: ${{ secrets.GITHUB_TOKEN }} 16 | -------------------------------------------------------------------------------- /tests/spec/random.rs: 
-------------------------------------------------------------------------------- 1 | use group::{prime::PrimeGroup, Group}; 2 | use num_bigint::BigUint; 3 | use rand::{CryptoRng, Rng}; 4 | 5 | pub trait SInput: PrimeGroup { 6 | fn scalar_from_hex_be(hex_str: &str) -> Option; 7 | } 8 | 9 | pub trait SRandom: Group { 10 | fn randint_big(l: &BigUint, h: &BigUint, rng: &mut (impl Rng + CryptoRng)) -> BigUint; 11 | 12 | fn random_scalar_elt(rng: &mut (impl Rng + CryptoRng)) -> Self::Scalar; 13 | } 14 | -------------------------------------------------------------------------------- /src/duplex_sponge/shake.rs: -------------------------------------------------------------------------------- 1 | //! SHAKE-based duplex sponge implementation 2 | //! 3 | //! This module implements a duplex sponge construction using SHAKE128. 4 | 5 | use crate::duplex_sponge::DuplexSpongeInterface; 6 | use alloc::vec; 7 | use alloc::vec::Vec; 8 | use sha3::digest::{ExtendableOutput, Update}; 9 | use sha3::Shake128; 10 | 11 | /// Duplex sponge construction using SHAKE128. 
#[derive(Clone, Debug)]
pub struct ShakeDuplexSponge(Shake128);

impl DuplexSpongeInterface for ShakeDuplexSponge {
    fn new(iv: [u8; 64]) -> Self {
        let mut hasher = Shake128::default();
        // Pad the 64-byte IV up to a full 168-byte SHAKE128 rate block so the
        // IV occupies exactly one absorb block (domain separation).
        let initial_block = [iv.to_vec(), vec![0u8; 168 - 64]].concat();
        hasher.update(&initial_block);
        Self(hasher)
    }

    fn absorb(&mut self, input: &[u8]) {
        self.0.update(input);
    }

    fn squeeze(&mut self, length: usize) -> Vec<u8> {
        // Clone before finalizing so the sponge can keep absorbing afterwards.
        let mut output = vec![0u8; length];
        self.0.clone().finalize_xof_into(&mut output);
        output
    }
}
--------------------------------------------------------------------------------
/.github/workflows/test-vectors.yml:
--------------------------------------------------------------------------------
name: Verify Test Vectors

on:
  push:
    paths:
      # Fixed: vectors live under tests/spec/vectors (see the repo tree and
      # include_str! in tests/spec_duplex_sponge.rs), not src/tests/spec/vectors.
      - 'tests/spec/vectors/*.json'
  pull_request:
    paths:
      - 'tests/spec/vectors/*.json'
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * 0'

jobs:
  verify-checksums:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
      # Compare the checked-in vectors against the upstream IETF draft repo.
      - run: |
          curl -sL https://raw.githubusercontent.com/mmaker/draft-irtf-cfrg-sigma-protocols/main/poc/vectors/duplexSpongeVectors.json -o /tmp/duplexSpongeVectors.json
          [ "$(sha256sum tests/spec/vectors/duplexSpongeVectors.json | cut -d' ' -f1)" = "$(sha256sum /tmp/duplexSpongeVectors.json | cut -d' ' -f1)" ] || exit 1
          curl -sL https://raw.githubusercontent.com/mmaker/draft-irtf-cfrg-sigma-protocols/main/poc/vectors/testSigmaProtocols.json -o /tmp/testSigmaProtocols.json
          [ "$(sha256sum tests/spec/vectors/testSigmaProtocols.json | cut -d' ' -f1)" = "$(sha256sum /tmp/testSigmaProtocols.json | cut -d' ' -f1)" ] || exit 1
--------------------------------------------------------------------------------
/src/duplex_sponge/mod.rs:
--------------------------------------------------------------------------------
1 | //! Duplex Sponge Interface 2 | //! 3 | //! This module defines the [`DuplexSpongeInterface`] trait, which provides 4 | //! a generic interface for cryptographic sponge functions that support 5 | //! duplex operations: alternating absorb and squeeze phases. 6 | 7 | use alloc::vec::Vec; 8 | 9 | pub mod keccak; 10 | pub mod shake; 11 | 12 | /// A trait defining the behavior of a duplex sponge construction. 13 | /// 14 | /// A duplex sponge allows for: 15 | /// - **Absorbing** input data into the sponge state 16 | /// - **Squeezing** output data from the sponge state 17 | /// 18 | /// This is the core primitive used for building cryptographic codecs. 19 | pub trait DuplexSpongeInterface { 20 | /// Creates a new sponge instance with a given initialization vector (IV). 21 | /// 22 | /// The IV enables domain separation and reproducibility between parties. 23 | fn new(iv: [u8; 64]) -> Self 24 | where 25 | Self: Sized; 26 | 27 | /// Absorbs input data into the sponge state. 28 | fn absorb(&mut self, input: &[u8]); 29 | 30 | /// Squeezes output data from the sponge state. 31 | fn squeeze(&mut self, length: usize) -> Vec; 32 | } 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2025, sigma-rs authors 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 
14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 19 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 23 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy rust documentation to Pages 2 | 3 | on: 4 | # Runs on pushes targeting the default branch 5 | push: 6 | branches: ["main"] 7 | 8 | # Allows you to run this workflow manually from the Actions tab 9 | workflow_dispatch: 10 | 11 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 12 | permissions: 13 | contents: read 14 | pages: write 15 | id-token: write 16 | 17 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 18 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
19 | concurrency: 20 | group: "pages" 21 | cancel-in-progress: false 22 | 23 | jobs: 24 | # Single deploy job since we're just deploying 25 | deploy: 26 | environment: 27 | name: github-pages 28 | url: ${{ steps.deployment.outputs.page_url }} 29 | runs-on: ubuntu-latest 30 | steps: 31 | - name: git clone 32 | uses: actions/checkout@v6 33 | - name: install toolchain 34 | uses: dtolnay/rust-toolchain@nightly 35 | - name: cargo doc 36 | run: cargo doc --all-features 37 | - name: page configuration 38 | uses: actions/configure-pages@v3 39 | - name: Upload artifact 40 | uses: actions/upload-pages-artifact@v3 41 | with: 42 | # Upload entire repository 43 | path: 'target/doc' 44 | - name: Deploy to GitHub Pages 45 | id: deployment 46 | uses: actions/deploy-pages@v4 47 | -------------------------------------------------------------------------------- /.github/workflows/lint-fmt.yml: -------------------------------------------------------------------------------- 1 | 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - '**.rs' 7 | - 'Cargo.toml' 8 | - 'Cargo.lock' 9 | 10 | name: Linter and Formatter 11 | 12 | jobs: 13 | format: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v6 18 | with: 19 | token: ${{ secrets.GITHUB_TOKEN }} 20 | 21 | - name: Run cargo fmt 22 | run: cargo fmt --all -- --check 23 | 24 | - name: Check for formatting differences 25 | id: fmt-check 26 | run: | 27 | if ! cargo fmt --all -- --check; then 28 | echo "Formatting differences found" 29 | cargo fmt --all 30 | echo "needs_commit=true" >> $GITHUB_OUTPUT 31 | else 32 | echo "No formatting differences" 33 | echo "needs_commit=false" >> $GITHUB_OUTPUT 34 | fi 35 | 36 | - name: Commit and push formatting changes 37 | if: steps.fmt-check.outputs.needs_commit == 'true' 38 | run: | 39 | git config --local user.name 'github-actions[bot]' 40 | git config --local user.email 'github-actions[bot]@users.noreply.github.com' 41 | git add . 
42 | git commit -m "Apply rustfmt changes" 43 | git push 44 | 45 | clippy: 46 | name: Clippy 47 | env: 48 | CARGO_INCREMENTAL: 0 49 | CARGO_TERM_COLOR: always 50 | runs-on: ubuntu-latest 51 | steps: 52 | - uses: actions/checkout@v6 53 | - run: cargo clippy --all-targets -- -D warnings 54 | -------------------------------------------------------------------------------- /tests/spec/rng.rs: -------------------------------------------------------------------------------- 1 | use rand::{CryptoRng, Error, RngCore}; 2 | use sha2::{Digest, Sha256}; 3 | 4 | pub struct TestDRNG { 5 | seed: [u8; 32], 6 | } 7 | 8 | impl TestDRNG { 9 | pub fn new(seed: &[u8]) -> Self { 10 | let mut hasher = Sha256::new(); 11 | hasher.update(seed); 12 | let result = hasher.finalize(); 13 | let mut seed_bytes = [0u8; 32]; 14 | seed_bytes.copy_from_slice(&result); 15 | Self { seed: seed_bytes } 16 | } 17 | } 18 | 19 | impl RngCore for TestDRNG { 20 | fn next_u32(&mut self) -> u32 { 21 | let val = u32::from_be_bytes([self.seed[0], self.seed[1], self.seed[2], self.seed[3]]); 22 | let mut hasher = Sha256::new(); 23 | hasher.update(val.to_be_bytes()); 24 | let result = hasher.finalize(); 25 | self.seed.copy_from_slice(&result); 26 | val 27 | } 28 | 29 | fn next_u64(&mut self) -> u64 { 30 | ((self.next_u32() as u64) << 32) | (self.next_u32() as u64) 31 | } 32 | 33 | fn fill_bytes(&mut self, dest: &mut [u8]) { 34 | let mut i = 0; 35 | while i < dest.len() { 36 | let rand = self.next_u32().to_be_bytes(); 37 | for b in rand.iter() { 38 | if i < dest.len() { 39 | dest[i] = *b; 40 | i += 1; 41 | } else { 42 | break; 43 | } 44 | } 45 | } 46 | } 47 | 48 | fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { 49 | self.fill_bytes(dest); 50 | Ok(()) 51 | } 52 | } 53 | 54 | impl CryptoRng for TestDRNG {} 55 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 
1 | name: Rust build 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | minimal-setup: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v6 18 | - name: Build 19 | run: cargo build --verbose 20 | - name: Run tests 21 | run: cargo test --verbose --no-fail-fast 22 | - name: Run benchmark test 23 | # Run the msm benchmark, just to ensure it isn't broken. 24 | run: cargo bench --bench msm -- --quick 25 | 26 | no-std-check: 27 | runs-on: ubuntu-latest 28 | 29 | steps: 30 | - uses: actions/checkout@v6 31 | - name: Check no_std compilation 32 | run: | 33 | echo "Checking no_std compilation..." 34 | cargo check --no-default-features --verbose 35 | - name: Check std compilation 36 | run: | 37 | echo "Checking std compilation..." 38 | cargo check --verbose 39 | - name: Check all features 40 | run: | 41 | echo "Checking with all features..." 42 | cargo check --all-features --verbose 43 | 44 | 45 | full-setup: 46 | 47 | runs-on: ubuntu-latest 48 | strategy: 49 | matrix: 50 | toolchain: [nightly] 51 | 52 | steps: 53 | - uses: actions/checkout@v6 54 | - run: rustup update ${{ matrix.toolchain }} 55 | - name: Build (nightly) 56 | run: cargo +${{ matrix.toolchain }} build --all-features --verbose 57 | - name: Build no_std (nightly) 58 | run: cargo +${{ matrix.toolchain }} build --no-default-features --verbose 59 | - name: Run tests (nightly) 60 | run: cargo +${{ matrix.toolchain }} test --all-features --verbose --no-fail-fast 61 | 62 | wasm: 63 | runs-on: ubuntu-latest 64 | 65 | steps: 66 | - uses: actions/checkout@v6 67 | - name: Setup `wasmtime` 68 | uses: bytecodealliance/actions/wasmtime/setup@v1 69 | - name: Build 70 | run: cargo build --target wasm32-wasip1 --verbose 71 | - name: Run tests 72 | run: cargo test --target wasm32-wasip1 --verbose --no-fail-fast 73 | -------------------------------------------------------------------------------- 
/tests/spec/bls12_381.rs:
--------------------------------------------------------------------------------
use bls12_381::G1Projective;
use ff::PrimeField;
use group::Group;
use hex::FromHex;
use num_bigint::BigUint;
use rand::{CryptoRng, Rng};
use subtle::CtOption;

use crate::spec::random::{SInput, SRandom};

impl SInput for G1Projective {
    /// Parse a 32-byte big-endian hex string into a BLS12-381 scalar.
    ///
    /// Returns `None` if the string is not valid hex, is not exactly
    /// 32 bytes, or does not encode a canonical field element.
    fn scalar_from_hex_be(hex_str: &str) -> Option<Self::Scalar> {
        let be_bytes = Vec::from_hex(hex_str).ok()?;
        if be_bytes.len() != 32 {
            return None;
        }

        // `from_repr` expects a little-endian representation; reverse the
        // big-endian input bytes.
        let mut le_bytes = [0u8; 32];
        for (i, b) in be_bytes.iter().enumerate() {
            le_bytes[31 - i] = *b;
        }

        let ctopt: CtOption<<G1Projective as Group>::Scalar> =
            <G1Projective as Group>::Scalar::from_repr(le_bytes);
        if bool::from(ctopt.is_some()) {
            Some(ctopt.unwrap())
        } else {
            None
        }
    }
}

impl SRandom for G1Projective {
    /// Sample a `BigUint` in `[l, h)` by rejection sampling on the bit
    /// length of `h - l`.
    ///
    /// # Panics
    /// Panics if `l > h`.
    fn randint_big(l: &BigUint, h: &BigUint, rng: &mut (impl Rng + CryptoRng)) -> BigUint {
        assert!(l <= h);
        let range = h - l;
        let bits = range.bits();
        // Empty range (l == h): the only representable value is l itself.
        // Without this guard, `val % range` below would divide by zero.
        if bits == 0 {
            return l.clone();
        }
        let bytes_needed = bits.div_ceil(8) as usize;

        // Allocate the sampling buffer once and refill it on each attempt,
        // rather than reallocating inside the rejection loop. The sequence of
        // RNG calls is unchanged, so deterministic test vectors still match.
        let mut buf = vec![0u8; bytes_needed];
        loop {
            rng.fill_bytes(&mut buf);
            let val = BigUint::from_bytes_be(&buf);
            if val.bits() <= bits {
                // NOTE(review): the final modulo retains a small bias when the
                // range is not a power of two; acceptable for test helpers.
                return l + (val % &range);
            }
        }
    }

    /// Sample a random nonzero scalar below the BLS12-381 scalar field order.
    fn random_scalar_elt(rng: &mut (impl Rng + CryptoRng)) -> Self::Scalar {
        let low = BigUint::parse_bytes(b"1", 10).unwrap();
        // The BLS12-381 scalar field modulus r (hex, big-endian).
        let high = BigUint::parse_bytes(
            b"73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000",
            16,
        )
        .unwrap();
        let rand = Self::randint_big(&low, &high, rng);
        // Left-pad to 64 hex chars so the parser sees exactly 32 bytes.
        let mut hex_string = rand.to_str_radix(16);
        if hex_string.len() < 64 {
            hex_string = format!("{hex_string:0>64}");
        }
        G1Projective::scalar_from_hex_be(&hex_string).unwrap()
    }
}
--------------------------------------------------------------------------------
/examples/schnorr.rs: -------------------------------------------------------------------------------- 1 | //! Example: Schnorr proof of knowledge. 2 | //! 3 | //! This example demonstrates how to prove knowledge of a discrete logarithm using `sigma-rs`. 4 | //! 5 | //! The prover convinces a verifier that it knows a secret $x$ such that: $$P = x \cdot G$$ 6 | //! 7 | //! where $G$ is a generator of a prime-order group $\mathbb{G}$ and $P$ is a public group element. 8 | 9 | use curve25519_dalek::scalar::Scalar; 10 | use curve25519_dalek::RistrettoPoint; 11 | use group::Group; 12 | use rand::rngs::OsRng; 13 | 14 | use sigma_proofs::errors::Error; 15 | use sigma_proofs::LinearRelation; 16 | 17 | type ProofResult = Result; 18 | 19 | /// Create a discrete logarithm relation for the given public key P 20 | #[allow(non_snake_case)] 21 | fn create_relation(P: RistrettoPoint) -> LinearRelation { 22 | let mut relation = LinearRelation::new(); 23 | 24 | let x = relation.allocate_scalar(); 25 | let G = relation.allocate_element(); 26 | let P_var = relation.allocate_eq(x * G); 27 | relation.set_element(G, RistrettoPoint::generator()); 28 | relation.set_element(P_var, P); 29 | 30 | relation 31 | } 32 | 33 | /// Prove knowledge of the discrete logarithm: given witness x and public key P, 34 | /// generate a proof that P = x * G 35 | #[allow(non_snake_case)] 36 | fn prove(x: Scalar, P: RistrettoPoint) -> ProofResult> { 37 | let nizk = create_relation(P).into_nizk(b"sigma-proofs-example"); 38 | nizk?.prove_batchable(&vec![x], &mut OsRng) 39 | } 40 | 41 | /// Verify a proof of knowledge of discrete logarithm for the given public key P 42 | #[allow(non_snake_case)] 43 | fn verify(P: RistrettoPoint, proof: &[u8]) -> ProofResult<()> { 44 | let nizk = create_relation(P).into_nizk(b"sigma-proofs-example"); 45 | nizk?.verify_batchable(proof) 46 | } 47 | 48 | #[allow(non_snake_case)] 49 | fn main() { 50 | let x = Scalar::random(&mut OsRng); // Private key (witness) 51 | let P = 
RistrettoPoint::generator() * x; // Public key (statement) 52 | 53 | println!("Generated new key pair:"); 54 | println!("Public key P: {:?}", hex::encode(P.compress().as_bytes())); 55 | 56 | match prove(x, P) { 57 | Ok(proof) => { 58 | println!("Proof generated successfully:"); 59 | println!("Proof (hex): {}", hex::encode(&proof)); 60 | 61 | // Verify the proof 62 | match verify(P, &proof) { 63 | Ok(()) => println!("✓ Proof verified successfully!"), 64 | Err(e) => println!("✗ Proof verification failed: {e:?}"), 65 | } 66 | } 67 | Err(e) => println!("✗ Failed to generate proof: {e:?}"), 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sigma-proofs" 3 | version = "0.1.0-sigma" 4 | authors = [ 5 | "Nugzari Uzoevi ", 6 | "Michele Orrù ", 7 | "Ian Goldberg ", 8 | "Victor Snyder-Graf ", 9 | "Lénaïck Gouriou " 10 | ] 11 | edition = "2021" 12 | license = "BSD-2-Clause" 13 | readme = "README.md" 14 | repository = "https://github.com/sigma-rs/sigma-proofs" 15 | documentation = "https://docs.rs/sigma-proofs" 16 | categories = ["cryptography"] 17 | keywords = ["cryptography", "zero-knowledge", "NIZK", "sigma-protocols"] 18 | description = "A toolkit for auto-generated implementations of Σ-protocols" 19 | exclude = [ 20 | ".gitignore" 21 | ] 22 | 23 | [features] 24 | default = ["std"] 25 | std = ["thiserror", "rand", "num-bigint/std", "num-traits/std", "sha3/std", "rand_core/std"] 26 | 27 | [dependencies] 28 | ahash = { version = "0.8", default-features = false } 29 | ff = { version = "0.13", features = ["derive"] } 30 | group = "0.13.0" 31 | hashbrown = { version = "0.15", default-features = false } 32 | keccak = { version = "0.1.5", default-features = false } 33 | num-bigint = { version = "0.4.6", default-features = false } 34 | num-traits = { version = "0.2.19", default-features = false, features = ["libm"] 
} 35 | rand = { version = "0.8.5", optional = true } 36 | rand_core = { version = "0.6", default-features = false } 37 | sha3 = { version = "0.10.8", default-features = false } 38 | subtle = { version = "2.6.1", default-features = false } 39 | thiserror = { version = "1", optional = true } 40 | zerocopy = { version = "0.8", default-features = false } 41 | zeroize = { version = "1.8.1", default-features = false, features = ["alloc"] } 42 | 43 | [dev-dependencies] 44 | bls12_381 = "0.8.0" 45 | curve25519-dalek = { version = "4", default-features = false, features = ["serde", "rand_core", "alloc", "digest", "precomputed-tables", "group"] } 46 | hex = "0.4" 47 | hex-literal = "0.4" 48 | json = "0.12.4" 49 | k256 = { version = "0.13", features = ["arithmetic"] } 50 | libtest-mimic = "0.8.1" 51 | p256 = { version = "0.13", features = ["arithmetic"] } 52 | serde = { version = "1.0.219", features = ["derive"] } 53 | serde_json = "1.0.140" 54 | sha2 = "0.10" 55 | 56 | # Include criterion without rayon when compiling for the WASM WASIv1 environment, and with rayon otherwise. 57 | [target.'cfg(not(all(target_os = "wasi", target_env = "p1")))'.dev-dependencies] 58 | criterion = { version = "0.8", features = ["html_reports"] } 59 | # TODO(victor): Benchmarking works on my machine in WASM, but criterion does not build in CI right now. 60 | # [target.'cfg(all(target_os = "wasi", target_env = "p1"))'.dev-dependencies] 61 | # criterion = { version = "0.8", default-features = false } 62 | 63 | [[bench]] 64 | name = "msm" 65 | harness = false 66 | 67 | [profile.dev] 68 | # Makes tests run much faster at the cost of slightly longer builds and worse debug info. 
69 | opt-level = 1 70 | 71 | -------------------------------------------------------------------------------- /tests/spec_duplex_sponge.rs: -------------------------------------------------------------------------------- 1 | use libtest_mimic::{Arguments, Failed, Trial}; 2 | use serde::{Deserialize, Serialize}; 3 | use sigma_proofs::{DuplexSpongeInterface, KeccakDuplexSponge, ShakeDuplexSponge}; 4 | use std::collections::HashMap; 5 | 6 | #[derive(Debug, Deserialize, Serialize)] 7 | struct TestVector { 8 | #[serde(rename = "Expected")] 9 | expected: String, 10 | #[serde(rename = "HashFunction")] 11 | hash_function: String, 12 | #[serde(rename = "Operations")] 13 | operations: Vec, 14 | #[serde(rename = "IV")] 15 | iv: String, 16 | } 17 | 18 | #[derive(Debug, Deserialize, Serialize)] 19 | struct Operation { 20 | #[serde(rename = "type")] 21 | op_type: String, 22 | #[serde(skip_serializing_if = "Option::is_none")] 23 | data: Option, 24 | #[serde(skip_serializing_if = "Option::is_none")] 25 | length: Option, 26 | } 27 | 28 | fn hex_decode(hex_str: &str) -> Vec { 29 | (0..hex_str.len()) 30 | .step_by(2) 31 | .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap()) 32 | .collect() 33 | } 34 | 35 | fn load_test_vectors() -> HashMap { 36 | let json_data = include_str!("./spec/vectors/duplexSpongeVectors.json"); 37 | serde_json::from_str(json_data).expect("Failed to parse test vectors JSON") 38 | } 39 | 40 | fn run_test_vector(name: &str, test_vector: &TestVector) -> Result<(), Failed> { 41 | let iv_bytes = hex_decode(&test_vector.iv); 42 | let iv_array: [u8; 64] = iv_bytes.try_into().unwrap(); 43 | 44 | let mut sponge: Box = match test_vector.hash_function.as_str() { 45 | "Keccak-f[1600] overwrite mode" => Box::new(KeccakDuplexSponge::new(iv_array)), 46 | "SHAKE128" => Box::new(ShakeDuplexSponge::new(iv_array)), 47 | _ => panic!("Unknown hash function: {}", test_vector.hash_function), 48 | }; 49 | let mut final_output = Vec::new(); 50 | 51 | for operation in 
&test_vector.operations { 52 | match operation.op_type.as_str() { 53 | "absorb" => { 54 | if let Some(data_hex) = &operation.data { 55 | let data = hex_decode(data_hex); 56 | sponge.absorb(&data); 57 | } 58 | } 59 | "squeeze" => { 60 | if let Some(length) = operation.length { 61 | let output = sponge.squeeze(length); 62 | final_output = output; 63 | } 64 | } 65 | _ => panic!("Unknown operation type: {}", operation.op_type), 66 | } 67 | } 68 | 69 | assert_eq!( 70 | hex::encode(final_output), 71 | test_vector.expected, 72 | "Test vector '{name}' failed" 73 | ); 74 | Ok(()) 75 | } 76 | 77 | #[test] 78 | fn test_all_duplex_sponge_vectors() { 79 | let test_vectors = load_test_vectors(); 80 | 81 | let tests = test_vectors 82 | .into_iter() 83 | .map(|(name, test_vector)| { 84 | Trial::test( 85 | format!("tests::spec::test_duplex_sponge::{}", name), 86 | move || run_test_vector(&name, &test_vector), 87 | ) 88 | }) 89 | .collect(); 90 | 91 | libtest_mimic::run(&Arguments::from_args(), tests).exit(); 92 | } 93 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | //! # Error: Error Types for Zero-Knowledge Proofs. 2 | //! 3 | //! This module defines the [`Error`] enum, which enumerates the possible failure modes 4 | //! encountered during the execution of interactive or non-interactive Sigma protocols. 5 | //! 6 | //! These errors include: 7 | //! - Failed proof verification, 8 | //! - Mismatched parameter lengths (e.g., during batch verification), 9 | //! - Access to unassigned group variables in constraint systems. 10 | 11 | use alloc::string::String; 12 | #[cfg(not(feature = "std"))] 13 | use core::fmt; 14 | 15 | /// Represents an invalid instance error. 
16 | #[derive(Debug)] 17 | #[cfg_attr(feature = "std", derive(thiserror::Error))] 18 | #[cfg_attr(feature = "std", error("Invalid instance: {message}"))] 19 | pub struct InvalidInstance { 20 | /// The error message describing what's invalid about the instance. 21 | pub message: String, 22 | } 23 | 24 | impl InvalidInstance { 25 | /// Create a new InvalidInstance error with the given message. 26 | pub fn new(message: impl Into) -> Self { 27 | Self { 28 | message: message.into(), 29 | } 30 | } 31 | } 32 | 33 | impl From for Error { 34 | fn from(_err: InvalidInstance) -> Self { 35 | Error::InvalidInstanceWitnessPair 36 | } 37 | } 38 | 39 | /// Represents an error encountered during the execution of a Sigma protocol. 40 | /// 41 | /// This may occur during proof generation, response computation, or verification. 42 | #[non_exhaustive] 43 | #[derive(Debug)] 44 | #[cfg_attr(feature = "std", derive(thiserror::Error))] 45 | pub enum Error { 46 | /// The proof is invalid: verification failed. 47 | #[cfg_attr(feature = "std", error("Verification failed."))] 48 | VerificationFailure, 49 | /// Indicates an invalid statement/witness pair 50 | #[cfg_attr(feature = "std", error("Invalid instance/witness pair."))] 51 | InvalidInstanceWitnessPair, 52 | /// Uninitialized group element variable. 53 | #[cfg_attr( 54 | feature = "std", 55 | error("Uninitialized group element variable: {var_debug}") 56 | )] 57 | UnassignedGroupVar { 58 | /// Debug representation of the unassigned variable. 
59 | var_debug: String, 60 | }, 61 | } 62 | 63 | // Manual Display implementation for no_std compatibility 64 | #[cfg(not(feature = "std"))] 65 | impl fmt::Display for InvalidInstance { 66 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 67 | write!(f, "Invalid instance: {}", self.message) 68 | } 69 | } 70 | 71 | #[cfg(not(feature = "std"))] 72 | impl fmt::Display for Error { 73 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 74 | match self { 75 | Error::VerificationFailure => write!(f, "Verification failed."), 76 | Error::InvalidInstanceWitnessPair => write!(f, "Invalid instance/witness pair."), 77 | Error::UnassignedGroupVar { var_debug } => { 78 | write!(f, "Uninitialized group element variable: {}", var_debug) 79 | } 80 | } 81 | } 82 | } 83 | 84 | pub type Result = core::result::Result; 85 | 86 | /// Construct an `Ok` value of type `Result`. 87 | pub const fn Ok(value: T) -> Result { 88 | Result::Ok(value) 89 | } 90 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sigma-proofs 2 | 3 | A Rust library for building and composing Σ-protocols (Sigma protocols) for zero-knowledge proofs. 4 | 5 | This library focuses on any statement that can be expressed as a linear relation over group elements. Using the Fiat-Shamir transformation, these interactive protocols become non-interactive proofs suitable for real-world applications. 
6 | 7 | ## Quick Example 8 | 9 | ```rust 10 | use curve25519_dalek::ristretto::RistrettoPoint; 11 | use curve25519_dalek::scalar::Scalar; 12 | use group::Group; 13 | let mut instance = sigma_proofs::LinearRelation::new(); 14 | let mut rng = rand::thread_rng(); 15 | 16 | // Define the statement: 17 | // Prove knowledge of (x, r) such that C = x·G + r·H (Pedersen commitment) 18 | let [var_x, var_r] = instance.allocate_scalars(); 19 | let [var_G, var_H] = instance.allocate_elements(); 20 | instance.allocate_eq(var_G * var_x + var_H * var_r); 21 | instance.set_elements([(var_G, RistrettoPoint::generator()), (var_H, RistrettoPoint::random(&mut rng))]); 22 | 23 | // Assign the image of the linear map. 24 | let witness = vec![Scalar::random(&mut rng), Scalar::random(&mut rng)]; 25 | instance.compute_image(&witness); 26 | 27 | // Create a non-interactive argument for the instance. 28 | let nizk = instance.into_nizk(b"your session identifier").unwrap(); 29 | let narg_string: Vec = nizk.prove_batchable(&witness, &mut rng).unwrap(); 30 | // Print the narg string. 
31 | println!("{}", hex::encode(narg_string)); 32 | ``` 33 | 34 | ## Composition Example 35 | 36 | Prove complex statements with AND/OR logic: 37 | 38 | ```rust 39 | // Prove: (I know x for A = x·G) OR (I know y,z for B = y·G AND C = z·H) 40 | let or_protocol = Protocol::Or(vec![ 41 | Protocol::from(dlog_relation), // First option 42 | Protocol::And(vec![ // Second option 43 | Protocol::from(relation_B), 44 | Protocol::from(relation_C), 45 | ]) 46 | ]); 47 | 48 | // If we know the second option, create witness for index 1 49 | let witness = ProtocolWitness::Or(1, vec![ 50 | ProtocolWitness::And(vec![ 51 | ProtocolWitness::Simple(vec![y]), 52 | ProtocolWitness::Simple(vec![z]), 53 | ]) 54 | ]); 55 | ``` 56 | 57 | ## Examples 58 | 59 | See the [examples/](examples/) directory: 60 | - `schnorr.rs` - Discrete logarithm proof 61 | - `simple_composition.rs` - OR-proof composition 62 | 63 | ## Status 64 | 65 | **⚠️ NOT YET READY FOR PRODUCTION USE** 66 | 67 | This library is under active development. While the API is stabilizing, there are no guarantees on proof compatibility between versions. 68 | 69 | ## Background 70 | 71 | This crate continues the work from the original `zkp` toolkit in [`dalek-cryptography`](https://github.com/dalek-cryptography), modernized with updated dependencies and improved Fiat-Shamir transforms. It implements the general framework for Sigma protocols as described in [Maurer (2009)](https://doi.org/10.1007/978-3-642-02384-2_17). 72 | 73 | ## Funding 74 | 75 | This project is funded through [NGI0 Entrust](https://nlnet.nl/entrust), a fund established by [NLnet](https://nlnet.nl) with financial support from the European Commission's [Next Generation Internet](https://ngi.eu) program. Learn more at the [NLnet project page](https://nlnet.nl/project/sigmaprotocols). 
76 | 77 | [NLnet foundation logo](https://nlnet.nl) 78 | [NGI Zero Logo](https://nlnet.nl/entrust) 79 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # Σ-rs: Sigma Protocols in Rust 2 | //! 3 | //! **Σ-rs** is a Rust library for constructing zero-knowledge proofs using Sigma protocols (Σ-protocols). 4 | //! It allows proving knowledge of secret data without revealing the data itself. 5 | //! 6 | //! --- 7 | //! 8 | //! ## What are Sigma Protocols? 9 | //! 10 | //! Sigma protocols are interactive cryptographic protocols that allow a prover to convince 11 | //! a verifier they know a secret (like a private key) without revealing the secret itself. 12 | //! They follow a simple three-step pattern: commitment, challenge, response. 13 | //! 14 | //! --- 15 | //! 16 | //! ## Basic Usage 17 | //! 18 | //! ```rust 19 | //! # use curve25519_dalek::ristretto::RistrettoPoint; 20 | //! # use curve25519_dalek::scalar::Scalar; 21 | //! # use group::Group; 22 | //! let mut instance = sigma_proofs::LinearRelation::new(); 23 | //! let mut rng = rand::thread_rng(); 24 | //! 25 | //! // Define the statement: 26 | //! // Prove knowledge of (x, r) such that C = x·G + r·H (Pedersen commitment) 27 | //! let [var_x, var_r] = instance.allocate_scalars(); 28 | //! let [var_G, var_H] = instance.allocate_elements(); 29 | //! instance.allocate_eq(var_G * var_x + var_H * var_r); 30 | //! instance.set_elements([(var_G, RistrettoPoint::generator()), (var_H, RistrettoPoint::random(&mut rng))]); 31 | //! 32 | //! // Assign the image of the linear map. 33 | //! let witness = vec![Scalar::random(&mut rng), Scalar::random(&mut rng)]; 34 | //! instance.compute_image(&witness); 35 | //! 36 | //! // Create a non-interactive argument for the instance. 37 | //! let nizk = instance.into_nizk(b"your session identifier").unwrap(); 38 | //! 
let narg_string: Vec = nizk.prove_batchable(&witness, &mut rng).unwrap(); 39 | //! // Print the narg string. 40 | //! println!("{}", hex::encode(narg_string)); 41 | //! ``` 42 | //! 43 | //! The library provides building blocks for creating zero-knowledge proofs: 44 | //! 45 | //! 1. Define your mathematical relation using [`LinearRelation`] 46 | //! 2. Convert to non-interactive using [`fiat_shamir::Nizk`] 47 | //! 3. Generate and verify proofs. 48 | //! 49 | //! --- 50 | //! 51 | //! ## Core Components 52 | //! 53 | //! - **[`traits::SigmaProtocol`]**: The fundamental three-move protocol interface 54 | //! - **[`linear_relation::LinearRelation`]**: Express mathematical relations over groups 55 | //! - **[`fiat_shamir::Nizk`]**: Convert interactive proofs to standalone proofs 56 | //! - **[`composition::ComposedRelation`]**: Combine multiple proofs together 57 | //! - **[`codec`]**: Mapping from and to the hash function domain 58 | //! 59 | //! --- 60 | //! 61 | //! Σ-rs is designed to be modular, extensible, and easy to integrate into different 62 | //! groups, protocols depending on sigma protocols, and other proof systems. 
63 | 64 | #![cfg_attr(not(feature = "std"), no_std)] 65 | #![allow(non_snake_case)] 66 | #![doc(html_logo_url = "https://mmaker.github.io/sigma-rs/")] 67 | #![deny(unused_variables)] 68 | #![deny(unused_mut)] 69 | 70 | extern crate alloc; 71 | 72 | pub mod codec; 73 | pub mod composition; 74 | pub mod errors; 75 | pub mod group; 76 | pub mod linear_relation; 77 | pub mod traits; 78 | 79 | pub(crate) mod duplex_sponge; 80 | pub(crate) mod fiat_shamir; 81 | pub(crate) mod schnorr_protocol; 82 | 83 | pub use duplex_sponge::{ 84 | keccak::KeccakDuplexSponge, shake::ShakeDuplexSponge, DuplexSpongeInterface, 85 | }; 86 | pub use fiat_shamir::Nizk; 87 | pub use group::msm::VariableMultiScalarMul; 88 | pub use linear_relation::LinearRelation; 89 | 90 | #[deprecated = "Use sigma_proofs::group::serialization instead"] 91 | pub use group::serialization; 92 | -------------------------------------------------------------------------------- /examples/simple_composition.rs: -------------------------------------------------------------------------------- 1 | //! OR-proof composition example. 2 | 3 | use curve25519_dalek::ristretto::RistrettoPoint; 4 | use curve25519_dalek::scalar::Scalar; 5 | use group::Group; 6 | use rand::rngs::OsRng; 7 | use sigma_proofs::{ 8 | codec::Shake128DuplexSponge, 9 | composition::{ComposedRelation, ComposedWitness}, 10 | errors::Error, 11 | LinearRelation, Nizk, 12 | }; 13 | 14 | type G = RistrettoPoint; 15 | type ProofResult = Result; 16 | 17 | /// Create an OR relation between two statements: 18 | /// 1. Knowledge of discrete log: P1 = x1 * G 19 | /// 2. 
Knowledge of DLEQ: (P2 = x2 * G, Q = x2 * H) 20 | #[allow(non_snake_case)] 21 | fn create_relation(P1: G, P2: G, Q: G, H: G) -> ComposedRelation { 22 | // First relation: discrete logarithm P1 = x1 * G 23 | let mut rel1 = LinearRelation::::new(); 24 | let x1 = rel1.allocate_scalar(); 25 | let G1 = rel1.allocate_element(); 26 | let P1_var = rel1.allocate_eq(x1 * G1); 27 | rel1.set_element(G1, G::generator()); 28 | rel1.set_element(P1_var, P1); 29 | 30 | // Second relation: DLEQ (P2 = x2 * G, Q = x2 * H) 31 | let mut rel2 = LinearRelation::::new(); 32 | let x2 = rel2.allocate_scalar(); 33 | let G2 = rel2.allocate_element(); 34 | let H_var = rel2.allocate_element(); 35 | let P2_var = rel2.allocate_eq(x2 * G2); 36 | let Q_var = rel2.allocate_eq(x2 * H_var); 37 | rel2.set_element(G2, G::generator()); 38 | rel2.set_element(H_var, H); 39 | rel2.set_element(P2_var, P2); 40 | rel2.set_element(Q_var, Q); 41 | 42 | // Compose into OR protocol 43 | ComposedRelation::or([rel1.canonical().unwrap(), rel2.canonical().unwrap()]) 44 | } 45 | 46 | /// Prove knowledge of one of the witnesses (we know x2 for the DLEQ) 47 | #[allow(non_snake_case)] 48 | fn prove(P1: G, x2: Scalar, H: G) -> ProofResult> { 49 | // Compute public values 50 | let P2 = G::generator() * x2; 51 | let Q = H * x2; 52 | 53 | let instance = create_relation(P1, P2, Q, H); 54 | // Create OR witness with branch 1 being the real one (index 1) 55 | let witness = ComposedWitness::Or(vec![ 56 | ComposedWitness::Simple(vec![Scalar::from(0u64)]), 57 | ComposedWitness::Simple(vec![x2]), 58 | ]); 59 | let nizk = Nizk::<_, Shake128DuplexSponge>::new(b"or_proof_example", instance); 60 | 61 | nizk.prove_batchable(&witness, &mut OsRng) 62 | } 63 | 64 | /// Verify an OR proof given the public values 65 | #[allow(non_snake_case)] 66 | fn verify(P1: G, P2: G, Q: G, H: G, proof: &[u8]) -> ProofResult<()> { 67 | let protocol = create_relation(P1, P2, Q, H); 68 | let nizk = Nizk::<_, Shake128DuplexSponge>::new(b"or_proof_example", 
protocol); 69 | 70 | nizk.verify_batchable(proof) 71 | } 72 | 73 | #[allow(non_snake_case)] 74 | fn main() { 75 | // Setup: We don't know x1, but we do know x2 76 | let x1 = Scalar::random(&mut OsRng); 77 | let x2 = Scalar::random(&mut OsRng); 78 | let H = G::random(&mut OsRng); 79 | 80 | // Compute public values 81 | let P1 = G::generator() * x1; // We don't actually know x1 in the proof 82 | let P2 = G::generator() * x2; // We know x2 83 | let Q = H * x2; // Q = x2 * H 84 | 85 | println!("OR-proof example: Proving knowledge of x1 OR x2"); 86 | println!("(We only know x2, not x1)"); 87 | 88 | match prove(P1, x2, H) { 89 | Ok(proof) => { 90 | println!("Proof generated successfully"); 91 | println!("Proof (hex): {}", hex::encode(&proof)); 92 | 93 | // Verify the proof 94 | match verify(P1, P2, Q, H, &proof) { 95 | Ok(()) => println!("✓ Proof verified successfully!"), 96 | Err(e) => println!("✗ Proof verification failed: {e:?}"), 97 | } 98 | } 99 | Err(e) => println!("✗ Failed to generate proof: {e:?}"), 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /benches/msm.rs: -------------------------------------------------------------------------------- 1 | use std::hint::black_box; 2 | 3 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 4 | use ff::Field; 5 | use group::Group; 6 | use rand::thread_rng; 7 | use sigma_proofs::VariableMultiScalarMul; 8 | 9 | fn bench_msm_curve25519_dalek(c: &mut Criterion) { 10 | use curve25519_dalek::{RistrettoPoint, Scalar}; 11 | 12 | let mut group = c.benchmark_group("MSM curve25519-dalek RistrettoPoint"); 13 | let mut rng = thread_rng(); 14 | 15 | for size in [1, 2, 4, 8, 16, 64, 256, 1024].iter() { 16 | let scalars: Vec = (0..*size).map(|_| Scalar::random(&mut rng)).collect(); 17 | let bases: Vec = (0..*size) 18 | .map(|_| RistrettoPoint::random(&mut rng)) 19 | .collect(); 20 | 21 | group.bench_with_input(BenchmarkId::new("size", size), size, |b, _| { 22 | 
b.iter(|| RistrettoPoint::msm(black_box(&scalars), black_box(&bases))) 23 | }); 24 | } 25 | group.finish(); 26 | } 27 | 28 | fn bench_msm_k256(c: &mut Criterion) { 29 | use k256::{ProjectivePoint, Scalar}; 30 | 31 | let mut group = c.benchmark_group("MSM k256 ProjectivePoint"); 32 | let mut rng = thread_rng(); 33 | 34 | for size in [1, 2, 4, 8, 16, 64, 256, 1024].iter() { 35 | let scalars: Vec = (0..*size).map(|_| Scalar::random(&mut rng)).collect(); 36 | let bases: Vec = (0..*size) 37 | .map(|_| ProjectivePoint::random(&mut rng)) 38 | .collect(); 39 | 40 | group.bench_with_input(BenchmarkId::new("size", size), size, |b, _| { 41 | b.iter(|| ProjectivePoint::msm(black_box(&scalars), black_box(&bases))) 42 | }); 43 | } 44 | group.finish(); 45 | } 46 | 47 | fn bench_msm_p256(c: &mut Criterion) { 48 | use p256::{ProjectivePoint, Scalar}; 49 | 50 | let mut group = c.benchmark_group("MSM p256 ProjectivePoint"); 51 | let mut rng = thread_rng(); 52 | 53 | for size in [1, 2, 4, 8, 16, 64, 256, 1024].iter() { 54 | let scalars: Vec = (0..*size).map(|_| Scalar::random(&mut rng)).collect(); 55 | let bases: Vec = (0..*size) 56 | .map(|_| ProjectivePoint::random(&mut rng)) 57 | .collect(); 58 | 59 | group.bench_with_input(BenchmarkId::new("size", size), size, |b, _| { 60 | b.iter(|| ProjectivePoint::msm(black_box(&scalars), black_box(&bases))) 61 | }); 62 | } 63 | group.finish(); 64 | } 65 | 66 | fn bench_msm_bls12_381_g1(c: &mut Criterion) { 67 | use bls12_381::{G1Projective, Scalar}; 68 | 69 | let mut group = c.benchmark_group("MSM bls12_381 G1Projective"); 70 | let mut rng = thread_rng(); 71 | 72 | for size in [1, 2, 4, 8, 16, 64, 256, 1024].iter() { 73 | let scalars: Vec = (0..*size).map(|_| Scalar::random(&mut rng)).collect(); 74 | let bases: Vec = (0..*size).map(|_| G1Projective::random(&mut rng)).collect(); 75 | 76 | group.bench_with_input(BenchmarkId::new("size", size), size, |b, _| { 77 | b.iter(|| G1Projective::msm(black_box(&scalars), black_box(&bases))) 78 | }); 79 | 
} 80 | group.finish(); 81 | } 82 | 83 | fn bench_msm_bls12_381_g2(c: &mut Criterion) { 84 | use bls12_381::{G2Projective, Scalar}; 85 | 86 | let mut group = c.benchmark_group("MSM bls12_381 G2Projective"); 87 | let mut rng = thread_rng(); 88 | 89 | for size in [1, 2, 4, 8, 16, 64, 256, 1024].iter() { 90 | let scalars: Vec = (0..*size).map(|_| Scalar::random(&mut rng)).collect(); 91 | let bases: Vec = (0..*size).map(|_| G2Projective::random(&mut rng)).collect(); 92 | 93 | group.bench_with_input(BenchmarkId::new("size", size), size, |b, _| { 94 | b.iter(|| G2Projective::msm(black_box(&scalars), black_box(&bases))) 95 | }); 96 | } 97 | group.finish(); 98 | } 99 | 100 | criterion_group!( 101 | benches, 102 | bench_msm_curve25519_dalek, 103 | bench_msm_k256, 104 | bench_msm_p256, 105 | bench_msm_bls12_381_g1, 106 | bench_msm_bls12_381_g2, 107 | ); 108 | criterion_main!(benches); 109 | -------------------------------------------------------------------------------- /src/duplex_sponge/keccak.rs: -------------------------------------------------------------------------------- 1 | //! Keccak-based duplex sponge implementation 2 | //! 3 | //! This module implements a duplex sponge construction using the Keccak-f\[1600\] permutation. 4 | //! It is designed to match test vectors from the original Sage implementation. 5 | 6 | use crate::duplex_sponge::DuplexSpongeInterface; 7 | use alloc::vec::Vec; 8 | use zerocopy::IntoBytes; 9 | 10 | const RATE: usize = 136; 11 | const LENGTH: usize = 136 + 64; 12 | 13 | /// Low-level Keccak-f\[1600\] state representation. 
14 | #[derive(Clone, Default)] 15 | pub struct KeccakPermutationState([u64; LENGTH / 8]); 16 | 17 | impl KeccakPermutationState { 18 | pub fn new(iv: [u8; 64]) -> Self { 19 | let mut state = Self::default(); 20 | state.as_mut()[RATE..RATE + 64].copy_from_slice(&iv); 21 | state 22 | } 23 | 24 | pub fn permute(&mut self) { 25 | keccak::f1600(&mut self.0); 26 | } 27 | } 28 | 29 | impl AsRef<[u8]> for KeccakPermutationState { 30 | fn as_ref(&self) -> &[u8] { 31 | self.0.as_bytes() 32 | } 33 | } 34 | 35 | impl AsMut<[u8]> for KeccakPermutationState { 36 | fn as_mut(&mut self) -> &mut [u8] { 37 | self.0.as_mut_bytes() 38 | } 39 | } 40 | 41 | /// Duplex sponge construction using Keccak-f\[1600\]. 42 | #[derive(Clone)] 43 | pub struct KeccakDuplexSponge { 44 | state: KeccakPermutationState, 45 | absorb_index: usize, 46 | squeeze_index: usize, 47 | } 48 | 49 | impl KeccakDuplexSponge { 50 | pub fn new(iv: [u8; 64]) -> Self { 51 | let state = KeccakPermutationState::new(iv); 52 | KeccakDuplexSponge { 53 | state, 54 | absorb_index: 0, 55 | squeeze_index: RATE, 56 | } 57 | } 58 | } 59 | 60 | impl DuplexSpongeInterface for KeccakDuplexSponge { 61 | fn new(iv: [u8; 64]) -> Self { 62 | KeccakDuplexSponge::new(iv) 63 | } 64 | 65 | fn absorb(&mut self, mut input: &[u8]) { 66 | self.squeeze_index = RATE; 67 | 68 | while !input.is_empty() { 69 | if self.absorb_index == RATE { 70 | self.state.permute(); 71 | self.absorb_index = 0; 72 | } 73 | 74 | let chunk_size = usize::min(RATE - self.absorb_index, input.len()); 75 | let dest = &mut self.state.as_mut()[self.absorb_index..self.absorb_index + chunk_size]; 76 | dest.copy_from_slice(&input[..chunk_size]); 77 | self.absorb_index += chunk_size; 78 | input = &input[chunk_size..]; 79 | } 80 | } 81 | 82 | fn squeeze(&mut self, mut length: usize) -> Vec { 83 | let mut output = Vec::new(); 84 | while length != 0 { 85 | if self.squeeze_index == RATE { 86 | self.state.permute(); 87 | self.squeeze_index = 0; 88 | self.absorb_index = 0; 89 | } 90 
| 91 | let chunk_size = usize::min(RATE - self.squeeze_index, length); 92 | output.extend_from_slice( 93 | &self.state.as_mut()[self.squeeze_index..self.squeeze_index + chunk_size], 94 | ); 95 | self.squeeze_index += chunk_size; 96 | length -= chunk_size; 97 | } 98 | output 99 | } 100 | } 101 | 102 | #[cfg(test)] 103 | mod tests { 104 | use super::*; 105 | use crate::duplex_sponge::DuplexSpongeInterface; 106 | use hex_literal::hex; 107 | 108 | #[test] 109 | fn test_associativity_of_absorb() { 110 | let expected_output = 111 | hex!("efc1c34f94c0d9cfe051561f8206543056ce660fd17834b2eeb9431a4c65bc77"); 112 | let tag = *b"absorb-associativity-domain-----absorb-associativity-domain-----"; 113 | 114 | // Absorb all at once 115 | let mut sponge1 = KeccakDuplexSponge::new(tag); 116 | sponge1.absorb(b"hello world"); 117 | let out1 = sponge1.squeeze(32); 118 | 119 | // Absorb in two parts 120 | let mut sponge2 = KeccakDuplexSponge::new(tag); 121 | sponge2.absorb(b"hello"); 122 | sponge2.absorb(b" world"); 123 | let out2 = sponge2.squeeze(32); 124 | 125 | assert_eq!(out1, expected_output); 126 | assert_eq!(out2, expected_output); 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /tests/spec/custom_schnorr_protocol.rs: -------------------------------------------------------------------------------- 1 | use group::prime::PrimeGroup; 2 | use rand::{CryptoRng, Rng}; 3 | 4 | use crate::spec::random::SRandom; 5 | use sigma_proofs::errors::Error; 6 | use sigma_proofs::linear_relation::{CanonicalLinearRelation, LinearRelation}; 7 | use sigma_proofs::traits::{SigmaProtocol, SigmaProtocolSimulator}; 8 | 9 | pub struct DeterministicSchnorrProof(pub CanonicalLinearRelation); 10 | 11 | impl TryFrom> for DeterministicSchnorrProof { 12 | type Error = Error; 13 | 14 | fn try_from(linear_relation: LinearRelation) -> Result { 15 | let relation = CanonicalLinearRelation::try_from(&linear_relation)?; 16 | Ok(Self(relation)) 17 | } 18 | } 19 | 
20 | impl From> for DeterministicSchnorrProof { 21 | fn from(canonical_relation: CanonicalLinearRelation) -> Self { 22 | Self(canonical_relation) 23 | } 24 | } 25 | 26 | impl SigmaProtocol for DeterministicSchnorrProof { 27 | type Commitment = as SigmaProtocol>::Commitment; 28 | type ProverState = as SigmaProtocol>::ProverState; 29 | type Response = as SigmaProtocol>::Response; 30 | type Witness = as SigmaProtocol>::Witness; 31 | type Challenge = as SigmaProtocol>::Challenge; 32 | 33 | fn prover_commit( 34 | &self, 35 | witness: &Self::Witness, 36 | rng: &mut (impl Rng + CryptoRng), 37 | ) -> Result<(Self::Commitment, Self::ProverState), Error> { 38 | let mut nonces: Vec = Vec::new(); 39 | for _i in 0..self.0.num_scalars { 40 | nonces.push(::random_scalar_elt(rng)); 41 | } 42 | let commitment = self.0.evaluate(&nonces); 43 | let prover_state = (nonces.to_vec(), witness.to_vec()); 44 | Ok((commitment, prover_state)) 45 | } 46 | 47 | fn prover_response( 48 | &self, 49 | state: Self::ProverState, 50 | challenge: &Self::Challenge, 51 | ) -> Result { 52 | self.0.prover_response(state, challenge) 53 | } 54 | 55 | fn verifier( 56 | &self, 57 | commitment: &Self::Commitment, 58 | challenge: &Self::Challenge, 59 | response: &Self::Response, 60 | ) -> Result<(), Error> { 61 | self.0.verifier(commitment, challenge, response) 62 | } 63 | 64 | fn serialize_commitment(&self, commitment: &Self::Commitment) -> Vec { 65 | self.0.serialize_commitment(commitment) 66 | } 67 | 68 | fn serialize_challenge(&self, challenge: &Self::Challenge) -> Vec { 69 | self.0.serialize_challenge(challenge) 70 | } 71 | 72 | fn serialize_response(&self, response: &Self::Response) -> Vec { 73 | self.0.serialize_response(response) 74 | } 75 | 76 | fn deserialize_commitment(&self, data: &[u8]) -> Result { 77 | self.0.deserialize_commitment(data) 78 | } 79 | 80 | fn deserialize_challenge(&self, data: &[u8]) -> Result { 81 | self.0.deserialize_challenge(data) 82 | } 83 | 84 | fn deserialize_response(&self, 
data: &[u8]) -> Result { 85 | self.0.deserialize_response(data) 86 | } 87 | fn instance_label(&self) -> impl AsRef<[u8]> { 88 | self.0.instance_label() 89 | } 90 | 91 | fn protocol_identifier(&self) -> [u8; 64] { 92 | self.0.protocol_identifier() 93 | } 94 | } 95 | 96 | impl SigmaProtocolSimulator for DeterministicSchnorrProof { 97 | fn simulate_response(&self, rng: &mut R) -> Self::Response { 98 | self.0.simulate_response(rng) 99 | } 100 | 101 | fn simulate_transcript( 102 | &self, 103 | rng: &mut R, 104 | ) -> Result<(Self::Commitment, Self::Challenge, Self::Response), Error> { 105 | self.0.simulate_transcript(rng) 106 | } 107 | 108 | fn simulate_commitment( 109 | &self, 110 | challenge: &Self::Challenge, 111 | response: &Self::Response, 112 | ) -> Result { 113 | self.0.simulate_commitment(challenge, response) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/group/serialization.rs: -------------------------------------------------------------------------------- 1 | //! Serialization and deserialization utilities for group elements and scalars. 2 | //! 3 | //! This module provides functions to convert group elements and scalars to and from 4 | //! byte representations using canonical encodings. 5 | 6 | use alloc::vec::Vec; 7 | use ff::PrimeField; 8 | use group::prime::PrimeGroup; 9 | 10 | /// Get the serialized length of a group element in bytes. 11 | /// 12 | /// # Returns 13 | /// The number of bytes required to serialize a group element. 14 | pub fn group_elt_serialized_len() -> usize { 15 | G::Repr::default().as_ref().len() 16 | } 17 | 18 | /// Serialize a slice of group elements into a byte vector. 19 | /// 20 | /// # Parameters 21 | /// - `elements`: A slice of group elements to serialize. 22 | /// 23 | /// # Returns 24 | /// - A `Vec` containing the concatenated canonical compressed byte representations. 
25 | pub fn serialize_elements<'a, G: PrimeGroup>(elements: impl IntoIterator) -> Vec { 26 | let mut bytes = Vec::new(); 27 | for element in elements { 28 | bytes.extend_from_slice(element.to_bytes().as_ref()); 29 | } 30 | bytes 31 | } 32 | 33 | /// Deserialize a byte slice into a vector of group elements. 34 | /// 35 | /// # Parameters 36 | /// - `data`: A byte slice containing the serialized representations of group elements. 37 | /// - `count`: The number of elements to deserialize. 38 | /// 39 | /// # Returns 40 | /// - `Some(Vec)`: The deserialized group elements if all are valid. 41 | /// - `None`: If the byte slice length is incorrect or any element is invalid. 42 | pub fn deserialize_elements(data: &[u8], count: usize) -> Option> { 43 | let element_len = group_elt_serialized_len::(); 44 | let expected_len = count * element_len; 45 | 46 | if data.len() < expected_len { 47 | return None; 48 | } 49 | 50 | let mut elements = Vec::with_capacity(count); 51 | for i in 0..count { 52 | let start = i * element_len; 53 | let end = start + element_len; 54 | let slice = &data[start..end]; 55 | 56 | let mut repr = G::Repr::default(); 57 | repr.as_mut().copy_from_slice(slice); 58 | let element = G::from_bytes(&repr).into(); 59 | let element: Option = element; 60 | elements.push(element?); 61 | } 62 | 63 | Some(elements) 64 | } 65 | 66 | /// Serialize a slice of scalar field elements into a byte vector. 67 | /// 68 | /// This method internally relies on the underlying group serialization function, 69 | /// and is meant to match the Internet Draft for point compression. 70 | /// 71 | /// # Parameters 72 | /// - `scalars`: A slice of scalar field elements to serialize. 73 | /// 74 | /// # Returns 75 | /// - A `Vec` containing the scalar bytes in big-endian order. 
76 | pub fn serialize_scalars(scalars: &[G::Scalar]) -> Vec { 77 | let mut bytes = Vec::new(); 78 | for scalar in scalars { 79 | let mut scalar_bytes = scalar.to_repr().as_ref().to_vec(); 80 | scalar_bytes.reverse(); 81 | bytes.extend_from_slice(&scalar_bytes); 82 | } 83 | bytes 84 | } 85 | 86 | /// Deserialize a byte slice into a vector of scalar field elements. 87 | /// 88 | /// # Parameters 89 | /// - `data`: A byte slice containing the serialized scalars in little-endian order. 90 | /// - `count`: The number of scalars to deserialize. 91 | /// 92 | /// # Returns 93 | /// - `Some(Vec)`: The deserialized scalars if all are valid. 94 | /// - `None`: If the byte slice length is incorrect or any scalar is invalid. 95 | pub fn deserialize_scalars(data: &[u8], count: usize) -> Option> { 96 | #[allow(clippy::manual_div_ceil)] 97 | let scalar_len = (G::Scalar::NUM_BITS as usize + 7) / 8; 98 | let expected_len = count * scalar_len; 99 | 100 | if data.len() < expected_len { 101 | return None; 102 | } 103 | 104 | let mut scalars = Vec::with_capacity(count); 105 | for i in 0..count { 106 | let start = i * scalar_len; 107 | let end = start + scalar_len; 108 | let slice = &data[start..end]; 109 | 110 | let mut repr = ::Repr::default(); 111 | repr.as_mut().copy_from_slice(slice); 112 | repr.as_mut().reverse(); 113 | 114 | let scalar = G::Scalar::from_repr(repr).into(); 115 | let scalar: Option = scalar; 116 | scalars.push(scalar?); 117 | } 118 | 119 | Some(scalars) 120 | } 121 | -------------------------------------------------------------------------------- /tests/test_relations.rs: -------------------------------------------------------------------------------- 1 | use ff::Field; 2 | 3 | use sigma_proofs::codec::Shake128DuplexSponge; 4 | use sigma_proofs::linear_relation::{CanonicalLinearRelation, LinearRelation}; 5 | use sigma_proofs::Nizk; 6 | 7 | mod relations; 8 | use relations::*; 9 | 10 | #[test] 11 | #[allow(non_snake_case)] 12 | fn test_cmz_wallet_with_fee() { 13 | 
/// For every sample relation, create batchable and compact NIZK proofs and
/// verify both round-trips.
#[test]
fn test_relations() {
    type G = bls12_381::G1Projective;

    // (name, sampler) pairs; each sampler returns a relation plus a witness.
    let instance_generators: Vec<(_, &'static dyn Fn(&mut _) -> _)> = vec![
        ("dlog", &discrete_logarithm),
        ("shifted_dlog", &shifted_dlog),
        ("dleq", &dleq),
        ("shifted_dleq", &shifted_dleq),
        ("pedersen_commitment", &pedersen_commitment),
        ("twisted_pedersen_commitment", &twisted_pedersen_commitment),
        ("pedersen_commitment_dleq", &pedersen_commitment_equality),
        ("bbs_blind_commitment", &bbs_blind_commitment),
        ("test_range", &test_range),
        ("weird_linear_combination", &weird_linear_combination),
        ("simple_subtractions", &simple_subtractions),
        ("subtractions_with_shift", &subtractions_with_shift),
        ("cmz_wallet_spend_relation", &cmz_wallet_spend_relation),
        ("nested_affine_relation", &nested_affine_relation),
        ("elgamal_public_subtract", &elgamal_subtraction),
    ];

    for (relation_name, relation_sampler) in &instance_generators {
        let mut rng = rand::thread_rng();
        let (canonical_relation, witness) = relation_sampler(&mut rng);

        // Domain-separate the Fiat-Shamir transcript per relation name.
        let domain_sep = format!("test-fiat-shamir-{relation_name}").into_bytes();
        let nizk = Nizk::<CanonicalLinearRelation<G>, Shake128DuplexSponge>::new(
            &domain_sep,
            canonical_relation,
        );

        let batchable = nizk
            .prove_batchable(&witness, &mut rng)
            .unwrap_or_else(|_| panic!("Failed to create batchable proof for {relation_name}"));
        assert!(
            nizk.verify_batchable(&batchable).is_ok(),
            "Batchable proof verification failed for {relation_name}"
        );

        let compact = nizk
            .prove_compact(&witness, &mut rng)
            .unwrap_or_else(|_| panic!("Failed to create compact proof for {relation_name}"));
        assert!(
            nizk.verify_compact(&compact).is_ok(),
            "Compact proof verification failed for {relation_name}"
        );
    }
}
/// Compose a nested statement and check that both batchable and compact
/// proofs round-trip:
///
///   And(
///       Or( dleq, pedersen_commitment ),
///       Simple( discrete_logarithm ),
///       And( pedersen_commitment_dleq, bbs_blind_commitment_computation )
///   )
#[allow(non_snake_case)]
#[test]
fn test_composition_example() {
    let domain_sep = b"hello world";
    let mut rng = rand::thread_rng();

    // Sample the five leaf relations and their witnesses.
    let (relation1, witness1) = dleq(&mut rng);
    let (relation2, witness2) = pedersen_commitment(&mut rng);
    let (relation3, witness3) = discrete_logarithm(&mut rng);
    let (relation4, witness4) = pedersen_commitment(&mut rng);
    let (relation5, witness5) = bbs_blind_commitment(&mut rng);

    // The OR branch only needs one valid witness; replace the other with
    // random scalars of the same length.
    let wrong_witness2: Vec<_> = witness2
        .iter()
        .map(|_| <G as Group>::Scalar::random(&mut rng))
        .collect();

    // Second-layer compositions.
    let or_branch = ComposedRelation::<G>::or([relation1, relation2]);
    let or_witness = ComposedWitness::or([witness1, wrong_witness2]);

    let and_branch = ComposedRelation::and([relation4, relation5]);
    let and_witness = ComposedWitness::and([witness4, witness5]);

    // Top-level AND over all three branches.
    let instance = ComposedRelation::and([or_branch, relation3.into(), and_branch]);
    let witness = ComposedWitness::and([or_witness, witness3.into(), and_witness]);

    let nizk = instance.into_nizk(domain_sep);

    let batchable = nizk.prove_batchable(&witness, &mut rng).unwrap();
    assert!(nizk.verify_batchable(&batchable).is_ok());

    let compact = nizk.prove_compact(&witness, &mut rng).unwrap();
    assert!(nizk.verify_compact(&compact).is_ok());
}
92 | 93 | // definitions of the underlying protocols 94 | let mut rng = rand::thread_rng(); 95 | let (relation1, witness1) = dleq::(&mut rng); 96 | let (relation2, witness2) = dleq::(&mut rng); 97 | 98 | let or_protocol = ComposedRelation::or([relation1, relation2]); 99 | 100 | let witness = ComposedWitness::or([witness1, witness2]); 101 | let nizk = or_protocol.into_nizk(b"test_or_both_true"); 102 | 103 | // Batchable and compact proofs 104 | let proof_batchable_bytes = nizk.prove_batchable(&witness, &mut rng).unwrap(); 105 | let proof_compact_bytes = nizk.prove_compact(&witness, &mut rng).unwrap(); 106 | // Verify proofs 107 | assert!(nizk.verify_batchable(&proof_batchable_bytes).is_ok()); 108 | assert!(nizk.verify_compact(&proof_compact_bytes).is_ok()); 109 | } 110 | -------------------------------------------------------------------------------- /src/linear_relation/convert.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec; 2 | use alloc::vec::Vec; 3 | use ff::Field; 4 | use group::Group; 5 | 6 | use super::{GroupVar, ScalarTerm, ScalarVar, Sum, Term, Weighted}; 7 | 8 | impl From> for ScalarTerm { 9 | fn from(value: ScalarVar) -> Self { 10 | Self::Var(value) 11 | } 12 | } 13 | 14 | impl From> for Weighted, G::Scalar> { 15 | fn from(value: ScalarVar) -> Self { 16 | ScalarTerm::from(value).into() 17 | } 18 | } 19 | 20 | impl From, G::Scalar>> for Weighted, G::Scalar> { 21 | fn from(value: Weighted, G::Scalar>) -> Self { 22 | Self { 23 | term: value.term.into(), 24 | weight: value.weight, 25 | } 26 | } 27 | } 28 | 29 | // NOTE: Rust does not accept an impl over From 30 | impl, G: Group> From for Weighted, G::Scalar> { 31 | fn from(value: T) -> Self { 32 | Self { 33 | term: ScalarTerm::Unit, 34 | weight: value.into(), 35 | } 36 | } 37 | } 38 | 39 | impl From<(ScalarVar, GroupVar)> for Term { 40 | fn from((scalar, elem): (ScalarVar, GroupVar)) -> Self { 41 | Self { 42 | scalar: scalar.into(), 43 | elem, 44 | } 45 
| } 46 | } 47 | 48 | impl From<(ScalarTerm, GroupVar)> for Term { 49 | fn from((scalar, elem): (ScalarTerm, GroupVar)) -> Self { 50 | Self { scalar, elem } 51 | } 52 | } 53 | 54 | impl From> for Term { 55 | fn from(value: GroupVar) -> Self { 56 | Term { 57 | scalar: ScalarTerm::Unit, 58 | elem: value, 59 | } 60 | } 61 | } 62 | 63 | impl From<(ScalarVar, GroupVar)> for Weighted, G::Scalar> { 64 | fn from(pair: (ScalarVar, GroupVar)) -> Self { 65 | Term::from(pair).into() 66 | } 67 | } 68 | 69 | impl From<(ScalarTerm, GroupVar)> for Weighted, G::Scalar> { 70 | fn from(pair: (ScalarTerm, GroupVar)) -> Self { 71 | Term::from(pair).into() 72 | } 73 | } 74 | 75 | impl From> for Weighted, G::Scalar> { 76 | fn from(value: GroupVar) -> Self { 77 | Term::from(value).into() 78 | } 79 | } 80 | 81 | impl From, G::Scalar>> for Weighted, G::Scalar> { 82 | fn from(value: Weighted, G::Scalar>) -> Self { 83 | Weighted { 84 | term: value.term.into(), 85 | weight: value.weight, 86 | } 87 | } 88 | } 89 | 90 | impl From for Weighted { 91 | fn from(term: T) -> Self { 92 | Self { 93 | term, 94 | weight: F::ONE, 95 | } 96 | } 97 | } 98 | 99 | // NOTE: This is implemented directly for each of the key types to avoid collision with the blanket 100 | // Into impl provided by the standard library. 101 | macro_rules! 
impl_from_for_sum { 102 | ($($type:ty),+) => { 103 | $( 104 | impl> From for Sum<$type> { 105 | fn from(value: T) -> Self { 106 | Sum(vec![value.into()]) 107 | } 108 | } 109 | 110 | impl> From> for Sum<$type> { 111 | fn from(terms: Vec) -> Self { 112 | Self::from_iter(terms) 113 | } 114 | } 115 | 116 | impl, const N: usize> From<[T; N]> for Sum<$type> { 117 | fn from(terms: [T; N]) -> Self { 118 | Self::from_iter(terms) 119 | } 120 | } 121 | 122 | impl> FromIterator for Sum<$type> { 123 | fn from_iter>(iter: I) -> Self { 124 | Self(iter.into_iter().map(|x| x.into()).collect()) 125 | } 126 | } 127 | )+ 128 | }; 129 | } 130 | 131 | impl_from_for_sum!( 132 | ScalarVar, 133 | GroupVar, 134 | Term, 135 | Weighted, G::Scalar>, 136 | Weighted, G::Scalar>, 137 | Weighted, G::Scalar> 138 | ); 139 | 140 | impl From> for Sum> { 141 | fn from(sum: Sum) -> Self { 142 | Self(sum.0.into_iter().map(|x| x.into()).collect()) 143 | } 144 | } 145 | 146 | // Manual implementation for ScalarTerm sum conversion 147 | impl From> for Sum, G::Scalar>> { 148 | fn from(value: ScalarTerm) -> Self { 149 | Sum(vec![value.into()]) 150 | } 151 | } 152 | 153 | impl From, G::Scalar>>> for Sum, G::Scalar>> { 154 | fn from(sum: Sum, G::Scalar>>) -> Self { 155 | let sum = sum.0.into_iter().map(|x| x.into()).collect::>(); 156 | Self(sum) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/codec.rs: -------------------------------------------------------------------------------- 1 | //! Encoding and decoding utilities for Fiat-Shamir and group operations. 
2 | 3 | use crate::duplex_sponge::DuplexSpongeInterface; 4 | use crate::duplex_sponge::{keccak::KeccakDuplexSponge, shake::ShakeDuplexSponge}; 5 | use alloc::vec; 6 | use ff::PrimeField; 7 | use group::prime::PrimeGroup; 8 | use num_bigint::BigUint; 9 | use num_traits::identities::One; 10 | 11 | /// A trait defining the behavior of a domain-separated codec hashing, which is typically used for [`crate::traits::SigmaProtocol`]s. 12 | /// 13 | /// A domain-separated hashing codec is a codec, identified by a domain, which is incremented with successive messages ("absorb"). The codec can then output a bit stream of any length, which is typically used to generate a challenge unique to the given codec ("squeeze"). (See Sponge Construction). 14 | /// 15 | /// The output is deterministic for a given set of input. Thus, both Prover and Verifier can generate the codec on their sides and ensure the same inputs have been used in both side of the protocol. 16 | /// 17 | /// ## Minimal Implementation 18 | /// Types implementing [`Codec`] must define: 19 | /// - `new` 20 | /// - `prover_message` 21 | /// - `verifier_challenge` 22 | pub trait Codec { 23 | type Challenge; 24 | 25 | /// Generates an empty codec that can be identified by a domain separator. 26 | fn new( 27 | protocol_identifier: &[u8; 64], 28 | session_identifier: &[u8], 29 | instance_label: &[u8], 30 | ) -> Self; 31 | 32 | /// Allows for precomputed initialization of the codec with a specific IV. 33 | fn from_iv(iv: [u8; 64]) -> Self; 34 | 35 | /// Absorbs data into the codec. 36 | fn prover_message(&mut self, data: &[u8]); 37 | 38 | /// Produces a scalar that can be used as a challenge from the codec. 39 | fn verifier_challenge(&mut self) -> Self::Challenge; 40 | } 41 | 42 | fn cardinal() -> BigUint { 43 | let bytes = (F::ZERO - F::ONE).to_repr(); 44 | BigUint::from_bytes_le(bytes.as_ref()) + BigUint::one() 45 | } 46 | 47 | /// A byte-level Schnorr codec that works with any duplex sponge. 
48 | /// 49 | /// This codec is generic over both the group `G` and the hash function `H`. 50 | /// It can be used with different duplex sponge implementations. 51 | #[derive(Clone)] 52 | pub struct ByteSchnorrCodec 53 | where 54 | G: PrimeGroup, 55 | H: DuplexSpongeInterface, 56 | { 57 | hasher: H, 58 | _marker: core::marker::PhantomData, 59 | } 60 | 61 | const WORD_SIZE: usize = 4; 62 | 63 | fn length_to_bytes(x: usize) -> [u8; WORD_SIZE] { 64 | (x as u32).to_be_bytes() 65 | } 66 | 67 | /// Compute the initialization vector (IV) for a protocol instance. 68 | /// 69 | /// This function computes a deterministic IV from the protocol identifier, 70 | /// session identifier, and instance label using the specified duplex sponge. 71 | pub fn compute_iv( 72 | protocol_id: &[u8; 64], 73 | session_id: &[u8], 74 | instance_label: &[u8], 75 | ) -> [u8; 64] { 76 | let mut tmp = H::new([0u8; 64]); 77 | tmp.absorb(protocol_id); 78 | tmp.absorb(&length_to_bytes(session_id.len())); 79 | tmp.absorb(session_id); 80 | tmp.absorb(&length_to_bytes(instance_label.len())); 81 | tmp.absorb(instance_label); 82 | tmp.squeeze(64).try_into().unwrap() 83 | } 84 | 85 | impl Codec for ByteSchnorrCodec 86 | where 87 | G: PrimeGroup, 88 | H: DuplexSpongeInterface, 89 | { 90 | type Challenge = G::Scalar; 91 | 92 | fn new(protocol_id: &[u8; 64], session_id: &[u8], instance_label: &[u8]) -> Self { 93 | let mut hasher = H::new(*protocol_id); 94 | hasher.absorb(&length_to_bytes(session_id.len())); 95 | hasher.absorb(session_id); 96 | hasher.absorb(&length_to_bytes(instance_label.len())); 97 | hasher.absorb(instance_label); 98 | Self { 99 | hasher, 100 | _marker: core::marker::PhantomData, 101 | } 102 | } 103 | 104 | fn from_iv(iv: [u8; 64]) -> Self { 105 | Self { 106 | hasher: H::new(iv), 107 | _marker: core::marker::PhantomData, 108 | } 109 | } 110 | 111 | fn prover_message(&mut self, data: &[u8]) { 112 | self.hasher.absorb(data); 113 | } 114 | 115 | fn verifier_challenge(&mut self) -> 
Self::Challenge { 116 | #[allow(clippy::manual_div_ceil)] 117 | let scalar_byte_length = (G::Scalar::NUM_BITS as usize + 7) / 8; 118 | 119 | let uniform_bytes = self.hasher.squeeze(scalar_byte_length + 16); 120 | let scalar = BigUint::from_bytes_be(&uniform_bytes); 121 | let reduced = scalar % cardinal::(); 122 | 123 | let mut bytes = vec![0u8; scalar_byte_length]; 124 | let reduced_bytes = reduced.to_bytes_be(); 125 | let start = bytes.len() - reduced_bytes.len(); 126 | bytes[start..].copy_from_slice(&reduced_bytes); 127 | bytes.reverse(); 128 | 129 | let mut repr = ::Repr::default(); 130 | repr.as_mut().copy_from_slice(&bytes); 131 | 132 | ::from_repr(repr).expect("Error") 133 | } 134 | } 135 | 136 | /// Type alias for a Keccak-based ByteSchnorrCodec. 137 | /// This is the codec used for matching test vectors from Sage. 138 | pub type KeccakByteSchnorrCodec = ByteSchnorrCodec; 139 | 140 | /// Type alias for a SHAKE-based ByteSchnorrCodec. 141 | pub type Shake128DuplexSponge = ByteSchnorrCodec; 142 | -------------------------------------------------------------------------------- /tests/spec_vectors.rs: -------------------------------------------------------------------------------- 1 | use bls12_381::G1Projective as G; 2 | use hex::FromHex; 3 | use json::JsonValue; 4 | use std::collections::HashMap; 5 | 6 | use sigma_proofs::codec::KeccakByteSchnorrCodec; 7 | use sigma_proofs::linear_relation::CanonicalLinearRelation; 8 | use sigma_proofs::Nizk; 9 | 10 | mod spec; 11 | use spec::{custom_schnorr_protocol::DeterministicSchnorrProof, rng::TestDRNG}; 12 | 13 | type SchnorrNizk = Nizk, KeccakByteSchnorrCodec>; 14 | 15 | #[derive(Debug)] 16 | struct TestVector { 17 | ciphersuite: String, 18 | session_id: Vec, 19 | statement: Vec, 20 | witness: Vec, 21 | proof: Vec, 22 | } 23 | 24 | #[allow(clippy::type_complexity)] 25 | #[allow(non_snake_case)] 26 | #[test] 27 | fn test_spec_testvectors() { 28 | let proof_generation_rng_seed = b"proof_generation_seed"; 29 | let vectors 
= extract_vectors_new().unwrap(); 30 | 31 | // Define supported ciphersuites 32 | let mut supported_ciphersuites = HashMap::new(); 33 | supported_ciphersuites.insert( 34 | "sigma/OWKeccak1600+Bls12381".to_string(), 35 | "BLS12-381 with Keccak-based sponge", 36 | ); 37 | 38 | // Order of test names to match JSON vector order 39 | let test_names = [ 40 | "bbs_blind_commitment_computation", 41 | "discrete_logarithm", 42 | "dleq", 43 | "pedersen_commitment", 44 | "pedersen_commitment_dleq", 45 | ]; 46 | 47 | for test_name in test_names.iter() { 48 | let vector = &vectors[*test_name]; 49 | 50 | // Verify the ciphersuite is supported 51 | assert!( 52 | supported_ciphersuites.contains_key(&vector.ciphersuite), 53 | "Unsupported ciphersuite '{}' in test vector {test_name}", 54 | vector.ciphersuite 55 | ); 56 | 57 | // Parse the statement from the test vector 58 | let parsed_instance = CanonicalLinearRelation::::from_label(&vector.statement) 59 | .expect("Failed to parse statement"); 60 | 61 | // Decode the witness from the test vector 62 | let witness = sigma_proofs::group::serialization::deserialize_scalars::( 63 | &vector.witness, 64 | parsed_instance.num_scalars, 65 | ) 66 | .expect("Failed to deserialize witness"); 67 | 68 | // Verify the parsed instance can be re-serialized to the same label 69 | assert_eq!( 70 | parsed_instance.label(), 71 | vector.statement, 72 | "parsed statement doesn't match original for {test_name}" 73 | ); 74 | 75 | // Create NIZK with the session_id from the test vector 76 | let protocol = DeterministicSchnorrProof::from(parsed_instance.clone()); 77 | let nizk = SchnorrNizk::new(&vector.session_id, protocol); 78 | 79 | // Verify that the computed IV matches the test vector IV 80 | // Ensure the provided test vector proof verifies. 
81 | assert!( 82 | nizk.verify_batchable(&vector.proof).is_ok(), 83 | "Fiat-Shamir Schnorr proof from vectors did not verify for {test_name}" 84 | ); 85 | 86 | // Generate proof with the proof generation RNG 87 | let mut proof_rng = TestDRNG::new(proof_generation_rng_seed); 88 | let proof_bytes = nizk.prove_batchable(&witness, &mut proof_rng).unwrap(); 89 | 90 | // Verify the proof matches 91 | assert_eq!( 92 | proof_bytes, vector.proof, 93 | "proof bytes for test vector {test_name} do not match" 94 | ); 95 | 96 | // Verify the proof is valid 97 | let verified = nizk.verify_batchable(&proof_bytes).is_ok(); 98 | assert!( 99 | verified, 100 | "Fiat-Shamir Schnorr proof verification failed for {test_name}" 101 | ); 102 | } 103 | } 104 | 105 | fn extract_vectors_new() -> Result, String> { 106 | use std::collections::HashMap; 107 | 108 | let content = include_str!("./spec/vectors/testSigmaProtocols.json"); 109 | let root: JsonValue = json::parse(content).map_err(|e| format!("JSON parsing error: {e}"))?; 110 | 111 | let mut vectors = HashMap::new(); 112 | 113 | for (name, obj) in root.entries() { 114 | let ciphersuite = obj["Ciphersuite"] 115 | .as_str() 116 | .ok_or_else(|| format!("Ciphersuite field not found for {name}"))? 
117 | .to_string(); 118 | 119 | let session_id = Vec::from_hex( 120 | obj["SessionId"] 121 | .as_str() 122 | .ok_or_else(|| format!("SessionId field not found for {name}"))?, 123 | ) 124 | .map_err(|e| format!("Invalid hex in SessionId for {name}: {e}"))?; 125 | 126 | let statement = Vec::from_hex( 127 | obj["Statement"] 128 | .as_str() 129 | .ok_or_else(|| format!("Statement field not found for {name}"))?, 130 | ) 131 | .map_err(|e| format!("Invalid hex in Statement for {name}: {e}"))?; 132 | 133 | let witness = Vec::from_hex( 134 | obj["Witness"] 135 | .as_str() 136 | .ok_or_else(|| format!("Witness field not found for {name}"))?, 137 | ) 138 | .map_err(|e| format!("Invalid hex in Witness for {name}: {e}"))?; 139 | 140 | let proof = Vec::from_hex( 141 | obj["Batchable Proof"] 142 | .as_str() 143 | .ok_or_else(|| format!("Proof field not found for {name}"))?, 144 | ) 145 | .map_err(|e| format!("Invalid hex in Proof for {name}: {e}"))?; 146 | 147 | vectors.insert( 148 | name.to_string(), 149 | TestVector { 150 | ciphersuite, 151 | session_id, 152 | statement, 153 | witness, 154 | proof, 155 | }, 156 | ); 157 | } 158 | 159 | Ok(vectors) 160 | } 161 | -------------------------------------------------------------------------------- /src/traits.rs: -------------------------------------------------------------------------------- 1 | //! Generic interface for 3-message Sigma protocols. 2 | //! 3 | //! This module defines the [`SigmaProtocol`] and [`SigmaProtocolSimulator`] traits, 4 | //! used to describe interactive zero-knowledge proofs of knowledge, 5 | //! such as Schnorr proofs, that follow the 3-message Sigma protocol structure. 6 | 7 | use crate::errors::Error; 8 | use alloc::vec::Vec; 9 | #[cfg(feature = "std")] 10 | use rand::{CryptoRng, Rng}; 11 | #[cfg(not(feature = "std"))] 12 | use rand_core::{CryptoRng, RngCore as Rng}; 13 | 14 | /// A trait defining the behavior of a generic Sigma protocol. 
15 | /// 16 | /// A Sigma protocol is a 3-message proof protocol where a prover can convince 17 | /// a verifier of knowledge of a witness for a given public statement 18 | /// without revealing the witness. 19 | /// 20 | /// ## Associated Types 21 | /// - `Commitment`: The prover's initial commitment. 22 | /// - `ProverState`: The prover's internal state needed to compute a response. 23 | /// - `Response`: The prover's response to a verifier's challenge. 24 | /// - `Witness`: The prover's secret knowledge. 25 | /// - `Challenge`: The verifier's challenge value. 26 | /// 27 | /// ## Minimal Implementation 28 | /// Types implementing [`SigmaProtocol`] must define: 29 | /// - `prover_commit` — Generates a commitment and internal state. 30 | /// - `prover_response` — Computes a response to a challenge. 31 | /// - `verifier` — Verifies a full transcript `(commitment, challenge, response)`. 32 | /// 33 | /// ## Serialization 34 | /// Implementors must also provide methods for serialization and deserialization 35 | /// of each component of the proof. 36 | /// Required methods: 37 | /// - `serialize_commitment` / `deserialize_commitment` 38 | /// - `serialize_challenge` / `deserialize_challenge` 39 | /// - `serialize_response` / `deserialize_response` 40 | /// 41 | /// These functions should encode/decode each component into/from a compact binary format. 42 | /// 43 | /// ## Identification 44 | /// To allow transcript hash binding and protocol distinction, 45 | /// implementors must provide: 46 | /// - `protocol_identifier` — A fixed byte identifier of the protocol. 47 | /// - `instance_label` — A label specific to the instance being proven. 48 | pub trait SigmaProtocol { 49 | type Commitment; 50 | type ProverState; 51 | type Response; 52 | type Witness; 53 | type Challenge; 54 | 55 | /// First step of the protocol. Given the witness and RNG, this generates: 56 | /// - A public commitment to send to the verifier. 
57 | /// - The internal state to use when computing the response. 58 | fn prover_commit( 59 | &self, 60 | witness: &Self::Witness, 61 | rng: &mut (impl Rng + CryptoRng), 62 | ) -> Result<(Self::Commitment, Self::ProverState), Error>; 63 | 64 | /// Computes the prover's response to a challenge based on the prover state. 65 | fn prover_response( 66 | &self, 67 | state: Self::ProverState, 68 | challenge: &Self::Challenge, 69 | ) -> Result; 70 | 71 | /// Final step of the protocol: checks that the commitment, challenge, and response form a valid transcript. 72 | /// 73 | /// Returns: 74 | /// - `Ok(())` if the transcript is valid. 75 | /// - `Err(())` otherwise. 76 | fn verifier( 77 | &self, 78 | commitment: &Self::Commitment, 79 | challenge: &Self::Challenge, 80 | response: &Self::Response, 81 | ) -> Result<(), Error>; 82 | 83 | /// Serializes a commitment to bytes. 84 | fn serialize_commitment(&self, commitment: &Self::Commitment) -> Vec; 85 | 86 | /// Serializes a challenge to bytes. 87 | fn serialize_challenge(&self, challenge: &Self::Challenge) -> Vec; 88 | 89 | /// Serializes a response to bytes. 90 | fn serialize_response(&self, response: &Self::Response) -> Vec; 91 | 92 | /// Deserializes a commitment from bytes. 93 | fn deserialize_commitment(&self, data: &[u8]) -> Result; 94 | 95 | /// Deserializes a challenge from bytes. 96 | fn deserialize_challenge(&self, data: &[u8]) -> Result; 97 | 98 | /// Deserializes a response from bytes. 99 | fn deserialize_response(&self, data: &[u8]) -> Result; 100 | 101 | fn protocol_identifier(&self) -> [u8; 64]; 102 | 103 | fn instance_label(&self) -> impl AsRef<[u8]>; 104 | } 105 | 106 | type Transcript

= ( 107 |

::Commitment, 108 |

::Challenge, 109 |

::Response, 110 | ); 111 | 112 | /// A trait defining the behavior of a Sigma protocol for which simulation of transcripts is necessary. 113 | /// 114 | /// Every Sigma protocol can be simulated, but in practice, this is primarily used 115 | /// for proving security properties (zero-knowledge, soundness, etc.). 116 | /// 117 | /// Some protocols (e.g. OR compositions) require simulation capabilities during actual proof generation. 118 | /// 119 | /// ## Minimal Implementation 120 | /// Types implementing [`SigmaProtocolSimulator`] must define: 121 | /// - `simulate_proof` 122 | /// - `simulate_transcript` 123 | pub trait SigmaProtocolSimulator: SigmaProtocol { 124 | /// Generates a random response (e.g. for simulation or OR composition). 125 | /// 126 | /// Typically used to simulate a proof without a witness. 127 | fn simulate_response(&self, rng: &mut R) -> Self::Response; 128 | 129 | /// Simulates a commitment for which ('commitment', 'challenge', 'response') is a valid transcript. 130 | /// 131 | /// This function allows to omit commitment in compact proofs of the type ('challenge', 'response'). 132 | fn simulate_commitment( 133 | &self, 134 | challenge: &Self::Challenge, 135 | response: &Self::Response, 136 | ) -> Result; 137 | 138 | /// Generates a full simulated proof transcript (commitment, challenge, response) 139 | /// without requiring knowledge of a witness. 
140 | fn simulate_transcript( 141 | &self, 142 | rng: &mut R, 143 | ) -> Result, Error>; 144 | } 145 | -------------------------------------------------------------------------------- /tests/spec/vectors/testSigmaProtocols.json: -------------------------------------------------------------------------------- 1 | { 2 | "bbs_blind_commitment_computation": { 3 | "Batchable Proof": "8c7090509cb33c86ee2457d1ffab3111fa1a7c9fc2ee532add24838e6aeaa4c1ab5ab243db2d35607590c0a219c134bc1625cc8be1c746bcb3cb08fe97b908ed2cac87152d70d0e29c187e503f6fa95f71c0dbabde943e3ab63fa04c277bfde5f7234ffee03bb25a54ef5db94ba5ff1516c5c42bf0d38087815407c6df12d3344e12d77a79c57e9bdf7fc319245299cd492b513fb0fa97114e507509a532a52298871a491a3709a7e1aa414ee021b31d", 4 | "Ciphersuite": "sigma/OWKeccak1600+Bls12381", 5 | "Proof": "3c381afe83881263c3e51bcb6aad39409ac6c787a5f04c666edb2fc4f4b1d7815ab9da2fea14007309c0261a58806d1577e06bdabc64ac6f810d0c4d69cee7b05283de734a0e895db3e180073d7b17edffef459a72a210f16e019456b42fc51106dd4e3f83a0f45adf45ecf6afbf8cce3109d7d8f766a21c0318dac5f8459772516cf2a463d777c19d4ee83ffe57ed67fe621afbecd787a1fe246443098094be", 6 | "SessionId": "6262735f626c696e645f636f6d6d69746d656e745f636f6d7075746174696f6e", 7 | "Statement": "0100000004000000040000000000000000000000010000000100000002000000020000000300000003000000b537255188baffeccd66d810bc5952bd1f887b215a32c6028d439c77722007dcc67dc88addc8fc1419eeb2a337a22336a8662bc3cf5d295960ed2e6df20ffd5a4d05c8ed10d6f872a9286fda210db31acc9dd5a7e988f65d09575f3997b8cdfba048221961233304dddf1ee09939bb646b322cc5d4646c1b2ff90fca056ef29720be065104cd43b5f17d3c35f48dd2cfa2561cd091db0302192aca13657010c86a101ba7f616f53ff5f71ba69d84e5062ce45094e2deea115175c78fc49bab628c38bc769a15a9b38335c732155d2ae373f87c7a986966b5ca95a8902fb6c2971cd0a3eddc54beedcb17c33ce6f96ee2", 8 | "Witness": 
"62e3b38b436d705037a1954f1689674edbdef23bb7c0835f6a14320bf5668de8409945826e10a6fe36c3ce257246ae694efe54ae488badfd379e48c44ab2086f63d0453c8654db5feb745feeff5518e70aeb8224a27f660ef7b5d8ff1877fd2e14091668789158918aac10d2e3c1003a8a2aec035572ee03356d3045b9e38c3b" 9 | }, 10 | "discrete_logarithm": { 11 | "Batchable Proof": "a8ba164c1cd96e6629165e1979f86430bfa8faad9b56172d792757e42e98d69e08e40025496bed79556b602a8428c511602226780a679f788087bc14d549973670d6b22ecaf6c80d2922e3182e1a946c", 12 | "Ciphersuite": "sigma/OWKeccak1600+Bls12381", 13 | "Proof": "5c64d17c9ee9d22f6f0eeef742ae22122cf847a89e215409e4fdd3378353e3726dd0475b820382391bc7282ff9df0fecaa52675a63da863e891ad34d10ebd9fb", 14 | "SessionId": "64697363726574655f6c6f6761726974686d", 15 | "Statement": "010000000100000001000000000000000000000097f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bbb537255188baffeccd66d810bc5952bd1f887b215a32c6028d439c77722007dcc67dc88addc8fc1419eeb2a337a22336", 16 | "Witness": "2c7538adafe40d6f0f111f344e46f02c16c338d5be2b3abb620a8ee35d194350" 17 | }, 18 | "dleq": { 19 | "Batchable Proof": "a8ba164c1cd96e6629165e1979f86430bfa8faad9b56172d792757e42e98d69e08e40025496bed79556b602a8428c511b2028b870f588e8a374d45535bba6a6d97b9142a6c4eb3b9e22d9700aa08a3616a0d2969852447e1e790c520ec11997634e397f0a66aa7e28b1f76acaef2c11012a02b7d84699e0190f54f13ca71b592", 20 | "Ciphersuite": "sigma/OWKeccak1600+Bls12381", 21 | "Proof": "082745ee1fe233275708a14848d55c740f0261d8750827be85aafe0595333da028a582c331d8d224171246779566a2610d5448a01181cb2d0b3a645894e15220", 22 | "SessionId": "646c6571", 23 | "Statement": 
"02000000010000000100000000000000000000000300000001000000000000000200000097f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bba8662bc3cf5d295960ed2e6df20ffd5a4d05c8ed10d6f872a9286fda210db31acc9dd5a7e988f65d09575f3997b8cdfbb537255188baffeccd66d810bc5952bd1f887b215a32c6028d439c77722007dcc67dc88addc8fc1419eeb2a337a22336b4483f8d8655b8448a187ba1a98aa146145f862d5bb0c1f4561a14260cce8d55f4d43e3da1863fdf9dc4fdf859f28b9f", 24 | "Witness": "5c18c45b445e82400e1c9198e5929495504a21e6d3956329f56a09f9a0f1666f" 25 | }, 26 | "pedersen_commitment": { 27 | "Batchable Proof": "9899a74231316713e9527d401f333231316ad97062e9e37ef636ff536535029b24b89947e621867004ec5b0108cb82ac734e792c8e6612ba5bbb478a60043b105b3a6041f52fafe46492032576a0758564d1be052d918fca57222b2e918975229e5abcefcaa73cba5523c9f41fba2b76", 28 | "Ciphersuite": "sigma/OWKeccak1600+Bls12381", 29 | "Proof": "2bc3744f34c93a070722523a5b38ed7b60453e480b0d9238ed2cf071f00aab0f554e402fc538a16b3a02ca6bb5fa951b4f7d8f426eb3bbc707c3bb4c2a8e7983593412f80f8bd4e597bdb6a5a3a7122238e7df2951f55e27b96b98251001ea06", 30 | "SessionId": "706564657273656e5f636f6d6d69746d656e74", 31 | "Statement": "0100000002000000020000000000000000000000010000000100000097f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bbb537255188baffeccd66d810bc5952bd1f887b215a32c6028d439c77722007dcc67dc88addc8fc1419eeb2a337a223368dc806cea8183a52d50d9a525942c0310f0b34eadcce52a40d06e7057043544c62026c0ec24cea8b822ae68c6c33a2d5", 32 | "Witness": "5c18c45b445e82400e1c9198e5929495504a21e6d3956329f56a09f9a0f1666f38c3532866f2be8b864b5c3739af40703153b939716a80e6a47793c041267fbf" 33 | }, 34 | "pedersen_commitment_dleq": { 35 | "Batchable Proof": 
"a2a4fb6a89b811dfe9608638afb316fad66d1454641c2ef76fa3ffea578f98d5df92ef5f50029d2ebdbf51ef8721ed8cadcaabd57762ff7a03fe34460ab60013e16cce171ddb2e1855d25f180c6ad6b525b36a9d5e7f53cd7385abadf3b361a569593eef1c7d53c10177269b3022fa5fb15f383a5ce4e2ff97086b1c8b70b0760bfa367ebe37359f40bc801e50cf67f2b291a65560e34b323c2ce3934b607bdf", 36 | "Ciphersuite": "sigma/OWKeccak1600+Bls12381", 37 | "Proof": "0f836764c565624f9e7b8a9429d75235197bd5631a129836237d945f83e8d3103c0cd961d44c7905b116c799e3c58b8b319604c7cbd15dbfcad2ce4c711760992b500ccac95601be42fec4950fce30fd6d6df2a15aefd427f40048e6bf72da92", 38 | "SessionId": "706564657273656e5f636f6d6d69746d656e745f646c6571", 39 | "Statement": "02000000020000000200000000000000000000000100000001000000050000000200000000000000030000000100000004000000b537255188baffeccd66d810bc5952bd1f887b215a32c6028d439c77722007dcc67dc88addc8fc1419eeb2a337a22336a8662bc3cf5d295960ed2e6df20ffd5a4d05c8ed10d6f872a9286fda210db31acc9dd5a7e988f65d09575f3997b8cdfb82c9b34528286ef1c4a7dd8a3b02a9edfc9adc49d3034f0c90d7927ce1de8ccc7a80bf877c291faf90832fa7b5f86904a048221961233304dddf1ee09939bb646b322cc5d4646c1b2ff90fca056ef29720be065104cd43b5f17d3c35f48dd2cfa2561cd091db0302192aca13657010c86a101ba7f616f53ff5f71ba69d84e5062ce45094e2deea115175c78fc49bab629719cfef2f3c2cd0dfb3cf4b6cb3cb1504ab86f98c26d81192f4d73c5ce72118d3924b19119a64d7dc9aea75661c1488", 40 | "Witness": "409945826e10a6fe36c3ce257246ae694efe54ae488badfd379e48c44ab2086f63d0453c8654db5feb745feeff5518e70aeb8224a27f660ef7b5d8ff1877fd2e" 41 | } 42 | } -------------------------------------------------------------------------------- /src/group/msm.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec; 2 | use alloc::vec::Vec; 3 | use ff::PrimeField; 4 | use group::prime::PrimeGroup; 5 | 6 | /// The result of this function is only approximately `ln(a)`. This is inherited from Zexe and libsnark. 
7 | #[inline] 8 | const fn ln_without_floats(a: usize) -> usize { 9 | if a == 0 { 10 | 1 11 | } else { 12 | // log2(a) * ln(2), ensure minimum value of 1 13 | let result = (usize::BITS - (a - 1).leading_zeros()) as usize * 69 / 100; 14 | if result == 0 { 15 | 1 16 | } else { 17 | result 18 | } 19 | } 20 | } 21 | 22 | /// Trait for performing Multi-Scalar Multiplication (MSM). 23 | /// 24 | /// MSM computes the sum: 25 | /// ```text 26 | /// result = Σ (scalar[i] * point[i]) 27 | /// ``` 28 | /// Implementations can override this with optimized algorithms for specific groups, 29 | /// while a default naive implementation is provided for all [`PrimeGroup`] types. 30 | pub trait VariableMultiScalarMul { 31 | /// The scalar field type associated with the group. 32 | type Scalar; 33 | /// The group element (point) type. 34 | type Point; 35 | 36 | /// Computes the multi-scalar multiplication (MSM) over the provided scalars and points. 37 | /// 38 | /// # Parameters 39 | /// - `scalars`: Slice of scalar multipliers. 40 | /// - `bases`: Slice of group elements to be multiplied by the scalars. 41 | /// 42 | /// # Returns 43 | /// The resulting group element from the MSM computation. 44 | /// 45 | /// # Panics 46 | /// Panics if `scalars.len() != bases.len()`. 47 | fn msm(scalars: &[Self::Scalar], bases: &[Self::Point]) -> Self; 48 | } 49 | 50 | impl VariableMultiScalarMul for G { 51 | type Scalar = G::Scalar; 52 | type Point = G; 53 | 54 | /// Default naive MSM implementation for any [`PrimeGroup`]. 55 | /// 56 | /// This method performs a straightforward sum of scalar multiplications: 57 | /// ```text 58 | /// Σ (scalar[i] * point[i]) 59 | /// ``` 60 | /// Complexity: **O(n)** group multiplications and additions. 61 | /// 62 | /// # Panics 63 | /// Panics if `scalars.len() != bases.len()`. 
64 | fn msm(scalars: &[Self::Scalar], bases: &[Self::Point]) -> Self { 65 | assert_eq!(scalars.len(), bases.len()); 66 | 67 | // NOTE: Based on the msm benchmark in this repo, msm_pippenger provides improvements over 68 | // msm_naive past a small constant size, but is significantly slower for very small MSMs. 69 | match scalars.len() { 70 | 0 => Self::identity(), 71 | 1..16 => msm_naive(bases, scalars), 72 | 16.. => msm_pippenger(bases, scalars), 73 | } 74 | } 75 | } 76 | 77 | /// A naive MSM implementation. 78 | fn msm_naive(bases: &[G], scalars: &[G::Scalar]) -> G { 79 | core::iter::zip(bases, scalars).map(|(g, x)| *g * x).sum() 80 | } 81 | 82 | /// An MSM implementation that employ's Pippenger's algorithm and works for all groups that 83 | /// implement `PrimeGroup`. 84 | fn msm_pippenger(bases: &[G], scalars: &[G::Scalar]) -> G { 85 | let c = ln_without_floats(scalars.len()); 86 | let num_bits = ::NUM_BITS as usize; 87 | // split `num_bits` into steps of `c`, but skip window 0. 88 | let windows = (0..num_bits).step_by(c); 89 | let buckets_num = 1 << c; 90 | 91 | let mut window_buckets = Vec::with_capacity(windows.len()); 92 | for window in windows { 93 | window_buckets.push((window, vec![G::identity(); buckets_num])); 94 | } 95 | 96 | for (scalar, base) in scalars.iter().zip(bases) { 97 | for (w, bucket) in window_buckets.iter_mut() { 98 | let scalar_repr = scalar.to_repr(); 99 | let scalar_bytes = scalar_repr.as_ref(); 100 | 101 | // Extract the relevant bits for this window 102 | let window_start = *w; 103 | let window_end = (window_start + c).min(scalar_bytes.len() * 8); 104 | 105 | if window_start >= scalar_bytes.len() * 8 { 106 | continue; // Window is beyond the scalar size 107 | } 108 | 109 | let mut scalar_bits = 0u64; 110 | 111 | // Extract bits from the byte representation 112 | for bit_idx in window_start..window_end { 113 | let byte_idx = bit_idx / 8; 114 | let bit_in_byte = bit_idx % 8; 115 | 116 | if byte_idx < scalar_bytes.len() { 117 | let bit 
= (scalar_bytes[byte_idx] >> bit_in_byte) & 1; 118 | scalar_bits |= (bit as u64) << (bit_idx - window_start); 119 | } 120 | } 121 | 122 | // If the scalar is non-zero, we update the corresponding bucket. 123 | // (Recall that `buckets` doesn't have a zero bucket.) 124 | if scalar_bits != 0 { 125 | bucket[(scalar_bits - 1) as usize].add_assign(base); 126 | } 127 | } 128 | } 129 | 130 | let mut window_sums = window_buckets.iter().rev().map(|(_w, bucket)| { 131 | // `running_sum` = sum_{j in i..num_buckets} bucket[j], 132 | // where we iterate backward from i = num_buckets to 0. 133 | let mut bucket_sum = G::identity(); 134 | let mut bucket_running_sum = G::identity(); 135 | bucket.iter().rev().for_each(|b| { 136 | bucket_running_sum += b; 137 | bucket_sum += &bucket_running_sum; 138 | }); 139 | bucket_sum 140 | }); 141 | 142 | // We're traversing windows from high to low. 143 | let first = window_sums.next().unwrap(); 144 | window_sums.fold(first, |mut total, sum_i| { 145 | for _ in 0..c { 146 | total = total.double(); 147 | } 148 | total + sum_i 149 | }) 150 | } 151 | 152 | #[cfg(test)] 153 | mod tests { 154 | use super::*; 155 | use ff::Field; 156 | use group::Group; 157 | 158 | #[test] 159 | fn test_msm() { 160 | use bls12_381::{G1Projective, Scalar}; 161 | use rand::thread_rng; 162 | 163 | let mut rng = thread_rng(); 164 | const N: usize = 1024; 165 | 166 | // Generate random scalars and bases 167 | let scalars: Vec = (0..N).map(|_| Scalar::random(&mut rng)).collect(); 168 | let bases: Vec = (0..N).map(|_| G1Projective::random(&mut rng)).collect(); 169 | 170 | // Compute MSM using our optimized implementation 171 | let msm_result = G1Projective::msm(&scalars, &bases); 172 | 173 | // Compute reference result using naive scalar multiplication and sum 174 | let naive_result = scalars 175 | .iter() 176 | .zip(bases.iter()) 177 | .map(|(scalar, base)| base * scalar) 178 | .fold(G1Projective::identity(), |acc, x| acc + x); 179 | 180 | assert_eq!( 181 | msm_result, 
naive_result, 182 | "MSM result should equal naive computation" 183 | ); 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /src/fiat_shamir.rs: -------------------------------------------------------------------------------- 1 | //! Fiat-Shamir transformation for [`SigmaProtocol`]s. 2 | //! 3 | //! This module defines [`Nizk`], a generic non-interactive Sigma protocol wrapper, 4 | //! based on applying the Fiat-Shamir heuristic using a codec. 5 | //! 6 | //! It transforms an interactive [`SigmaProtocol`] into a non-interactive one, 7 | //! by deriving challenges deterministically from previous protocol messages 8 | //! via a cryptographic sponge function (Codec). 9 | //! 10 | //! # Usage 11 | //! This struct is generic over: 12 | //! - `P`: the underlying Sigma protocol ([`SigmaProtocol`] trait). 13 | //! - `C`: the codec ([`Codec`] trait). 14 | 15 | use crate::errors::Error; 16 | use crate::traits::SigmaProtocol; 17 | use crate::{codec::Codec, traits::SigmaProtocolSimulator}; 18 | use alloc::vec::Vec; 19 | 20 | #[cfg(feature = "std")] 21 | use rand::{CryptoRng, RngCore}; 22 | #[cfg(not(feature = "std"))] 23 | use rand_core::{CryptoRng, RngCore}; 24 | 25 | type Transcript

= ( 26 |

::Commitment, 27 |

::Challenge, 28 |

::Response, 29 | ); 30 | 31 | /// A Fiat-Shamir transformation of a [`SigmaProtocol`] into a non-interactive proof. 32 | /// 33 | /// [`Nizk`] wraps an interactive Sigma protocol `P` 34 | /// and a hash-based codec `C`, to produce non-interactive proofs. 35 | /// 36 | /// It manages the domain separation, codec reset, 37 | /// proof generation, and proof verification. 38 | /// 39 | /// # Type Parameters 40 | /// - `P`: the Sigma protocol implementation. 41 | /// - `C`: the codec used for Fiat-Shamir. 42 | #[derive(Debug)] 43 | pub struct Nizk 44 | where 45 | P: SigmaProtocol, 46 | P::Challenge: PartialEq, 47 | C: Codec, 48 | { 49 | /// Current codec state. 50 | pub hash_state: C, 51 | /// Underlying interactive proof. 52 | pub interactive_proof: P, 53 | } 54 | 55 | impl Nizk 56 | where 57 | P: SigmaProtocol, 58 | P::Challenge: PartialEq, 59 | C: Codec + Clone, 60 | { 61 | /// Constructs a new [`Nizk`] instance. 62 | /// 63 | /// # Parameters 64 | /// - `iv`: Domain separation tag for the hash function (e.g., protocol name or context). 65 | /// - `instance`: An instance of the interactive Sigma protocol. 66 | /// 67 | /// # Returns 68 | /// A new [`Nizk`] that can generate and verify non-interactive proofs. 69 | pub fn new(session_identifier: &[u8], interactive_proof: P) -> Self { 70 | let hash_state = C::new( 71 | &interactive_proof.protocol_identifier(), 72 | session_identifier, 73 | interactive_proof.instance_label().as_ref(), 74 | ); 75 | Self { 76 | hash_state, 77 | interactive_proof, 78 | } 79 | } 80 | 81 | pub fn from_iv(iv: [u8; 64], interactive_proof: P) -> Self { 82 | let hash_state = C::from_iv(iv); 83 | Self { 84 | hash_state, 85 | interactive_proof, 86 | } 87 | } 88 | 89 | /// Generates a non-interactive proof for a witness. 90 | /// 91 | /// Executes the interactive protocol steps (commit, derive challenge via hash, respond), 92 | /// and checks the result locally for consistency. 
93 | /// 94 | /// # Parameters 95 | /// - `witness`: The secret witness for the Sigma protocol. 96 | /// - `rng`: A cryptographically secure random number generator. 97 | /// 98 | /// # Returns 99 | /// A [`Result`] containing a `Transcript

` on success. The `Transcript` includes: 100 | /// - `P::Commitment`: The prover's commitment(s). 101 | /// - `P::Challenge`: The challenge derived via Fiat-Shamir. 102 | /// - `P::Response`: The prover's response. 103 | /// 104 | /// # Panics 105 | /// Panics if local verification fails. 106 | fn prove( 107 | &self, 108 | witness: &P::Witness, 109 | rng: &mut (impl RngCore + CryptoRng), 110 | ) -> Result, Error> { 111 | let mut hash_state = self.hash_state.clone(); 112 | 113 | let (commitment, prover_state) = self.interactive_proof.prover_commit(witness, rng)?; 114 | // Fiat Shamir challenge 115 | let serialized_commitment = self.interactive_proof.serialize_commitment(&commitment); 116 | hash_state.prover_message(&serialized_commitment); 117 | let challenge = hash_state.verifier_challenge(); 118 | // Prover's response 119 | let response = self 120 | .interactive_proof 121 | .prover_response(prover_state, &challenge)?; 122 | 123 | Ok((commitment, challenge, response)) 124 | } 125 | 126 | /// Verifies a non-interactive proof using the Fiat-Shamir transformation. 127 | /// 128 | /// # Parameters 129 | /// - `commitment`: The commitment(s) sent by the prover. 130 | /// - `challenge`: The challenge allegedly derived via Fiat-Shamir. 131 | /// - `response`: The prover's response to the challenge. 132 | /// 133 | /// # Returns 134 | /// - `Ok(())` if the proof is valid. 135 | /// - `Err(Error::VerificationFailure)` if the challenge is invalid or the response fails to verify. 136 | /// 137 | /// # Errors 138 | /// - Returns [`Error::VerificationFailure`] if: 139 | /// - The challenge doesn't match the recomputed one from the commitment. 140 | /// - The response fails verification under the Sigma protocol. 
141 | fn verify( 142 | &self, 143 | commitment: &P::Commitment, 144 | challenge: &P::Challenge, 145 | response: &P::Response, 146 | ) -> Result<(), Error> { 147 | let mut hash_state = self.hash_state.clone(); 148 | 149 | // Recompute the challenge 150 | let serialized_commitment = self.interactive_proof.serialize_commitment(commitment); 151 | hash_state.prover_message(&serialized_commitment); 152 | let expected_challenge = hash_state.verifier_challenge(); 153 | // Verification of the proof 154 | match *challenge == expected_challenge { 155 | true => self 156 | .interactive_proof 157 | .verifier(commitment, challenge, response), 158 | false => Err(Error::VerificationFailure), 159 | } 160 | } 161 | /// Generates a batchable, serialized non-interactive proof. 162 | /// 163 | /// # Parameters 164 | /// - `witness`: The secret witness. 165 | /// - `rng`: A cryptographically secure random number generator. 166 | /// 167 | /// # Returns 168 | /// A serialized proof suitable for batch verification. 169 | /// 170 | /// # Panics 171 | /// Panics if serialization fails (should not happen under correct implementation). 172 | pub fn prove_batchable( 173 | &self, 174 | witness: &P::Witness, 175 | rng: &mut (impl RngCore + CryptoRng), 176 | ) -> Result, Error> { 177 | let (commitment, _challenge, response) = self.prove(witness, rng)?; 178 | let mut bytes = Vec::new(); 179 | bytes.extend_from_slice(&self.interactive_proof.serialize_commitment(&commitment)); 180 | bytes.extend_from_slice(&self.interactive_proof.serialize_response(&response)); 181 | Ok(bytes) 182 | } 183 | 184 | /// Verifies a batchable non-interactive proof. 185 | /// 186 | /// # Parameters 187 | /// - `proof`: A serialized batchable proof. 188 | /// 189 | /// # Returns 190 | /// - `Ok(())` if the proof is valid. 191 | /// - `Err(Error)` if deserialization or verification fails. 
192 | /// 193 | /// # Errors 194 | /// - Returns [`Error::VerificationFailure`] if: 195 | /// - The challenge doesn't match the recomputed one from the commitment. 196 | /// - The response fails verification under the Sigma protocol. 197 | pub fn verify_batchable(&self, proof: &[u8]) -> Result<(), Error> { 198 | let commitment = self.interactive_proof.deserialize_commitment(proof)?; 199 | let commitment_size = self 200 | .interactive_proof 201 | .serialize_commitment(&commitment) 202 | .len(); 203 | let response = self 204 | .interactive_proof 205 | .deserialize_response(&proof[commitment_size..])?; 206 | let response_size = self.interactive_proof.serialize_response(&response).len(); 207 | 208 | // Proof size check 209 | if proof.len() != commitment_size + response_size { 210 | return Err(Error::VerificationFailure); 211 | } 212 | 213 | // Assert correct proof size 214 | let total_expected_len = 215 | commitment_size + self.interactive_proof.serialize_response(&response).len(); 216 | if proof.len() != total_expected_len { 217 | return Err(Error::VerificationFailure); 218 | } 219 | 220 | let mut hash_state = self.hash_state.clone(); 221 | 222 | // Recompute the challenge 223 | let serialized_commitment = self.interactive_proof.serialize_commitment(&commitment); 224 | hash_state.prover_message(&serialized_commitment); 225 | let challenge = hash_state.verifier_challenge(); 226 | // Verification of the proof 227 | self.interactive_proof 228 | .verifier(&commitment, &challenge, &response) 229 | } 230 | } 231 | 232 | impl Nizk 233 | where 234 | P: SigmaProtocol + SigmaProtocolSimulator, 235 | P::Challenge: PartialEq, 236 | C: Codec + Clone, 237 | { 238 | /// Generates a compact serialized proof. 239 | /// 240 | /// Uses a more space-efficient representation compared to batchable proofs. 241 | /// 242 | /// # Parameters 243 | /// - `witness`: The secret witness. 244 | /// - `rng`: A cryptographically secure random number generator. 
245 | /// 246 | /// # Returns 247 | /// A compact, serialized proof. 248 | /// 249 | /// # Panics 250 | /// Panics if serialization fails. 251 | pub fn prove_compact( 252 | &self, 253 | witness: &P::Witness, 254 | rng: &mut (impl RngCore + CryptoRng), 255 | ) -> Result, Error> { 256 | let (_commitment, challenge, response) = self.prove(witness, rng)?; 257 | let mut bytes = Vec::new(); 258 | bytes.extend_from_slice(&self.interactive_proof.serialize_challenge(&challenge)); 259 | bytes.extend_from_slice(&self.interactive_proof.serialize_response(&response)); 260 | Ok(bytes) 261 | } 262 | 263 | /// Verifies a compact proof. 264 | /// 265 | /// Recomputes the commitment from the challenge and response, then verifies it. 266 | /// 267 | /// # Parameters 268 | /// - `proof`: A compact serialized proof. 269 | /// 270 | /// # Returns 271 | /// - `Ok(())` if the proof is valid. 272 | /// - `Err(Error)` if deserialization or verification fails. 273 | /// 274 | /// # Errors 275 | /// - Returns [`Error::VerificationFailure`] if: 276 | /// - Deserialization fails. 277 | /// - The recomputed commitment or response is invalid under the Sigma protocol. 
278 | pub fn verify_compact(&self, proof: &[u8]) -> Result<(), Error> { 279 | // Deserialize challenge and response from compact proof 280 | let challenge = self.interactive_proof.deserialize_challenge(proof)?; 281 | let challenge_size = self.interactive_proof.serialize_challenge(&challenge).len(); 282 | let response = self 283 | .interactive_proof 284 | .deserialize_response(&proof[challenge_size..])?; 285 | let response_size = self.interactive_proof.serialize_response(&response).len(); 286 | 287 | // Proof size check 288 | if proof.len() != challenge_size + response_size { 289 | return Err(Error::VerificationFailure); 290 | } 291 | 292 | // Assert correct proof size 293 | let total_expected_len = 294 | challenge_size + self.interactive_proof.serialize_response(&response).len(); 295 | if proof.len() != total_expected_len { 296 | return Err(Error::VerificationFailure); 297 | } 298 | 299 | // Compute the commitments 300 | let commitment = self 301 | .interactive_proof 302 | .simulate_commitment(&challenge, &response)?; 303 | // Verify the proof 304 | self.verify(&commitment, &challenge, &response) 305 | } 306 | } 307 | -------------------------------------------------------------------------------- /src/schnorr_protocol.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of the generic Schnorr Sigma Protocol over a [`group::Group`]. 2 | //! 3 | //! This module defines the [`SchnorrProof`] structure, which implements 4 | //! a Sigma protocol proving different types of discrete logarithm relations (eg. Schnorr, Pedersen's commitments) 5 | //! through a group morphism abstraction (see [Maurer09](https://crypto-test.ethz.ch/publications/files/Maurer09.pdf)). 
6 | 7 | use crate::errors::Error; 8 | use crate::group::serialization::{ 9 | deserialize_elements, deserialize_scalars, serialize_elements, serialize_scalars, 10 | }; 11 | use crate::linear_relation::CanonicalLinearRelation; 12 | use crate::traits::{SigmaProtocol, SigmaProtocolSimulator}; 13 | use alloc::vec::Vec; 14 | 15 | use ff::Field; 16 | use group::prime::PrimeGroup; 17 | #[cfg(feature = "std")] 18 | use rand::{CryptoRng, Rng, RngCore}; 19 | #[cfg(not(feature = "std"))] 20 | use rand_core::{CryptoRng, RngCore, RngCore as Rng}; 21 | 22 | impl SigmaProtocol for CanonicalLinearRelation { 23 | type Commitment = Vec; 24 | type ProverState = (Vec, Vec); 25 | type Response = Vec; 26 | type Witness = Vec; 27 | type Challenge = G::Scalar; 28 | 29 | /// Prover's first message: generates a commitment using random nonces. 30 | /// 31 | /// # Parameters 32 | /// - `witness`: A vector of scalars that satisfy the linear map relation. 33 | /// - `rng`: A cryptographically secure random number generator. 34 | /// 35 | /// # Returns 36 | /// - A tuple containing: 37 | /// - The commitment (a vector of group elements). 38 | /// - The prover state (random nonces and witness) used to compute the response. 39 | /// 40 | /// # Errors 41 | /// 42 | /// -[`Error::InvalidInstanceWitnessPair`] if the witness vector length is less than the number of scalar variables. 43 | /// If the witness vector is larger, extra variables are ignored. 44 | fn prover_commit( 45 | &self, 46 | witness: &Self::Witness, 47 | rng: &mut (impl RngCore + CryptoRng), 48 | ) -> Result<(Self::Commitment, Self::ProverState), Error> { 49 | if witness.len() < self.num_scalars { 50 | return Err(Error::InvalidInstanceWitnessPair); 51 | } 52 | 53 | // TODO: Check this when constructing the CanonicalLinearRelation instead of here. 
54 | // If the image is the identity, then the relation must be 55 | // trivial, or else the proof will be unsound 56 | if self 57 | .image_elements() 58 | .zip(self.linear_combinations.iter()) 59 | .any(|(x, c)| x == G::identity() && !c.is_empty()) 60 | { 61 | return Err(Error::InvalidInstanceWitnessPair); 62 | } 63 | 64 | let nonces = (0..self.num_scalars) 65 | .map(|_| G::Scalar::random(&mut *rng)) 66 | .collect::>(); 67 | 68 | let commitment = self.evaluate(&nonces); 69 | let prover_state = (nonces.to_vec(), witness.to_vec()); 70 | Ok((commitment, prover_state)) 71 | } 72 | 73 | /// Computes the prover's response (second message) using the challenge. 74 | /// 75 | /// # Parameters 76 | /// - `state`: The prover state returned by `prover_commit`, typically containing randomness and witness components. 77 | /// - `challenge`: The verifier's challenge scalar. 78 | /// 79 | /// # Returns 80 | /// - A vector of scalars forming the prover's response. 81 | /// 82 | /// # Errors 83 | /// - Returns [`Error::InvalidInstanceWitnessPair`] if the prover state vectors have incorrect lengths. 84 | fn prover_response( 85 | &self, 86 | prover_state: Self::ProverState, 87 | challenge: &Self::Challenge, 88 | ) -> Result { 89 | let (nonces, witness) = prover_state; 90 | 91 | let responses = nonces 92 | .into_iter() 93 | .zip(witness) 94 | .map(|(r, w)| r + w * challenge) 95 | .collect(); 96 | Ok(responses) 97 | } 98 | /// Verifies the correctness of the proof. 99 | /// 100 | /// # Parameters 101 | /// - `commitment`: The prover's commitment vector (group elements). 102 | /// - `challenge`: The challenge scalar. 103 | /// - `response`: The prover's response vector. 104 | /// 105 | /// # Returns 106 | /// - `Ok(())` if the proof is valid. 107 | /// - `Err(Error::VerificationFailure)` if the proof is invalid. 108 | /// - `Err(Error::InvalidInstanceWitnessPair)` if the lengths of commitment or response do not match the expected counts. 
109 | /// 110 | /// # Errors 111 | /// -[`Error::VerificationFailure`] if the computed relation 112 | /// does not hold for the provided challenge and response, indicating proof invalidity. 113 | /// -[`Error::InvalidInstanceWitnessPair`] if the commitment or response length is incorrect. 114 | fn verifier( 115 | &self, 116 | commitment: &Self::Commitment, 117 | challenge: &Self::Challenge, 118 | response: &Self::Response, 119 | ) -> Result<(), Error> { 120 | if commitment.len() != self.image.len() || response.len() != self.num_scalars { 121 | return Err(Error::InvalidInstanceWitnessPair); 122 | } 123 | 124 | let lhs = self.evaluate(response); 125 | let mut rhs = Vec::new(); 126 | for (img, g) in self.image_elements().zip(commitment) { 127 | rhs.push(img * challenge + g); 128 | } 129 | if lhs == rhs { 130 | Ok(()) 131 | } else { 132 | Err(Error::VerificationFailure) 133 | } 134 | } 135 | 136 | /// Serializes the prover's commitment into a byte vector. 137 | /// 138 | /// This function encodes the vector of group elements (the commitment) 139 | /// into a binary format suitable for transmission or storage. This is 140 | /// typically the first message sent in a Sigma protocol round. 141 | /// 142 | /// # Parameters 143 | /// - `commitment`: A vector of group elements representing the prover's commitment. 144 | /// 145 | /// # Returns 146 | /// A `Vec` containing the serialized group elements. 147 | fn serialize_commitment(&self, commitment: &Self::Commitment) -> Vec { 148 | serialize_elements(commitment) 149 | } 150 | 151 | /// Serializes the verifier's challenge scalar into bytes. 152 | /// 153 | /// Converts the challenge scalar into a fixed-length byte encoding. This can be used 154 | /// for Fiat–Shamir hashing, transcript recording, or proof transmission. 155 | /// 156 | /// # Parameters 157 | /// - `challenge`: The scalar challenge value. 158 | /// 159 | /// # Returns 160 | /// A `Vec` containing the serialized scalar. 
161 | fn serialize_challenge(&self, &challenge: &Self::Challenge) -> Vec { 162 | serialize_scalars::(&[challenge]) 163 | } 164 | 165 | /// Serializes the prover's response vector into a byte format. 166 | /// 167 | /// The response is a vector of scalars computed by the prover after receiving 168 | /// the verifier's challenge. This function encodes the vector into a format 169 | /// suitable for transmission or inclusion in a batchable proof. 170 | /// 171 | /// # Parameters 172 | /// - `response`: A vector of scalar responses computed by the prover. 173 | /// 174 | /// # Returns 175 | /// A `Vec` containing the serialized scalars. 176 | fn serialize_response(&self, response: &Self::Response) -> Vec { 177 | serialize_scalars::(response) 178 | } 179 | 180 | /// Deserializes a byte slice into a vector of group elements (commitment). 181 | /// 182 | /// This function reconstructs the prover’s commitment from its binary representation. 183 | /// The number of elements expected is determined by the number of linear constraints 184 | /// in the underlying linear relation. 185 | /// 186 | /// # Parameters 187 | /// - `data`: A byte slice containing the serialized commitment. 188 | /// 189 | /// # Returns 190 | /// A `Vec` containing the deserialized group elements. 191 | /// 192 | /// # Errors 193 | /// - Returns [`Error::VerificationFailure`] if the data is malformed or contains an invalid encoding. 194 | fn deserialize_commitment(&self, data: &[u8]) -> Result { 195 | deserialize_elements::(data, self.image.len()).ok_or(Error::VerificationFailure) 196 | } 197 | 198 | /// Deserializes a byte slice into a challenge scalar. 199 | /// 200 | /// This function expects a single scalar to be encoded and returns it as the verifier's challenge. 201 | /// 202 | /// # Parameters 203 | /// - `data`: A byte slice containing the serialized scalar challenge. 204 | /// 205 | /// # Returns 206 | /// The deserialized scalar challenge value. 
207 | /// 208 | /// # Errors 209 | /// - Returns [`Error::VerificationFailure`] if deserialization fails or data is invalid. 210 | fn deserialize_challenge(&self, data: &[u8]) -> Result { 211 | let scalars = deserialize_scalars::(data, 1).ok_or(Error::VerificationFailure)?; 212 | Ok(scalars[0]) 213 | } 214 | 215 | /// Deserializes a byte slice into the prover's response vector. 216 | /// 217 | /// The response vector contains scalars used in the second round of the Sigma protocol. 218 | /// The expected number of scalars matches the number of witness variables. 219 | /// 220 | /// # Parameters 221 | /// - `data`: A byte slice containing the serialized response. 222 | /// 223 | /// # Returns 224 | /// A vector of deserialized scalars. 225 | /// 226 | /// # Errors 227 | /// - Returns [`Error::VerificationFailure`] if the byte data is malformed or the length is incorrect. 228 | fn deserialize_response(&self, data: &[u8]) -> Result { 229 | deserialize_scalars::(data, self.num_scalars).ok_or(Error::VerificationFailure) 230 | } 231 | 232 | fn instance_label(&self) -> impl AsRef<[u8]> { 233 | self.label() 234 | } 235 | 236 | fn protocol_identifier(&self) -> [u8; 64] { 237 | const PROTOCOL_ID: &[u8; 32] = b"ietf sigma proof linear relation"; 238 | let mut protocol_id = [0; 64]; 239 | protocol_id[..32].clone_from_slice(PROTOCOL_ID); 240 | protocol_id 241 | } 242 | } 243 | 244 | impl SigmaProtocolSimulator for CanonicalLinearRelation 245 | where 246 | G: PrimeGroup, 247 | { 248 | /// Simulates a valid transcript for a given challenge without a witness. 249 | /// 250 | /// # Parameters 251 | /// - `challenge`: A scalar value representing the challenge. 252 | /// - `rng`: A cryptographically secure RNG. 253 | /// 254 | /// # Returns 255 | /// - A commitment and response forming a valid proof for the given challenge. 
256 | fn simulate_response(&self, rng: &mut R) -> Self::Response { 257 | let response: Vec = (0..self.num_scalars) 258 | .map(|_| G::Scalar::random(&mut *rng)) 259 | .collect(); 260 | response 261 | } 262 | 263 | /// Simulates a full proof transcript using a randomly generated challenge. 264 | /// 265 | /// # Parameters 266 | /// - `rng`: A cryptographically secure RNG. 267 | /// 268 | /// # Returns 269 | /// - A tuple `(commitment, challenge, response)` forming a valid proof. 270 | fn simulate_transcript( 271 | &self, 272 | rng: &mut R, 273 | ) -> Result<(Self::Commitment, Self::Challenge, Self::Response), Error> { 274 | let challenge = G::Scalar::random(&mut *rng); 275 | let response = self.simulate_response(&mut *rng); 276 | let commitment = self.simulate_commitment(&challenge, &response)?; 277 | Ok((commitment, challenge, response)) 278 | } 279 | 280 | /// Recomputes the commitment from the challenge and response (used in compact proofs). 281 | /// 282 | /// # Parameters 283 | /// - `challenge`: The challenge scalar issued by the verifier or derived via Fiat–Shamir. 284 | /// - `response`: The prover's response vector. 285 | /// 286 | /// # Returns 287 | /// - A vector of group elements representing the simulated commitment (one per linear constraint). 288 | /// 289 | /// # Errors 290 | /// - [`Error::InvalidInstanceWitnessPair`] if the response length does not match the expected number of scalars. 
291 | fn simulate_commitment( 292 | &self, 293 | challenge: &Self::Challenge, 294 | response: &Self::Response, 295 | ) -> Result { 296 | if response.len() != self.num_scalars { 297 | return Err(Error::InvalidInstanceWitnessPair); 298 | } 299 | 300 | let response_image = self.evaluate(response); 301 | let commitment = response_image 302 | .iter() 303 | .zip(self.image_elements()) 304 | .map(|(res, img)| *res - img * challenge) 305 | .collect::>(); 306 | Ok(commitment) 307 | } 308 | } 309 | -------------------------------------------------------------------------------- /tests/spec/vectors/duplexSpongeVectors.json: -------------------------------------------------------------------------------- 1 | { 2 | "test_absorb_empty_after_does_not_break_Keccak": { 3 | "Expected": "30837d887e28e7fccda401051fc14f666e79cd235ba1f27afae21969262d51d22acebf59c4d07e03f54e2a6a5141b9815da0513f98f487b7418d315f2613a9a4", 4 | "HashFunction": "Keccak-f[1600] overwrite mode", 5 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 6 | "Operations": [ 7 | { 8 | "data": "", 9 | "type": "absorb" 10 | }, 11 | { 12 | "data": "656d707479206d657373616765206265666f7265", 13 | "type": "absorb" 14 | }, 15 | { 16 | "length": 64, 17 | "type": "squeeze" 18 | } 19 | ] 20 | }, 21 | "test_absorb_empty_after_does_not_break_SHAKE128": { 22 | "Expected": "6e475edd3c400bec314d5891af570841a547c95d1a651adff9a8bfb70719a79b5afde316386da13fa83525662df3c5b2367d987bf3dc4199efdb9d0612572785", 23 | "HashFunction": "SHAKE128", 24 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 25 | "Operations": [ 26 | { 27 | "data": "", 28 | "type": "absorb" 29 | }, 30 | { 31 | "data": "656d707479206d657373616765206265666f7265", 32 | "type": "absorb" 33 | }, 34 | { 35 | "length": 64, 36 | "type": "squeeze" 37 | } 38 | ] 39 | }, 40 | 
"test_absorb_empty_before_does_not_break_Keccak": { 41 | "Expected": "e9b56085153c758ce1305371309bc39fc7e08cb82706ab766fa6c5869090e81f332844ebec52dde7b8c020e977d4e7589c8f93f733b8639c3bc728320730d324", 42 | "HashFunction": "Keccak-f[1600] overwrite mode", 43 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 44 | "Operations": [ 45 | { 46 | "data": "656d707479206d657373616765206166746572", 47 | "type": "absorb" 48 | }, 49 | { 50 | "data": "", 51 | "type": "absorb" 52 | }, 53 | { 54 | "length": 64, 55 | "type": "squeeze" 56 | } 57 | ] 58 | }, 59 | "test_absorb_empty_before_does_not_break_SHAKE128": { 60 | "Expected": "3953e577d9e5d4dc7b86d1a62e881f2d1eb750ea3550fcae315854d166136ae816ca922a4c7e54d711b8721c8969598449922122768c50313f47eef35020b73c", 61 | "HashFunction": "SHAKE128", 62 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 63 | "Operations": [ 64 | { 65 | "data": "656d707479206d657373616765206166746572", 66 | "type": "absorb" 67 | }, 68 | { 69 | "data": "", 70 | "type": "absorb" 71 | }, 72 | { 73 | "length": 64, 74 | "type": "squeeze" 75 | } 76 | ] 77 | }, 78 | "test_absorb_squeeze_absorb_consistency_Keccak": { 79 | "Expected": "c81f5779e63bf853c89a3108bd9c65aca437a7680f849f6c0bbdcd517d6b5dcf", 80 | "HashFunction": "Keccak-f[1600] overwrite mode", 81 | "IV": "656467652d636173652d746573742d646f6d61696e2d6162736f7262000000000000000000000000000000000000000000000000000000000000000000000000", 82 | "Operations": [ 83 | { 84 | "data": "696e7465726c65617665206669727374", 85 | "type": "absorb" 86 | }, 87 | { 88 | "length": 32, 89 | "type": "squeeze" 90 | }, 91 | { 92 | "data": "696e7465726c65617665207365636f6e64", 93 | "type": "absorb" 94 | }, 95 | { 96 | "length": 32, 97 | "type": "squeeze" 98 | } 99 | ] 100 | }, 101 | "test_absorb_squeeze_absorb_consistency_SHAKE128": { 102 | "Expected": 
"4d31a75f29851f9f15cd54fa6f2335cbe07b947b9d3c28092c1ba7315e295921", 103 | "HashFunction": "SHAKE128", 104 | "IV": "656467652d636173652d746573742d646f6d61696e2d6162736f7262000000000000000000000000000000000000000000000000000000000000000000000000", 105 | "Operations": [ 106 | { 107 | "data": "696e7465726c65617665206669727374", 108 | "type": "absorb" 109 | }, 110 | { 111 | "length": 32, 112 | "type": "squeeze" 113 | }, 114 | { 115 | "data": "696e7465726c65617665207365636f6e64", 116 | "type": "absorb" 117 | }, 118 | { 119 | "length": 32, 120 | "type": "squeeze" 121 | } 122 | ] 123 | }, 124 | "test_associativity_of_absorb_Keccak": { 125 | "Expected": "28536e7df3b7fd15b0b6ee38ebf930c3162dee584c655e6a8896d4fb2f3a6cef", 126 | "HashFunction": "Keccak-f[1600] overwrite mode", 127 | "IV": "6162736f72622d6173736f6369617469766974792d646f6d61696e00000000000000000000000000000000000000000000000000000000000000000000000000", 128 | "Operations": [ 129 | { 130 | "data": "6173736f63696174697669747920746573742066756c6c", 131 | "type": "absorb" 132 | }, 133 | { 134 | "length": 32, 135 | "type": "squeeze" 136 | } 137 | ] 138 | }, 139 | "test_associativity_of_absorb_SHAKE128": { 140 | "Expected": "c0faa351141d60678dceff4f3a5760381bb335ad113958b70edf7b242df01c8a", 141 | "HashFunction": "SHAKE128", 142 | "IV": "6162736f72622d6173736f6369617469766974792d646f6d61696e00000000000000000000000000000000000000000000000000000000000000000000000000", 143 | "Operations": [ 144 | { 145 | "data": "6173736f63696174697669747920746573742066756c6c", 146 | "type": "absorb" 147 | }, 148 | { 149 | "length": 32, 150 | "type": "squeeze" 151 | } 152 | ] 153 | }, 154 | "test_iv_affects_output_Keccak": { 155 | "Expected": "ad8446208ba3a95a5673bc4e8885074d5e6b48836cee66b64343bbea05bd3369", 156 | "HashFunction": "Keccak-f[1600] overwrite mode", 157 | "IV": "646f6d61696e2d6f6e652d646966666572732d686572650000000000000000000000000000000000000000000000000000000000000000000000000000000000", 158 | "Operations": [ 159 | { 160 
| "data": "697620646966666572656e63652074657374", 161 | "type": "absorb" 162 | }, 163 | { 164 | "length": 32, 165 | "type": "squeeze" 166 | } 167 | ] 168 | }, 169 | "test_iv_affects_output_SHAKE128": { 170 | "Expected": "7650642267cc544abf0e01ce28e2595aec4c2f5b5e5e3720ab551449637b35f2", 171 | "HashFunction": "SHAKE128", 172 | "IV": "646f6d61696e2d6f6e652d646966666572732d686572650000000000000000000000000000000000000000000000000000000000000000000000000000000000", 173 | "Operations": [ 174 | { 175 | "data": "697620646966666572656e63652074657374", 176 | "type": "absorb" 177 | }, 178 | { 179 | "length": 32, 180 | "type": "squeeze" 181 | } 182 | ] 183 | }, 184 | "test_keccak_duplex_sponge_Keccak": { 185 | "Expected": "920dc791ed15ee912e3d8595b0b8718380f6678c5601128555dfeaecea0ec923597e0b9db5d5952c17ddf94eba5f8dff9e50ea581ef40d749086dbf5d1b0a9d4", 186 | "HashFunction": "Keccak-f[1600] overwrite mode", 187 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 188 | "Operations": [ 189 | { 190 | "data": "6261736963206475706c65782073706f6e67652074657374", 191 | "type": "absorb" 192 | }, 193 | { 194 | "length": 64, 195 | "type": "squeeze" 196 | } 197 | ] 198 | }, 199 | "test_keccak_duplex_sponge_SHAKE128": { 200 | "Expected": "f845c3ef4231a4d6e09c29b1eea0055842246fd57558fd7d93e1302f7799dd9593d2e4d06eda72d5252ca5b2feff4b8cb324ec96673a7417cf70fa77b1898991", 201 | "HashFunction": "SHAKE128", 202 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 203 | "Operations": [ 204 | { 205 | "data": "6261736963206475706c65782073706f6e67652074657374", 206 | "type": "absorb" 207 | }, 208 | { 209 | "length": 64, 210 | "type": "squeeze" 211 | } 212 | ] 213 | }, 214 | "test_multiple_blocks_absorb_squeeze_Keccak": { 215 | "Expected": 
"711ed4286a9656423a8f95b24208cf298fc7023c8b3254f5bad04c21b752f524a7c4fcd0559d7ce6f749a4a144fb76dc42089bc56c97b1354370e6f66ff80cbbeedd7e2b9506de87e54099bb04a172e099925a200b1b8a35a7b5c5f70cb725bd3ca840bc2bfd25498914cf5b02c4d65ea6f84aa82db6e7411304c69622ef90011955dbffb86abedbd918608273965788ed2eb40d1168b6f6bce287b5791f9e6bdfc1b298e4179ae339d390b08ef5b049723b159f6c2646ff6f4a9add7a268f99839c24ae79c6c0115c88ab852fdbc253e2e3d21033957428d603c71bc3ac8b3356ed8cc6a46519a4ab0825916e5c6591ec97036a6c27779c28fc736a399d2f1fff964c8c4afd754eec0c790c0d6f8959049e2337b10765c8b72dc1815238f7088407b818da90f61399f96ab3a632a6a2d14638d1c7d91ba693e8099bfe6a4cebe977500ec439a6bb07f3c52484cb39ebf58df05c68eefcf9d796c747051356d86ffaa576484fc000b02332f229c0bcf6044c1b6cdba0d0d6a828cc16194ab0aae9d646ffb95807a74a6a4101b3d7b4385cea43acdea0fde6ada9621075838157fdd4e2e89d5fc6bebc9d25a5738ac6eaa550cd6f6231b5897d64e086727b5430f21fa5759dc58076ad922e8b4c751210c97b1db01942474bce5d242ad14124da5ec79a6b841acaf290097455129b5b40d136fca40bff0dd23efe53993dd610c025fa224f28e58fc675b59ec52513b402729030bb0ebd65aca21175b1ae11228018389aac78a371ed72c8d466a171ff5e6b7b12614731c93071a5d6f41afc48c5b54c0a5d6d1f8aef1f32ef0207a2aa944012cbfd6568382a", 216 | "HashFunction": "Keccak-f[1600] overwrite mode", 217 | "IV": "6d756c74692d626c6f636b2d6162736f72622d746573740000000000000000000000000000000000000000000000000000000000000000000000000000000000", 218 | "Operations": [ 219 | { 220 | "data": 
"abababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababab", 221 | "type": "absorb" 222 | }, 223 | { 224 | "length": 600, 225 | "type": "squeeze" 226 | } 227 | ] 228 | }, 229 | "test_multiple_blocks_absorb_squeeze_SHAKE128": { 230 | "Expected": 
"526d4f6cfca230e0654bf8749bddc0f4416a8a164c50f3c1b0bc1d5272a88b9a524e73cafad76691a29c0e03a5255fd8fb9d778ef5a0c8c9e11e003011d256bf92dd36233e4c6c360baca0f8ac305d459adb1231a801742669efa051396e96417814448b5328336d028a62dbddf24d1bb68496d27f1944eb24d4b2812d9ad4eae6c260b720c44ed2be8bfeeed3acc2640edbab987674f2cef8ceacda1e04f254170aba4241dabc6364ed5afc09b58205682d5e8413bf5f9d97e9c799b97876ccd1c48d86759ade5871acc4c5d41d37f2b1843c8b6f9e0bade78342d56f9b1e8232d4c7553674d889e69fe24dea31f42f0b02b70161876ceb12cc0b36868c262cbebb5e815a1eceaee97aed3402a518287c32f2f469c3a38a17afd0f0d82433acf695ae143ded9412b4e6b6144bd6d4be6bb7de33c05f560480c63aa89336954f1cf5992399e6ed59d406adb4497bb88aa897fd3d65646cf86e796da4f193c418a74d662f57e0e0c775386abdace02157e519ba54495555145016c550ff32004981d0e34f0abe7d814ac4fe25260473ffa87460a736f20954e8d3b9f16140e79451953fe6cfc222cba6ad4f85a2e2efd6ff8f5fef65d8480e6af40baab298c4de57f30d08a5e1b4c10d123a5af7702ff26ba9a84a6fe92f48391b23a7e8e8cb06deda74d1b10870611995f6bfe4df60320a0b7f2c891cad5a5645ecec80868ed568591a74dafb35cabb42dae1a1085269b655db1ebf09929f63d5af775a24e43759f673b83aeefef382bc2b7bf175bb9d90e77911466ffb3b2307547765cd5adc30a6b07881a88fd1511e5f8d2dcc4347c076e6c79676d8df", 231 | "HashFunction": "SHAKE128", 232 | "IV": "6d756c74692d626c6f636b2d6162736f72622d746573740000000000000000000000000000000000000000000000000000000000000000000000000000000000", 233 | "Operations": [ 234 | { 235 | "data": 
"abababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababab", 236 | "type": "absorb" 237 | }, 238 | { 239 | "length": 600, 240 | "type": "squeeze" 241 | } 242 | ] 243 | }, 244 | "test_squeeze_zero_after_behavior_Keccak": { 245 | "Expected": "8532fba67d7e5a2241eab397cf2e26a3e5b4f1f54f2a7e3d47f17448e0149354d5f54c43c88d7c45de8aadc24c83e519cec9286e567b5401e4072065d6c8bd3e", 246 | "HashFunction": "Keccak-f[1600] overwrite mode", 247 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 248 | "Operations": [ 249 | { 250 | "length": 0, 251 | "type": "squeeze" 252 | }, 253 | { 254 | "data": "7a65726f2073717565657a65206166746572", 255 | "type": "absorb" 256 | }, 257 | { 258 | "length": 64, 259 | "type": "squeeze" 260 | } 261 | ] 262 | }, 263 | 
"test_squeeze_zero_after_behavior_SHAKE128": { 264 | "Expected": "bd9278e6f65cb854935b3f6b2c51ab158be8ea09744509519b8f06f0c501d07c429e37f232b6f0955b620ff6226d9d02e4817b1447e7309023a3a14f735876ec", 265 | "HashFunction": "SHAKE128", 266 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 267 | "Operations": [ 268 | { 269 | "length": 0, 270 | "type": "squeeze" 271 | }, 272 | { 273 | "data": "7a65726f2073717565657a65206166746572", 274 | "type": "absorb" 275 | }, 276 | { 277 | "length": 64, 278 | "type": "squeeze" 279 | } 280 | ] 281 | }, 282 | "test_squeeze_zero_behavior_Keccak": { 283 | "Expected": "affcac33ae7d12f1d986e109175fbc46be1821f77f67779e2357232f88b5959e0c244a67099ac4a7f7706bcfc5803b7ad0affa9ef7a5f93615b2df82e900dc3f", 284 | "HashFunction": "Keccak-f[1600] overwrite mode", 285 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 286 | "Operations": [ 287 | { 288 | "length": 0, 289 | "type": "squeeze" 290 | }, 291 | { 292 | "data": "7a65726f2073717565657a652074657374", 293 | "type": "absorb" 294 | }, 295 | { 296 | "length": 0, 297 | "type": "squeeze" 298 | }, 299 | { 300 | "length": 64, 301 | "type": "squeeze" 302 | } 303 | ] 304 | }, 305 | "test_squeeze_zero_behavior_SHAKE128": { 306 | "Expected": "4cf7f008057b63cb615547a143f42cf793b86b239f404d2f28b3f09197d850eb029df3024ad468be5aceb2fa60e9fb7add98436236be69ddb34314ce7a905f23", 307 | "HashFunction": "SHAKE128", 308 | "IV": "756e69745f74657374735f6b656363616b5f69760000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 309 | "Operations": [ 310 | { 311 | "length": 0, 312 | "type": "squeeze" 313 | }, 314 | { 315 | "data": "7a65726f2073717565657a652074657374", 316 | "type": "absorb" 317 | }, 318 | { 319 | "length": 0, 320 | "type": "squeeze" 321 | }, 322 | { 323 | "length": 64, 324 | "type": "squeeze" 325 | 
} 326 | ] 327 | } 328 | } -------------------------------------------------------------------------------- /tests/test_validation_criteria.rs: -------------------------------------------------------------------------------- 1 | //! Validation criteria tests for sigma protocols 2 | //! 3 | //! This module contains tests for validating both instances and proofs, 4 | //! ensuring that malformed inputs are properly rejected. 5 | 6 | #[cfg(test)] 7 | mod instance_validation { 8 | use bls12_381::{G1Projective as G, Scalar}; 9 | use ff::Field; 10 | use group::Group; 11 | use sigma_proofs::linear_relation::{CanonicalLinearRelation, LinearRelation}; 12 | 13 | #[test] 14 | fn test_unassigned_group_vars() { 15 | // Create a linear relation with unassigned group variables 16 | let mut relation = LinearRelation::::new(); 17 | 18 | // Allocate scalars and elements 19 | let [var_x] = relation.allocate_scalars(); 20 | let [var_g, var_x_g] = relation.allocate_elements(); 21 | 22 | // Set only one element, leaving var_g unassigned 23 | let x_val = G::generator() * Scalar::from(42u64); 24 | relation.set_element(var_x_g, x_val); 25 | 26 | // Add equation: X = x * G (but G is not set) 27 | relation.append_equation(var_x_g, var_x * var_g); 28 | 29 | // Try to convert to canonical form - should fail 30 | let result = CanonicalLinearRelation::try_from(&relation); 31 | assert!(result.is_err()); 32 | } 33 | 34 | #[test] 35 | #[allow(non_snake_case)] 36 | fn test_zero_image() { 37 | // Create a linear relation with zero elements in the image 38 | // 0 = x * G (which is invalid) 39 | let mut relation = LinearRelation::::new(); 40 | let [var_x] = relation.allocate_scalars(); 41 | let [var_G] = relation.allocate_elements(); 42 | let var_X = relation.allocate_eq(var_G * var_x); 43 | relation.set_element(var_G, G::generator()); 44 | relation.set_element(var_X, G::identity()); 45 | let result = CanonicalLinearRelation::try_from(&relation); 46 | assert!(result.is_err()); 47 | 48 | // Create a 
trivially valid linear relation with zero elements in the image 49 | // 0 = 0*B 50 | let mut relation = LinearRelation::::new(); 51 | let [var_B] = relation.allocate_elements(); 52 | let var_X = relation.allocate_eq(var_B * Scalar::from(0)); 53 | relation.set_element(var_B, G::generator()); 54 | relation.set_element(var_X, G::identity()); 55 | let result = CanonicalLinearRelation::try_from(&relation); 56 | assert!(result.is_ok()); 57 | 58 | // Create a valid linear relation with zero elements in the image 59 | // 0 = 0*x*C 60 | let mut relation = LinearRelation::::new(); 61 | let [var_x] = relation.allocate_scalars(); 62 | let [var_C] = relation.allocate_elements(); 63 | let var_X = relation.allocate_eq(var_C * var_x * Scalar::from(0)); 64 | relation.set_element(var_C, G::generator()); 65 | relation.set_element(var_X, G::identity()); 66 | let result = CanonicalLinearRelation::try_from(&relation); 67 | assert!(result.is_ok()); 68 | } 69 | 70 | #[test] 71 | #[allow(non_snake_case)] 72 | pub fn test_degenerate_equation() { 73 | // This relation should fail for two reasons: 74 | // 1. because var_B is not assigned 75 | let mut relation = LinearRelation::::new(); 76 | let x = relation.allocate_scalar(); 77 | let var_B = relation.allocate_element(); 78 | let var_X = relation.allocate_eq((x + (-Scalar::ONE)) * var_B + (-var_B)); 79 | relation.set_element(var_X, G::identity()); 80 | assert!(CanonicalLinearRelation::try_from(&relation).is_err()); 81 | 82 | // 2. 
because var_X is not assigned 83 | let mut relation = LinearRelation::::new(); 84 | let x = relation.allocate_scalar(); 85 | let var_B = relation.allocate_element(); 86 | let _var_X = relation.allocate_eq((x + (-Scalar::ONE)) * var_B + (-var_B)); 87 | relation.set_element(var_B, G::generator()); 88 | assert!(CanonicalLinearRelation::try_from(&relation).is_err()); 89 | } 90 | 91 | #[test] 92 | fn test_inconsistent_equation_count() { 93 | // Create a relation with mismatched equations and image elements 94 | let mut relation = LinearRelation::::new(); 95 | let [var_x] = relation.allocate_scalars(); 96 | let [var_g, var_h] = relation.allocate_elements(); 97 | relation.set_elements([ 98 | (var_g, G::generator()), 99 | (var_h, G::generator() * Scalar::from(2u64)), 100 | ]); 101 | 102 | // Add two equations but only one image element 103 | let var_img_1 = relation.allocate_eq(var_x * var_g + var_h); 104 | relation.allocate_eq(var_x * var_h + var_g); 105 | relation.set_element(var_g, G::generator()); 106 | relation.set_element(var_h, G::generator() * Scalar::from(2)); 107 | relation.set_element(var_img_1, G::generator() * Scalar::from(3)); 108 | assert!(relation.canonical().is_err()); 109 | } 110 | 111 | #[test] 112 | #[allow(non_snake_case)] 113 | fn test_empty_string() { 114 | let rng = &mut rand::thread_rng(); 115 | let relation = LinearRelation::::new(); 116 | let nizk = relation.into_nizk(b"test_session").unwrap(); 117 | let narg_string = nizk.prove_batchable(&vec![], rng).unwrap(); 118 | assert!(narg_string.is_empty()); 119 | 120 | let mut relation = LinearRelation::::new(); 121 | let var_B = relation.allocate_element(); 122 | let var_C = relation.allocate_eq(var_B * Scalar::from(1)); 123 | relation.set_elements([(var_B, G::generator()), (var_C, G::generator())]); 124 | assert!(CanonicalLinearRelation::try_from(&relation).is_ok()); 125 | } 126 | 127 | #[test] 128 | #[allow(non_snake_case)] 129 | fn test_statement_without_witness() { 130 | let pub_scalar = 
Scalar::from(42); 131 | let A = G::generator(); 132 | let B = G::generator() * Scalar::from(42); 133 | let C = B * pub_scalar + A * Scalar::from(3); 134 | 135 | let X = G::generator() * Scalar::from(4); 136 | 137 | // The following relation is trivially invalid. 138 | // That is, we know that no witness will ever satisfy it. 139 | let mut linear_relation = LinearRelation::::new(); 140 | let B_var = linear_relation.allocate_element(); 141 | let C_var = linear_relation.allocate_eq(B_var); 142 | linear_relation.set_elements([(B_var, B), (C_var, C)]); 143 | assert!(linear_relation 144 | .canonical() 145 | .err() 146 | .unwrap() 147 | .message 148 | .contains("trivially false constraint")); 149 | 150 | // Also in this case, we know that no witness will ever satisfy the relation. 151 | // X != B * pub_scalar + A * 3 152 | let mut linear_relation = LinearRelation::::new(); 153 | let [B_var, A_var] = linear_relation.allocate_elements(); 154 | let X_var = linear_relation.allocate_eq(B_var * pub_scalar + A_var * Scalar::from(3)); 155 | linear_relation.set_elements([(B_var, B), (A_var, A), (X_var, X)]); 156 | assert!(linear_relation 157 | .canonical() 158 | .err() 159 | .unwrap() 160 | .message 161 | .contains("trivially false constraint")); 162 | 163 | // The following relation is valid and should pass. 164 | let mut linear_relation = LinearRelation::::new(); 165 | let B_var = linear_relation.allocate_element(); 166 | let C_var = linear_relation.allocate_eq(B_var); 167 | linear_relation.set_elements([(B_var, B), (C_var, B)]); 168 | assert!(linear_relation.canonical().is_ok()); 169 | 170 | // The following relation is valid and should pass. 
171 | // C = B * pub_scalar + A * 3 172 | let mut linear_relation = LinearRelation::::new(); 173 | let [B_var, A_var] = linear_relation.allocate_elements(); 174 | let C_var = linear_relation.allocate_eq(B_var * pub_scalar + A_var * Scalar::from(3)); 175 | linear_relation.set_elements([(B_var, B), (A_var, A), (C_var, C)]); 176 | assert!(linear_relation.canonical().is_ok()); 177 | 178 | // The following relation is for 179 | // X = B * x + B * pub_scalar + A * 3 180 | // and should be considered a valid instance. 181 | let mut linear_relation = LinearRelation::::new(); 182 | let x_var = linear_relation.allocate_scalar(); 183 | let [B_var, A_var] = linear_relation.allocate_elements(); 184 | let X_var = linear_relation 185 | .allocate_eq(B_var * x_var + B_var * pub_scalar + A_var * Scalar::from(3)); 186 | linear_relation.set_elements([(B_var, B), (A_var, A), (X_var, X)]); 187 | assert!(linear_relation.canonical().is_ok()); 188 | } 189 | 190 | #[test] 191 | #[allow(non_snake_case)] 192 | fn test_statement_with_trivial_image() { 193 | let mut rng = rand::thread_rng(); 194 | let mut linear_relation = LinearRelation::new(); 195 | 196 | let [x_var, y_var] = linear_relation.allocate_scalars(); 197 | let [Z_var, A_var, B_var, C_var] = linear_relation.allocate_elements(); 198 | linear_relation.append_equation(Z_var, x_var * A_var + y_var * B_var + C_var); 199 | 200 | let [x, y] = [Scalar::random(&mut rng), Scalar::random(&mut rng)]; 201 | let Z = G::identity(); 202 | let A = G::random(&mut rng); 203 | let B = G::generator(); 204 | let C = -x * A - y * B; 205 | 206 | // The equation 0 = x*A + y*B + C 207 | // Has a non-trivial solution. 208 | linear_relation.set_elements([(Z_var, Z), (A_var, A), (B_var, B), (C_var, C)]); 209 | assert!(linear_relation.canonical().is_ok()); 210 | 211 | // Adding more non-trivial statements does not affect the validity of the relation. 
212 | let F_var = linear_relation.allocate_element(); 213 | let f_var = linear_relation.allocate_scalar(); 214 | linear_relation.append_equation(F_var, f_var * A_var); 215 | let f = Scalar::random(&mut rng); 216 | let F = A * f; 217 | linear_relation.set_elements([(F_var, F), (A_var, A)]); 218 | assert!(linear_relation.canonical().is_ok()); 219 | } 220 | } 221 | 222 | #[cfg(test)] 223 | mod proof_validation { 224 | use bls12_381::{G1Projective as G, Scalar}; 225 | use ff::Field; 226 | use rand::RngCore; 227 | use sigma_proofs::codec::KeccakByteSchnorrCodec; 228 | use sigma_proofs::composition::{ComposedRelation, ComposedWitness}; 229 | use sigma_proofs::linear_relation::{CanonicalLinearRelation, LinearRelation}; 230 | use sigma_proofs::Nizk; 231 | 232 | type TestNizk = Nizk, KeccakByteSchnorrCodec>; 233 | 234 | /// Helper function to create a simple discrete log proof 235 | fn create_valid_proof() -> (Vec, TestNizk) { 236 | let mut rng = rand::thread_rng(); 237 | 238 | // Create a simple discrete log relation 239 | let mut relation = LinearRelation::::new(); 240 | let [var_x] = relation.allocate_scalars(); 241 | let [var_g, var_x_g] = relation.allocate_elements::<2>(); 242 | 243 | let x = Scalar::from(42u64); 244 | let x_g = G::generator() * x; 245 | 246 | relation.set_elements([(var_g, G::generator()), (var_x_g, x_g)]); 247 | relation.append_equation(var_x_g, var_x * var_g); 248 | 249 | let nizk = TestNizk::new(b"test_session", relation.canonical().unwrap()); 250 | 251 | let witness = vec![x]; 252 | let proof = nizk.prove_batchable(&witness, &mut rng).unwrap(); 253 | 254 | (proof, nizk) 255 | } 256 | 257 | #[test] 258 | fn test_proof_bitflip() { 259 | let (mut proof, nizk) = create_valid_proof(); 260 | 261 | // Verify the original proof is valid 262 | assert!(nizk.verify_batchable(&proof).is_ok()); 263 | 264 | // Test bitflips at various positions 265 | for pos in 0..proof.len() { 266 | let original_byte = proof[pos]; 267 | 268 | // Flip each bit in the byte 269 | 
for bit in 0..8 { 270 | proof[pos] ^= 1 << bit; 271 | 272 | // Verification should fail 273 | assert!( 274 | nizk.verify_batchable(&proof).is_err(), 275 | "Proof verification should fail with bit {bit} flipped at position {pos}" 276 | ); 277 | 278 | // Restore original byte 279 | proof[pos] = original_byte; 280 | } 281 | } 282 | } 283 | 284 | #[test] 285 | fn test_proof_append_bytes() { 286 | let (mut proof, nizk) = create_valid_proof(); 287 | 288 | // Verify the original proof is valid 289 | assert!(nizk.verify_batchable(&proof).is_ok()); 290 | 291 | // Test appending various amounts of bytes 292 | let append_sizes = [1, 8, 32, 100]; 293 | 294 | for &size in &append_sizes { 295 | let original_len = proof.len(); 296 | 297 | // Append random bytes 298 | let mut rng = rand::thread_rng(); 299 | let mut extra_bytes = vec![0u8; size]; 300 | rng.fill_bytes(&mut extra_bytes); 301 | proof.extend_from_slice(&extra_bytes); 302 | 303 | // Verification should fail 304 | assert!( 305 | nizk.verify_batchable(&proof).is_err(), 306 | "Proof verification should fail with {size} bytes appended" 307 | ); 308 | 309 | // Restore original proof 310 | proof.truncate(original_len); 311 | } 312 | } 313 | 314 | #[test] 315 | fn test_proof_prepend_bytes() { 316 | let (proof, nizk) = create_valid_proof(); 317 | 318 | // Verify the original proof is valid 319 | assert!(nizk.verify_batchable(&proof).is_ok()); 320 | 321 | // Test prepending various amounts of bytes 322 | let prepend_sizes = [1, 8, 32, 100]; 323 | 324 | for &size in &prepend_sizes { 325 | // Create new proof with prepended bytes 326 | let mut rng = rand::thread_rng(); 327 | let mut prepended_proof = vec![0u8; size]; 328 | rng.fill_bytes(&mut prepended_proof); 329 | prepended_proof.extend_from_slice(&proof); 330 | 331 | // Verification should fail 332 | assert!( 333 | nizk.verify_batchable(&prepended_proof).is_err(), 334 | "Proof verification should fail with {size} bytes prepended" 335 | ); 336 | } 337 | } 338 | 339 | #[test] 340 
| fn test_proof_truncation() { 341 | let (proof, nizk) = create_valid_proof(); 342 | 343 | // Verify the original proof is valid 344 | assert!(nizk.verify_batchable(&proof).is_ok()); 345 | 346 | // Test truncating various amounts 347 | let truncate_sizes = [1, 8, proof.len() / 2, proof.len() - 1]; 348 | 349 | for &size in &truncate_sizes { 350 | if size < proof.len() { 351 | let truncated_proof = &proof[..proof.len() - size]; 352 | 353 | // Verification should fail 354 | assert!( 355 | nizk.verify_batchable(truncated_proof).is_err(), 356 | "Proof verification should fail with {size} bytes truncated" 357 | ); 358 | } 359 | } 360 | } 361 | 362 | #[test] 363 | fn test_empty_proof() { 364 | let (_, nizk) = create_valid_proof(); 365 | let empty_proof = vec![]; 366 | 367 | // Verification should fail for empty proof 368 | assert!( 369 | nizk.verify_batchable(&empty_proof).is_err(), 370 | "Proof verification should fail for empty proof" 371 | ); 372 | } 373 | 374 | #[test] 375 | fn test_random_bytes_as_proof() { 376 | let (valid_proof, nizk) = create_valid_proof(); 377 | let proof_len = valid_proof.len(); 378 | 379 | // Test with completely random bytes of the same length 380 | let mut rng = rand::thread_rng(); 381 | let mut random_proof = vec![0u8; proof_len]; 382 | rng.fill_bytes(&mut random_proof); 383 | 384 | // Verification should fail 385 | assert!( 386 | nizk.verify_batchable(&random_proof).is_err(), 387 | "Proof verification should fail for random bytes" 388 | ); 389 | } 390 | 391 | #[test] 392 | #[allow(non_snake_case)] 393 | fn test_or_relation() { 394 | // This test reproduces the issue from sigma_compiler's simple_or test 395 | // where an OR relation fails verification when using the wrong branch 396 | let mut rng = rand::thread_rng(); 397 | 398 | // Create generators 399 | // For this test, we'll use two different multiples of the generator 400 | let B = G::generator(); 401 | let A = B * Scalar::from(42u64); // Different generator 402 | 403 | // Create 
scalars 404 | let x = Scalar::random(&mut rng); 405 | let y = Scalar::random(&mut rng); 406 | 407 | // Set C = y*B (so the second branch should be satisfied) 408 | let C = B * y; 409 | 410 | // Create the first branch: C = x*A 411 | let mut lr1 = LinearRelation::new(); 412 | let x_var = lr1.allocate_scalar(); 413 | let A_var = lr1.allocate_element(); 414 | let eq1 = lr1.allocate_eq(x_var * A_var); 415 | lr1.set_element(A_var, A); 416 | lr1.set_element(eq1, C); 417 | // Create the second branch: C = y*B 418 | let mut lr2 = LinearRelation::new(); 419 | let y_var = lr2.allocate_scalar(); 420 | let B_var = lr2.allocate_element(); 421 | let eq2 = lr2.allocate_eq(y_var * B_var); 422 | lr2.set_element(B_var, B); 423 | lr2.set_element(eq2, C); 424 | 425 | // Create OR composition 426 | let or_relation = 427 | ComposedRelation::or([lr1.canonical().unwrap(), lr2.canonical().unwrap()]); 428 | let nizk = or_relation.into_nizk(b"test_or_relation"); 429 | 430 | // Create a correct witness for branch 1 (C = y*B) 431 | // Note: x is NOT a valid witness for branch 0 because C ≠ x*A 432 | let witness_correct = ComposedWitness::Or(vec![ 433 | ComposedWitness::Simple(vec![x]), 434 | ComposedWitness::Simple(vec![y]), 435 | ]); 436 | let proof = nizk.prove_batchable(&witness_correct, &mut rng).unwrap(); 437 | assert!( 438 | nizk.verify_batchable(&proof).is_ok(), 439 | "Valid proof should verify" 440 | ); 441 | 442 | // Now test with ONLY invalid witnesses (neither branch satisfied) 443 | // Branch 0 requires C = x*A, but we use random x 444 | // Branch 1 requires C = y*B, but we use a different random value 445 | let wrong_y = Scalar::random(&mut rng); 446 | let witness_wrong = ComposedWitness::Or(vec![ 447 | ComposedWitness::Simple(vec![x]), 448 | ComposedWitness::Simple(vec![wrong_y]), 449 | ]); 450 | let proof_result = nizk.prove_batchable(&witness_wrong, &mut rng); 451 | assert!( 452 | proof_result.is_err(), 453 | "Proof should fail with invalid witnesses" 454 | ); 455 | 456 | // 
Create a correct witness for both branches 457 | let witness_correct = ComposedWitness::Or(vec![ 458 | ComposedWitness::Simple(vec![y]), 459 | ComposedWitness::Simple(vec![y]), 460 | ]); 461 | let proof = nizk.prove_batchable(&witness_correct, &mut rng).unwrap(); 462 | assert!( 463 | nizk.verify_batchable(&proof).is_ok(), 464 | "Prover fails when all witnesses in an OR proof are valid" 465 | ); 466 | } 467 | } 468 | -------------------------------------------------------------------------------- /tests/relations/mod.rs: -------------------------------------------------------------------------------- 1 | use ff::Field; 2 | use group::prime::PrimeGroup; 3 | use rand::RngCore; 4 | 5 | use sigma_proofs::linear_relation::{CanonicalLinearRelation, LinearRelation, Sum}; 6 | 7 | /// LinearMap for knowledge of a discrete logarithm relative to a fixed basepoint. 8 | #[allow(non_snake_case)] 9 | pub fn discrete_logarithm( 10 | rng: &mut R, 11 | ) -> (CanonicalLinearRelation, Vec) { 12 | let x = G::Scalar::random(rng); 13 | let mut relation = LinearRelation::new(); 14 | 15 | let var_x = relation.allocate_scalar(); 16 | let var_G = relation.allocate_element(); 17 | 18 | let var_X = relation.allocate_eq(var_x * var_G); 19 | 20 | relation.set_element(var_G, G::generator()); 21 | relation.compute_image(&[x]).unwrap(); 22 | 23 | let X = relation.linear_map.group_elements.get(var_X).unwrap(); 24 | 25 | assert_eq!(X, G::generator() * x); 26 | let witness = vec![x]; 27 | let instance = (&relation).try_into().unwrap(); 28 | (instance, witness) 29 | } 30 | 31 | /// LinearMap for knowledge of a shifted discrete logarithm relative to a fixed basepoint. 
32 | #[allow(non_snake_case)] 33 | pub fn shifted_dlog( 34 | rng: &mut R, 35 | ) -> (CanonicalLinearRelation, Vec) { 36 | let x = G::Scalar::random(rng); 37 | let mut relation = LinearRelation::new(); 38 | 39 | let var_x = relation.allocate_scalar(); 40 | let var_G = relation.allocate_element(); 41 | 42 | let var_X = relation.allocate_eq(var_G * var_x + var_G * ::ONE); 43 | // another way of writing this is: 44 | relation.append_equation(var_X, (var_x + G::Scalar::from(1)) * var_G); 45 | 46 | relation.set_element(var_G, G::generator()); 47 | relation.compute_image(&[x]).unwrap(); 48 | 49 | let witness = vec![x]; 50 | let instance = (&relation).try_into().unwrap(); 51 | (instance, witness) 52 | } 53 | 54 | /// LinearMap for knowledge of a discrete logarithm equality between two pairs. 55 | #[allow(non_snake_case)] 56 | pub fn dleq( 57 | rng: &mut R, 58 | ) -> (CanonicalLinearRelation, Vec) { 59 | let H = G::random(&mut *rng); 60 | let x = G::Scalar::random(&mut *rng); 61 | let mut relation = LinearRelation::new(); 62 | 63 | let var_x = relation.allocate_scalar(); 64 | let [var_G, var_H] = relation.allocate_elements(); 65 | 66 | let var_X = relation.allocate_eq(var_x * var_G); 67 | let var_Y = relation.allocate_eq(var_x * var_H); 68 | 69 | relation.set_elements([(var_G, G::generator()), (var_H, H)]); 70 | relation.compute_image(&[x]).unwrap(); 71 | 72 | let X = relation.linear_map.group_elements.get(var_X).unwrap(); 73 | let Y = relation.linear_map.group_elements.get(var_Y).unwrap(); 74 | 75 | assert_eq!(X, G::generator() * x); 76 | assert_eq!(Y, H * x); 77 | let witness = vec![x]; 78 | let instance = (&relation).try_into().unwrap(); 79 | (instance, witness) 80 | } 81 | 82 | /// LinearMap for knowledge of a shifted dleq. 
83 | #[allow(non_snake_case)] 84 | pub fn shifted_dleq( 85 | rng: &mut R, 86 | ) -> (CanonicalLinearRelation, Vec) { 87 | let H = G::random(&mut *rng); 88 | let x = G::Scalar::random(&mut *rng); 89 | let mut relation = LinearRelation::new(); 90 | 91 | let var_x = relation.allocate_scalar(); 92 | let [var_G, var_H] = relation.allocate_elements(); 93 | 94 | let var_X = relation.allocate_eq(var_x * var_G + var_H); 95 | let var_Y = relation.allocate_eq(var_x * var_H + var_G); 96 | 97 | relation.set_elements([(var_G, G::generator()), (var_H, H)]); 98 | relation.compute_image(&[x]).unwrap(); 99 | 100 | let X = relation.linear_map.group_elements.get(var_X).unwrap(); 101 | let Y = relation.linear_map.group_elements.get(var_Y).unwrap(); 102 | 103 | assert_eq!(X, G::generator() * x + H); 104 | assert_eq!(Y, H * x + G::generator()); 105 | let witness = vec![x]; 106 | let instance = (&relation).try_into().unwrap(); 107 | (instance, witness) 108 | } 109 | 110 | /// LinearMap for knowledge of an opening to a Pedersen commitment. 
111 | #[allow(non_snake_case)] 112 | pub fn pedersen_commitment( 113 | rng: &mut R, 114 | ) -> (CanonicalLinearRelation, Vec) { 115 | let H = G::random(&mut *rng); 116 | let x = G::Scalar::random(&mut *rng); 117 | let r = G::Scalar::random(&mut *rng); 118 | let mut relation = LinearRelation::new(); 119 | 120 | let [var_x, var_r] = relation.allocate_scalars(); 121 | let [var_G, var_H] = relation.allocate_elements(); 122 | 123 | let var_C = relation.allocate_eq(var_x * var_G + var_r * var_H); 124 | 125 | relation.set_elements([(var_H, H), (var_G, G::generator())]); 126 | relation.compute_image(&[x, r]).unwrap(); 127 | 128 | let C = relation.linear_map.group_elements.get(var_C).unwrap(); 129 | 130 | let witness = vec![x, r]; 131 | assert_eq!(C, G::generator() * x + H * r); 132 | let instance = (&relation).try_into().unwrap(); 133 | (instance, witness) 134 | } 135 | 136 | #[allow(non_snake_case)] 137 | pub fn twisted_pedersen_commitment( 138 | rng: &mut R, 139 | ) -> (CanonicalLinearRelation, Vec) { 140 | let H = G::random(&mut *rng); 141 | let x = G::Scalar::random(&mut *rng); 142 | let r = G::Scalar::random(&mut *rng); 143 | let mut relation = LinearRelation::new(); 144 | 145 | let [var_x, var_r] = relation.allocate_scalars(); 146 | let [var_G, var_H] = relation.allocate_elements(); 147 | 148 | relation.allocate_eq( 149 | (var_x * G::Scalar::from(3)) * var_G 150 | + (var_r * G::Scalar::from(2) + G::Scalar::from(3)) * var_H, 151 | ); 152 | 153 | relation.set_elements([(var_H, H), (var_G, G::generator())]); 154 | relation.compute_image(&[x, r]).unwrap(); 155 | 156 | let witness = vec![x, r]; 157 | let instance = (&relation).try_into().unwrap(); 158 | (instance, witness) 159 | } 160 | 161 | /// Test that a Pedersen commitment is in the given range. 
162 | #[allow(non_snake_case)] 163 | pub fn range_instance_generation( 164 | mut rng: &mut R, 165 | input: u64, 166 | range: std::ops::Range, 167 | ) -> (CanonicalLinearRelation, Vec) { 168 | let G = G::generator(); 169 | let H = G::random(&mut rng); 170 | 171 | let delta = range.end - range.start; 172 | let whole_bits = (delta - 1).ilog2() as usize; 173 | let remainder = delta - (1 << whole_bits); 174 | 175 | // Compute the bases used to express the input as a linear combination of the bit decomposition 176 | // of the input. 177 | let mut bases = (0..whole_bits).map(|i| 1 << i).collect::>(); 178 | bases.push(remainder); 179 | assert_eq!(range.start + bases.iter().sum::(), range.end - 1); 180 | 181 | let mut instance = LinearRelation::new(); 182 | let [var_G, var_H] = instance.allocate_elements(); 183 | let [var_x, var_r] = instance.allocate_scalars(); 184 | let vars_b = instance.allocate_scalars_vec(bases.len()); 185 | let vars_s = instance.allocate_scalars_vec(bases.len()); 186 | let var_s2 = instance.allocate_scalars_vec(bases.len()); 187 | let var_Ds = instance.allocate_elements_vec(bases.len()); 188 | 189 | // `var_C` is a Pedersen commitment to `var_x`. 190 | let var_C = instance.allocate_eq(var_x * var_G + var_r * var_H); 191 | // `var_Ds[i]` are bit commitments... 192 | for i in 0..bases.len() { 193 | instance.append_equation(var_Ds[i], vars_b[i] * var_G + vars_s[i] * var_H); 194 | instance.append_equation(var_Ds[i], vars_b[i] * var_Ds[i] + var_s2[i] * var_H); 195 | } 196 | // ... satisfying that sum(Ds[i] * bases[i]) = C 197 | instance.append_equation( 198 | var_C, 199 | var_G * G::Scalar::from(range.start) 200 | + (0..bases.len()) 201 | .map(|i| var_Ds[i] * G::Scalar::from(bases[i])) 202 | .sum::>(), 203 | ); 204 | 205 | // Compute the witness 206 | let r = G::Scalar::random(&mut rng); 207 | let x = G::Scalar::from(input); 208 | 209 | // IMPORTANT: this segment of the witness generation is NOT constant-time. 210 | // See PR #80 for details. 
211 | let b = { 212 | let mut rest = input - range.start; 213 | let mut b = vec![G::Scalar::ZERO; bases.len()]; 214 | assert!(rest < delta); 215 | for (i, &base) in bases.iter().enumerate().rev() { 216 | if rest >= base { 217 | b[i] = G::Scalar::ONE; 218 | rest -= base; 219 | } 220 | } 221 | 222 | b 223 | }; 224 | assert_eq!( 225 | x, 226 | G::Scalar::from(range.start) 227 | + (0..bases.len()) 228 | .map(|i| G::Scalar::from(bases[i]) * b[i]) 229 | .sum::() 230 | ); 231 | // set the randomness for the bit decomposition 232 | let mut s = (0..bases.len()) 233 | .map(|_| G::Scalar::random(&mut rng)) 234 | .collect::>(); 235 | let partial_sum = (1..bases.len()) 236 | .map(|i| G::Scalar::from(bases[i]) * s[i]) 237 | .sum::(); 238 | s[0] = r - partial_sum; 239 | let s2 = (0..bases.len()) 240 | .map(|i| (G::Scalar::ONE - b[i]) * s[i]) 241 | .collect::>(); 242 | let witness = [x, r] 243 | .iter() 244 | .chain(&b) 245 | .chain(&s) 246 | .chain(&s2) 247 | .copied() 248 | .collect::>(); 249 | 250 | instance.set_elements([(var_G, G), (var_H, H)]); 251 | instance.set_element(var_C, G * x + H * r); 252 | for i in 0..bases.len() { 253 | instance.set_element(var_Ds[i], G * b[i] + H * s[i]); 254 | } 255 | 256 | (instance.canonical().unwrap(), witness) 257 | } 258 | 259 | /// Test that a Pedersen commitment is in `[0, bound)` for any `bound >= 0`. 260 | #[allow(non_snake_case)] 261 | pub fn test_range( 262 | mut rng: &mut R, 263 | ) -> (CanonicalLinearRelation, Vec) { 264 | range_instance_generation(&mut rng, 822, 0..1337) 265 | } 266 | 267 | /// LinearMap for knowledge of an opening for use in a BBS commitment. 
268 | // BBS message length is 3 269 | #[allow(non_snake_case)] 270 | pub fn bbs_blind_commitment( 271 | rng: &mut R, 272 | ) -> (CanonicalLinearRelation, Vec) { 273 | let [Q_2, J_1, J_2, J_3] = [ 274 | G::random(&mut *rng), 275 | G::random(&mut *rng), 276 | G::random(&mut *rng), 277 | G::random(&mut *rng), 278 | ]; 279 | let [msg_1, msg_2, msg_3] = [ 280 | G::Scalar::random(&mut *rng), 281 | G::Scalar::random(&mut *rng), 282 | G::Scalar::random(&mut *rng), 283 | ]; 284 | let secret_prover_blind = G::Scalar::random(&mut *rng); 285 | let mut relation = LinearRelation::new(); 286 | 287 | // these are computed before the proof in the specification 288 | let C = Q_2 * secret_prover_blind + J_1 * msg_1 + J_2 * msg_2 + J_3 * msg_3; 289 | 290 | // This is the part that needs to be changed in the specification of blind bbs. 291 | let [var_secret_prover_blind, var_msg_1, var_msg_2, var_msg_3] = relation.allocate_scalars(); 292 | 293 | // Match Sage's allocation order: allocate all elements in the same order 294 | let [var_Q_2, var_J_1, var_J_2, var_J_3] = relation.allocate_elements(); 295 | let var_C = relation.allocate_element(); // Allocate var_C separately, giving it index 4 296 | 297 | // Now append the equation separately (like Sage's append_equation) 298 | relation.append_equation( 299 | var_C, 300 | var_secret_prover_blind * var_Q_2 301 | + var_msg_1 * var_J_1 302 | + var_msg_2 * var_J_2 303 | + var_msg_3 * var_J_3, 304 | ); 305 | 306 | relation.set_elements([ 307 | (var_Q_2, Q_2), 308 | (var_J_1, J_1), 309 | (var_J_2, J_2), 310 | (var_J_3, J_3), 311 | (var_C, C), 312 | ]); 313 | 314 | let witness = vec![secret_prover_blind, msg_1, msg_2, msg_3]; 315 | 316 | assert!(vec![C] == relation.linear_map.evaluate(&witness).unwrap()); 317 | let instance = (&relation).try_into().unwrap(); 318 | (instance, witness) 319 | } 320 | 321 | /// LinearMap for the user's specific relation: A * 1 + gen__disj1_x_r * B 322 | #[allow(non_snake_case)] 323 | pub fn weird_linear_combination( 
324 | rng: &mut R, 325 | ) -> (CanonicalLinearRelation, Vec) { 326 | let B = G::random(&mut *rng); 327 | let gen__disj1_x_r = G::Scalar::random(&mut *rng); 328 | let mut sigma__lr = LinearRelation::new(); 329 | 330 | let gen__disj1_x_r_var = sigma__lr.allocate_scalar(); 331 | let A = sigma__lr.allocate_element(); 332 | let var_B = sigma__lr.allocate_element(); 333 | 334 | let sigma__eq1 = sigma__lr.allocate_eq(A * G::Scalar::from(1) + gen__disj1_x_r_var * var_B); 335 | 336 | // Set the group elements 337 | sigma__lr.set_elements([(A, G::generator()), (var_B, B)]); 338 | sigma__lr.compute_image(&[gen__disj1_x_r]).unwrap(); 339 | 340 | let result = sigma__lr.linear_map.group_elements.get(sigma__eq1).unwrap(); 341 | 342 | // Verify the relation computes correctly 343 | let expected = G::generator() + B * gen__disj1_x_r; 344 | assert_eq!(result, expected); 345 | 346 | let witness = vec![gen__disj1_x_r]; 347 | let instance = (&sigma__lr).try_into().unwrap(); 348 | (instance, witness) 349 | } 350 | 351 | #[allow(non_snake_case)] 352 | pub fn simple_subtractions( 353 | mut rng: &mut R, 354 | ) -> (CanonicalLinearRelation, Vec) { 355 | let x = G::Scalar::random(&mut rng); 356 | let B = G::random(&mut rng); 357 | let X = B * (x - G::Scalar::from(1)); 358 | 359 | let mut linear_relation = LinearRelation::::new(); 360 | let var_x = linear_relation.allocate_scalar(); 361 | let var_B = linear_relation.allocate_element(); 362 | let var_X = linear_relation.allocate_eq((var_x + (-G::Scalar::from(1))) * var_B); 363 | linear_relation.set_element(var_B, B); 364 | linear_relation.set_element(var_X, X); 365 | 366 | let instance = (&linear_relation).try_into().unwrap(); 367 | let witness = vec![x]; 368 | (instance, witness) 369 | } 370 | 371 | #[allow(non_snake_case)] 372 | pub fn subtractions_with_shift( 373 | rng: &mut R, 374 | ) -> (CanonicalLinearRelation, Vec) { 375 | let B = G::generator(); 376 | let x = G::Scalar::random(rng); 377 | let X = B * (x - G::Scalar::from(2)); 378 | 379 
| let mut linear_relation = LinearRelation::::new(); 380 | let var_x = linear_relation.allocate_scalar(); 381 | let var_B = linear_relation.allocate_element(); 382 | let var_X = linear_relation.allocate_eq((var_x + (-G::Scalar::from(1))) * var_B + (-var_B)); 383 | 384 | linear_relation.set_element(var_B, B); 385 | linear_relation.set_element(var_X, X); 386 | let instance = (&linear_relation).try_into().unwrap(); 387 | let witness = vec![x]; 388 | (instance, witness) 389 | } 390 | 391 | #[allow(non_snake_case)] 392 | pub fn cmz_wallet_spend_relation( 393 | mut rng: &mut R, 394 | ) -> (CanonicalLinearRelation, Vec) { 395 | // Simulate the wallet spend relation from cmz 396 | let P_W = G::random(&mut rng); 397 | let A = G::random(&mut rng); 398 | 399 | // Secret values 400 | let n_balance = G::Scalar::random(&mut rng); 401 | let i_price = G::Scalar::random(&mut rng); 402 | let fee = G::Scalar::from(5u64); 403 | let z_w_balance = G::Scalar::random(&mut rng); 404 | 405 | // W.balance = N.balance + I.price + fee 406 | let w_balance = n_balance + i_price + fee; 407 | 408 | let mut relation = LinearRelation::new(); 409 | 410 | let var_n_balance = relation.allocate_scalar(); 411 | let var_i_price = relation.allocate_scalar(); 412 | let var_z_w_balance = relation.allocate_scalar(); 413 | 414 | let var_P_W = relation.allocate_element(); 415 | let var_A = relation.allocate_element(); 416 | 417 | // C_show_Hattr_W_balance = (N.balance + I.price + fee) * P_W + z_w_balance * A 418 | let var_C = relation 419 | .allocate_eq((var_n_balance + var_i_price + fee) * var_P_W + var_z_w_balance * var_A); 420 | 421 | relation.set_elements([(var_P_W, P_W), (var_A, A)]); 422 | 423 | // Include fee in the witness 424 | relation 425 | .compute_image(&[n_balance, i_price, z_w_balance]) 426 | .unwrap(); 427 | 428 | let C = relation.linear_map.group_elements.get(var_C).unwrap(); 429 | let expected = P_W * w_balance + A * z_w_balance; 430 | assert_eq!(C, expected); 431 | 432 | let witness = 
vec![n_balance, i_price, z_w_balance]; 433 | let instance = (&relation).try_into().unwrap(); 434 | (instance, witness) 435 | } 436 | 437 | #[allow(non_snake_case)] 438 | pub fn nested_affine_relation( 439 | mut rng: &mut R, 440 | ) -> (CanonicalLinearRelation, Vec) { 441 | let mut instance = LinearRelation::::new(); 442 | let var_r = instance.allocate_scalar(); 443 | let var_A = instance.allocate_element(); 444 | let var_B = instance.allocate_element(); 445 | let eq1 = instance.allocate_eq( 446 | var_A * G::Scalar::from(4) + (var_r * G::Scalar::from(2) + G::Scalar::from(3)) * var_B, 447 | ); 448 | 449 | let A = G::random(&mut rng); 450 | let B = G::random(&mut rng); 451 | let r = G::Scalar::random(&mut rng); 452 | let C = A * G::Scalar::from(4) + B * (r * G::Scalar::from(2) + G::Scalar::from(3)); 453 | instance.set_element(var_A, A); 454 | instance.set_element(var_B, B); 455 | instance.set_element(eq1, C); 456 | 457 | let witness = vec![r]; 458 | let instance = CanonicalLinearRelation::try_from(&instance).unwrap(); 459 | (instance, witness) 460 | } 461 | 462 | #[allow(non_snake_case)] 463 | pub fn pedersen_commitment_equality( 464 | rng: &mut R, 465 | ) -> (CanonicalLinearRelation, Vec) { 466 | let mut instance = LinearRelation::new(); 467 | 468 | let [m, r1, r2] = instance.allocate_scalars(); 469 | let [var_G, var_H] = instance.allocate_elements(); 470 | // This relation is redundant and inefficient. 
471 | instance.allocate_eq(var_G * m + var_H * r1); 472 | instance.allocate_eq(var_G * m + var_H * r2); 473 | 474 | instance.set_elements([(var_G, G::generator()), (var_H, G::random(&mut *rng))]); 475 | 476 | let witness = vec![ 477 | G::Scalar::from(42), 478 | G::Scalar::random(&mut *rng), 479 | G::Scalar::random(&mut *rng), 480 | ]; 481 | instance.compute_image(&witness).unwrap(); 482 | 483 | (instance.canonical().unwrap(), witness) 484 | } 485 | 486 | #[allow(non_snake_case)] 487 | pub fn elgamal_subtraction( 488 | rng: &mut R, 489 | ) -> (CanonicalLinearRelation, Vec) { 490 | let mut instance = LinearRelation::new(); 491 | let [dk, a, r] = instance.allocate_scalars(); 492 | let [ek, C, D, H, G] = instance.allocate_elements(); 493 | 494 | instance.append_equation(ek, dk * H); 495 | 496 | instance.append_equation(D, r * H); 497 | instance.append_equation(C, r * ek + a * G); 498 | 499 | instance.append_equation(C, dk * D + a * G); 500 | 501 | let witness_dk = G::Scalar::from(4242); 502 | let witness_a = G::Scalar::from(1000); 503 | let witness_r = G::Scalar::random(&mut *rng); 504 | let witness = vec![witness_dk, witness_a, witness_r]; 505 | 506 | // Assign group elements consistent with the witness so compute_image is unnecessary. 
507 | let alt_gen = G::random(&mut *rng); 508 | instance.set_elements([(G, G::generator()), (H, alt_gen)]); 509 | let ek_val = alt_gen * witness_dk; 510 | let D_val = alt_gen * witness_r; 511 | let C_val = ek_val * witness_r + G::generator() * witness_a; 512 | instance.set_elements([(ek, ek_val), (D, D_val), (C, C_val)]); 513 | 514 | (instance.canonical().unwrap(), witness) 515 | } 516 | -------------------------------------------------------------------------------- /src/linear_relation/canonical.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = "std"))] 2 | use ahash::RandomState; 3 | use alloc::format; 4 | use alloc::vec::Vec; 5 | use core::iter; 6 | use core::marker::PhantomData; 7 | #[cfg(not(feature = "std"))] 8 | use hashbrown::HashMap; 9 | #[cfg(feature = "std")] 10 | use std::collections::HashMap; 11 | 12 | use ff::Field; 13 | use group::prime::PrimeGroup; 14 | use subtle::{Choice, ConstantTimeEq}; 15 | 16 | use super::{GroupMap, GroupVar, LinearCombination, LinearRelation, ScalarTerm, ScalarVar}; 17 | use crate::errors::{Error, InvalidInstance}; 18 | use crate::group::msm::VariableMultiScalarMul; 19 | use crate::serialization::serialize_elements; 20 | 21 | // XXX. this definition is uncomfortably similar to LinearRelation, exception made for the weights. 22 | // It'd be nice to better compress potentially duplicated code. 23 | /// A normalized form of the [`LinearRelation`], which is used for serialization into the transcript. 24 | /// 25 | /// This struct represents a normalized form of a linear relation where each 26 | /// constraint is of the form: image_i = Σ (scalar_j * group_element_k) 27 | /// without weights or extra scalars. 
28 | #[derive(Clone, Debug, Default)] 29 | pub struct CanonicalLinearRelation { 30 | /// The image group elements (left-hand side of equations) 31 | pub image: Vec>, 32 | /// The constraints, where each constraint is a vector of (scalar_var, group_var) pairs 33 | /// representing the right-hand side of the equation 34 | pub linear_combinations: Vec, GroupVar)>>, 35 | /// The group elements map 36 | pub group_elements: GroupMap, 37 | /// Number of scalar variables 38 | pub num_scalars: usize, 39 | } 40 | 41 | /// Private type alias used to simplify function signatures below. 42 | /// 43 | /// The cache is essentially a mapping (GroupVar, Scalar) => GroupVar, which maps the original 44 | /// weighted group vars to a new assignment, such that if a pair appears more than once, it will 45 | /// map to the same group variable in the canonical linear relation. 46 | #[cfg(feature = "std")] 47 | type WeightedGroupCache = HashMap, Vec<(::Scalar, GroupVar)>>; 48 | #[cfg(not(feature = "std"))] 49 | type WeightedGroupCache = 50 | HashMap, Vec<(::Scalar, GroupVar)>, RandomState>; 51 | 52 | impl CanonicalLinearRelation { 53 | /// Create a new empty canonical linear relation. 54 | /// 55 | /// This function is not meant to be publicly exposed. It is internally used to build a type-safe linear relation, 56 | /// so that all instances guaranteed to be "good" relations over which the prover will want to make a proof. 57 | fn new() -> Self { 58 | Self { 59 | image: Vec::new(), 60 | linear_combinations: Vec::new(), 61 | group_elements: GroupMap::default(), 62 | num_scalars: 0, 63 | } 64 | } 65 | 66 | /// Evaluate the canonical linear relation with the provided scalars 67 | /// 68 | /// This returns a list of image points produced by evaluating each linear combination in the 69 | /// relation. The order of the returned list matches the order of [`Self::linear_combinations`]. 
70 | /// 71 | /// # Panic 72 | /// 73 | /// Panics if the number of scalars given is less than the number of scalar variables in this 74 | /// linear relation. 75 | /// If the vector of scalars if longer than the number of terms in each linear combinations, the extra terms are ignored. 76 | pub fn evaluate(&self, scalars: &[G::Scalar]) -> Vec { 77 | self.linear_combinations 78 | .iter() 79 | .map(|lc| { 80 | let scalars = lc 81 | .iter() 82 | .map(|(scalar_var, _)| scalars[scalar_var.index()]) 83 | .collect::>(); 84 | let bases = lc 85 | .iter() 86 | .map(|(_, group_var)| self.group_elements.get(*group_var).unwrap()) 87 | .collect::>(); 88 | G::msm(&scalars, &bases) 89 | }) 90 | .collect() 91 | } 92 | 93 | /// Get or create a GroupVar for a weighted group element, with deduplication 94 | fn get_or_create_weighted_group_var( 95 | &mut self, 96 | group_var: GroupVar, 97 | weight: &G::Scalar, 98 | original_group_elements: &GroupMap, 99 | weighted_group_cache: &mut WeightedGroupCache, 100 | ) -> Result, InvalidInstance> { 101 | // Check if we already have this (weight, group_var) combination 102 | let entry = weighted_group_cache.entry(group_var).or_default(); 103 | 104 | // Find if we already have this weight for this group_var 105 | if let Some((_, existing_var)) = entry.iter().find(|(w, _)| w == weight) { 106 | return Ok(*existing_var); 107 | } 108 | 109 | // Create new weighted group element 110 | // Use a special case for one, as this is the most common weight. 
111 | let original_group_val = original_group_elements.get(group_var)?; 112 | let weighted_group = match *weight == G::Scalar::ONE { 113 | true => original_group_val, 114 | false => original_group_val * weight, 115 | }; 116 | 117 | // Add to our group elements with new index (length) 118 | let new_var = self.group_elements.push(weighted_group); 119 | 120 | // Cache the mapping for this group_var and weight 121 | entry.push((*weight, new_var)); 122 | 123 | Ok(new_var) 124 | } 125 | 126 | /// Process a single constraint equation and add it to the canonical relation. 127 | fn process_constraint( 128 | &mut self, 129 | &image_var: &GroupVar, 130 | equation: &LinearCombination, 131 | original_relation: &LinearRelation, 132 | weighted_group_cache: &mut WeightedGroupCache, 133 | ) -> Result<(), InvalidInstance> { 134 | let mut rhs_terms = Vec::new(); 135 | 136 | // Collect RHS terms that have scalar variables and apply weights 137 | for weighted_term in equation.terms() { 138 | if let ScalarTerm::Var(scalar_var) = weighted_term.term.scalar { 139 | let group_var = weighted_term.term.elem; 140 | let weight = &weighted_term.weight; 141 | 142 | if weight.is_zero_vartime() { 143 | continue; // Skip zero weights 144 | } 145 | 146 | let canonical_group_var = self.get_or_create_weighted_group_var( 147 | group_var, 148 | weight, 149 | &original_relation.linear_map.group_elements, 150 | weighted_group_cache, 151 | )?; 152 | 153 | rhs_terms.push((scalar_var, canonical_group_var)); 154 | } 155 | } 156 | 157 | // Compute the canonical image by subtracting constant terms from the original image 158 | let mut canonical_image = original_relation.linear_map.group_elements.get(image_var)?; 159 | for weighted_term in equation.terms() { 160 | if let ScalarTerm::Unit = weighted_term.term.scalar { 161 | let group_val = original_relation 162 | .linear_map 163 | .group_elements 164 | .get(weighted_term.term.elem)?; 165 | canonical_image -= group_val * weighted_term.weight; 166 | } 167 | } 168 | 
169 | // Only include constraints that are non-trivial (not zero constraints). 170 | if rhs_terms.is_empty() { 171 | if canonical_image.is_identity().into() { 172 | return Ok(()); 173 | } 174 | return Err(InvalidInstance::new( 175 | "trivially false constraint: constraint has empty right-hand side and non-identity left-hand side", 176 | )); 177 | } 178 | 179 | let canonical_image_group_var = self.group_elements.push(canonical_image); 180 | self.image.push(canonical_image_group_var); 181 | self.linear_combinations.push(rhs_terms); 182 | 183 | Ok(()) 184 | } 185 | 186 | /// Serialize the linear relation to bytes. 187 | /// 188 | /// The output format is: 189 | /// 190 | /// - `[Ne: u32]` number of equations 191 | /// - `Ne × equations`: 192 | /// - `[lhs_index: u32]` output group element index 193 | /// - `[Nt: u32]` number of terms 194 | /// - `Nt × [scalar_index: u32, group_index: u32]` term entries 195 | /// - All group elements in serialized form. 196 | pub fn label(&self) -> Vec { 197 | let mut out = Vec::new(); 198 | 199 | // Build constraint data in the same order as original, as a nested list of group and 200 | // scalar indices. Note that the group indices are into group_elements_ordered. 201 | let mut constraint_data = Vec::<(u32, Vec<(u32, u32)>)>::new(); 202 | 203 | for (image_var, constraint_terms) in iter::zip(&self.image, &self.linear_combinations) { 204 | // Build the RHS terms 205 | let mut rhs_terms = Vec::new(); 206 | for (scalar_var, group_var) in constraint_terms { 207 | rhs_terms.push((scalar_var.0 as u32, group_var.0 as u32)); 208 | } 209 | 210 | constraint_data.push((image_var.0 as u32, rhs_terms)); 211 | } 212 | 213 | // 1. Number of equations 214 | let ne = constraint_data.len(); 215 | out.extend_from_slice(&(ne as u32).to_le_bytes()); 216 | 217 | // 2. Encode each equation 218 | for (lhs_index, rhs_terms) in constraint_data { 219 | // a. Output point index (LHS) 220 | out.extend_from_slice(&lhs_index.to_le_bytes()); 221 | 222 | // b. 
Number of terms in the RHS linear combination 223 | out.extend_from_slice(&(rhs_terms.len() as u32).to_le_bytes()); 224 | 225 | // c. Each term: scalar index and point index 226 | for (scalar_index, group_index) in rhs_terms { 227 | out.extend_from_slice(&scalar_index.to_le_bytes()); 228 | out.extend_from_slice(&group_index.to_le_bytes()); 229 | } 230 | } 231 | 232 | // Dump the group elements. 233 | let group_reprs = serialize_elements( 234 | self.group_elements 235 | .iter() 236 | .map(|(_, elem)| elem.expect("expected group variable to be assigned")), 237 | ); 238 | out.extend_from_slice(&group_reprs); 239 | 240 | out 241 | } 242 | 243 | /// Parse a canonical linear relation from its label representation. 244 | /// 245 | /// Returns an [`InvalidInstance`] error if the label is malformed. 246 | /// 247 | /// # Examples 248 | /// 249 | /// ``` 250 | /// use hex_literal::hex; 251 | /// use sigma_proofs::linear_relation::CanonicalLinearRelation; 252 | /// type G = bls12_381::G1Projective; 253 | /// 254 | /// let dlog_instance_label = hex!("01000000000000000100000000000000010000009823a3def60a6e07fb25feb35f211ee2cbc9c130c1959514f5df6b5021a2b21a4c973630ec2090c733c1fe791834ce1197f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb"); 255 | /// let instance = CanonicalLinearRelation::::from_label(&dlog_instance_label).unwrap(); 256 | /// assert_eq!(&dlog_instance_label[..], &instance.label()[..]); 257 | /// ``` 258 | pub fn from_label(data: &[u8]) -> Result { 259 | use crate::errors::InvalidInstance; 260 | use crate::group::serialization::group_elt_serialized_len; 261 | 262 | let mut offset = 0; 263 | 264 | // Read number of equations (4 bytes, little endian) 265 | if data.len() < 4 { 266 | return Err(InvalidInstance::new("Invalid label: too short for equation count").into()); 267 | } 268 | let num_equations = u32::from_le_bytes([data[0], data[1], data[2], data[3]]) as usize; 269 | offset += 4; 270 | 271 | // Parse constraints 
and collect unique group element indices 272 | let mut constraint_data = Vec::new(); 273 | let mut max_scalar_index = 0u32; 274 | let mut max_group_index = 0u32; 275 | 276 | for _ in 0..num_equations { 277 | // Read LHS index (4 bytes) 278 | if offset + 4 > data.len() { 279 | return Err(InvalidInstance::new("Invalid label: truncated LHS index").into()); 280 | } 281 | let lhs_index = u32::from_le_bytes([ 282 | data[offset], 283 | data[offset + 1], 284 | data[offset + 2], 285 | data[offset + 3], 286 | ]); 287 | offset += 4; 288 | max_group_index = max_group_index.max(lhs_index); 289 | 290 | // Read number of RHS terms (4 bytes) 291 | if offset + 4 > data.len() { 292 | return Err(InvalidInstance::new("Invalid label: truncated RHS count").into()); 293 | } 294 | let num_rhs_terms = u32::from_le_bytes([ 295 | data[offset], 296 | data[offset + 1], 297 | data[offset + 2], 298 | data[offset + 3], 299 | ]) as usize; 300 | offset += 4; 301 | 302 | // Read RHS terms 303 | let mut rhs_terms = Vec::new(); 304 | for _ in 0..num_rhs_terms { 305 | // Read scalar index (4 bytes) 306 | if offset + 4 > data.len() { 307 | return Err( 308 | InvalidInstance::new("Invalid label: truncated scalar index").into(), 309 | ); 310 | } 311 | let scalar_index = u32::from_le_bytes([ 312 | data[offset], 313 | data[offset + 1], 314 | data[offset + 2], 315 | data[offset + 3], 316 | ]); 317 | offset += 4; 318 | max_scalar_index = max_scalar_index.max(scalar_index); 319 | 320 | // Read group index (4 bytes) 321 | if offset + 4 > data.len() { 322 | return Err(InvalidInstance::new("Invalid label: truncated group index").into()); 323 | } 324 | let group_index = u32::from_le_bytes([ 325 | data[offset], 326 | data[offset + 1], 327 | data[offset + 2], 328 | data[offset + 3], 329 | ]); 330 | offset += 4; 331 | max_group_index = max_group_index.max(group_index); 332 | 333 | rhs_terms.push((scalar_index, group_index)); 334 | } 335 | 336 | constraint_data.push((lhs_index, rhs_terms)); 337 | } 338 | 339 | // 
Calculate expected number of group elements 340 | let num_group_elements = (max_group_index + 1) as usize; 341 | let group_element_size = group_elt_serialized_len::(); 342 | let expected_remaining = num_group_elements * group_element_size; 343 | 344 | if data.len() - offset != expected_remaining { 345 | return Err(InvalidInstance::new(format!( 346 | "Invalid label: expected {} bytes for {} group elements, got {}", 347 | expected_remaining, 348 | num_group_elements, 349 | data.len() - offset 350 | )) 351 | .into()); 352 | } 353 | 354 | // Parse group elements 355 | let mut group_elements_ordered = Vec::new(); 356 | for i in 0..num_group_elements { 357 | let start = offset + i * group_element_size; 358 | let end = start + group_element_size; 359 | let elem_bytes = &data[start..end]; 360 | 361 | let mut repr = G::Repr::default(); 362 | repr.as_mut().copy_from_slice(elem_bytes); 363 | 364 | let elem = Option::::from(G::from_bytes(&repr)).ok_or_else(|| { 365 | Error::from(InvalidInstance::new(format!( 366 | "Invalid group element at index {i}" 367 | ))) 368 | })?; 369 | 370 | group_elements_ordered.push(elem); 371 | } 372 | 373 | // Build the canonical relation 374 | let mut canonical = Self::new(); 375 | canonical.num_scalars = (max_scalar_index + 1) as usize; 376 | 377 | // Add all group elements to the map 378 | let mut group_var_map = Vec::new(); 379 | for elem in &group_elements_ordered { 380 | let var = canonical.group_elements.push(*elem); 381 | group_var_map.push(var); 382 | } 383 | 384 | // Build constraints 385 | for (lhs_index, rhs_terms) in constraint_data { 386 | // Add image element 387 | canonical.image.push(group_var_map[lhs_index as usize]); 388 | 389 | // Build linear combination 390 | let mut linear_combination = Vec::new(); 391 | for (scalar_index, group_index) in rhs_terms { 392 | let scalar_var = ScalarVar(scalar_index as usize, PhantomData); 393 | let group_var = group_var_map[group_index as usize]; 394 | linear_combination.push((scalar_var, 
group_var)); 395 | } 396 | canonical.linear_combinations.push(linear_combination); 397 | } 398 | 399 | Ok(canonical) 400 | } 401 | 402 | /// Access the group elements associated with the image (i.e. left-hand side), panicking if any 403 | /// of the image variables are unassigned in the group mkap. 404 | pub(crate) fn image_elements(&self) -> impl Iterator + use<'_, G> { 405 | self.image.iter().map(|var| { 406 | self.group_elements 407 | .get(*var) 408 | .expect("expected group variable to be assigned") 409 | }) 410 | } 411 | } 412 | 413 | impl TryFrom> for CanonicalLinearRelation { 414 | type Error = InvalidInstance; 415 | 416 | fn try_from(value: LinearRelation) -> Result { 417 | Self::try_from(&value) 418 | } 419 | } 420 | 421 | impl TryFrom<&LinearRelation> for CanonicalLinearRelation { 422 | type Error = InvalidInstance; 423 | 424 | fn try_from(relation: &LinearRelation) -> Result { 425 | if relation.image.len() != relation.linear_map.linear_combinations.len() { 426 | return Err(InvalidInstance::new( 427 | "Number of equations must be equal to number of image elements.", 428 | )); 429 | } 430 | 431 | let mut canonical = CanonicalLinearRelation::new(); 432 | canonical.num_scalars = relation.linear_map.num_scalars; 433 | 434 | // Cache for deduplicating weighted group elements 435 | #[cfg(feature = "std")] 436 | let mut weighted_group_cache = HashMap::new(); 437 | #[cfg(not(feature = "std"))] 438 | let mut weighted_group_cache = HashMap::with_hasher(RandomState::new()); 439 | 440 | // Process each constraint using the modular helper method 441 | for (lhs, rhs) in iter::zip(&relation.image, &relation.linear_map.linear_combinations) { 442 | // If any group element in the image is not assigned, return `InvalidInstance`. 443 | let lhs_value = relation.linear_map.group_elements.get(*lhs)?; 444 | 445 | // Compute the constant terms on the right-hand side of the equation. 446 | // If any group element in the linear constraints is not assigned, return `InvalidInstance`. 
447 | let rhs_constant_terms = rhs 448 | .0 449 | .iter() 450 | .filter(|term| matches!(term.term.scalar, ScalarTerm::Unit)) 451 | .map(|term| { 452 | let elem = relation.linear_map.group_elements.get(term.term.elem)?; 453 | let scalar = term.weight; 454 | Ok((elem, scalar)) 455 | }) 456 | .collect::, Vec), _>>()?; 457 | 458 | let rhs_constant_term = G::msm(&rhs_constant_terms.1, &rhs_constant_terms.0); 459 | 460 | // We say that an equation is trivial if it contains no scalar variables. 461 | // To "contain no scalar variables" means that each term in the right-hand side is a unit or its weight is zero. 462 | let is_trivial = rhs.0.iter().all(|term| { 463 | matches!(term.term.scalar, ScalarTerm::Unit) || term.weight.is_zero_vartime() 464 | }); 465 | 466 | // We say that an equation is homogenous if the constant term is zero. 467 | let is_homogenous = rhs_constant_term == lhs_value; 468 | 469 | // Skip processing trivial equations that are always true. 470 | // There's nothing to prove here. 471 | if is_trivial && is_homogenous { 472 | continue; 473 | } 474 | 475 | // Disallow non-trivial equations with trivial solutions. 476 | if !is_trivial && is_homogenous { 477 | return Err(InvalidInstance::new("Trivial kernel in this relation")); 478 | } 479 | 480 | canonical.process_constraint(lhs, rhs, relation, &mut weighted_group_cache)?; 481 | } 482 | 483 | Ok(canonical) 484 | } 485 | } 486 | 487 | impl CanonicalLinearRelation { 488 | /// Tests is the witness is valid. 489 | /// 490 | /// Returns a [`Choice`] indicating if the witness is valid for the instance constructed. 491 | /// 492 | /// # Panic 493 | /// 494 | /// Panics if the number of scalars given is less than the number of scalar variables. 495 | /// If the number of scalars is more than the number of scalar variables, the extra elements are ignored. 
496 | pub fn is_witness_valid(&self, witness: &[G::Scalar]) -> Choice { 497 | let got = self.evaluate(witness); 498 | self.image_elements() 499 | .zip(got) 500 | .fold(Choice::from(1), |acc, (lhs, rhs)| acc & lhs.ct_eq(&rhs)) 501 | } 502 | } 503 | -------------------------------------------------------------------------------- /src/linear_relation/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Linear Maps and Relations Handling. 2 | //! 3 | //! This module provides utilities for describing and manipulating **linear group linear maps**, 4 | //! supporting sigma protocols over group-based statements (e.g., discrete logarithms, DLEQ proofs). See Maurer09. 5 | //! 6 | //! It includes: 7 | //! - [`LinearCombination`]: a sparse representation of scalar multiplication relations. 8 | //! - [`LinearMap`]: a collection of linear combinations acting on group elements. 9 | //! - [`LinearRelation`]: a higher-level structure managing linear maps and their associated images. 10 | 11 | use alloc::format; 12 | use alloc::vec::Vec; 13 | use core::iter; 14 | use core::marker::PhantomData; 15 | 16 | use ff::Field; 17 | use group::prime::PrimeGroup; 18 | 19 | use crate::codec::Shake128DuplexSponge; 20 | use crate::errors::{Error, InvalidInstance}; 21 | use crate::group::msm::VariableMultiScalarMul; 22 | use crate::Nizk; 23 | 24 | /// Implementations of conversion operations such as From and FromIterator for var and term types. 25 | mod convert; 26 | /// Implementations of core ops for the linear combination types. 27 | mod ops; 28 | 29 | /// Implementation of canonical linear relation. 30 | mod canonical; 31 | pub use canonical::CanonicalLinearRelation; 32 | 33 | /// A wrapper representing an index for a scalar variable. 34 | /// 35 | /// Used to reference scalars in sparse linear combinations. 
36 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 37 | pub struct ScalarVar(usize, PhantomData); 38 | 39 | impl ScalarVar { 40 | pub fn index(&self) -> usize { 41 | self.0 42 | } 43 | } 44 | 45 | impl core::hash::Hash for ScalarVar { 46 | fn hash(&self, state: &mut H) { 47 | self.0.hash(state) 48 | } 49 | } 50 | 51 | /// A wrapper representing an index for a group element (point). 52 | /// 53 | /// Used to reference group elements in sparse linear combinations. 54 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 55 | pub struct GroupVar(usize, PhantomData); 56 | 57 | impl GroupVar { 58 | pub fn index(&self) -> usize { 59 | self.0 60 | } 61 | } 62 | 63 | impl core::hash::Hash for GroupVar { 64 | fn hash(&self, state: &mut H) { 65 | self.0.hash(state) 66 | } 67 | } 68 | 69 | #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] 70 | pub enum ScalarTerm { 71 | Var(ScalarVar), 72 | Unit, 73 | } 74 | 75 | impl ScalarTerm { 76 | // NOTE: This function is private intentionally as it would be replaced if a ScalarMap struct 77 | // were to be added. 78 | fn value(self, scalars: &[G::Scalar]) -> G::Scalar { 79 | match self { 80 | Self::Var(var) => scalars[var.0], 81 | Self::Unit => G::Scalar::ONE, 82 | } 83 | } 84 | } 85 | 86 | /// A term in a linear combination, representing `scalar * elem`. 87 | #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] 88 | pub struct Term { 89 | scalar: ScalarTerm, 90 | elem: GroupVar, 91 | } 92 | 93 | #[derive(Copy, Clone, Debug)] 94 | pub struct Weighted { 95 | pub term: T, 96 | pub weight: F, 97 | } 98 | 99 | #[derive(Clone, Debug)] 100 | pub struct Sum(Vec); 101 | 102 | impl Sum { 103 | /// Access the terms of the sum as slice reference. 
104 | pub fn terms(&self) -> &[T] { 105 | &self.0 106 | } 107 | } 108 | 109 | impl core::iter::Sum for Sum { 110 | /// Add a bunch of `T` to yield a `Sum` 111 | fn sum(iter: I) -> Self 112 | where 113 | I: Iterator, 114 | { 115 | Self(iter.collect()) 116 | } 117 | } 118 | 119 | /// Represents a sparse linear combination of scalars and group elements. 120 | /// 121 | /// For example, it can represent an equation like: 122 | /// `w_1 * (s_1 * P_1) + w_2 * (s_2 * P_2) + ... + w_n * (s_n * P_n)` 123 | /// 124 | /// where: 125 | /// - `(s_i * P_i)` are the terms, with `s_i` scalars (referenced by `scalar_vars`) and `P_i` group elements (referenced by `element_vars`). 126 | /// - `w_i` are the constant weight scalars 127 | /// 128 | /// The indices refer to external lists managed by the containing LinearMap. 129 | pub type LinearCombination = Sum, ::Scalar>>; 130 | 131 | impl LinearMap { 132 | fn map(&self, scalars: &[G::Scalar]) -> Result, InvalidInstance> { 133 | self.linear_combinations 134 | .iter() 135 | .map(|lc| { 136 | let weighted_coefficients = 137 | lc.0.iter() 138 | .map(|weighted| weighted.term.scalar.value(scalars) * weighted.weight) 139 | .collect::>(); 140 | let elements = 141 | lc.0.iter() 142 | .map(|weighted| self.group_elements.get(weighted.term.elem)) 143 | .collect::, InvalidInstance>>(); 144 | match elements { 145 | Ok(elements) => Ok(G::msm(&weighted_coefficients, &elements)), 146 | Err(error) => Err(error), 147 | } 148 | }) 149 | .collect::, InvalidInstance>>() 150 | } 151 | } 152 | 153 | /// Ordered mapping of [GroupVar] to group elements assignments. 154 | #[derive(Clone, Debug)] 155 | pub struct GroupMap(Vec>); 156 | 157 | impl GroupMap { 158 | /// Assign a group element value to a point variable. 159 | /// 160 | /// # Parameters 161 | /// 162 | /// - `var`: The variable to assign. 163 | /// - `element`: The value to assign to the variable. 
// NOTE(review): this chunk comes from a repository dump in which source lines were fused
// together and all angle-bracketed generic parameter lists were stripped by the extractor
// (e.g. `GroupVar<G>` became `GroupVar`). The code below restores the original line
// structure and re-inserts the generic parameters. The `G: PrimeGroup` bound is inferred
// from the crate's conventions (`group::prime::PrimeGroup`) — TODO confirm against the
// original `src/linear_relation/mod.rs`.

    ///
    /// # Panics
    ///
    /// Panics if the given assignment conflicts with the existing assignment.
    pub fn assign_element(&mut self, var: GroupVar<G>, element: G) {
        if self.0.len() <= var.0 {
            self.0.resize(var.0 + 1, None);
        } else if let Some(assignment) = self.0[var.0] {
            assert_eq!(
                assignment, element,
                "conflicting assignments for var {var:?}"
            )
        }
        self.0[var.0] = Some(element);
    }

    /// Assigns specific group elements to point variables (indices).
    ///
    /// # Parameters
    ///
    /// - `assignments`: A collection of `(GroupVar, GroupElement)` pairs that can be iterated over.
    ///
    /// # Panics
    ///
    /// Panics if the collection contains two conflicting assignments for the same variable.
    pub fn assign_elements(&mut self, assignments: impl IntoIterator<Item = (GroupVar<G>, G)>) {
        for (var, elem) in assignments.into_iter() {
            self.assign_element(var, elem);
        }
    }

    /// Get the element value assigned to the given point var.
    ///
    /// Returns [`InvalidInstance`] if a value is not assigned.
    pub fn get(&self, var: GroupVar<G>) -> Result<G, InvalidInstance> {
        match self.0.get(var.0) {
            Some(Some(elem)) => Ok(*elem),
            Some(None) | None => Err(InvalidInstance::new(format!(
                "unassigned group variable {}",
                var.0
            ))),
        }
    }

    /// Iterate over the assigned variable and group element pairs in this mapping.
    // NOTE: Not implemented as `IntoIterator` for now because doing so requires explicitly
    // defining an iterator type, See https://github.com/rust-lang/rust/issues/63063
    #[allow(clippy::should_implement_trait)]
    pub fn into_iter(self) -> impl Iterator<Item = (GroupVar<G>, Option<G>)> {
        self.0
            .into_iter()
            .enumerate()
            .map(|(i, x)| (GroupVar(i, PhantomData), x))
    }

    /// Iterate over the variables and (possibly unassigned) element slots by reference.
    pub fn iter(&self) -> impl Iterator<Item = (GroupVar<G>, Option<&G>)> {
        self.0
            .iter()
            .enumerate()
            .map(|(i, opt)| (GroupVar(i, PhantomData), opt.as_ref()))
    }

    /// Add a new group element to the map and return its variable index
    pub fn push(&mut self, element: G) -> GroupVar<G> {
        let index = self.0.len();
        self.0.push(Some(element));
        GroupVar(index, PhantomData)
    }

    /// Get the number of elements in the map
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Check if the map is empty
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

impl<G: PrimeGroup> Default for GroupMap<G> {
    fn default() -> Self {
        Self(Vec::default())
    }
}

impl<G: PrimeGroup> FromIterator<(GroupVar<G>, G)> for GroupMap<G> {
    fn from_iter<T: IntoIterator<Item = (GroupVar<G>, G)>>(iter: T) -> Self {
        iter.into_iter()
            .fold(Self::default(), |mut instance, (var, val)| {
                instance.assign_element(var, val);
                instance
            })
    }
}

/// A LinearMap represents a list of linear combinations over group elements.
///
/// It supports dynamic allocation of scalars and elements,
/// and evaluates by performing multi-scalar multiplications.
#[derive(Clone, Default, Debug)]
pub struct LinearMap<G: PrimeGroup> {
    /// The set of linear combination constraints (equations).
    pub linear_combinations: Vec<LinearCombination<G>>,
    /// The list of group elements referenced in the linear map.
    ///
    /// Uninitialized group elements are presented with `None`.
    pub group_elements: GroupMap<G>,
    /// The total number of scalar variables allocated.
    pub num_scalars: usize,
    /// The total number of group element variables allocated.
    pub num_elements: usize,
}

impl<G: PrimeGroup> LinearMap<G> {
    /// Creates a new empty [`LinearMap`].
    ///
    /// # Returns
    ///
    /// A [`LinearMap`] instance with empty linear combinations and group elements,
    /// and zero allocated scalars and elements.
    pub fn new() -> Self {
        Self {
            linear_combinations: Vec::new(),
            group_elements: GroupMap::default(),
            num_scalars: 0,
            num_elements: 0,
        }
    }

    /// Returns the number of constraints (equations) in this linear map.
    pub fn num_constraints(&self) -> usize {
        self.linear_combinations.len()
    }

    /// Adds a new linear combination constraint to the linear map.
    ///
    /// # Parameters
    /// - `lc`: The [`LinearCombination`] to add.
    pub fn append(&mut self, lc: LinearCombination<G>) {
        self.linear_combinations.push(lc);
    }

    /// Evaluates all linear combinations in the linear map with the provided scalars.
    ///
    /// # Parameters
    /// - `scalars`: A slice of scalar values corresponding to the scalar variables.
    ///
    /// # Returns
    ///
    /// A vector of group elements, each being the result of evaluating one linear combination with the scalars.
    pub fn evaluate(&self, scalars: &[G::Scalar]) -> Result<Vec<G>, Error> {
        self.linear_combinations
            .iter()
            .map(|lc| {
                // TODO: The multiplication by the (public) weight is potentially wasteful, as the
                // weight is most commonly 1, but multiplication is constant time.
                let weighted_coefficients =
                    lc.0.iter()
                        .map(|weighted| weighted.term.scalar.value(scalars) * weighted.weight)
                        .collect::<Vec<_>>();
                let elements =
                    lc.0.iter()
                        .map(|weighted| self.group_elements.get(weighted.term.elem))
                        .collect::<Result<Vec<_>, _>>()?;
                Ok(G::msm(&weighted_coefficients, &elements))
            })
            .collect()
    }
}

/// A wrapper struct coupling a [`LinearMap`] with the corresponding expected output (image) elements.
///
/// This structure represents the *preimage problem* for a group linear map: given a set of scalar inputs,
/// determine whether their image under the linear map matches a target set of group elements.
///
/// Internally, the constraint system is defined through:
/// - A list of group elements and linear equations (held in the [`LinearMap`] field),
/// - A list of [`GroupVar`] indices (`image`) that specify the expected output for each constraint.
#[derive(Clone, Default, Debug)]
pub struct LinearRelation<G: PrimeGroup> {
    /// The underlying linear map describing the structure of the statement.
    pub linear_map: LinearMap<G>,
    /// Indices pointing to elements representing the "target" images for each constraint.
    pub image: Vec<GroupVar<G>>,
}

impl<G: PrimeGroup> LinearRelation<G> {
    /// Create a new empty [`LinearRelation`].
    pub fn new() -> Self {
        Self {
            linear_map: LinearMap::new(),
            image: Vec::new(),
        }
    }

    /// Adds a new equation to the statement of the form:
    /// `lhs = Σ weight_i * (scalar_i * point_i)`.
    ///
    /// # Parameters
    /// - `lhs`: The image group element variable (left-hand side of the equation).
    /// - `rhs`: An instance of [`LinearCombination`] representing the linear combination on the right-hand side.
    pub fn append_equation(&mut self, lhs: GroupVar<G>, rhs: impl Into<LinearCombination<G>>) {
        self.linear_map.append(rhs.into());
        self.image.push(lhs);
    }

    /// Adds a new equation to the statement of the form:
    /// `lhs = Σ weight_i * (scalar_i * point_i)` without allocating `lhs`.
    ///
    /// # Parameters
    /// - `rhs`: An instance of [`LinearCombination`] representing the linear combination on the right-hand side.
    pub fn allocate_eq(&mut self, rhs: impl Into<LinearCombination<G>>) -> GroupVar<G> {
        let var = self.allocate_element();
        self.append_equation(var, rhs);
        var
    }

    /// Allocates a scalar variable for use in the linear map.
    pub fn allocate_scalar(&mut self) -> ScalarVar<G> {
        self.linear_map.num_scalars += 1;
        ScalarVar(self.linear_map.num_scalars - 1, PhantomData)
    }

    /// Allocates space for `N` new scalar variables.
    ///
    /// # Returns
    /// An array of [`ScalarVar`] representing the newly allocated scalar indices.
    ///
    /// # Example
    /// ```
    /// # use sigma_proofs::LinearRelation;
    /// use curve25519_dalek::RistrettoPoint as G;
    ///
    /// let mut relation = LinearRelation::<G>::new();
    /// let [var_x, var_y] = relation.allocate_scalars();
    /// let vars = relation.allocate_scalars::<10>();
    /// ```
    pub fn allocate_scalars<const N: usize>(&mut self) -> [ScalarVar<G>; N] {
        let mut vars = [ScalarVar(usize::MAX, PhantomData); N];
        for var in vars.iter_mut() {
            *var = self.allocate_scalar();
        }
        vars
    }

    /// Allocates a vector of new scalar variables.
    ///
    /// # Returns
    /// A vector of [`ScalarVar`] representing the newly allocated scalar indices.
    ///
    /// # Example
    /// ```
    /// # use sigma_proofs::LinearRelation;
    /// use curve25519_dalek::RistrettoPoint as G;
    ///
    /// let mut relation = LinearRelation::<G>::new();
    /// let [var_x, var_y] = relation.allocate_scalars();
    /// let vars = relation.allocate_scalars_vec(10);
    /// ```
    pub fn allocate_scalars_vec(&mut self, n: usize) -> Vec<ScalarVar<G>> {
        (0..n).map(|_| self.allocate_scalar()).collect()
    }

    /// Allocates a point variable (group element) for use in the linear map.
    pub fn allocate_element(&mut self) -> GroupVar<G> {
        self.linear_map.num_elements += 1;
        GroupVar(self.linear_map.num_elements - 1, PhantomData)
    }

    /// Allocates a point variable (group element) and sets it immediately to the given value
    pub fn allocate_element_with(&mut self, element: G) -> GroupVar<G> {
        let var = self.allocate_element();
        self.set_element(var, element);
        var
    }

    /// Allocates `N` point variables (group elements) for use in the linear map.
    ///
    /// # Returns
    /// An array of [`GroupVar`] representing the newly allocated group element indices.
    ///
    /// # Example
    /// ```
    /// # use sigma_proofs::LinearRelation;
    /// use curve25519_dalek::RistrettoPoint as G;
    ///
    /// let mut relation = LinearRelation::<G>::new();
    /// let [var_g, var_h] = relation.allocate_elements();
    /// let vars = relation.allocate_elements::<10>();
    /// ```
    pub fn allocate_elements<const N: usize>(&mut self) -> [GroupVar<G>; N] {
        let mut vars = [GroupVar(usize::MAX, PhantomData); N];
        for var in vars.iter_mut() {
            *var = self.allocate_element();
        }
        vars
    }

    /// Allocates a vector of new point variables (group elements).
    ///
    /// # Returns
    /// A vector of [`GroupVar`] representing the newly allocated group element indices.
    ///
    /// # Example
    /// ```
    /// # use sigma_proofs::LinearRelation;
    /// use curve25519_dalek::RistrettoPoint as G;
    /// let mut relation = LinearRelation::<G>::new();
    /// let [var_g, var_h] = relation.allocate_elements();
    /// let vars = relation.allocate_elements_vec(10);
    /// ```
    pub fn allocate_elements_vec(&mut self, n: usize) -> Vec<GroupVar<G>> {
        (0..n).map(|_| self.allocate_element()).collect()
    }

    /// Allocates point variables (group elements) and sets them immediately to the given values.
    pub fn allocate_elements_with(&mut self, elements: &[G]) -> Vec<GroupVar<G>> {
        elements
            .iter()
            .map(|element| self.allocate_element_with(*element))
            .collect()
    }

    /// Assign a group element value to a point variable.
    ///
    /// # Parameters
    ///
    /// - `var`: The variable to assign.
    /// - `element`: The value to assign to the variable.
    ///
    /// # Panics
    ///
    /// Panics if the given assignment conflicts with the existing assignment.
    pub fn set_element(&mut self, var: GroupVar<G>, element: G) {
        self.linear_map.group_elements.assign_element(var, element)
    }

    /// Assigns specific group elements to point variables (indices).
    ///
    /// # Parameters
    ///
    /// - `assignments`: A collection of `(GroupVar, GroupElement)` pairs that can be iterated over.
    ///
    /// # Panics
    ///
    /// Panics if the collection contains two conflicting assignments for the same variable.
    pub fn set_elements(&mut self, assignments: impl IntoIterator<Item = (GroupVar<G>, G)>) {
        self.linear_map.group_elements.assign_elements(assignments)
    }

    /// Evaluates all linear combinations in the linear map with the provided scalars, computing the
    /// left-hand side of these constraints (i.e. the image).
    ///
    /// After calling this function, all point variables will be assigned.
    ///
    /// # Parameters
    ///
    /// - `scalars`: A slice of scalar values corresponding to the scalar variables.
    ///
    /// # Returns
    ///
    /// Return `Ok` on success, and an error if unassigned elements prevent the image from being
    /// computed. Modifies the group elements assigned in the [LinearRelation].
    pub fn compute_image(&mut self, scalars: &[G::Scalar]) -> Result<(), Error> {
        if self.linear_map.num_constraints() != self.image.len() {
            // NOTE: This is a panic, rather than a returned error, because this can only happen if
            // this implementation has a bug.
            panic!("invalid LinearRelation: different number of constraints and image variables");
        }

        // NOTE(review): the dumped source called `self.linear_map.map(scalars)`, but the
        // visible `impl LinearMap` defines no `map` method; `evaluate` performs exactly the
        // computation this function's documentation describes. Confirm no other `map`
        // method exists elsewhere in the file.
        let mapped_scalars = self.linear_map.evaluate(scalars)?;

        for (mapped_scalar, lhs) in iter::zip(mapped_scalars, &self.image) {
            self.linear_map
                .group_elements
                .assign_element(*lhs, mapped_scalar)
        }
        Ok(())
    }

    /// Returns the current group elements corresponding to the image variables.
    ///
    /// # Returns
    ///
    /// A vector of group elements (`Vec<G>`) representing the linear map's image.
    // TODO: Should this return GroupMap?
    pub fn image(&self) -> Result<Vec<G>, InvalidInstance> {
        self.image
            .iter()
            .map(|&var| self.linear_map.group_elements.get(var))
            .collect()
    }

    /// Convert this LinearRelation into a non-interactive zero-knowledge protocol
    /// using the ShakeCodec and a specified context/domain separator.
    ///
    /// # Parameters
    /// - `context`: Domain separator bytes for the Fiat-Shamir transform
    ///
    /// # Returns
    /// A `Nizk` instance ready for proving and verification
    ///
    /// # Example
    /// ```
    /// # use sigma_proofs::{LinearRelation, Nizk};
    /// # use curve25519_dalek::RistrettoPoint as G;
    /// # use curve25519_dalek::scalar::Scalar;
    /// # use rand::rngs::OsRng;
    /// # use group::Group;
    ///
    /// let mut relation = LinearRelation::<G>::new();
    /// let x_var = relation.allocate_scalar();
    /// let g_var = relation.allocate_element();
    /// let p_var = relation.allocate_eq(x_var * g_var);
    ///
    /// relation.set_element(g_var, G::generator());
    /// let x = Scalar::random(&mut OsRng);
    /// relation.compute_image(&[x]).unwrap();
    ///
    /// // Convert to NIZK with custom context
    /// let nizk = relation.into_nizk(b"my-protocol-v1").unwrap();
    /// let proof = nizk.prove_batchable(&vec![x], &mut OsRng).unwrap();
    /// assert!(nizk.verify_batchable(&proof).is_ok());
    /// ```
    pub fn into_nizk(
        self,
        session_identifier: &[u8],
    ) -> Result<Nizk<SchnorrProof<G>, Shake128DuplexSponge<G>>, InvalidInstance> {
        Ok(Nizk::new(session_identifier, self.try_into()?))
    }

    /// Construct a [CanonicalLinearRelation] from this generalized linear relation.
    ///
    /// The construction may fail if the linear relation is malformed, unsatisfiable, or trivial.
    pub fn canonical(&self) -> Result<CanonicalLinearRelation<G>, InvalidInstance> {
        self.try_into()
    }
}