├── src ├── util │ ├── mod.rs │ ├── hypercube.rs │ ├── vec.rs │ └── mle.rs ├── ccs │ ├── mod.rs │ ├── pedersen.rs │ ├── ccs.rs │ ├── util.rs │ ├── lcccs.rs │ └── cccs.rs ├── espresso │ ├── mod.rs │ ├── util.rs │ ├── errors.rs │ ├── sum_check │ │ ├── structs.rs │ │ ├── mod.rs │ │ ├── prover.rs │ │ └── verifier.rs │ ├── multilinear_polynomial.rs │ └── virtual_polynomial.rs ├── lib.rs └── multifolding.rs ├── doc └── images │ └── multifolding_diagram.png ├── TODO.md ├── .gitignore ├── Cargo.toml ├── LICENSE └── README.md /src/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod hypercube; 2 | pub mod mle; 3 | pub mod vec; 4 | -------------------------------------------------------------------------------- /doc/images/multifolding_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/privacy-ethereum/multifolding-poc/HEAD/doc/images/multifolding_diagram.png -------------------------------------------------------------------------------- /src/ccs/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cccs; 2 | #[allow(clippy::module_inception)] 3 | pub mod ccs; 4 | pub mod lcccs; 5 | pub mod pedersen; 6 | pub mod util; 7 | -------------------------------------------------------------------------------- /src/espresso/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod errors; 2 | pub mod multilinear_polynomial; 3 | pub mod util; 4 | pub mod virtual_polynomial; 5 | 6 | pub mod sum_check; 7 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(missing_debug_implementations, rust_2018_idioms)] 2 | #![allow(non_snake_case)] 3 | #![allow(non_upper_case_globals)] 4 | 5 | pub mod ccs; 6 | pub mod multifolding; 7 | 8 | pub mod espresso; 9 | pub mod util; 10 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # TODO 2 | 3 | - Security: Think thoroughly about the FS transcript inputs (implement [strong FS](https://eprint.iacr.org/2023/691)) 4 | - Fix: Properly implement error handling and removal of unwraps() 5 | - Feature: Write benchmarks 6 | - Fix: Go over the remaining TODOs and XXXs in the codebase 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | -------------------------------------------------------------------------------- /src/espresso/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 
3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | use ark_std::log2; 8 | 9 | /// Return the number of variables that one need for an MLE to 10 | /// batch the list of MLEs 11 | #[inline] 12 | pub fn get_batched_nv(num_var: usize, polynomials_len: usize) -> usize { 13 | num_var + log2(polynomials_len) as usize 14 | } 15 | -------------------------------------------------------------------------------- /src/espresso/errors.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | //! Error module. 8 | 9 | use ark_std::string::String; 10 | use displaydoc::Display; 11 | 12 | /// A `enum` specifying the possible failure modes of the arithmetics. 13 | #[derive(Display, Debug)] 14 | pub enum ArithErrors { 15 | /// Invalid parameters: {0} 16 | InvalidParameters(String), 17 | /// Should not arrive to this point 18 | ShouldNotArrive, 19 | /// An error during (de)serialization: {0} 20 | SerializationErrors(ark_serialize::SerializationError), 21 | } 22 | 23 | impl From for ArithErrors { 24 | fn from(e: ark_serialize::SerializationError) -> Self { 25 | Self::SerializationErrors(e) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "multifolding-poc" 3 | version = "0.2.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | ark-bls12-381 = "0.4.0" 10 | ark-ff = {version="^0.4.0", features=["parallel"]} 11 | ark-poly = {version="^0.4.0", features=["parallel"]} 12 | ark-ec = "^0.4.0" 13 | ark-serialize = "0.4.2" 14 | ark-std = { version = "^0.4.0", features=["parallel"] } 15 | displaydoc = "0.2.4" 16 | rayon = "1.7.0" 17 | thiserror = "1.0.40" 18 | 19 | # import for poly_iop & transcript 20 | subroutines = {git="https://github.com/EspressoSystems/hyperplonk"} 21 | transcript = {git="https://github.com/EspressoSystems/hyperplonk"} 22 | 23 | [features] 24 | # default = [ "parallel", "print-trace" ] 25 | default = ["parallel"] 26 | # extensive sanity checks that are useful for debugging 27 | extensive_sanity_checks = [ ] 28 | 29 | parallel = [ 30 | "ark-std/parallel", 31 | "ark-ff/parallel", 32 | "ark-poly/parallel", 33 | ] 34 | print-trace = [ 35 | "ark-std/print-trace", 36 | ] 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Privacy & Scaling Explorations (formerly known as appliedzkp) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or 
substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hypernova multifolding 2 | 3 | A complete implementation of the [Hypernova](https://eprint.iacr.org/2023/573) folding scheme (see section 5 of the paper) in arkworks. 4 | 5 | This implementation is not meant to be used in production. Its purpose is to help us refine the interfaces and abstractions, so that multifolding can be integrated into a wider Hypernova implementation. A complete Hypernova implementation includes IVC, an in-circuit multifolding verifier, and R1CS-to-CCS and Plonkish-to-CCS compilers. 6 | 7 | 8 |
9 | ![Multifolding diagram](doc/images/multifolding_diagram.png) 13 |
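## Example

The sketch below shows how the pieces that feed multifolding fit together: build a CCS instance, check it against a satisfying `z` vector, commit to the witness, and linearize it into an LCCCS instance. It is a minimal, illustrative sketch only: it reuses the test-only helpers `get_test_ccs`/`get_test_z` from `src/ccs/ccs.rs` (which live behind `#[cfg(test)]`), and the test name `demo_ccs_to_lcccs` is hypothetical, not part of the crate. The folding step itself is demonstrated in `src/multifolding.rs:test_basic_multifolding()`.

```rust
use ark_bls12_381::G1Projective;
use ark_std::test_rng;

// Illustrative only: assumes this lives inside the crate's own test code,
// since get_test_ccs / get_test_z are #[cfg(test)] helpers.
use crate::ccs::ccs::test::{get_test_ccs, get_test_z};
use crate::ccs::pedersen::Pedersen;

#[test]
fn demo_ccs_to_lcccs() {
    let mut rng = test_rng();

    // CCS encoding of `x^3 + x + 5 == 35` and a satisfying z vector for x = 3.
    let ccs = get_test_ccs::<G1Projective>();
    let z = get_test_z(3);
    ccs.check_relation(&z).unwrap();

    // Commit to the witness and linearize the committed instance into an LCCCS instance.
    let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
    let (lcccs, witness) = ccs.to_lcccs(&mut rng, &pedersen_params, &z);

    // The LCCCS relation (section 4.2 of the paper) must hold for the fresh instance.
    lcccs.check_relation(&pedersen_params, &witness).unwrap();
}
```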
14 | 15 | ## Documentation 16 | 17 | See `src/multifolding.rs:test_basic_multifolding()` for a demonstration of the multifolding. 18 | 19 | See `TODO.md` for open future tasks. 20 | 21 | ## Building & Running 22 | 23 | As usual, you can run the tests using `cargo test --release`. 24 | 25 | ## Acknowledgements 26 | 27 | Shoutout to Espresso Systems for the [Hyperplonk implementation](https://github.com/EspressoSystems/hyperplonk/tree/main/arithmetic/src) that included useful multivariate polynomial routines. 28 | -------------------------------------------------------------------------------- /src/espresso/sum_check/structs.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | //! This module defines structs that are shared by all sub protocols. 8 | 9 | use crate::espresso::virtual_polynomial::VirtualPolynomial; 10 | use ark_ff::PrimeField; 11 | use ark_serialize::CanonicalSerialize; 12 | 13 | /// An IOP proof is a collections of 14 | /// - messages from prover to verifier at each round through the interactive 15 | /// protocol. 16 | /// - a point that is generated by the transcript for evaluation 17 | #[derive(Clone, Debug, Default, PartialEq, Eq)] 18 | pub struct IOPProof { 19 | pub point: Vec, 20 | pub proofs: Vec>, 21 | } 22 | 23 | /// A message from the prover to the verifier at a given round 24 | /// is a list of evaluations. 25 | #[derive(Clone, Debug, Default, PartialEq, Eq, CanonicalSerialize)] 26 | pub struct IOPProverMessage { 27 | pub(crate) evaluations: Vec, 28 | } 29 | 30 | /// Prover State of a PolyIOP. 31 | #[derive(Debug)] 32 | pub struct IOPProverState { 33 | /// sampled randomness given by the verifier 34 | pub challenges: Vec, 35 | /// the current round number 36 | pub(crate) round: usize, 37 | /// pointer to the virtual polynomial 38 | pub(crate) poly: VirtualPolynomial, 39 | /// points with precomputed barycentric weights for extrapolating smaller 40 | /// degree uni-polys to `max_degree + 1` evaluations. 
41 | pub(crate) extrapolation_aux: Vec<(Vec, Vec)>, 42 | } 43 | 44 | /// Prover State of a PolyIOP 45 | #[derive(Debug)] 46 | pub struct IOPVerifierState { 47 | pub(crate) round: usize, 48 | pub(crate) num_vars: usize, 49 | pub(crate) max_degree: usize, 50 | pub(crate) finished: bool, 51 | /// a list storing the univariate polynomial in evaluation form sent by the 52 | /// prover at each round 53 | pub(crate) polynomials_received: Vec>, 54 | /// a list storing the randomness sampled by the verifier at each round 55 | pub(crate) challenges: Vec, 56 | } 57 | -------------------------------------------------------------------------------- /src/util/hypercube.rs: -------------------------------------------------------------------------------- 1 | /// A boolean hypercube structure to create an ergonomic evaluation domain 2 | use crate::espresso::virtual_polynomial::bit_decompose; 3 | use ark_ff::PrimeField; 4 | 5 | use std::marker::PhantomData; 6 | 7 | /// A boolean hypercube that returns its points as an iterator 8 | /// If you iterate on it for 3 variables you will get points in little-endian order: 9 | /// 000 -> 100 -> 010 -> 110 -> 001 -> 101 -> 011 -> 111 10 | #[derive(Debug)] 11 | pub struct BooleanHypercube { 12 | _f: PhantomData, 13 | n_vars: usize, 14 | current: u64, 15 | max: u64, 16 | } 17 | 18 | impl BooleanHypercube { 19 | pub fn new(n_vars: usize) -> Self { 20 | BooleanHypercube:: { 21 | _f: PhantomData::, 22 | n_vars, 23 | current: 0, 24 | max: 2_u32.pow(n_vars as u32) as u64, 25 | } 26 | } 27 | 28 | /// returns the entry at given i (which is the little-endian bit representation of i) 29 | pub fn at_i(&self, i: usize) -> Vec { 30 | assert!(i < self.max as usize); 31 | let bits = bit_decompose((i) as u64, self.n_vars); 32 | bits.iter().map(|&x| F::from(x)).collect() 33 | } 34 | } 35 | 36 | impl Iterator for BooleanHypercube { 37 | type Item = Vec; 38 | 39 | fn next(&mut self) -> Option { 40 | let bits = bit_decompose(self.current, self.n_vars); 41 | let result: Vec = bits.iter().map(|&x| F::from(x)).collect(); 42 | self.current += 1; 43 | 44 | if self.current > self.max { 45 | return None; 46 | } 47 | 48 | Some(result) 49 | } 50 | } 51 | 52 | #[cfg(test)] 53 | mod test { 54 | use super::*; 55 | use ark_bls12_381::Fr; 56 | use ark_ff::One; 57 | use ark_ff::Zero; 58 | 59 | #[test] 60 | fn test_hypercube() { 61 | let expected_results = vec![ 62 | vec![Fr::zero(), Fr::zero(), Fr::zero()], 63 | vec![Fr::one(), Fr::zero(), Fr::zero()], 64 | vec![Fr::zero(), Fr::one(), Fr::zero()], 65 | vec![Fr::one(), Fr::one(), Fr::zero()], 66 | vec![Fr::zero(), Fr::zero(), Fr::one()], 67 | vec![Fr::one(), Fr::zero(), Fr::one()], 68 | vec![Fr::zero(), Fr::one(), Fr::one()], 69 | vec![Fr::one(), Fr::one(), Fr::one()], 70 | ]; 71 | 72 | for (i, point) in BooleanHypercube::::new(3).enumerate() { 73 | assert_eq!(point, expected_results[i], "Failed at iteration {}", i); 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/util/vec.rs: -------------------------------------------------------------------------------- 1 | /// Some basic utilities 2 | /// 3 | /// Stole a bunch of code from Alex in https://github.com/alex-ozdemir/bulletproofs 4 | /// and wrote some lame tests for it 5 | use ark_ff::PrimeField; 6 | use ark_std::cfg_iter; 7 | 8 | use rayon::iter::IndexedParallelIterator; 9 | use rayon::iter::IntoParallelRefIterator; 10 | use rayon::iter::ParallelIterator; 11 | 12 | /// A sparse representation of constraint matrices. 
13 | pub type Matrix = Vec>; 14 | 15 | /// Hadamard product between two vectors 16 | pub fn hadamard(a: &Vec, b: &Vec) -> Vec { 17 | cfg_iter!(a).zip(b).map(|(a, b)| *a * b).collect() 18 | } 19 | 20 | // Multiply matrix by vector 21 | pub fn mat_vec_mul(mat: &Matrix, vec: &[F]) -> Vec { 22 | // matrices are lists of rows 23 | // rows are (value, idx) pairs 24 | let mut result = vec![F::zero(); mat.len()]; 25 | for (r, mat_row) in mat.iter().enumerate() { 26 | for (c, mat_val) in mat_row.iter().enumerate() { 27 | assert!(c < vec.len()); 28 | result[r] += *mat_val * vec[c]; 29 | } 30 | } 31 | result 32 | } 33 | 34 | // Multiply vector by scalar 35 | pub fn vec_scalar_mul(vec: &[F], c: &F) -> Vec { 36 | let mut result = vec![F::zero(); vec.len()]; 37 | for (i, a) in vec.iter().enumerate() { 38 | result[i] = *a * c; 39 | } 40 | result 41 | } 42 | 43 | // Add two vectors 44 | pub fn vec_add(vec_a: &[F], vec_b: &[F]) -> Vec { 45 | assert_eq!(vec_a.len(), vec_b.len()); 46 | 47 | let mut result = vec![F::zero(); vec_a.len()]; 48 | for i in 0..vec_a.len() { 49 | result[i] = vec_a[i] + vec_b[i]; 50 | } 51 | result 52 | } 53 | 54 | pub fn to_F_matrix(M: Vec>) -> Vec> { 55 | let mut R: Vec> = vec![Vec::new(); M.len()]; 56 | for i in 0..M.len() { 57 | R[i] = vec![F::zero(); M[i].len()]; 58 | for j in 0..M[i].len() { 59 | R[i][j] = F::from(M[i][j] as u64); 60 | } 61 | } 62 | R 63 | } 64 | 65 | pub fn to_F_vec(z: Vec) -> Vec { 66 | let mut r: Vec = vec![F::zero(); z.len()]; 67 | for i in 0..z.len() { 68 | r[i] = F::from(z[i] as u64); 69 | } 70 | r 71 | } 72 | 73 | #[cfg(test)] 74 | mod test { 75 | use super::*; 76 | use ark_bls12_381::Fr; 77 | 78 | #[test] 79 | fn test_hadamard() -> () { 80 | let A = vec![ 81 | Fr::from(1u64), 82 | Fr::from(2u64), 83 | Fr::from(3u64), 84 | Fr::from(4u64), 85 | Fr::from(5u64), 86 | Fr::from(6u64), 87 | ]; 88 | 89 | let B = vec![ 90 | Fr::from(6u64), 91 | Fr::from(5u64), 92 | Fr::from(4u64), 93 | Fr::from(3u64), 94 | Fr::from(2u64), 95 | Fr::from(1u64), 96 | ]; 97 | 98 | let C = hadamard(&A, &B); 99 | assert_eq!( 100 | C, 101 | vec![ 102 | Fr::from(6u64), 103 | Fr::from(10u64), 104 | Fr::from(12u64), 105 | Fr::from(12u64), 106 | Fr::from(10u64), 107 | Fr::from(6u64) 108 | ] 109 | ); 110 | } 111 | 112 | #[test] 113 | fn test_mat_vec_mul() -> () { 114 | let A = vec![ 115 | vec![Fr::from(2u64), Fr::from(3u64), Fr::from(4u64)], 116 | vec![Fr::from(4u64), Fr::from(11u64), Fr::from(14u64)], 117 | vec![Fr::from(2u64), Fr::from(8u64), Fr::from(17u64)], 118 | ]; 119 | let v = vec![Fr::from(19u64), Fr::from(55u64), Fr::from(50u64)]; 120 | 121 | let result = mat_vec_mul(&A, &v); 122 | assert_eq!( 123 | result, 124 | vec![Fr::from(403u64), Fr::from(1381u64), Fr::from(1328u64)] 125 | ); 126 | 127 | assert_eq!( 128 | vec_scalar_mul(&result, &Fr::from(2u64)), 129 | vec![Fr::from(806u64), Fr::from(2762u64), Fr::from(2656u64)] 130 | ); 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /src/ccs/pedersen.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::CurveGroup; 2 | 3 | use crate::util::vec::{vec_add, vec_scalar_mul}; 4 | use transcript::IOPTranscript; 5 | 6 | use ark_std::{rand::Rng, UniformRand}; 7 | 8 | use std::marker::PhantomData; 9 | 10 | #[derive(Clone, Debug)] 11 | pub struct Proof { 12 | R: C, 13 | u_: Vec, 14 | ru_: C::ScalarField, 15 | } 16 | 17 | #[derive(Clone, Debug)] 18 | pub struct Params { 19 | h: C, 20 | pub generators: Vec, // Affine for the MSM 21 | } 22 | 23 | 
#[derive(Debug, Clone, Eq, PartialEq)] 24 | pub struct Commitment(pub C); 25 | 26 | #[derive(Clone, Debug)] 27 | pub struct Pedersen { 28 | _c: PhantomData, 29 | } 30 | 31 | impl Pedersen { 32 | pub fn new_params(rng: &mut R, max: usize) -> Params { 33 | let h_scalar = C::ScalarField::rand(rng); 34 | let g: C = C::generator(); 35 | let generators: Vec = vec![C::Affine::rand(rng); max]; 36 | Params { 37 | h: g.mul(h_scalar), 38 | generators, 39 | } 40 | } 41 | 42 | pub fn commit( 43 | params: &Params, 44 | v: &[C::ScalarField], 45 | r: &C::ScalarField, // random value is provided, in order to be choosen by other parts of the protocol 46 | ) -> Commitment { 47 | let msm = C::msm(¶ms.generators, v).unwrap(); 48 | 49 | let cm = params.h.mul(r) + msm; 50 | Commitment(cm) 51 | } 52 | 53 | pub fn prove( 54 | params: &Params, 55 | transcript: &mut IOPTranscript, 56 | cm: &Commitment, 57 | v: &Vec, 58 | r: &C::ScalarField, 59 | ) -> Proof { 60 | let r1 = transcript.get_and_append_challenge(b"r1").unwrap(); 61 | let d = transcript 62 | .get_and_append_challenge_vectors(b"d", v.len()) 63 | .unwrap(); 64 | 65 | let msm = C::msm(¶ms.generators, &d).unwrap(); 66 | let R: C = params.h.mul(r1) + msm; 67 | 68 | transcript 69 | .append_serializable_element(b"cm", &cm.0) 70 | .unwrap(); 71 | transcript.append_serializable_element(b"R", &R).unwrap(); 72 | let e = transcript.get_and_append_challenge(b"e").unwrap(); 73 | 74 | let u_ = vec_add(&vec_scalar_mul(v, &e), &d); 75 | let ru_ = e * r + r1; 76 | 77 | Proof { R, u_, ru_ } 78 | } 79 | pub fn verify( 80 | params: &Params, 81 | transcript: &mut IOPTranscript, 82 | cm: Commitment, 83 | proof: Proof, 84 | ) -> bool { 85 | // r1, d just to match Prover's transcript 86 | transcript.get_and_append_challenge(b"r1").unwrap(); // r_1 87 | transcript 88 | .get_and_append_challenge_vectors(b"d", proof.u_.len()) 89 | .unwrap(); // d 90 | 91 | transcript 92 | .append_serializable_element(b"cm", &cm.0) 93 | .unwrap(); 94 | transcript 95 | .append_serializable_element(b"R", &proof.R) 96 | .unwrap(); 97 | let e = transcript.get_and_append_challenge(b"e").unwrap(); 98 | let lhs = proof.R + cm.0.mul(e); 99 | 100 | let msm = C::msm(¶ms.generators, &proof.u_).unwrap(); 101 | let rhs = params.h.mul(proof.ru_) + msm; 102 | if lhs != rhs { 103 | return false; 104 | } 105 | true 106 | } 107 | } 108 | 109 | #[cfg(test)] 110 | mod tests { 111 | use super::*; 112 | use ark_bls12_381::{Fr, G1Projective}; 113 | 114 | #[test] 115 | fn test_pedersen_commitment() { 116 | let mut rng = ark_std::test_rng(); 117 | 118 | const n: usize = 10; 119 | // setup params 120 | let params = Pedersen::new_params(&mut rng, n); 121 | 122 | // init Prover's transcript 123 | let mut transcript_p = IOPTranscript::::new(b"pedersen_test"); 124 | transcript_p.append_message(b"init", b"init").unwrap(); 125 | // init Verifier's transcript 126 | let mut transcript_v = IOPTranscript::::new(b"pedersen_test"); 127 | transcript_v.append_message(b"init", b"init").unwrap(); 128 | 129 | let v: Vec = vec![Fr::rand(&mut rng); n]; 130 | let r: Fr = Fr::rand(&mut rng); 131 | 132 | let cm = Pedersen::::commit(¶ms, &v, &r); 133 | let proof = Pedersen::::prove(¶ms, &mut transcript_p, &cm, &v, &r); 134 | let v = Pedersen::::verify(¶ms, &mut transcript_v, cm, proof); 135 | assert!(v); 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/ccs/ccs.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::CurveGroup; 2 | use 
ark_std::{One, Zero}; 3 | 4 | // XXX use thiserror everywhere? espresso doesnt use it... 5 | use thiserror::Error; 6 | 7 | use crate::util::vec::*; 8 | 9 | #[derive(Error, Debug)] 10 | pub enum CCSError { 11 | #[error("Relation not satisfied")] 12 | NotSatisfied, 13 | } 14 | 15 | /// A CCS structure 16 | #[derive(Debug, Clone, Eq, PartialEq)] 17 | pub struct CCS { 18 | // m: number of columns in M_i (such that M_i \in F^{m, n}) 19 | pub m: usize, 20 | // n = |z|, number of rows in M_i 21 | pub n: usize, 22 | // l = |io|, size of public input/output 23 | pub l: usize, 24 | // t = |M|, number of matrices 25 | pub t: usize, 26 | // q = |c| = |S|, number of multisets 27 | pub q: usize, 28 | // d: max degree in each variable 29 | pub d: usize, 30 | // s = log(m), dimension of x 31 | pub s: usize, 32 | // s_prime = log(n), dimension of y 33 | pub s_prime: usize, 34 | 35 | // Vector of matrices 36 | pub M: Vec>, 37 | // Vector of multisets 38 | pub S: Vec>, 39 | // Vector of coefficients 40 | pub c: Vec, 41 | } 42 | 43 | impl CCS { 44 | /// Check that a CCS structure is satisfied by a z vector. 45 | /// This works with matrices. It doesn't do any polynomial stuff 46 | /// Only for testing 47 | pub fn check_relation(&self, z: &[C::ScalarField]) -> Result<(), CCSError> { 48 | let mut result = vec![C::ScalarField::zero(); self.m]; 49 | 50 | for i in 0..self.q { 51 | // XXX This can be done more neatly with a .fold() or .reduce() 52 | 53 | // Extract the needed M_j matrices out of S_i 54 | let vec_M_j: Vec<&Matrix> = 55 | self.S[i].iter().map(|j| &self.M[*j]).collect(); 56 | 57 | // Complete the hadamard chain 58 | let mut hadamard_result = vec![C::ScalarField::one(); self.m]; 59 | for M_j in vec_M_j.into_iter() { 60 | hadamard_result = hadamard(&hadamard_result, &mat_vec_mul(M_j, z)); 61 | } 62 | 63 | // Multiply by the coefficient of this step 64 | let c_M_j_z = vec_scalar_mul(&hadamard_result, &self.c[i]); 65 | 66 | // Add it to the final vector 67 | result = vec_add(&result, &c_M_j_z); 68 | } 69 | 70 | // Make sure the final vector is all zeroes 71 | for e in result { 72 | if !e.is_zero() { 73 | return Err(CCSError::NotSatisfied); 74 | } 75 | } 76 | 77 | Ok(()) 78 | } 79 | } 80 | 81 | #[cfg(test)] 82 | pub mod test { 83 | use super::*; 84 | use ark_bls12_381::G1Projective; 85 | use ark_ff::PrimeField; 86 | use ark_std::log2; 87 | use std::ops::Neg; 88 | 89 | /// Converts the R1CS structure to the CCS structure 90 | fn CCS_from_r1cs( 91 | A: Vec>, 92 | B: Vec>, 93 | C: Vec>, 94 | io_len: usize, 95 | ) -> CCS { 96 | let m = A.len(); 97 | let n = A[0].len(); 98 | CCS { 99 | m, 100 | n, 101 | l: io_len, 102 | s: log2(m) as usize, 103 | s_prime: log2(n) as usize, 104 | t: 3, 105 | q: 2, 106 | d: 2, 107 | 108 | S: vec![vec![0, 1], vec![2]], 109 | c: vec![C::ScalarField::one(), C::ScalarField::one().neg()], 110 | M: vec![A, B, C], 111 | } 112 | } 113 | 114 | /// Return a CCS circuit that implements the Vitalik `x^3 + x + 5 == 35` (from 115 | /// https://www.vitalik.ca/general/2016/12/10/qap.html ) 116 | #[cfg(test)] 117 | pub fn get_test_ccs() -> CCS { 118 | let A = to_F_matrix(vec![ 119 | vec![0, 1, 0, 0, 0, 0], 120 | vec![0, 0, 0, 1, 0, 0], 121 | vec![0, 1, 0, 0, 1, 0], 122 | vec![5, 0, 0, 0, 0, 1], 123 | ]); 124 | let B = to_F_matrix(vec![ 125 | vec![0, 1, 0, 0, 0, 0], 126 | vec![0, 1, 0, 0, 0, 0], 127 | vec![1, 0, 0, 0, 0, 0], 128 | vec![1, 0, 0, 0, 0, 0], 129 | ]); 130 | let C = to_F_matrix(vec![ 131 | vec![0, 0, 0, 1, 0, 0], 132 | vec![0, 0, 0, 0, 1, 0], 133 | vec![0, 0, 0, 0, 0, 1], 134 | vec![0, 
0, 1, 0, 0, 0], 135 | ]); 136 | CCS_from_r1cs(A, B, C, 1) 137 | } 138 | 139 | /// Computes the z vector for the given input for Vitalik's equation. 140 | #[cfg(test)] 141 | pub fn get_test_z(input: usize) -> Vec { 142 | // z = (1, io, w) 143 | to_F_vec(vec![ 144 | 1, 145 | input, 146 | input * input * input + input + 5, // x^3 + x + 5 147 | input * input, // x^2 148 | input * input * input, // x^2 * x 149 | input * input * input + input, // x^3 + x 150 | ]) 151 | } 152 | 153 | /// Test that a basic CCS relation can be satisfied 154 | #[test] 155 | fn test_ccs_relation() -> () { 156 | let ccs = get_test_ccs::(); 157 | let z = get_test_z(3); 158 | 159 | ccs.check_relation(&z).unwrap(); 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /src/util/mle.rs: -------------------------------------------------------------------------------- 1 | /// Some basic MLE utilities 2 | use ark_ff::PrimeField; 3 | use ark_poly::DenseMultilinearExtension; 4 | use ark_std::log2; 5 | 6 | use super::vec::Matrix; 7 | 8 | /// Pad matrix so that its columns and rows are powers of two 9 | fn pad_matrix(matrix: &Matrix) -> Matrix { 10 | // Find the desired dimensions after padding 11 | let rows = matrix.len(); 12 | let cols = matrix[0].len(); 13 | let padded_rows = rows.next_power_of_two(); 14 | let padded_cols = cols.next_power_of_two(); 15 | 16 | // Create a new padded matrix 17 | // XXX inefficient. take a mutable matrix as input instead? 18 | let mut padded_matrix = vec![vec![F::zero(); padded_cols]; padded_rows]; 19 | 20 | // Copy values from the input matrix to the padded matrix 21 | for (i, row) in matrix.iter().enumerate() { 22 | for (j, &value) in row.iter().enumerate() { 23 | padded_matrix[i][j] = value; 24 | } 25 | } 26 | 27 | padded_matrix 28 | } 29 | 30 | // XXX shouldn't consume the matrix 31 | pub fn matrix_to_mle(matrix: Matrix) -> DenseMultilinearExtension { 32 | let n_vars: usize = (log2(matrix.len()) + log2(matrix[0].len())) as usize; // n_vars = s + s' 33 | 34 | // Matrices might need to get padded before turned into an MLE 35 | let padded_matrix = pad_matrix(&matrix); 36 | 37 | // Flatten matrix into a vector 38 | let M_evals: Vec = padded_matrix.into_iter().flatten().collect(); 39 | 40 | vec_to_mle(n_vars, &M_evals) 41 | } 42 | 43 | pub fn vec_to_mle(n_vars: usize, v: &Vec) -> DenseMultilinearExtension { 44 | // Pad to 2^n_vars 45 | let v_padded: Vec = [ 46 | v.clone(), 47 | std::iter::repeat(F::zero()) 48 | .take((1 << n_vars) - v.len()) 49 | .collect(), 50 | ] 51 | .concat(); 52 | DenseMultilinearExtension::::from_evaluations_vec(n_vars, v_padded) 53 | } 54 | 55 | #[cfg(test)] 56 | mod tests { 57 | use super::*; 58 | use crate::{ 59 | ccs::ccs::test::get_test_z, 60 | espresso::multilinear_polynomial::fix_variables, 61 | espresso::multilinear_polynomial::testing_code::fix_last_variables, 62 | util::{hypercube::BooleanHypercube, vec::to_F_matrix}, 63 | }; 64 | use ark_poly::MultilinearExtension; 65 | use ark_std::Zero; 66 | 67 | use ark_bls12_381::Fr; 68 | 69 | #[test] 70 | fn test_matrix_to_mle() { 71 | let A = to_F_matrix::(vec![ 72 | vec![2, 3, 4, 4], 73 | vec![4, 11, 14, 14], 74 | vec![2, 8, 17, 17], 75 | vec![420, 4, 2, 0], 76 | ]); 77 | 78 | let A_mle = matrix_to_mle(A); 79 | assert_eq!(A_mle.evaluations.len(), 16); // 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals 80 | 81 | let A = to_F_matrix::(vec![ 82 | vec![2, 3, 4, 4, 1], 83 | vec![4, 11, 14, 14, 2], 84 | vec![2, 8, 17, 17, 3], 85 | vec![420, 4, 2, 0, 4], 86 | vec![420, 4, 2, 0, 5], 
87 | ]); 88 | let A_mle = matrix_to_mle(A.clone()); 89 | assert_eq!(A_mle.evaluations.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals 90 | 91 | // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values 92 | let bhc = BooleanHypercube::new(A_mle.num_vars); 93 | let A_padded = pad_matrix(&A); 94 | for (i, A_i) in A_padded.iter().enumerate() { 95 | for (j, _) in A_i.iter().enumerate() { 96 | let s_i_j = bhc.at_i(i * A_i.len() + j); 97 | assert_eq!(A_mle.evaluate(&s_i_j).unwrap(), A_padded[i][j]); 98 | } 99 | } 100 | } 101 | 102 | #[test] 103 | fn test_vec_to_mle() { 104 | let z = get_test_z::(3); 105 | let n_vars = 3; 106 | let z_mle = vec_to_mle(n_vars, &z); 107 | 108 | // check that the z_mle evaluated over the boolean hypercube equals the vec z_i values 109 | let bhc = BooleanHypercube::new(z_mle.num_vars); 110 | for i in 0..z.len() { 111 | let s_i = bhc.at_i(i); 112 | assert_eq!(z_mle.evaluate(&s_i).unwrap(), z[i]); 113 | } 114 | // for the rest of elements of the boolean hypercube, expect it to evaluate to zero 115 | for i in (z.len())..(1 << z_mle.num_vars) { 116 | let s_i = bhc.at_i(i); 117 | assert_eq!(z_mle.evaluate(&s_i).unwrap(), Fr::zero()); 118 | } 119 | } 120 | 121 | #[test] 122 | fn test_fix_variables() { 123 | let A = to_F_matrix(vec![ 124 | vec![2, 3, 4, 4], 125 | vec![4, 11, 14, 14], 126 | vec![2, 8, 17, 17], 127 | vec![420, 4, 2, 0], 128 | ]); 129 | 130 | let A_mle = matrix_to_mle(A.clone()); 131 | let bhc = BooleanHypercube::new(2); 132 | for (i, y) in bhc.enumerate() { 133 | // First check that the arkworks and espresso funcs match 134 | let expected_fix_left = A_mle.fix_variables(&y); // try arkworks fix_variables 135 | let fix_left = fix_variables(&A_mle, &y); // try espresso fix_variables 136 | 137 | assert_eq!(fix_left, expected_fix_left); 138 | 139 | // Check that fixing first variables pins down a column 140 | // i.e. fixing x to 0 will return the first column 141 | // fixing x to 1 will return the second column etc. 142 | let column_i: Vec = A.clone().iter().map(|x| x[i]).collect(); 143 | assert_eq!(fix_left.evaluations, column_i); 144 | 145 | // Now check that fixing last variables pins down a row 146 | // i.e. fixing y to 0 will return the first row 147 | // fixing y to 1 will return the second row etc. 
148 | let row_i: Vec = A[i].clone(); 149 | let fix_right = fix_last_variables(&A_mle, &y); 150 | assert_eq!(fix_right.evaluations, row_i); 151 | } 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/ccs/util.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::PrimeField; 2 | use ark_poly::DenseMultilinearExtension; 3 | use ark_poly::MultilinearExtension; 4 | use std::ops::Add; 5 | 6 | use crate::espresso::multilinear_polynomial::fix_variables; 7 | use crate::espresso::multilinear_polynomial::scalar_mul; 8 | 9 | use crate::util::hypercube::BooleanHypercube; 10 | use crate::util::mle::matrix_to_mle; 11 | use crate::util::mle::vec_to_mle; 12 | use crate::util::vec::Matrix; 13 | 14 | /// Return a vector of evaluations p_j(r) = \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) 15 | /// for all j values in 0..self.t 16 | pub fn compute_all_sum_Mz_evals( 17 | vec_M: &[Matrix], 18 | z: &Vec, 19 | r: &[F], 20 | s_prime: usize, 21 | ) -> Vec { 22 | // Convert z to MLE 23 | let z_y_mle = vec_to_mle(s_prime, z); 24 | // Convert all matrices to MLE 25 | let M_x_y_mle: Vec> = 26 | vec_M.iter().cloned().map(matrix_to_mle).collect(); 27 | 28 | let mut v = Vec::with_capacity(M_x_y_mle.len()); 29 | for M_i in M_x_y_mle { 30 | let sum_Mz = compute_sum_Mz(M_i, &z_y_mle, s_prime); 31 | let v_i = sum_Mz.evaluate(r).unwrap(); 32 | v.push(v_i); 33 | } 34 | v 35 | } 36 | 37 | /// Return the multilinear polynomial p(x) = \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) 38 | pub fn compute_sum_Mz( 39 | M_j: DenseMultilinearExtension, 40 | z: &DenseMultilinearExtension, 41 | s_prime: usize, 42 | ) -> DenseMultilinearExtension { 43 | let mut sum_Mz = DenseMultilinearExtension { 44 | evaluations: vec![F::zero(); M_j.evaluations.len()], 45 | num_vars: M_j.num_vars - s_prime, 46 | }; 47 | 48 | let bhc = BooleanHypercube::new(s_prime); 49 | for y in bhc.into_iter() { 50 | // In a slightly counter-intuitive fashion fix_variables() fixes the right-most variables of the polynomial. So 51 | // for a polynomial M(x,y) and a random field element r, if we do fix_variables(M,r) we will get M(x,r). 
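// Concretely (restating the docs above): for each y on the boolean hypercube {0,1}^s',
// fix_variables pins the y-variables of M_j, the resulting s-variable MLE is scaled by z(y),
// and the terms are accumulated so that sum_Mz(x) = \sum_{y} M_j(x, y) * z(y).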
52 | let M_j_y = fix_variables(&M_j, &y); 53 | let z_y = z.evaluate(&y).unwrap(); 54 | let M_j_z = scalar_mul(&M_j_y, &z_y); 55 | sum_Mz = sum_Mz.add(M_j_z); 56 | } 57 | sum_Mz 58 | } 59 | 60 | #[cfg(test)] 61 | pub mod test { 62 | use super::*; 63 | 64 | use ark_bls12_381::{Fr, G1Projective}; 65 | use ark_std::test_rng; 66 | use ark_std::One; 67 | use ark_std::UniformRand; 68 | use ark_std::Zero; 69 | 70 | use crate::ccs::ccs::test::{get_test_ccs, get_test_z}; 71 | use crate::espresso::multilinear_polynomial::testing_code::fix_last_variables; 72 | use crate::espresso::virtual_polynomial::eq_eval; 73 | 74 | use crate::ccs::util::compute_sum_Mz; 75 | 76 | #[test] 77 | fn test_compute_sum_Mz_over_boolean_hypercube() -> () { 78 | let ccs = get_test_ccs::(); 79 | let z = get_test_z(3); 80 | ccs.check_relation(&z).unwrap(); 81 | let z_mle = vec_to_mle(ccs.s_prime, &z); 82 | 83 | // check that evaluating over all the values x over the boolean hypercube, the result of 84 | // the next for loop is equal to 0 85 | for x in BooleanHypercube::new(ccs.s).into_iter() { 86 | // println!("x {:?}", x); 87 | let mut r = Fr::zero(); 88 | for i in 0..ccs.q { 89 | let mut Sj_prod = Fr::one(); 90 | for j in ccs.S[i].clone() { 91 | let M_j = matrix_to_mle(ccs.M[j].clone()); 92 | let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime); 93 | let sum_Mz_x = sum_Mz.evaluate(&x).unwrap(); 94 | Sj_prod *= sum_Mz_x; 95 | } 96 | r += Sj_prod * ccs.c[i]; 97 | } 98 | assert_eq!(r, Fr::zero()); 99 | } 100 | } 101 | 102 | /// Given M(x,y) matrix and a random field element `r`, test that ~M(r,y) is is an s'-variable polynomial which 103 | /// compresses every column j of the M(x,y) matrix by performing a random linear combination between the elements 104 | /// of the column and the values eq_i(r) where i is the row of that element 105 | /// 106 | /// For example, for matrix M: 107 | /// 108 | /// [2, 3, 4, 4 109 | /// 4, 4, 3, 2 110 | /// 2, 8, 9, 2 111 | /// 9, 4, 2, 0] 112 | /// 113 | /// The polynomial ~M(r,y) is a polynomial in F^2 which evaluates to the following values in the hypercube: 114 | /// - M(00) = 2*eq_00(r) + 4*eq_10(r) + 2*eq_01(r) + 9*eq_11(r) 115 | /// - M(10) = 3*eq_00(r) + 4*eq_10(r) + 8*eq_01(r) + 4*eq_11(r) 116 | /// - M(01) = 4*eq_00(r) + 3*eq_10(r) + 9*eq_01(r) + 2*eq_11(r) 117 | /// - M(11) = 4*eq_00(r) + 2*eq_10(r) + 2*eq_01(r) + 0*eq_11(r) 118 | /// 119 | /// This is used by Hypernova in LCCCS to perform a verifier-chosen random linear combination between the columns 120 | /// of the matrix and the z vector. This technique is also used extensively in "An Algebraic Framework for 121 | /// Universal and Updatable SNARKs". 
122 | #[test] 123 | fn test_compute_M_r_y_compression() -> () { 124 | let mut rng = test_rng(); 125 | 126 | // s = 2, s' = 3 127 | let ccs = get_test_ccs::(); 128 | 129 | let M = ccs.M[0].clone(); 130 | let M_mle = matrix_to_mle(M.clone()); 131 | 132 | // Fix the polynomial ~M(r,y) 133 | let r: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); 134 | let M_r_y = fix_last_variables(&M_mle, &r); 135 | 136 | // Now let's compute M_r_y the other way around 137 | for j in 0..M[0].len() { 138 | // Go over every column of M 139 | let column_j: Vec = M.clone().iter().map(|x| x[j]).collect(); 140 | 141 | // and perform the random lincomb between the elements of the column and eq_i(r) 142 | let rlc = BooleanHypercube::new(ccs.s) 143 | .enumerate() 144 | .into_iter() 145 | .map(|(i, x)| column_j[i] * eq_eval(&x, &r).unwrap()) 146 | .fold(Fr::zero(), |acc, result| acc + result); 147 | 148 | assert_eq!(M_r_y.evaluations[j], rlc); 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/espresso/multilinear_polynomial.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | use ark_ff::Field; 8 | #[cfg(feature = "parallel")] 9 | use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; 10 | 11 | pub use ark_poly::DenseMultilinearExtension; 12 | 13 | pub fn fix_variables( 14 | poly: &DenseMultilinearExtension, 15 | partial_point: &[F], 16 | ) -> DenseMultilinearExtension { 17 | assert!( 18 | partial_point.len() <= poly.num_vars, 19 | "invalid size of partial point" 20 | ); 21 | let nv = poly.num_vars; 22 | let mut poly = poly.evaluations.to_vec(); 23 | let dim = partial_point.len(); 24 | // evaluate single variable of partial point from left to right 25 | for (i, point) in partial_point.iter().enumerate().take(dim) { 26 | poly = fix_one_variable_helper(&poly, nv - i, point); 27 | } 28 | 29 | DenseMultilinearExtension::::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))]) 30 | } 31 | 32 | fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { 33 | let mut res = vec![F::zero(); 1 << (nv - 1)]; 34 | 35 | // evaluate single variable of partial point from left to right 36 | #[cfg(not(feature = "parallel"))] 37 | for i in 0..(1 << (nv - 1)) { 38 | res[i] = data[i] + (data[(i << 1) + 1] - data[i << 1]) * point; 39 | } 40 | 41 | #[cfg(feature = "parallel")] 42 | res.par_iter_mut().enumerate().for_each(|(i, x)| { 43 | *x = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point; 44 | }); 45 | 46 | res 47 | } 48 | 49 | pub fn evaluate_no_par(poly: &DenseMultilinearExtension, point: &[F]) -> F { 50 | assert_eq!(poly.num_vars, point.len()); 51 | fix_variables_no_par(poly, point).evaluations[0] 52 | } 53 | 54 | fn fix_variables_no_par( 55 | poly: &DenseMultilinearExtension, 56 | partial_point: &[F], 57 | ) -> DenseMultilinearExtension { 58 | assert!( 59 | partial_point.len() <= poly.num_vars, 60 | "invalid size of partial point" 61 | ); 62 | let nv = poly.num_vars; 63 | let mut poly = poly.evaluations.to_vec(); 64 | let dim = partial_point.len(); 65 | // evaluate single variable of partial point from left to right 66 | for i in 1..dim + 1 { 67 | let r = partial_point[i - 1]; 68 | for b in 0..(1 << (nv - i)) { 69 | poly[b] = poly[b 
<< 1] + (poly[(b << 1) + 1] - poly[b << 1]) * r; 70 | } 71 | } 72 | DenseMultilinearExtension::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))]) 73 | } 74 | 75 | /// Given multilinear polynomial `p(x)` and s `s`, compute `s*p(x)` 76 | pub fn scalar_mul( 77 | poly: &DenseMultilinearExtension, 78 | s: &F, 79 | ) -> DenseMultilinearExtension { 80 | DenseMultilinearExtension { 81 | evaluations: poly.evaluations.iter().map(|e| *e * s).collect(), 82 | num_vars: poly.num_vars, 83 | } 84 | } 85 | 86 | /// Test-only methods used in virtual_polynomial.rs 87 | #[cfg(test)] 88 | pub mod testing_code { 89 | use super::*; 90 | use ark_ff::PrimeField; 91 | use ark_std::rand::RngCore; 92 | use ark_std::{end_timer, start_timer}; 93 | use std::sync::Arc; 94 | 95 | pub fn fix_last_variables( 96 | poly: &DenseMultilinearExtension, 97 | partial_point: &[F], 98 | ) -> DenseMultilinearExtension { 99 | assert!( 100 | partial_point.len() <= poly.num_vars, 101 | "invalid size of partial point" 102 | ); 103 | let nv = poly.num_vars; 104 | let mut poly = poly.evaluations.to_vec(); 105 | let dim = partial_point.len(); 106 | // evaluate single variable of partial point from left to right 107 | for (i, point) in partial_point.iter().rev().enumerate().take(dim) { 108 | poly = fix_last_variable_helper(&poly, nv - i, point); 109 | } 110 | 111 | DenseMultilinearExtension::::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))]) 112 | } 113 | 114 | fn fix_last_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { 115 | let half_len = 1 << (nv - 1); 116 | let mut res = vec![F::zero(); half_len]; 117 | 118 | // evaluate single variable of partial point from left to right 119 | #[cfg(not(feature = "parallel"))] 120 | for b in 0..half_len { 121 | res[b] = data[b] + (data[b + half_len] - data[b]) * point; 122 | } 123 | 124 | #[cfg(feature = "parallel")] 125 | res.par_iter_mut().enumerate().for_each(|(i, x)| { 126 | *x = data[i] + (data[i + half_len] - data[i]) * point; 127 | }); 128 | 129 | res 130 | } 131 | 132 | /// Sample a random list of multilinear polynomials. 133 | /// Returns 134 | /// - the list of polynomials, 135 | /// - its sum of polynomial evaluations over the boolean hypercube. 136 | #[cfg(test)] 137 | pub fn random_mle_list( 138 | nv: usize, 139 | degree: usize, 140 | rng: &mut R, 141 | ) -> (Vec>>, F) { 142 | let start = start_timer!(|| "sample random mle list"); 143 | let mut multiplicands = Vec::with_capacity(degree); 144 | for _ in 0..degree { 145 | multiplicands.push(Vec::with_capacity(1 << nv)) 146 | } 147 | let mut sum = F::zero(); 148 | 149 | for _ in 0..(1 << nv) { 150 | let mut product = F::one(); 151 | 152 | for e in multiplicands.iter_mut() { 153 | let val = F::rand(rng); 154 | e.push(val); 155 | product *= val; 156 | } 157 | sum += product; 158 | } 159 | 160 | let list = multiplicands 161 | .into_iter() 162 | .map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x))) 163 | .collect(); 164 | 165 | end_timer!(start); 166 | (list, sum) 167 | } 168 | 169 | // Build a randomize list of mle-s whose sum is zero. 
170 | #[cfg(test)] 171 | pub fn random_zero_mle_list( 172 | nv: usize, 173 | degree: usize, 174 | rng: &mut R, 175 | ) -> Vec>> { 176 | let start = start_timer!(|| "sample random zero mle list"); 177 | 178 | let mut multiplicands = Vec::with_capacity(degree); 179 | for _ in 0..degree { 180 | multiplicands.push(Vec::with_capacity(1 << nv)) 181 | } 182 | for _ in 0..(1 << nv) { 183 | multiplicands[0].push(F::zero()); 184 | for e in multiplicands.iter_mut().skip(1) { 185 | e.push(F::rand(rng)); 186 | } 187 | } 188 | 189 | let list = multiplicands 190 | .into_iter() 191 | .map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x))) 192 | .collect(); 193 | 194 | end_timer!(start); 195 | list 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /src/ccs/lcccs.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::CurveGroup; 2 | use ark_poly::DenseMultilinearExtension; 3 | use ark_std::{One}; 4 | use std::sync::Arc; 5 | 6 | use ark_std::{rand::Rng, UniformRand}; 7 | 8 | use crate::ccs::cccs::Witness; 9 | use crate::ccs::ccs::{CCSError, CCS}; 10 | use crate::ccs::util::{compute_all_sum_Mz_evals, compute_sum_Mz}; 11 | 12 | use crate::ccs::pedersen::{Commitment, Params as PedersenParams, Pedersen}; 13 | use crate::espresso::virtual_polynomial::VirtualPolynomial; 14 | use crate::util::mle::matrix_to_mle; 15 | use crate::util::mle::vec_to_mle; 16 | 17 | /// Linearized Committed CCS instance 18 | #[derive(Debug, Clone, Eq, PartialEq)] 19 | pub struct LCCCS { 20 | // Underlying CCS structure 21 | pub ccs: CCS, 22 | 23 | // TODO: Further improve the abstractions here. We should not need so many public fields 24 | 25 | // Commitment to witness 26 | pub C: Commitment, 27 | // Relaxation factor of z for folded LCCCS 28 | pub u: C::ScalarField, 29 | // Public input/output 30 | pub x: Vec, 31 | // Random evaluation point for the v_i 32 | pub r_x: Vec, 33 | // Vector of v_i 34 | pub v: Vec, 35 | } 36 | 37 | impl CCS { 38 | /// Compute v_j values of the linearized committed CCS form 39 | /// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) 40 | fn compute_v_j(&self, z: &[C::ScalarField], r: &[C::ScalarField]) -> Vec { 41 | compute_all_sum_Mz_evals(&self.M, &z.to_vec(), r, self.s_prime) 42 | } 43 | 44 | pub fn to_lcccs( 45 | &self, 46 | rng: &mut R, 47 | pedersen_params: &PedersenParams, 48 | z: &[C::ScalarField], 49 | ) -> (LCCCS, Witness) { 50 | let w: Vec = z[(1 + self.l)..].to_vec(); 51 | let r_w = C::ScalarField::rand(rng); 52 | let C = Pedersen::commit(pedersen_params, &w, &r_w); 53 | 54 | let r_x: Vec = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect(); 55 | let v = self.compute_v_j(z, &r_x); 56 | 57 | ( 58 | LCCCS:: { 59 | ccs: self.clone(), 60 | C, 61 | u: C::ScalarField::one(), 62 | x: z[1..(1 + self.l)].to_vec(), 63 | r_x, 64 | v, 65 | }, 66 | Witness:: { w, r_w }, 67 | ) 68 | } 69 | } 70 | 71 | impl LCCCS { 72 | /// Compute all L_j(x) polynomials 73 | pub fn compute_Ls(&self, z: &Vec) -> Vec> { 74 | let z_mle = vec_to_mle(self.ccs.s_prime, z); 75 | // Convert all matrices to MLE 76 | let M_x_y_mle: Vec> = 77 | self.ccs.M.clone().into_iter().map(matrix_to_mle).collect(); 78 | 79 | let mut vec_L_j_x = Vec::with_capacity(self.ccs.t); 80 | for M_j in M_x_y_mle { 81 | let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.ccs.s_prime); 82 | let sum_Mz_virtual = 83 | VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), C::ScalarField::one()); 84 | let L_j_x = 
sum_Mz_virtual.build_f_hat(&self.r_x).unwrap(); 85 | vec_L_j_x.push(L_j_x); 86 | } 87 | 88 | vec_L_j_x 89 | } 90 | 91 | /// Perform the check of the LCCCS instance described at section 4.2 92 | pub fn check_relation( 93 | &self, 94 | pedersen_params: &PedersenParams, 95 | w: &Witness, 96 | ) -> Result<(), CCSError> { 97 | // check that C is the commitment of w. Notice that this is not verifying a Pedersen 98 | // opening, but checking that the Commmitment comes from committing to the witness. 99 | assert_eq!(self.C.0, Pedersen::commit(pedersen_params, &w.w, &w.r_w).0); 100 | 101 | // check CCS relation 102 | let z: Vec = [vec![self.u], self.x.clone(), w.w.to_vec()].concat(); 103 | let computed_v = compute_all_sum_Mz_evals(&self.ccs.M, &z, &self.r_x, self.ccs.s_prime); 104 | assert_eq!(computed_v, self.v); 105 | Ok(()) 106 | } 107 | } 108 | 109 | #[cfg(test)] 110 | pub mod test { 111 | use super::*; 112 | use ark_std::Zero; 113 | 114 | use crate::ccs::ccs::test::{get_test_ccs, get_test_z}; 115 | use crate::util::hypercube::BooleanHypercube; 116 | use ark_std::test_rng; 117 | 118 | use ark_bls12_381::{Fr, G1Projective}; 119 | 120 | #[test] 121 | /// Test linearized CCCS v_j against the L_j(x) 122 | fn test_lcccs_v_j() -> () { 123 | let mut rng = test_rng(); 124 | 125 | let ccs = get_test_ccs(); 126 | let z = get_test_z(3); 127 | ccs.check_relation(&z.clone()).unwrap(); 128 | 129 | let pedersen_params = Pedersen::::new_params(&mut rng, ccs.n - ccs.l - 1); 130 | let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z); 131 | // with our test vector comming from R1CS, v should have length 3 132 | assert_eq!(lcccs.v.len(), 3); 133 | 134 | let vec_L_j_x = lcccs.compute_Ls(&z); 135 | assert_eq!(vec_L_j_x.len(), lcccs.v.len()); 136 | 137 | for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { 138 | let sum_L_j_x = BooleanHypercube::new(ccs.s) 139 | .into_iter() 140 | .map(|y| L_j_x.evaluate(&y).unwrap()) 141 | .fold(Fr::zero(), |acc, result| acc + result); 142 | assert_eq!(v_i, sum_L_j_x); 143 | } 144 | } 145 | 146 | /// Given a bad z, check that the v_j should not match with the L_j(x) 147 | #[test] 148 | fn test_bad_v_j() -> () { 149 | let mut rng = test_rng(); 150 | 151 | let ccs = get_test_ccs(); 152 | let z = get_test_z(3); 153 | ccs.check_relation(&z.clone()).unwrap(); 154 | 155 | // Mutate z so that the relation does not hold 156 | let mut bad_z = z.clone(); 157 | bad_z[3] = Fr::zero(); 158 | assert!(ccs.check_relation(&bad_z.clone()).is_err()); 159 | 160 | let pedersen_params = Pedersen::::new_params(&mut rng, ccs.n - ccs.l - 1); 161 | // Compute v_j with the right z 162 | let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z); 163 | // with our test vector comming from R1CS, v should have length 3 164 | assert_eq!(lcccs.v.len(), 3); 165 | 166 | // Bad compute L_j(x) with the bad z 167 | let vec_L_j_x = lcccs.compute_Ls(&bad_z); 168 | assert_eq!(vec_L_j_x.len(), lcccs.v.len()); 169 | 170 | // Make sure that the LCCCS is not satisfied given these L_j(x) 171 | // i.e. 
summing L_j(x) over the hypercube should not give v_j for all j 172 | let mut satisfied = true; 173 | for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { 174 | let sum_L_j_x = BooleanHypercube::new(ccs.s) 175 | .into_iter() 176 | .map(|y| L_j_x.evaluate(&y).unwrap()) 177 | .fold(Fr::zero(), |acc, result| acc + result); 178 | if v_i != sum_L_j_x { 179 | satisfied = false; 180 | } 181 | } 182 | 183 | assert_eq!(satisfied, false); 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /src/espresso/sum_check/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | //! This module implements the sum check protocol. 8 | 9 | use crate::espresso::virtual_polynomial::{VPAuxInfo, VirtualPolynomial}; 10 | use ark_ff::PrimeField; 11 | use ark_poly::DenseMultilinearExtension; 12 | use ark_std::{end_timer, start_timer}; 13 | use std::{fmt::Debug, sync::Arc}; 14 | 15 | use structs::{IOPProof, IOPProverState, IOPVerifierState}; 16 | use subroutines::poly_iop::{prelude::PolyIOPErrors, PolyIOP}; 17 | use transcript::IOPTranscript; 18 | 19 | mod prover; 20 | pub mod structs; 21 | pub mod verifier; 22 | 23 | /// Trait for doing sum check protocols. 24 | pub trait SumCheck { 25 | type VirtualPolynomial; 26 | type VPAuxInfo; 27 | type MultilinearExtension; 28 | 29 | type SumCheckProof: Clone + Debug + Default + PartialEq; 30 | type Transcript; 31 | type SumCheckSubClaim: Clone + Debug + Default + PartialEq; 32 | 33 | /// Extract sum from the proof 34 | fn extract_sum(proof: &Self::SumCheckProof) -> F; 35 | 36 | /// Initialize the system with a transcript 37 | /// 38 | /// This function is optional -- in the case where a SumCheck is 39 | /// an building block for a more complex protocol, the transcript 40 | /// may be initialized by this complex protocol, and passed to the 41 | /// SumCheck prover/verifier. 42 | fn init_transcript() -> Self::Transcript; 43 | 44 | /// Generate proof of the sum of polynomial over {0,1}^`num_vars` 45 | /// 46 | /// The polynomial is represented in the form of a VirtualPolynomial. 47 | fn prove( 48 | poly: &Self::VirtualPolynomial, 49 | transcript: &mut Self::Transcript, 50 | ) -> Result; 51 | 52 | /// Verify the claimed sum using the proof 53 | fn verify( 54 | sum: F, 55 | proof: &Self::SumCheckProof, 56 | aux_info: &Self::VPAuxInfo, 57 | transcript: &mut Self::Transcript, 58 | ) -> Result; 59 | } 60 | 61 | /// Trait for sum check protocol prover side APIs. 62 | pub trait SumCheckProver 63 | where 64 | Self: Sized, 65 | { 66 | type VirtualPolynomial; 67 | type ProverMessage; 68 | 69 | /// Initialize the prover state to argue for the sum of the input polynomial 70 | /// over {0,1}^`num_vars`. 71 | fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result; 72 | 73 | /// Receive message from verifier, generate prover message, and proceed to 74 | /// next round. 75 | /// 76 | /// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2). 77 | fn prove_round_and_update_state( 78 | &mut self, 79 | challenge: &Option, 80 | ) -> Result; 81 | } 82 | 83 | /// Trait for sum check protocol verifier side APIs. 
84 | pub trait SumCheckVerifier { 85 | type VPAuxInfo; 86 | type ProverMessage; 87 | type Challenge; 88 | type Transcript; 89 | type SumCheckSubClaim; 90 | 91 | /// Initialize the verifier's state. 92 | fn verifier_init(index_info: &Self::VPAuxInfo) -> Self; 93 | 94 | /// Run verifier for the current round, given a prover message. 95 | /// 96 | /// Note that `verify_round_and_update_state` only samples and stores 97 | /// challenges; and update the verifier's state accordingly. The actual 98 | /// verifications are deferred (in batch) to `check_and_generate_subclaim` 99 | /// at the last step. 100 | fn verify_round_and_update_state( 101 | &mut self, 102 | prover_msg: &Self::ProverMessage, 103 | transcript: &mut Self::Transcript, 104 | ) -> Result; 105 | 106 | /// This function verifies the deferred checks in the interactive version of 107 | /// the protocol; and generate the subclaim. Returns an error if the 108 | /// proof failed to verify. 109 | /// 110 | /// If the asserted sum is correct, then the multilinear polynomial 111 | /// evaluated at `subclaim.point` will be `subclaim.expected_evaluation`. 112 | /// Otherwise, it is highly unlikely that those two will be equal. 113 | /// Larger field size guarantees smaller soundness error. 114 | fn check_and_generate_subclaim( 115 | &self, 116 | asserted_sum: &F, 117 | ) -> Result; 118 | } 119 | 120 | /// A SumCheckSubClaim is a claim generated by the verifier at the end of 121 | /// verification when it is convinced. 122 | #[derive(Clone, Debug, Default, PartialEq, Eq)] 123 | pub struct SumCheckSubClaim { 124 | /// the multi-dimensional point that this multilinear extension is evaluated 125 | /// to 126 | pub point: Vec, 127 | /// the expected evaluation 128 | pub expected_evaluation: F, 129 | } 130 | 131 | impl SumCheck for PolyIOP { 132 | type SumCheckProof = IOPProof; 133 | type VirtualPolynomial = VirtualPolynomial; 134 | type VPAuxInfo = VPAuxInfo; 135 | type MultilinearExtension = Arc>; 136 | type SumCheckSubClaim = SumCheckSubClaim; 137 | type Transcript = IOPTranscript; 138 | 139 | fn extract_sum(proof: &Self::SumCheckProof) -> F { 140 | let start = start_timer!(|| "extract sum"); 141 | let res = proof.proofs[0].evaluations[0] + proof.proofs[0].evaluations[1]; 142 | end_timer!(start); 143 | res 144 | } 145 | 146 | fn init_transcript() -> Self::Transcript { 147 | let start = start_timer!(|| "init transcript"); 148 | let res = IOPTranscript::::new(b"Initializing SumCheck transcript"); 149 | end_timer!(start); 150 | res 151 | } 152 | 153 | fn prove( 154 | poly: &Self::VirtualPolynomial, 155 | transcript: &mut Self::Transcript, 156 | ) -> Result { 157 | let start = start_timer!(|| "sum check prove"); 158 | 159 | transcript.append_serializable_element(b"aux info", &poly.aux_info)?; 160 | 161 | let mut prover_state = IOPProverState::prover_init(poly)?; 162 | let mut challenge = None; 163 | let mut prover_msgs = Vec::with_capacity(poly.aux_info.num_variables); 164 | for _ in 0..poly.aux_info.num_variables { 165 | let prover_msg = 166 | IOPProverState::prove_round_and_update_state(&mut prover_state, &challenge)?; 167 | transcript.append_serializable_element(b"prover msg", &prover_msg)?; 168 | prover_msgs.push(prover_msg); 169 | challenge = Some(transcript.get_and_append_challenge(b"Internal round")?); 170 | } 171 | // pushing the last challenge point to the state 172 | if let Some(p) = challenge { 173 | prover_state.challenges.push(p) 174 | }; 175 | 176 | end_timer!(start); 177 | Ok(IOPProof { 178 | point: prover_state.challenges, 179 | 
proofs: prover_msgs, 180 | }) 181 | } 182 | 183 | fn verify( 184 | claimed_sum: F, 185 | proof: &Self::SumCheckProof, 186 | aux_info: &Self::VPAuxInfo, 187 | transcript: &mut Self::Transcript, 188 | ) -> Result { 189 | let start = start_timer!(|| "sum check verify"); 190 | 191 | transcript.append_serializable_element(b"aux info", aux_info)?; 192 | let mut verifier_state = IOPVerifierState::verifier_init(aux_info); 193 | for i in 0..aux_info.num_variables { 194 | let prover_msg = proof.proofs.get(i).expect("proof is incomplete"); 195 | transcript.append_serializable_element(b"prover msg", prover_msg)?; 196 | IOPVerifierState::verify_round_and_update_state( 197 | &mut verifier_state, 198 | prover_msg, 199 | transcript, 200 | )?; 201 | } 202 | 203 | let res = IOPVerifierState::check_and_generate_subclaim(&verifier_state, &claimed_sum); 204 | 205 | end_timer!(start); 206 | res 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/espresso/sum_check/prover.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | //! Prover subroutines for a SumCheck protocol. 8 | 9 | use super::SumCheckProver; 10 | use crate::espresso::multilinear_polynomial::fix_variables; 11 | use crate::espresso::virtual_polynomial::VirtualPolynomial; 12 | use ark_ff::{batch_inversion, PrimeField}; 13 | use ark_poly::DenseMultilinearExtension; 14 | use ark_std::{cfg_into_iter, end_timer, start_timer, vec::Vec}; 15 | use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator}; 16 | use std::sync::Arc; 17 | 18 | use super::structs::{IOPProverMessage, IOPProverState}; 19 | use subroutines::poly_iop::prelude::PolyIOPErrors; 20 | 21 | // #[cfg(feature = "parallel")] 22 | use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator}; 23 | 24 | impl SumCheckProver for IOPProverState { 25 | type VirtualPolynomial = VirtualPolynomial; 26 | type ProverMessage = IOPProverMessage; 27 | 28 | /// Initialize the prover state to argue for the sum of the input polynomial 29 | /// over {0,1}^`num_vars`. 30 | fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result { 31 | let start = start_timer!(|| "sum check prover init"); 32 | if polynomial.aux_info.num_variables == 0 { 33 | return Err(PolyIOPErrors::InvalidParameters( 34 | "Attempt to prove a constant.".to_string(), 35 | )); 36 | } 37 | end_timer!(start); 38 | 39 | Ok(Self { 40 | challenges: Vec::with_capacity(polynomial.aux_info.num_variables), 41 | round: 0, 42 | poly: polynomial.clone(), 43 | extrapolation_aux: (1..polynomial.aux_info.max_degree) 44 | .map(|degree| { 45 | let points = (0..1 + degree as u64).map(F::from).collect::>(); 46 | let weights = barycentric_weights(&points); 47 | (points, weights) 48 | }) 49 | .collect(), 50 | }) 51 | } 52 | 53 | /// Receive message from verifier, generate prover message, and proceed to 54 | /// next round. 55 | /// 56 | /// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2). 
57 | fn prove_round_and_update_state( 58 | &mut self, 59 | challenge: &Option, 60 | ) -> Result { 61 | // let start = 62 | // start_timer!(|| format!("sum check prove {}-th round and update state", 63 | // self.round)); 64 | 65 | if self.round >= self.poly.aux_info.num_variables { 66 | return Err(PolyIOPErrors::InvalidProver( 67 | "Prover is not active".to_string(), 68 | )); 69 | } 70 | 71 | // let fix_argument = start_timer!(|| "fix argument"); 72 | 73 | // Step 1: 74 | // fix argument and evaluate f(x) over x_m = r; where r is the challenge 75 | // for the current round, and m is the round number, indexed from 1 76 | // 77 | // i.e.: 78 | // at round m <= n, for each mle g(x_1, ... x_n) within the flattened_mle 79 | // which has already been evaluated to 80 | // 81 | // g(r_1, ..., r_{m-1}, x_m ... x_n) 82 | // 83 | // eval g over r_m, and mutate g to g(r_1, ... r_m,, x_{m+1}... x_n) 84 | let mut flattened_ml_extensions: Vec> = self 85 | .poly 86 | .flattened_ml_extensions 87 | .par_iter() 88 | .map(|x| x.as_ref().clone()) 89 | .collect(); 90 | 91 | if let Some(chal) = challenge { 92 | if self.round == 0 { 93 | return Err(PolyIOPErrors::InvalidProver( 94 | "first round should be prover first.".to_string(), 95 | )); 96 | } 97 | self.challenges.push(*chal); 98 | 99 | let r = self.challenges[self.round - 1]; 100 | // #[cfg(feature = "parallel")] 101 | flattened_ml_extensions 102 | .par_iter_mut() 103 | .for_each(|mle| *mle = fix_variables(mle, &[r])); 104 | // #[cfg(not(feature = "parallel"))] 105 | // flattened_ml_extensions 106 | // .iter_mut() 107 | // .for_each(|mle| *mle = fix_variables(mle, &[r])); 108 | } else if self.round > 0 { 109 | return Err(PolyIOPErrors::InvalidProver( 110 | "verifier message is empty".to_string(), 111 | )); 112 | } 113 | // end_timer!(fix_argument); 114 | 115 | self.round += 1; 116 | 117 | let products_list = self.poly.products.clone(); 118 | let mut products_sum = vec![F::zero(); self.poly.aux_info.max_degree + 1]; 119 | 120 | // Step 2: generate sum for the partial evaluated polynomial: 121 | // f(r_1, ... r_m,, x_{m+1}... 
x_n) 122 | 123 | products_list.iter().for_each(|(coefficient, products)| { 124 | let mut sum = cfg_into_iter!(0..1 << (self.poly.aux_info.num_variables - self.round)) 125 | .fold( 126 | || { 127 | ( 128 | vec![(F::zero(), F::zero()); products.len()], 129 | vec![F::zero(); products.len() + 1], 130 | ) 131 | }, 132 | |(mut buf, mut acc), b| { 133 | buf.iter_mut() 134 | .zip(products.iter()) 135 | .for_each(|((eval, step), f)| { 136 | let table = &flattened_ml_extensions[*f]; 137 | *eval = table[b << 1]; 138 | *step = table[(b << 1) + 1] - table[b << 1]; 139 | }); 140 | acc[0] += buf.iter().map(|(eval, _)| eval).product::(); 141 | acc[1..].iter_mut().for_each(|acc| { 142 | buf.iter_mut().for_each(|(eval, step)| *eval += step as &_); 143 | *acc += buf.iter().map(|(eval, _)| eval).product::(); 144 | }); 145 | (buf, acc) 146 | }, 147 | ) 148 | .map(|(_, partial)| partial) 149 | .reduce( 150 | || vec![F::zero(); products.len() + 1], 151 | |mut sum, partial| { 152 | sum.iter_mut() 153 | .zip(partial.iter()) 154 | .for_each(|(sum, partial)| *sum += partial); 155 | sum 156 | }, 157 | ); 158 | sum.iter_mut().for_each(|sum| *sum *= coefficient); 159 | let extraploation = cfg_into_iter!(0..self.poly.aux_info.max_degree - products.len()) 160 | .map(|i| { 161 | let (points, weights) = &self.extrapolation_aux[products.len() - 1]; 162 | let at = F::from((products.len() + 1 + i) as u64); 163 | extrapolate(points, weights, &sum, &at) 164 | }) 165 | .collect::>(); 166 | products_sum 167 | .iter_mut() 168 | .zip(sum.iter().chain(extraploation.iter())) 169 | .for_each(|(products_sum, sum)| *products_sum += sum); 170 | }); 171 | 172 | // update prover's state to the partial evaluated polynomial 173 | self.poly.flattened_ml_extensions = flattened_ml_extensions 174 | .par_iter() 175 | .map(|x| Arc::new(x.clone())) 176 | .collect(); 177 | 178 | Ok(IOPProverMessage { 179 | evaluations: products_sum, 180 | }) 181 | } 182 | } 183 | 184 | fn barycentric_weights(points: &[F]) -> Vec { 185 | let mut weights = points 186 | .iter() 187 | .enumerate() 188 | .map(|(j, point_j)| { 189 | points 190 | .iter() 191 | .enumerate() 192 | .filter_map(|(i, point_i)| (i != j).then(|| *point_j - point_i)) 193 | .reduce(|acc, value| acc * value) 194 | .unwrap_or_else(F::one) 195 | }) 196 | .collect::>(); 197 | batch_inversion(&mut weights); 198 | weights 199 | } 200 | 201 | fn extrapolate(points: &[F], weights: &[F], evals: &[F], at: &F) -> F { 202 | let (coeffs, sum_inv) = { 203 | let mut coeffs = points.iter().map(|point| *at - point).collect::>(); 204 | batch_inversion(&mut coeffs); 205 | coeffs.iter_mut().zip(weights).for_each(|(coeff, weight)| { 206 | *coeff *= weight; 207 | }); 208 | let sum_inv = coeffs.iter().sum::().inverse().unwrap_or_default(); 209 | (coeffs, sum_inv) 210 | }; 211 | coeffs 212 | .iter() 213 | .zip(evals) 214 | .map(|(coeff, eval)| *coeff * eval) 215 | .sum::() 216 | * sum_inv 217 | } 218 | -------------------------------------------------------------------------------- /src/ccs/cccs.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::CurveGroup; 2 | use ark_ff::PrimeField; 3 | use ark_std::One; 4 | use ark_std::Zero; 5 | use std::ops::Add; 6 | use std::sync::Arc; 7 | 8 | use ark_std::{rand::Rng, UniformRand}; 9 | 10 | use crate::ccs::ccs::{CCSError, CCS}; 11 | use crate::ccs::util::compute_sum_Mz; 12 | 13 | use crate::ccs::pedersen::{Commitment, Params as PedersenParams, Pedersen}; 14 | use crate::espresso::virtual_polynomial::VirtualPolynomial; 15 | use 
crate::util::hypercube::BooleanHypercube; 16 | use crate::util::mle::matrix_to_mle; 17 | use crate::util::mle::vec_to_mle; 18 | 19 | /// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment. 20 | #[derive(Debug, Clone)] 21 | pub struct Witness { 22 | pub w: Vec, 23 | pub r_w: F, // randomness used in the Pedersen commitment of w 24 | } 25 | 26 | /// Committed CCS instance 27 | #[derive(Debug, Clone)] 28 | pub struct CCCS { 29 | // Underlying CCS structure 30 | pub ccs: CCS, 31 | 32 | // Commitment to witness 33 | pub C: Commitment, 34 | // Public input/output 35 | pub x: Vec, 36 | } 37 | 38 | impl CCS { 39 | pub fn to_cccs( 40 | &self, 41 | rng: &mut R, 42 | pedersen_params: &PedersenParams, 43 | z: &[C::ScalarField], 44 | ) -> (CCCS, Witness) { 45 | let w: Vec = z[(1 + self.l)..].to_vec(); 46 | let r_w = C::ScalarField::rand(rng); 47 | let C = Pedersen::::commit(pedersen_params, &w, &r_w); 48 | 49 | ( 50 | CCCS:: { 51 | ccs: self.clone(), 52 | C, 53 | x: z[1..(1 + self.l)].to_vec(), 54 | }, 55 | Witness:: { w, r_w }, 56 | ) 57 | } 58 | } 59 | 60 | impl CCCS { 61 | /// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) 62 | /// polynomial over x 63 | pub fn compute_q(&self, z: &Vec) -> VirtualPolynomial { 64 | let z_mle = vec_to_mle(self.ccs.s_prime, z); 65 | let mut q = VirtualPolynomial::::new(self.ccs.s); 66 | 67 | for i in 0..self.ccs.q { 68 | let mut prod: VirtualPolynomial = 69 | VirtualPolynomial::::new(self.ccs.s); 70 | for j in self.ccs.S[i].clone() { 71 | let M_j = matrix_to_mle(self.ccs.M[j].clone()); 72 | 73 | let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.ccs.s_prime); 74 | 75 | // Fold this sum into the running product 76 | if prod.products.is_empty() { 77 | // If this is the first time we are adding something to this virtual polynomial, we need to 78 | // explicitly add the products using add_mle_list() 79 | // XXX is this true? improve API 80 | prod.add_mle_list([Arc::new(sum_Mz)], C::ScalarField::one()) 81 | .unwrap(); 82 | } else { 83 | prod.mul_by_mle(Arc::new(sum_Mz), C::ScalarField::one()) 84 | .unwrap(); 85 | } 86 | } 87 | // Multiply by the product by the coefficient c_i 88 | prod.scalar_mul(&self.ccs.c[i]); 89 | // Add it to the running sum 90 | q = q.add(&prod); 91 | } 92 | q 93 | } 94 | 95 | /// Computes Q(x) = eq(beta, x) * q(x) 96 | /// = eq(beta, x) * \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) 97 | /// polynomial over x 98 | pub fn compute_Q( 99 | &self, 100 | z: &Vec, 101 | beta: &[C::ScalarField], 102 | ) -> VirtualPolynomial { 103 | let q = self.compute_q(z); 104 | q.build_f_hat(beta).unwrap() 105 | } 106 | 107 | /// Perform the check of the CCCS instance described at section 4.1 108 | pub fn check_relation( 109 | &self, 110 | pedersen_params: &PedersenParams, 111 | w: &Witness, 112 | ) -> Result<(), CCSError> { 113 | // check that C is the commitment of w. Notice that this is not verifying a Pedersen 114 | // opening, but checking that the Commmitment comes from committing to the witness. 
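        // In other words, we recompute Pedersen::commit(w, r_w) and compare it against the
        // stored commitment C. (This PoC uses an assertion here; per TODO.md, a production
        // version should return a CCSError instead of panicking.)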
115 | assert_eq!(self.C.0, Pedersen::commit(pedersen_params, &w.w, &w.r_w).0); 116 | 117 | // check CCCS relation 118 | let z: Vec = 119 | [vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat(); 120 | 121 | // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube 122 | let q_x = self.compute_q(&z); 123 | for x in BooleanHypercube::new(self.ccs.s) { 124 | if !q_x.evaluate(&x).unwrap().is_zero() { 125 | return Err(CCSError::NotSatisfied); 126 | } 127 | } 128 | 129 | Ok(()) 130 | } 131 | } 132 | 133 | #[cfg(test)] 134 | pub mod test { 135 | use super::*; 136 | use crate::ccs::ccs::test::{get_test_ccs, get_test_z}; 137 | use ark_std::test_rng; 138 | use ark_std::UniformRand; 139 | 140 | use ark_bls12_381::{Fr, G1Projective}; 141 | 142 | /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the 143 | /// hypercube, but to not-zero outside the hypercube. 144 | #[test] 145 | fn test_compute_q() -> () { 146 | let mut rng = test_rng(); 147 | 148 | let ccs = get_test_ccs::(); 149 | let z = get_test_z(3); 150 | 151 | let pedersen_params = Pedersen::::new_params(&mut rng, ccs.n - ccs.l - 1); 152 | let (cccs, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z); 153 | let q = cccs.compute_q(&z); 154 | 155 | // Evaluate inside the hypercube 156 | for x in BooleanHypercube::new(ccs.s).into_iter() { 157 | assert_eq!(Fr::zero(), q.evaluate(&x).unwrap()); 158 | } 159 | 160 | // Evaluate outside the hypercube 161 | let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); 162 | assert_ne!(Fr::zero(), q.evaluate(&beta).unwrap()); 163 | } 164 | 165 | /// Perform some sanity checks on Q(x). 166 | #[test] 167 | fn test_compute_Q() -> () { 168 | let mut rng = test_rng(); 169 | 170 | let ccs = get_test_ccs(); 171 | let z = get_test_z(3); 172 | ccs.check_relation(&z).unwrap(); 173 | 174 | let pedersen_params = Pedersen::::new_params(&mut rng, ccs.n - ccs.l - 1); 175 | let (cccs, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z); 176 | 177 | let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); 178 | 179 | // Compute Q(x) = eq(beta, x) * q(x). 180 | let Q = cccs.compute_Q(&z, &beta); 181 | 182 | // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y) 183 | // which interpolates the multivariate polynomial q(x) inside the hypercube. 184 | // 185 | // Observe that summing Q(x) inside the hypercube, directly computes G(\beta). 186 | // 187 | // Now, G(x) is multilinear and agrees with q(x) inside the hypercube. Since q(x) vanishes inside the 188 | // hypercube, this means that G(x) also vanishes in the hypercube. Since G(x) is multilinear and vanishes 189 | // inside the hypercube, this makes it the zero polynomial. 190 | // 191 | // Hence, evaluating G(x) at a random beta should give zero. 192 | 193 | // Now sum Q(x) evaluations in the hypercube and expect it to be 0 194 | let r = BooleanHypercube::new(ccs.s) 195 | .into_iter() 196 | .map(|x| Q.evaluate(&x).unwrap()) 197 | .fold(Fr::zero(), |acc, result| acc + result); 198 | assert_eq!(r, Fr::zero()); 199 | } 200 | 201 | /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube. 202 | /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point. 
203 | /// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside 204 | #[test] 205 | fn test_Q_against_q() -> () { 206 | let mut rng = test_rng(); 207 | 208 | let ccs = get_test_ccs(); 209 | let z = get_test_z(3); 210 | ccs.check_relation(&z).unwrap(); 211 | 212 | let pedersen_params = Pedersen::::new_params(&mut rng, ccs.n - ccs.l - 1); 213 | let (cccs, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z); 214 | 215 | // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which 216 | // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube 217 | let q = cccs.compute_q(&z); 218 | for d in BooleanHypercube::new(ccs.s) { 219 | let Q_at_d = cccs.compute_Q(&z, &d); 220 | 221 | // Get G(d) by summing over Q_d(x) over the hypercube 222 | let G_at_d = BooleanHypercube::new(ccs.s) 223 | .into_iter() 224 | .map(|x| Q_at_d.evaluate(&x).unwrap()) 225 | .fold(Fr::zero(), |acc, result| acc + result); 226 | assert_eq!(G_at_d, q.evaluate(&d).unwrap()); 227 | } 228 | 229 | // Now test that they should disagree outside of the hypercube 230 | let r: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); 231 | let Q_at_r = cccs.compute_Q(&z, &r); 232 | 233 | // Get G(d) by summing over Q_d(x) over the hypercube 234 | let G_at_r = BooleanHypercube::new(ccs.s) 235 | .into_iter() 236 | .map(|x| Q_at_r.evaluate(&x).unwrap()) 237 | .fold(Fr::zero(), |acc, result| acc + result); 238 | assert_ne!(G_at_r, q.evaluate(&r).unwrap()); 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /src/espresso/sum_check/verifier.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | //! Verifier subroutines for a SumCheck protocol. 8 | 9 | use super::{SumCheckSubClaim, SumCheckVerifier}; 10 | use crate::espresso::virtual_polynomial::VPAuxInfo; 11 | use ark_ff::PrimeField; 12 | use ark_std::{end_timer, start_timer}; 13 | 14 | use super::structs::{IOPProverMessage, IOPVerifierState}; 15 | use subroutines::poly_iop::prelude::PolyIOPErrors; 16 | use transcript::IOPTranscript; 17 | 18 | #[cfg(feature = "parallel")] 19 | use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; 20 | 21 | impl SumCheckVerifier for IOPVerifierState { 22 | type VPAuxInfo = VPAuxInfo; 23 | type ProverMessage = IOPProverMessage; 24 | type Challenge = F; 25 | type Transcript = IOPTranscript; 26 | type SumCheckSubClaim = SumCheckSubClaim; 27 | 28 | /// Initialize the verifier's state. 29 | fn verifier_init(index_info: &Self::VPAuxInfo) -> Self { 30 | let start = start_timer!(|| "sum check verifier init"); 31 | let res = Self { 32 | round: 1, 33 | num_vars: index_info.num_variables, 34 | max_degree: index_info.max_degree, 35 | finished: false, 36 | polynomials_received: Vec::with_capacity(index_info.num_variables), 37 | challenges: Vec::with_capacity(index_info.num_variables), 38 | }; 39 | end_timer!(start); 40 | res 41 | } 42 | 43 | /// Run verifier for the current round, given a prover message. 44 | /// 45 | /// Note that `verify_round_and_update_state` only samples and stores 46 | /// challenges; and update the verifier's state accordingly. 
The actual 47 | /// verifications are deferred (in batch) to `check_and_generate_subclaim` 48 | /// at the last step. 49 | fn verify_round_and_update_state( 50 | &mut self, 51 | prover_msg: &Self::ProverMessage, 52 | transcript: &mut Self::Transcript, 53 | ) -> Result { 54 | let start = 55 | start_timer!(|| format!("sum check verify {}-th round and update state", self.round)); 56 | 57 | if self.finished { 58 | return Err(PolyIOPErrors::InvalidVerifier( 59 | "Incorrect verifier state: Verifier is already finished.".to_string(), 60 | )); 61 | } 62 | 63 | // In an interactive protocol, the verifier should 64 | // 65 | // 1. check if the received 'P(0) + P(1) = expected`. 66 | // 2. set `expected` to P(r)` 67 | // 68 | // When we turn the protocol to a non-interactive one, it is sufficient to defer 69 | // such checks to `check_and_generate_subclaim` after the last round. 70 | 71 | let challenge = transcript.get_and_append_challenge(b"Internal round")?; 72 | self.challenges.push(challenge); 73 | self.polynomials_received 74 | .push(prover_msg.evaluations.to_vec()); 75 | 76 | if self.round == self.num_vars { 77 | // accept and close 78 | self.finished = true; 79 | } else { 80 | // proceed to the next round 81 | self.round += 1; 82 | } 83 | 84 | end_timer!(start); 85 | Ok(challenge) 86 | } 87 | 88 | /// This function verifies the deferred checks in the interactive version of 89 | /// the protocol; and generate the subclaim. Returns an error if the 90 | /// proof failed to verify. 91 | /// 92 | /// If the asserted sum is correct, then the multilinear polynomial 93 | /// evaluated at `subclaim.point` will be `subclaim.expected_evaluation`. 94 | /// Otherwise, it is highly unlikely that those two will be equal. 95 | /// Larger field size guarantees smaller soundness error. 96 | fn check_and_generate_subclaim( 97 | &self, 98 | asserted_sum: &F, 99 | ) -> Result { 100 | let start = start_timer!(|| "sum check check and generate subclaim"); 101 | if !self.finished { 102 | return Err(PolyIOPErrors::InvalidVerifier( 103 | "Incorrect verifier state: Verifier has not finished.".to_string(), 104 | )); 105 | } 106 | 107 | if self.polynomials_received.len() != self.num_vars { 108 | return Err(PolyIOPErrors::InvalidVerifier( 109 | "insufficient rounds".to_string(), 110 | )); 111 | } 112 | 113 | // the deferred check during the interactive phase: 114 | // 2. 
set `expected` to P(r)` 115 | #[cfg(feature = "parallel")] 116 | let mut expected_vec = self 117 | .polynomials_received 118 | .clone() 119 | .into_par_iter() 120 | .zip(self.challenges.clone().into_par_iter()) 121 | .map(|(evaluations, challenge)| { 122 | if evaluations.len() != self.max_degree + 1 { 123 | return Err(PolyIOPErrors::InvalidVerifier(format!( 124 | "incorrect number of evaluations: {} vs {}", 125 | evaluations.len(), 126 | self.max_degree + 1 127 | ))); 128 | } 129 | interpolate_uni_poly::(&evaluations, challenge) 130 | }) 131 | .collect::, PolyIOPErrors>>()?; 132 | 133 | #[cfg(not(feature = "parallel"))] 134 | let mut expected_vec = self 135 | .polynomials_received 136 | .clone() 137 | .into_iter() 138 | .zip(self.challenges.clone().into_iter()) 139 | .map(|(evaluations, challenge)| { 140 | if evaluations.len() != self.max_degree + 1 { 141 | return Err(PolyIOPErrors::InvalidVerifier(format!( 142 | "incorrect number of evaluations: {} vs {}", 143 | evaluations.len(), 144 | self.max_degree + 1 145 | ))); 146 | } 147 | interpolate_uni_poly::(&evaluations, challenge) 148 | }) 149 | .collect::, PolyIOPErrors>>()?; 150 | 151 | // insert the asserted_sum to the first position of the expected vector 152 | expected_vec.insert(0, *asserted_sum); 153 | 154 | for (evaluations, &expected) in self 155 | .polynomials_received 156 | .iter() 157 | .zip(expected_vec.iter()) 158 | .take(self.num_vars) 159 | { 160 | // the deferred check during the interactive phase: 161 | // 1. check if the received 'P(0) + P(1) = expected`. 162 | if evaluations[0] + evaluations[1] != expected { 163 | return Err(PolyIOPErrors::InvalidProof( 164 | "Prover message is not consistent with the claim.".to_string(), 165 | )); 166 | } 167 | } 168 | end_timer!(start); 169 | Ok(SumCheckSubClaim { 170 | point: self.challenges.clone(), 171 | // the last expected value (not checked within this function) will be included in the 172 | // subclaim 173 | expected_evaluation: expected_vec[self.num_vars], 174 | }) 175 | } 176 | } 177 | 178 | /// Interpolate a uni-variate degree-`p_i.len()-1` polynomial and evaluate this 179 | /// polynomial at `eval_at`: 180 | /// 181 | /// \sum_{i=0}^len p_i * (\prod_{j!=i} (eval_at - j)/(i-j) ) 182 | /// 183 | /// This implementation is linear in number of inputs in terms of field 184 | /// operations. It also has a quadratic term in primitive operations which is 185 | /// negligible compared to field operations. 186 | /// TODO: The quadratic term can be removed by precomputing the lagrange 187 | /// coefficients. 188 | pub fn interpolate_uni_poly(p_i: &[F], eval_at: F) -> Result { 189 | let start = start_timer!(|| "sum check interpolate uni poly opt"); 190 | 191 | let len = p_i.len(); 192 | let mut evals = vec![]; 193 | let mut prod = eval_at; 194 | evals.push(eval_at); 195 | 196 | // `prod = \prod_{j} (eval_at - j)` 197 | for e in 1..len { 198 | let tmp = eval_at - F::from(e as u64); 199 | evals.push(tmp); 200 | prod *= tmp; 201 | } 202 | let mut res = F::zero(); 203 | // we want to compute \prod (j!=i) (i-j) for a given i 204 | // 205 | // we start from the last step, which is 206 | // denom[len-1] = (len-1) * (len-2) *... * 2 * 1 207 | // the step before that is 208 | // denom[len-2] = (len-2) * (len-3) * ... * 2 * 1 * -1 209 | // and the step before that is 210 | // denom[len-3] = (len-3) * (len-4) * ... 
* 2 * 1 * -1 * -2 211 | // 212 | // i.e., for any i, the one before this will be derived from 213 | // denom[i-1] = denom[i] * (len-i) / i 214 | // 215 | // that is, we only need to store 216 | // - the last denom for i = len-1, and 217 | // - the ratio between current step and fhe last step, which is the product of 218 | // (len-i) / i from all previous steps and we store this product as a fraction 219 | // number to reduce field divisions. 220 | 221 | // We know 222 | // - 2^61 < factorial(20) < 2^62 223 | // - 2^122 < factorial(33) < 2^123 224 | // so we will be able to compute the ratio 225 | // - for len <= 20 with i64 226 | // - for len <= 33 with i128 227 | // - for len > 33 with BigInt 228 | if p_i.len() <= 20 { 229 | let last_denominator = F::from(u64_factorial(len - 1)); 230 | let mut ratio_numerator = 1i64; 231 | let mut ratio_denominator = 1u64; 232 | 233 | for i in (0..len).rev() { 234 | let ratio_numerator_f = if ratio_numerator < 0 { 235 | -F::from((-ratio_numerator) as u64) 236 | } else { 237 | F::from(ratio_numerator as u64) 238 | }; 239 | 240 | res += p_i[i] * prod * F::from(ratio_denominator) 241 | / (last_denominator * ratio_numerator_f * evals[i]); 242 | 243 | // compute denom for the next step is current_denom * (len-i)/i 244 | if i != 0 { 245 | ratio_numerator *= -(len as i64 - i as i64); 246 | ratio_denominator *= i as u64; 247 | } 248 | } 249 | } else if p_i.len() <= 33 { 250 | let last_denominator = F::from(u128_factorial(len - 1)); 251 | let mut ratio_numerator = 1i128; 252 | let mut ratio_denominator = 1u128; 253 | 254 | for i in (0..len).rev() { 255 | let ratio_numerator_f = if ratio_numerator < 0 { 256 | -F::from((-ratio_numerator) as u128) 257 | } else { 258 | F::from(ratio_numerator as u128) 259 | }; 260 | 261 | res += p_i[i] * prod * F::from(ratio_denominator) 262 | / (last_denominator * ratio_numerator_f * evals[i]); 263 | 264 | // compute denom for the next step is current_denom * (len-i)/i 265 | if i != 0 { 266 | ratio_numerator *= -(len as i128 - i as i128); 267 | ratio_denominator *= i as u128; 268 | } 269 | } 270 | } else { 271 | let mut denom_up = field_factorial::(len - 1); 272 | let mut denom_down = F::one(); 273 | 274 | for i in (0..len).rev() { 275 | res += p_i[i] * prod * denom_down / (denom_up * evals[i]); 276 | 277 | // compute denom for the next step is current_denom * (len-i)/i 278 | if i != 0 { 279 | denom_up *= -F::from((len - i) as u64); 280 | denom_down *= F::from(i as u64); 281 | } 282 | } 283 | } 284 | end_timer!(start); 285 | Ok(res) 286 | } 287 | 288 | /// compute the factorial(a) = 1 * 2 * ... * a 289 | #[inline] 290 | fn field_factorial(a: usize) -> F { 291 | let mut res = F::one(); 292 | for i in 2..=a { 293 | res *= F::from(i as u64); 294 | } 295 | res 296 | } 297 | 298 | /// compute the factorial(a) = 1 * 2 * ... * a 299 | #[inline] 300 | fn u128_factorial(a: usize) -> u128 { 301 | let mut res = 1u128; 302 | for i in 2..=a { 303 | res *= i as u128; 304 | } 305 | res 306 | } 307 | 308 | /// compute the factorial(a) = 1 * 2 * ... 
* a 309 | #[inline] 310 | fn u64_factorial(a: usize) -> u64 { 311 | let mut res = 1u64; 312 | for i in 2..=a { 313 | res *= i as u64; 314 | } 315 | res 316 | } 317 | 318 | /* 319 | #[cfg(test)] 320 | mod test { 321 | use super::interpolate_uni_poly; 322 | use crate::poly_iop::errors::PolyIOPErrors; 323 | use ark_bls12_381::Fr; 324 | use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; 325 | use ark_std::{vec::Vec, UniformRand}; 326 | 327 | #[test] 328 | fn test_interpolation() -> Result<(), PolyIOPErrors> { 329 | let mut prng = ark_std::test_rng(); 330 | 331 | // test a polynomial with 20 known points, i.e., with degree 19 332 | let poly = DensePolynomial::::rand(20 - 1, &mut prng); 333 | let evals = (0..20) 334 | .map(|i| poly.evaluate(&Fr::from(i))) 335 | .collect::>(); 336 | let query = Fr::rand(&mut prng); 337 | 338 | assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?); 339 | 340 | // test a polynomial with 33 known points, i.e., with degree 32 341 | let poly = DensePolynomial::::rand(33 - 1, &mut prng); 342 | let evals = (0..33) 343 | .map(|i| poly.evaluate(&Fr::from(i))) 344 | .collect::>(); 345 | let query = Fr::rand(&mut prng); 346 | 347 | assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?); 348 | 349 | // test a polynomial with 64 known points, i.e., with degree 63 350 | let poly = DensePolynomial::::rand(64 - 1, &mut prng); 351 | let evals = (0..64) 352 | .map(|i| poly.evaluate(&Fr::from(i))) 353 | .collect::>(); 354 | let query = Fr::rand(&mut prng); 355 | 356 | assert_eq!(poly.evaluate(&query), interpolate_uni_poly(&evals, query)?); 357 | 358 | Ok(()) 359 | } 360 | } 361 | */ 362 | -------------------------------------------------------------------------------- /src/espresso/virtual_polynomial.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // You should have received a copy of the MIT License 5 | // along with the HyperPlonk library. If not, see . 6 | 7 | //! This module defines our main mathematical object `VirtualPolynomial`; and 8 | //! various functions associated with it. 9 | 10 | use crate::espresso::errors::ArithErrors; 11 | use ark_ff::PrimeField; 12 | use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; 13 | use ark_serialize::CanonicalSerialize; 14 | use ark_std::{end_timer, start_timer}; 15 | use rayon::prelude::*; 16 | use std::{cmp::max, collections::HashMap, marker::PhantomData, ops::Add, sync::Arc}; 17 | 18 | #[rustfmt::skip] 19 | /// A virtual polynomial is a sum of products of multilinear polynomials; 20 | /// where the multilinear polynomials are stored via their multilinear 21 | /// extensions: `(coefficient, DenseMultilinearExtension)` 22 | /// 23 | /// * Number of products n = `polynomial.products.len()`, 24 | /// * Number of multiplicands of ith product m_i = 25 | /// `polynomial.products[i].1.len()`, 26 | /// * Coefficient of ith product c_i = `polynomial.products[i].0` 27 | /// 28 | /// The resulting polynomial is 29 | /// 30 | /// $$ \sum_{i=0}^{n} c_i \cdot \prod_{j=0}^{m_i} P_{ij} $$ 31 | /// 32 | /// Example: 33 | /// f = c0 * f0 * f1 * f2 + c1 * f3 * f4 34 | /// where f0 ... 
f4 are multilinear polynomials 35 | /// 36 | /// - flattened_ml_extensions stores the multilinear extension representation of 37 | /// f0, f1, f2, f3 and f4 38 | /// - products is 39 | /// \[ 40 | /// (c0, \[0, 1, 2\]), 41 | /// (c1, \[3, 4\]) 42 | /// \] 43 | /// - raw_pointers_lookup_table maps fi to i 44 | /// 45 | #[derive(Clone, Debug, Default, PartialEq)] 46 | pub struct VirtualPolynomial { 47 | /// Aux information about the multilinear polynomial 48 | pub aux_info: VPAuxInfo, 49 | /// list of reference to products (as usize) of multilinear extension 50 | pub products: Vec<(F, Vec)>, 51 | /// Stores multilinear extensions in which product multiplicand can refer 52 | /// to. 53 | pub flattened_ml_extensions: Vec>>, 54 | /// Pointers to the above poly extensions 55 | raw_pointers_lookup_table: HashMap<*const DenseMultilinearExtension, usize>, 56 | } 57 | 58 | #[derive(Clone, Debug, Default, PartialEq, Eq, CanonicalSerialize)] 59 | /// Auxiliary information about the multilinear polynomial 60 | pub struct VPAuxInfo { 61 | /// max number of multiplicands in each product 62 | pub max_degree: usize, 63 | /// number of variables of the polynomial 64 | pub num_variables: usize, 65 | /// Associated field 66 | #[doc(hidden)] 67 | pub phantom: PhantomData, 68 | } 69 | 70 | impl Add for &VirtualPolynomial { 71 | type Output = VirtualPolynomial; 72 | fn add(self, other: &VirtualPolynomial) -> Self::Output { 73 | let start = start_timer!(|| "virtual poly add"); 74 | let mut res = self.clone(); 75 | for products in other.products.iter() { 76 | let cur: Vec>> = products 77 | .1 78 | .iter() 79 | .map(|&x| other.flattened_ml_extensions[x].clone()) 80 | .collect(); 81 | 82 | res.add_mle_list(cur, products.0) 83 | .expect("add product failed"); 84 | } 85 | end_timer!(start); 86 | res 87 | } 88 | } 89 | 90 | // TODO: convert this into a trait 91 | impl VirtualPolynomial { 92 | /// Creates an empty virtual polynomial with `num_variables`. 93 | pub fn new(num_variables: usize) -> Self { 94 | VirtualPolynomial { 95 | aux_info: VPAuxInfo { 96 | max_degree: 0, 97 | num_variables, 98 | phantom: PhantomData::default(), 99 | }, 100 | products: Vec::new(), 101 | flattened_ml_extensions: Vec::new(), 102 | raw_pointers_lookup_table: HashMap::new(), 103 | } 104 | } 105 | 106 | /// Creates an new virtual polynomial from a MLE and its coefficient. 107 | pub fn new_from_mle(mle: &Arc>, coefficient: F) -> Self { 108 | let mle_ptr: *const DenseMultilinearExtension = Arc::as_ptr(mle); 109 | let mut hm = HashMap::new(); 110 | hm.insert(mle_ptr, 0); 111 | 112 | VirtualPolynomial { 113 | aux_info: VPAuxInfo { 114 | // The max degree is the max degree of any individual variable 115 | max_degree: 1, 116 | num_variables: mle.num_vars, 117 | phantom: PhantomData::default(), 118 | }, 119 | // here `0` points to the first polynomial of `flattened_ml_extensions` 120 | products: vec![(coefficient, vec![0])], 121 | flattened_ml_extensions: vec![mle.clone()], 122 | raw_pointers_lookup_table: hm, 123 | } 124 | } 125 | 126 | /// Add a product of list of multilinear extensions to self 127 | /// Returns an error if the list is empty, or the MLE has a different 128 | /// `num_vars` from self. 129 | /// 130 | /// The MLEs will be multiplied together, and then multiplied by the scalar 131 | /// `coefficient`. 
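    ///
    /// A minimal usage sketch (illustrative only; `f0` and `f1` are assumed to be
    /// `Arc<DenseMultilinearExtension<F>>` with the same `num_vars` as `self`):
    ///
    /// ```ignore
    /// // append the summand c * f0 * f1 to the virtual polynomial
    /// poly.add_mle_list([f0.clone(), f1.clone()], c)?;
    /// ```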
132 | pub fn add_mle_list( 133 | &mut self, 134 | mle_list: impl IntoIterator>>, 135 | coefficient: F, 136 | ) -> Result<(), ArithErrors> { 137 | let mle_list: Vec>> = mle_list.into_iter().collect(); 138 | let mut indexed_product = Vec::with_capacity(mle_list.len()); 139 | 140 | if mle_list.is_empty() { 141 | return Err(ArithErrors::InvalidParameters( 142 | "input mle_list is empty".to_string(), 143 | )); 144 | } 145 | 146 | self.aux_info.max_degree = max(self.aux_info.max_degree, mle_list.len()); 147 | 148 | for mle in mle_list { 149 | if mle.num_vars != self.aux_info.num_variables { 150 | return Err(ArithErrors::InvalidParameters(format!( 151 | "product has a multiplicand with wrong number of variables {} vs {}", 152 | mle.num_vars, self.aux_info.num_variables 153 | ))); 154 | } 155 | 156 | let mle_ptr: *const DenseMultilinearExtension = Arc::as_ptr(&mle); 157 | if let Some(index) = self.raw_pointers_lookup_table.get(&mle_ptr) { 158 | indexed_product.push(*index) 159 | } else { 160 | let curr_index = self.flattened_ml_extensions.len(); 161 | self.flattened_ml_extensions.push(mle.clone()); 162 | self.raw_pointers_lookup_table.insert(mle_ptr, curr_index); 163 | indexed_product.push(curr_index); 164 | } 165 | } 166 | self.products.push((coefficient, indexed_product)); 167 | Ok(()) 168 | } 169 | 170 | /// Multiple the current VirtualPolynomial by an MLE: 171 | /// - add the MLE to the MLE list; 172 | /// - multiple each product by MLE and its coefficient. 173 | /// Returns an error if the MLE has a different `num_vars` from self. 174 | pub fn mul_by_mle( 175 | &mut self, 176 | mle: Arc>, 177 | coefficient: F, 178 | ) -> Result<(), ArithErrors> { 179 | let start = start_timer!(|| "mul by mle"); 180 | 181 | if mle.num_vars != self.aux_info.num_variables { 182 | return Err(ArithErrors::InvalidParameters(format!( 183 | "product has a multiplicand with wrong number of variables {} vs {}", 184 | mle.num_vars, self.aux_info.num_variables 185 | ))); 186 | } 187 | 188 | let mle_ptr: *const DenseMultilinearExtension = Arc::as_ptr(&mle); 189 | 190 | // check if this mle already exists in the virtual polynomial 191 | let mle_index = match self.raw_pointers_lookup_table.get(&mle_ptr) { 192 | Some(&p) => p, 193 | None => { 194 | self.raw_pointers_lookup_table 195 | .insert(mle_ptr, self.flattened_ml_extensions.len()); 196 | self.flattened_ml_extensions.push(mle); 197 | self.flattened_ml_extensions.len() - 1 198 | } 199 | }; 200 | 201 | for (prod_coef, indices) in self.products.iter_mut() { 202 | // - add the MLE to the MLE list; 203 | // - multiple each product by MLE and its coefficient. 204 | indices.push(mle_index); 205 | *prod_coef *= coefficient; 206 | } 207 | 208 | // increase the max degree by one as the MLE has degree 1. 209 | self.aux_info.max_degree += 1; 210 | end_timer!(start); 211 | Ok(()) 212 | } 213 | 214 | /// Given virtual polynomial `p(x)` and scalar `s`, compute `s*p(x)` 215 | pub fn scalar_mul(&mut self, s: &F) { 216 | for (prod_coef, _) in self.products.iter_mut() { 217 | *prod_coef *= s; 218 | } 219 | } 220 | 221 | /// Evaluate the virtual polynomial at point `point`. 222 | /// Returns an error is point.len() does not match `num_variables`. 
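    ///
    /// A minimal usage sketch (illustrative only; `point` must have exactly
    /// `num_variables` coordinates):
    ///
    /// ```ignore
    /// let point: Vec<Fr> = (0..poly.aux_info.num_variables)
    ///     .map(|_| Fr::rand(&mut rng))
    ///     .collect();
    /// let value = poly.evaluate(&point)?;
    /// ```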
223 | pub fn evaluate(&self, point: &[F]) -> Result { 224 | let start = start_timer!(|| "evaluation"); 225 | 226 | if self.aux_info.num_variables != point.len() { 227 | return Err(ArithErrors::InvalidParameters(format!( 228 | "wrong number of variables {} vs {}", 229 | self.aux_info.num_variables, 230 | point.len() 231 | ))); 232 | } 233 | 234 | // Evaluate all the MLEs at `point` 235 | let evals: Vec = self 236 | .flattened_ml_extensions 237 | .iter() 238 | .map(|x| { 239 | x.evaluate(point).unwrap() // safe unwrap here since we have 240 | // already checked that num_var 241 | // matches 242 | }) 243 | .collect(); 244 | 245 | let res = self 246 | .products 247 | .iter() 248 | .map(|(c, p)| *c * p.iter().map(|&i| evals[i]).product::()) 249 | .sum(); 250 | 251 | end_timer!(start); 252 | Ok(res) 253 | } 254 | 255 | // Input poly f(x) and a random vector r, output 256 | // \hat f(x) = \sum_{x_i \in eval_x} f(x_i) eq(x, r) 257 | // where 258 | // eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) 259 | // 260 | // This function is used in ZeroCheck. 261 | pub fn build_f_hat(&self, r: &[F]) -> Result { 262 | let start = start_timer!(|| "zero check build hat f"); 263 | 264 | if self.aux_info.num_variables != r.len() { 265 | return Err(ArithErrors::InvalidParameters(format!( 266 | "r.len() is different from number of variables: {} vs {}", 267 | r.len(), 268 | self.aux_info.num_variables 269 | ))); 270 | } 271 | 272 | let eq_x_r = build_eq_x_r(r)?; 273 | let mut res = self.clone(); 274 | res.mul_by_mle(eq_x_r, F::one())?; 275 | 276 | end_timer!(start); 277 | Ok(res) 278 | } 279 | } 280 | 281 | /// Evaluate eq polynomial. 282 | pub fn eq_eval(x: &[F], y: &[F]) -> Result { 283 | if x.len() != y.len() { 284 | return Err(ArithErrors::InvalidParameters( 285 | "x and y have different length".to_string(), 286 | )); 287 | } 288 | let start = start_timer!(|| "eq_eval"); 289 | let mut res = F::one(); 290 | for (&xi, &yi) in x.iter().zip(y.iter()) { 291 | let xi_yi = xi * yi; 292 | res *= xi_yi + xi_yi - xi - yi + F::one(); 293 | } 294 | end_timer!(start); 295 | Ok(res) 296 | } 297 | 298 | /// This function build the eq(x, r) polynomial for any given r. 299 | /// 300 | /// Evaluate 301 | /// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) 302 | /// over r, which is 303 | /// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) 304 | fn build_eq_x_r(r: &[F]) -> Result>, ArithErrors> { 305 | let evals = build_eq_x_r_vec(r)?; 306 | let mle = DenseMultilinearExtension::from_evaluations_vec(r.len(), evals); 307 | 308 | Ok(Arc::new(mle)) 309 | } 310 | /// This function build the eq(x, r) polynomial for any given r, and output the 311 | /// evaluation of eq(x, r) in its vector form. 312 | /// 313 | /// Evaluate 314 | /// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) 315 | /// over r, which is 316 | /// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) 317 | fn build_eq_x_r_vec(r: &[F]) -> Result, ArithErrors> { 318 | // we build eq(x,r) from its evaluations 319 | // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars 320 | // for example, with num_vars = 4, x is a binary vector of 4, then 321 | // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) 322 | // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) 323 | // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) 324 | // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) 325 | // .... 
326 | // 1 1 1 1 -> r0 * r1 * r2 * r3 327 | // we will need 2^num_var evaluations 328 | 329 | let mut eval = Vec::new(); 330 | build_eq_x_r_helper(r, &mut eval)?; 331 | 332 | Ok(eval) 333 | } 334 | 335 | /// A helper function to build eq(x, r) recursively. 336 | /// This function takes `r.len()` steps, and for each step it requires a maximum 337 | /// `r.len()-1` multiplications. 338 | fn build_eq_x_r_helper(r: &[F], buf: &mut Vec) -> Result<(), ArithErrors> { 339 | if r.is_empty() { 340 | return Err(ArithErrors::InvalidParameters("r length is 0".to_string())); 341 | } else if r.len() == 1 { 342 | // initializing the buffer with [1-r_0, r_0] 343 | buf.push(F::one() - r[0]); 344 | buf.push(r[0]); 345 | } else { 346 | build_eq_x_r_helper(&r[1..], buf)?; 347 | 348 | // suppose at the previous step we received [b_1, ..., b_k] 349 | // for the current step we will need 350 | // if x_0 = 0: (1-r0) * [b_1, ..., b_k] 351 | // if x_0 = 1: r0 * [b_1, ..., b_k] 352 | // let mut res = vec![]; 353 | // for &b_i in buf.iter() { 354 | // let tmp = r[0] * b_i; 355 | // res.push(b_i - tmp); 356 | // res.push(tmp); 357 | // } 358 | // *buf = res; 359 | 360 | let mut res = vec![F::zero(); buf.len() << 1]; 361 | res.par_iter_mut().enumerate().for_each(|(i, val)| { 362 | let bi = buf[i >> 1]; 363 | let tmp = r[0] * bi; 364 | if i & 1 == 0 { 365 | *val = bi - tmp; 366 | } else { 367 | *val = tmp; 368 | } 369 | }); 370 | *buf = res; 371 | } 372 | 373 | Ok(()) 374 | } 375 | 376 | /// Decompose an integer into a binary vector in little endian. 377 | pub fn bit_decompose(input: u64, num_var: usize) -> Vec { 378 | let mut res = Vec::with_capacity(num_var); 379 | let mut i = input; 380 | for _ in 0..num_var { 381 | res.push(i & 1 == 1); 382 | i >>= 1; 383 | } 384 | res 385 | } 386 | 387 | #[cfg(test)] 388 | mod test { 389 | use super::*; 390 | use crate::espresso::multilinear_polynomial::testing_code::random_mle_list; 391 | use ark_bls12_381::Fr; 392 | use ark_ff::UniformRand; 393 | use ark_std::{ 394 | rand::{Rng, RngCore}, 395 | test_rng, 396 | }; 397 | 398 | impl VirtualPolynomial { 399 | /// Sample a random virtual polynomial, return the polynomial and its sum. 400 | fn rand( 401 | nv: usize, 402 | num_multiplicands_range: (usize, usize), 403 | num_products: usize, 404 | rng: &mut R, 405 | ) -> Result<(Self, F), ArithErrors> { 406 | let start = start_timer!(|| "sample random virtual polynomial"); 407 | 408 | let mut sum = F::zero(); 409 | let mut poly = VirtualPolynomial::new(nv); 410 | for _ in 0..num_products { 411 | let num_multiplicands = 412 | rng.gen_range(num_multiplicands_range.0..num_multiplicands_range.1); 413 | let (product, product_sum) = random_mle_list(nv, num_multiplicands, rng); 414 | let coefficient = F::rand(rng); 415 | poly.add_mle_list(product.into_iter(), coefficient)?; 416 | sum += product_sum * coefficient; 417 | } 418 | 419 | end_timer!(start); 420 | Ok((poly, sum)) 421 | } 422 | } 423 | 424 | #[test] 425 | fn test_virtual_polynomial_additions() -> Result<(), ArithErrors> { 426 | let mut rng = test_rng(); 427 | for nv in 2..5 { 428 | for num_products in 2..5 { 429 | let base: Vec = (0..nv).map(|_| Fr::rand(&mut rng)).collect(); 430 | 431 | let (a, _a_sum) = 432 | VirtualPolynomial::::rand(nv, (2, 3), num_products, &mut rng)?; 433 | let (b, _b_sum) = 434 | VirtualPolynomial::::rand(nv, (2, 3), num_products, &mut rng)?; 435 | let c = &a + &b; 436 | 437 | assert_eq!( 438 | a.evaluate(base.as_ref())? + b.evaluate(base.as_ref())?, 439 | c.evaluate(base.as_ref())? 
440 | ); 441 | } 442 | } 443 | 444 | Ok(()) 445 | } 446 | 447 | #[test] 448 | fn test_virtual_polynomial_mul_by_mle() -> Result<(), ArithErrors> { 449 | let mut rng = test_rng(); 450 | for nv in 2..5 { 451 | for num_products in 2..5 { 452 | let base: Vec = (0..nv).map(|_| Fr::rand(&mut rng)).collect(); 453 | 454 | let (a, _a_sum) = 455 | VirtualPolynomial::::rand(nv, (2, 3), num_products, &mut rng)?; 456 | let (b, _b_sum) = random_mle_list(nv, 1, &mut rng); 457 | let b_mle = b[0].clone(); 458 | let coeff = Fr::rand(&mut rng); 459 | let b_vp = VirtualPolynomial::new_from_mle(&b_mle, coeff); 460 | 461 | let mut c = a.clone(); 462 | 463 | c.mul_by_mle(b_mle, coeff)?; 464 | 465 | assert_eq!( 466 | a.evaluate(base.as_ref())? * b_vp.evaluate(base.as_ref())?, 467 | c.evaluate(base.as_ref())? 468 | ); 469 | } 470 | } 471 | 472 | Ok(()) 473 | } 474 | 475 | #[test] 476 | fn test_eq_xr() { 477 | let mut rng = test_rng(); 478 | for nv in 4..10 { 479 | let r: Vec = (0..nv).map(|_| Fr::rand(&mut rng)).collect(); 480 | let eq_x_r = build_eq_x_r(r.as_ref()).unwrap(); 481 | let eq_x_r2 = build_eq_x_r_for_test(r.as_ref()); 482 | assert_eq!(eq_x_r, eq_x_r2); 483 | } 484 | } 485 | 486 | /// Naive method to build eq(x, r). 487 | /// Only used for testing purpose. 488 | // Evaluate 489 | // eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) 490 | // over r, which is 491 | // eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) 492 | fn build_eq_x_r_for_test(r: &[F]) -> Arc> { 493 | // we build eq(x,r) from its evaluations 494 | // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars 495 | // for example, with num_vars = 4, x is a binary vector of 4, then 496 | // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) 497 | // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) 498 | // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) 499 | // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) 500 | // .... 
501 | // 1 1 1 1 -> r0 * r1 * r2 * r3 502 | // we will need 2^num_var evaluations 503 | 504 | // First, we build array for {1 - r_i} 505 | let one_minus_r: Vec = r.iter().map(|ri| F::one() - ri).collect(); 506 | 507 | let num_var = r.len(); 508 | let mut eval = vec![]; 509 | 510 | for i in 0..1 << num_var { 511 | let mut current_eval = F::one(); 512 | let bit_sequence = bit_decompose(i, num_var); 513 | 514 | for (&bit, (ri, one_minus_ri)) in 515 | bit_sequence.iter().zip(r.iter().zip(one_minus_r.iter())) 516 | { 517 | current_eval *= if bit { *ri } else { *one_minus_ri }; 518 | } 519 | eval.push(current_eval); 520 | } 521 | 522 | let mle = DenseMultilinearExtension::from_evaluations_vec(num_var, eval); 523 | 524 | Arc::new(mle) 525 | } 526 | } 527 | -------------------------------------------------------------------------------- /src/multifolding.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::CurveGroup; 2 | use ark_ff::Field; 3 | use ark_std::{One, Zero}; 4 | use std::ops::Add; 5 | 6 | use subroutines::PolyIOP; 7 | use transcript::IOPTranscript; 8 | 9 | use crate::ccs::cccs::{Witness, CCCS}; 10 | use crate::ccs::ccs::CCS; 11 | use crate::ccs::lcccs::LCCCS; 12 | use crate::ccs::pedersen::Commitment; 13 | use crate::ccs::util::compute_all_sum_Mz_evals; 14 | use crate::espresso::sum_check::structs::IOPProof as SumCheckProof; 15 | use crate::espresso::sum_check::{verifier::interpolate_uni_poly, SumCheck}; 16 | use crate::espresso::virtual_polynomial::{eq_eval, VPAuxInfo, VirtualPolynomial}; 17 | use crate::util::hypercube::BooleanHypercube; 18 | 19 | use std::marker::PhantomData; 20 | 21 | /// Proof defines a multifolding proof 22 | #[derive(Debug)] 23 | pub struct Proof { 24 | pub sc_proof: SumCheckProof, 25 | pub sigmas: Vec>, 26 | pub thetas: Vec>, 27 | } 28 | 29 | #[derive(Debug)] 30 | pub struct Multifolding { 31 | pub _c: PhantomData, 32 | } 33 | 34 | impl Multifolding { 35 | /// Compute the arrays of sigma_i and theta_i from step 4 corresponding to the LCCCS and CCCS 36 | /// instances 37 | pub fn compute_sigmas_and_thetas( 38 | ccs: &CCS, 39 | z_lcccs: &[Vec], 40 | z_cccs: &[Vec], 41 | r_x_prime: &[C::ScalarField], 42 | ) -> (Vec>, Vec>) { 43 | let mut sigmas: Vec> = Vec::new(); 44 | for z_lcccs_i in z_lcccs { 45 | // sigmas 46 | let sigma_i = compute_all_sum_Mz_evals(&ccs.M, z_lcccs_i, r_x_prime, ccs.s_prime); 47 | sigmas.push(sigma_i); 48 | } 49 | let mut thetas: Vec> = Vec::new(); 50 | for z_cccs_i in z_cccs { 51 | // thetas 52 | let theta_i = compute_all_sum_Mz_evals(&ccs.M, z_cccs_i, r_x_prime, ccs.s_prime); 53 | thetas.push(theta_i); 54 | } 55 | (sigmas, thetas) 56 | } 57 | 58 | /// Compute the right-hand-side of step 5 of the multifolding scheme 59 | pub fn compute_c_from_sigmas_and_thetas( 60 | ccs: &CCS, 61 | vec_sigmas: &[Vec], 62 | vec_thetas: &[Vec], 63 | gamma: C::ScalarField, 64 | beta: &[C::ScalarField], 65 | vec_r_x: &Vec>, 66 | r_x_prime: &[C::ScalarField], 67 | ) -> C::ScalarField { 68 | let mut c = C::ScalarField::zero(); 69 | 70 | let mut e_lcccs = Vec::new(); 71 | for r_x in vec_r_x { 72 | e_lcccs.push(eq_eval(r_x, r_x_prime).unwrap()); 73 | } 74 | for (i, sigmas) in vec_sigmas.iter().enumerate() { 75 | // (sum gamma^j * e_i * sigma_j) 76 | for (j, sigma_j) in sigmas.iter().enumerate() { 77 | let gamma_j = gamma.pow([(i * ccs.t + j) as u64]); 78 | c += gamma_j * e_lcccs[i] * sigma_j; 79 | } 80 | } 81 | 82 | let mu = vec_sigmas.len(); 83 | let e2 = eq_eval(beta, r_x_prime).unwrap(); 84 | for (k, thetas) in 
vec_thetas.iter().enumerate() { 85 | // + gamma^{t+1} * e2 * sum c_i * prod theta_j 86 | let mut lhs = C::ScalarField::zero(); 87 | for i in 0..ccs.q { 88 | let mut prod = C::ScalarField::one(); 89 | for j in ccs.S[i].clone() { 90 | prod *= thetas[j]; 91 | } 92 | lhs += ccs.c[i] * prod; 93 | } 94 | let gamma_t1 = gamma.pow([(mu * ccs.t + k) as u64]); 95 | c += gamma_t1 * e2 * lhs; 96 | } 97 | c 98 | } 99 | 100 | /// Compute g(x) polynomial for the given inputs. 101 | pub fn compute_g( 102 | running_instances: &[LCCCS], 103 | cccs_instances: &[CCCS], 104 | z_lcccs: &[Vec], 105 | z_cccs: &[Vec], 106 | gamma: C::ScalarField, 107 | beta: &[C::ScalarField], 108 | ) -> VirtualPolynomial { 109 | let mu = running_instances.len(); 110 | let mut vec_Ls: Vec> = Vec::new(); 111 | for (i, running_instance) in running_instances.iter().enumerate() { 112 | let mut Ls = running_instance.compute_Ls(&z_lcccs[i]); 113 | vec_Ls.append(&mut Ls); 114 | } 115 | let mut vec_Q: Vec> = Vec::new(); 116 | for (i, cccs_instance) in cccs_instances.iter().enumerate() { 117 | let Q = cccs_instance.compute_Q(&z_cccs[i], beta); 118 | vec_Q.push(Q); 119 | } 120 | let mut g = vec_Ls[0].clone(); 121 | 122 | // note: the following two loops can be integrated in the previous two loops, but left 123 | // separated for clarity in the PoC implementation. 124 | for (j, L_j) in vec_Ls.iter_mut().enumerate().skip(1) { 125 | let gamma_j = gamma.pow([j as u64]); 126 | L_j.scalar_mul(&gamma_j); 127 | g = g.add(L_j); 128 | } 129 | for (i, Q_i) in vec_Q.iter_mut().enumerate() { 130 | let gamma_mut_i = gamma.pow([(mu * cccs_instances[0].ccs.t + i) as u64]); 131 | Q_i.scalar_mul(&gamma_mut_i); 132 | g = g.add(Q_i); 133 | } 134 | g 135 | } 136 | 137 | pub fn fold( 138 | lcccs: &[LCCCS], 139 | cccs: &[CCCS], 140 | sigmas: &[Vec], 141 | thetas: &[Vec], 142 | r_x_prime: Vec, 143 | rho: C::ScalarField, 144 | ) -> LCCCS { 145 | let mut C_folded = C::zero(); 146 | let mut u_folded = C::ScalarField::zero(); 147 | let mut x_folded: Vec = vec![C::ScalarField::zero(); lcccs[0].x.len()]; 148 | let mut v_folded: Vec = vec![C::ScalarField::zero(); sigmas[0].len()]; 149 | 150 | for i in 0..(lcccs.len() + cccs.len()) { 151 | let rho_i = rho.pow([i as u64]); 152 | 153 | let c: C; 154 | let u: C::ScalarField; 155 | let x: Vec; 156 | let v: Vec; 157 | if i < lcccs.len() { 158 | c = lcccs[i].C.0; 159 | u = lcccs[i].u; 160 | x = lcccs[i].x.clone(); 161 | v = sigmas[i].clone(); 162 | } else { 163 | c = cccs[i - lcccs.len()].C.0; 164 | u = C::ScalarField::one(); 165 | x = cccs[i - lcccs.len()].x.clone(); 166 | v = thetas[i - lcccs.len()].clone(); 167 | } 168 | 169 | C_folded += c.mul(rho_i); 170 | u_folded += rho_i * u; 171 | x_folded = x_folded 172 | .iter() 173 | .zip( 174 | x.iter() 175 | .map(|x_i| *x_i * rho_i) 176 | .collect::>(), 177 | ) 178 | .map(|(a_i, b_i)| *a_i + b_i) 179 | .collect(); 180 | 181 | v_folded = v_folded 182 | .iter() 183 | .zip( 184 | v.iter() 185 | .map(|x_i| *x_i * rho_i) 186 | .collect::>(), 187 | ) 188 | .map(|(a_i, b_i)| *a_i + b_i) 189 | .collect(); 190 | } 191 | 192 | LCCCS:: { 193 | C: Commitment(C_folded), 194 | ccs: lcccs[0].ccs.clone(), 195 | u: u_folded, 196 | x: x_folded, 197 | r_x: r_x_prime, 198 | v: v_folded, 199 | } 200 | } 201 | 202 | pub fn fold_witness( 203 | w_lcccs: &[Witness], 204 | w_cccs: &[Witness], 205 | rho: C::ScalarField, 206 | ) -> Witness { 207 | let mut w_folded: Vec = vec![C::ScalarField::zero(); w_lcccs[0].w.len()]; 208 | let mut r_w_folded = C::ScalarField::zero(); 209 | 210 | for i in 0..(w_lcccs.len() 
+ w_cccs.len()) { 211 | let rho_i = rho.pow([i as u64]); 212 | let w: Vec; 213 | let r_w: C::ScalarField; 214 | 215 | if i < w_lcccs.len() { 216 | w = w_lcccs[i].w.clone(); 217 | r_w = w_lcccs[i].r_w; 218 | } else { 219 | w = w_cccs[i - w_lcccs.len()].w.clone(); 220 | r_w = w_cccs[i - w_lcccs.len()].r_w; 221 | } 222 | 223 | w_folded = w_folded 224 | .iter() 225 | .zip( 226 | w.iter() 227 | .map(|x_i| *x_i * rho_i) 228 | .collect::>(), 229 | ) 230 | .map(|(a_i, b_i)| *a_i + b_i) 231 | .collect(); 232 | 233 | r_w_folded += rho_i * r_w; 234 | } 235 | Witness { 236 | w: w_folded, 237 | r_w: r_w_folded, 238 | } 239 | } 240 | 241 | /// Perform the multifolding prover. 242 | /// 243 | /// Given μ LCCCS instances and ν CCS instances, fold them into a single LCCCS instance. Since 244 | /// this is the prover, also fold their witness. 245 | /// 246 | /// Return the final folded LCCCS, the folded witness, the sumcheck proof, and the helper 247 | /// sumcheck claims sigmas and thetas. 248 | pub fn prove( 249 | transcript: &mut IOPTranscript, 250 | running_instances: &[LCCCS], 251 | new_instances: &[CCCS], 252 | w_lcccs: &[Witness], 253 | w_cccs: &[Witness], 254 | ) -> (Proof, LCCCS, Witness) { 255 | // TODO appends to transcript 256 | 257 | assert!(!running_instances.is_empty()); 258 | assert!(!new_instances.is_empty()); 259 | 260 | // construct the LCCCS z vector from the relaxation factor, public IO and witness 261 | // XXX this deserves its own function in LCCCS 262 | let mut z_lcccs = Vec::new(); 263 | for (i, running_instance) in running_instances.iter().enumerate() { 264 | let z_1: Vec = [ 265 | vec![running_instance.u], 266 | running_instance.x.clone(), 267 | w_lcccs[i].w.to_vec(), 268 | ] 269 | .concat(); 270 | z_lcccs.push(z_1); 271 | } 272 | // construct the CCCS z vector from the public IO and witness 273 | let mut z_cccs = Vec::new(); 274 | for (i, new_instance) in new_instances.iter().enumerate() { 275 | let z_2: Vec = [ 276 | vec![C::ScalarField::one()], 277 | new_instance.x.clone(), 278 | w_cccs[i].w.to_vec(), 279 | ] 280 | .concat(); 281 | z_cccs.push(z_2); 282 | } 283 | 284 | // Step 1: Get some challenges 285 | let gamma: C::ScalarField = transcript.get_and_append_challenge(b"gamma").unwrap(); 286 | let beta: Vec = transcript 287 | .get_and_append_challenge_vectors(b"beta", running_instances[0].ccs.s) 288 | .unwrap(); 289 | 290 | // Compute g(x) 291 | let g = Self::compute_g( 292 | running_instances, 293 | new_instances, 294 | &z_lcccs, 295 | &z_cccs, 296 | gamma, 297 | &beta, 298 | ); 299 | 300 | // Step 3: Run the sumcheck prover 301 | let sumcheck_proof = 302 | as SumCheck>::prove(&g, transcript).unwrap(); // XXX unwrap 303 | 304 | // Note: The following two "sanity checks" are done for this prototype, in a final version 305 | // they should be removed. 306 | // 307 | // Sanity check 1: evaluate g(x) over x \in {0,1} (the boolean hypercube), and check that 308 | // its sum is equal to the extracted_sum from the SumCheck. 
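        // (Why this holds: g(x) = \sum_j gamma^j * L_j(x) + \sum_i gamma^{mu*t+i} * Q_i(x);
        // over the boolean hypercube each L_j sums to the LCCCS value v_j, while each Q_i
        // sums to zero for a satisfied CCCS instance, so the total is \sum_j gamma^j * v_j,
        // which is exactly the claimed sum checked in sanity check 2 below.)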
309 | ////////////////////////////////////////////////////////////////////// 310 | let mut g_over_bhc = C::ScalarField::zero(); 311 | for x in BooleanHypercube::new(running_instances[0].ccs.s) { 312 | g_over_bhc += g.evaluate(&x).unwrap(); 313 | } 314 | 315 | // note: this is the sum of g(x) over the whole boolean hypercube 316 | let extracted_sum = 317 | as SumCheck>::extract_sum(&sumcheck_proof); 318 | assert_eq!(extracted_sum, g_over_bhc); 319 | // Sanity check 2: expect \sum v_j * gamma^j to be equal to the sum of g(x) over the 320 | // boolean hypercube (and also equal to the extracted_sum from the SumCheck). 321 | let mut sum_v_j_gamma = C::ScalarField::zero(); 322 | for (i, running_instance) in running_instances.iter().enumerate() { 323 | for j in 0..running_instance.v.len() { 324 | let gamma_j = gamma.pow([(i * running_instances[0].ccs.t + j) as u64]); 325 | sum_v_j_gamma += running_instance.v[j] * gamma_j; 326 | } 327 | } 328 | assert_eq!(g_over_bhc, sum_v_j_gamma); 329 | assert_eq!(extracted_sum, sum_v_j_gamma); 330 | ////////////////////////////////////////////////////////////////////// 331 | 332 | // Step 2: dig into the sumcheck and extract r_x_prime 333 | let r_x_prime = sumcheck_proof.point.clone(); 334 | 335 | // Step 4: compute sigmas and thetas 336 | let (sigmas, thetas) = Self::compute_sigmas_and_thetas( 337 | &running_instances[0].ccs, 338 | &z_lcccs, 339 | &z_cccs, 340 | &r_x_prime, 341 | ); 342 | 343 | // Step 6: Get the folding challenge 344 | let rho: C::ScalarField = transcript.get_and_append_challenge(b"rho").unwrap(); 345 | 346 | // Step 7: Create the folded instance 347 | let folded_lcccs = Self::fold( 348 | running_instances, 349 | new_instances, 350 | &sigmas, 351 | &thetas, 352 | r_x_prime, 353 | rho, 354 | ); 355 | 356 | // Step 8: Fold the witnesses 357 | let folded_witness = Self::fold_witness(w_lcccs, w_cccs, rho); 358 | 359 | ( 360 | Proof:: { 361 | sc_proof: sumcheck_proof, 362 | sigmas, 363 | thetas, 364 | }, 365 | folded_lcccs, 366 | folded_witness, 367 | ) 368 | } 369 | 370 | /// Perform the multifolding verifier: 371 | /// 372 | /// Given μ LCCCS instances and ν CCS instances, fold them into a single LCCCS instance. 373 | /// 374 | /// Return the folded LCCCS instance. 
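    ///
    /// A minimal prove/verify round-trip sketch (illustrative only; `NIMFS` is the
    /// `Multifolding<G1Projective>` alias from the tests below, the transcript label is an
    /// assumption, and both sides must initialize their transcripts identically;
    /// `lcccs`, `cccs`, `w1`, `w2` and `pedersen_params` are assumed to be built as in the tests):
    ///
    /// ```ignore
    /// let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
    /// let (proof, folded_lcccs, folded_w) =
    ///     NIMFS::prove(&mut transcript_p, &[lcccs.clone()], &[cccs.clone()], &[w1], &[w2]);
    ///
    /// let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
    /// let folded_lcccs_v = NIMFS::verify(&mut transcript_v, &[lcccs], &[cccs], proof);
    /// folded_lcccs_v.check_relation(&pedersen_params, &folded_w).unwrap();
    /// ```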
375 | pub fn verify( 376 | transcript: &mut IOPTranscript, 377 | running_instances: &[LCCCS], 378 | new_instances: &[CCCS], 379 | proof: Proof, 380 | ) -> LCCCS { 381 | // TODO appends to transcript 382 | 383 | assert!(!running_instances.is_empty()); 384 | assert!(!new_instances.is_empty()); 385 | 386 | // Step 1: Get some challenges 387 | let gamma: C::ScalarField = transcript.get_and_append_challenge(b"gamma").unwrap(); 388 | let beta: Vec = transcript 389 | .get_and_append_challenge_vectors(b"beta", running_instances[0].ccs.s) 390 | .unwrap(); 391 | 392 | let vp_aux_info = VPAuxInfo:: { 393 | max_degree: running_instances[0].ccs.d + 1, 394 | num_variables: running_instances[0].ccs.s, 395 | phantom: PhantomData::, 396 | }; 397 | 398 | // Step 3: Start verifying the sumcheck 399 | // First, compute the expected sumcheck sum: \sum gamma^j v_j 400 | let mut sum_v_j_gamma = C::ScalarField::zero(); 401 | for (i, running_instance) in running_instances.iter().enumerate() { 402 | for j in 0..running_instance.v.len() { 403 | let gamma_j = gamma.pow([(i * running_instances[0].ccs.t + j) as u64]); 404 | sum_v_j_gamma += running_instance.v[j] * gamma_j; 405 | } 406 | } 407 | 408 | // Verify the interactive part of the sumcheck 409 | let sumcheck_subclaim = as SumCheck>::verify( 410 | sum_v_j_gamma, 411 | &proof.sc_proof, 412 | &vp_aux_info, 413 | transcript, 414 | ) 415 | .unwrap(); 416 | 417 | // Step 2: Dig into the sumcheck claim and extract the randomness used 418 | let r_x_prime = sumcheck_subclaim.point.clone(); 419 | 420 | // Step 5: Finish verifying sumcheck (verify the claim c) 421 | let c = Self::compute_c_from_sigmas_and_thetas( 422 | &new_instances[0].ccs, 423 | &proof.sigmas, 424 | &proof.thetas, 425 | gamma, 426 | &beta, 427 | &running_instances 428 | .iter() 429 | .map(|lcccs| lcccs.r_x.clone()) 430 | .collect(), 431 | &r_x_prime, 432 | ); 433 | // check that the g(r_x') from the sumcheck proof is equal to the computed c from sigmas&thetas 434 | assert_eq!(c, sumcheck_subclaim.expected_evaluation); 435 | 436 | // Sanity check: we can also compute g(r_x') from the proof last evaluation value, and 437 | // should be equal to the previously obtained values. 
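        // (The last prover message is a list of evaluations of the final round polynomial at
        // 0..=max_degree; interpolate_uni_poly re-evaluates that polynomial, via Lagrange
        // interpolation, at the last sumcheck challenge to recover g(r_x').)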
436 |         // Sanity check: we can also compute g(r_x') from the proof's last evaluation value, and it
437 |         // should be equal to the previously obtained values.
438 |         let g_on_rxprime_from_sumcheck_last_eval = interpolate_uni_poly::<C::ScalarField>(
439 |             &proof.sc_proof.proofs.last().unwrap().evaluations,
440 |             *r_x_prime.last().unwrap(),
441 |         )
442 |         .unwrap();
443 |         assert_eq!(g_on_rxprime_from_sumcheck_last_eval, c);
444 |         assert_eq!(
445 |             g_on_rxprime_from_sumcheck_last_eval,
446 |             sumcheck_subclaim.expected_evaluation
447 |         );
448 | 
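        // How the check above works (informal): the last sumcheck round message consists of the
        // evaluations of a low-degree univariate polynomial s_last(X) at X = 0, 1, ..., deg.
        // interpolate_uni_poly Lagrange-interpolates those points and evaluates the result at the
        // final challenge (the last coordinate of r_x'), which for an honest prover equals g(r_x').
        // For example, a degree-2 message sent as [s(0), s(1), s(2)] is reconstructed as
        //     s(r) = s(0) * (r-1)(r-2)/2 - s(1) * r(r-2) + s(2) * r(r-1)/2.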
449 |         // Step 6: Get the folding challenge
450 |         let rho: C::ScalarField = transcript.get_and_append_challenge(b"rho").unwrap();
451 | 
452 |         // Step 7: Compute the folded instance
453 |         Self::fold(
454 |             running_instances,
455 |             new_instances,
456 |             &proof.sigmas,
457 |             &proof.thetas,
458 |             r_x_prime,
459 |             rho,
460 |         )
461 |     }
462 | }
463 | 
464 | #[cfg(test)]
465 | pub mod test {
466 |     use super::*;
467 |     use crate::ccs::ccs::test::{get_test_ccs, get_test_z};
468 |     use ark_std::test_rng;
469 |     use ark_std::UniformRand;
470 | 
471 |     use crate::ccs::pedersen::Pedersen;
472 |     use ark_bls12_381::{Fr, G1Projective};
473 | 
474 |     // NIMFS: Non Interactive Multifolding Scheme
475 |     type NIMFS = Multifolding<G1Projective>;
476 | 
477 |     #[test]
478 |     fn test_compute_sigmas_and_thetas() -> () {
479 |         let ccs = get_test_ccs();
480 |         let z1 = get_test_z(3);
481 |         let z2 = get_test_z(4);
482 |         ccs.check_relation(&z1).unwrap();
483 |         ccs.check_relation(&z2).unwrap();
484 | 
485 |         let mut rng = test_rng();
486 |         let gamma: Fr = Fr::rand(&mut rng);
487 |         let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
488 |         let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
489 | 
490 |         // Initialize a multifolding object
491 |         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
492 |         let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1);
493 |         let (cccs_instance, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z2);
494 | 
495 |         let (sigmas, thetas) = NIMFS::compute_sigmas_and_thetas(
496 |             &lcccs_instance.ccs,
497 |             &vec![z1.clone()],
498 |             &vec![z2.clone()],
499 |             &r_x_prime,
500 |         );
501 | 
502 |         let g = NIMFS::compute_g(
503 |             &vec![lcccs_instance.clone()],
504 |             &vec![cccs_instance.clone()],
505 |             &vec![z1.clone()],
506 |             &vec![z2.clone()],
507 |             gamma,
508 |             &beta,
509 |         );
510 | 
511 |         // we expect g(r_x_prime) to be equal to:
512 |         // c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j
513 |         // from compute_c_from_sigmas_and_thetas
514 |         let expected_c = g.evaluate(&r_x_prime).unwrap();
515 |         let c = NIMFS::compute_c_from_sigmas_and_thetas(
516 |             &ccs,
517 |             &sigmas,
518 |             &thetas,
519 |             gamma,
520 |             &beta,
521 |             &vec![lcccs_instance.r_x],
522 |             &r_x_prime,
523 |         );
524 |         assert_eq!(c, expected_c);
525 |     }
526 | 
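    // What compute_sigmas_and_thetas returns (informal sketch; see the function for the exact
    // definition): for each LCCCS input vector z_i and each CCS matrix M_j,
    //     sigma_{i,j} = sum_{y in {0,1}^{s'}} ~M_j(r_x', y) * ~z_i(y),
    // and for each CCCS input vector z_k,
    //     theta_{k,j} = sum_{y in {0,1}^{s'}} ~M_j(r_x', y) * ~z_k(y),
    // i.e. both are the matrix-times-vector MLE sums evaluated at the sumcheck point r_x'. The
    // test above checks that plugging them into compute_c_from_sigmas_and_thetas reproduces
    // g(r_x').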
527 |     #[test]
528 |     fn test_compute_g() -> () {
529 |         let ccs = get_test_ccs();
530 |         let z1 = get_test_z(3);
531 |         let z2 = get_test_z(4);
532 |         ccs.check_relation(&z1).unwrap();
533 |         ccs.check_relation(&z2).unwrap();
534 | 
535 |         let mut rng = test_rng(); // TMP
536 |         let gamma: Fr = Fr::rand(&mut rng);
537 |         let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
538 | 
539 |         // Initialize a multifolding object
540 |         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
541 |         let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1);
542 |         let (cccs_instance, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z2);
543 | 
544 |         let mut sum_v_j_gamma = Fr::zero();
545 |         for j in 0..lcccs_instance.v.len() {
546 |             let gamma_j = gamma.pow([j as u64]);
547 |             sum_v_j_gamma += lcccs_instance.v[j] * gamma_j;
548 |         }
549 | 
550 |         // Compute g(x) with that r_x
551 |         let g = NIMFS::compute_g(
552 |             &vec![lcccs_instance.clone()],
553 |             &vec![cccs_instance.clone()],
554 |             &vec![z1.clone()],
555 |             &vec![z2.clone()],
556 |             gamma,
557 |             &beta,
558 |         );
559 | 
560 |         // evaluate g(x) over x \in {0,1}^s
561 |         let mut g_on_bhc = Fr::zero();
562 |         for x in BooleanHypercube::new(ccs.s).into_iter() {
563 |             g_on_bhc += g.evaluate(&x).unwrap();
564 |         }
565 | 
566 |         // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s
567 |         let mut sum_Lj_on_bhc = Fr::zero();
568 |         let vec_L = lcccs_instance.compute_Ls(&z1);
569 |         for x in BooleanHypercube::new(ccs.s).into_iter() {
570 |             for j in 0..vec_L.len() {
571 |                 let gamma_j = gamma.pow([j as u64]);
572 |                 sum_Lj_on_bhc += vec_L[j].evaluate(&x).unwrap() * gamma_j;
573 |             }
574 |         }
575 | 
576 |         // Q(x) over the bhc is assumed to be zero, as checked in the test 'test_compute_Q'
577 |         assert_ne!(g_on_bhc, Fr::zero());
578 | 
579 |         // evaluating g(x) over the boolean hypercube should give the same result as evaluating the
580 |         // sum of gamma^j * Lj(x) over the boolean hypercube
581 |         assert_eq!(g_on_bhc, sum_Lj_on_bhc);
582 | 
583 |         // evaluating g(x) over the boolean hypercube should give the same result as evaluating the
584 |         // sum of gamma^j * v_j over j \in [t]
585 |         assert_eq!(g_on_bhc, sum_v_j_gamma);
586 |     }
587 | 
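    // What fold / fold_witness do (informal sketch; see those functions for the exact indexing):
    // the folded LCCCS is a random linear combination of all inputs by powers of rho, roughly
    //     C'   = sum_i rho^i * C_i         + sum_k rho^{mu + k} * hat_C_k,
    //     v'_j = sum_i rho^i * sigma_{i,j} + sum_k rho^{mu + k} * theta_{k,j},
    // with u, x and the witness vectors combined the same way. The test below folds one LCCCS with
    // one CCCS instance and checks that the result still satisfies the LCCCS relation.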
588 |     #[test]
589 |     fn test_fold() -> () {
590 |         let ccs = get_test_ccs();
591 |         let z1 = get_test_z(3);
592 |         let z2 = get_test_z(4);
593 |         ccs.check_relation(&z1).unwrap();
594 |         ccs.check_relation(&z2).unwrap();
595 | 
596 |         let mut rng = test_rng();
597 |         let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
598 | 
599 |         // Initialize a multifolding object
600 |         let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
601 |         let (running_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1);
602 | 
603 |         let (sigmas, thetas) = Multifolding::<G1Projective>::compute_sigmas_and_thetas(
604 |             &running_instance.ccs,
605 |             &vec![z1.clone()],
606 |             &vec![z2.clone()],
607 |             &r_x_prime,
608 |         );
609 | 
610 |         let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
611 | 
612 |         let (lcccs, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1);
613 |         let (cccs, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z2);
614 | 
615 |         lcccs.check_relation(&pedersen_params, &w1).unwrap();
616 |         cccs.check_relation(&pedersen_params, &w2).unwrap();
617 | 
618 |         let mut rng = test_rng();
619 |         let rho = Fr::rand(&mut rng);
620 | 
621 |         let folded = Multifolding::<G1Projective>::fold(
622 |             &vec![lcccs],
623 |             &vec![cccs],
624 |             &sigmas,
625 |             &thetas,
626 |             r_x_prime,
627 |             rho,
628 |         );
629 | 
630 |         let w_folded = Multifolding::<G1Projective>::fold_witness(&vec![w1], &vec![w2], rho);
631 | 
632 |         // check the LCCCS relation
633 |         folded.check_relation(&pedersen_params, &w_folded).unwrap();
634 |     }
635 | 
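    // Note on the end-to-end tests below: the prover's and the verifier's transcripts must absorb
    // exactly the same messages in the same order (here both are created with the b"multifolding"
    // label and absorb the same b"init" message), so that the derived challenges gamma, beta and
    // rho coincide on both sides; otherwise the verifier would fold with different challenges and
    // the resulting LCCCS instances would not match.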
636 |     /// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper)
637 |     #[test]
638 |     pub fn test_basic_multifolding() {
639 |         let mut rng = test_rng();
640 | 
641 |         // Create a basic CCS circuit
642 |         let ccs = get_test_ccs::<G1Projective>();
643 |         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
644 | 
645 |         // Generate a satisfying witness
646 |         let z_1 = get_test_z(3);
647 |         // Generate another satisfying witness
648 |         let z_2 = get_test_z(4);
649 | 
650 |         // Create the LCCCS instance out of z_1
651 |         let (running_instance, w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_1);
652 |         // Create the CCCS instance out of z_2
653 |         let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2);
654 | 
655 |         // Prover's transcript
656 |         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
657 |         transcript_p.append_message(b"init", b"init").unwrap();
658 | 
659 |         // Run the prover side of the multifolding
660 |         let (proof, folded_lcccs, folded_witness) = NIMFS::prove(
661 |             &mut transcript_p,
662 |             &vec![running_instance.clone()],
663 |             &vec![new_instance.clone()],
664 |             &vec![w1],
665 |             &vec![w2],
666 |         );
667 | 
668 |         // Verifier's transcript
669 |         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
670 |         transcript_v.append_message(b"init", b"init").unwrap();
671 | 
672 |         // Run the verifier side of the multifolding
673 |         let folded_lcccs_v = NIMFS::verify(
674 |             &mut transcript_v,
675 |             &vec![running_instance.clone()],
676 |             &vec![new_instance.clone()],
677 |             proof,
678 |         );
679 |         assert_eq!(folded_lcccs, folded_lcccs_v);
680 | 
681 |         // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
682 |         folded_lcccs
683 |             .check_relation(&pedersen_params, &folded_witness)
684 |             .unwrap();
685 |     }
686 | 
687 |     /// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance
688 |     #[test]
689 |     pub fn test_multifolding_two_instances_multiple_steps() {
690 |         let mut rng = test_rng();
691 | 
692 |         let ccs = get_test_ccs::<G1Projective>();
693 | 
694 |         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
695 | 
696 |         // LCCCS witness
697 |         let z_1 = get_test_z(2);
698 |         let (mut running_instance, mut w1) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_1);
699 | 
700 |         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
701 |         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
702 |         transcript_p.append_message(b"init", b"init").unwrap();
703 |         transcript_v.append_message(b"init", b"init").unwrap();
704 | 
705 |         let n: usize = 10;
706 |         for i in 3..n {
707 |             println!("\niteration: i {}", i); // DBG
708 | 
709 |             // CCS witness
710 |             let z_2 = get_test_z(i);
711 |             println!("z_2 {:?}", z_2); // DBG
712 | 
713 |             let (new_instance, w2) = ccs.to_cccs(&mut rng, &pedersen_params, &z_2);
714 | 
715 |             // run the prover side of the multifolding
716 |             let (proof, folded_lcccs, folded_witness) = NIMFS::prove(
717 |                 &mut transcript_p,
718 |                 &vec![running_instance.clone()],
719 |                 &vec![new_instance.clone()],
720 |                 &vec![w1],
721 |                 &vec![w2],
722 |             );
723 | 
724 |             // run the verifier side of the multifolding
725 |             let folded_lcccs_v = NIMFS::verify(
726 |                 &mut transcript_v,
727 |                 &vec![running_instance.clone()],
728 |                 &vec![new_instance.clone()],
729 |                 proof,
730 |             );
731 | 
732 |             assert_eq!(folded_lcccs, folded_lcccs_v);
733 | 
734 |             // check that the folded instance with the folded witness holds the LCCCS relation
735 |             println!("check_relation {}", i);
736 |             folded_lcccs
737 |                 .check_relation(&pedersen_params, &folded_witness)
738 |                 .unwrap();
739 | 
740 |             running_instance = folded_lcccs;
741 |             w1 = folded_witness;
742 |         }
743 |     }
744 | 
745 |     /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step.
746 |     #[test]
747 |     pub fn test_multifolding_mu_nu_instances() {
748 |         let mut rng = test_rng();
749 | 
750 |         // Create a basic CCS circuit
751 |         let ccs = get_test_ccs::<G1Projective>();
752 |         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
753 | 
754 |         let mu = 10;
755 |         let nu = 15;
756 | 
757 |         // Generate mu LCCCS & nu CCCS satisfying witnesses
758 |         let mut z_lcccs = Vec::new();
759 |         for i in 0..mu {
760 |             let z = get_test_z(i + 3);
761 |             z_lcccs.push(z);
762 |         }
763 |         let mut z_cccs = Vec::new();
764 |         for i in 0..nu {
765 |             let z = get_test_z(nu + i + 3);
766 |             z_cccs.push(z);
767 |         }
768 | 
769 |         // Create the LCCCS instances out of z_lcccs
770 |         let mut lcccs_instances = Vec::new();
771 |         let mut w_lcccs = Vec::new();
772 |         for i in 0..mu {
773 |             let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_lcccs[i]);
774 |             lcccs_instances.push(running_instance);
775 |             w_lcccs.push(w);
776 |         }
777 |         // Create the CCCS instances out of z_cccs
778 |         let mut cccs_instances = Vec::new();
779 |         let mut w_cccs = Vec::new();
780 |         for i in 0..nu {
781 |             let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, &z_cccs[i]);
782 |             cccs_instances.push(new_instance);
783 |             w_cccs.push(w);
784 |         }
785 | 
786 |         // Prover's transcript
787 |         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
788 |         transcript_p.append_message(b"init", b"init").unwrap();
789 | 
790 |         // Run the prover side of the multifolding
791 |         let (proof, folded_lcccs, folded_witness) = NIMFS::prove(
792 |             &mut transcript_p,
793 |             &lcccs_instances,
794 |             &cccs_instances,
795 |             &w_lcccs,
796 |             &w_cccs,
797 |         );
798 | 
799 |         // Verifier's transcript
800 |         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
801 |         transcript_v.append_message(b"init", b"init").unwrap();
802 | 
803 |         // Run the verifier side of the multifolding
804 |         let folded_lcccs_v =
805 |             NIMFS::verify(&mut transcript_v, &lcccs_instances, &cccs_instances, proof);
806 |         assert_eq!(folded_lcccs, folded_lcccs_v);
807 | 
808 |         // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
809 |         folded_lcccs
810 |             .check_relation(&pedersen_params, &folded_witness)
811 |             .unwrap();
812 |     }
813 | 
814 |     /// Test that generates mu>1 and nu>1 instances, folds them in a single multifolding step,
815 |     /// and repeats the process over multiple steps.
816 |     #[test]
817 |     pub fn test_multifolding_mu_nu_instances_multiple_steps() {
818 |         let mut rng = test_rng();
819 | 
820 |         // Create a basic CCS circuit
821 |         let ccs = get_test_ccs::<G1Projective>();
822 |         let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
823 | 
824 |         // Prover's transcript
825 |         let mut transcript_p = IOPTranscript::<Fr>::new(b"multifolding");
826 |         transcript_p.append_message(b"init", b"init").unwrap();
827 | 
828 |         // Verifier's transcript
829 |         let mut transcript_v = IOPTranscript::<Fr>::new(b"multifolding");
830 |         transcript_v.append_message(b"init", b"init").unwrap();
831 | 
832 |         let n_steps = 3;
833 | 
834 |         // number of LCCCS & CCCS instances in each multifolding step
835 |         let mu = 10;
836 |         let nu = 15;
837 | 
838 |         // Generate mu LCCCS & nu CCCS satisfying witnesses, for each step
839 |         for step in 0..n_steps {
840 |             let mut z_lcccs = Vec::new();
841 |             for i in 0..mu {
842 |                 let z = get_test_z(step + i + 3);
843 |                 z_lcccs.push(z);
844 |             }
845 |             let mut z_cccs = Vec::new();
846 |             for i in 0..nu {
847 |                 let z = get_test_z(nu + i + 3);
848 |                 z_cccs.push(z);
849 |             }
850 | 
851 |             // Create the LCCCS instances out of z_lcccs
852 |             let mut lcccs_instances = Vec::new();
853 |             let mut w_lcccs = Vec::new();
854 |             for i in 0..mu {
855 |                 let (running_instance, w) = ccs.to_lcccs(&mut rng, &pedersen_params, &z_lcccs[i]);
856 |                 lcccs_instances.push(running_instance);
857 |                 w_lcccs.push(w);
858 |             }
859 |             // Create the CCCS instances out of z_cccs
860 |             let mut cccs_instances = Vec::new();
861 |             let mut w_cccs = Vec::new();
862 |             for i in 0..nu {
863 |                 let (new_instance, w) = ccs.to_cccs(&mut rng, &pedersen_params, &z_cccs[i]);
864 |                 cccs_instances.push(new_instance);
865 |                 w_cccs.push(w);
866 |             }
867 | 
868 |             // Run the prover side of the multifolding
869 |             let (proof, folded_lcccs, folded_witness) = NIMFS::prove(
870 |                 &mut transcript_p,
871 |                 &lcccs_instances,
872 |                 &cccs_instances,
873 |                 &w_lcccs,
874 |                 &w_cccs,
875 |             );
876 | 
877 |             // Run the verifier side of the multifolding
878 |             let folded_lcccs_v =
879 |                 NIMFS::verify(&mut transcript_v, &lcccs_instances, &cccs_instances, proof);
880 |             assert_eq!(folded_lcccs, folded_lcccs_v);
881 | 
882 |             // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
883 |             folded_lcccs
884 |                 .check_relation(&pedersen_params, &folded_witness)
885 |                 .unwrap();
886 |         }
887 |     }
888 | }
889 | 
--------------------------------------------------------------------------------