├── src
├── util
│ ├── mod.rs
│ ├── hypercube.rs
│ ├── vec.rs
│ └── mle.rs
├── ccs
│ ├── mod.rs
│ ├── pedersen.rs
│ ├── ccs.rs
│ ├── util.rs
│ ├── lcccs.rs
│ └── cccs.rs
├── espresso
│ ├── mod.rs
│ ├── util.rs
│ ├── errors.rs
│ ├── sum_check
│ │ ├── structs.rs
│ │ ├── mod.rs
│ │ ├── prover.rs
│ │ └── verifier.rs
│ ├── multilinear_polynomial.rs
│ └── virtual_polynomial.rs
├── lib.rs
└── multifolding.rs
├── doc
└── images
│ └── multifolding_diagram.png
├── TODO.md
├── .gitignore
├── Cargo.toml
├── LICENSE
└── README.md
/src/util/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod hypercube;
2 | pub mod mle;
3 | pub mod vec;
4 |
--------------------------------------------------------------------------------
/doc/images/multifolding_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/privacy-ethereum/multifolding-poc/HEAD/doc/images/multifolding_diagram.png
--------------------------------------------------------------------------------
/src/ccs/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod cccs;
2 | #[allow(clippy::module_inception)]
3 | pub mod ccs;
4 | pub mod lcccs;
5 | pub mod pedersen;
6 | pub mod util;
7 |
--------------------------------------------------------------------------------
/src/espresso/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod errors;
2 | pub mod multilinear_polynomial;
3 | pub mod util;
4 | pub mod virtual_polynomial;
5 |
6 | pub mod sum_check;
7 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![warn(missing_debug_implementations, rust_2018_idioms)]
2 | #![allow(non_snake_case)]
3 | #![allow(non_upper_case_globals)]
4 |
5 | pub mod ccs;
6 | pub mod multifolding;
7 |
8 | pub mod espresso;
9 | pub mod util;
10 |
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 | # TODO
2 |
3 | - Security: Think thoroughly about the FS transcript inputs (implement [strong FS](https://eprint.iacr.org/2023/691))
4 | - Fix: Properly implement error handling and removal of unwraps()
5 | - Feature: Write benchmarks
6 | - Fix: Go over the remaining TODOs and XXXs in the codebase
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | debug/
4 | target/
5 |
6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
8 | Cargo.lock
9 |
10 | # These are backup files generated by rustfmt
11 | **/*.rs.bk
12 |
13 | # MSVC Windows builds of rustc generate these, which store debugging information
14 | *.pdb
15 |
--------------------------------------------------------------------------------
/src/espresso/util.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Espresso Systems (espressosys.com)
2 | // This file is part of the HyperPlonk library.
3 |
4 | // You should have received a copy of the MIT License
5 | // along with the HyperPlonk library. If not, see <https://mit-license.org/>.
6 |
7 | use ark_std::log2;
8 |
9 | /// Returns the number of variables needed for a single MLE that can
10 | /// batch a list of `polynomials_len` MLEs, each over `num_var` variables.
// NOTE(review): `ark_std::log2` rounds up (ceiling log2), so a non-power-of-two
// list length is treated as padded to the next power of two — confirm callers expect this.
11 | #[inline]
12 | pub fn get_batched_nv(num_var: usize, polynomials_len: usize) -> usize {
13 | num_var + log2(polynomials_len) as usize
14 | }
15 |
--------------------------------------------------------------------------------
/src/espresso/errors.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Espresso Systems (espressosys.com)
2 | // This file is part of the HyperPlonk library.
3 |
4 | // You should have received a copy of the MIT License
5 | // along with the HyperPlonk library. If not, see <https://mit-license.org/>.
6 |
7 | //! Error module.
8 |
9 | use ark_std::string::String;
10 | use displaydoc::Display;
11 |
12 | /// A `enum` specifying the possible failure modes of the arithmetics.
// NOTE(review): `displaydoc::Display` derives `Display` from the variant doc
// comments below, so those comments are runtime-visible error messages — they
// must not be reworded casually.
13 | #[derive(Display, Debug)]
14 | pub enum ArithErrors {
15 | /// Invalid parameters: {0}
16 | InvalidParameters(String),
17 | /// Should not arrive to this point
18 | ShouldNotArrive,
19 | /// An error during (de)serialization: {0}
20 | SerializationErrors(ark_serialize::SerializationError),
21 | }
22 |
// NOTE(review): the generic source type was lost in extraction; judging by the
// `fn from` signature below, this should read
// `impl From<ark_serialize::SerializationError> for ArithErrors`.
23 | impl From for ArithErrors {
24 | fn from(e: ark_serialize::SerializationError) -> Self {
25 | Self::SerializationErrors(e)
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "multifolding-poc"
3 | version = "0.2.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 | ark-bls12-381 = "0.4.0"
10 | ark-ff = {version="^0.4.0", features=["parallel"]}
11 | ark-poly = {version="^0.4.0", features=["parallel"]}
12 | ark-ec = "^0.4.0"
13 | ark-serialize = "0.4.2"
14 | ark-std = { version = "^0.4.0", features=["parallel"] }
15 | displaydoc = "0.2.4"
16 | rayon = "1.7.0"
17 | thiserror = "1.0.40"
18 |
19 | # import for poly_iop & transcript
20 | subroutines = {git="https://github.com/EspressoSystems/hyperplonk"}
21 | transcript = {git="https://github.com/EspressoSystems/hyperplonk"}
22 |
23 | [features]
24 | # default = [ "parallel", "print-trace" ]
25 | default = ["parallel"]
26 | # extensive sanity checks that are useful for debugging
27 | extensive_sanity_checks = [ ]
28 |
29 | parallel = [
30 | "ark-std/parallel",
31 | "ark-ff/parallel",
32 | "ark-poly/parallel",
33 | ]
34 | print-trace = [
35 | "ark-std/print-trace",
36 | ]
37 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Privacy & Scaling Explorations (formerly known as appliedzkp)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Hypernova multifolding
2 |
3 | A complete implementation of the [Hypernova](https://eprint.iacr.org/2023/573) folding scheme (see section 5 of the paper) in arkworks.
4 |
5 | This implementation is not meant to be used in production. Its purpose is to help us refine the interfaces and abstractions, so that multifolding can be integrated into a wider Hypernova implementation. A complete Hypernova implementation includes IVC, an in-circuit multifolding verifier, and R1CS-to-CCS and Plonkish-to-CCS compilers.
6 |
7 |
8 | ![Multifolding diagram](doc/images/multifolding_diagram.png)
9 |
15 | ## Documentation
16 |
17 | See `src/multifolding.rs:test_basic_multifolding()` for a demonstration of the multifolding.
18 |
19 | See `TODO.md` for open future tasks.
20 |
21 | ## Building & Running
22 |
23 | As usual, you can run the tests using `cargo test --release`.
24 |
25 | ## Acknowledgements
26 |
27 | Shoutout to Espresso Systems for the [Hyperplonk implementation](https://github.com/EspressoSystems/hyperplonk/tree/main/arithmetic/src) that included useful multivariate polynomial routines.
28 |
--------------------------------------------------------------------------------
/src/espresso/sum_check/structs.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Espresso Systems (espressosys.com)
2 | // This file is part of the HyperPlonk library.
3 |
4 | // You should have received a copy of the MIT License
5 | // along with the HyperPlonk library. If not, see <https://mit-license.org/>.
6 |
7 | //! This module defines structs that are shared by all sub protocols.
8 |
9 | use crate::espresso::virtual_polynomial::VirtualPolynomial;
10 | use ark_ff::PrimeField;
11 | use ark_serialize::CanonicalSerialize;
12 |
// NOTE(review): the generic parameters of these types were stripped during
// extraction — e.g. `IOPProof<F: PrimeField>` with `point: Vec<F>` and
// `proofs: Vec<IOPProverMessage<F>>`, `evaluations: Vec<F>`, etc. Verify against
// the upstream HyperPlonk sources before editing.
13 | /// An IOP proof is a collection of
14 | /// - messages from prover to verifier at each round through the interactive
15 | /// protocol.
16 | /// - a point that is generated by the transcript for evaluation
17 | #[derive(Clone, Debug, Default, PartialEq, Eq)]
18 | pub struct IOPProof {
19 | pub point: Vec,
20 | pub proofs: Vec>,
21 | }
22 |
23 | /// A message from the prover to the verifier at a given round
24 | /// is a list of evaluations.
25 | #[derive(Clone, Debug, Default, PartialEq, Eq, CanonicalSerialize)]
26 | pub struct IOPProverMessage {
27 | pub(crate) evaluations: Vec,
28 | }
29 |
30 | /// Prover State of a PolyIOP.
31 | #[derive(Debug)]
32 | pub struct IOPProverState {
33 | /// sampled randomness given by the verifier
34 | pub challenges: Vec,
35 | /// the current round number
36 | pub(crate) round: usize,
37 | /// pointer to the virtual polynomial
38 | pub(crate) poly: VirtualPolynomial,
39 | /// points with precomputed barycentric weights for extrapolating smaller
40 | /// degree uni-polys to `max_degree + 1` evaluations.
41 | pub(crate) extrapolation_aux: Vec<(Vec, Vec)>,
42 | }
43 |
44 | /// Verifier State of a PolyIOP.
45 | #[derive(Debug)]
46 | pub struct IOPVerifierState {
47 | pub(crate) round: usize,
48 | pub(crate) num_vars: usize,
49 | pub(crate) max_degree: usize,
50 | pub(crate) finished: bool,
51 | /// a list storing the univariate polynomial in evaluation form sent by the
52 | /// prover at each round
53 | pub(crate) polynomials_received: Vec>,
54 | /// a list storing the randomness sampled by the verifier at each round
55 | pub(crate) challenges: Vec,
56 | }
57 |
--------------------------------------------------------------------------------
/src/util/hypercube.rs:
--------------------------------------------------------------------------------
1 | /// A boolean hypercube structure to create an ergonomic evaluation domain
2 | use crate::espresso::virtual_polynomial::bit_decompose;
3 | use ark_ff::PrimeField;
4 |
5 | use std::marker::PhantomData;
6 |
// NOTE(review): generic parameters were stripped during extraction — this type
// is `BooleanHypercube<F: PrimeField>` with `_f: PhantomData<F>`, iterator item
// `Vec<F>`, etc. Verify against the original repository before editing.
7 | /// A boolean hypercube that returns its points as an iterator
8 | /// If you iterate on it for 3 variables you will get points in little-endian order:
9 | /// 000 -> 100 -> 010 -> 110 -> 001 -> 101 -> 011 -> 111
10 | #[derive(Debug)]
11 | pub struct BooleanHypercube {
12 | _f: PhantomData,
13 | n_vars: usize,
14 | current: u64,
15 | max: u64,
16 | }
17 |
18 | impl BooleanHypercube {
// Construct the hypercube {0,1}^n_vars; `max` = 2^n_vars is the point count.
19 | pub fn new(n_vars: usize) -> Self {
20 | BooleanHypercube:: {
21 | _f: PhantomData::,
22 | n_vars,
23 | current: 0,
// NOTE(review): `2_u32.pow(n_vars as u32)` overflows (panics in debug builds)
// for n_vars >= 32; fine for the small domains used here, but worth an assert.
24 | max: 2_u32.pow(n_vars as u32) as u64,
25 | }
26 | }
27 |
28 | /// returns the entry at given i (which is the little-endian bit representation of i)
29 | pub fn at_i(&self, i: usize) -> Vec {
30 | assert!(i < self.max as usize);
31 | let bits = bit_decompose((i) as u64, self.n_vars);
32 | bits.iter().map(|&x| F::from(x)).collect()
33 | }
34 | }
35 |
36 | impl Iterator for BooleanHypercube {
37 | type Item = Vec;
38 |
// Yields exactly 2^n_vars points: the point for `current` is built first, then
// `current` is advanced, and the bound check rejects only once current > max —
// so indices 0..max-1 are all produced before None.
39 | fn next(&mut self) -> Option {
40 | let bits = bit_decompose(self.current, self.n_vars);
41 | let result: Vec = bits.iter().map(|&x| F::from(x)).collect();
42 | self.current += 1;
43 |
44 | if self.current > self.max {
45 | return None;
46 | }
47 |
48 | Some(result)
49 | }
50 | }
51 |
52 | #[cfg(test)]
53 | mod test {
54 | use super::*;
55 | use ark_bls12_381::Fr;
56 | use ark_ff::One;
57 | use ark_ff::Zero;
58 |
// Checks the documented little-endian ordering for 3 variables.
59 | #[test]
60 | fn test_hypercube() {
61 | let expected_results = vec![
62 | vec![Fr::zero(), Fr::zero(), Fr::zero()],
63 | vec![Fr::one(), Fr::zero(), Fr::zero()],
64 | vec![Fr::zero(), Fr::one(), Fr::zero()],
65 | vec![Fr::one(), Fr::one(), Fr::zero()],
66 | vec![Fr::zero(), Fr::zero(), Fr::one()],
67 | vec![Fr::one(), Fr::zero(), Fr::one()],
68 | vec![Fr::zero(), Fr::one(), Fr::one()],
69 | vec![Fr::one(), Fr::one(), Fr::one()],
70 | ];
71 |
72 | for (i, point) in BooleanHypercube::::new(3).enumerate() {
73 | assert_eq!(point, expected_results[i], "Failed at iteration {}", i);
74 | }
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/src/util/vec.rs:
--------------------------------------------------------------------------------
1 | /// Some basic utilities
2 | ///
3 | /// Stole a bunch of code from Alex in https://github.com/alex-ozdemir/bulletproofs
4 | /// and wrote some lame tests for it
5 | use ark_ff::PrimeField;
6 | use ark_std::cfg_iter;
7 |
8 | use rayon::iter::IndexedParallelIterator;
9 | use rayon::iter::IntoParallelRefIterator;
10 | use rayon::iter::ParallelIterator;
11 |
// NOTE(review): generic parameters were stripped during extraction — e.g.
// `Matrix<F> = Vec<Vec<F>>`, `hadamard<F: PrimeField>(a: &Vec<F>, ...)`.
12 | /// A dense, row-major representation of constraint matrices
12b| /// (each inner Vec is one full row of field elements).
13 | pub type Matrix = Vec>;
14 |
15 | /// Hadamard product between two vectors
// (`cfg_iter!` parallelizes with rayon when the `parallel` feature is on.)
16 | pub fn hadamard(a: &Vec, b: &Vec) -> Vec {
17 | cfg_iter!(a).zip(b).map(|(a, b)| *a * b).collect()
18 | }
19 |
20 | // Multiply matrix by vector
21 | pub fn mat_vec_mul(mat: &Matrix, vec: &[F]) -> Vec {
22 | // matrices are lists of rows
23 | // each row is a dense list of field values, indexed by column
24 | let mut result = vec![F::zero(); mat.len()];
25 | for (r, mat_row) in mat.iter().enumerate() {
26 | for (c, mat_val) in mat_row.iter().enumerate() {
// Guard: every row must be no wider than `vec` (checked per-entry).
27 | assert!(c < vec.len());
28 | result[r] += *mat_val * vec[c];
29 | }
30 | }
31 | result
32 | }
33 |
34 | // Multiply vector by scalar
35 | pub fn vec_scalar_mul(vec: &[F], c: &F) -> Vec {
36 | let mut result = vec![F::zero(); vec.len()];
37 | for (i, a) in vec.iter().enumerate() {
38 | result[i] = *a * c;
39 | }
40 | result
41 | }
42 |
43 | // Add two vectors
// Panics if the two inputs have different lengths.
44 | pub fn vec_add(vec_a: &[F], vec_b: &[F]) -> Vec {
45 | assert_eq!(vec_a.len(), vec_b.len());
46 |
47 | let mut result = vec![F::zero(); vec_a.len()];
48 | for i in 0..vec_a.len() {
49 | result[i] = vec_a[i] + vec_b[i];
50 | }
51 | result
52 | }
53 |
// Lift an integer matrix into the field (test helper).
54 | pub fn to_F_matrix(M: Vec>) -> Vec> {
55 | let mut R: Vec> = vec![Vec::new(); M.len()];
56 | for i in 0..M.len() {
57 | R[i] = vec![F::zero(); M[i].len()];
58 | for j in 0..M[i].len() {
59 | R[i][j] = F::from(M[i][j] as u64);
60 | }
61 | }
62 | R
63 | }
64 |
// Lift an integer vector into the field (test helper).
65 | pub fn to_F_vec(z: Vec) -> Vec {
66 | let mut r: Vec = vec![F::zero(); z.len()];
67 | for i in 0..z.len() {
68 | r[i] = F::from(z[i] as u64);
69 | }
70 | r
71 | }
72 |
73 | #[cfg(test)]
74 | mod test {
75 | use super::*;
76 | use ark_bls12_381::Fr;
77 |
78 | #[test]
79 | fn test_hadamard() -> () {
80 | let A = vec![
81 | Fr::from(1u64),
82 | Fr::from(2u64),
83 | Fr::from(3u64),
84 | Fr::from(4u64),
85 | Fr::from(5u64),
86 | Fr::from(6u64),
87 | ];
88 |
89 | let B = vec![
90 | Fr::from(6u64),
91 | Fr::from(5u64),
92 | Fr::from(4u64),
93 | Fr::from(3u64),
94 | Fr::from(2u64),
95 | Fr::from(1u64),
96 | ];
97 |
98 | let C = hadamard(&A, &B);
99 | assert_eq!(
100 | C,
101 | vec![
102 | Fr::from(6u64),
103 | Fr::from(10u64),
104 | Fr::from(12u64),
105 | Fr::from(12u64),
106 | Fr::from(10u64),
107 | Fr::from(6u64)
108 | ]
109 | );
110 | }
111 |
112 | #[test]
113 | fn test_mat_vec_mul() -> () {
114 | let A = vec![
115 | vec![Fr::from(2u64), Fr::from(3u64), Fr::from(4u64)],
116 | vec![Fr::from(4u64), Fr::from(11u64), Fr::from(14u64)],
117 | vec![Fr::from(2u64), Fr::from(8u64), Fr::from(17u64)],
118 | ];
119 | let v = vec![Fr::from(19u64), Fr::from(55u64), Fr::from(50u64)];
120 |
121 | let result = mat_vec_mul(&A, &v);
122 | assert_eq!(
123 | result,
124 | vec![Fr::from(403u64), Fr::from(1381u64), Fr::from(1328u64)]
125 | );
126 |
127 | assert_eq!(
128 | vec_scalar_mul(&result, &Fr::from(2u64)),
129 | vec![Fr::from(806u64), Fr::from(2762u64), Fr::from(2656u64)]
130 | );
131 | }
132 | }
133 |
--------------------------------------------------------------------------------
/src/ccs/pedersen.rs:
--------------------------------------------------------------------------------
1 | use ark_ec::CurveGroup;
2 |
3 | use crate::util::vec::{vec_add, vec_scalar_mul};
4 | use transcript::IOPTranscript;
5 |
6 | use ark_std::{rand::Rng, UniformRand};
7 |
8 | use std::marker::PhantomData;
9 |
// NOTE(review): generic parameters were stripped during extraction — these are
// `Proof<C: CurveGroup>`, `Params<C>`, `Commitment<C>(pub C)`, `Pedersen<C>`,
// and `IOPTranscript<C::ScalarField>`. Also, `¶ms` below is a mis-encoding of
// `&params` (the `&para` prefix was converted to a pilcrow). Verify upstream.
10 | #[derive(Clone, Debug)]
11 | pub struct Proof {
12 | R: C,
13 | u_: Vec,
14 | ru_: C::ScalarField,
15 | }
16 |
17 | #[derive(Clone, Debug)]
18 | pub struct Params {
19 | h: C,
20 | pub generators: Vec, // Affine for the MSM
21 | }
22 |
23 | #[derive(Debug, Clone, Eq, PartialEq)]
24 | pub struct Commitment(pub C);
25 |
26 | #[derive(Clone, Debug)]
27 | pub struct Pedersen {
28 | _c: PhantomData,
29 | }
30 |
31 | impl Pedersen {
// Sample Pedersen parameters: blinding base `h` and `max` commitment generators.
32 | pub fn new_params(rng: &mut R, max: usize) -> Params {
33 | let h_scalar = C::ScalarField::rand(rng);
34 | let g: C = C::generator();
// NOTE(review): `vec![expr; max]` evaluates `rand` ONCE and clones the same
// point `max` times, so every generator is identical — the commitment then
// depends only on sum(v) and is not binding per-coordinate. Each generator
// should be sampled independently, e.g. `(0..max).map(|_| C::Affine::rand(rng)).collect()`.
// TODO confirm against the upstream repository.
35 | let generators: Vec = vec![C::Affine::rand(rng); max];
36 | Params {
37 | h: g.mul(h_scalar),
38 | generators,
39 | }
40 | }
41 |
// Commit to vector `v` with blinding `r`: cm = h^r * prod g_i^{v_i} (as an MSM).
42 | pub fn commit(
43 | params: &Params,
44 | v: &[C::ScalarField],
45 | r: &C::ScalarField, // random value is provided, in order to be choosen by other parts of the protocol
46 | ) -> Commitment {
47 | let msm = C::msm(¶ms.generators, v).unwrap();
48 |
49 | let cm = params.h.mul(r) + msm;
50 | Commitment(cm)
51 | }
52 |
// Sigma-protocol style proof of opening, made non-interactive via the transcript.
53 | pub fn prove(
54 | params: &Params,
55 | transcript: &mut IOPTranscript,
56 | cm: &Commitment,
57 | v: &Vec,
58 | r: &C::ScalarField,
59 | ) -> Proof {
60 | let r1 = transcript.get_and_append_challenge(b"r1").unwrap();
61 | let d = transcript
62 | .get_and_append_challenge_vectors(b"d", v.len())
63 | .unwrap();
64 |
65 | let msm = C::msm(¶ms.generators, &d).unwrap();
66 | let R: C = params.h.mul(r1) + msm;
67 |
68 | transcript
69 | .append_serializable_element(b"cm", &cm.0)
70 | .unwrap();
71 | transcript.append_serializable_element(b"R", &R).unwrap();
72 | let e = transcript.get_and_append_challenge(b"e").unwrap();
73 |
// Response: u_ = e*v + d (element-wise), ru_ = e*r + r1.
74 | let u_ = vec_add(&vec_scalar_mul(v, &e), &d);
75 | let ru_ = e * r + r1;
76 |
77 | Proof { R, u_, ru_ }
78 | }
// Verify the opening proof: checks R + e*cm == h^{ru_} * prod g_i^{u_i}.
// The verifier must replay the prover's transcript operations exactly.
79 | pub fn verify(
80 | params: &Params,
81 | transcript: &mut IOPTranscript,
82 | cm: Commitment,
83 | proof: Proof,
84 | ) -> bool {
85 | // r1, d just to match Prover's transcript
86 | transcript.get_and_append_challenge(b"r1").unwrap(); // r_1
87 | transcript
88 | .get_and_append_challenge_vectors(b"d", proof.u_.len())
89 | .unwrap(); // d
90 |
91 | transcript
92 | .append_serializable_element(b"cm", &cm.0)
93 | .unwrap();
94 | transcript
95 | .append_serializable_element(b"R", &proof.R)
96 | .unwrap();
97 | let e = transcript.get_and_append_challenge(b"e").unwrap();
98 | let lhs = proof.R + cm.0.mul(e);
99 |
100 | let msm = C::msm(¶ms.generators, &proof.u_).unwrap();
101 | let rhs = params.h.mul(proof.ru_) + msm;
102 | if lhs != rhs {
103 | return false;
104 | }
105 | true
106 | }
107 | }
108 |
109 | #[cfg(test)]
110 | mod tests {
111 | use super::*;
112 | use ark_bls12_381::{Fr, G1Projective};
113 |
114 | #[test]
115 | fn test_pedersen_commitment() {
116 | let mut rng = ark_std::test_rng();
117 |
118 | const n: usize = 10;
119 | // setup params
120 | let params = Pedersen::new_params(&mut rng, n);
121 |
122 | // init Prover's transcript
123 | let mut transcript_p = IOPTranscript::::new(b"pedersen_test");
124 | transcript_p.append_message(b"init", b"init").unwrap();
125 | // init Verifier's transcript
126 | let mut transcript_v = IOPTranscript::::new(b"pedersen_test");
127 | transcript_v.append_message(b"init", b"init").unwrap();
128 |
// NOTE(review): same `vec![expr; n]` pattern — all n entries of `v` are the
// SAME random element, which weakens this test vector.
129 | let v: Vec = vec![Fr::rand(&mut rng); n];
130 | let r: Fr = Fr::rand(&mut rng);
131 |
132 | let cm = Pedersen::::commit(¶ms, &v, &r);
133 | let proof = Pedersen::::prove(¶ms, &mut transcript_p, &cm, &v, &r);
134 | let v = Pedersen::::verify(¶ms, &mut transcript_v, cm, proof);
135 | assert!(v);
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/src/ccs/ccs.rs:
--------------------------------------------------------------------------------
1 | use ark_ec::CurveGroup;
2 | use ark_std::{One, Zero};
3 |
4 | // XXX use thiserror everywhere? espresso doesnt use it...
5 | use thiserror::Error;
6 |
7 | use crate::util::vec::*;
8 |
// NOTE(review): generic parameters were stripped during extraction — e.g.
// `CCS<C: CurveGroup>`, `M: Vec<Matrix<C::ScalarField>>`, `S: Vec<Vec<usize>>`,
// `c: Vec<C::ScalarField>`, and in the tests `get_test_ccs<G: CurveGroup>()`.
9 | #[derive(Error, Debug)]
10 | pub enum CCSError {
11 | #[error("Relation not satisfied")]
12 | NotSatisfied,
13 | }
14 |
15 | /// A CCS structure
16 | #[derive(Debug, Clone, Eq, PartialEq)]
17 | pub struct CCS {
18 | // m: number of rows in M_i (such that M_i \in F^{m, n})
19 | pub m: usize,
20 | // n = |z|, number of columns in M_i
21 | pub n: usize,
22 | // l = |io|, size of public input/output
23 | pub l: usize,
24 | // t = |M|, number of matrices
25 | pub t: usize,
26 | // q = |c| = |S|, number of multisets
27 | pub q: usize,
28 | // d: max degree in each variable
29 | pub d: usize,
30 | // s = log(m), dimension of x
31 | pub s: usize,
32 | // s_prime = log(n), dimension of y
33 | pub s_prime: usize,
34 |
35 | // Vector of matrices
36 | pub M: Vec>,
37 | // Vector of multisets
38 | pub S: Vec>,
39 | // Vector of coefficients
40 | pub c: Vec,
41 | }
42 |
43 | impl CCS {
44 | /// Check that a CCS structure is satisfied by a z vector.
45 | /// This works with matrices. It doesn't do any polynomial stuff
46 | /// Only for testing
// Computes sum_i c_i * (hadamard over j in S_i of M_j * z) and requires the
// resulting length-m vector to be all zeros.
47 | pub fn check_relation(&self, z: &[C::ScalarField]) -> Result<(), CCSError> {
48 | let mut result = vec![C::ScalarField::zero(); self.m];
49 |
50 | for i in 0..self.q {
51 | // XXX This can be done more neatly with a .fold() or .reduce()
52 |
53 | // Extract the needed M_j matrices out of S_i
54 | let vec_M_j: Vec<&Matrix> =
55 | self.S[i].iter().map(|j| &self.M[*j]).collect();
56 |
57 | // Complete the hadamard chain
58 | let mut hadamard_result = vec![C::ScalarField::one(); self.m];
59 | for M_j in vec_M_j.into_iter() {
60 | hadamard_result = hadamard(&hadamard_result, &mat_vec_mul(M_j, z));
61 | }
62 |
63 | // Multiply by the coefficient of this step
64 | let c_M_j_z = vec_scalar_mul(&hadamard_result, &self.c[i]);
65 |
66 | // Add it to the final vector
67 | result = vec_add(&result, &c_M_j_z);
68 | }
69 |
70 | // Make sure the final vector is all zeroes
71 | for e in result {
72 | if !e.is_zero() {
73 | return Err(CCSError::NotSatisfied);
74 | }
75 | }
76 |
77 | Ok(())
78 | }
79 | }
80 |
81 | #[cfg(test)]
82 | pub mod test {
83 | use super::*;
84 | use ark_bls12_381::G1Projective;
85 | use ark_ff::PrimeField;
86 | use ark_std::log2;
87 | use std::ops::Neg;
88 |
89 | /// Converts the R1CS structure to the CCS structure
// R1CS (Az ∘ Bz = Cz) maps to CCS with S = {{0,1},{2}}, c = (1, -1):
// 1 * (Az ∘ Bz) + (-1) * (Cz) = 0.
// NOTE(review): as shown, the matrix argument `C` collides with `C::ScalarField`
// below — the stripped generic parameter presumably had a different name.
90 | fn CCS_from_r1cs(
91 | A: Vec>,
92 | B: Vec>,
93 | C: Vec>,
94 | io_len: usize,
95 | ) -> CCS {
96 | let m = A.len();
97 | let n = A[0].len();
98 | CCS {
99 | m,
100 | n,
101 | l: io_len,
102 | s: log2(m) as usize,
103 | s_prime: log2(n) as usize,
104 | t: 3,
105 | q: 2,
106 | d: 2,
107 |
108 | S: vec![vec![0, 1], vec![2]],
109 | c: vec![C::ScalarField::one(), C::ScalarField::one().neg()],
110 | M: vec![A, B, C],
111 | }
112 | }
113 |
114 | /// Return a CCS circuit that implements the Vitalik `x^3 + x + 5 == 35` (from
115 | /// https://www.vitalik.ca/general/2016/12/10/qap.html )
116 | #[cfg(test)]
117 | pub fn get_test_ccs() -> CCS {
118 | let A = to_F_matrix(vec![
119 | vec![0, 1, 0, 0, 0, 0],
120 | vec![0, 0, 0, 1, 0, 0],
121 | vec![0, 1, 0, 0, 1, 0],
122 | vec![5, 0, 0, 0, 0, 1],
123 | ]);
124 | let B = to_F_matrix(vec![
125 | vec![0, 1, 0, 0, 0, 0],
126 | vec![0, 1, 0, 0, 0, 0],
127 | vec![1, 0, 0, 0, 0, 0],
128 | vec![1, 0, 0, 0, 0, 0],
129 | ]);
130 | let C = to_F_matrix(vec![
131 | vec![0, 0, 0, 1, 0, 0],
132 | vec![0, 0, 0, 0, 1, 0],
133 | vec![0, 0, 0, 0, 0, 1],
134 | vec![0, 0, 1, 0, 0, 0],
135 | ]);
136 | CCS_from_r1cs(A, B, C, 1)
137 | }
138 |
139 | /// Computes the z vector for the given input for Vitalik's equation.
140 | #[cfg(test)]
141 | pub fn get_test_z(input: usize) -> Vec {
142 | // z = (1, io, w)
143 | to_F_vec(vec![
144 | 1,
145 | input,
146 | input * input * input + input + 5, // x^3 + x + 5
147 | input * input, // x^2
148 | input * input * input, // x^2 * x
149 | input * input * input + input, // x^3 + x
150 | ])
151 | }
152 |
153 | /// Test that a basic CCS relation can be satisfied
154 | #[test]
155 | fn test_ccs_relation() -> () {
156 | let ccs = get_test_ccs::();
157 | let z = get_test_z(3);
158 |
159 | ccs.check_relation(&z).unwrap();
160 | }
161 | }
162 |
--------------------------------------------------------------------------------
/src/util/mle.rs:
--------------------------------------------------------------------------------
1 | /// Some basic MLE utilities
2 | use ark_ff::PrimeField;
3 | use ark_poly::DenseMultilinearExtension;
4 | use ark_std::log2;
5 |
6 | use super::vec::Matrix;
7 |
// NOTE(review): generic parameters were stripped during extraction — e.g.
// `pad_matrix<F: PrimeField>(matrix: &Matrix<F>) -> Matrix<F>`,
// `DenseMultilinearExtension<F>`, `to_F_matrix::<Fr>`, etc.
8 | /// Pad matrix so that its columns and rows are powers of two
// NOTE(review): indexes `matrix[0]` — panics on an empty matrix. Presumably all
// callers pass non-empty matrices; confirm.
9 | fn pad_matrix(matrix: &Matrix) -> Matrix {
10 | // Find the desired dimensions after padding
11 | let rows = matrix.len();
12 | let cols = matrix[0].len();
13 | let padded_rows = rows.next_power_of_two();
14 | let padded_cols = cols.next_power_of_two();
15 |
16 | // Create a new padded matrix
17 | // XXX inefficient. take a mutable matrix as input instead?
18 | let mut padded_matrix = vec![vec![F::zero(); padded_cols]; padded_rows];
19 |
20 | // Copy values from the input matrix to the padded matrix
21 | for (i, row) in matrix.iter().enumerate() {
22 | for (j, &value) in row.iter().enumerate() {
23 | padded_matrix[i][j] = value;
24 | }
25 | }
26 |
27 | padded_matrix
28 | }
29 |
// Turn an m x n matrix into an MLE over s + s' variables (row index + column
// index), zero-padding both dimensions up to powers of two first.
// `ark_std::log2` rounds up, so n_vars matches the padded dimensions.
30 | // XXX shouldn't consume the matrix
31 | pub fn matrix_to_mle(matrix: Matrix) -> DenseMultilinearExtension {
32 | let n_vars: usize = (log2(matrix.len()) + log2(matrix[0].len())) as usize; // n_vars = s + s'
33 |
34 | // Matrices might need to get padded before turned into an MLE
35 | let padded_matrix = pad_matrix(&matrix);
36 |
37 | // Flatten matrix into a vector
38 | let M_evals: Vec = padded_matrix.into_iter().flatten().collect();
39 |
40 | vec_to_mle(n_vars, &M_evals)
41 | }
42 |
// Turn a vector into an MLE over n_vars variables, zero-padding to 2^n_vars.
// NOTE(review): `(1 << n_vars) - v.len()` underflows (panics) if v is longer
// than 2^n_vars — callers must pass a consistent n_vars; confirm.
43 | pub fn vec_to_mle(n_vars: usize, v: &Vec) -> DenseMultilinearExtension {
44 | // Pad to 2^n_vars
45 | let v_padded: Vec = [
46 | v.clone(),
47 | std::iter::repeat(F::zero())
48 | .take((1 << n_vars) - v.len())
49 | .collect(),
50 | ]
51 | .concat();
52 | DenseMultilinearExtension::::from_evaluations_vec(n_vars, v_padded)
53 | }
54 |
55 | #[cfg(test)]
56 | mod tests {
57 | use super::*;
58 | use crate::{
59 | ccs::ccs::test::get_test_z,
60 | espresso::multilinear_polynomial::fix_variables,
61 | espresso::multilinear_polynomial::testing_code::fix_last_variables,
62 | util::{hypercube::BooleanHypercube, vec::to_F_matrix},
63 | };
64 | use ark_poly::MultilinearExtension;
65 | use ark_std::Zero;
66 |
67 | use ark_bls12_381::Fr;
68 |
69 | #[test]
70 | fn test_matrix_to_mle() {
71 | let A = to_F_matrix::(vec![
72 | vec![2, 3, 4, 4],
73 | vec![4, 11, 14, 14],
74 | vec![2, 8, 17, 17],
75 | vec![420, 4, 2, 0],
76 | ]);
77 |
78 | let A_mle = matrix_to_mle(A);
79 | assert_eq!(A_mle.evaluations.len(), 16); // 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals
80 |
81 | let A = to_F_matrix::(vec![
82 | vec![2, 3, 4, 4, 1],
83 | vec![4, 11, 14, 14, 2],
84 | vec![2, 8, 17, 17, 3],
85 | vec![420, 4, 2, 0, 4],
86 | vec![420, 4, 2, 0, 5],
87 | ]);
88 | let A_mle = matrix_to_mle(A.clone());
89 | assert_eq!(A_mle.evaluations.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals
90 |
91 | // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values
92 | let bhc = BooleanHypercube::new(A_mle.num_vars);
93 | let A_padded = pad_matrix(&A);
94 | for (i, A_i) in A_padded.iter().enumerate() {
95 | for (j, _) in A_i.iter().enumerate() {
96 | let s_i_j = bhc.at_i(i * A_i.len() + j);
97 | assert_eq!(A_mle.evaluate(&s_i_j).unwrap(), A_padded[i][j]);
98 | }
99 | }
100 | }
101 |
102 | #[test]
103 | fn test_vec_to_mle() {
104 | let z = get_test_z::(3);
105 | let n_vars = 3;
106 | let z_mle = vec_to_mle(n_vars, &z);
107 |
108 | // check that the z_mle evaluated over the boolean hypercube equals the vec z_i values
109 | let bhc = BooleanHypercube::new(z_mle.num_vars);
110 | for i in 0..z.len() {
111 | let s_i = bhc.at_i(i);
112 | assert_eq!(z_mle.evaluate(&s_i).unwrap(), z[i]);
113 | }
114 | // for the rest of elements of the boolean hypercube, expect it to evaluate to zero
115 | for i in (z.len())..(1 << z_mle.num_vars) {
116 | let s_i = bhc.at_i(i);
117 | assert_eq!(z_mle.evaluate(&s_i).unwrap(), Fr::zero());
118 | }
119 | }
120 |
121 | #[test]
122 | fn test_fix_variables() {
123 | let A = to_F_matrix(vec![
124 | vec![2, 3, 4, 4],
125 | vec![4, 11, 14, 14],
126 | vec![2, 8, 17, 17],
127 | vec![420, 4, 2, 0],
128 | ]);
129 |
130 | let A_mle = matrix_to_mle(A.clone());
131 | let bhc = BooleanHypercube::new(2);
132 | for (i, y) in bhc.enumerate() {
133 | // First check that the arkworks and espresso funcs match
134 | let expected_fix_left = A_mle.fix_variables(&y); // try arkworks fix_variables
135 | let fix_left = fix_variables(&A_mle, &y); // try espresso fix_variables
136 |
137 | assert_eq!(fix_left, expected_fix_left);
138 |
139 | // Check that fixing first variables pins down a column
140 | // i.e. fixing x to 0 will return the first column
141 | // fixing x to 1 will return the second column etc.
142 | let column_i: Vec = A.clone().iter().map(|x| x[i]).collect();
143 | assert_eq!(fix_left.evaluations, column_i);
144 |
145 | // Now check that fixing last variables pins down a row
146 | // i.e. fixing y to 0 will return the first row
147 | // fixing y to 1 will return the second row etc.
148 | let row_i: Vec = A[i].clone();
149 | let fix_right = fix_last_variables(&A_mle, &y);
150 | assert_eq!(fix_right.evaluations, row_i);
151 | }
152 | }
153 | }
154 |
--------------------------------------------------------------------------------
/src/ccs/util.rs:
--------------------------------------------------------------------------------
1 | use ark_ff::PrimeField;
2 | use ark_poly::DenseMultilinearExtension;
3 | use ark_poly::MultilinearExtension;
4 | use std::ops::Add;
5 |
6 | use crate::espresso::multilinear_polynomial::fix_variables;
7 | use crate::espresso::multilinear_polynomial::scalar_mul;
8 |
9 | use crate::util::hypercube::BooleanHypercube;
10 | use crate::util::mle::matrix_to_mle;
11 | use crate::util::mle::vec_to_mle;
12 | use crate::util::vec::Matrix;
13 |
// NOTE(review): generic parameters were stripped during extraction — e.g.
// `compute_all_sum_Mz_evals<F: PrimeField>(vec_M: &[Matrix<F>], z: &Vec<F>, ...)`
// and `DenseMultilinearExtension<F>` throughout.
14 | /// Return a vector of evaluations p_j(r) = \sum_{y \in {0,1}^s'} M_j(r, y) * z(y)
15 | /// for all j values in 0..self.t
16 | pub fn compute_all_sum_Mz_evals(
17 | vec_M: &[Matrix],
18 | z: &Vec,
19 | r: &[F],
20 | s_prime: usize,
21 | ) -> Vec {
22 | // Convert z to MLE
23 | let z_y_mle = vec_to_mle(s_prime, z);
24 | // Convert all matrices to MLE
25 | let M_x_y_mle: Vec> =
26 | vec_M.iter().cloned().map(matrix_to_mle).collect();
27 |
28 | let mut v = Vec::with_capacity(M_x_y_mle.len());
29 | for M_i in M_x_y_mle {
30 | let sum_Mz = compute_sum_Mz(M_i, &z_y_mle, s_prime);
31 | let v_i = sum_Mz.evaluate(r).unwrap();
32 | v.push(v_i);
33 | }
34 | v
35 | }
36 |
37 | /// Return the multilinear polynomial p(x) = \sum_{y \in {0,1}^s'} M_j(x, y) * z(y)
38 | pub fn compute_sum_Mz(
39 | M_j: DenseMultilinearExtension,
40 | z: &DenseMultilinearExtension,
41 | s_prime: usize,
42 | ) -> DenseMultilinearExtension {
// Accumulator: the zero MLE over the remaining s = num_vars - s_prime variables.
// NOTE(review): the evaluation vector is sized `M_j.evaluations.len()` (2^{s+s'})
// while `num_vars` is only s — a zero MLE over s variables should have 2^s
// entries (`1 << (M_j.num_vars - s_prime)`). Verify against the `Add` impl used;
// this looks like either a latent bug or an extraction artifact.
43 | let mut sum_Mz = DenseMultilinearExtension {
44 | evaluations: vec![F::zero(); M_j.evaluations.len()],
45 | num_vars: M_j.num_vars - s_prime,
46 | };
47 |
// Sum M_j(x, y) * z(y) over every boolean assignment y of the s' variables.
48 | let bhc = BooleanHypercube::new(s_prime);
49 | for y in bhc.into_iter() {
50 | // In a slightly counter-intuitive fashion fix_variables() fixes the right-most variables of the polynomial. So
51 | // for a polynomial M(x,y) and a random field element r, if we do fix_variables(M,r) we will get M(x,r).
52 | let M_j_y = fix_variables(&M_j, &y);
53 | let z_y = z.evaluate(&y).unwrap();
54 | let M_j_z = scalar_mul(&M_j_y, &z_y);
55 | sum_Mz = sum_Mz.add(M_j_z);
56 | }
57 | sum_Mz
58 | }
59 |
#[cfg(test)]
pub mod test {
    use super::*;

    use ark_bls12_381::{Fr, G1Projective};
    use ark_std::test_rng;
    use ark_std::One;
    use ark_std::UniformRand;
    use ark_std::Zero;

    use crate::ccs::ccs::test::{get_test_ccs, get_test_z};
    use crate::espresso::multilinear_polynomial::testing_code::fix_last_variables;
    use crate::espresso::virtual_polynomial::eq_eval;

    use crate::ccs::util::compute_sum_Mz;

    #[test]
    fn test_compute_sum_Mz_over_boolean_hypercube() {
        let ccs = get_test_ccs::<G1Projective>();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();
        let z_mle = vec_to_mle(ccs.s_prime, &z);

        // check that evaluating over all the values x over the boolean hypercube, the result of
        // the next for loop is equal to 0
        for x in BooleanHypercube::new(ccs.s) {
            let mut r = Fr::zero();
            for i in 0..ccs.q {
                let mut Sj_prod = Fr::one();
                for j in ccs.S[i].clone() {
                    let M_j = matrix_to_mle(ccs.M[j].clone());
                    let sum_Mz = compute_sum_Mz(M_j, &z_mle, ccs.s_prime);
                    let sum_Mz_x = sum_Mz.evaluate(&x).unwrap();
                    Sj_prod *= sum_Mz_x;
                }
                r += Sj_prod * ccs.c[i];
            }
            assert_eq!(r, Fr::zero());
        }
    }

    /// Given M(x,y) matrix and a random field element `r`, test that ~M(r,y) is an s'-variable polynomial which
    /// compresses every column j of the M(x,y) matrix by performing a random linear combination between the elements
    /// of the column and the values eq_i(r) where i is the row of that element
    ///
    /// For example, for matrix M:
    ///
    /// [2, 3, 4, 4
    ///  4, 4, 3, 2
    ///  2, 8, 9, 2
    ///  9, 4, 2, 0]
    ///
    /// The polynomial ~M(r,y) is a polynomial in F^2 which evaluates to the following values in the hypercube:
    /// - M(00) = 2*eq_00(r) + 4*eq_10(r) + 2*eq_01(r) + 9*eq_11(r)
    /// - M(10) = 3*eq_00(r) + 4*eq_10(r) + 8*eq_01(r) + 4*eq_11(r)
    /// - M(01) = 4*eq_00(r) + 3*eq_10(r) + 9*eq_01(r) + 2*eq_11(r)
    /// - M(11) = 4*eq_00(r) + 2*eq_10(r) + 2*eq_01(r) + 0*eq_11(r)
    ///
    /// This is used by Hypernova in LCCCS to perform a verifier-chosen random linear combination between the columns
    /// of the matrix and the z vector. This technique is also used extensively in "An Algebraic Framework for
    /// Universal and Updatable SNARKs".
    #[test]
    fn test_compute_M_r_y_compression() {
        let mut rng = test_rng();

        // s = 2, s' = 3
        let ccs = get_test_ccs::<G1Projective>();

        let M = ccs.M[0].clone();
        let M_mle = matrix_to_mle(M.clone());

        // Fix the polynomial ~M(r,y)
        let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        let M_r_y = fix_last_variables(&M_mle, &r);

        // Now let's compute M_r_y the other way around
        for j in 0..M[0].len() {
            // Go over every column of M
            let column_j: Vec<Fr> = M.clone().iter().map(|x| x[j]).collect();

            // and perform the random lincomb between the elements of the column and eq_i(r)
            let rlc = BooleanHypercube::new(ccs.s)
                .enumerate()
                .map(|(i, x)| column_j[i] * eq_eval(&x, &r).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);

            assert_eq!(M_r_y.evaluations[j], rlc);
        }
    }
}
152 |
--------------------------------------------------------------------------------
/src/espresso/multilinear_polynomial.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Espresso Systems (espressosys.com)
2 | // This file is part of the HyperPlonk library.
3 |
4 | // You should have received a copy of the MIT License
5 | // along with the HyperPlonk library. If not, see .
6 |
7 | use ark_ff::Field;
8 | #[cfg(feature = "parallel")]
9 | use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};
10 |
11 | pub use ark_poly::DenseMultilinearExtension;
12 |
13 | pub fn fix_variables(
14 | poly: &DenseMultilinearExtension,
15 | partial_point: &[F],
16 | ) -> DenseMultilinearExtension {
17 | assert!(
18 | partial_point.len() <= poly.num_vars,
19 | "invalid size of partial point"
20 | );
21 | let nv = poly.num_vars;
22 | let mut poly = poly.evaluations.to_vec();
23 | let dim = partial_point.len();
24 | // evaluate single variable of partial point from left to right
25 | for (i, point) in partial_point.iter().enumerate().take(dim) {
26 | poly = fix_one_variable_helper(&poly, nv - i, point);
27 | }
28 |
29 | DenseMultilinearExtension::::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
30 | }
31 |
32 | fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec {
33 | let mut res = vec![F::zero(); 1 << (nv - 1)];
34 |
35 | // evaluate single variable of partial point from left to right
36 | #[cfg(not(feature = "parallel"))]
37 | for i in 0..(1 << (nv - 1)) {
38 | res[i] = data[i] + (data[(i << 1) + 1] - data[i << 1]) * point;
39 | }
40 |
41 | #[cfg(feature = "parallel")]
42 | res.par_iter_mut().enumerate().for_each(|(i, x)| {
43 | *x = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point;
44 | });
45 |
46 | res
47 | }
48 |
49 | pub fn evaluate_no_par(poly: &DenseMultilinearExtension, point: &[F]) -> F {
50 | assert_eq!(poly.num_vars, point.len());
51 | fix_variables_no_par(poly, point).evaluations[0]
52 | }
53 |
54 | fn fix_variables_no_par(
55 | poly: &DenseMultilinearExtension,
56 | partial_point: &[F],
57 | ) -> DenseMultilinearExtension {
58 | assert!(
59 | partial_point.len() <= poly.num_vars,
60 | "invalid size of partial point"
61 | );
62 | let nv = poly.num_vars;
63 | let mut poly = poly.evaluations.to_vec();
64 | let dim = partial_point.len();
65 | // evaluate single variable of partial point from left to right
66 | for i in 1..dim + 1 {
67 | let r = partial_point[i - 1];
68 | for b in 0..(1 << (nv - i)) {
69 | poly[b] = poly[b << 1] + (poly[(b << 1) + 1] - poly[b << 1]) * r;
70 | }
71 | }
72 | DenseMultilinearExtension::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
73 | }
74 |
75 | /// Given multilinear polynomial `p(x)` and s `s`, compute `s*p(x)`
76 | pub fn scalar_mul(
77 | poly: &DenseMultilinearExtension,
78 | s: &F,
79 | ) -> DenseMultilinearExtension {
80 | DenseMultilinearExtension {
81 | evaluations: poly.evaluations.iter().map(|e| *e * s).collect(),
82 | num_vars: poly.num_vars,
83 | }
84 | }
85 |
/// Test-only methods used in virtual_polynomial.rs
#[cfg(test)]
pub mod testing_code {
    use super::*;
    use ark_ff::PrimeField;
    use ark_std::rand::RngCore;
    use ark_std::{end_timer, start_timer};
    use std::sync::Arc;

    /// Like `fix_variables`, but binds the *last* `partial_point.len()`
    /// variables of `poly` instead of the first ones.
    ///
    /// # Panics
    /// Panics if `partial_point` is longer than `poly.num_vars`.
    pub fn fix_last_variables<F: PrimeField>(
        poly: &DenseMultilinearExtension<F>,
        partial_point: &[F],
    ) -> DenseMultilinearExtension<F> {
        assert!(
            partial_point.len() <= poly.num_vars,
            "invalid size of partial point"
        );
        let nv = poly.num_vars;
        let mut poly = poly.evaluations.to_vec();
        let dim = partial_point.len();
        // Bind one variable per round, starting from the last coordinate
        // (hence the .rev()). The former `.take(dim)` was redundant: the
        // iterator already has exactly `dim` items.
        for (i, point) in partial_point.iter().rev().enumerate() {
            poly = fix_last_variable_helper(&poly, nv - i, point);
        }

        DenseMultilinearExtension::<F>::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))])
    }

    /// One round of `fix_last_variables`: folds the top half of the table
    /// into the bottom half as data[b] + (data[b + half] - data[b]) * point.
    fn fix_last_variable_helper<F: PrimeField>(data: &[F], nv: usize, point: &F) -> Vec<F> {
        let half_len = 1 << (nv - 1);
        let mut res = vec![F::zero(); half_len];

        #[cfg(not(feature = "parallel"))]
        for b in 0..half_len {
            res[b] = data[b] + (data[b + half_len] - data[b]) * point;
        }

        #[cfg(feature = "parallel")]
        res.par_iter_mut().enumerate().for_each(|(i, x)| {
            *x = data[i] + (data[i + half_len] - data[i]) * point;
        });

        res
    }

    /// Sample a random list of multilinear polynomials.
    /// Returns
    /// - the list of polynomials,
    /// - its sum of polynomial evaluations over the boolean hypercube.
    ///
    /// (No inner `#[cfg(test)]` needed: the whole module is already
    /// test-gated.)
    pub fn random_mle_list<F: PrimeField, R: RngCore>(
        nv: usize,
        degree: usize,
        rng: &mut R,
    ) -> (Vec<Arc<DenseMultilinearExtension<F>>>, F) {
        let start = start_timer!(|| "sample random mle list");
        let mut multiplicands = Vec::with_capacity(degree);
        for _ in 0..degree {
            multiplicands.push(Vec::with_capacity(1 << nv))
        }
        let mut sum = F::zero();

        // For each hypercube point, sample one value per factor and
        // accumulate the product into the running sum.
        for _ in 0..(1 << nv) {
            let mut product = F::one();

            for e in multiplicands.iter_mut() {
                let val = F::rand(rng);
                e.push(val);
                product *= val;
            }
            sum += product;
        }

        let list = multiplicands
            .into_iter()
            .map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x)))
            .collect();

        end_timer!(start);
        (list, sum)
    }

    /// Build a randomized list of mle-s whose sum is zero.
    /// The first factor is identically zero, so every per-point product —
    /// and therefore the sum over the hypercube — is zero.
    pub fn random_zero_mle_list<F: PrimeField, R: RngCore>(
        nv: usize,
        degree: usize,
        rng: &mut R,
    ) -> Vec<Arc<DenseMultilinearExtension<F>>> {
        let start = start_timer!(|| "sample random zero mle list");

        let mut multiplicands = Vec::with_capacity(degree);
        for _ in 0..degree {
            multiplicands.push(Vec::with_capacity(1 << nv))
        }
        for _ in 0..(1 << nv) {
            multiplicands[0].push(F::zero());
            for e in multiplicands.iter_mut().skip(1) {
                e.push(F::rand(rng));
            }
        }

        let list = multiplicands
            .into_iter()
            .map(|x| Arc::new(DenseMultilinearExtension::from_evaluations_vec(nv, x)))
            .collect();

        end_timer!(start);
        list
    }
}
198 |
--------------------------------------------------------------------------------
/src/ccs/lcccs.rs:
--------------------------------------------------------------------------------
1 | use ark_ec::CurveGroup;
2 | use ark_poly::DenseMultilinearExtension;
3 | use ark_std::{One};
4 | use std::sync::Arc;
5 |
6 | use ark_std::{rand::Rng, UniformRand};
7 |
8 | use crate::ccs::cccs::Witness;
9 | use crate::ccs::ccs::{CCSError, CCS};
10 | use crate::ccs::util::{compute_all_sum_Mz_evals, compute_sum_Mz};
11 |
12 | use crate::ccs::pedersen::{Commitment, Params as PedersenParams, Pedersen};
13 | use crate::espresso::virtual_polynomial::VirtualPolynomial;
14 | use crate::util::mle::matrix_to_mle;
15 | use crate::util::mle::vec_to_mle;
16 |
/// Linearized Committed CCS instance
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LCCCS<C: CurveGroup> {
    // Underlying CCS structure
    pub ccs: CCS<C>,

    // TODO: Further improve the abstractions here. We should not need so many public fields

    // Commitment to witness
    pub C: Commitment<C>,
    // Relaxation factor of z for folded LCCCS (set to 1 by `to_lcccs`)
    pub u: C::ScalarField,
    // Public input/output
    pub x: Vec<C::ScalarField>,
    // Random evaluation point for the v_i (length ccs.s, see `to_lcccs`)
    pub r_x: Vec<C::ScalarField>,
    // Vector of v_i, the evaluations \sum_y M_j(r_x, y) * z(y)
    pub v: Vec<C::ScalarField>,
}
36 |
37 | impl CCS {
38 | /// Compute v_j values of the linearized committed CCS form
39 | /// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y)
40 | fn compute_v_j(&self, z: &[C::ScalarField], r: &[C::ScalarField]) -> Vec {
41 | compute_all_sum_Mz_evals(&self.M, &z.to_vec(), r, self.s_prime)
42 | }
43 |
44 | pub fn to_lcccs(
45 | &self,
46 | rng: &mut R,
47 | pedersen_params: &PedersenParams,
48 | z: &[C::ScalarField],
49 | ) -> (LCCCS, Witness) {
50 | let w: Vec = z[(1 + self.l)..].to_vec();
51 | let r_w = C::ScalarField::rand(rng);
52 | let C = Pedersen::commit(pedersen_params, &w, &r_w);
53 |
54 | let r_x: Vec = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect();
55 | let v = self.compute_v_j(z, &r_x);
56 |
57 | (
58 | LCCCS:: {
59 | ccs: self.clone(),
60 | C,
61 | u: C::ScalarField::one(),
62 | x: z[1..(1 + self.l)].to_vec(),
63 | r_x,
64 | v,
65 | },
66 | Witness:: { w, r_w },
67 | )
68 | }
69 | }
70 |
impl<C: CurveGroup> LCCCS<C> {
    /// Compute all L_j(x) polynomials
    /// Each L_j is built from sum_Mz(x) = \sum_y M_j(x, y) * z(y), wrapped as
    /// a virtual polynomial via `build_f_hat` over the fixed point `r_x`
    /// (presumably an eq(r_x, x) factor — see virtual_polynomial.rs).
    pub fn compute_Ls(&self, z: &Vec<C::ScalarField>) -> Vec<VirtualPolynomial<C::ScalarField>> {
        let z_mle = vec_to_mle(self.ccs.s_prime, z);
        // Convert all matrices to MLE
        let M_x_y_mle: Vec<DenseMultilinearExtension<C::ScalarField>> =
            self.ccs.M.clone().into_iter().map(matrix_to_mle).collect();

        let mut vec_L_j_x = Vec::with_capacity(self.ccs.t);
        for M_j in M_x_y_mle {
            // sum_Mz(x) = \sum_{y in {0,1}^s'} M_j(x, y) * z(y)
            let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.ccs.s_prime);
            let sum_Mz_virtual =
                VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), C::ScalarField::one());
            let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap();
            vec_L_j_x.push(L_j_x);
        }

        vec_L_j_x
    }

    /// Perform the check of the LCCCS instance described at section 4.2
    ///
    /// NOTE(review): despite the `Result` return type, failures currently
    /// surface as panics via `assert_eq!` rather than `Err(CCSError)` —
    /// consider returning proper errors.
    pub fn check_relation(
        &self,
        pedersen_params: &PedersenParams<C>,
        w: &Witness<C::ScalarField>,
    ) -> Result<(), CCSError> {
        // check that C is the commitment of w. Notice that this is not verifying a Pedersen
        // opening, but checking that the Commitment comes from committing to the witness.
        assert_eq!(self.C.0, Pedersen::commit(pedersen_params, &w.w, &w.r_w).0);

        // check CCS relation: rebuild z = [u, x, w] and recompute the v_j at r_x
        let z: Vec<C::ScalarField> = [vec![self.u], self.x.clone(), w.w.to_vec()].concat();
        let computed_v = compute_all_sum_Mz_evals(&self.ccs.M, &z, &self.r_x, self.ccs.s_prime);
        assert_eq!(computed_v, self.v);
        Ok(())
    }
}
108 |
#[cfg(test)]
pub mod test {
    use super::*;
    use ark_std::Zero;

    use crate::ccs::ccs::test::{get_test_ccs, get_test_z};
    use crate::util::hypercube::BooleanHypercube;
    use ark_std::test_rng;

    use ark_bls12_381::{Fr, G1Projective};

    #[test]
    /// Test linearized CCCS v_j against the L_j(x)
    fn test_lcccs_v_j() {
        let mut rng = test_rng();

        let ccs = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();

        let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z);
        // with our test vector coming from R1CS, v should have length 3
        assert_eq!(lcccs.v.len(), 3);

        let vec_L_j_x = lcccs.compute_Ls(&z);
        assert_eq!(vec_L_j_x.len(), lcccs.v.len());

        // Each v_j must equal the sum of L_j(x) over the boolean hypercube.
        for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
            let sum_L_j_x = BooleanHypercube::new(ccs.s)
                .map(|y| L_j_x.evaluate(&y).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);
            assert_eq!(v_i, sum_L_j_x);
        }
    }

    /// Given a bad z, check that the v_j should not match with the L_j(x)
    #[test]
    fn test_bad_v_j() {
        let mut rng = test_rng();

        let ccs = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();

        // Mutate z so that the relation does not hold
        let mut bad_z = z.clone();
        bad_z[3] = Fr::zero();
        assert!(ccs.check_relation(&bad_z).is_err());

        let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
        // Compute v_j with the right z
        let (lcccs, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z);
        // with our test vector coming from R1CS, v should have length 3
        assert_eq!(lcccs.v.len(), 3);

        // Bad compute L_j(x) with the bad z
        let vec_L_j_x = lcccs.compute_Ls(&bad_z);
        assert_eq!(vec_L_j_x.len(), lcccs.v.len());

        // Make sure that the LCCCS is not satisfied given these L_j(x)
        // i.e. summing L_j(x) over the hypercube should not give v_j for all j
        let mut satisfied = true;
        for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) {
            let sum_L_j_x = BooleanHypercube::new(ccs.s)
                .map(|y| L_j_x.evaluate(&y).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);
            if v_i != sum_L_j_x {
                satisfied = false;
            }
        }

        assert!(!satisfied);
    }
}
186 |
--------------------------------------------------------------------------------
/src/espresso/sum_check/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Espresso Systems (espressosys.com)
2 | // This file is part of the HyperPlonk library.
3 |
4 | // You should have received a copy of the MIT License
5 | // along with the HyperPlonk library. If not, see .
6 |
7 | //! This module implements the sum check protocol.
8 |
9 | use crate::espresso::virtual_polynomial::{VPAuxInfo, VirtualPolynomial};
10 | use ark_ff::PrimeField;
11 | use ark_poly::DenseMultilinearExtension;
12 | use ark_std::{end_timer, start_timer};
13 | use std::{fmt::Debug, sync::Arc};
14 |
15 | use structs::{IOPProof, IOPProverState, IOPVerifierState};
16 | use subroutines::poly_iop::{prelude::PolyIOPErrors, PolyIOP};
17 | use transcript::IOPTranscript;
18 |
19 | mod prover;
20 | pub mod structs;
21 | pub mod verifier;
22 |
/// Trait for doing sum check protocols.
pub trait SumCheck<F: PrimeField> {
    type VirtualPolynomial;
    type VPAuxInfo;
    type MultilinearExtension;

    type SumCheckProof: Clone + Debug + Default + PartialEq;
    type Transcript;
    type SumCheckSubClaim: Clone + Debug + Default + PartialEq;

    /// Extract sum from the proof
    fn extract_sum(proof: &Self::SumCheckProof) -> F;

    /// Initialize the system with a transcript
    ///
    /// This function is optional -- in the case where a SumCheck is
    /// a building block for a more complex protocol, the transcript
    /// may be initialized by that protocol, and passed to the
    /// SumCheck prover/verifier.
    fn init_transcript() -> Self::Transcript;

    /// Generate proof of the sum of polynomial over {0,1}^`num_vars`
    ///
    /// The polynomial is represented in the form of a VirtualPolynomial.
    fn prove(
        poly: &Self::VirtualPolynomial,
        transcript: &mut Self::Transcript,
    ) -> Result<Self::SumCheckProof, PolyIOPErrors>;

    /// Verify the claimed sum using the proof
    fn verify(
        sum: F,
        proof: &Self::SumCheckProof,
        aux_info: &Self::VPAuxInfo,
        transcript: &mut Self::Transcript,
    ) -> Result<Self::SumCheckSubClaim, PolyIOPErrors>;
}
60 |
/// Trait for sum check protocol prover side APIs.
pub trait SumCheckProver<F: PrimeField>
where
    Self: Sized,
{
    type VirtualPolynomial;
    type ProverMessage;

    /// Initialize the prover state to argue for the sum of the input polynomial
    /// over {0,1}^`num_vars`.
    fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result<Self, PolyIOPErrors>;

    /// Receive message from verifier, generate prover message, and proceed to
    /// next round. `challenge` is `None` only for the first round.
    ///
    /// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2).
    fn prove_round_and_update_state(
        &mut self,
        challenge: &Option<F>,
    ) -> Result<Self::ProverMessage, PolyIOPErrors>;
}
82 |
/// Trait for sum check protocol verifier side APIs.
pub trait SumCheckVerifier<F: PrimeField> {
    type VPAuxInfo;
    type ProverMessage;
    type Challenge;
    type Transcript;
    type SumCheckSubClaim;

    /// Initialize the verifier's state.
    fn verifier_init(index_info: &Self::VPAuxInfo) -> Self;

    /// Run verifier for the current round, given a prover message.
    ///
    /// Note that `verify_round_and_update_state` only samples and stores
    /// challenges, and updates the verifier's state accordingly. The actual
    /// verifications are deferred (in batch) to `check_and_generate_subclaim`
    /// at the last step.
    fn verify_round_and_update_state(
        &mut self,
        prover_msg: &Self::ProverMessage,
        transcript: &mut Self::Transcript,
    ) -> Result<Self::Challenge, PolyIOPErrors>;

    /// This function verifies the deferred checks in the interactive version of
    /// the protocol, and generates the subclaim. Returns an error if the
    /// proof failed to verify.
    ///
    /// If the asserted sum is correct, then the multilinear polynomial
    /// evaluated at `subclaim.point` will be `subclaim.expected_evaluation`.
    /// Otherwise, it is highly unlikely that those two will be equal.
    /// Larger field size guarantees smaller soundness error.
    fn check_and_generate_subclaim(
        &self,
        asserted_sum: &F,
    ) -> Result<Self::SumCheckSubClaim, PolyIOPErrors>;
}
119 |
/// A SumCheckSubClaim is a claim generated by the verifier at the end of
/// verification when it is convinced.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct SumCheckSubClaim<F: PrimeField> {
    /// the multi-dimensional point that this multilinear extension is evaluated
    /// to
    pub point: Vec<F>,
    /// the expected evaluation
    pub expected_evaluation: F,
}
130 |
impl<F: PrimeField> SumCheck<F> for PolyIOP<F> {
    type SumCheckProof = IOPProof<F>;
    type VirtualPolynomial = VirtualPolynomial<F>;
    type VPAuxInfo = VPAuxInfo<F>;
    type MultilinearExtension = Arc<DenseMultilinearExtension<F>>;
    type SumCheckSubClaim = SumCheckSubClaim<F>;
    type Transcript = IOPTranscript<F>;

    fn extract_sum(proof: &Self::SumCheckProof) -> F {
        let start = start_timer!(|| "extract sum");
        // The claimed sum is s1(0) + s1(1), taken from the first round
        // message (stored as the univariate polynomial's evaluations).
        let res = proof.proofs[0].evaluations[0] + proof.proofs[0].evaluations[1];
        end_timer!(start);
        res
    }

    fn init_transcript() -> Self::Transcript {
        let start = start_timer!(|| "init transcript");
        let res = IOPTranscript::<F>::new(b"Initializing SumCheck transcript");
        end_timer!(start);
        res
    }

    fn prove(
        poly: &Self::VirtualPolynomial,
        transcript: &mut Self::Transcript,
    ) -> Result<Self::SumCheckProof, PolyIOPErrors> {
        let start = start_timer!(|| "sum check prove");

        transcript.append_serializable_element(b"aux info", &poly.aux_info)?;

        let mut prover_state = IOPProverState::prover_init(poly)?;
        let mut challenge = None;
        let mut prover_msgs = Vec::with_capacity(poly.aux_info.num_variables);
        // One round per variable: emit a prover message, absorb it into the
        // transcript, then draw the next round's challenge from it.
        for _ in 0..poly.aux_info.num_variables {
            let prover_msg =
                IOPProverState::prove_round_and_update_state(&mut prover_state, &challenge)?;
            transcript.append_serializable_element(b"prover msg", &prover_msg)?;
            prover_msgs.push(prover_msg);
            challenge = Some(transcript.get_and_append_challenge(b"Internal round")?);
        }
        // pushing the last challenge point to the state
        if let Some(p) = challenge {
            prover_state.challenges.push(p)
        };

        end_timer!(start);
        Ok(IOPProof {
            point: prover_state.challenges,
            proofs: prover_msgs,
        })
    }

    fn verify(
        claimed_sum: F,
        proof: &Self::SumCheckProof,
        aux_info: &Self::VPAuxInfo,
        transcript: &mut Self::Transcript,
    ) -> Result<Self::SumCheckSubClaim, PolyIOPErrors> {
        let start = start_timer!(|| "sum check verify");

        // Mirror the prover's transcript schedule exactly so the derived
        // challenges match.
        transcript.append_serializable_element(b"aux info", aux_info)?;
        let mut verifier_state = IOPVerifierState::verifier_init(aux_info);
        for i in 0..aux_info.num_variables {
            let prover_msg = proof.proofs.get(i).expect("proof is incomplete");
            transcript.append_serializable_element(b"prover msg", prover_msg)?;
            IOPVerifierState::verify_round_and_update_state(
                &mut verifier_state,
                prover_msg,
                transcript,
            )?;
        }

        // All per-round checks are deferred to this final batched check.
        let res = IOPVerifierState::check_and_generate_subclaim(&verifier_state, &claimed_sum);

        end_timer!(start);
        res
    }
}
209 |
--------------------------------------------------------------------------------
/src/espresso/sum_check/prover.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Espresso Systems (espressosys.com)
2 | // This file is part of the HyperPlonk library.
3 |
4 | // You should have received a copy of the MIT License
5 | // along with the HyperPlonk library. If not, see .
6 |
7 | //! Prover subroutines for a SumCheck protocol.
8 |
9 | use super::SumCheckProver;
10 | use crate::espresso::multilinear_polynomial::fix_variables;
11 | use crate::espresso::virtual_polynomial::VirtualPolynomial;
12 | use ark_ff::{batch_inversion, PrimeField};
13 | use ark_poly::DenseMultilinearExtension;
14 | use ark_std::{cfg_into_iter, end_timer, start_timer, vec::Vec};
15 | use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator};
16 | use std::sync::Arc;
17 |
18 | use super::structs::{IOPProverMessage, IOPProverState};
19 | use subroutines::poly_iop::prelude::PolyIOPErrors;
20 |
21 | // #[cfg(feature = "parallel")]
22 | use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator};
23 |
impl<F: PrimeField> SumCheckProver<F> for IOPProverState<F> {
    type VirtualPolynomial = VirtualPolynomial<F>;
    type ProverMessage = IOPProverMessage<F>;

    /// Initialize the prover state to argue for the sum of the input polynomial
    /// over {0,1}^`num_vars`.
    fn prover_init(polynomial: &Self::VirtualPolynomial) -> Result<Self, PolyIOPErrors> {
        let start = start_timer!(|| "sum check prover init");
        if polynomial.aux_info.num_variables == 0 {
            return Err(PolyIOPErrors::InvalidParameters(
                "Attempt to prove a constant.".to_string(),
            ));
        }
        end_timer!(start);

        Ok(Self {
            challenges: Vec::with_capacity(polynomial.aux_info.num_variables),
            round: 0,
            poly: polynomial.clone(),
            // Precompute barycentric (points, weights) for each degree in
            // 1..max_degree; used in prove_round_and_update_state to
            // extrapolate a product's round polynomial beyond the
            // products.len() + 1 evaluations computed directly.
            extrapolation_aux: (1..polynomial.aux_info.max_degree)
                .map(|degree| {
                    let points = (0..1 + degree as u64).map(F::from).collect::<Vec<F>>();
                    let weights = barycentric_weights(&points);
                    (points, weights)
                })
                .collect(),
        })
    }

    /// Receive message from verifier, generate prover message, and proceed to
    /// next round.
    ///
    /// Main algorithm used is from section 3.2 of [XZZPS19](https://eprint.iacr.org/2019/317.pdf#subsection.3.2).
    fn prove_round_and_update_state(
        &mut self,
        challenge: &Option<F>,
    ) -> Result<Self::ProverMessage, PolyIOPErrors> {
        // let start =
        //     start_timer!(|| format!("sum check prove {}-th round and update state",
        // self.round));

        if self.round >= self.poly.aux_info.num_variables {
            return Err(PolyIOPErrors::InvalidProver(
                "Prover is not active".to_string(),
            ));
        }

        // let fix_argument = start_timer!(|| "fix argument");

        // Step 1:
        // fix argument and evaluate f(x) over x_m = r; where r is the challenge
        // for the current round, and m is the round number, indexed from 1
        //
        // i.e.:
        // at round m <= n, for each mle g(x_1, ... x_n) within the flattened_mle
        // which has already been evaluated to
        //
        //    g(r_1, ..., r_{m-1}, x_m ... x_n)
        //
        // eval g over r_m, and mutate g to g(r_1, ... r_m,, x_{m+1}... x_n)
        let mut flattened_ml_extensions: Vec<DenseMultilinearExtension<F>> = self
            .poly
            .flattened_ml_extensions
            .par_iter()
            .map(|x| x.as_ref().clone())
            .collect();

        if let Some(chal) = challenge {
            if self.round == 0 {
                return Err(PolyIOPErrors::InvalidProver(
                    "first round should be prover first.".to_string(),
                ));
            }
            self.challenges.push(*chal);

            let r = self.challenges[self.round - 1];
            // #[cfg(feature = "parallel")]
            flattened_ml_extensions
                .par_iter_mut()
                .for_each(|mle| *mle = fix_variables(mle, &[r]));
            // #[cfg(not(feature = "parallel"))]
            // flattened_ml_extensions
            //     .iter_mut()
            //     .for_each(|mle| *mle = fix_variables(mle, &[r]));
        } else if self.round > 0 {
            return Err(PolyIOPErrors::InvalidProver(
                "verifier message is empty".to_string(),
            ));
        }
        // end_timer!(fix_argument);

        self.round += 1;

        let products_list = self.poly.products.clone();
        let mut products_sum = vec![F::zero(); self.poly.aux_info.max_degree + 1];

        // Step 2: generate sum for the partial evaluated polynomial:
        // f(r_1, ... r_m,, x_{m+1}... x_n)

        products_list.iter().for_each(|(coefficient, products)| {
            // For each term (a product of MLEs indexed by `products`), compute
            // the round polynomial's evaluations at t = 0..=products.len() in
            // one pass over the remaining hypercube. For each MLE we keep an
            // (eval, step) pair so moving from t to t+1 is just `eval += step`.
            let mut sum = cfg_into_iter!(0..1 << (self.poly.aux_info.num_variables - self.round))
                .fold(
                    || {
                        (
                            vec![(F::zero(), F::zero()); products.len()],
                            vec![F::zero(); products.len() + 1],
                        )
                    },
                    |(mut buf, mut acc), b| {
                        buf.iter_mut()
                            .zip(products.iter())
                            .for_each(|((eval, step), f)| {
                                let table = &flattened_ml_extensions[*f];
                                *eval = table[b << 1];
                                *step = table[(b << 1) + 1] - table[b << 1];
                            });
                        acc[0] += buf.iter().map(|(eval, _)| eval).product::<F>();
                        acc[1..].iter_mut().for_each(|acc| {
                            buf.iter_mut().for_each(|(eval, step)| *eval += step as &_);
                            *acc += buf.iter().map(|(eval, _)| eval).product::<F>();
                        });
                        (buf, acc)
                    },
                )
                .map(|(_, partial)| partial)
                .reduce(
                    || vec![F::zero(); products.len() + 1],
                    |mut sum, partial| {
                        sum.iter_mut()
                            .zip(partial.iter())
                            .for_each(|(sum, partial)| *sum += partial);
                        sum
                    },
                );
            sum.iter_mut().for_each(|sum| *sum *= coefficient);
            // Extend the products.len() + 1 direct evaluations up to
            // max_degree + 1 via barycentric extrapolation (see
            // extrapolation_aux built in prover_init).
            let extraploation = cfg_into_iter!(0..self.poly.aux_info.max_degree - products.len())
                .map(|i| {
                    let (points, weights) = &self.extrapolation_aux[products.len() - 1];
                    let at = F::from((products.len() + 1 + i) as u64);
                    extrapolate(points, weights, &sum, &at)
                })
                .collect::<Vec<_>>();
            products_sum
                .iter_mut()
                .zip(sum.iter().chain(extraploation.iter()))
                .for_each(|(products_sum, sum)| *products_sum += sum);
        });

        // update prover's state to the partial evaluated polynomial
        self.poly.flattened_ml_extensions = flattened_ml_extensions
            .par_iter()
            .map(|x| Arc::new(x.clone()))
            .collect();

        Ok(IOPProverMessage {
            evaluations: products_sum,
        })
    }
}
183 |
184 | fn barycentric_weights(points: &[F]) -> Vec {
185 | let mut weights = points
186 | .iter()
187 | .enumerate()
188 | .map(|(j, point_j)| {
189 | points
190 | .iter()
191 | .enumerate()
192 | .filter_map(|(i, point_i)| (i != j).then(|| *point_j - point_i))
193 | .reduce(|acc, value| acc * value)
194 | .unwrap_or_else(F::one)
195 | })
196 | .collect::>();
197 | batch_inversion(&mut weights);
198 | weights
199 | }
200 |
201 | fn extrapolate(points: &[F], weights: &[F], evals: &[F], at: &F) -> F {
202 | let (coeffs, sum_inv) = {
203 | let mut coeffs = points.iter().map(|point| *at - point).collect::>();
204 | batch_inversion(&mut coeffs);
205 | coeffs.iter_mut().zip(weights).for_each(|(coeff, weight)| {
206 | *coeff *= weight;
207 | });
208 | let sum_inv = coeffs.iter().sum::().inverse().unwrap_or_default();
209 | (coeffs, sum_inv)
210 | };
211 | coeffs
212 | .iter()
213 | .zip(evals)
214 | .map(|(coeff, eval)| *coeff * eval)
215 | .sum::()
216 | * sum_inv
217 | }
218 |
--------------------------------------------------------------------------------
/src/ccs/cccs.rs:
--------------------------------------------------------------------------------
1 | use ark_ec::CurveGroup;
2 | use ark_ff::PrimeField;
3 | use ark_std::One;
4 | use ark_std::Zero;
5 | use std::ops::Add;
6 | use std::sync::Arc;
7 |
8 | use ark_std::{rand::Rng, UniformRand};
9 |
10 | use crate::ccs::ccs::{CCSError, CCS};
11 | use crate::ccs::util::compute_sum_Mz;
12 |
13 | use crate::ccs::pedersen::{Commitment, Params as PedersenParams, Pedersen};
14 | use crate::espresso::virtual_polynomial::VirtualPolynomial;
15 | use crate::util::hypercube::BooleanHypercube;
16 | use crate::util::mle::matrix_to_mle;
17 | use crate::util::mle::vec_to_mle;
18 |
19 | /// Witness for the LCCCS & CCCS, containing the w vector, and the r_w used as randomness in the Pedersen commitment.
20 | #[derive(Debug, Clone)]
21 | pub struct Witness {
22 | pub w: Vec,
23 | pub r_w: F, // randomness used in the Pedersen commitment of w
24 | }
25 |
26 | /// Committed CCS instance
27 | #[derive(Debug, Clone)]
28 | pub struct CCCS {
29 | // Underlying CCS structure
30 | pub ccs: CCS,
31 |
32 | // Commitment to witness
33 | pub C: Commitment,
34 | // Public input/output
35 | pub x: Vec,
36 | }
37 |
38 | impl CCS {
39 | pub fn to_cccs(
40 | &self,
41 | rng: &mut R,
42 | pedersen_params: &PedersenParams,
43 | z: &[C::ScalarField],
44 | ) -> (CCCS, Witness) {
45 | let w: Vec = z[(1 + self.l)..].to_vec();
46 | let r_w = C::ScalarField::rand(rng);
47 | let C = Pedersen::::commit(pedersen_params, &w, &r_w);
48 |
49 | (
50 | CCCS:: {
51 | ccs: self.clone(),
52 | C,
53 | x: z[1..(1 + self.l)].to_vec(),
54 | },
55 | Witness:: { w, r_w },
56 | )
57 | }
58 | }
59 |
60 | impl CCCS {
61 | /// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
62 | /// polynomial over x
63 | pub fn compute_q(&self, z: &Vec) -> VirtualPolynomial {
64 | let z_mle = vec_to_mle(self.ccs.s_prime, z);
65 | let mut q = VirtualPolynomial::::new(self.ccs.s);
66 |
67 | for i in 0..self.ccs.q {
68 | let mut prod: VirtualPolynomial =
69 | VirtualPolynomial::::new(self.ccs.s);
70 | for j in self.ccs.S[i].clone() {
71 | let M_j = matrix_to_mle(self.ccs.M[j].clone());
72 |
73 | let sum_Mz = compute_sum_Mz(M_j, &z_mle, self.ccs.s_prime);
74 |
75 | // Fold this sum into the running product
76 | if prod.products.is_empty() {
77 | // If this is the first time we are adding something to this virtual polynomial, we need to
78 | // explicitly add the products using add_mle_list()
79 | // XXX is this true? improve API
80 | prod.add_mle_list([Arc::new(sum_Mz)], C::ScalarField::one())
81 | .unwrap();
82 | } else {
83 | prod.mul_by_mle(Arc::new(sum_Mz), C::ScalarField::one())
84 | .unwrap();
85 | }
86 | }
87 | // Multiply by the product by the coefficient c_i
88 | prod.scalar_mul(&self.ccs.c[i]);
89 | // Add it to the running sum
90 | q = q.add(&prod);
91 | }
92 | q
93 | }
94 |
95 | /// Computes Q(x) = eq(beta, x) * q(x)
96 | /// = eq(beta, x) * \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) )
97 | /// polynomial over x
98 | pub fn compute_Q(
99 | &self,
100 | z: &Vec,
101 | beta: &[C::ScalarField],
102 | ) -> VirtualPolynomial {
103 | let q = self.compute_q(z);
104 | q.build_f_hat(beta).unwrap()
105 | }
106 |
107 | /// Perform the check of the CCCS instance described at section 4.1
108 | pub fn check_relation(
109 | &self,
110 | pedersen_params: &PedersenParams,
111 | w: &Witness,
112 | ) -> Result<(), CCSError> {
113 | // check that C is the commitment of w. Notice that this is not verifying a Pedersen
114 | // opening, but checking that the Commmitment comes from committing to the witness.
115 | assert_eq!(self.C.0, Pedersen::commit(pedersen_params, &w.w, &w.r_w).0);
116 |
117 | // check CCCS relation
118 | let z: Vec =
119 | [vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat();
120 |
121 | // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube
122 | let q_x = self.compute_q(&z);
123 | for x in BooleanHypercube::new(self.ccs.s) {
124 | if !q_x.evaluate(&x).unwrap().is_zero() {
125 | return Err(CCSError::NotSatisfied);
126 | }
127 | }
128 |
129 | Ok(())
130 | }
131 | }
132 |
#[cfg(test)]
pub mod test {
    use super::*;
    use crate::ccs::ccs::test::{get_test_ccs, get_test_z};
    use ark_std::test_rng;
    use ark_std::UniformRand;

    use ark_bls12_381::{Fr, G1Projective};

    /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the
    /// hypercube, but to not-zero outside the hypercube.
    #[test]
    fn test_compute_q() {
        let mut rng = test_rng();

        let ccs = get_test_ccs::<G1Projective>();
        let z = get_test_z(3);

        let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (cccs, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z);
        let q = cccs.compute_q(&z);

        // Evaluate inside the hypercube
        for x in BooleanHypercube::new(ccs.s).into_iter() {
            assert_eq!(Fr::zero(), q.evaluate(&x).unwrap());
        }

        // Evaluate outside the hypercube
        let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        assert_ne!(Fr::zero(), q.evaluate(&beta).unwrap());
    }

    /// Perform some sanity checks on Q(x).
    #[test]
    fn test_compute_Q() {
        let mut rng = test_rng();

        let ccs = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();

        let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (cccs, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z);

        let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

        // Compute Q(x) = eq(beta, x) * q(x).
        let Q = cccs.compute_Q(&z, &beta);

        // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y)
        // which interpolates the multivariate polynomial q(x) inside the hypercube.
        //
        // Observe that summing Q(x) inside the hypercube, directly computes G(\beta).
        //
        // Now, G(x) is multilinear and agrees with q(x) inside the hypercube. Since q(x) vanishes inside the
        // hypercube, this means that G(x) also vanishes in the hypercube. Since G(x) is multilinear and vanishes
        // inside the hypercube, this makes it the zero polynomial.
        //
        // Hence, evaluating G(x) at a random beta should give zero.

        // Now sum Q(x) evaluations in the hypercube and expect it to be 0
        let r = BooleanHypercube::new(ccs.s)
            .into_iter()
            .map(|x| Q.evaluate(&x).unwrap())
            .fold(Fr::zero(), |acc, result| acc + result);
        assert_eq!(r, Fr::zero());
    }

    /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube.
    /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point.
    /// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside
    #[test]
    fn test_Q_against_q() {
        let mut rng = test_rng();

        let ccs = get_test_ccs();
        let z = get_test_z(3);
        ccs.check_relation(&z).unwrap();

        let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
        let (cccs, _) = ccs.to_cccs(&mut rng, &pedersen_params, &z);

        // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which
        // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube
        let q = cccs.compute_q(&z);
        for d in BooleanHypercube::new(ccs.s) {
            let Q_at_d = cccs.compute_Q(&z, &d);

            // Get G(d) by summing over Q_d(x) over the hypercube
            let G_at_d = BooleanHypercube::new(ccs.s)
                .into_iter()
                .map(|x| Q_at_d.evaluate(&x).unwrap())
                .fold(Fr::zero(), |acc, result| acc + result);
            assert_eq!(G_at_d, q.evaluate(&d).unwrap());
        }

        // Now test that they should disagree outside of the hypercube
        let r: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
        let Q_at_r = cccs.compute_Q(&z, &r);

        // Get G(r) by summing over Q_r(x) over the hypercube
        let G_at_r = BooleanHypercube::new(ccs.s)
            .into_iter()
            .map(|x| Q_at_r.evaluate(&x).unwrap())
            .fold(Fr::zero(), |acc, result| acc + result);
        assert_ne!(G_at_r, q.evaluate(&r).unwrap());
    }
}
241 |
--------------------------------------------------------------------------------
/src/espresso/sum_check/verifier.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Espresso Systems (espressosys.com)
2 | // This file is part of the HyperPlonk library.
3 |
4 | // You should have received a copy of the MIT License
5 | // along with the HyperPlonk library. If not, see