├── .gitignore ├── src ├── aggregation │ ├── mod.rs │ ├── merlin.rs │ ├── single.rs │ └── multiple.rs ├── utils │ ├── mod.rs │ ├── ec.rs │ └── poly.rs ├── pcs │ ├── kzg │ │ ├── commitment.rs │ │ ├── lagrange.rs │ │ ├── urs.rs │ │ ├── params.rs │ │ └── mod.rs │ ├── id │ │ └── mod.rs │ └── mod.rs ├── shplonk.rs ├── fflonk.rs └── lib.rs ├── LICENSE-MIT ├── Cargo.toml ├── .github └── workflows │ └── rust.yml ├── README.md ├── benches ├── primitives.rs └── multiexps.rs ├── tests └── plonk │ ├── main.rs │ ├── fflonky.rs │ ├── README.md │ └── batchy.rs └── LICENSE-APACHE /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | /.idea -------------------------------------------------------------------------------- /src/aggregation/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod merlin; 2 | pub mod multiple; 3 | pub mod single; 4 | -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::pairing::Pairing; 2 | use ark_ff::Field; 3 | 4 | pub mod ec; 5 | pub mod poly; 6 | 7 | pub fn powers(base: F) -> impl Iterator { 8 | ark_std::iter::successors(Some(F::one()), move |power| Some(base * power)) 9 | } 10 | 11 | pub fn curve_name() -> &'static str { 12 | // ark_ec::models::bw6::BW6 13 | let full_name = ark_std::any::type_name::(); 14 | full_name 15 | .split_once("<") 16 | .unwrap() 17 | .1 18 | .split_once(":") 19 | .unwrap() 20 | .0 21 | } 22 | -------------------------------------------------------------------------------- /src/aggregation/merlin.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::PrimeField; 2 | use ark_serialize::{CanonicalSerialize, Compress}; 3 | use ark_std::vec; 4 | 5 | use crate::aggregation::multiple::Transcript; 6 | use crate::pcs::PCS; 7 | 8 | impl> Transcript for merlin::Transcript { 9 | fn get_gamma(&mut self) -> F { 10 | let mut buf = [0u8; 16]; 11 | self.challenge_bytes(b"gamma", &mut buf); 12 | F::from_random_bytes(&buf).unwrap() 13 | } 14 | 15 | fn commit_to_q(&mut self, q: &CS::C) { 16 | let mut buf = vec![0; q.serialized_size(Compress::No)]; 17 | q.serialize_uncompressed(&mut buf).unwrap(); 18 | self.append_message(b"q", &buf); 19 | } 20 | 21 | fn get_zeta(&mut self) -> F { 22 | let mut buf = [0u8; 16]; 23 | self.challenge_bytes(b"zeta", &mut buf); 24 | F::from_random_bytes(&buf).unwrap() 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Web3 Foundation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "w3f-pcs" 3 | version = "0.0.3" 4 | edition = "2021" 5 | authors = ["Sergey Vasilyev "] 6 | license = "MIT/Apache-2.0" 7 | description = "Collection of tools for augmenting polynomial commitment schemes" 8 | keywords = ["cryptography", "pcs", "kzg"] 9 | repository = "https://github.com/w3f/fflonk" 10 | 11 | [dependencies] 12 | ark-std = { version = "0.5", default-features = false } 13 | ark-ff = { version = "0.5", default-features = false } 14 | ark-ec = { version = "0.5", default-features = false } 15 | ark-poly = { version = "0.5", default-features = false } 16 | ark-serialize = { version = "0.5", default-features = false, features = ["derive"] } 17 | 18 | rayon = { version = "1.0", optional = true } 19 | merlin = { version = "3.0", default-features = false } 20 | 21 | [dev-dependencies] 22 | criterion = "0.5" 23 | ark-bw6-761 = { version = "0.5", default-features = false } 24 | ark-bls12-381 = { version = "0.5", default-features = false, features = ["curve"] } 25 | 26 | [[bench]] 27 | name = "primitives" 28 | harness = false 29 | 30 | [[bench]] 31 | name = "multiexps" 32 | harness = false 33 | 34 | [[test]] 35 | name = "plonk" 36 | harness = true 37 | 38 | [features] 39 | default = [] 40 | asm = ["ark-ff/asm"] 41 | std = ["ark-std/std", "ark-ff/std", "ark-ec/std", "ark-poly/std", "ark-serialize/std", "merlin/std"] 42 | parallel = ["std", "rayon", "ark-std/parallel", "ark-ff/parallel", "ark-ec/parallel", "ark-poly/parallel"] 43 | print-trace = ["ark-std/print-trace"] 44 | -------------------------------------------------------------------------------- /src/pcs/kzg/commitment.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::pairing::Pairing; 2 | use ark_ec::{AffineRepr, CurveGroup}; 3 | use ark_serialize::*; 4 | use ark_std::iter::Sum; 5 | use ark_std::ops::{Add, Mul, Sub}; 6 | use ark_std::vec::Vec; 7 | 8 | use crate::pcs::Commitment; 9 | use crate::utils::ec::small_multiexp_affine; 10 | 11 | /// KZG commitment to G1 represented in affine coordinates. 
12 | #[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] 13 | pub struct KzgCommitment(pub E::G1Affine); 14 | 15 | impl Mul for KzgCommitment { 16 | type Output = Self; 17 | 18 | fn mul(self, by: E::ScalarField) -> Self { 19 | (&self).mul(by) 20 | } 21 | } 22 | 23 | impl Commitment for KzgCommitment { 24 | fn mul(&self, by: E::ScalarField) -> KzgCommitment { 25 | KzgCommitment(self.0.mul(by).into()) 26 | } 27 | 28 | fn combine(coeffs: &[::ScalarField], commitments: &[Self]) -> Self { 29 | let bases = commitments.iter().map(|c| c.0).collect::>(); 30 | let prod = small_multiexp_affine(coeffs, &bases); 31 | KzgCommitment(prod.into()) 32 | } 33 | } 34 | 35 | impl Add for KzgCommitment { 36 | type Output = KzgCommitment; 37 | 38 | fn add(self, other: KzgCommitment) -> KzgCommitment { 39 | KzgCommitment((self.0 + other.0).into_affine()) 40 | } 41 | } 42 | 43 | impl Sub for KzgCommitment { 44 | type Output = KzgCommitment; 45 | 46 | fn sub(self, other: KzgCommitment) -> KzgCommitment { 47 | KzgCommitment((self.0 + -other.0.into_group()).into_affine()) 48 | } 49 | } 50 | 51 | impl Sum for KzgCommitment { 52 | fn sum>(iter: I) -> KzgCommitment { 53 | KzgCommitment(iter.map(|c| c.0.into_group()).sum::().into_affine()) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | # Run CI on push only for 'main' branch 5 | push: 6 | branches: [main] 7 | # Run CI on pull request for all branches 8 | pull_request: 9 | branches: ["**"] 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | CARGO_TERM_COLOR: always 17 | RUSTFLAGS: '-D warnings' 18 | RUST_BACKTRACE: 1 19 | 20 | jobs: 21 | format: 22 | runs-on: ubuntu-latest 23 | timeout-minutes: 5 24 | steps: 25 | - uses: actions/checkout@v3 26 | - name: Install toolchain 27 | uses: actions-rs/toolchain@v1 28 | with: 29 | profile: minimal 30 | toolchain: stable 31 | components: rustfmt 32 | - name: Format 33 | run: cargo fmt --all --check 34 | 35 | build: 36 | runs-on: ubuntu-latest 37 | timeout-minutes: 5 38 | steps: 39 | - uses: actions/checkout@v3 40 | - name: Install toolchain 41 | uses: actions-rs/toolchain@v1 42 | with: 43 | profile: minimal 44 | toolchain: stable 45 | - name: Build 46 | run: cargo build --verbose 47 | 48 | build-wasm32: 49 | runs-on: ubuntu-latest 50 | timeout-minutes: 5 51 | steps: 52 | - uses: actions/checkout@v3 53 | - name: Install toolchain 54 | uses: actions-rs/toolchain@v1 55 | with: 56 | profile: minimal 57 | toolchain: stable 58 | target: wasm32-unknown-unknown 59 | - name: Build 60 | run: cargo build --verbose --no-default-features --target wasm32-unknown-unknown 61 | 62 | test: 63 | runs-on: ubuntu-latest 64 | timeout-minutes: 5 65 | steps: 66 | - uses: actions/checkout@v3 67 | - name: Install toolchain 68 | uses: actions-rs/toolchain@v1 69 | with: 70 | profile: minimal 71 | toolchain: stable 72 | - name: Run tests 73 | run: cargo test --release 74 | 75 | -------------------------------------------------------------------------------- /src/utils/ec.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::{AdditiveGroup, AffineRepr, CurveGroup}; 2 | use ark_ff::{BigInteger, PrimeField, Zero}; 3 | use ark_std::vec::Vec; 4 | 5 | pub fn naive_multiexp_affine(coeffs: 
&[G::ScalarField], bases: &[G]) -> G::Group { 6 | bases 7 | .iter() 8 | .zip(coeffs.iter()) 9 | .map(|(b, &c)| b.mul(c)) 10 | .sum() 11 | } 12 | 13 | /// Performs a small multi-exponentiation operation. 14 | /// Uses the double-and-add algorithm with doublings shared across points. 15 | // adopted from https://github.com/zcash/halo2/pull/20 16 | pub fn small_multiexp_affine(coeffs: &[G::ScalarField], bases: &[G]) -> G::Group { 17 | let bytes_in_repr = ::BigInt::NUM_LIMBS * 8; 18 | let coeffs: Vec<_> = coeffs 19 | .iter() 20 | .map(|c| c.into_bigint().to_bytes_le()) 21 | .collect(); 22 | 23 | let mut acc = G::Group::zero(); 24 | 25 | // for byte idx 26 | for byte_idx in (0..bytes_in_repr).rev() { 27 | // for bit idx 28 | for bit_idx in (0..8).rev() { 29 | acc.double_in_place(); 30 | // for each coeff 31 | for coeff_idx in 0..coeffs.len() { 32 | let byte = coeffs[coeff_idx][byte_idx]; 33 | if ((byte >> bit_idx) & 1) != 0 { 34 | acc += &bases[coeff_idx]; 35 | } 36 | } 37 | } 38 | } 39 | 40 | acc 41 | } 42 | 43 | pub fn small_multiexp_proj(coeffs: &[G::ScalarField], bases: &[G]) -> G { 44 | let bases = G::normalize_batch(bases); 45 | small_multiexp_affine(coeffs, &bases) 46 | } 47 | 48 | pub fn _small_multiexp_proj_2(coeffs: &[G::ScalarField], bases: &[G]) -> G { 49 | let bytes_in_repr = ::BigInt::NUM_LIMBS * 8; 50 | let coeffs: Vec<_> = coeffs 51 | .iter() 52 | .map(|c| c.into_bigint().to_bytes_le()) 53 | .collect(); 54 | 55 | let mut acc = G::zero(); 56 | 57 | // for byte idx 58 | for byte_idx in (0..bytes_in_repr).rev() { 59 | // for bit idx 60 | for bit_idx in (0..8).rev() { 61 | acc.double_in_place(); 62 | // for each coeff 63 | for coeff_idx in 0..coeffs.len() { 64 | let byte = coeffs[coeff_idx][byte_idx]; 65 | if ((byte >> bit_idx) & 1) != 0 { 66 | acc += bases[coeff_idx]; 67 | } 68 | } 69 | } 70 | } 71 | 72 | acc 73 | } 74 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Not a single line of code in this repo has been reviewed (let alone audited). Use at your own risk. 2 | 3 | This repo aims to build a collection of tools for augmenting polynomial commitment schemes (PCS, from now on). 4 | 5 | ## Shplonk 6 | [Shplonk](https://eprint.iacr.org/2020/081.pdf), scheme #2 (aka the private aggregation scheme from [Halo Infinite](https://eprint.iacr.org/2020/1536.pdf), section 4) accumulates `k` opening tuples `(Cf, z, v)`, each claiming that `f(z) = v, commit(f) = Cf` for a univariate polynomial `f` and points `z, v`, into a single one `(C', z', v')`, such that a proof for the aggregate claim attests that all `k` original claims are valid. It makes it possible to compile a simple PCS, capable of proving a single evaluation of a single polynomial at a time, into a PCS that opens multiple polynomials, each in a different set of points, with a single proof. The overhead is limited to producing the valid aggregate claim: one extra commitment computed by the prover, a linear combination of `k+2` commitments computed by the verifier, `O(k log(k))` field operations for both, and one extra commitment of communication. 7 | 8 | Notice that this type of aggregation is different from [aggregation for vector commitments](https://eprint.iacr.org/2020/527.pdf): here the proof for the aggregated claim is produced from scratch, while in the latter case the aggregate proof is computed from the proofs for the individual claims. The latter is, at any rate, not implemented here.
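A minimal end-to-end sketch of that flow (illustrative only: the curve, degrees, opening points and transcript label are arbitrary choices here; the calls mirror `src/shplonk.rs` and its tests):

```rust
use ark_bls12_381::Bls12_381;
use ark_ec::pairing::Pairing;
use ark_poly::{DenseUVPolynomial, Polynomial};
use ark_std::collections::BTreeSet;
use ark_std::{test_rng, UniformRand};

use w3f_pcs::pcs::{kzg::KZG, PcsParams, PCS};
use w3f_pcs::shplonk::Shplonk;
use w3f_pcs::Poly;

type F = <Bls12_381 as Pairing>::ScalarField;
type CS = KZG<Bls12_381>;

fn shplonk_demo() {
    let rng = &mut test_rng();
    let params = CS::setup(15, rng);
    let (ck, vk) = (params.ck(), params.vk());

    // Two degree-15 polynomials, each to be opened in its own set of points.
    let fs: Vec<Poly<F>> = (0..2).map(|_| Poly::rand(15, rng)).collect();
    let fcs: Vec<_> = fs.iter().map(|f| CS::commit(&ck, f).unwrap()).collect();
    let xss: Vec<Vec<F>> = vec![vec![F::rand(rng), F::rand(rng)], vec![F::rand(rng)]];
    let yss: Vec<Vec<F>> = fs
        .iter()
        .zip(&xss)
        .map(|(f, xs)| xs.iter().map(|x| f.evaluate(x)).collect())
        .collect();
    let sets: Vec<BTreeSet<F>> = xss.iter().map(|xs| xs.iter().cloned().collect()).collect();

    // The transcript supplies the aggregation challenges; merlin::Transcript implements the trait.
    let mut prover_transcript = merlin::Transcript::new(b"shplonk-example");
    let proof = Shplonk::<F, CS>::open_many(&ck, &fs, &sets, &mut prover_transcript);

    // The verifier replays an identical transcript to derive the same challenges.
    let mut verifier_transcript = merlin::Transcript::new(b"shplonk-example");
    assert!(Shplonk::<F, CS>::verify_many(
        &vk,
        &fcs,
        proof,
        &xss,
        &yss,
        &mut verifier_transcript
    ));
}
```

Both sides must drive the transcript identically (same label, same order of messages) for the Fiat-Shamir challenges to match.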
9 | 10 | ## FFlonk 11 | 12 | [FFlonk](https://eprint.iacr.org/2021/1167.pdf), from `n` polynomials of degrees not exceeding `d`, constructs a single polynomial of degree less than `nd`, such that evaluating each of the individual polynomials in the same `k` points is equivalent to evaluating the combined polynomial in `nk` points. In combination with a PCS enjoying efficient multipoint openings, it asymptotically reduces the number of commitments to transfer and improves verifier performance. 13 | 14 | ### What is implemented 15 | 1. Traits for a minimal PCS (far from being perfect). 16 | 2. The simplest form of KZG implementing these traits. 17 | 3. Halo Infinite private aggregation (aka Shplonk scheme #2), generic over the traits. 18 | 4. Fflonk routines: combining polynomials, converting evaluations. 19 | 5. The Fflonk PCS from [the original paper](https://eprint.iacr.org/2021/1167.pdf): opens Fflonk-combined polynomials with Shplonk-compiled KZG. 20 | 6. [A test](/tests/plonk) comparing two ways of opening a very vanilla (not zk, arithmetic gate only, no polynomial splitting) Plonk polynomial assignment: 21 | * Batched verification of KZG proofs in different points with linearization, as described in the original [PlonK](https://eprint.iacr.org/2019/953.pdf) paper. 22 | * The scheme described in [fflonk](https://eprint.iacr.org/2021/1167.pdf), Section 7. 23 | 24 | #### KZG benchmarks 25 | ``` 26 | cargo test bench_minimal_kzg --release --features "parallel print-trace" -- --nocapture --ignored 27 | ``` 28 | outputs timings for generating a setup, committing to a degree-2^16 polynomial, and proving and verifying an opening in a single point. 29 | -------------------------------------------------------------------------------- /benches/primitives.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Mul; 2 | 3 | use ark_bw6_761::BW6_761; 4 | use ark_ec::pairing::Pairing; 5 | use ark_ec::{AdditiveGroup, AffineRepr, CurveGroup}; 6 | use ark_ff::UniformRand; 7 | use ark_std::test_rng; 8 | use criterion::{criterion_group, criterion_main, Criterion}; 9 | 10 | use w3f_pcs::utils::curve_name; 11 | 12 | fn scalar_mul(c: &mut Criterion) { 13 | let mut group = c.benchmark_group(format!("{}/scalar-mul", curve_name::())); 14 | 15 | let rng = &mut test_rng(); 16 | let n = 100; 17 | 18 | let mut exps = vec![]; 19 | exps.resize_with(n, || E::ScalarField::rand(rng)); 20 | // the timing depends on the exponent 21 | let bases_projective = vec![E::G1::rand(rng); n]; 22 | let bases_affine = vec![E::G1Affine::rand(rng); n]; 23 | 24 | let _res: E::G1 = bases_affine[0].mul(exps[0]); // result of affine mul is projective 25 | 26 | let mut i = 0; 27 | group.bench_function("proj", |b| { 28 | b.iter_with_setup( 29 | || { 30 | let pair = (bases_projective[i], exps[i]); 31 | i = (i + 1) % n; 32 | pair 33 | }, 34 | |(base, exp)| base.mul(exp), 35 | ) 36 | }); 37 | 38 | let mut i = 0; 39 | group.bench_function("aff", |b| { 40 | b.iter_with_setup( 41 | || { 42 | let pair = (bases_affine[i], exps[i]); 43 | i = (i + 1) % n; 44 | pair 45 | }, 46 | |(base, exp)| base.mul(exp), 47 | ) 48 | }); 49 | 50 | group.finish(); 51 | } 52 | 53 | fn coordinates_conversion(c: &mut Criterion) { 54 | let mut group = c.benchmark_group(format!("{}/into", curve_name::())); 55 | let rng = &mut test_rng(); 56 | let projective = E::G1::rand(rng); 57 | let affine = E::G1Affine::rand(rng); 58 | group.bench_function("affine", |b| b.iter(|| projective.into_affine())); 59 | group.bench_function("projective", |b| b.iter(|| affine.into_group())); 60 |
group.finish(); 61 | } 62 | 63 | fn additions(c: &mut Criterion) { 64 | let mut group = c.benchmark_group(format!("{}/addition", curve_name::())); 65 | let rng = &mut test_rng(); 66 | let a_projective = E::G1::rand(rng); 67 | let b_projective = E::G1::rand(rng); 68 | let a_affine = E::G1Affine::rand(rng); 69 | let b_affine = E::G1Affine::rand(rng); 70 | group.bench_function("projective", |b| b.iter(|| a_projective + b_projective)); 71 | group.bench_function("affine", |b| b.iter(|| a_affine + b_affine)); 72 | group.bench_function("mixed", |b| b.iter(|| a_projective + &b_affine)); 73 | group.bench_function("doubling", |b| b.iter(|| a_projective.double())); 74 | // group.bench_function("doubling", |b| b.iter(|| CurveGroup::double_in_place(&mut b_projective))); 75 | group.finish(); 76 | } 77 | 78 | criterion_group!( 79 | benches, 80 | scalar_mul::, 81 | coordinates_conversion::, 82 | additions:: 83 | ); 84 | criterion_main!(benches); 85 | -------------------------------------------------------------------------------- /src/pcs/id/mod.rs: -------------------------------------------------------------------------------- 1 | use ark_poly::Polynomial; 2 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 3 | use ark_std::vec::Vec; 4 | 5 | use crate::pcs::*; 6 | use crate::utils::poly; 7 | use crate::Poly; 8 | 9 | #[derive(Clone, PartialEq, Eq, Debug, CanonicalSerialize, CanonicalDeserialize)] 10 | pub struct WrappedPolynomial(pub Poly); 11 | 12 | impl WrappedPolynomial { 13 | fn evaluate(&self, x: &F) -> F { 14 | self.0.evaluate(x) 15 | } 16 | } 17 | 18 | impl Add for WrappedPolynomial { 19 | type Output = WrappedPolynomial; 20 | 21 | fn add(self, other: WrappedPolynomial) -> Self::Output { 22 | WrappedPolynomial(self.0 + other.0) 23 | } 24 | } 25 | 26 | impl Sub for WrappedPolynomial { 27 | type Output = WrappedPolynomial; 28 | 29 | fn sub(self, other: WrappedPolynomial) -> Self::Output { 30 | WrappedPolynomial(self.0 - other.0) 31 | } 32 | } 33 | 34 | impl Sum for WrappedPolynomial { 35 | fn sum>(iter: I) -> Self { 36 | iter.reduce(|a, b| a + b).unwrap() 37 | } 38 | } 39 | 40 | impl Mul for WrappedPolynomial { 41 | type Output = WrappedPolynomial; 42 | 43 | fn mul(self, by: F) -> Self { 44 | (&self).mul(by) 45 | } 46 | } 47 | 48 | impl Commitment for WrappedPolynomial { 49 | fn mul(&self, by: F) -> Self { 50 | WrappedPolynomial(&self.0 * by) 51 | } 52 | 53 | fn combine(coeffs: &[F], commitments: &[Self]) -> Self { 54 | let polys = commitments 55 | .to_vec() 56 | .into_iter() 57 | .map(|c| c.0) 58 | .collect::>(); 59 | let combined = poly::sum_with_coeffs(coeffs.to_vec(), &polys); 60 | WrappedPolynomial(combined) 61 | } 62 | } 63 | 64 | impl CommitterKey for () { 65 | fn max_degree(&self) -> usize { 66 | usize::MAX >> 1 67 | } 68 | } 69 | 70 | impl VerifierKey for () { 71 | fn max_points(&self) -> usize { 72 | 1 73 | } 74 | } 75 | 76 | impl RawVerifierKey for () { 77 | type VK = (); 78 | 79 | fn prepare(&self) -> () { 80 | () 81 | } 82 | } 83 | 84 | impl PcsParams for () { 85 | type CK = (); 86 | type VK = (); 87 | type RVK = (); 88 | 89 | fn ck(&self) -> () { 90 | () 91 | } 92 | 93 | fn vk(&self) -> () { 94 | () 95 | } 96 | 97 | fn raw_vk(&self) -> () { 98 | () 99 | } 100 | } 101 | 102 | #[derive(Clone)] 103 | pub struct IdentityCommitment {} 104 | 105 | impl PCS for IdentityCommitment { 106 | type C = WrappedPolynomial; 107 | type Proof = (); 108 | type CK = (); 109 | type VK = (); 110 | type Params = (); 111 | 112 | fn setup(_max_degree: usize, _rng: &mut R) -> Self::Params { 113 | 
() 114 | } 115 | 116 | fn commit(_ck: &(), p: &Poly) -> Result { 117 | Ok(WrappedPolynomial(p.clone())) 118 | } 119 | 120 | fn open(_ck: &(), _p: &Poly, _x: F) -> Result { 121 | Ok(()) 122 | } 123 | 124 | fn verify(_vk: &(), c: Self::C, x: F, z: F, _proof: Self::Proof) -> Result<(), ()> { 125 | (c.evaluate(&x) == z).then(|| ()).ok_or(()) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/pcs/kzg/lagrange.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::{AffineRepr, CurveGroup, ScalarMul}; 2 | use ark_ff::Zero; 3 | use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; 4 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 5 | use ark_std::rand::RngCore; 6 | use ark_std::vec::Vec; 7 | use ark_std::UniformRand; 8 | 9 | use crate::pcs::kzg::params::MonomialCK; 10 | use crate::pcs::CommitterKey; 11 | 12 | /// Used to commit to univariate polynomials represented in the evaluation form. 13 | #[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] 14 | pub struct LagrangianCK< 15 | G: AffineRepr, 16 | D: EvaluationDomain = GeneralEvaluationDomain<::ScalarField>, 17 | > { 18 | // L_0(tau).G, L_1(tau).G, ..., L_{n-1}(tau).G 19 | pub lis_in_g: Vec, 20 | pub domain: D, 21 | } 22 | 23 | impl CommitterKey for LagrangianCK { 24 | fn max_degree(&self) -> usize { 25 | self.lis_in_g.len() - 1 26 | } 27 | } 28 | 29 | impl> LagrangianCK { 30 | pub fn generate(domain: D, rng: &mut R) -> Self { 31 | let tau = G::ScalarField::rand(rng); 32 | let g = G::Group::rand(rng); 33 | Self::from_trapdoor(domain, tau, g) 34 | } 35 | 36 | pub fn from_trapdoor(domain: D, tau: G::ScalarField, g: G::Group) -> Self { 37 | assert!(!domain.evaluate_vanishing_polynomial(tau).is_zero()); // doesn't give a basis 38 | let lis_at_tau = domain.evaluate_all_lagrange_coefficients(tau); // L_i(tau) 39 | let lis_in_g = g.batch_mul(&lis_at_tau); // L_i(tau).G 40 | Self { lis_in_g, domain } 41 | } 42 | } 43 | 44 | impl MonomialCK { 45 | pub fn to_lagrangian>( 46 | &self, 47 | domain: D, 48 | ) -> LagrangianCK { 49 | assert!(self.max_evals() >= domain.size()); 50 | let mut monomial_bases = self 51 | .powers_in_g1 52 | .iter() 53 | .take(domain.size()) 54 | .map(|p| p.into_group()) 55 | .collect(); 56 | 57 | let lagrangian_bases = { 58 | domain.ifft_in_place(&mut monomial_bases); 59 | monomial_bases 60 | }; 61 | 62 | let lis_in_g = G::Group::normalize_batch(&lagrangian_bases); 63 | LagrangianCK { lis_in_g, domain } 64 | } 65 | } 66 | 67 | #[cfg(test)] 68 | mod tests { 69 | use ark_ec::pairing::Pairing; 70 | use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; 71 | use ark_std::test_rng; 72 | 73 | use crate::pcs::kzg::urs::URS; 74 | use crate::pcs::PcsParams; 75 | use crate::tests::TestCurve; 76 | 77 | use super::*; 78 | 79 | #[test] 80 | fn test_derivation_from_monomial_urs() { 81 | let rng = &mut test_rng(); 82 | let domain_size = 16; 83 | let domain = GeneralEvaluationDomain::new(domain_size).unwrap(); 84 | 85 | let (tau, g1, g2) = URS::::random_params(rng); 86 | let urs = URS::::from_trapdoor(tau, domain_size, 0, g1, g2); 87 | let monomial_ck = urs.ck().monomial; 88 | let lagrangian_ck_from_monomial_urs = monomial_ck.to_lagrangian(domain); 89 | 90 | let lagrangian_ck_from_trapdoor = 91 | LagrangianCK::<::G1Affine>::from_trapdoor(domain, tau, g1); 92 | assert_eq!( 93 | lagrangian_ck_from_monomial_urs.lis_in_g, 94 | lagrangian_ck_from_trapdoor.lis_in_g 95 | ); 96 | } 97 | } 98 | 
-------------------------------------------------------------------------------- /src/pcs/mod.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::PrimeField; 2 | use ark_poly::Evaluations; 3 | use ark_serialize::*; 4 | use ark_std::fmt::Debug; 5 | use ark_std::iter::Sum; 6 | use ark_std::ops::{Add, Mul, Sub}; 7 | use ark_std::rand::Rng; 8 | use ark_std::vec::Vec; 9 | 10 | pub use id::IdentityCommitment; 11 | 12 | use crate::Poly; 13 | 14 | mod id; 15 | pub mod kzg; 16 | 17 | pub trait Commitment: 18 | Eq 19 | + Sized 20 | + Clone 21 | + Debug 22 | + Add 23 | + Mul 24 | + Sub 25 | + Sum 26 | + CanonicalSerialize 27 | + CanonicalDeserialize 28 | { 29 | fn mul(&self, by: F) -> Self; 30 | fn combine(coeffs: &[F], commitments: &[Self]) -> Self; 31 | } 32 | 33 | /// Can be used to commit and open commitments to DensePolynomial of degree up to max_degree. 34 | pub trait CommitterKey: Clone + Debug + CanonicalSerialize + CanonicalDeserialize { 35 | /// Maximal degree of a polynomial supported. 36 | fn max_degree(&self) -> usize; 37 | 38 | /// Maximal number of evaluations supported when committing in the Lagrangian base. 39 | fn max_evals(&self) -> usize { 40 | self.max_degree() + 1 41 | } 42 | } 43 | 44 | /// Can be used to verify openings to commitments. 45 | pub trait VerifierKey: Clone + Debug { 46 | /// Maximal number of openings that can be verified. 47 | fn max_points(&self) -> usize { 48 | 1 49 | } 50 | } 51 | 52 | /// Generates a `VerifierKey`, serializable 53 | pub trait RawVerifierKey: 54 | Clone + Debug + Eq + PartialEq + CanonicalSerialize + CanonicalDeserialize 55 | { 56 | type VK: VerifierKey; 57 | 58 | fn prepare(&self) -> Self::VK; 59 | } 60 | 61 | pub trait PcsParams { 62 | type CK: CommitterKey; 63 | type VK: VerifierKey; 64 | type RVK: RawVerifierKey; 65 | 66 | fn ck(&self) -> Self::CK; 67 | fn vk(&self) -> Self::VK; 68 | fn raw_vk(&self) -> Self::RVK; 69 | 70 | fn ck_with_lagrangian(&self, _domain_size: usize) -> Self::CK { 71 | unimplemented!(); 72 | } 73 | } 74 | 75 | /// Polynomial commitment scheme. 76 | pub trait PCS { 77 | type C: Commitment; 78 | 79 | type Proof: Clone + CanonicalSerialize + CanonicalDeserialize; 80 | 81 | type CK: CommitterKey; 82 | 83 | // vk needs to be convertible to a ck that is only required to commit to the p=1 constant polynomial, 84 | // see https://eprint.iacr.org/archive/2020/1536/1629188090.pdf, section 4.2 85 | type VK: VerifierKey + Into; 86 | 87 | type Params: PcsParams; 88 | 89 | fn setup(max_degree: usize, rng: &mut R) -> Self::Params; 90 | 91 | fn commit(ck: &Self::CK, p: &Poly) -> Result; 92 | 93 | fn commit_evals(ck: &Self::CK, evals: &Evaluations) -> Result { 94 | let poly = evals.interpolate_by_ref(); 95 | Self::commit(ck, &poly) 96 | } 97 | 98 | fn open(ck: &Self::CK, p: &Poly, x: F) -> Result; 99 | 100 | fn verify(vk: &Self::VK, c: Self::C, x: F, z: F, proof: Self::Proof) -> Result<(), ()>; 101 | 102 | // TODO: is the default implementation useful? 
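// The default implementation below just checks each opening independently with `Self::verify`;
// schemes that have a cheaper randomized batch check can override it.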
103 | fn batch_verify( 104 | vk: &Self::VK, 105 | c: Vec, 106 | x: Vec, 107 | y: Vec, 108 | proof: Vec, 109 | _rng: &mut R, 110 | ) -> Result<(), ()> { 111 | assert_eq!(c.len(), x.len()); 112 | assert_eq!(c.len(), y.len()); 113 | c.into_iter() 114 | .zip(x.into_iter()) 115 | .zip(y.into_iter()) 116 | .zip(proof.into_iter()) 117 | .all(|(((c, x), y), proof)| Self::verify(vk, c, x, y, proof).is_ok()) 118 | .then(|| ()) 119 | .ok_or(()) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /benches/multiexps.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::pairing::Pairing; 2 | use ark_ec::VariableBaseMSM; 3 | use ark_ff::{PrimeField, UniformRand}; 4 | use ark_std::test_rng; 5 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 6 | 7 | use w3f_pcs::utils::ec; 8 | 9 | fn small_multiexp_affine(c: &mut Criterion) { 10 | let rng = &mut test_rng(); 11 | let n = 10; 12 | 13 | let bases = (0..n).map(|_| E::G1Affine::rand(rng)).collect::>(); 14 | let exps_full = (0..n) 15 | .map(|_| E::ScalarField::rand(rng)) 16 | .collect::>(); 17 | let exps_128 = (0..n) 18 | .map(|_| E::ScalarField::from(u128::rand(rng))) 19 | .collect::>(); 20 | 21 | let mut group = c.benchmark_group("small-multiexp-affine"); 22 | group.bench_with_input(BenchmarkId::new("small-multiexp-full", n), &n, |b, _n| { 23 | b.iter(|| ec::small_multiexp_affine(&exps_full, &bases)) 24 | }); 25 | group.bench_with_input(BenchmarkId::new("naive-multiexp-full", n), &n, |b, _n| { 26 | b.iter(|| ec::naive_multiexp_affine(&exps_full, &bases)) 27 | }); 28 | group.bench_with_input(BenchmarkId::new("small-multiexp-128", n), &n, |b, _n| { 29 | b.iter(|| ec::small_multiexp_affine(&exps_128, &bases)) 30 | }); 31 | group.bench_with_input(BenchmarkId::new("naive-multiexp-128", n), &n, |b, _n| { 32 | b.iter(|| ec::naive_multiexp_affine(&exps_128, &bases)) 33 | }); 34 | group.finish(); 35 | } 36 | 37 | fn small_multiexp_proj(c: &mut Criterion) { 38 | let rng = &mut test_rng(); 39 | let n = 10; 40 | 41 | let bases = (0..n).map(|_| E::G1::rand(rng)).collect::>(); 42 | let exps_128 = (0..n) 43 | .map(|_| E::ScalarField::from(u128::rand(rng))) 44 | .collect::>(); 45 | 46 | let mut group = c.benchmark_group("small-multiexp-proj"); 47 | group.bench_with_input(BenchmarkId::new("in_affine", n), &n, |b, _n| { 48 | b.iter(|| ec::small_multiexp_proj(&exps_128, &bases)) 49 | }); 50 | group.bench_with_input(BenchmarkId::new("in-proj", n), &n, |b, _n| { 51 | b.iter(|| ec::_small_multiexp_proj_2(&exps_128, &bases)) 52 | }); 53 | group.finish(); 54 | } 55 | 56 | fn small_multiexp_vs_msm(c: &mut Criterion) { 57 | let rng = &mut test_rng(); 58 | let mut group = c.benchmark_group("small-multiexp-vs-msm"); 59 | 60 | for n in [10, 20] { 61 | let bases = (0..n).map(|_| E::G1Affine::rand(rng)).collect::>(); 62 | 63 | let exps_full = (0..n) 64 | .map(|_| E::ScalarField::rand(rng)) 65 | .collect::>(); 66 | let exps_128 = (0..n) 67 | .map(|_| E::ScalarField::from(u128::rand(rng))) 68 | .collect::>(); 69 | 70 | let exps_full_repr = exps_full 71 | .iter() 72 | .map(|exp| exp.into_bigint()) 73 | .collect::>(); 74 | let exps_128_repr = exps_128 75 | .iter() 76 | .map(|exp| exp.into_bigint()) 77 | .collect::>(); 78 | 79 | group.bench_with_input(BenchmarkId::new("small-multiexp-full", n), &n, |b, _n| { 80 | b.iter(|| ec::small_multiexp_affine(&exps_full, &bases)) 81 | }); 82 | group.bench_with_input(BenchmarkId::new("var-base-msm-full", n), &n, |b, _n| { 83 | 
b.iter(|| ::msm_bigint(&bases, &exps_full_repr)) 84 | }); 85 | group.bench_with_input(BenchmarkId::new("small-multiexp-128", n), &n, |b, _n| { 86 | b.iter(|| ec::small_multiexp_affine(&exps_128, &bases)) 87 | }); 88 | group.bench_with_input(BenchmarkId::new("var-base-msm-128", n), &n, |b, _n| { 89 | b.iter(|| ::msm_bigint(&bases, &exps_128_repr)) 90 | }); 91 | } 92 | 93 | group.finish(); 94 | } 95 | 96 | criterion_group!( 97 | benches, 98 | small_multiexp_affine::, 99 | small_multiexp_proj::, 100 | small_multiexp_vs_msm::, 101 | ); 102 | criterion_main!(benches); 103 | -------------------------------------------------------------------------------- /src/pcs/kzg/urs.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::pairing::Pairing; 2 | use ark_ec::ScalarMul; 3 | use ark_ff::{FftField, UniformRand}; 4 | use ark_serialize::*; 5 | use ark_std::format; 6 | use ark_std::rand::RngCore; 7 | use ark_std::vec::Vec; 8 | use ark_std::{end_timer, start_timer}; 9 | 10 | use crate::utils; 11 | 12 | /// Updatable Universal References String 13 | #[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] 14 | pub struct URS { 15 | // g1, tau.g1, tau^2.g1, ..., tau^{n1-1}.g1, where g1 is a generator of G1 16 | pub powers_in_g1: Vec, 17 | // g2, tau.g2, tau^2.g2, ..., tau^{n2-1}.g2, where g2 is a generator of G2 18 | pub powers_in_g2: Vec, 19 | } 20 | 21 | impl URS { 22 | /// Generates a random URS with the given number of G1 and G2 bases. 23 | pub fn generate(n1: usize, n2: usize, rng: &mut R) -> Self { 24 | let (tau, g1, g2) = Self::random_params(rng); 25 | Self::from_trapdoor(tau, n1, n2, g1, g2) 26 | } 27 | 28 | /// Generates random parameters for a URS. 29 | /// Returns (trapdoor, G1 generator, G2 generator). 30 | pub fn random_params(rng: &mut R) -> (E::ScalarField, E::G1, E::G2) { 31 | let tau = E::ScalarField::rand(rng); 32 | let g1 = E::G1::rand(rng); 33 | let g2 = E::G2::rand(rng); 34 | (tau, g1, g2) 35 | } 36 | 37 | /// Generates URS of the form: g1, tau.g1, ..., tau^{n1-1}.g1, g2, tau.g2, ..., tau^{n2-1}.g2 38 | pub fn from_trapdoor(tau: E::ScalarField, n1: usize, n2: usize, g1: E::G1, g2: E::G2) -> Self { 39 | let n = n1.max(n2); 40 | assert!(n > 0, "nothing to generate"); 41 | 42 | // Until ECFFT for more curves is implemented, see https://github.com/wborgeaud/ecfft-bn254. 43 | // 44 | // Assertion note: as `TWO_ADICITY` for the field can be >= 32 and on 32-bit machine targets 45 | // `usize` is just 32-bit we move the check in the `u64` domain to avoid a panic. 
46 | assert!( 47 | n as u64 <= 1u64 << E::ScalarField::TWO_ADICITY, 48 | "number of bases exceeds curve 2-adicity" 49 | ); 50 | 51 | let t_powers = start_timer!(|| format!("Computing {} scalars powers", n)); 52 | // tau^0, ..., tau^(n-1)) 53 | let powers_of_tau: Vec = utils::powers(tau).take(n).collect(); 54 | end_timer!(t_powers); 55 | 56 | let t_msm_g1 = start_timer!(|| format!("{}-scalar mul in G1", n1)); 57 | let powers_in_g1 = g1.batch_mul(&powers_of_tau[..n1]); 58 | end_timer!(t_msm_g1); 59 | 60 | let t_msm_g2 = start_timer!(|| format!("{}-scalar mul in G1", n2)); 61 | let powers_in_g2 = g2.batch_mul(&powers_of_tau[..n2]); 62 | end_timer!(t_msm_g2); 63 | 64 | URS { 65 | powers_in_g1, 66 | powers_in_g2, 67 | } 68 | } 69 | } 70 | 71 | #[cfg(test)] 72 | mod tests { 73 | use ark_std::test_rng; 74 | 75 | use crate::tests::{BenchCurve, TestCurve}; 76 | 77 | use super::*; 78 | 79 | fn _test_urs_generation(log_n1: usize, log_n2: usize) { 80 | let n1 = 1 << log_n1; 81 | let n2 = 1 << log_n2; 82 | 83 | let t_generate = start_timer!(|| format!( 84 | "Generate 2^{} G1 and 2^{} G2 bases for {}", 85 | log_n1, 86 | log_n2, 87 | crate::utils::curve_name::() 88 | )); 89 | let urs = URS::::generate(n1, n2, &mut test_rng()); 90 | end_timer!(t_generate); 91 | 92 | assert_eq!(urs.powers_in_g1.len(), n1); 93 | assert_eq!(urs.powers_in_g2.len(), n2); 94 | } 95 | 96 | #[test] 97 | #[ignore] 98 | fn bench_urs_generation() { 99 | _test_urs_generation::(16, 16); 100 | } 101 | 102 | #[test] 103 | fn test_urs_generation() { 104 | _test_urs_generation::(8, 1); 105 | } 106 | 107 | #[test] 108 | #[should_panic] 109 | fn test_max_bases() { 110 | let max_bases = 1 << ark_bw6_761::Fr::TWO_ADICITY; 111 | URS::::generate(max_bases + 1, 0, &mut test_rng()); 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /src/aggregation/single.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::{AffineRepr, CurveGroup}; 2 | use ark_ff::{PrimeField, Zero}; 3 | use ark_poly::Polynomial; 4 | use ark_std::vec::Vec; 5 | 6 | use crate::pcs::{Commitment, PCS}; 7 | use crate::utils::ec::small_multiexp_affine; 8 | use crate::Poly; 9 | 10 | /// A tuple (c, x, y) of the form (G, F, F). Represents a claim that {f(x) = y, for a polynomial f such that commit(f) = c}. 11 | /// In other words, it is am instance in some language of "correct polynomial evaluations". 12 | /// Soundness properties of a claim are defined by that of the argument. 13 | #[derive(Clone, Debug)] 14 | pub struct Claim> { 15 | pub c: C, 16 | pub x: F, 17 | pub y: F, 18 | } 19 | 20 | impl> Claim { 21 | pub fn new(ck: &CS::CK, poly: &Poly, at: F) -> Claim 22 | where 23 | CS: PCS, 24 | { 25 | Claim { 26 | c: CS::commit(ck, poly).unwrap(), 27 | x: at, 28 | y: poly.evaluate(&at), 29 | } 30 | } 31 | } 32 | 33 | /// Aggregates claims for different polynomials evaluated at the same point. 34 | /// 35 | /// Claims `[(Ci, xi, yi)]`, such that `xi = x` for any `i`, 36 | /// can be aggregated using randomness `r` to a claim `(C', x, y')`, 37 | /// where `C' = r_agg([Ci], r)` and `y' = r_agg([yi], r)`. 38 | /// 39 | /// If CS is knowledge-sound than an aggregate opening is a proof of knowledge for 40 | /// `{[(C_i, x, y_i)]; [f_i]): fi(x) = yi and CS::commit(fi) = ci}`. 
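///
/// For example, claims `(C1, x, y1)` and `(C2, x, y2)` aggregated with randomness `rs = [r1, r2]`
/// yield the claim `(r1.C1 + r2.C2, x, r1.y1 + r2.y2)`.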
41 | pub fn aggregate_claims>( 42 | claims: &[Claim], 43 | rs: &[F], 44 | ) -> Claim { 45 | assert_eq!(claims.len(), rs.len()); 46 | 47 | let mut iter_over_xs = claims.iter().map(|cl| cl.x); 48 | let same_x = iter_over_xs.next().expect("claims is empty"); 49 | assert!( 50 | iter_over_xs.all(|x| x == same_x), 51 | "multiple evaluation points" 52 | ); 53 | 54 | // TODO: Detect duplicate claims? 55 | // Consider (Cf, x, y1) and (Cf, x, y2). 56 | // If y1 = y2 = f(x) both claims are valid 57 | // If y1 != y2, at least one of the 2 claims is invalid 58 | 59 | let (rcs, rys): (Vec, Vec) = claims 60 | .iter() 61 | .zip(rs.iter()) 62 | .map(|(cl, &r)| (cl.c.mul(r), r * cl.y)) 63 | .unzip(); 64 | 65 | Claim { 66 | c: rcs.into_iter().sum(), 67 | x: same_x, 68 | y: rys.iter().sum(), 69 | } 70 | } 71 | 72 | pub fn aggregate_claims_multiexp(cs: Vec, ys: Vec, rs: &[F]) -> (C, F) 73 | where 74 | F: PrimeField, 75 | C: AffineRepr, 76 | { 77 | assert_eq!(cs.len(), rs.len()); 78 | assert_eq!(ys.len(), rs.len()); 79 | 80 | let agg_c = small_multiexp_affine(rs, &cs); 81 | let agg_y = ys.into_iter().zip(rs.iter()).map(|(y, r)| y * r).sum(); 82 | 83 | (agg_c.into_affine(), agg_y) 84 | } 85 | 86 | // for opening in a single point, the aggregate polynomial doesn't depend on the point. 87 | pub fn aggregate_polys(polys: &[Poly], rs: &[F]) -> Poly { 88 | assert_eq!(polys.len(), rs.len()); 89 | polys 90 | .iter() 91 | .zip(rs.iter()) 92 | .map(|(p, &r)| p * r) 93 | .fold(Poly::zero(), |acc, p| acc + p) 94 | } 95 | 96 | #[cfg(test)] 97 | mod tests { 98 | use ark_poly::DenseUVPolynomial; 99 | use ark_std::test_rng; 100 | 101 | use crate::pcs::IdentityCommitment; 102 | use crate::pcs::PcsParams; 103 | use crate::tests::{TestField, TestKzg}; 104 | 105 | use super::*; 106 | 107 | fn _test_aggregation>() { 108 | let rng = &mut test_rng(); 109 | let d = 15; 110 | let t = 4; 111 | let params = CS::setup(d, rng); 112 | let ck = params.ck(); 113 | 114 | assert!(aggregate_polys::(&[], &[]).is_zero()); 115 | 116 | // common randomness 117 | let rs = (0..t).map(|_| F::rand(rng)).collect::>(); 118 | 119 | let polys = (0..t).map(|_| Poly::::rand(d, rng)).collect::>(); 120 | let agg_poly = aggregate_polys(&polys, &rs); 121 | 122 | let same_x = F::rand(rng); 123 | let claims_at_same_x = polys 124 | .iter() 125 | .map(|p| Claim::new::(&ck, p, same_x)) 126 | .collect::>(); 127 | let agg_claim = aggregate_claims::(&claims_at_same_x, &rs); 128 | 129 | assert_eq!(CS::commit(&ck, &agg_poly).unwrap(), agg_claim.c); 130 | assert_eq!(same_x, agg_claim.x); 131 | assert_eq!(agg_poly.evaluate(&same_x), agg_claim.y); 132 | } 133 | 134 | #[test] 135 | fn test_aggregation() { 136 | _test_aggregation::(); 137 | _test_aggregation::(); 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/utils/poly.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::{FftField, Field, PrimeField, Zero}; 2 | use ark_poly::polynomial::univariate::DensePolynomial; 3 | use ark_poly::{DenseUVPolynomial, Polynomial}; 4 | use ark_std::{vec, vec::Vec}; 5 | 6 | use crate::utils::powers; 7 | use crate::Poly; 8 | 9 | /// Field element represented as a constant polynomial. 10 | pub(crate) fn constant(c: F) -> Poly { 11 | Poly::from_coefficients_vec(vec![c]) 12 | } 13 | 14 | /// The vanishing polynomial of a point x. 
15 | /// z(X) = X - x 16 | pub(crate) fn z_of_point(x: &F) -> Poly { 17 | Poly::from_coefficients_vec(vec![x.neg(), F::one()]) 18 | } 19 | 20 | /// The vanishing polynomial of a set. 21 | /// z(X) = (X - x1) * .. * (X - xn) 22 | pub(crate) fn z_of_set<'a, F: FftField>(xs: impl IntoIterator) -> DensePolynomial { 23 | xs.into_iter() 24 | .map(|x| z_of_point(x)) 25 | .reduce(|a, b| &a * &b) 26 | .unwrap() 27 | } 28 | 29 | pub fn sum_with_coeffs>(coeffs: Vec, polys: &[P]) -> P { 30 | assert_eq!(coeffs.len(), polys.len()); 31 | let mut res = P::zero(); 32 | for (c, p) in coeffs.into_iter().zip(polys.iter()) { 33 | res += (c, p); 34 | } 35 | res 36 | } 37 | 38 | pub fn sum_with_powers>(r: F, polys: &[P]) -> P { 39 | let powers = powers(r).take(polys.len()).collect::>(); 40 | sum_with_coeffs(powers, polys) 41 | } 42 | 43 | pub fn interpolate(xs: &[F], ys: &[F]) -> Poly { 44 | let x1 = xs[0]; 45 | let mut l = z_of_point(&x1); 46 | for &xj in xs.iter().skip(1) { 47 | let q = z_of_point(&xj); 48 | l = &l * &q; 49 | } 50 | 51 | let mut ws = vec![]; 52 | for xj in xs { 53 | let mut wj = F::one(); 54 | for xk in xs { 55 | if xk != xj { 56 | let d = *xj - xk; 57 | wj *= d; 58 | } 59 | } 60 | ws.push(wj); 61 | } 62 | ark_ff::batch_inversion(&mut ws); 63 | 64 | let mut res = Poly::zero(); 65 | for ((&wi, &xi), &yi) in ws.iter().zip(xs).zip(ys) { 66 | let d = z_of_point(&xi); 67 | let mut z = &l / &d; 68 | z = &z * wi; 69 | z = &z * yi; 70 | res = res + z; 71 | } 72 | res 73 | } 74 | 75 | /// Given a polynomial `r` in evaluation form {(xi, yi)}, 76 | /// i.e. lowest degree `r` such that `r(xi) = yi` for all `i`s, 77 | /// and a point zeta, 78 | /// computes `r(zeta)` and `z(zeta)`, 79 | /// where `z` is the vanishing polynomial of `x`s. 80 | // Implements barycentric formula of some form. 
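// Concretely, with barycentric weights w_j = prod_{k != j} (x_j - x_k), it returns
// r(zeta) = z(zeta) * sum_i y_i / (w_i * (zeta - x_i)), together with z(zeta) = prod_i (zeta - x_i).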
81 | pub(crate) fn interpolate_evaluate(xs: &[F], ys: &[F], zeta: &F) -> (F, F) { 82 | assert_eq!(xs.len(), ys.len()); 83 | 84 | let zeta_minus_xs = ark_std::iter::repeat(zeta) 85 | .zip(xs.iter()) 86 | .map(|(&zeta, xi)| zeta - xi) 87 | .collect::>(); 88 | 89 | let l_at_zeta = zeta_minus_xs 90 | .iter() 91 | .cloned() 92 | .reduce(|acc, item| item * acc) 93 | .expect("TODO"); 94 | 95 | let mut ws = vec![]; 96 | for xj in xs { 97 | let mut wj = F::one(); 98 | for xk in xs { 99 | if xk != xj { 100 | let d = *xj - xk; 101 | wj *= d; 102 | } 103 | } 104 | ws.push(wj); 105 | } 106 | 107 | let mut denominator = ws 108 | .into_iter() 109 | .zip(zeta_minus_xs.iter()) 110 | .map(|(a, b)| a * b) 111 | .collect::>(); 112 | 113 | ark_ff::batch_inversion(&mut denominator); 114 | 115 | let sum = denominator 116 | .into_iter() 117 | .zip(ys.iter()) 118 | .map(|(a, b)| a * b) 119 | .sum::(); 120 | (sum * l_at_zeta, l_at_zeta) 121 | } 122 | 123 | #[cfg(test)] 124 | mod tests { 125 | use ark_ff::UniformRand; 126 | use ark_poly::Polynomial; 127 | use ark_std::test_rng; 128 | 129 | use crate::tests::BenchField; 130 | use crate::utils::poly::z_of_set; 131 | 132 | use super::*; 133 | 134 | #[test] 135 | fn test_interpolation() { 136 | let rng = &mut test_rng(); 137 | 138 | let d = 15; 139 | let (xs, ys): (Vec<_>, Vec<_>) = (0..d + 1) 140 | .map(|_| (BenchField::rand(rng), BenchField::rand(rng))) 141 | .unzip(); 142 | 143 | let poly = interpolate(&xs, &ys); 144 | 145 | assert_eq!(poly.degree(), d); 146 | assert!(xs 147 | .iter() 148 | .zip(ys.iter()) 149 | .all(|(x, &y)| poly.evaluate(x) == y)); 150 | 151 | for _ in 0..10 { 152 | let zeta = BenchField::rand(rng); 153 | let (r_at_zeta, z_at_zeta) = interpolate_evaluate(&xs, &ys, &zeta); 154 | assert_eq!(r_at_zeta, poly.evaluate(&zeta)); 155 | assert_eq!(z_at_zeta, z_of_set(&xs).evaluate(&zeta)); 156 | } 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /tests/plonk/main.rs: -------------------------------------------------------------------------------- 1 | use ark_bls12_381::Bls12_381; 2 | use ark_ff::PrimeField; 3 | use ark_poly::DenseUVPolynomial; 4 | use ark_poly::EvaluationDomain; 5 | use ark_poly::Radix2EvaluationDomain; 6 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress}; 7 | use ark_std::rand::Rng; 8 | use ark_std::test_rng; 9 | use ark_std::{end_timer, start_timer}; 10 | 11 | use w3f_pcs::pcs::kzg::KZG; 12 | use w3f_pcs::pcs::PCS; 13 | use w3f_pcs::Poly; 14 | 15 | use crate::batchy::PlonkBatchKzgTest; 16 | use crate::fflonky::PlonkWithFflonkTest; 17 | 18 | mod batchy; 19 | mod fflonky; 20 | 21 | struct VanillaPlonkAssignments { 22 | degree: usize, 23 | max_degree: usize, 24 | 25 | // [Poly; 8], max_deg = d 26 | preprocessed_polynomials: Vec>, 27 | // [Poly; 3], max_deg = d 28 | wire_polynomials: Vec>, 29 | // max_deg = d 30 | permutation_polynomial: Poly, 31 | // max_deg = 3 * d 32 | arithmetic_constraint: Poly, 33 | // max_deg = 2 * d 34 | permutation_constraint_1: Poly, 35 | // // max_deg = 4 * d 36 | permutation_constraint_2: Poly, 37 | 38 | domain: Radix2EvaluationDomain, 39 | omega: F, 40 | } 41 | 42 | fn random_polynomials(k: usize, degree: usize, rng: &mut R) -> Vec> { 43 | (0..k).map(|_| Poly::rand(degree, rng)).collect() 44 | } 45 | 46 | impl VanillaPlonkAssignments { 47 | fn new(domain_size: usize, rng: &mut R) -> Self { 48 | let degree = domain_size - 1; 49 | let max_degree = 3 * degree; // permutation_constraint_2 / Z 50 | let domain = 
Radix2EvaluationDomain::::new(domain_size).unwrap(); 51 | let omega = domain.group_gen; 52 | Self { 53 | degree, 54 | max_degree, 55 | preprocessed_polynomials: random_polynomials(8, degree, rng), 56 | wire_polynomials: random_polynomials(3, degree, rng), 57 | permutation_polynomial: Poly::rand(degree, rng), 58 | arithmetic_constraint: Poly::rand(3 * degree, rng), 59 | permutation_constraint_1: Poly::rand(2 * degree, rng), 60 | permutation_constraint_2: Poly::rand(4 * degree, rng), 61 | domain, 62 | omega, 63 | } 64 | } 65 | 66 | fn quotient(&self, constraint: &Poly) -> Poly { 67 | constraint.divide_by_vanishing_poly(self.domain).0 68 | } 69 | } 70 | 71 | trait DecoyPlonk> { 72 | type Proof: CanonicalSerialize + CanonicalDeserialize; 73 | 74 | fn new(polys: VanillaPlonkAssignments, rng: &mut R) -> Self; 75 | 76 | fn setup(&mut self, rng: &mut R) -> (CS::CK, CS::VK); 77 | fn preprocess(&mut self, ck: &CS::CK) -> Vec; 78 | fn prove(&mut self, ck: &CS::CK) -> Self::Proof; 79 | fn verify(&self, vk: &CS::VK, preprocessed_commitments: Vec, proof: Self::Proof) 80 | -> bool; 81 | } 82 | 83 | fn _test_vanilla_plonk_opening, T: DecoyPlonk>(log_n: usize) { 84 | let rng = &mut test_rng(); 85 | let n = 1 << log_n; 86 | let polys = VanillaPlonkAssignments::::new(n, rng); 87 | 88 | let mut test = T::new(polys, rng); 89 | 90 | let t_test = start_timer!(|| format!( 91 | "domain_size = {}, curve = {}", 92 | n, 93 | w3f_pcs::utils::curve_name::() 94 | )); 95 | 96 | let t_setup = start_timer!(|| "Setup"); 97 | let (ck, vk) = test.setup(rng); 98 | end_timer!(t_setup); 99 | 100 | let t_preprocess = start_timer!(|| "Preprocessing"); 101 | let commitments_to_preprocessed_polynomials = test.preprocess(&ck); 102 | end_timer!(t_preprocess); 103 | 104 | let preprocessed_size = commitments_to_preprocessed_polynomials.serialized_size(Compress::Yes); 105 | 106 | let t_prove = start_timer!(|| "Proving"); 107 | let proof = test.prove(&ck); 108 | end_timer!(t_prove); 109 | 110 | let proof_size = proof.serialized_size(Compress::Yes); 111 | let mut serialized_proof = vec![0; proof_size]; 112 | proof 113 | .serialize_compressed(&mut serialized_proof[..]) 114 | .unwrap(); 115 | let proof = T::Proof::deserialize_compressed(&serialized_proof[..]).unwrap(); 116 | 117 | let t_verify = start_timer!(|| "Verifying"); 118 | let valid = test.verify(&vk, commitments_to_preprocessed_polynomials, proof); 119 | end_timer!(t_verify); 120 | 121 | end_timer!(t_test); 122 | 123 | println!( 124 | "proof size = {}, preprocessed data size = {}", 125 | proof_size, preprocessed_size 126 | ); 127 | 128 | assert!(valid); 129 | } 130 | 131 | #[test] 132 | #[ignore] 133 | fn test_vanilla_plonk_batch_kzg_opening() { 134 | _test_vanilla_plonk_opening::<_, KZG, PlonkBatchKzgTest<_, _>>(16); 135 | } 136 | 137 | #[test] 138 | #[ignore] 139 | fn test_vanilla_plonk_with_fflonk_opening() { 140 | _test_vanilla_plonk_opening::<_, KZG, PlonkWithFflonkTest<_, _>>(16); 141 | } 142 | -------------------------------------------------------------------------------- /src/pcs/kzg/params.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::pairing::Pairing; 2 | use ark_ec::AffineRepr; 3 | use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; 4 | use ark_serialize::*; 5 | use ark_std::{vec, vec::Vec}; 6 | 7 | use crate::pcs::kzg::lagrange::LagrangianCK; 8 | use crate::pcs::kzg::urs::URS; 9 | use crate::pcs::{CommitterKey, PcsParams, RawVerifierKey, VerifierKey}; 10 | 11 | impl PcsParams for URS { 12 | type CK = 
KzgCommitterKey; 13 | type VK = KzgVerifierKey; 14 | type RVK = RawKzgVerifierKey; 15 | 16 | fn ck(&self) -> Self::CK { 17 | let monomial = MonomialCK { 18 | powers_in_g1: self.powers_in_g1.clone(), 19 | }; 20 | KzgCommitterKey { 21 | monomial, 22 | lagrangian: None, 23 | } 24 | } 25 | 26 | fn ck_with_lagrangian(&self, domain_size: usize) -> Self::CK { 27 | let domain = GeneralEvaluationDomain::new(domain_size).unwrap(); 28 | assert_eq!( 29 | domain.size(), 30 | domain_size, 31 | "domains of size {} are not supported", 32 | domain_size 33 | ); 34 | assert!(domain_size <= self.powers_in_g1.len()); 35 | let monomial = MonomialCK { 36 | powers_in_g1: self.powers_in_g1[0..domain_size].to_vec(), 37 | }; 38 | let lagrangian = Some(monomial.to_lagrangian(domain)); 39 | KzgCommitterKey { 40 | monomial, 41 | lagrangian, 42 | } 43 | } 44 | 45 | fn vk(&self) -> Self::VK { 46 | self.raw_vk().prepare() 47 | } 48 | 49 | /// Non-prepared verifier key. Can be used for serialization. 50 | fn raw_vk(&self) -> Self::RVK { 51 | assert!(self.powers_in_g1.len() > 0, "no G1 generator"); 52 | assert!( 53 | self.powers_in_g2.len() > 1, 54 | "{} powers in G2", 55 | self.powers_in_g2.len() 56 | ); 57 | 58 | RawKzgVerifierKey { 59 | g1: self.powers_in_g1[0], 60 | g2: self.powers_in_g2[0], 61 | tau_in_g2: self.powers_in_g2[1], 62 | } 63 | } 64 | } 65 | 66 | #[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] 67 | pub struct KzgCommitterKey { 68 | pub monomial: MonomialCK, 69 | pub lagrangian: Option>, 70 | } 71 | 72 | /// Used to commit to and to open univariate polynomials of degree up to self.max_degree(). 73 | #[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] 74 | pub struct MonomialCK { 75 | // G1, tau.G1, tau^2.G1, ..., tau^n1.G1 76 | pub(crate) powers_in_g1: Vec, 77 | } 78 | 79 | impl CommitterKey for MonomialCK { 80 | fn max_degree(&self) -> usize { 81 | self.powers_in_g1.len() - 1 82 | } 83 | } 84 | 85 | impl CommitterKey for KzgCommitterKey { 86 | fn max_degree(&self) -> usize { 87 | self.monomial.max_degree() 88 | } 89 | } 90 | 91 | /// Verifier key with G2 elements not "prepared". Exists only to be serializable. 92 | /// KzgVerifierKey is used for verification. 93 | #[derive(Clone, Debug, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] 94 | pub struct RawKzgVerifierKey { 95 | pub g1: E::G1Affine, 96 | // generator of G1 97 | pub g2: E::G2Affine, 98 | // generator of G2 99 | pub tau_in_g2: E::G2Affine, // tau.g2 100 | } 101 | 102 | impl RawVerifierKey for RawKzgVerifierKey { 103 | type VK = KzgVerifierKey; 104 | 105 | /// Returns the key that is used to verify openings in a single point. It has points in G2 "prepared". 106 | /// "Preparation" is a pre-computation that makes pairing computation with these points more efficient. 107 | /// At the same time usual arithmetic operations are not implemented for "prepared" points. 108 | fn prepare(&self) -> KzgVerifierKey { 109 | KzgVerifierKey { 110 | g1: self.g1, 111 | g2: self.g2.into(), 112 | tau_in_g2: self.tau_in_g2.into(), 113 | } 114 | } 115 | } 116 | 117 | /// "Prepared" verifier key capable of verifying opening in a single point, given the commitment is in G1. 118 | /// Use RawKzgVerifierKey for serialization. 
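/// Verifying an opening proof `W` for a claim `f(z) = v` against a commitment `C` boils down to the
/// standard KZG pairing check, e.g. in the rearranged form `e(C - v.g1 + z.W, g2) = e(W, tau_in_g2)`,
/// where both G2 points appear only as right-hand pairing inputs, hence they are kept prepared.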
119 | #[derive(Clone, Debug)] 120 | pub struct KzgVerifierKey { 121 | // generator of G1 122 | pub(crate) g1: E::G1Affine, 123 | // G1Prepared is just a wrapper around G1Affine // TODO: fixed-base precomputations 124 | // generator of G2, prepared 125 | pub(crate) g2: E::G2Prepared, 126 | // G2Prepared can be used as a pairing RHS only 127 | // tau.g2, prepared 128 | pub(crate) tau_in_g2: E::G2Prepared, // G2Prepared can be used as a pairing RHS only 129 | } 130 | 131 | impl VerifierKey for KzgVerifierKey {} 132 | 133 | impl From> for KzgCommitterKey { 134 | fn from(vk: KzgVerifierKey) -> Self { 135 | let monomial = MonomialCK { 136 | powers_in_g1: vec![vk.g1], 137 | }; 138 | Self { 139 | monomial, 140 | lagrangian: None, 141 | } 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /src/shplonk.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::PrimeField; 2 | use ark_poly::{DenseUVPolynomial, Polynomial}; 3 | use ark_serialize::*; 4 | use ark_std::collections::BTreeSet; 5 | use ark_std::marker::PhantomData; 6 | use ark_std::vec::Vec; 7 | 8 | use crate::aggregation::multiple::{ 9 | aggregate_claims, aggregate_polys, group_by_commitment, Transcript, 10 | }; 11 | use crate::pcs::PCS; 12 | use crate::Poly; 13 | 14 | pub struct Shplonk> { 15 | _field: PhantomData, 16 | _pcs: PhantomData, 17 | } 18 | 19 | #[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] 20 | pub struct AggregateProof> { 21 | agg_proof: CS::C, 22 | opening_proof: CS::Proof, 23 | } 24 | 25 | impl> Shplonk { 26 | pub fn open_many>( 27 | ck: &CS::CK, 28 | fs: &[Poly], 29 | xss: &[BTreeSet], 30 | transcript: &mut T, 31 | ) -> AggregateProof { 32 | let (agg_poly, zeta, agg_proof) = aggregate_polys::(ck, fs, xss, transcript); 33 | assert!(agg_poly.evaluate(&zeta).is_zero()); 34 | let opening_proof = CS::open(ck, &agg_poly, zeta).unwrap(); 35 | AggregateProof { 36 | agg_proof, 37 | opening_proof, 38 | } 39 | } 40 | 41 | pub fn verify_many>( 42 | vk: &CS::VK, 43 | fcs: &[CS::C], 44 | proof: AggregateProof, 45 | xss: &Vec>, 46 | yss: &Vec>, 47 | transcript: &mut T, 48 | ) -> bool { 49 | let AggregateProof { 50 | agg_proof, 51 | opening_proof, 52 | } = proof; 53 | let onec = CS::commit( 54 | &vk.clone().into(), 55 | &Poly::from_coefficients_slice(&[F::one()]), 56 | ) 57 | .unwrap(); 58 | let claims = group_by_commitment(fcs, xss, yss); 59 | let agg_claim = aggregate_claims::(claims, &agg_proof, &onec, transcript); 60 | CS::verify( 61 | vk, 62 | agg_claim.c, 63 | agg_claim.xs[0], 64 | agg_claim.ys[0], 65 | opening_proof, 66 | ) 67 | .is_ok() 68 | } 69 | } 70 | 71 | #[cfg(test)] 72 | pub(crate) mod tests { 73 | use ark_std::iter::FromIterator; 74 | use ark_std::rand::Rng; 75 | use ark_std::test_rng; 76 | 77 | use crate::pcs::IdentityCommitment; 78 | use crate::pcs::{Commitment, PcsParams}; 79 | use crate::tests::{TestField, TestKzg}; 80 | use crate::Poly; 81 | 82 | use super::*; 83 | 84 | pub struct TestOpening> { 85 | pub fs: Vec>, 86 | pub fcs: Vec, 87 | pub xss: Vec>, 88 | pub yss: Vec>, 89 | } 90 | 91 | pub(crate) fn random_xss( 92 | rng: &mut R, 93 | t: usize, // number of polynomials 94 | max_m: usize, // maximal number of opening points per polynomial 95 | ) -> Vec> { 96 | (0..t) 97 | .map(|_| { 98 | (0..rng.gen_range(1..max_m)) 99 | .map(|_| F::rand(rng)) 100 | .collect::>() 101 | }) 102 | .collect() 103 | } 104 | 105 | pub(crate) fn random_opening( 106 | rng: &mut R, 107 | ck: &CS::CK, 108 | d: usize, // degree of 
polynomials 109 | t: usize, // number of polynomials 110 | xss: Vec>, // vecs of opening points per polynomial 111 | ) -> TestOpening 112 | where 113 | R: Rng, 114 | F: PrimeField, 115 | CS: PCS, 116 | { 117 | // polynomials 118 | let fs: Vec<_> = (0..t).map(|_| Poly::::rand(d, rng)).collect(); 119 | // commitments 120 | let fcs: Vec<_> = fs.iter().map(|fi| CS::commit(&ck, fi).unwrap()).collect(); 121 | 122 | // evaluations per polynomial 123 | let yss: Vec<_> = fs 124 | .iter() 125 | .zip(xss.iter()) 126 | .map(|(f, xs)| xs.iter().map(|x| f.evaluate(x)).collect::>()) 127 | .collect(); 128 | 129 | TestOpening { fs, fcs, xss, yss } 130 | } 131 | 132 | fn _test_shplonk>() { 133 | let rng = &mut test_rng(); 134 | 135 | let d = 15; // degree of polynomials 136 | let t = 4; // number of polynomials 137 | let max_m = 3; // maximal number of opening points per polynomial 138 | 139 | let params = CS::setup(d, rng); 140 | 141 | let xss = random_xss(rng, t, max_m); 142 | let opening = random_opening::<_, _, CS>(rng, ¶ms.ck(), d, t, xss); 143 | 144 | let sets_of_xss: Vec> = opening 145 | .xss 146 | .iter() 147 | .map(|xs| BTreeSet::from_iter(xs.iter().cloned())) 148 | .collect(); 149 | 150 | let transcript = &mut (F::rand(rng), F::rand(rng)); 151 | 152 | let proof = 153 | Shplonk::::open_many(¶ms.ck(), &opening.fs, &sets_of_xss, transcript); 154 | 155 | assert!(Shplonk::::verify_many( 156 | ¶ms.vk(), 157 | &opening.fcs, 158 | proof, 159 | &opening.xss, 160 | &opening.yss, 161 | transcript 162 | )) 163 | } 164 | 165 | #[test] 166 | fn test_shplonk() { 167 | _test_shplonk::(); 168 | _test_shplonk::(); 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /tests/plonk/fflonky.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use ark_ff::{PrimeField, UniformRand, Zero}; 4 | use ark_poly::Polynomial; 5 | use ark_serialize::*; 6 | use ark_std::rand::Rng; 7 | use ark_std::test_rng; 8 | use ark_std::{end_timer, start_timer}; 9 | 10 | use w3f_pcs::fflonk::Fflonk; 11 | use w3f_pcs::pcs::PcsParams; 12 | use w3f_pcs::pcs::PCS; 13 | use w3f_pcs::shplonk::AggregateProof; 14 | use w3f_pcs::{FflonkyKzg, Poly}; 15 | 16 | use crate::{DecoyPlonk, VanillaPlonkAssignments}; 17 | 18 | impl VanillaPlonkAssignments { 19 | fn combinations(&self) -> Vec> { 20 | let zeta: F = u128::rand(&mut test_rng()).into(); 21 | let omega = self.domain.group_gen; 22 | let t0 = self.quotient(&self.arithmetic_constraint); 23 | let t1 = self.quotient(&self.permutation_constraint_1); 24 | let t2 = self.quotient(&self.permutation_constraint_2); 25 | let z = self.permutation_polynomial.clone(); 26 | 27 | let fs0 = self.preprocessed_polynomials.clone(); 28 | let mut fs1 = self.wire_polynomials.clone(); 29 | fs1.push(t0); 30 | let fs2 = vec![z, t1, t2, Poly::zero()]; //TODO: zero is not strictly necessary 31 | vec![ 32 | Combination { 33 | fs: fs0, 34 | roots_of_xs: vec![zeta], 35 | }, 36 | Combination { 37 | fs: fs1, 38 | roots_of_xs: vec![zeta], 39 | }, 40 | Combination { 41 | fs: fs2, 42 | roots_of_xs: vec![zeta, zeta * omega], 43 | }, 44 | ] 45 | } 46 | } 47 | 48 | struct Combination { 49 | fs: Vec>, 50 | roots_of_xs: Vec, 51 | } 52 | 53 | impl Combination { 54 | fn max_degree(&self) -> usize { 55 | self.fs.iter().map(|f| f.degree()).max().unwrap() 56 | } 57 | 58 | fn t(&self) -> usize { 59 | self.fs.len().next_power_of_two() //TODO: should work fine for other roots 60 | } 61 | 62 | fn max_combined_degree(&self) -> 
usize { 63 | self.t() * (self.max_degree() + 1) - 1 64 | } 65 | 66 | fn xs(&self) -> Vec { 67 | self.roots_of_xs 68 | .iter() // opening points 69 | .map(|root| root.pow([self.t() as u64])) 70 | .collect() 71 | } 72 | 73 | fn yss(&self) -> Vec> { 74 | self.xs() 75 | .iter() 76 | .map(|x| self.fs.iter().map(|f| f.evaluate(x)).collect()) 77 | .collect() 78 | } 79 | } 80 | 81 | pub struct PlonkWithFflonkTest> { 82 | combinations: Vec>, 83 | cs: PhantomData, 84 | } 85 | 86 | impl> PlonkWithFflonkTest { 87 | fn _commit_proof_polynomials(&self, ck: &CS::CK) -> Vec { 88 | let t_commitment = start_timer!(|| format!( 89 | "Committing to {} proof polynomials", 90 | self.combinations.len() - 1 91 | )); 92 | let commitments = self 93 | .combinations 94 | .iter() 95 | .enumerate() 96 | .skip(1) // preprocessing 97 | .map(|(i, _)| self._commit_single(i, ck)) 98 | .collect(); 99 | end_timer!(t_commitment); 100 | commitments 101 | } 102 | 103 | fn _commit_single(&self, i: usize, ck: &CS::CK) -> CS::C { 104 | let combination = &self.combinations[i]; 105 | let t_commit = start_timer!(|| format!("Committing to combination #{}", i)); 106 | 107 | let t_combine = start_timer!(|| format!( 108 | "combining {} polynomials: t = {}, max_degree = {}", 109 | combination.fs.len(), 110 | combination.t(), 111 | combination.max_degree() 112 | )); 113 | let poly = Fflonk::combine(combination.t(), &combination.fs); 114 | end_timer!(t_combine); 115 | 116 | let t_commit_combined = start_timer!(|| format!( 117 | "committing to the combined polynomial: degree = {}", 118 | poly.degree() 119 | )); 120 | let commitment = CS::commit(ck, &poly).unwrap(); 121 | end_timer!(t_commit_combined); 122 | 123 | end_timer!(t_commit); 124 | commitment 125 | } 126 | 127 | fn _open(&self, transcript: &mut merlin::Transcript, ck: &CS::CK) -> AggregateProof { 128 | let (ts, (fss, xss)): (Vec<_>, (Vec<_>, Vec<_>)) = self 129 | .combinations 130 | .iter() 131 | .map(|c| (c.t(), (c.fs.clone(), c.roots_of_xs.clone()))) 132 | .unzip(); 133 | 134 | let t_open = start_timer!(|| "Opening"); 135 | let proof = FflonkyKzg::::open(ck, &fss, &ts, &xss, transcript); 136 | end_timer!(t_open); 137 | proof 138 | } 139 | 140 | fn _evaluate(&self) -> Vec>> { 141 | self.combinations.iter().map(|c| c.yss()).collect() 142 | } 143 | } 144 | 145 | #[derive(CanonicalSerialize, CanonicalDeserialize)] 146 | pub struct FflonkyPlonkProof> { 147 | cs_proof: AggregateProof, 148 | evals: Vec>>, 149 | commitments: Vec, 150 | } 151 | 152 | impl> DecoyPlonk for PlonkWithFflonkTest { 153 | type Proof = FflonkyPlonkProof; 154 | 155 | fn new(polys: VanillaPlonkAssignments, _rng: &mut R) -> Self { 156 | Self { 157 | combinations: polys.combinations(), 158 | cs: PhantomData, 159 | } 160 | } 161 | 162 | fn setup(&mut self, rng: &mut R) -> (CS::CK, CS::VK) { 163 | let max_degree = self 164 | .combinations 165 | .iter() 166 | .map(|c| c.max_combined_degree()) 167 | .max() 168 | .unwrap(); 169 | let params = CS::setup(max_degree, rng); 170 | (params.ck(), params.vk()) 171 | } 172 | 173 | fn preprocess(&mut self, ck: &CS::CK) -> Vec { 174 | vec![self._commit_single(0, ck)] 175 | } 176 | 177 | fn prove(&mut self, ck: &CS::CK) -> FflonkyPlonkProof { 178 | let empty_transcript = &mut merlin::Transcript::new(b"plonk-fflonk-shplonk-kzg"); 179 | 180 | let commitments = self._commit_proof_polynomials(ck); 181 | let cs_proof = self._open(empty_transcript, ck); 182 | let evals = self._evaluate(); 183 | FflonkyPlonkProof { 184 | cs_proof, 185 | evals, 186 | commitments, 187 | } 188 | } 189 | 190 | fn 
verify( 191 | &self, 192 | vk: &CS::VK, 193 | preprocessed_commitments: Vec, 194 | proof: FflonkyPlonkProof, 195 | ) -> bool { 196 | let empty_transcript = &mut merlin::Transcript::new(b"plonk-fflonk-shplonk-kzg"); 197 | 198 | let (ts, xss): (Vec<_>, Vec<_>) = self 199 | .combinations 200 | .iter() 201 | .map(|c| (c.t(), c.roots_of_xs.clone())) 202 | .unzip(); 203 | let commitments = [preprocessed_commitments, proof.commitments].concat(); 204 | FflonkyKzg::::verify( 205 | vk, 206 | &commitments, 207 | &ts, 208 | proof.cs_proof, 209 | &xss, 210 | &proof.evals, 211 | empty_transcript, 212 | ) 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /tests/plonk/README.md: -------------------------------------------------------------------------------- 1 | > cargo test test_vanilla_plonk_batch_kzg_opening --release --features "parallel print-trace" -- --nocapture --ignored 2 | 3 | ``` 4 | running 1 test 5 | Start: domain_size = 65536, curve = ark_bls12_381 6 | ··Start: Setup 7 | ····Start: Computing 196606 scalars powers 8 | ····End: Computing 196606 scalars powers .......................................9.130ms 9 | ····Start: 196606-scalar mul in G1 10 | ····End: 196606-scalar mul in G1 ...............................................1.291s 11 | ····Start: 2-scalar mul in G1 12 | ····End: 2-scalar mul in G1 ....................................................5.446ms 13 | ··End: Setup ...................................................................1.318s 14 | ··Start: Preprocessing 15 | ····Start: Committing to batch of 8 polynomials 16 | ······Start: Committing to degree 65535 polynomials 17 | ······End: Committing to degree 65535 polynomials ..............................415.679ms 18 | ······Start: Committing to degree 65535 polynomials 19 | ······End: Committing to degree 65535 polynomials ..............................390.461ms 20 | ······Start: Committing to degree 65535 polynomials 21 | ······End: Committing to degree 65535 polynomials ..............................417.321ms 22 | ······Start: Committing to degree 65535 polynomials 23 | ······End: Committing to degree 65535 polynomials ..............................447.720ms 24 | ······Start: Committing to degree 65535 polynomials 25 | ······End: Committing to degree 65535 polynomials ..............................440.322ms 26 | ······Start: Committing to degree 65535 polynomials 27 | ······End: Committing to degree 65535 polynomials ..............................423.799ms 28 | ······Start: Committing to degree 65535 polynomials 29 | ······End: Committing to degree 65535 polynomials ..............................503.049ms 30 | ······Start: Committing to degree 65535 polynomials 31 | ······End: Committing to degree 65535 polynomials ..............................439.959ms 32 | ····End: Committing to batch of 8 polynomials ..................................3.481s 33 | ··End: Preprocessing ...........................................................3.481s 34 | ··Start: Proving 35 | ····Start: Committing to batch of 3 polynomials 36 | ······Start: Committing to degree 65535 polynomials 37 | ······End: Committing to degree 65535 polynomials ..............................447.844ms 38 | ······Start: Committing to degree 65535 polynomials 39 | ······End: Committing to degree 65535 polynomials ..............................436.756ms 40 | ······Start: Committing to degree 65535 polynomials 41 | ······End: Committing to degree 65535 polynomials ..............................419.075ms 42 | ····End: 
Committing to batch of 3 polynomials ..................................1.304s 43 | ····Start: Committing to degree 65535 polynomials 44 | ····End: Committing to degree 65535 polynomials ................................402.517ms 45 | ····Start: Committing to degree 196604 polynomials 46 | ····End: Committing to degree 196604 polynomials ...............................1.117s 47 | ····Start: Extra: commiting to the linearization polynomial 48 | ······Start: Committing to degree 65535 polynomials 49 | ······End: Committing to degree 65535 polynomials ..............................420.764ms 50 | ····End: Extra: commiting to the linearization polynomial ......................422.113ms 51 | ··End: Proving .................................................................4.155s 52 | ··Start: Verifying 53 | ····Start: Reconstructing the commitment to the linearization polynomial: 7-multiexp 54 | ····End: Reconstructing the commitment to the linearization polynomial: 7-multiexp 1.072ms 55 | ····Start: KZG batch verification 56 | ······Start: aggregate evaluation claims at zeta 57 | ······End: aggregate evaluation claims at zeta .................................465.700µs 58 | ······Start: batched KZG openning 59 | ······End: batched KZG openning ................................................3.311ms 60 | ····End: KZG batch verification ................................................4.219ms 61 | ··End: Verifying ...............................................................5.789ms 62 | End: domain_size = 65536, curve = ark_bls12_381 ...............................8.963s 63 | proof size = 624, preprocessed data size = 392 64 | ``` 65 | 66 | > cargo test test_vanilla_plonk_with_fflonk_opening --release --features "parallel print-trace" -- --nocapture --ignored 67 | 68 | ``` 69 | Start: domain_size = 65536, curve = ark_bls12_381 70 | ··Start: Setup 71 | ····Start: Computing 786420 scalars powers 72 | ····End: Computing 786420 scalars powers .......................................32.611ms 73 | ····Start: 786420-scalar mul in G1 74 | ····End: 786420-scalar mul in G1 ...............................................2.971s 75 | ····Start: 2-scalar mul in G1 76 | ····End: 2-scalar mul in G1 ....................................................3.355ms 77 | ··End: Setup ...................................................................3.035s 78 | ··Start: Preprocessing 79 | ····Start: Committing to combination #0 80 | ······Start: combining 8 polynomials: t = 8, max_degree = 65535 81 | ······End: combining 8 polynomials: t = 8, max_degree = 65535 ..................9.411ms 82 | ······Start: committing to the combined polynomial: degree = 524287 83 | ······End: committing to the combined polynomial: degree = 524287 ..............1.952s 84 | ····End: Committing to combination #0 ..........................................1.962s 85 | ··End: Preprocessing ...........................................................1.963s 86 | ··Start: Proving 87 | ····Start: Committing to 2 proof polynomials 88 | ······Start: Committing to combination #1 89 | ········Start: combining 4 polynomials: t = 4, max_degree = 131069 90 | ········End: combining 4 polynomials: t = 4, max_degree = 131069 ...............5.066ms 91 | ········Start: committing to the combined polynomial: degree = 524279 92 | ········End: committing to the combined polynomial: degree = 524279 ............1.526s 93 | ······End: Committing to combination #1 ........................................1.532s 94 | ······Start: Committing to combination #2 95 | 
········Start: combining 4 polynomials: t = 4, max_degree = 196604 96 | ········End: combining 4 polynomials: t = 4, max_degree = 196604 ...............7.040ms 97 | ········Start: committing to the combined polynomial: degree = 786418 98 | ········End: committing to the combined polynomial: degree = 786418 ............1.970s 99 | ······End: Committing to combination #2 ........................................1.978s 100 | ····End: Committing to 2 proof polynomials .....................................3.515s 101 | ····Start: Opening 102 | ······Start: polynomial divisions 103 | ······End: polynomial divisions ................................................454.483ms 104 | ······Start: commitment to a degree-786410 polynomial 105 | ······End: commitment to a degree-786410 polynomial ............................3.255s 106 | ······Start: linear combination of polynomials 107 | ······End: linear combination of polynomials ...................................87.627ms 108 | ····End: Opening ...............................................................8.278s 109 | ··End: Proving .................................................................11.819s 110 | ··Start: Verifying 111 | ····Start: barycentric evaluations 112 | ····End: barycentric evaluations ...............................................93.500µs 113 | ····Start: multiexp 114 | ····End: multiexp ..............................................................545.300µs 115 | ··End: Verifying ...............................................................5.437ms 116 | End: domain_size = 65536, curve = ark_bls12_381 ...............................16.826s 117 | proof size = 904, preprocessed data size = 56 118 | ``` -------------------------------------------------------------------------------- /src/fflonk.rs: -------------------------------------------------------------------------------- 1 | //! [fflonk: a Fast-Fourier inspired verifier efficient version of PlonK](https://eprint.iacr.org/2021/1167) 2 | //! by Ariel Gabizon and Zachary J. Williamson suggests a reduction from opening multiple 3 | //! polynomials each in the same point to opening a single polynomial in multiple points. 4 | 5 | use ark_ff::FftField; 6 | use ark_poly::DenseUVPolynomial; 7 | use ark_std::convert::TryInto; 8 | use ark_std::marker::PhantomData; 9 | use ark_std::ops::Div; 10 | use ark_std::{vec, vec::Vec}; 11 | 12 | use crate::utils; 13 | 14 | pub struct Fflonk> { 15 | _field: PhantomData, 16 | _poly: PhantomData
<P>
, 17 | } 18 | 19 | impl> Fflonk 20 | where 21 | for<'a, 'b> &'a P: Div<&'b P, Output = P>, 22 | { 23 | // Given `t` degree ` P { 26 | assert!(fs.len() <= t); 27 | let max_degree = fs.iter().map(|fi| fi.degree()).max().unwrap(); 28 | // Flattens the matrix (given as a list of rows) of coefficients by concatenating its columns. 29 | // Rows are right padded by 0s to `max_degree + 1`. If `fs.len() < t`, zero rows are added. 30 | let mut res = vec![F::zero(); t * (max_degree + 1)]; 31 | for (i, fi) in fs.iter().enumerate() { 32 | for (j, fij) in fi.coeffs().iter().enumerate() { 33 | res[t * j + i] = *fij; 34 | } 35 | } 36 | P::from_coefficients_vec(res) 37 | } 38 | 39 | // Given a `t`-th root `z` of `x` returns all the `t`-th roots of `x` 40 | // `z, zw, ..., zw^{t-1}`, where w is a primitive `t`-th root of unity. 41 | // TODO: fix the order 42 | pub fn roots(t: usize, root_t_of_x: F) -> Vec { 43 | let omega_t = F::get_root_of_unity(t.try_into().unwrap()).expect("root of unity not found"); 44 | let mut acc = root_t_of_x; 45 | let mut res = vec![root_t_of_x]; 46 | res.resize_with(t, || { 47 | acc *= omega_t; 48 | acc 49 | }); 50 | res 51 | } 52 | 53 | // The vanishing polynomial of the set of all the t-th roots of x, 54 | // given any of its t-th roots. 55 | // Z(x) = X^t-x 56 | fn z_of_roots(t: usize, root_t_of_x: F) -> P { 57 | let x = root_t_of_x.pow([t as u64]); 58 | let mut z = vec![F::zero(); t + 1]; // deg(Z) = t 59 | // coeffs(Z) = [-x, ..., 1] 60 | z[0] = -x; 61 | z[t] = F::one(); 62 | P::from_coefficients_vec(z) 63 | } 64 | 65 | // Reduces opening of f1,...,ft in 1 point to opening of g = combine(f1,...,ft) in t points. 66 | // The input opening is given as an evaluation point x (it's t-th root) 67 | // and a list of values fj(x), j=1,...,t. 68 | // The output opening is returned as the vanishing polynomial z of the points and the remainder r. 69 | pub fn opening_as_polynomials(t: usize, root_of_x: F, evals_at_x: &[F]) -> (P, P) { 70 | let z = Self::z_of_roots(t, root_of_x); 71 | let r = P::from_coefficients_slice(evals_at_x); 72 | (z, r) 73 | } 74 | 75 | // Let z be some t-th root of x. Then all the t roots of x of degree t are given by zj = z*w^j, j=0,...,t-1, where w is a primitive t-th root of unity. 76 | // Given vi=fi(x), i=0,...,t-1 -- evaluations of t polynomials each in the same point x, 77 | // computes sum(vi*zj^i, i=0,...,t-1), j=0,...,t-1. 78 | pub fn opening_as_points(t: usize, root_of_x: F, evals_at_x: &[F]) -> (Vec, Vec) { 79 | assert_eq!(evals_at_x.len(), t); //TODO: may be 0-padded 80 | let roots = Self::roots(t, root_of_x); 81 | let evals_at_roots = roots 82 | .iter() 83 | .map(|&root| { 84 | evals_at_x 85 | .iter() 86 | .zip(utils::powers(root)) 87 | .map(|(&eval, next_root)| eval * next_root) 88 | .sum() 89 | }) 90 | .collect(); 91 | (roots, evals_at_roots) 92 | } 93 | 94 | // Reduces opening of f1,...,ft in m points to opening of g = combine(f1,...,ft) in m*t points, 95 | // The input opening is given as a list of evaluation points x1,...,xm (their t-th roots) 96 | // and a list of lists of values [[fj(xi), j=1,...,t], i=1,...,m]. 97 | // The output opening is returned as a list of evaluation points and a list of values. 
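// A small worked example (illustrative sketch, not from the source docs): take t = 2, m = 1,
// and a point x with square root z, so g(X) = f0(X^2) + X*f1(X^2) = combine(2, [f0, f1]).
// The 2nd roots of x are z and -z (the primitive 2nd root of unity is -1), and
//   g(z)  = f0(x) + z*f1(x),
//   g(-z) = f0(x) - z*f1(x),
// which is exactly the (xs, vs) pair returned below for roots_of_xs = [z] and
// evals_at_xs = [[f0(x), f1(x)]].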
98 | pub fn multiopening(t: usize, roots_of_xs: &[F], evals_at_xs: &[Vec]) -> (Vec, Vec) { 99 | assert_eq!(roots_of_xs.len(), evals_at_xs.len()); 100 | assert!(evals_at_xs.iter().all(|evals_at_x| evals_at_x.len() == t)); 101 | let polys = evals_at_xs 102 | .iter() 103 | .map(|evals_at_x| P::from_coefficients_slice(evals_at_x)); 104 | let roots = roots_of_xs 105 | .iter() 106 | .map(|&root_of_x| Self::roots(t, root_of_x)); 107 | let xs: Vec<_> = roots.clone().flatten().collect(); 108 | let vs: Vec<_> = polys 109 | .zip(roots) 110 | .flat_map(|(poly, roots)| Self::multievaluate(&poly, &roots)) 111 | .collect(); 112 | (xs, vs) 113 | } 114 | 115 | // TODO: improve 116 | fn multievaluate(poly: &P, xs: &[F]) -> Vec { 117 | assert!(poly.degree() + 1 <= xs.len()); 118 | xs.iter().map(|p| poly.evaluate(p)).collect() 119 | } 120 | } 121 | 122 | #[cfg(test)] 123 | mod tests { 124 | use ark_ff::Field; 125 | use ark_poly::univariate::{DenseOrSparsePolynomial, DensePolynomial}; 126 | use ark_poly::Polynomial; 127 | use ark_std::{test_rng, UniformRand, Zero}; 128 | 129 | use super::*; 130 | 131 | type F = ark_bw6_761::Fr; 132 | type P = DensePolynomial; 133 | 134 | type FflonkBw6 = Fflonk; 135 | 136 | #[test] 137 | fn test_single_opening() { 138 | let rng = &mut test_rng(); 139 | 140 | let d = 15; // degree of polynomials 141 | let t = 4; // number of polynomials 142 | let root_t_of_x = F::rand(rng); // a t-th root of the opening point 143 | let x = root_t_of_x.pow([t as u64]); // the opening point 144 | 145 | let fs: Vec
<P>
= (0..t).map(|_| P::rand(d, rng)).collect(); 146 | let fs_at_x: Vec = fs 147 | .iter() // 148 | .map(|fi| fi.evaluate(&x)) 149 | .collect(); 150 | 151 | let g = FflonkBw6::combine(t, &fs); 152 | 153 | let (z, r) = FflonkBw6::opening_as_polynomials(t, root_t_of_x, &fs_at_x); 154 | let (xs, vs) = FflonkBw6::opening_as_points(t, root_t_of_x, &fs_at_x); 155 | 156 | // g(xi) = vi 157 | assert!(xs.iter().zip(vs.iter()).all(|(x, &v)| g.evaluate(x) == v)); 158 | // z -- vanishes xs 159 | assert!(xs.iter().all(|x| z.evaluate(x).is_zero())); 160 | // r -- interpolates vs in xs 161 | assert!(xs.iter().zip(vs.iter()).all(|(x, &v)| r.evaluate(x) == v)); 162 | // g mod z = r 163 | let (_, g_mod_z) = 164 | DenseOrSparsePolynomial::divide_with_q_and_r(&(&g.into()), &(&z.into())).unwrap(); 165 | assert_eq!(r, g_mod_z); 166 | } 167 | 168 | #[test] 169 | fn test_multiopening() { 170 | let rng = &mut test_rng(); 171 | 172 | let d = 15; // degree of polynomials 173 | let t = 4; // number of polynomials 174 | let m = 3; // number of opening points 175 | 176 | let roots_of_xs: Vec = (0..m) // t-th roots of opening points 177 | .map(|_| F::rand(rng)) 178 | .collect(); 179 | let xs: Vec = roots_of_xs 180 | .iter() // opening points 181 | .map(|root_t_of_x| root_t_of_x.pow([t as u64])) 182 | .collect(); 183 | 184 | let fs: Vec
<P>
= (0..t).map(|_| P::rand(d, rng)).collect(); 185 | let fs_at_xs: Vec> = xs 186 | .iter() 187 | .map(|x| fs.iter().map(|fi| fi.evaluate(&x)).collect()) 188 | .collect(); 189 | 190 | let g = FflonkBw6::combine(t, &fs); 191 | 192 | let (xs, vs) = FflonkBw6::multiopening(t, &roots_of_xs, &fs_at_xs); 193 | 194 | assert!(xs.iter().zip(vs).all(|(x, v)| g.evaluate(x) == v)); 195 | } 196 | 197 | #[test] 198 | fn test_openings_consistency() { 199 | let rng = &mut test_rng(); 200 | 201 | let d = 15; // degree of polynomials 202 | let t = 4; // number of polynomials 203 | let root_t_of_x = F::rand(rng); // a t-th root of the opening point 204 | let x = root_t_of_x.pow([t as u64]); // the opening point 205 | 206 | let fs: Vec
<P>
= (0..t).map(|_| P::rand(d, rng)).collect(); 207 | let fs_at_x: Vec = fs 208 | .iter() // 209 | .map(|fi| fi.evaluate(&x)) 210 | .collect(); 211 | 212 | let (z, r) = FflonkBw6::opening_as_polynomials(t, root_t_of_x, &fs_at_x); 213 | let (xs, vs) = FflonkBw6::multiopening(t, &[root_t_of_x], &[fs_at_x]); 214 | 215 | assert!(xs.iter().all(|x| z.evaluate(x).is_zero())); 216 | assert!(xs.iter().zip(vs).all(|(x, v)| r.evaluate(x) == v)); 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | 3 | use ark_ff::PrimeField; 4 | use ark_poly::univariate::{DenseOrSparsePolynomial, DensePolynomial}; 5 | use ark_std::marker::PhantomData; 6 | use ark_std::rand::Rng; 7 | use ark_std::vec::Vec; 8 | 9 | use aggregation::multiple::Transcript; 10 | 11 | use crate::fflonk::Fflonk; 12 | use crate::pcs::PCS; 13 | use crate::shplonk::{AggregateProof, Shplonk}; 14 | 15 | pub mod aggregation; 16 | pub mod fflonk; 17 | pub mod pcs; 18 | pub mod shplonk; 19 | pub mod utils; 20 | 21 | pub use ark_poly::polynomial::DenseUVPolynomial; 22 | pub use ark_poly::polynomial::Polynomial; 23 | 24 | pub type Poly = DensePolynomial; // currently SparsePolynomial doesn't implement DenseUVPolynomial anyway 25 | 26 | pub trait EuclideanPolynomial { 27 | fn divide_with_q_and_r(&self, divisor: &Poly) -> (Poly, Poly); 28 | } 29 | 30 | impl EuclideanPolynomial for Poly { 31 | fn divide_with_q_and_r(&self, divisor: &Poly) -> (Poly, Poly) { 32 | let a: DenseOrSparsePolynomial = self.into(); 33 | let b: DenseOrSparsePolynomial = divisor.into(); 34 | a.divide_with_q_and_r(&b).unwrap() 35 | } 36 | } 37 | 38 | pub struct FflonkyKzg> { 39 | _field: PhantomData, 40 | _pcs: PhantomData, 41 | } 42 | 43 | impl> FflonkyKzg { 44 | pub fn setup(max_degree: usize, rng: &mut R) -> CS::Params { 45 | CS::setup(max_degree, rng) 46 | } 47 | 48 | pub fn open>( 49 | ck: &CS::CK, 50 | fss: &[Vec>], // vecs of polynomials to combine 51 | ts: &[usize], // lengths of each combination 52 | // TODO: ts can be inferred from li := len(fss[i]) as ti = min(x : x >= li and x | p-1) 53 | rootss: &[Vec], // sets of opening points per a combined polynomial presented as t-th roots 54 | transcript: &mut T, 55 | ) -> AggregateProof { 56 | let k = fss.len(); 57 | assert_eq!(k, ts.len()); 58 | assert_eq!(k, rootss.len()); 59 | let gs: Vec> = fss 60 | .iter() 61 | .zip(ts.iter()) 62 | .map(|(fs, t)| Fflonk::combine(*t, fs)) 63 | .collect(); 64 | let xss: Vec<_> = rootss 65 | .iter() 66 | .zip(ts.iter()) 67 | .map(|(roots, t)| { 68 | roots 69 | .iter() 70 | .flat_map(|root| Fflonk::>::roots(*t, *root)) 71 | .collect() 72 | }) 73 | .collect(); 74 | 75 | Shplonk::::open_many(ck, &gs, &xss, transcript) 76 | } 77 | 78 | pub fn verify>( 79 | vk: &CS::VK, 80 | gcs: &[CS::C], 81 | ts: &[usize], 82 | proof: AggregateProof, 83 | rootss: &[Vec], 84 | vss: &[Vec>], 85 | transcript: &mut T, 86 | ) -> bool { 87 | let (xss, yss) = rootss 88 | .iter() 89 | .zip(vss.iter()) 90 | .zip(ts.iter()) 91 | .map(|((roots, vs), t)| Fflonk::>::multiopening(*t, roots, vs)) 92 | .unzip(); 93 | 94 | Shplonk::::verify_many(vk, &gcs, proof, &xss, &yss, transcript) 95 | } 96 | 97 | pub fn open_single>( 98 | ck: &CS::CK, 99 | fs: &[Poly], // polynomials to combine 100 | t: usize, // lengths of the combination 101 | roots: &[F], // set of opening points presented as t-th roots 102 | transcript: &mut T, 103 | ) -> 
AggregateProof { 104 | Self::open(ck, &[fs.to_vec()], &[t], &[roots.to_vec()], transcript) 105 | } 106 | 107 | pub fn verify_single>( 108 | vk: &CS::VK, 109 | gc: &CS::C, 110 | t: usize, 111 | proof: AggregateProof, 112 | roots: &[F], 113 | vss: &[Vec], // evaluations per point // TODO: shplonk provides API with evals per polynomial 114 | transcript: &mut T, 115 | ) -> bool { 116 | Self::verify( 117 | vk, 118 | &[(*gc).clone()], 119 | &[t], 120 | proof, 121 | &[roots.to_vec()], 122 | &[vss.to_vec()], 123 | transcript, 124 | ) 125 | } 126 | } 127 | 128 | #[cfg(test)] 129 | mod tests { 130 | use ark_ec::pairing::Pairing; 131 | use ark_poly::{DenseUVPolynomial, Polynomial}; 132 | use ark_std::rand::Rng; 133 | use ark_std::test_rng; 134 | use ark_std::vec; 135 | 136 | use crate::pcs::kzg::KZG; 137 | use crate::pcs::IdentityCommitment; 138 | use crate::pcs::PcsParams; 139 | 140 | use super::*; 141 | 142 | pub(crate) type TestCurve = ark_bls12_381::Bls12_381; 143 | pub(crate) type TestField = ::ScalarField; 144 | pub(crate) type TestKzg = KZG; 145 | 146 | pub(crate) type BenchCurve = ark_bw6_761::BW6_761; 147 | pub(crate) type BenchField = ::ScalarField; 148 | 149 | #[allow(dead_code)] // used by ignored tests 150 | pub(crate) type BenchKzg = KZG; 151 | 152 | pub const BENCH_DEG_LOG1: usize = 10; 153 | // pub const BENCH_DEG_LOG2: usize = 16; 154 | // const BENCH_DEG_LOG3: usize = 24; Eth 2.0 coming? 155 | 156 | impl> Transcript for (F, F) { 157 | fn get_gamma(&mut self) -> F { 158 | self.0 159 | } 160 | 161 | fn commit_to_q(&mut self, _q: &CS::C) {} 162 | 163 | fn get_zeta(&mut self) -> F { 164 | self.1 165 | } 166 | } 167 | 168 | fn generate_test_data( 169 | rng: &mut R, 170 | d: usize, // degree of polynomials 171 | t: usize, // number of polynomials 172 | m: usize, // number of opening points 173 | ) -> ( 174 | Vec>, // polynomials 175 | Vec, // roots of evaluation points 176 | Vec>, // evaluations per point 177 | ) 178 | where 179 | R: Rng, 180 | F: PrimeField, 181 | { 182 | // polynomials 183 | let fs: Vec> = (0..t).map(|_| Poly::rand(d, rng)).collect(); 184 | 185 | let roots: Vec<_> = (0..m).map(|_| F::rand(rng)).collect(); 186 | 187 | let xs: Vec = roots 188 | .iter() // opening points 189 | .map(|root| root.pow([t as u64])) 190 | .collect(); 191 | 192 | // evaluations per point 193 | let vss: Vec<_> = xs 194 | .iter() 195 | .map(|x| fs.iter().map(|f| f.evaluate(x)).collect::>()) 196 | .collect(); 197 | 198 | (fs, roots, vss) 199 | } 200 | 201 | fn _test_fflonk_single>() { 202 | let rng = &mut test_rng(); 203 | let transcript = &mut (F::rand(rng), F::rand(rng)); 204 | 205 | let params = FflonkyKzg::::setup(123, rng); 206 | 207 | let t = 4; // number of polynomials in a combination 208 | let m = 3; // number of opening points per a combination 209 | let d = 15; 210 | 211 | let (fs, roots, vss) = generate_test_data(rng, d, t, m); 212 | 213 | let g = Fflonk::combine(t, &fs); 214 | let gc = CS::commit(¶ms.ck(), &g).unwrap(); 215 | 216 | let proof = FflonkyKzg::::open_single(¶ms.ck(), &fs, t, &roots, transcript); 217 | assert!(FflonkyKzg::::verify_single( 218 | ¶ms.vk(), 219 | &gc, 220 | t, 221 | proof, 222 | &roots, 223 | &vss, 224 | transcript 225 | )); 226 | } 227 | 228 | fn _test_fflonk>() { 229 | let rng = &mut test_rng(); 230 | let transcript = &mut (F::rand(rng), F::rand(rng)); 231 | 232 | let params = FflonkyKzg::::setup(123, rng); 233 | 234 | let ds = [31, 15]; 235 | let ts = [2, 4]; // number of polynomials in a combination 236 | let ms = [2, 2]; // number of opening points per a 
combination 237 | 238 | let mut fss = vec![]; 239 | let mut rootss = vec![]; 240 | let mut vsss = vec![]; 241 | for ((d, t), m) in ds.into_iter().zip(ts).zip(ms) { 242 | let (fs, roots, vss) = generate_test_data(rng, d, t, m); 243 | fss.push(fs); 244 | rootss.push(roots); 245 | vsss.push(vss); 246 | } 247 | 248 | let gcs: Vec<_> = fss 249 | .iter() 250 | .zip(ts) 251 | .map(|(fs, t)| CS::commit(¶ms.ck(), &Fflonk::combine(t, &fs)).unwrap()) 252 | .collect(); 253 | 254 | let proof = FflonkyKzg::::open(¶ms.ck(), &fss, &ts, &rootss, transcript); 255 | assert!(FflonkyKzg::::verify( 256 | ¶ms.vk(), 257 | &gcs, 258 | &ts, 259 | proof, 260 | &rootss, 261 | &vsss, 262 | transcript 263 | )); 264 | } 265 | 266 | #[test] 267 | fn test_fflonk_single() { 268 | _test_fflonk_single::(); 269 | _test_fflonk_single::(); 270 | } 271 | 272 | #[test] 273 | fn test_fflonk() { 274 | _test_fflonk::(); 275 | _test_fflonk::(); 276 | } 277 | } 278 | -------------------------------------------------------------------------------- /tests/plonk/batchy.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use ark_ff::{PrimeField, UniformRand}; 4 | use ark_poly::{DenseUVPolynomial, Polynomial}; 5 | use ark_serialize::*; 6 | use ark_std::rand::Rng; 7 | use ark_std::{end_timer, start_timer, test_rng}; 8 | 9 | use w3f_pcs::pcs::{Commitment, PcsParams, PCS}; 10 | use w3f_pcs::utils::poly; 11 | use w3f_pcs::Poly; 12 | 13 | use crate::{DecoyPlonk, VanillaPlonkAssignments}; 14 | 15 | impl VanillaPlonkAssignments { 16 | fn constraints(&self) -> Vec> { 17 | vec![ 18 | self.arithmetic_constraint.clone(), 19 | self.permutation_constraint_1.clone(), 20 | self.permutation_constraint_2.clone(), 21 | ] 22 | } 23 | 24 | fn polys_to_commit_1(&self) -> Vec> { 25 | self.wire_polynomials.clone() 26 | } 27 | 28 | fn poly_to_commit_2(&self, _beta_gamma: (F, F)) -> Poly { 29 | self.permutation_polynomial.clone() 30 | } 31 | 32 | fn poly_to_commit_3(&self, alpha: F) -> Poly { 33 | let aggregate_constraint = poly::sum_with_powers(alpha, &self.constraints()); 34 | self.quotient(&aggregate_constraint) 35 | } 36 | 37 | fn polys_to_evaluate_at_zeta_4(&self) -> Vec> { 38 | [&self.wire_polynomials, &self.preprocessed_polynomials[5..7]].concat() // a, b, c, S_{sigma_1}, S_{sigma_2} 39 | } 40 | 41 | fn poly_to_evaluate_at_zeta_omega_4(&self) -> Poly { 42 | self.permutation_polynomial.clone() 43 | } 44 | 45 | fn polys_to_open_at_zeta_5(&self) -> Vec> { 46 | self.polys_to_evaluate_at_zeta_4() 47 | } 48 | 49 | fn poly_to_open_at_zeta_omega_5(&self) -> Poly { 50 | self.poly_to_evaluate_at_zeta_omega_4() 51 | } 52 | } 53 | 54 | struct Challenges { 55 | // verifier challenges in order: 56 | // permutation argument challenges (aka "permutation challenges") 57 | beta_gamma: (F, F), 58 | // constraint aggregation challenge (aka "quotient challenge") 59 | alpha: F, 60 | // evaluation challenge 61 | zeta: F, 62 | // polynomial aggregation challenge (aka "opening challenge") 63 | nus: Vec, 64 | } 65 | 66 | impl Challenges { 67 | fn new(rng: &mut R) -> Self { 68 | let beta_gamma: (F, F) = ( 69 | Self::get_128_bit_challenge(rng), 70 | Self::get_128_bit_challenge(rng), 71 | ); 72 | let alpha: F = Self::get_128_bit_challenge(rng); 73 | let zeta: F = Self::get_128_bit_challenge(rng); 74 | let one = std::iter::once(F::one()); 75 | let nus = one 76 | .chain((1..6).map(|_| Self::get_128_bit_challenge(rng))) 77 | .collect(); 78 | Self { 79 | beta_gamma, 80 | alpha, 81 | zeta, 82 | nus, 83 | } 84 | } 85 
| 86 | fn get_128_bit_challenge(rng: &mut R) -> F { 87 | u128::rand(rng).into() 88 | } 89 | } 90 | 91 | pub struct PlonkBatchKzgTest> { 92 | polys: VanillaPlonkAssignments, 93 | linearization_polynomial: Poly, 94 | challenges: Challenges, 95 | cs: PhantomData, 96 | } 97 | 98 | impl> PlonkBatchKzgTest { 99 | fn commit_polynomial(&self, ck: &CS::CK, poly: &Poly) -> CS::C { 100 | let t_commitment = 101 | start_timer!(|| format!("Committing to degree {} polynomials", poly.degree())); 102 | let commitment = CS::commit(ck, poly).unwrap(); 103 | end_timer!(t_commitment); 104 | commitment 105 | } 106 | 107 | fn commit_polynomials(&self, ck: &CS::CK, polys: &[Poly]) -> Vec { 108 | let t_commitment = 109 | start_timer!(|| format!("Committing to batch of {} polynomials", polys.len())); 110 | let commitments = polys 111 | .iter() 112 | .map(|p| self.commit_polynomial(ck, p)) 113 | .collect(); 114 | end_timer!(t_commitment); 115 | 116 | commitments 117 | } 118 | } 119 | 120 | #[derive(CanonicalSerialize, CanonicalDeserialize)] 121 | pub struct BatchyPlonkProof> { 122 | wire_polynomials_c: Vec, 123 | permutation_polynomial_c: CS::C, 124 | quotient_polynomial_c: CS::C, 125 | evals_at_zeta: Vec, 126 | evals_at_zeta_omega: F, 127 | // [W_{\zeta}]_1 128 | proof_at_zeta: CS::Proof, 129 | // [W_{\zeta\omega}]_1 130 | proof_at_zeta_omega: CS::Proof, 131 | extra: (CS::C, F), // commitment and evaluation of the linearization poly //TODO: remove 132 | } 133 | 134 | impl> DecoyPlonk for PlonkBatchKzgTest { 135 | type Proof = BatchyPlonkProof; 136 | 137 | fn new(polys: VanillaPlonkAssignments, rng: &mut R) -> Self { 138 | let linearization_polynomial = Poly::rand(polys.degree, rng); // TODO: compute from known commitments 139 | let challenges = Challenges::new(rng); 140 | Self { 141 | polys, 142 | linearization_polynomial, 143 | challenges, 144 | cs: PhantomData, 145 | } 146 | } 147 | 148 | fn setup(&mut self, rng: &mut R) -> (CS::CK, CS::VK) { 149 | let params = CS::setup(self.polys.max_degree, rng); 150 | (params.ck(), params.vk()) 151 | } 152 | 153 | fn preprocess(&mut self, ck: &CS::CK) -> Vec { 154 | self.commit_polynomials(ck, &self.polys.preprocessed_polynomials) 155 | } 156 | 157 | fn prove(&mut self, ck: &CS::CK) -> BatchyPlonkProof { 158 | let wire_polynomials_c = self.commit_polynomials(ck, &self.polys.polys_to_commit_1()); 159 | let permutation_polynomial_c = 160 | self.commit_polynomial(ck, &self.polys.poly_to_commit_2(self.challenges.beta_gamma)); 161 | let quotient_polynomial_c = 162 | self.commit_polynomial(ck, &self.polys.poly_to_commit_3(self.challenges.alpha)); 163 | 164 | let zeta = self.challenges.zeta; 165 | let evals_at_zeta = self 166 | .polys 167 | .polys_to_evaluate_at_zeta_4() 168 | .iter() 169 | .map(|p| p.evaluate(&zeta)) 170 | .collect(); 171 | let zeta_omega = zeta * self.polys.omega; 172 | let evals_at_zeta_omega = self 173 | .polys 174 | .poly_to_evaluate_at_zeta_omega_4() 175 | .evaluate(&zeta_omega); 176 | 177 | // TODO: should be computed by verifier from other commitments 178 | let linearization_polynomial = self.linearization_polynomial.clone(); 179 | 180 | let mut polys_to_open_at_zeta = vec![linearization_polynomial.clone()]; 181 | polys_to_open_at_zeta.extend_from_slice(&self.polys.polys_to_open_at_zeta_5()); 182 | let agg_poly_at_zeta = 183 | poly::sum_with_coeffs(self.challenges.nus.clone(), &polys_to_open_at_zeta); 184 | 185 | let proof_at_zeta = CS::open(ck, &agg_poly_at_zeta, zeta).unwrap(); 186 | let proof_at_zeta_omega = 187 | CS::open(ck, 
&self.polys.poly_to_open_at_zeta_omega_5(), zeta_omega).unwrap(); 188 | 189 | // TODO: compute 190 | let t_extra = start_timer!(|| "Extra: commiting to the linearization polynomial"); 191 | let extra_comm = self.commit_polynomial(ck, &linearization_polynomial); 192 | let extra_eval = linearization_polynomial.evaluate(&zeta); 193 | end_timer!(t_extra); 194 | 195 | BatchyPlonkProof { 196 | wire_polynomials_c, 197 | permutation_polynomial_c: permutation_polynomial_c, 198 | quotient_polynomial_c, 199 | evals_at_zeta, 200 | evals_at_zeta_omega, 201 | proof_at_zeta, 202 | proof_at_zeta_omega, 203 | extra: (extra_comm, extra_eval), 204 | } 205 | } 206 | 207 | fn verify( 208 | &self, 209 | vk: &CS::VK, 210 | preprocessed_commitments: Vec, 211 | proof: BatchyPlonkProof, 212 | ) -> bool { 213 | // TODO: 214 | let t_reconstruct = start_timer!(|| { 215 | "Reconstructing the commitment to the linearization polynomial: 7-multiexp" 216 | }); 217 | let bases = [ 218 | &preprocessed_commitments[0..4], 219 | &vec![ 220 | proof.permutation_polynomial_c.clone(), 221 | preprocessed_commitments[7].clone(), 222 | proof.quotient_polynomial_c, 223 | ], 224 | ] 225 | .concat(); 226 | assert_eq!(bases.len(), 7); // [q_C]_1 has exp = 1 227 | let coeffs = (0..7).map(|_| F::rand(&mut test_rng())).collect::>(); 228 | let _comm = CS::C::combine(&coeffs, &bases); 229 | end_timer!(t_reconstruct); 230 | 231 | let t_kzg = start_timer!(|| "KZG batch verification"); 232 | let (agg_comm, agg_eval) = { 233 | let t_aggregate_claims = start_timer!(|| "aggregate evaluation claims at zeta"); 234 | 235 | let nus = self.challenges.nus.clone(); 236 | 237 | let mut comms = vec![proof.extra.0]; 238 | comms.extend_from_slice(&(proof.wire_polynomials_c)); 239 | comms.extend_from_slice(&preprocessed_commitments[5..7]); 240 | assert_eq!(comms.len(), nus.len()); 241 | let agg_comms = CS::C::combine(&nus, &comms); 242 | 243 | let mut evals = vec![proof.extra.1]; 244 | evals.extend_from_slice(&proof.evals_at_zeta); 245 | assert_eq!(evals.len(), nus.len()); 246 | let agg_evals = evals.into_iter().zip(nus.iter()).map(|(y, r)| y * r).sum(); 247 | 248 | end_timer!(t_aggregate_claims); 249 | (agg_comms, agg_evals) 250 | }; 251 | 252 | let t_kzg_batch_opening = start_timer!(|| "batched KZG openning"); 253 | let zeta = self.challenges.zeta; 254 | let zeta_omega = zeta * self.polys.omega; 255 | let valid = CS::batch_verify( 256 | vk, 257 | vec![agg_comm, proof.permutation_polynomial_c], 258 | vec![zeta, zeta_omega], 259 | vec![agg_eval, proof.evals_at_zeta_omega], 260 | vec![proof.proof_at_zeta, proof.proof_at_zeta_omega], 261 | &mut test_rng(), 262 | ); 263 | end_timer!(t_kzg_batch_opening); 264 | end_timer!(t_kzg); 265 | valid.is_ok() 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /src/aggregation/multiple.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::PrimeField; 2 | use ark_poly::Polynomial; 3 | use ark_std::collections::BTreeSet; 4 | use ark_std::iterable::Iterable; 5 | use ark_std::{end_timer, start_timer}; 6 | use ark_std::{vec, vec::Vec}; 7 | 8 | use crate::pcs::{Commitment, PCS}; 9 | use crate::utils::poly; 10 | use crate::utils::poly::interpolate_evaluate; 11 | use crate::{utils, EuclideanPolynomial, Poly}; 12 | 13 | pub struct MultipointClaim> { 14 | pub c: C, 15 | pub xs: Vec, 16 | pub ys: Vec, 17 | } 18 | 19 | pub trait Transcript> { 20 | fn get_gamma(&mut self) -> F; 21 | fn commit_to_q(&mut self, q: &CS::C); 22 | fn get_zeta(&mut 
self) -> F; 23 | } 24 | 25 | pub fn aggregate_polys, T: Transcript>( 26 | ck: &CS::CK, 27 | fs: &[Poly], 28 | xss: &[BTreeSet], 29 | transcript: &mut T, 30 | ) -> (Poly, F, CS::C) { 31 | assert_eq!( 32 | xss.len(), 33 | fs.len(), 34 | "{} opening sets specified for {} polynomials", 35 | xss.len(), 36 | fs.len() 37 | ); 38 | // Both Halo-inf and fflonk/shplonk use the notation "complement" in set-theoretical sense to that used in the code. 39 | // The papers consider vanishing polynomials of the complements of the opening sets, 40 | // while in the code vanishing polynomials of the opening sets are used directly. 41 | // Comments bellow bridge notation between the code and the papers to explain that the code is equivalent 42 | // using https://eprint.iacr.org/2021/1167.pdf, Lemma 4.2. as the authority. 43 | 44 | // zi - the vanishing polynomial of the set xsi ("Si" in the paper) of the opening points for fi, i = 0,...,k-1 45 | let zs: Vec<_> = xss.iter().map(|xsi| poly::z_of_set(xsi)).collect(); 46 | // The paper defines "T" as the set of all the opening points, "Z_T", it's vanishing polynomial, 47 | // and "Z_{T\S_i}" as the vanishing polynomial of the complement of "Si" in "T". 48 | // Observe that for zi computed above, "Z_T" = zi * "Z_{T\S_i}" (*) 49 | 50 | // (qi, ri) - the quotient and the remainder of division of fi by the corresponding vanishing polynomial zi 51 | // qi = (fi - ri) / zi (**) 52 | let t_divisions = start_timer!(|| "polynomial divisions"); 53 | let (qs, rs): (Vec<_>, Vec<_>) = fs 54 | .iter() 55 | .zip(zs.iter()) 56 | .map(|(fi, zi)| fi.divide_with_q_and_r(zi)) 57 | .unzip(); 58 | end_timer!(t_divisions); 59 | 60 | let gamma = transcript.get_gamma(); 61 | 62 | // The paper defines f = sum(gamma^i * "Z_{T\S_i}" * (fi - ri)) 63 | // Let q := f / "Z_T" 64 | // By (*) "Z_T" = zi * "Z_{T\S_i}", hence q = f / (zi * "Z_{T\S_i})" = sum(gamma^i * (fi - ri) / zi) 65 | // By (**) qi = (fi - ri) / zi, thus q = sum(gamma^i * qi) 66 | let q = poly::sum_with_powers(gamma, &qs); 67 | let t_commit = 68 | start_timer!(|| ark_std::format!("commitment to a degree-{} polynomial", q.degree())); 69 | let qc = CS::commit(ck, &q).unwrap(); 70 | // "W" in the paper 71 | end_timer!(t_commit); 72 | transcript.commit_to_q(&qc); 73 | 74 | let zeta = transcript.get_zeta(); 75 | 76 | let rs_at_zeta: Vec<_> = rs.iter().map(|ri| ri.evaluate(&zeta)).collect(); 77 | let zs_at_zeta: Vec<_> = zs.iter().map(|zi| zi.evaluate(&zeta)).collect(); 78 | 79 | // Let pi(X) = fi(X) - ri(zeta) 80 | let ps: Vec> = fs 81 | .iter() 82 | .zip(rs_at_zeta) 83 | .map(|(fi, ri)| fi - &poly::constant(ri)) 84 | .collect(); 85 | 86 | // From (*) follows that "Z_{T\S_i}"(zeta) = "Z_T"(zeta) / zi(zeta), so 87 | // 1. "L" = sum([gamma^i * "Z_T"(zeta) / zi(zeta)] * pi) - "Z_T"(zeta) * q 88 | // 2. "Z_{T\S_0}"(zeta) = "Z_T"(zeta) / z0(zeta) 89 | // We want to compute l_norm = "L"/"Z_{T\S_0}"(zeta) = "L" * z0(zeta) / "Z_T"(zeta) 90 | // Notice that "Z_T"(zeta) cancels out from the both terms of "L" 91 | 92 | // Finally l_norm = sum([gamma^i * z0(zeta) / zi(zeta)] * pi) - z0(zeta) * q 93 | // normalizer := z0(zeta) 94 | // coeff_i := gamma^i * z0(zeta) / zi(zeta) 95 | let (coeffs, normalizer) = get_coeffs(zs_at_zeta, gamma); 96 | let t_combine = start_timer!(|| "linear combination of polynomials"); 97 | let l_norm = &poly::sum_with_coeffs(coeffs, &ps) - &(&q * normalizer); 98 | end_timer!(t_combine); 99 | 100 | // It remains to notice that "W'" is a KZG opening proof for polynomial l_norm in point zeta. 
101 | (l_norm, zeta, qc) 102 | } 103 | 104 | /// Takes evaluations of vanishing polynomials at a random point `zeta`, and a random challenge `gamma`, 105 | /// and returns coefficients for the random linear combination of polynomials/commitments. 106 | fn get_coeffs(zs_at_zeta: Vec, gamma: F) -> (Vec, F) { 107 | assert!(!zs_at_zeta.is_empty(), "empty vec"); 108 | let normalizer = zs_at_zeta[0]; 109 | let mut zs_at_zeta_inv = zs_at_zeta; 110 | ark_ff::batch_inversion(&mut zs_at_zeta_inv); 111 | 112 | let coeffs = zs_at_zeta_inv 113 | .iter() 114 | .zip(utils::powers(gamma)) 115 | .map(|(zi_inv, gamma_to_i)| gamma_to_i * zi_inv * normalizer) 116 | .collect(); 117 | 118 | (coeffs, normalizer) 119 | } 120 | 121 | pub fn group_by_commitment>( 122 | fcs: &[C], 123 | xss: &Vec>, 124 | yss: &Vec>, 125 | ) -> Vec> { 126 | fcs.iter() 127 | .cloned() 128 | .zip(xss.iter().cloned()) 129 | .zip(yss.iter().cloned()) 130 | .map(|((c, xs), ys)| MultipointClaim { c, xs, ys }) 131 | .collect() 132 | } 133 | 134 | pub fn aggregate_claims, T: Transcript>( 135 | claims: Vec>, 136 | qc: &CS::C, 137 | onec: &CS::C, 138 | transcript: &mut T, 139 | ) -> MultipointClaim { 140 | let gamma = transcript.get_gamma(); 141 | transcript.commit_to_q(&qc); 142 | let zeta = transcript.get_zeta(); 143 | 144 | // For each polynomial fi the opening claim {(xj, yj)} can be presented in polynomial form 145 | // as a pair of polynomials (ri, zi), where zi is the vanishing polynomial of the set {xj}, 146 | // and ri is the interpolation polynomial of the set {(xj, yj)}. 147 | // ri(zeta), zi(zeta) 148 | let t_eval = start_timer!(|| "barycentric evaluations"); 149 | let (rs_at_zeta, zs_at_zeta): (Vec<_>, Vec<_>) = claims 150 | .iter() 151 | .map(|MultipointClaim { c: _, xs, ys }| interpolate_evaluate(xs, ys, &zeta)) 152 | .unzip(); 153 | end_timer!(t_eval); 154 | 155 | let (mut coeffs, normalizer) = get_coeffs(zs_at_zeta, gamma); 156 | assert!(coeffs[0].is_one()); 157 | 158 | let agg_r_at_zeta: F = rs_at_zeta 159 | .into_iter() 160 | .zip(coeffs.iter()) 161 | .map(|(ri_at_zeta, coeff)| ri_at_zeta * coeff) 162 | .sum(); 163 | 164 | let mut commitments = claims.into_iter().map(|cl| cl.c).collect::>(); 165 | coeffs.push(-agg_r_at_zeta); 166 | commitments.push(onec.clone()); 167 | coeffs.push(-normalizer); 168 | commitments.push(qc.clone()); 169 | 170 | let t_combine = start_timer!(|| "multiexp"); 171 | let lc = CS::C::combine(&coeffs, &commitments); 172 | end_timer!(t_combine); 173 | MultipointClaim { 174 | c: lc, 175 | xs: vec![zeta], 176 | ys: vec![F::zero()], 177 | } 178 | } 179 | 180 | #[cfg(test)] 181 | mod tests { 182 | use ark_ff::{One, UniformRand}; 183 | use ark_std::format; 184 | use ark_std::iter::FromIterator; 185 | use ark_std::test_rng; 186 | use ark_std::{end_timer, start_timer}; 187 | 188 | use crate::pcs::IdentityCommitment; 189 | use crate::pcs::PcsParams; 190 | use crate::shplonk::tests::{random_opening, random_xss}; 191 | use crate::tests::{BenchField, BenchKzg, TestField, TestKzg, BENCH_DEG_LOG1}; 192 | 193 | use super::*; 194 | 195 | #[test] 196 | fn test_get_coeffs() { 197 | let rng = &mut test_rng(); 198 | 199 | let zs = (0..10).map(|_| TestField::rand(rng)).collect::>(); 200 | 201 | let gamma = TestField::rand(rng); 202 | let (coeffs, _) = get_coeffs(zs.clone(), gamma); 203 | assert_eq!(coeffs.len(), zs.len()); 204 | assert!(coeffs[0].is_one()); 205 | 206 | let gamma = TestField::one(); 207 | let (coeffs, normalizer) = get_coeffs(zs.clone(), gamma); 208 | assert!(coeffs.iter().zip(zs).all(|(c, z)| z * c == 
normalizer)); 209 | } 210 | 211 | fn _test_aggregation>(d: usize) { 212 | // degree of polynomials 213 | let rng = &mut test_rng(); 214 | 215 | let t = 8; // number of polynomials 216 | let max_m = 3; // maximal number of opening points per polynomial 217 | 218 | let params = CS::setup(d, rng); 219 | let (ck, vk) = (params.ck(), params.vk()); 220 | 221 | let xss = random_xss(rng, t, max_m); 222 | let opening = random_opening::<_, _, CS>(rng, &ck, d, t, xss); 223 | 224 | let sets_of_xss: Vec> = opening 225 | .xss 226 | .iter() 227 | .map(|xs| BTreeSet::from_iter(xs.iter().cloned())) 228 | .collect(); 229 | 230 | let transcript = &mut (F::rand(rng), F::rand(rng)); 231 | 232 | let t_aggregate_polys = 233 | start_timer!(|| format!("Aggregate {} degree-{} polynomials", t, d)); 234 | let (agg_poly, zeta, agg_proof) = 235 | aggregate_polys::<_, CS, _>(&ck, &opening.fs, &sets_of_xss, transcript); 236 | end_timer!(t_aggregate_polys); 237 | 238 | let claims = group_by_commitment(&opening.fcs, &opening.xss, &opening.yss); 239 | let onec = CS::commit(&vk.clone().into(), &poly::constant(F::one())).unwrap(); 240 | 241 | let t_aggregate_claims = start_timer!(|| format!("Aggregate {} claims", claims.len())); 242 | let agg_claim = aggregate_claims::<_, CS, _>(claims, &agg_proof, &onec, transcript); 243 | end_timer!(t_aggregate_claims); 244 | 245 | assert_eq!(CS::commit(&ck, &agg_poly).unwrap(), agg_claim.c); 246 | assert_eq!(zeta, agg_claim.xs[0]); 247 | assert_eq!(agg_poly.evaluate(&zeta), agg_claim.ys[0]); 248 | assert!(agg_claim.ys[0].is_zero()); 249 | } 250 | 251 | #[test] 252 | fn test_aggregation() { 253 | _test_aggregation::(15); 254 | _test_aggregation::(15); 255 | } 256 | 257 | #[test] 258 | #[ignore] 259 | fn bench_aggregation() { 260 | _test_aggregation::((1 << BENCH_DEG_LOG1) - 1); 261 | } 262 | } 263 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 |
--------------------------------------------------------------------------------
/src/pcs/kzg/mod.rs:
--------------------------------------------------------------------------------
use ark_ec::pairing::Pairing;
use ark_ec::CurveGroup;
use ark_ec::VariableBaseMSM;
use ark_ff::{One, UniformRand, Zero};
use ark_poly::{DenseUVPolynomial, Evaluations, Polynomial};
use ark_std::marker::PhantomData;
use ark_std::ops::Mul;
use ark_std::rand::Rng;
use ark_std::vec::Vec;

use crate::pcs::kzg::commitment::KzgCommitment;
use crate::pcs::kzg::params::{KzgCommitterKey, KzgVerifierKey};
use crate::pcs::kzg::urs::URS;
use crate::pcs::{CommitterKey, PCS};
use crate::utils::ec::{small_multiexp_affine, small_multiexp_proj};
use crate::Poly;

pub mod commitment;
mod lagrange;
pub mod params;
pub mod urs;

#[derive(Clone)]
pub struct KZG<E: Pairing> {
    _engine: PhantomData<E>,
}

/// Accumulated pairing check: e(acc, g2) * e(proof, tau.g2) == 1.
#[derive(Clone, Debug)]
pub struct AccumulatedOpening<E: Pairing> {
    pub acc: E::G1Affine,
    pub proof: E::G1Affine,
}

#[derive(Clone, Debug)]
pub struct KzgOpening<E: Pairing> {
    pub c: E::G1Affine,
    pub x: E::ScalarField,
    pub y: E::ScalarField,
    pub proof: E::G1Affine,
}

impl<E: Pairing> KZG<E> {
    fn z(x: E::ScalarField) -> Poly<E::ScalarField> {
        Poly::from_coefficients_slice(&[-x, E::ScalarField::one()])
    }

    fn q(p: &Poly<E::ScalarField>, d: &Poly<E::ScalarField>) -> Poly<E::ScalarField> {
        p / d
    }

    fn compute_quotient(p: &Poly<E::ScalarField>, x: E::ScalarField) -> Poly<E::ScalarField> {
        Self::q(p, &Self::z(x))
    }

    fn parse(openings: Vec<KzgOpening<E>>) -> Vec<((E::G1, E::G1Affine), E::ScalarField)> {
        openings
            .into_iter()
            .map(|KzgOpening { c, x, y, proof }| ((proof.mul(x) + &c, proof), y))
            .collect()
    }

    pub fn accumulate(
        openings: Vec<KzgOpening<E>>,
        rs: &[E::ScalarField],
        vk: &KzgVerifierKey<E>,
    ) -> AccumulatedOpening<E> {
        let openings = Self::parse(openings);
        let ((accs, proofs), ys): ((Vec<E::G1>, Vec<E::G1Affine>), Vec<E::ScalarField>) =
            openings.into_iter().unzip();
        let sum_ry = rs
            .iter()
            .zip(ys.into_iter())
            .map(|(r, y)| y * r)
            .sum::<E::ScalarField>();
        let acc = vk.g1.mul(sum_ry) - small_multiexp_proj(rs, &accs);
        let proof = small_multiexp_affine(rs, &proofs);
        let points = E::G1::normalize_batch(&[acc, proof]);
        let acc = points[0];
        let proof = points[1];
        AccumulatedOpening { acc, proof }
    }

    fn accumulate_single(opening: KzgOpening<E>, g1: &E::G1Affine) -> AccumulatedOpening<E> {
        let KzgOpening { c, x, y, proof } = opening;
        let acc = (g1.mul(y) - (proof.mul(x) + &c)).into_affine();
        AccumulatedOpening { acc, proof }
    }

    pub fn verify_accumulated(opening: AccumulatedOpening<E>, vk: &KzgVerifierKey<E>) -> bool {
        E::multi_pairing(
            &[opening.acc, opening.proof],
            [vk.g2.clone(), vk.tau_in_g2.clone()],
        )
        .is_zero()
    }

    pub fn verify_single(opening: KzgOpening<E>, vk: &KzgVerifierKey<E>) -> bool {
        let acc_opening = Self::accumulate_single(opening, &vk.g1);
        Self::verify_accumulated(acc_opening, vk)
    }

    pub fn verify_batch<R: Rng>(
        openings: Vec<KzgOpening<E>>,
        vk: &KzgVerifierKey<E>,
        rng: &mut R,
    ) -> bool {
        let one = ark_std::iter::once(E::ScalarField::one());
        let coeffs: Vec<E::ScalarField> = one
            .chain((1..openings.len()).map(|_| u128::rand(rng).into()))
            .collect();
        let acc_opening = Self::accumulate(openings, &coeffs, vk);
        Self::verify_accumulated(acc_opening, vk)
    }

    fn _commit(coeffs: &[E::ScalarField], bases: &[E::G1Affine]) -> KzgCommitment<E> {
        // `msm` dispatches to the `VariableBaseMSM` implementation for the projective group `E::G1`.
        // That way custom implementations of `msm` get called (`msm_unchecked` does not allow that).
        let proj = <E::G1 as VariableBaseMSM>::msm(&bases[..coeffs.len()], &coeffs).unwrap();
        KzgCommitment(proj.into_affine())
    }
}

impl<E: Pairing> PCS<E::ScalarField> for KZG<E> {
    type C = KzgCommitment<E>;
    type Proof = E::G1Affine;

    type CK = KzgCommitterKey<E::G1Affine>;
    type VK = KzgVerifierKey<E>;
    type Params = URS<E>;

    fn setup<R: Rng>(max_degree: usize, rng: &mut R) -> Self::Params {
        URS::<E>::generate(max_degree + 1, 2, rng)
    }

    fn commit(ck: &Self::CK, p: &Poly<E::ScalarField>) -> Result<Self::C, ()> {
        let ck = &ck.monomial;
        if p.degree() > ck.max_degree() {
            return Err(());
        }
        Ok(Self::_commit(&p.coeffs, &ck.powers_in_g1))
    }

    fn commit_evals(ck: &Self::CK, evals: &Evaluations<E::ScalarField>) -> Result<Self::C, ()> {
        let ck = ck
            .lagrangian
            .as_ref()
            .expect("lagrangian key hadn't been generated");
        if evals.evals.len() > ck.max_evals() || evals.domain() != ck.domain {
            return Err(());
        }
        Ok(Self::_commit(&evals.evals, &ck.lis_in_g))
    }

    fn open(ck: &Self::CK, p: &Poly<E::ScalarField>, x: E::ScalarField) -> Result<Self::Proof, ()> {
        let q = Self::compute_quotient(p, x);
        Self::commit(ck, &q).map(|c| c.0)
    }

    fn verify(
        vk: &KzgVerifierKey<E>,
        c: Self::C,
        x: E::ScalarField,
        y: E::ScalarField,
        proof: Self::Proof,
    ) -> Result<(), ()> {
        let opening = KzgOpening {
            c: c.0,
            x,
            y,
            proof,
        };
        Self::verify_single(opening, vk).then(|| ()).ok_or(())
    }

    fn batch_verify<R: Rng>(
        vk: &KzgVerifierKey<E>,
        c: Vec<Self::C>,
        x: Vec<E::ScalarField>,
        y: Vec<E::ScalarField>,
        proof: Vec<Self::Proof>,
        rng: &mut R,
    ) -> Result<(), ()> {
        if c.len() != x.len() || c.len() != y.len() {
            return Err(());
        }
        let openings = c
            .into_iter()
            .zip(x.into_iter())
            .zip(y.into_iter())
            .zip(proof.into_iter())
            .map(|(((c, x), y), proof)| KzgOpening {
                c: c.0,
                x,
                y,
                proof,
            })
            .collect();
        Self::verify_batch(openings, vk, rng).then(|| ()).ok_or(())
    }
}

#[cfg(test)]
mod tests {
    use ark_ff::PrimeField;
    use ark_ff::UniformRand;
    use ark_poly::{DenseUVPolynomial, EvaluationDomain, GeneralEvaluationDomain};
    use ark_std::format;
    use ark_std::test_rng;
    use ark_std::vec;
    use ark_std::{end_timer, start_timer};

    use crate::pcs::PcsParams;
    use crate::tests::{BenchCurve, TestCurve, TestField};

    use super::*;

    fn _test_minimal_kzg<E: Pairing>(log_n: usize) {
        let rng = &mut test_rng();

        let max_degree = (1 << log_n) - 1;

        let t_setup = start_timer!(|| format!(
            "KZG setup of size 2^{} on {}",
            log_n,
            crate::utils::curve_name::<E>()
        ));
        let urs = KZG::<E>::setup(max_degree, rng);
        end_timer!(t_setup);

        let ck = urs.ck();
        let vk = urs.vk();

        let p = Poly::<E::ScalarField>::rand(ck.max_degree(), rng);
        let x = E::ScalarField::rand(rng);
        let z = p.evaluate(&x);

        let t_commit = start_timer!(|| format!(
            "Committing to a dense degree-{} polynomial",
            ck.max_degree()
        ));
        let c = KZG::<E>::commit(&ck, &p).unwrap();
        end_timer!(t_commit);

        let t_prove = start_timer!(|| "Generating an opening proof for a single point");
        let proof = KZG::<E>::open(&ck, &p, x).unwrap();
        end_timer!(t_prove);

        let t_verify = start_timer!(|| "Verification of a single-point opening");
        assert!(KZG::<E>::verify(&vk, c, x, z, proof).is_ok());
        end_timer!(t_verify);
    }

    fn random_openings<E: Pairing, R: Rng>(
        k: usize,
        ck: &KzgCommitterKey<E::G1Affine>,
        xs: Vec<E::ScalarField>,
        rng: &mut R,
    ) -> Vec<KzgOpening<E>> {
        assert_eq!(xs.len(), k);
        let d = ck.max_degree();

        (0..k)
            .map(|i| {
                let f = Poly::<E::ScalarField>::rand(d, rng);
                let x = xs[i];
                let y = f.evaluate(&x);
                let c = KZG::<E>::commit(ck, &f).unwrap().0;
                let proof = KZG::<E>::open(ck, &f, x).unwrap();
                KzgOpening { c, x, y, proof }
            })
            .collect()
    }

    fn _test_batch_verification<E: Pairing>(log_n: usize, k: usize) {
        let rng = &mut test_rng();

        let max_degree = (1 << log_n) - 1;

        let urs = KZG::<E>::setup(max_degree, rng);
        let (ck, vk) = (urs.ck(), urs.vk());

        let xs = (0..k).map(|_| E::ScalarField::rand(rng)).collect();
        let openings = random_openings(k, &ck, xs, rng);
        let t_verify_batch = start_timer!(|| format!(
            "Batch verification of {} openings of degree ~2^{} on {} with {}-bit xs",
            k,
            log_n,
            crate::utils::curve_name::<E>(),
            E::ScalarField::MODULUS_BIT_SIZE
        ));
        assert!(KZG::<E>::verify_batch(openings, &vk, rng));
        end_timer!(t_verify_batch);

        let xs = (0..k)
            .map(|_| E::ScalarField::from(u128::rand(rng)))
            .collect();
        let openings = random_openings(k, &ck, xs, rng);
        let t_verify_batch = start_timer!(|| format!(
            "Batch verification of {} openings of degree ~2^{} on {} with {}-bit xs",
            k,
            log_n,
            crate::utils::curve_name::<E>(),
            128
        ));
        assert!(KZG::<E>::verify_batch(openings, &vk, rng));
        end_timer!(t_verify_batch);
    }

    #[test]
    fn test_minimal_kzg() {
        _test_minimal_kzg::<TestCurve>(8);
    }

    #[test]
    #[ignore]
    fn bench_minimal_kzg() {
        _test_minimal_kzg::<BenchCurve>(16);
    }

    #[test]
    fn test_batch_verification() {
        _test_batch_verification::<TestCurve>(8, 4);
    }

    #[test]
    #[ignore]
    fn bench_batch_verification() {
        _test_batch_verification::<BenchCurve>(12, 5);
    }

    #[test]
    fn test_commitments_match() {
        let rng = &mut test_rng();
        let domain_size = 16;
        let domain = GeneralEvaluationDomain::new(domain_size).unwrap();

        let urs = KZG::<TestCurve>::setup(domain_size - 1, rng);
        let ck = urs.ck_with_lagrangian(domain_size);

        let mut evals = vec![TestField::zero(); domain_size];
        evals[0] = TestField::one();
        let evals = Evaluations::from_vec_and_domain(evals, domain);
        let t_commit = start_timer!(|| format!("Committing to a sparse vec using lagrangian SRS"));
        let c_evals = KZG::<TestCurve>::commit_evals(&ck, &evals);
        end_timer!(t_commit);

        let poly = evals.interpolate();
        let t_commit = start_timer!(|| format!("Committing to a sparse vec using monomial SRS"));
        let c_poly = KZG::<TestCurve>::commit(&ck, &poly);
        end_timer!(t_commit);

        assert_eq!(c_evals, c_poly);
    }
}
--------------------------------------------------------------------------------
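For orientation, below is a minimal sketch of a single-point commit/open/verify round-trip through the `PCS` interface implemented in `src/pcs/kzg/mod.rs`, mirroring the `_test_minimal_kzg` test above. The crate name `w3f_pcs`, the public visibility of the paths used, and the choice of `ark_bls12_381::Bls12_381` as the pairing engine are assumptions made for the example; any `Pairing` engine works.

```rust
// Sketch only, not part of the repository: a single-point KZG round-trip.
// Assumes the `w3f_pcs` paths below are publicly exported; swap in any `Pairing` engine.
use ark_bls12_381::Bls12_381;
use ark_ec::pairing::Pairing;
use ark_poly::{DenseUVPolynomial, Polynomial};
use ark_std::{test_rng, UniformRand};

use w3f_pcs::pcs::kzg::KZG;
use w3f_pcs::pcs::{CommitterKey, PcsParams, PCS};
use w3f_pcs::Poly;

type F = <Bls12_381 as Pairing>::ScalarField;

fn main() {
    let rng = &mut test_rng();

    // Trusted setup (URS) supporting polynomials of degree up to 255.
    let urs = KZG::<Bls12_381>::setup(255, rng);
    let (ck, vk) = (urs.ck(), urs.vk());

    // Commit to a random polynomial of the maximal supported degree.
    let p = Poly::<F>::rand(ck.max_degree(), rng);
    let c = KZG::<Bls12_381>::commit(&ck, &p).unwrap();

    // Open the polynomial at a random point and verify the opening.
    let x = F::rand(rng);
    let y = p.evaluate(&x);
    let proof = KZG::<Bls12_381>::open(&ck, &p, x).unwrap();
    assert!(KZG::<Bls12_381>::verify(&vk, c, x, y, proof).is_ok());
}
```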