├── .cargo └── config ├── .github └── workflows │ └── rust.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── NOTICE.md ├── README.md ├── SECURITY.md ├── benches ├── nizk.rs └── snark.rs ├── examples └── cubic.rs ├── profiler ├── nizk.rs └── snark.rs ├── rustfmt.toml └── src ├── commitments.rs ├── dense_mlpoly.rs ├── errors.rs ├── group.rs ├── lib.rs ├── math.rs ├── nizk ├── bullet.rs └── mod.rs ├── product_tree.rs ├── r1cs.rs ├── r1csproof.rs ├── random.rs ├── scalar ├── mod.rs └── ristretto255.rs ├── sparse_mlpoly.rs ├── sumcheck.rs ├── timer.rs ├── transcript.rs └── unipoly.rs /.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = [ 3 | "-C", "target-cpu=native", 4 | ] -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test Spartan 2 | 3 | permissions: 4 | contents: read 5 | pull-requests: write 6 | 7 | on: 8 | push: 9 | branches: [ master ] 10 | pull_request: 11 | branches: [ master ] 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Build 19 | uses: actions-rs/toolchain@v1 20 | with: 21 | toolchain: stable 22 | - uses: actions-rs/cargo@v1 23 | with: 24 | command: build 25 | args: --examples --benches --verbose 26 | 27 | build-no-default-features: 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v2 31 | - name: Build 32 | uses: actions-rs/toolchain@v1 33 | with: 34 | toolchain: stable 35 | - uses: actions-rs/cargo@v1 36 | with: 37 | command: build 38 | args: --no-default-features --examples --benches --verbose 39 | 40 | build-wasm: 41 | runs-on: ubuntu-latest 42 | steps: 43 | - uses: actions/checkout@v2 44 | - name: Wasm build 45 | uses: actions-rs/toolchain@v1 46 | with: 47 | toolchain: stable 48 | target: 
wasm32-unknown-unknown 49 | - uses: actions-rs/cargo@v1 50 | with: 51 | command: build 52 | args: --no-default-features --target wasm32-unknown-unknown 53 | 54 | build-wasi: 55 | runs-on: ubuntu-latest 56 | steps: 57 | - uses: actions/checkout@v2 58 | - name: Install wasm32-wasip1 target 59 | run: rustup target add wasm32-wasip1 60 | - uses: actions-rs/cargo@v1 61 | with: 62 | command: build 63 | args: --no-default-features --target wasm32-wasip1 64 | 65 | test: 66 | runs-on: ubuntu-latest 67 | steps: 68 | - uses: actions/checkout@v2 69 | - name: Run tests 70 | uses: actions-rs/toolchain@v1 71 | with: 72 | toolchain: stable 73 | - uses: actions-rs/cargo@v1 74 | with: 75 | command: test 76 | args: --release --verbose 77 | 78 | test-no-default-features: 79 | runs-on: ubuntu-latest 80 | steps: 81 | - uses: actions/checkout@v2 82 | - name: Run tests 83 | uses: actions-rs/toolchain@v1 84 | with: 85 | toolchain: stable 86 | - uses: actions-rs/cargo@v1 87 | with: 88 | command: test 89 | args: --no-default-features --release --verbose 90 | 91 | fmt: 92 | runs-on: ubuntu-latest 93 | steps: 94 | - uses: actions/checkout@v2 95 | - name: Check Rustfmt Code Style 96 | uses: actions-rs/toolchain@v1 97 | with: 98 | toolchain: stable 99 | components: rustfmt 100 | - uses: actions-rs/cargo@v1 101 | with: 102 | command: fmt 103 | args: --all -- --check 104 | 105 | clippy: 106 | runs-on: ubuntu-latest 107 | steps: 108 | - uses: actions/checkout@v2 109 | - name: Check clippy warnings 110 | uses: actions-rs/toolchain@v1 111 | with: 112 | toolchain: stable 113 | components: clippy 114 | - uses: actions-rs/cargo@v1 115 | with: 116 | command: clippy 117 | args: --all-targets -- -D warnings 118 | 119 | spelling: 120 | runs-on: ubuntu-latest 121 | steps: 122 | - name: Checkout Actions Repository 123 | uses: actions/checkout@v3 124 | - name: Spell Check Repo 125 | uses: crate-ci/typos@685eb3d55be2f85191e8c84acb9f44d7756f84ab 126 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | *.txt 13 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | This project welcomes contributions and suggestions. Most contributions require you to 2 | agree to a Contributor License Agreement (CLA) declaring that you have the right to, 3 | and actually do, grant us the rights to use your contribution. For details, visit 4 | https://cla.microsoft.com. 5 | 6 | When you submit a pull request, a CLA-bot will automatically determine whether you need 7 | to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the 8 | instructions provided by the bot. 
You will only need to do this once across all repositories using our CLA. 9 | 10 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 11 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 12 | or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "spartan" 3 | version = "0.9.0" 4 | authors = ["Srinath Setty "] 5 | edition = "2021" 6 | description = "High-speed zkSNARKs without trusted setup" 7 | documentation = "https://docs.rs/spartan/" 8 | readme = "README.md" 9 | repository = "https://github.com/microsoft/Spartan" 10 | license-file = "LICENSE" 11 | keywords = ["zkSNARKs", "cryptography", "proofs"] 12 | 13 | [dependencies] 14 | curve25519-dalek = { version = "4.1.1", features = [ 15 | "serde", 16 | "alloc", 17 | "rand_core", 18 | ], default-features = false } 19 | merlin = { version = "3.0.0", default-features = false } 20 | rand = "0.8.5" 21 | rand_core = { version = "0.6", default-features = false } 22 | digest = { version = "0.8.1", default-features = false } 23 | sha3 = { version = "0.8.2", default-features = false } 24 | byteorder = { version = "1.3.4", default-features = false } 25 | rayon = { version = "1.3.0", optional = true } 26 | serde = { version = "1.0.106", features = ["derive"], default-features = false } 27 | bincode = { version = "1.3.3", default-features = false } 28 | subtle = { version = "2.4", features = ["i128"], default-features = false } 29 | itertools = { version = "0.10.0", default-features = false } 30 | colored = { version = "2.0.0", default-features = false, optional = true } 31 | flate2 = { version = "1.0.14" } 32 | 33 | [target.'cfg(target_arch = 
"wasm32")'.dependencies] 34 | getrandom = { version = "0.2.15", default-features = false, features = ["js"] } 35 | 36 | [dev-dependencies] 37 | criterion = "0.3.1" 38 | typos = "0.10.33" 39 | 40 | [lib] 41 | name = "libspartan" 42 | path = "src/lib.rs" 43 | 44 | [[bin]] 45 | name = "snark" 46 | path = "profiler/snark.rs" 47 | required-features = ["std"] 48 | 49 | [[bin]] 50 | name = "nizk" 51 | path = "profiler/nizk.rs" 52 | required-features = ["std"] 53 | 54 | [[bench]] 55 | name = "snark" 56 | harness = false 57 | required-features = ["std"] 58 | 59 | [[bench]] 60 | name = "nizk" 61 | harness = false 62 | required-features = ["std"] 63 | 64 | [features] 65 | default = ["std"] 66 | std = [ 67 | "digest/std", 68 | "merlin/std", 69 | "rand/std", 70 | "sha3/std", 71 | "byteorder/std", 72 | "serde/std", 73 | "subtle/std", 74 | "itertools/use_std", 75 | "flate2/rust_backend", 76 | ] 77 | multicore = ["rayon"] 78 | profile = ["colored"] 79 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /NOTICE.md: -------------------------------------------------------------------------------- 1 | This repository includes the following third-party open-source code. 2 | 3 | * The code in `src/scalar/ristretto255.rs` is derived from [bls12-381](https://github.com/zkcrypto/bls12_381). 4 | Specifically, from [src/bls12_381/scalar.rs](https://github.com/zkcrypto/bls12_381/blob/master/src/scalar.rs) and [src/bls12_381/util.rs](https://github.com/zkcrypto/bls12_381/blob/master/src/util.rs), which has the following copyright and license. 5 | 6 | Permission is hereby granted, free of charge, to any 7 | person obtaining a copy of this software and associated 8 | documentation files (the "Software"), to deal in the 9 | Software without restriction, including without 10 | limitation the rights to use, copy, modify, merge, 11 | publish, distribute, sublicense, and/or sell copies of 12 | the Software, and to permit persons to whom the Software 13 | is furnished to do so, subject to the following 14 | conditions: 15 | 16 | The above copyright notice and this permission notice 17 | shall be included in all copies or substantial portions 18 | of the Software. 19 | 20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 21 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 22 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 23 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 24 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 25 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 26 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 27 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 28 | DEALINGS IN THE SOFTWARE. 29 | 30 | 31 | * The `invert` and `batch_invert` methods in src/scalar/ristretto255.rs is from [curve25519-dalek](https://github.com/dalek-cryptography/curve25519-dalek), which has the following license. 32 | 33 | Copyright (c) 2016-2019 Isis Agora Lovecruft, Henry de Valence. All rights reserved. 34 | 35 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 36 | 37 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 38 | 39 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 40 | 41 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 42 | 43 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 44 | PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 | 46 | ======================================================================== 47 | 48 | Portions of curve25519-dalek were originally derived from Adam Langley's Go ed25519 implementation, found at , under the following licence: 49 | 50 | ======================================================================== 51 | 52 | Copyright (c) 2012 The Go Authors. All rights reserved. 53 | 54 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 55 | 56 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 57 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 58 | * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 59 | 60 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 61 | 62 | 63 | * The module `src/nizk/bullet.rs` is derived from [bulletproofs](https://github.com/dalek-cryptography/bulletproofs/), which has the following license: 64 | 65 | MIT License 66 | 67 | Copyright (c) 2018 Chain, Inc. 68 | 69 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 70 | 71 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 72 | 73 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Spartan: High-speed zkSNARKs without trusted setup 2 | 3 | ![Rust](https://github.com/microsoft/Spartan/actions/workflows/rust.yml/badge.svg) 4 | [![](https://img.shields.io/crates/v/spartan.svg)](<(https://crates.io/crates/spartan)>) 5 | 6 | Spartan is a high-speed zero-knowledge proof system, a cryptographic primitive that enables a prover to prove a mathematical statement to a verifier without revealing anything besides the validity of the statement. This repository provides `libspartan,` a Rust library that implements a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), which is a type of zero-knowledge proof system with short proofs and fast verification times. The details of the Spartan proof system are described in our [paper](https://eprint.iacr.org/2019/550) published at [CRYPTO 2020](https://crypto.iacr.org/2020/). The security of the Spartan variant implemented in this library is based on the discrete logarithm problem in the random oracle model. 7 | 8 | A simple example application is proving the knowledge of a secret s such that H(s) == d for a public d, where H is a cryptographic hash function (e.g., SHA-256, Keccak). A more complex application is a database-backed cloud service that produces proofs of correct state machine transitions for auditability. See this [paper](https://eprint.iacr.org/2020/758.pdf) for an overview and this [paper](https://eprint.iacr.org/2018/907.pdf) for details. 9 | 10 | Note that this library has _not_ received a security review or audit. 11 | 12 | ## Highlights 13 | 14 | We now highlight Spartan's distinctive features. 15 | 16 | - **No "toxic" waste:** Spartan is a _transparent_ zkSNARK and does not require a trusted setup. 
So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters. 17 | 18 | - **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exist efficient transformations and compiler toolchains from high-level programs of interest. 19 | 20 | - **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS). 21 | 22 | - **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`. 23 | 24 | - **State-of-the-art performance:** 25 | Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads. 26 | 27 | ### Implementation details 28 | 29 | `libspartan` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform. We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. 
An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library. 30 | 31 | ## Examples 32 | 33 | To import `libspartan` into your Rust project, add the following dependency to `Cargo.toml`: 34 | 35 | ```text 36 | spartan = "0.9.0" 37 | ``` 38 | 39 | The following example shows how to use `libspartan` to create and verify a SNARK proof. 40 | Some of our public APIs' style is inspired by the underlying crates we use. 41 | 42 | ```rust 43 | extern crate libspartan; 44 | extern crate merlin; 45 | use libspartan::{Instance, SNARKGens, SNARK}; 46 | use merlin::Transcript; 47 | fn main() { 48 | // specify the size of an R1CS instance 49 | let num_vars = 1024; 50 | let num_cons = 1024; 51 | let num_inputs = 10; 52 | let num_non_zero_entries = 1024; 53 | 54 | // produce public parameters 55 | let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); 56 | 57 | // ask the library to produce a synthetic R1CS instance 58 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 59 | 60 | // create a commitment to the R1CS instance 61 | let (comm, decomm) = SNARK::encode(&inst, &gens); 62 | 63 | // produce a proof of satisfiability 64 | let mut prover_transcript = Transcript::new(b"snark_example"); 65 | let proof = SNARK::prove(&inst, &comm, &decomm, vars, &inputs, &gens, &mut prover_transcript); 66 | 67 | // verify the proof of satisfiability 68 | let mut verifier_transcript = Transcript::new(b"snark_example"); 69 | assert!(proof 70 | .verify(&comm, &inputs, &mut verifier_transcript, &gens) 71 | .is_ok()); 72 | println!("proof verification successful!"); 73 | } 74 | ``` 75 | 76 | Here is another example to use the NIZK variant of the Spartan proof system: 77 | 78 | ```rust 79 | extern crate libspartan; 80 | extern crate merlin; 81 | use libspartan::{Instance, NIZKGens, NIZK}; 82 | use merlin::Transcript; 83 | fn main() { 84 | // specify the size of an R1CS 
instance 85 | let num_vars = 1024; 86 | let num_cons = 1024; 87 | let num_inputs = 10; 88 | 89 | // produce public parameters 90 | let gens = NIZKGens::new(num_cons, num_vars, num_inputs); 91 | 92 | // ask the library to produce a synthetic R1CS instance 93 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 94 | 95 | // produce a proof of satisfiability 96 | let mut prover_transcript = Transcript::new(b"nizk_example"); 97 | let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); 98 | 99 | // verify the proof of satisfiability 100 | let mut verifier_transcript = Transcript::new(b"nizk_example"); 101 | assert!(proof 102 | .verify(&inst, &inputs, &mut verifier_transcript, &gens) 103 | .is_ok()); 104 | println!("proof verification successful!"); 105 | } 106 | ``` 107 | 108 | Finally, we provide an example that specifies a custom R1CS instance instead of using a synthetic instance 109 | 110 | ```rust 111 | #![allow(non_snake_case)] 112 | extern crate curve25519_dalek; 113 | extern crate libspartan; 114 | extern crate merlin; 115 | use curve25519_dalek::scalar::Scalar; 116 | use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK}; 117 | use merlin::Transcript; 118 | use rand::rngs::OsRng; 119 | 120 | fn main() { 121 | // produce a tiny instance 122 | let ( 123 | num_cons, 124 | num_vars, 125 | num_inputs, 126 | num_non_zero_entries, 127 | inst, 128 | assignment_vars, 129 | assignment_inputs, 130 | ) = produce_tiny_r1cs(); 131 | 132 | // produce public parameters 133 | let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); 134 | 135 | // create a commitment to the R1CS instance 136 | let (comm, decomm) = SNARK::encode(&inst, &gens); 137 | 138 | // produce a proof of satisfiability 139 | let mut prover_transcript = Transcript::new(b"snark_example"); 140 | let proof = SNARK::prove( 141 | &inst, 142 | &comm, 143 | &decomm, 144 | assignment_vars, 145 | 
&assignment_inputs, 146 | &gens, 147 | &mut prover_transcript, 148 | ); 149 | 150 | // verify the proof of satisfiability 151 | let mut verifier_transcript = Transcript::new(b"snark_example"); 152 | assert!(proof 153 | .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens) 154 | .is_ok()); 155 | println!("proof verification successful!"); 156 | } 157 | 158 | fn produce_tiny_r1cs() -> ( 159 | usize, 160 | usize, 161 | usize, 162 | usize, 163 | Instance, 164 | VarsAssignment, 165 | InputsAssignment, 166 | ) { 167 | // We will use the following example, but one could construct any R1CS instance. 168 | // Our R1CS instance is three constraints over five variables and two public inputs 169 | // (Z0 + Z1) * I0 - Z2 = 0 170 | // (Z0 + I1) * Z2 - Z3 = 0 171 | // Z4 * 1 - 0 = 0 172 | 173 | // parameters of the R1CS instance rounded to the nearest power of two 174 | let num_cons = 4; 175 | let num_vars = 5; 176 | let num_inputs = 2; 177 | let num_non_zero_entries = 5; 178 | 179 | // We will encode the above constraints into three matrices, where 180 | // the coefficients in the matrix are in the little-endian byte order 181 | let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); 182 | let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new(); 183 | let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new(); 184 | 185 | // The constraint system is defined over a finite field, which in our case is 186 | // the scalar field of ristretto255/curve25519 i.e., p = 2^{252}+27742317777372353535851937790883648493 187 | // To construct these matrices, we will use `curve25519-dalek` but one can use any other method. 
188 | 189 | // a variable that holds a byte representation of 1 190 | let one = Scalar::ONE.to_bytes(); 191 | 192 | // R1CS is a set of three sparse matrices A B C, where there is a row for every 193 | // constraint and a column for every entry in z = (vars, 1, inputs) 194 | // An R1CS instance is satisfiable iff: 195 | // Az \circ Bz = Cz, where z = (vars, 1, inputs) 196 | 197 | // constraint 0 entries in (A,B,C) 198 | // constraint 0 is (Z0 + Z1) * I0 - Z2 = 0. 199 | // We set 1 in matrix A for columns that correspond to Z0 and Z1 200 | // We set 1 in matrix B for column that corresponds to I0 201 | // We set 1 in matrix C for column that corresponds to Z2 202 | A.push((0, 0, one)); 203 | A.push((0, 1, one)); 204 | B.push((0, num_vars + 1, one)); 205 | C.push((0, 2, one)); 206 | 207 | // constraint 1 entries in (A,B,C) 208 | A.push((1, 0, one)); 209 | A.push((1, num_vars + 2, one)); 210 | B.push((1, 2, one)); 211 | C.push((1, 3, one)); 212 | 213 | // constraint 2 entries in (A,B,C) 214 | A.push((2, 4, one)); 215 | B.push((2, num_vars, one)); 216 | 217 | let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); 218 | 219 | // compute a satisfying assignment 220 | let mut csprng: OsRng = OsRng; 221 | let i0 = Scalar::random(&mut csprng); 222 | let i1 = Scalar::random(&mut csprng); 223 | let z0 = Scalar::random(&mut csprng); 224 | let z1 = Scalar::random(&mut csprng); 225 | let z2 = (z0 + z1) * i0; // constraint 0 226 | let z3 = (z0 + i1) * z2; // constraint 1 227 | let z4 = Scalar::ZERO; //constraint 2 228 | 229 | // create a VarsAssignment 230 | let mut vars = vec![Scalar::ZERO.to_bytes(); num_vars]; 231 | vars[0] = z0.to_bytes(); 232 | vars[1] = z1.to_bytes(); 233 | vars[2] = z2.to_bytes(); 234 | vars[3] = z3.to_bytes(); 235 | vars[4] = z4.to_bytes(); 236 | let assignment_vars = VarsAssignment::new(&vars).unwrap(); 237 | 238 | // create an InputsAssignment 239 | let mut inputs = vec![Scalar::ZERO.to_bytes(); num_inputs]; 240 | inputs[0] = 
i0.to_bytes(); 241 | inputs[1] = i1.to_bytes(); 242 | let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); 243 | 244 | // check if the instance we created is satisfiable 245 | let res = inst.is_sat(&assignment_vars, &assignment_inputs); 246 | assert_eq!(res.unwrap(), true); 247 | 248 | ( 249 | num_cons, 250 | num_vars, 251 | num_inputs, 252 | num_non_zero_entries, 253 | inst, 254 | assignment_vars, 255 | assignment_inputs, 256 | ) 257 | } 258 | ``` 259 | 260 | For more examples, see [`examples/`](examples) directory in this repo. 261 | 262 | ## Building `libspartan` 263 | 264 | Install [`rustup`](https://rustup.rs/) 265 | 266 | Switch to nightly Rust using `rustup`: 267 | 268 | ```text 269 | rustup default nightly 270 | ``` 271 | 272 | Clone the repository: 273 | 274 | ```text 275 | git clone https://github.com/Microsoft/Spartan 276 | cd Spartan 277 | ``` 278 | 279 | To build docs for public APIs of `libspartan`: 280 | 281 | ```text 282 | cargo doc 283 | ``` 284 | 285 | To run tests: 286 | 287 | ```text 288 | RUSTFLAGS='-C target_cpu=native --cfg curve25519_dalek_backend="BACKEND"' cargo test 289 | ``` 290 | 291 | To build `libspartan`: 292 | 293 | ```text 294 | RUSTFLAGS='-C target_cpu=native --cfg curve25519_dalek_backend="BACKEND"' cargo build --release 295 | ``` 296 | 297 | > NOTE: We enable SIMD instructions in `curve25519-dalek` by default, so if it fails to build remove the argument passed to curve25519_dalek in the above command. 298 | 299 | ### Supported features 300 | 301 | - `std`: enables std features (enabled by default) 302 | - `profile`: enables fine-grained profiling information (see below for its use) 303 | 304 | ### WASM Support 305 | 306 | `libspartan` depends upon `rand::OsRng` (internally uses `getrandom` crate), it has out of box support for `wasm32-wasi`. 307 | 308 | For the target `wasm32-unknown-unknown` disable default features for spartan 309 | and add direct dependency on `getrandom` with `wasm-bindgen` feature enabled. 
310 | 311 | ```toml 312 | [dependencies] 313 | spartan = { version = "0.7", default-features = false } 314 | # since spartan uses getrandom(rand's OsRng), we need to enable 'wasm-bindgen' 315 | # feature to make it feed rand seed from js/nodejs env 316 | # https://docs.rs/getrandom/0.1.16/getrandom/index.html#support-for-webassembly-and-asmjs 317 | getrandom = { version = "0.1", features = ["wasm-bindgen"] } 318 | ``` 319 | 320 | ## Performance 321 | 322 | ### End-to-end benchmarks 323 | 324 | `libspartan` includes two benches: `benches/nizk.rs` and `benches/snark.rs`. If you report the performance of Spartan in a research paper, we recommend using these benches for higher accuracy instead of fine-grained profiling (listed below). 325 | 326 | To run end-to-end benchmarks: 327 | 328 | ```text 329 | RUSTFLAGS='-C target_cpu=native --cfg curve25519_dalek_backend="BACKEND"' cargo bench 330 | ``` 331 | 332 | ### Fine-grained profiling 333 | 334 | Build `libspartan` with `profile` feature enabled. It creates two profilers: `./target/release/snark` and `./target/release/nizk`. 335 | 336 | These profilers report performance as depicted below (for varying R1CS instance sizes). The reported 337 | performance is from running the profilers on a Microsoft Surface Laptop 3 on a single CPU core of Intel Core i7-1065G7 running Ubuntu 20.04 (atop WSL2 on Windows 10). 338 | See Section 9 in our [paper](https://eprint.iacr.org/2019/550) to see how this compares with other zkSNARKs in the literature. 
339 | 340 | ```text 341 | $ ./target/release/snark 342 | Profiler:: SNARK 343 | * number_of_constraints 1048576 344 | * number_of_variables 1048576 345 | * number_of_inputs 10 346 | * number_non-zero_entries_A 1048576 347 | * number_non-zero_entries_B 1048576 348 | * number_non-zero_entries_C 1048576 349 | * SNARK::encode 350 | * SNARK::encode 14.2644201s 351 | * SNARK::prove 352 | * R1CSProof::prove 353 | * polycommit 354 | * polycommit 2.7175848s 355 | * prove_sc_phase_one 356 | * prove_sc_phase_one 683.7481ms 357 | * prove_sc_phase_two 358 | * prove_sc_phase_two 846.1056ms 359 | * polyeval 360 | * polyeval 193.4216ms 361 | * R1CSProof::prove 4.4416193s 362 | * len_r1cs_sat_proof 47024 363 | * eval_sparse_polys 364 | * eval_sparse_polys 377.357ms 365 | * R1CSEvalProof::prove 366 | * commit_nondet_witness 367 | * commit_nondet_witness 14.4507331s 368 | * build_layered_network 369 | * build_layered_network 3.4360521s 370 | * evalproof_layered_network 371 | * len_product_layer_proof 64712 372 | * evalproof_layered_network 15.5708066s 373 | * R1CSEvalProof::prove 34.2930559s 374 | * len_r1cs_eval_proof 133720 375 | * SNARK::prove 39.1297568s 376 | * SNARK::proof_compressed_len 141768 377 | * SNARK::verify 378 | * verify_sat_proof 379 | * verify_sat_proof 20.0828ms 380 | * verify_eval_proof 381 | * verify_polyeval_proof 382 | * verify_prod_proof 383 | * verify_prod_proof 1.1847ms 384 | * verify_hash_proof 385 | * verify_hash_proof 81.06ms 386 | * verify_polyeval_proof 82.3583ms 387 | * verify_eval_proof 82.8937ms 388 | * SNARK::verify 103.0536ms 389 | ``` 390 | 391 | ```text 392 | $ ./target/release/nizk 393 | Profiler:: NIZK 394 | * number_of_constraints 1048576 395 | * number_of_variables 1048576 396 | * number_of_inputs 10 397 | * number_non-zero_entries_A 1048576 398 | * number_non-zero_entries_B 1048576 399 | * number_non-zero_entries_C 1048576 400 | * NIZK::prove 401 | * R1CSProof::prove 402 | * polycommit 403 | * polycommit 2.7220635s 404 | * prove_sc_phase_one 
405 | * prove_sc_phase_one 722.5487ms 406 | * prove_sc_phase_two 407 | * prove_sc_phase_two 862.6796ms 408 | * polyeval 409 | * polyeval 190.2233ms 410 | * R1CSProof::prove 4.4982305s 411 | * len_r1cs_sat_proof 47024 412 | * NIZK::prove 4.5139888s 413 | * NIZK::proof_compressed_len 48134 414 | * NIZK::verify 415 | * eval_sparse_polys 416 | * eval_sparse_polys 395.0847ms 417 | * verify_sat_proof 418 | * verify_sat_proof 19.286ms 419 | * NIZK::verify 414.5102ms 420 | ``` 421 | 422 | ## LICENSE 423 | 424 | See [LICENSE](./LICENSE) 425 | 426 | ## Contributing 427 | 428 | See [CONTRIBUTING](./CONTRIBUTING.md) 429 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets Microsoft's [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)) of a security vulnerability, please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). 
14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). 
40 | 41 | 42 | -------------------------------------------------------------------------------- /benches/nizk.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::assertions_on_result_states)] 2 | extern crate byteorder; 3 | extern crate core; 4 | extern crate criterion; 5 | extern crate digest; 6 | extern crate libspartan; 7 | extern crate merlin; 8 | extern crate rand; 9 | extern crate sha3; 10 | 11 | use libspartan::{Instance, NIZKGens, NIZK}; 12 | use merlin::Transcript; 13 | 14 | use criterion::*; 15 | 16 | fn nizk_prove_benchmark(c: &mut Criterion) { 17 | for &s in [10, 12, 16].iter() { 18 | let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); 19 | let mut group = c.benchmark_group("NIZK_prove_benchmark"); 20 | group.plot_config(plot_config); 21 | 22 | let num_vars = (2_usize).pow(s as u32); 23 | let num_cons = num_vars; 24 | let num_inputs = 10; 25 | 26 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 27 | 28 | let gens = NIZKGens::new(num_cons, num_vars, num_inputs); 29 | 30 | let name = format!("NIZK_prove_{num_vars}"); 31 | group.bench_function(&name, move |b| { 32 | b.iter(|| { 33 | let mut prover_transcript = Transcript::new(b"example"); 34 | NIZK::prove( 35 | black_box(&inst), 36 | black_box(vars.clone()), 37 | black_box(&inputs), 38 | black_box(&gens), 39 | black_box(&mut prover_transcript), 40 | ); 41 | }); 42 | }); 43 | group.finish(); 44 | } 45 | } 46 | 47 | fn nizk_verify_benchmark(c: &mut Criterion) { 48 | for &s in [10, 12, 16].iter() { 49 | let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); 50 | let mut group = c.benchmark_group("NIZK_verify_benchmark"); 51 | group.plot_config(plot_config); 52 | 53 | let num_vars = (2_usize).pow(s as u32); 54 | let num_cons = num_vars; 55 | let num_inputs = 10; 56 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, 
num_inputs); 57 | 58 | let gens = NIZKGens::new(num_cons, num_vars, num_inputs); 59 | 60 | // produce a proof of satisfiability 61 | let mut prover_transcript = Transcript::new(b"example"); 62 | let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); 63 | 64 | let name = format!("NIZK_verify_{num_cons}"); 65 | group.bench_function(&name, move |b| { 66 | b.iter(|| { 67 | let mut verifier_transcript = Transcript::new(b"example"); 68 | assert!(proof 69 | .verify( 70 | black_box(&inst), 71 | black_box(&inputs), 72 | black_box(&mut verifier_transcript), 73 | black_box(&gens) 74 | ) 75 | .is_ok()); 76 | }); 77 | }); 78 | group.finish(); 79 | } 80 | } 81 | 82 | fn set_duration() -> Criterion { 83 | Criterion::default().sample_size(10) 84 | } 85 | 86 | criterion_group! { 87 | name = benches_nizk; 88 | config = set_duration(); 89 | targets = nizk_prove_benchmark, nizk_verify_benchmark 90 | } 91 | 92 | criterion_main!(benches_nizk); 93 | -------------------------------------------------------------------------------- /benches/snark.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::assertions_on_result_states)] 2 | extern crate libspartan; 3 | extern crate merlin; 4 | 5 | use libspartan::{Instance, SNARKGens, SNARK}; 6 | use merlin::Transcript; 7 | 8 | use criterion::*; 9 | 10 | fn snark_encode_benchmark(c: &mut Criterion) { 11 | for &s in [10, 12, 16].iter() { 12 | let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); 13 | let mut group = c.benchmark_group("SNARK_encode_benchmark"); 14 | group.plot_config(plot_config); 15 | 16 | let num_vars = (2_usize).pow(s as u32); 17 | let num_cons = num_vars; 18 | let num_inputs = 10; 19 | let (inst, _vars, _inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 20 | 21 | // produce public parameters 22 | let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); 23 | 24 | // produce a commitment 
to R1CS instance 25 | let name = format!("SNARK_encode_{num_cons}"); 26 | group.bench_function(&name, move |b| { 27 | b.iter(|| { 28 | SNARK::encode(black_box(&inst), black_box(&gens)); 29 | }); 30 | }); 31 | group.finish(); 32 | } 33 | } 34 | 35 | fn snark_prove_benchmark(c: &mut Criterion) { 36 | for &s in [10, 12, 16].iter() { 37 | let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); 38 | let mut group = c.benchmark_group("SNARK_prove_benchmark"); 39 | group.plot_config(plot_config); 40 | 41 | let num_vars = (2_usize).pow(s as u32); 42 | let num_cons = num_vars; 43 | let num_inputs = 10; 44 | 45 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 46 | 47 | // produce public parameters 48 | let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); 49 | 50 | // produce a commitment to R1CS instance 51 | let (comm, decomm) = SNARK::encode(&inst, &gens); 52 | 53 | // produce a proof 54 | let name = format!("SNARK_prove_{num_cons}"); 55 | group.bench_function(&name, move |b| { 56 | b.iter(|| { 57 | let mut prover_transcript = Transcript::new(b"example"); 58 | SNARK::prove( 59 | black_box(&inst), 60 | black_box(&comm), 61 | black_box(&decomm), 62 | black_box(vars.clone()), 63 | black_box(&inputs), 64 | black_box(&gens), 65 | black_box(&mut prover_transcript), 66 | ); 67 | }); 68 | }); 69 | group.finish(); 70 | } 71 | } 72 | 73 | fn snark_verify_benchmark(c: &mut Criterion) { 74 | for &s in [10, 12, 16].iter() { 75 | let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); 76 | let mut group = c.benchmark_group("SNARK_verify_benchmark"); 77 | group.plot_config(plot_config); 78 | 79 | let num_vars = (2_usize).pow(s as u32); 80 | let num_cons = num_vars; 81 | let num_inputs = 10; 82 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 83 | 84 | // produce public parameters 85 | let gens = SNARKGens::new(num_cons, 
num_vars, num_inputs, num_cons); 86 | 87 | // produce a commitment to R1CS instance 88 | let (comm, decomm) = SNARK::encode(&inst, &gens); 89 | 90 | // produce a proof of satisfiability 91 | let mut prover_transcript = Transcript::new(b"example"); 92 | let proof = SNARK::prove( 93 | &inst, 94 | &comm, 95 | &decomm, 96 | vars, 97 | &inputs, 98 | &gens, 99 | &mut prover_transcript, 100 | ); 101 | 102 | // verify the proof 103 | let name = format!("SNARK_verify_{num_cons}"); 104 | group.bench_function(&name, move |b| { 105 | b.iter(|| { 106 | let mut verifier_transcript = Transcript::new(b"example"); 107 | assert!(proof 108 | .verify( 109 | black_box(&comm), 110 | black_box(&inputs), 111 | black_box(&mut verifier_transcript), 112 | black_box(&gens) 113 | ) 114 | .is_ok()); 115 | }); 116 | }); 117 | group.finish(); 118 | } 119 | } 120 | 121 | fn set_duration() -> Criterion { 122 | Criterion::default().sample_size(10) 123 | } 124 | 125 | criterion_group! { 126 | name = benches_snark; 127 | config = set_duration(); 128 | targets = snark_encode_benchmark, snark_prove_benchmark, snark_verify_benchmark 129 | } 130 | 131 | criterion_main!(benches_snark); 132 | -------------------------------------------------------------------------------- /examples/cubic.rs: -------------------------------------------------------------------------------- 1 | //! Demonstrates how to produces a proof for canonical cubic equation: `x^3 + x + 5 = y`. 2 | //! The example is described in detail [here]. 3 | //! 4 | //! The R1CS for this problem consists of the following 4 constraints: 5 | //! `Z0 * Z0 - Z1 = 0` 6 | //! `Z1 * Z0 - Z2 = 0` 7 | //! `(Z2 + Z0) * 1 - Z3 = 0` 8 | //! `(Z3 + 5) * 1 - I0 = 0` 9 | //! 10 | //! 
[here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649 11 | #![allow(clippy::assertions_on_result_states)] 12 | use curve25519_dalek::scalar::Scalar; 13 | use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK}; 14 | use merlin::Transcript; 15 | use rand::rngs::OsRng; 16 | 17 | #[allow(non_snake_case)] 18 | fn produce_r1cs() -> ( 19 | usize, 20 | usize, 21 | usize, 22 | usize, 23 | Instance, 24 | VarsAssignment, 25 | InputsAssignment, 26 | ) { 27 | // parameters of the R1CS instance 28 | let num_cons = 4; 29 | let num_vars = 4; 30 | let num_inputs = 1; 31 | let num_non_zero_entries = 8; 32 | 33 | // We will encode the above constraints into three matrices, where 34 | // the coefficients in the matrix are in the little-endian byte order 35 | let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); 36 | let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new(); 37 | let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new(); 38 | 39 | let one = Scalar::ONE.to_bytes(); 40 | 41 | // R1CS is a set of three sparse matrices A B C, where is a row for every 42 | // constraint and a column for every entry in z = (vars, 1, inputs) 43 | // An R1CS instance is satisfiable iff: 44 | // Az \circ Bz = Cz, where z = (vars, 1, inputs) 45 | 46 | // constraint 0 entries in (A,B,C) 47 | // constraint 0 is Z0 * Z0 - Z1 = 0. 48 | A.push((0, 0, one)); 49 | B.push((0, 0, one)); 50 | C.push((0, 1, one)); 51 | 52 | // constraint 1 entries in (A,B,C) 53 | // constraint 1 is Z1 * Z0 - Z2 = 0. 54 | A.push((1, 1, one)); 55 | B.push((1, 0, one)); 56 | C.push((1, 2, one)); 57 | 58 | // constraint 2 entries in (A,B,C) 59 | // constraint 2 is (Z2 + Z0) * 1 - Z3 = 0. 60 | A.push((2, 2, one)); 61 | A.push((2, 0, one)); 62 | B.push((2, num_vars, one)); 63 | C.push((2, 3, one)); 64 | 65 | // constraint 3 entries in (A,B,C) 66 | // constraint 3 is (Z3 + 5) * 1 - I0 = 0. 
67 | A.push((3, 3, one)); 68 | A.push((3, num_vars, Scalar::from(5u32).to_bytes())); 69 | B.push((3, num_vars, one)); 70 | C.push((3, num_vars + 1, one)); 71 | 72 | let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); 73 | 74 | // compute a satisfying assignment 75 | let mut csprng: OsRng = OsRng; 76 | let z0 = Scalar::random(&mut csprng); 77 | let z1 = z0 * z0; // constraint 0 78 | let z2 = z1 * z0; // constraint 1 79 | let z3 = z2 + z0; // constraint 2 80 | let i0 = z3 + Scalar::from(5u32); // constraint 3 81 | 82 | // create a VarsAssignment 83 | let mut vars = vec![Scalar::ZERO.to_bytes(); num_vars]; 84 | vars[0] = z0.to_bytes(); 85 | vars[1] = z1.to_bytes(); 86 | vars[2] = z2.to_bytes(); 87 | vars[3] = z3.to_bytes(); 88 | let assignment_vars = VarsAssignment::new(&vars).unwrap(); 89 | 90 | // create an InputsAssignment 91 | let mut inputs = vec![Scalar::ZERO.to_bytes(); num_inputs]; 92 | inputs[0] = i0.to_bytes(); 93 | let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); 94 | 95 | // check if the instance we created is satisfiable 96 | let res = inst.is_sat(&assignment_vars, &assignment_inputs); 97 | assert!(res.unwrap(), "should be satisfied"); 98 | 99 | ( 100 | num_cons, 101 | num_vars, 102 | num_inputs, 103 | num_non_zero_entries, 104 | inst, 105 | assignment_vars, 106 | assignment_inputs, 107 | ) 108 | } 109 | 110 | fn main() { 111 | // produce an R1CS instance 112 | let ( 113 | num_cons, 114 | num_vars, 115 | num_inputs, 116 | num_non_zero_entries, 117 | inst, 118 | assignment_vars, 119 | assignment_inputs, 120 | ) = produce_r1cs(); 121 | 122 | // produce public parameters 123 | let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); 124 | 125 | // create a commitment to the R1CS instance 126 | let (comm, decomm) = SNARK::encode(&inst, &gens); 127 | 128 | // produce a proof of satisfiability 129 | let mut prover_transcript = Transcript::new(b"snark_example"); 130 | let proof = SNARK::prove( 
131 | &inst, 132 | &comm, 133 | &decomm, 134 | assignment_vars, 135 | &assignment_inputs, 136 | &gens, 137 | &mut prover_transcript, 138 | ); 139 | 140 | // verify the proof of satisfiability 141 | let mut verifier_transcript = Transcript::new(b"snark_example"); 142 | assert!(proof 143 | .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens) 144 | .is_ok()); 145 | println!("proof verification successful!"); 146 | } 147 | -------------------------------------------------------------------------------- /profiler/nizk.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case)] 2 | #![allow(clippy::assertions_on_result_states)] 3 | 4 | extern crate flate2; 5 | extern crate libspartan; 6 | extern crate merlin; 7 | extern crate rand; 8 | 9 | use flate2::{write::ZlibEncoder, Compression}; 10 | use libspartan::{Instance, NIZKGens, NIZK}; 11 | use merlin::Transcript; 12 | 13 | fn print(msg: &str) { 14 | let star = "* "; 15 | println!("{:indent$}{}{}", "", star, msg, indent = 2); 16 | } 17 | 18 | pub fn main() { 19 | // the list of number of variables (and constraints) in an R1CS instance 20 | let inst_sizes = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]; 21 | 22 | println!("Profiler:: NIZK"); 23 | for &s in inst_sizes.iter() { 24 | let num_vars = (2_usize).pow(s as u32); 25 | let num_cons = num_vars; 26 | let num_inputs = 10; 27 | 28 | // produce a synthetic R1CSShape 29 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 30 | 31 | // produce public generators 32 | let gens = NIZKGens::new(num_cons, num_vars, num_inputs); 33 | 34 | // produce a proof of satisfiability 35 | let mut prover_transcript = Transcript::new(b"nizk_example"); 36 | let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); 37 | 38 | let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); 39 | bincode::serialize_into(&mut encoder, &proof).unwrap(); 40 | let 
proof_encoded = encoder.finish().unwrap(); 41 | let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len()); 42 | print(&msg_proof_len); 43 | 44 | // verify the proof of satisfiability 45 | let mut verifier_transcript = Transcript::new(b"nizk_example"); 46 | assert!(proof 47 | .verify(&inst, &inputs, &mut verifier_transcript, &gens) 48 | .is_ok()); 49 | 50 | println!(); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /profiler/snark.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case)] 2 | #![allow(clippy::assertions_on_result_states)] 3 | 4 | extern crate flate2; 5 | extern crate libspartan; 6 | extern crate merlin; 7 | 8 | use flate2::{write::ZlibEncoder, Compression}; 9 | use libspartan::{Instance, SNARKGens, SNARK}; 10 | use merlin::Transcript; 11 | 12 | fn print(msg: &str) { 13 | let star = "* "; 14 | println!("{:indent$}{}{}", "", star, msg, indent = 2); 15 | } 16 | 17 | pub fn main() { 18 | // the list of number of variables (and constraints) in an R1CS instance 19 | let inst_sizes = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]; 20 | 21 | println!("Profiler:: SNARK"); 22 | for &s in inst_sizes.iter() { 23 | let num_vars = (2_usize).pow(s as u32); 24 | let num_cons = num_vars; 25 | let num_inputs = 10; 26 | 27 | // produce a synthetic R1CSShape 28 | let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 29 | 30 | // produce public generators 31 | let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); 32 | 33 | // create a commitment to R1CSShape 34 | let (comm, decomm) = SNARK::encode(&inst, &gens); 35 | 36 | // produce a proof of satisfiability 37 | let mut prover_transcript = Transcript::new(b"snark_example"); 38 | let proof = SNARK::prove( 39 | &inst, 40 | &comm, 41 | &decomm, 42 | vars, 43 | &inputs, 44 | &gens, 45 | &mut prover_transcript, 46 | ); 47 | 48 | let mut 
encoder = ZlibEncoder::new(Vec::new(), Compression::default()); 49 | bincode::serialize_into(&mut encoder, &proof).unwrap(); 50 | let proof_encoded = encoder.finish().unwrap(); 51 | let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len()); 52 | print(&msg_proof_len); 53 | 54 | // verify the proof of satisfiability 55 | let mut verifier_transcript = Transcript::new(b"snark_example"); 56 | assert!(proof 57 | .verify(&comm, &inputs, &mut verifier_transcript, &gens) 58 | .is_ok()); 59 | 60 | println!(); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2018" 2 | tab_spaces = 2 3 | newline_style = "Unix" 4 | use_try_shorthand = true 5 | -------------------------------------------------------------------------------- /src/commitments.rs: -------------------------------------------------------------------------------- 1 | use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT_COMPRESSED}; 2 | use super::scalar::Scalar; 3 | use digest::{ExtendableOutput, Input, XofReader}; 4 | use serde::{Deserialize, Serialize}; 5 | use sha3::Shake256; 6 | 7 | #[derive(Debug, Serialize, Deserialize)] 8 | pub struct MultiCommitGens { 9 | pub n: usize, 10 | pub G: Vec, 11 | pub h: GroupElement, 12 | } 13 | 14 | impl MultiCommitGens { 15 | pub fn new(n: usize, label: &[u8]) -> Self { 16 | let mut shake = Shake256::default(); 17 | shake.input(label); 18 | shake.input(GROUP_BASEPOINT_COMPRESSED.as_bytes()); 19 | 20 | let mut reader = shake.xof_result(); 21 | let mut gens: Vec = Vec::new(); 22 | let mut uniform_bytes = [0u8; 64]; 23 | for _ in 0..n + 1 { 24 | reader.read(&mut uniform_bytes); 25 | gens.push(GroupElement::from_uniform_bytes(&uniform_bytes)); 26 | } 27 | 28 | MultiCommitGens { 29 | n, 30 | G: gens[..n].to_vec(), 31 | h: gens[n], 32 | } 33 | } 34 | 35 | pub fn clone(&self) -> 
MultiCommitGens { 36 | MultiCommitGens { 37 | n: self.n, 38 | h: self.h, 39 | G: self.G.clone(), 40 | } 41 | } 42 | 43 | pub fn scale(&self, s: &Scalar) -> MultiCommitGens { 44 | MultiCommitGens { 45 | n: self.n, 46 | h: self.h, 47 | G: (0..self.n).map(|i| s * self.G[i]).collect(), 48 | } 49 | } 50 | 51 | pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) { 52 | let (G1, G2) = self.G.split_at(mid); 53 | 54 | ( 55 | MultiCommitGens { 56 | n: G1.len(), 57 | G: G1.to_vec(), 58 | h: self.h, 59 | }, 60 | MultiCommitGens { 61 | n: G2.len(), 62 | G: G2.to_vec(), 63 | h: self.h, 64 | }, 65 | ) 66 | } 67 | } 68 | 69 | pub trait Commitments { 70 | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement; 71 | } 72 | 73 | impl Commitments for Scalar { 74 | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement { 75 | assert_eq!(gens_n.n, 1); 76 | GroupElement::vartime_multiscalar_mul(&[*self, *blind], &[gens_n.G[0], gens_n.h]) 77 | } 78 | } 79 | 80 | impl Commitments for Vec { 81 | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement { 82 | assert_eq!(gens_n.n, self.len()); 83 | GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h 84 | } 85 | } 86 | 87 | impl Commitments for [Scalar] { 88 | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement { 89 | assert_eq!(gens_n.n, self.len()); 90 | GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/dense_mlpoly.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::too_many_arguments)] 2 | use super::commitments::{Commitments, MultiCommitGens}; 3 | use super::errors::ProofVerifyError; 4 | use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; 5 | use super::math::Math; 6 | use super::nizk::{DotProductProofGens, 
DotProductProofLog}; 7 | use super::random::RandomTape; 8 | use super::scalar::Scalar; 9 | use super::transcript::{AppendToTranscript, ProofTranscript}; 10 | use core::ops::Index; 11 | use merlin::Transcript; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | #[cfg(feature = "multicore")] 15 | use rayon::prelude::*; 16 | 17 | #[derive(Debug, Serialize, Deserialize)] 18 | pub struct DensePolynomial { 19 | num_vars: usize, // the number of variables in the multilinear polynomial 20 | len: usize, 21 | Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs 22 | } 23 | 24 | #[derive(Serialize, Deserialize)] 25 | pub struct PolyCommitmentGens { 26 | pub gens: DotProductProofGens, 27 | } 28 | 29 | impl PolyCommitmentGens { 30 | // the number of variables in the multilinear polynomial 31 | pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens { 32 | let (_left, right) = EqPolynomial::compute_factored_lens(num_vars); 33 | let gens = DotProductProofGens::new(right.pow2(), label); 34 | PolyCommitmentGens { gens } 35 | } 36 | } 37 | 38 | pub struct PolyCommitmentBlinds { 39 | blinds: Vec, 40 | } 41 | 42 | #[derive(Debug, Serialize, Deserialize)] 43 | pub struct PolyCommitment { 44 | C: Vec, 45 | } 46 | 47 | #[derive(Debug, Serialize, Deserialize)] 48 | pub struct ConstPolyCommitment { 49 | C: CompressedGroup, 50 | } 51 | 52 | pub struct EqPolynomial { 53 | r: Vec, 54 | } 55 | 56 | impl EqPolynomial { 57 | pub fn new(r: Vec) -> Self { 58 | EqPolynomial { r } 59 | } 60 | 61 | pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { 62 | assert_eq!(self.r.len(), rx.len()); 63 | (0..rx.len()) 64 | .map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i])) 65 | .product() 66 | } 67 | 68 | pub fn evals(&self) -> Vec { 69 | let ell = self.r.len(); 70 | 71 | let mut evals: Vec = vec![Scalar::one(); ell.pow2()]; 72 | let mut size = 1; 73 | for j in 0..ell { 74 | // in each iteration, we double the size of chis 75 | size *= 2; 
76 | for i in (0..size).rev().step_by(2) { 77 | // copy each element from the prior iteration twice 78 | let scalar = evals[i / 2]; 79 | evals[i] = scalar * self.r[j]; 80 | evals[i - 1] = scalar - evals[i]; 81 | } 82 | } 83 | evals 84 | } 85 | 86 | pub fn compute_factored_lens(ell: usize) -> (usize, usize) { 87 | (ell / 2, ell - ell / 2) 88 | } 89 | 90 | pub fn compute_factored_evals(&self) -> (Vec, Vec) { 91 | let ell = self.r.len(); 92 | let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell); 93 | 94 | let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals(); 95 | let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals(); 96 | 97 | (L, R) 98 | } 99 | } 100 | 101 | pub struct IdentityPolynomial { 102 | size_point: usize, 103 | } 104 | 105 | impl IdentityPolynomial { 106 | pub fn new(size_point: usize) -> Self { 107 | IdentityPolynomial { size_point } 108 | } 109 | 110 | pub fn evaluate(&self, r: &[Scalar]) -> Scalar { 111 | let len = r.len(); 112 | assert_eq!(len, self.size_point); 113 | (0..len) 114 | .map(|i| Scalar::from((len - i - 1).pow2() as u64) * r[i]) 115 | .sum() 116 | } 117 | } 118 | 119 | impl DensePolynomial { 120 | pub fn new(Z: Vec) -> Self { 121 | DensePolynomial { 122 | num_vars: Z.len().log_2(), 123 | len: Z.len(), 124 | Z, 125 | } 126 | } 127 | 128 | pub fn get_num_vars(&self) -> usize { 129 | self.num_vars 130 | } 131 | 132 | pub fn len(&self) -> usize { 133 | self.len 134 | } 135 | 136 | pub fn clone(&self) -> DensePolynomial { 137 | DensePolynomial::new(self.Z[0..self.len].to_vec()) 138 | } 139 | 140 | pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) { 141 | assert!(idx < self.len()); 142 | ( 143 | DensePolynomial::new(self.Z[..idx].to_vec()), 144 | DensePolynomial::new(self.Z[idx..2 * idx].to_vec()), 145 | ) 146 | } 147 | 148 | #[cfg(feature = "multicore")] 149 | fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment { 150 | let L_size = 
blinds.len(); 151 | let R_size = self.Z.len() / L_size; 152 | assert_eq!(L_size * R_size, self.Z.len()); 153 | let C = (0..L_size) 154 | .into_par_iter() 155 | .map(|i| { 156 | self.Z[R_size * i..R_size * (i + 1)] 157 | .commit(&blinds[i], gens) 158 | .compress() 159 | }) 160 | .collect(); 161 | PolyCommitment { C } 162 | } 163 | 164 | #[cfg(not(feature = "multicore"))] 165 | fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment { 166 | let L_size = blinds.len(); 167 | let R_size = self.Z.len() / L_size; 168 | assert_eq!(L_size * R_size, self.Z.len()); 169 | let C = (0..L_size) 170 | .map(|i| { 171 | self.Z[R_size * i..R_size * (i + 1)] 172 | .commit(&blinds[i], gens) 173 | .compress() 174 | }) 175 | .collect(); 176 | PolyCommitment { C } 177 | } 178 | 179 | pub fn commit( 180 | &self, 181 | gens: &PolyCommitmentGens, 182 | random_tape: Option<&mut RandomTape>, 183 | ) -> (PolyCommitment, PolyCommitmentBlinds) { 184 | let n = self.Z.len(); 185 | let ell = self.get_num_vars(); 186 | assert_eq!(n, ell.pow2()); 187 | 188 | let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell); 189 | let L_size = left_num_vars.pow2(); 190 | let R_size = right_num_vars.pow2(); 191 | assert_eq!(L_size * R_size, n); 192 | 193 | let blinds = if let Some(t) = random_tape { 194 | PolyCommitmentBlinds { 195 | blinds: t.random_vector(b"poly_blinds", L_size), 196 | } 197 | } else { 198 | PolyCommitmentBlinds { 199 | blinds: vec![Scalar::zero(); L_size], 200 | } 201 | }; 202 | 203 | (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds) 204 | } 205 | 206 | pub fn bound(&self, L: &[Scalar]) -> Vec { 207 | let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(self.get_num_vars()); 208 | let L_size = left_num_vars.pow2(); 209 | let R_size = right_num_vars.pow2(); 210 | (0..R_size) 211 | .map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum()) 212 | .collect() 213 | } 214 | 215 | pub fn 
bound_poly_var_top(&mut self, r: &Scalar) { 216 | let n = self.len() / 2; 217 | for i in 0..n { 218 | self.Z[i] = self.Z[i] + r * (self.Z[i + n] - self.Z[i]); 219 | } 220 | self.Z.truncate(n); // Resize the vector Z to the new length 221 | self.num_vars -= 1; 222 | self.len = n; 223 | } 224 | 225 | pub fn bound_poly_var_bot(&mut self, r: &Scalar) { 226 | let n = self.len() / 2; 227 | for i in 0..n { 228 | self.Z[i] = self.Z[2 * i] + r * (self.Z[2 * i + 1] - self.Z[2 * i]); 229 | } 230 | self.Z.truncate(n); // Resize the vector Z to the new length 231 | self.num_vars -= 1; 232 | self.len = n; 233 | } 234 | 235 | // returns Z(r) in O(n) time 236 | pub fn evaluate(&self, r: &[Scalar]) -> Scalar { 237 | // r must have a value for each variable 238 | assert_eq!(r.len(), self.get_num_vars()); 239 | let chis = EqPolynomial::new(r.to_vec()).evals(); 240 | assert_eq!(chis.len(), self.Z.len()); 241 | DotProductProofLog::compute_dotproduct(&self.Z, &chis) 242 | } 243 | 244 | fn vec(&self) -> &Vec { 245 | &self.Z 246 | } 247 | 248 | pub fn extend(&mut self, other: &DensePolynomial) { 249 | // TODO: allow extension even when some vars are bound 250 | assert_eq!(self.Z.len(), self.len); 251 | let other_vec = other.vec(); 252 | assert_eq!(other_vec.len(), self.len); 253 | self.Z.extend(other_vec); 254 | self.num_vars += 1; 255 | self.len *= 2; 256 | assert_eq!(self.Z.len(), self.len); 257 | } 258 | 259 | pub fn merge<'a, I>(polys: I) -> DensePolynomial 260 | where 261 | I: IntoIterator, 262 | { 263 | let mut Z: Vec = Vec::new(); 264 | for poly in polys.into_iter() { 265 | Z.extend(poly.vec()); 266 | } 267 | 268 | // pad the polynomial with zero polynomial at the end 269 | Z.resize(Z.len().next_power_of_two(), Scalar::zero()); 270 | 271 | DensePolynomial::new(Z) 272 | } 273 | 274 | pub fn from_usize(Z: &[usize]) -> Self { 275 | DensePolynomial::new( 276 | (0..Z.len()) 277 | .map(|i| Scalar::from(Z[i] as u64)) 278 | .collect::>(), 279 | ) 280 | } 281 | } 282 | 283 | impl Index for 
DensePolynomial { 284 | type Output = Scalar; 285 | 286 | #[inline(always)] 287 | fn index(&self, _index: usize) -> &Scalar { 288 | &(self.Z[_index]) 289 | } 290 | } 291 | 292 | impl AppendToTranscript for PolyCommitment { 293 | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { 294 | transcript.append_message(label, b"poly_commitment_begin"); 295 | for i in 0..self.C.len() { 296 | transcript.append_point(b"poly_commitment_share", &self.C[i]); 297 | } 298 | transcript.append_message(label, b"poly_commitment_end"); 299 | } 300 | } 301 | 302 | #[derive(Debug, Serialize, Deserialize)] 303 | pub struct PolyEvalProof { 304 | proof: DotProductProofLog, 305 | } 306 | 307 | impl PolyEvalProof { 308 | fn protocol_name() -> &'static [u8] { 309 | b"polynomial evaluation proof" 310 | } 311 | 312 | pub fn prove( 313 | poly: &DensePolynomial, 314 | blinds_opt: Option<&PolyCommitmentBlinds>, 315 | r: &[Scalar], // point at which the polynomial is evaluated 316 | Zr: &Scalar, // evaluation of \widetilde{Z}(r) 317 | blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr 318 | gens: &PolyCommitmentGens, 319 | transcript: &mut Transcript, 320 | random_tape: &mut RandomTape, 321 | ) -> (PolyEvalProof, CompressedGroup) { 322 | transcript.append_protocol_name(PolyEvalProof::protocol_name()); 323 | 324 | // assert vectors are of the right size 325 | assert_eq!(poly.get_num_vars(), r.len()); 326 | 327 | let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len()); 328 | let L_size = left_num_vars.pow2(); 329 | let R_size = right_num_vars.pow2(); 330 | 331 | let default_blinds = PolyCommitmentBlinds { 332 | blinds: vec![Scalar::zero(); L_size], 333 | }; 334 | let blinds = blinds_opt.map_or(&default_blinds, |p| p); 335 | 336 | assert_eq!(blinds.blinds.len(), L_size); 337 | 338 | let zero = Scalar::zero(); 339 | let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p); 340 | 341 | // compute the L and R vectors 342 | let eq = 
EqPolynomial::new(r.to_vec()); 343 | let (L, R) = eq.compute_factored_evals(); 344 | assert_eq!(L.len(), L_size); 345 | assert_eq!(R.len(), R_size); 346 | 347 | // compute the vector underneath L*Z and the L*blinds 348 | // compute vector-matrix product between L and Z viewed as a matrix 349 | let LZ = poly.bound(&L); 350 | let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); 351 | 352 | // a dot product proof of size R_size 353 | let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove( 354 | &gens.gens, 355 | transcript, 356 | random_tape, 357 | &LZ, 358 | &LZ_blind, 359 | &R, 360 | Zr, 361 | blind_Zr, 362 | ); 363 | 364 | (PolyEvalProof { proof }, C_Zr_prime) 365 | } 366 | 367 | pub fn verify( 368 | &self, 369 | gens: &PolyCommitmentGens, 370 | transcript: &mut Transcript, 371 | r: &[Scalar], // point at which the polynomial is evaluated 372 | C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r) 373 | comm: &PolyCommitment, 374 | ) -> Result<(), ProofVerifyError> { 375 | transcript.append_protocol_name(PolyEvalProof::protocol_name()); 376 | 377 | // compute L and R 378 | let eq = EqPolynomial::new(r.to_vec()); 379 | let (L, R) = eq.compute_factored_evals(); 380 | 381 | // compute a weighted sum of commitments and L 382 | let C_decompressed = comm.C.iter().map(|pt| pt.decompress().unwrap()); 383 | 384 | let C_LZ = GroupElement::vartime_multiscalar_mul(&L, C_decompressed).compress(); 385 | 386 | self 387 | .proof 388 | .verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr) 389 | } 390 | 391 | pub fn verify_plain( 392 | &self, 393 | gens: &PolyCommitmentGens, 394 | transcript: &mut Transcript, 395 | r: &[Scalar], // point at which the polynomial is evaluated 396 | Zr: &Scalar, // evaluation \widetilde{Z}(r) 397 | comm: &PolyCommitment, 398 | ) -> Result<(), ProofVerifyError> { 399 | // compute a commitment to Zr with a blind of zero 400 | let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress(); 401 | 402 | 
self.verify(gens, transcript, r, &C_Zr, comm)
  }
}

#[cfg(test)]
mod tests {
  use super::super::scalar::ScalarFromPrimitives;
  use super::*;
  use rand::rngs::OsRng;

  // Reference evaluation via the factored L/R decomposition (ell must be even).
  fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();

    let ell = r.len();
    // ensure ell is even
    assert!(ell % 2 == 0);
    // compute n = 2^\ell
    let n = ell.pow2();
    // compute m = sqrt(n) = 2^{\ell/2}
    let m = (n as f64).sqrt() as usize;

    // compute vector-matrix product between L and Z viewed as a matrix
    let LZ = (0..m)
      .map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum())
      .collect::<Vec<Scalar>>();

    // compute dot product between LZ and R
    DotProductProofLog::compute_dotproduct(&LZ, &R)
  }

  #[test]
  fn check_polynomial_evaluation() {
    // Z = [1, 2, 1, 4]
    let Z = vec![
      Scalar::one(),
      (2_usize).to_scalar(),
      (1_usize).to_scalar(),
      (4_usize).to_scalar(),
    ];

    // r = [4,3]
    let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];

    let eval_with_LR = evaluate_with_LR(&Z, &r);
    let poly = DensePolynomial::new(Z);

    let eval = poly.evaluate(&r);
    assert_eq!(eval, (28_usize).to_scalar());
    assert_eq!(eval_with_LR, eval);
  }

  // Naive (non-memoized) computation of the factored eq-table (L, R).
  pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
    let mut L: Vec<Scalar> = Vec::new();
    let mut R: Vec<Scalar> = Vec::new();

    let ell = r.len();
    assert!(ell % 2 == 0); // ensure ell is even
    let n = ell.pow2();
    let m = (n as f64).sqrt() as usize;

    // compute row vector L
    for i in 0..m {
      let mut chi_i = Scalar::one();
      for j in 0..ell / 2 {
        let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      L.push(chi_i);
    }

    // compute column vector R
    for i in 0..m {
      let mut chi_i = Scalar::one();
      for j in ell / 2..ell {
        let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      R.push(chi_i);
    }
    (L, R)
  }

  // Naive computation of the full eq-table chi_i(r) for all 2^ell indices.
  pub fn compute_chis_at_r(r: &[Scalar]) -> Vec<Scalar> {
    let ell = r.len();
    let n = ell.pow2();
    let mut chis: Vec<Scalar> = Vec::new();
    for i in 0..n {
      let mut chi_i = Scalar::one();
      for j in 0..r.len() {
        let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      chis.push(chi_i);
    }
    chis
  }

  // Row-major outer product L ⊗ R, flattened.
  pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scalar> {
    assert_eq!(L.len(), R.len());
    (0..L.len())
      .map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::<Vec<Scalar>>())
      .collect::<Vec<Vec<Scalar>>>()
      .into_iter()
      .flatten()
      .collect::<Vec<Scalar>>()
  }

  #[test]
  fn check_memoized_chis() {
    let mut csprng: OsRng = OsRng;

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let chis = tests::compute_chis_at_r(&r);
    let chis_m = EqPolynomial::new(r).evals();
    assert_eq!(chis, chis_m);
  }

  #[test]
  fn check_factored_chis() {
    let mut csprng: OsRng = OsRng;

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let chis = EqPolynomial::new(r.clone()).evals();
    let (L, R) = EqPolynomial::new(r).compute_factored_evals();
    let O = compute_outerproduct(L, R);
    assert_eq!(chis, O);
  }

  #[test]
  fn check_memoized_factored_chis() {
    let mut csprng: OsRng = OsRng;

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let (L, R) = tests::compute_factored_chis_at_r(&r);
    let eq = EqPolynomial::new(r);
    let (L2, R2) = eq.compute_factored_evals();
    assert_eq!(L, L2);
    assert_eq!(R, R2);
  }

  #[test]
  fn check_polynomial_commit() {
    let Z = vec![
      (1_usize).to_scalar(),
      (2_usize).to_scalar(),
      (1_usize).to_scalar(),
      (4_usize).to_scalar(),
    ];
    let poly = DensePolynomial::new(Z);

    // r = [4,3]
    let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
    let eval = poly.evaluate(&r);
    assert_eq!(eval, (28_usize).to_scalar());

    let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
    let (poly_commitment, blinds) = poly.commit(&gens, None);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, C_Zr) = PolyEvalProof::prove(
      &poly,
      Some(&blinds),
      &r,
      &eval,
      None,
      &gens,
      &mut prover_transcript,
      &mut random_tape,
    );

    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment)
      .is_ok());
  }
}
-------------------------------------------------------------------------------- /src/errors.rs: --------------------------------------------------------------------------------
use core::{
  fmt::Display,
  fmt::{self, Debug},
};

/// Errors returned when verifying a proof.
#[derive(Debug, Default)]
pub enum ProofVerifyError {
  /// catch-all verification failure
  #[default]
  InternalError,
  /// a compressed group element failed to decompress (carries the offending bytes)
  DecompressionError([u8; 32]),
}

impl Display for ProofVerifyError {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    match &self {
      ProofVerifyError::DecompressionError(bytes) => write!(
f,
        "Compressed group element failed to decompress: {bytes:?}",
      ),
      ProofVerifyError::InternalError => {
        write!(f, "Proof verification failed",)
      }
    }
  }
}

/// Errors returned when constructing or checking an R1CS instance.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum R1CSError {
  /// returned if the number of constraints is not a power of 2
  NonPowerOfTwoCons,
  /// returned if the number of variables is not a power of 2
  NonPowerOfTwoVars,
  /// returned if a wrong number of inputs in an assignment are supplied
  InvalidNumberOfInputs,
  /// returned if a wrong number of variables in an assignment are supplied
  InvalidNumberOfVars,
  /// returned if a [u8;32] does not parse into a valid Scalar in the field of ristretto255
  InvalidScalar,
  /// returned if the supplied row or col in (row,col,val) tuple is out of range
  InvalidIndex,
}
-------------------------------------------------------------------------------- /src/group.rs: --------------------------------------------------------------------------------
use super::errors::ProofVerifyError;
use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
use core::borrow::Borrow;
use core::ops::{Mul, MulAssign};

pub type GroupElement = curve25519_dalek::ristretto::RistrettoPoint;
pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;

/// Extension trait for decompressing a compressed group element with a typed error.
pub trait CompressedGroupExt {
  type Group;
  fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
}

impl CompressedGroupExt for CompressedGroup {
  type Group = curve25519_dalek::ristretto::RistrettoPoint;
  fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
    self
      .decompress()
      .ok_or_else(|| ProofVerifyError::DecompressionError(self.to_bytes()))
  }
}

pub const GROUP_BASEPOINT_COMPRESSED: CompressedGroup =
  curve25519_dalek::constants::RISTRETTO_BASEPOINT_COMPRESSED;

impl<'b> MulAssign<&'b Scalar> for GroupElement {
  fn mul_assign(&mut self, scalar:
&'b Scalar) {
    let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
    *self = result;
  }
}

impl<'b> Mul<&'b Scalar> for &GroupElement {
  type Output = GroupElement;
  fn mul(self, scalar: &'b Scalar) -> GroupElement {
    self * Scalar::decompress_scalar(scalar)
  }
}

impl<'b> Mul<&'b GroupElement> for &Scalar {
  type Output = GroupElement;

  fn mul(self, point: &'b GroupElement) -> GroupElement {
    Scalar::decompress_scalar(self) * point
  }
}

// Generates the by-value / by-reference Mul impl variants from the
// reference-reference impl above.
macro_rules! define_mul_variants {
  (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
    impl<'b> Mul<&'b $rhs> for $lhs {
      type Output = $out;
      fn mul(self, rhs: &'b $rhs) -> $out {
        &self * rhs
      }
    }

    impl<'a> Mul<$rhs> for &'a $lhs {
      type Output = $out;
      fn mul(self, rhs: $rhs) -> $out {
        self * &rhs
      }
    }

    impl Mul<$rhs> for $lhs {
      type Output = $out;
      fn mul(self, rhs: $rhs) -> $out {
        &self * &rhs
      }
    }
  };
}

// Generates the by-value MulAssign variant from the by-reference impl.
macro_rules! define_mul_assign_variants {
  (LHS = $lhs:ty, RHS = $rhs:ty) => {
    impl MulAssign<$rhs> for $lhs {
      fn mul_assign(&mut self, rhs: $rhs) {
        *self *= &rhs;
      }
    }
  };
}

define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);

/// Variable-time multiscalar multiplication over this crate's `Scalar` type.
pub trait VartimeMultiscalarMul {
  type Scalar;
  fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
  where
    I: IntoIterator,
    I::Item: Borrow<Self::Scalar>,
    J: IntoIterator,
    J::Item: Borrow<Self>,
    Self: Clone;
}

impl VartimeMultiscalarMul for GroupElement {
  type Scalar = super::scalar::Scalar;
  fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
  where
    I: IntoIterator,
    I::Item: Borrow<Self::Scalar>,
    J: IntoIterator,
    J::Item: Borrow<Self>,
    Self: Clone,
  {
    use curve25519_dalek::traits::VartimeMultiscalarMul;
    // convert our scalars to dalek scalars, then defer to dalek's variable-time MSM
    <Self as VartimeMultiscalarMul>::vartime_multiscalar_mul(
      scalars
        .into_iter()
        .map(|s| Scalar::decompress_scalar(s.borrow()))
        .collect::<Vec<ScalarBytes>>(),
      points,
    )
  }
}
-------------------------------------------------------------------------------- /src/lib.rs: --------------------------------------------------------------------------------
#![allow(non_snake_case)]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![allow(clippy::assertions_on_result_states)]

extern crate byteorder;
extern crate core;
extern crate curve25519_dalek;
extern crate digest;
extern crate merlin;
extern crate sha3;

#[cfg(feature = "multicore")]
extern crate rayon;

mod commitments;
mod dense_mlpoly;
mod errors;
mod group;
mod math;
mod nizk;
mod product_tree;
mod r1cs;
mod r1csproof;
mod random;
mod scalar;
mod
sparse_mlpoly; 28 | mod sumcheck; 29 | mod timer; 30 | mod transcript; 31 | mod unipoly; 32 | 33 | use core::cmp::max; 34 | use errors::{ProofVerifyError, R1CSError}; 35 | use merlin::Transcript; 36 | use r1cs::{R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSShape}; 37 | use r1csproof::{R1CSGens, R1CSProof}; 38 | use random::RandomTape; 39 | use scalar::Scalar; 40 | use serde::{Deserialize, Serialize}; 41 | use timer::Timer; 42 | use transcript::{AppendToTranscript, ProofTranscript}; 43 | 44 | /// `ComputationCommitment` holds a public preprocessed NP statement (e.g., R1CS) 45 | #[derive(Serialize, Deserialize)] 46 | pub struct ComputationCommitment { 47 | comm: R1CSCommitment, 48 | } 49 | 50 | /// `ComputationDecommitment` holds information to decommit `ComputationCommitment` 51 | #[derive(Serialize, Deserialize)] 52 | pub struct ComputationDecommitment { 53 | decomm: R1CSDecommitment, 54 | } 55 | 56 | /// `Assignment` holds an assignment of values to either the inputs or variables in an `Instance` 57 | #[derive(Clone, Serialize, Deserialize)] 58 | pub struct Assignment { 59 | assignment: Vec, 60 | } 61 | 62 | impl Assignment { 63 | /// Constructs a new `Assignment` from a vector 64 | pub fn new(assignment: &[[u8; 32]]) -> Result { 65 | let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result, R1CSError> { 66 | let mut vec_scalar: Vec = Vec::new(); 67 | for v in vec { 68 | let val = Scalar::from_bytes(v); 69 | if val.is_some().unwrap_u8() == 1 { 70 | vec_scalar.push(val.unwrap()); 71 | } else { 72 | return Err(R1CSError::InvalidScalar); 73 | } 74 | } 75 | Ok(vec_scalar) 76 | }; 77 | 78 | let assignment_scalar = bytes_to_scalar(assignment); 79 | 80 | // check for any parsing errors 81 | if assignment_scalar.is_err() { 82 | return Err(R1CSError::InvalidScalar); 83 | } 84 | 85 | Ok(Assignment { 86 | assignment: assignment_scalar.unwrap(), 87 | }) 88 | } 89 | 90 | /// pads Assignment to the specified length 91 | fn pad(&self, len: usize) -> 
VarsAssignment {
    // check that the new length is higher than current length
    assert!(len > self.assignment.len());

    let padded_assignment = {
      let mut padded_assignment = self.assignment.clone();
      padded_assignment.extend(vec![Scalar::zero(); len - self.assignment.len()]);
      padded_assignment
    };

    VarsAssignment {
      assignment: padded_assignment,
    }
  }
}

/// `VarsAssignment` holds an assignment of values to variables in an `Instance`
pub type VarsAssignment = Assignment;

/// `InputsAssignment` holds an assignment of values to variables in an `Instance`
pub type InputsAssignment = Assignment;

/// `Instance` holds the description of R1CS matrices and a hash of the matrices
pub struct Instance {
  inst: R1CSShape,
  digest: Vec<u8>,
}

impl Instance {
  /// Constructs a new `Instance` and an associated satisfying assignment
  pub fn new(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    A: &[(usize, usize, [u8; 32])],
    B: &[(usize, usize, [u8; 32])],
    C: &[(usize, usize, [u8; 32])],
  ) -> Result<Instance, R1CSError> {
    let (num_vars_padded, num_cons_padded) = {
      let num_vars_padded = {
        let mut num_vars_padded = num_vars;

        // ensure that num_inputs + 1 <= num_vars
        num_vars_padded = max(num_vars_padded, num_inputs + 1);

        // ensure that num_vars_padded is a power of two
        if num_vars_padded.next_power_of_two() != num_vars_padded {
          num_vars_padded = num_vars_padded.next_power_of_two();
        }
        num_vars_padded
      };

      let num_cons_padded = {
        let mut num_cons_padded = num_cons;

        // ensure that num_cons_padded is at least 2
        if num_cons_padded == 0 || num_cons_padded == 1 {
          num_cons_padded = 2;
        }

        // ensure that num_cons_padded is power of 2
        // BUGFIX: test num_cons_padded rather than num_cons; testing num_cons
        // would, for num_cons == 0, reset the padded count from 2 back to
        // 0.next_power_of_two() == 1, violating the "at least 2" invariant above.
        if num_cons_padded.next_power_of_two() != num_cons_padded {
          num_cons_padded = num_cons_padded.next_power_of_two();
        }
        num_cons_padded
      };

      (num_vars_padded, num_cons_padded)
    };

    // parses (row, col, bytes) tuples into (row, col, Scalar) entries,
    // validating indices and scalars
    let bytes_to_scalar =
      |tups: &[(usize, usize, [u8; 32])]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
        let mut mat: Vec<(usize, usize, Scalar)> = Vec::new();
        for &(row, col, val_bytes) in tups {
          // row must be smaller than num_cons
          if row >= num_cons {
            return Err(R1CSError::InvalidIndex);
          }

          // col must be smaller than num_vars + 1 + num_inputs
          if col >= num_vars + 1 + num_inputs {
            return Err(R1CSError::InvalidIndex);
          }

          let val = Scalar::from_bytes(&val_bytes);
          if val.is_some().unwrap_u8() == 1 {
            // if col >= num_vars, it means that it is referencing a 1 or input in the satisfying
            // assignment
            if col >= num_vars {
              mat.push((row, col + num_vars_padded - num_vars, val.unwrap()));
            } else {
              mat.push((row, col, val.unwrap()));
            }
          } else {
            return Err(R1CSError::InvalidScalar);
          }
        }

        // pad with additional constraints up until num_cons_padded if the original constraints were 0 or 1
        // we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol
        if num_cons == 0 || num_cons == 1 {
          for i in tups.len()..num_cons_padded {
            mat.push((i, num_vars, Scalar::zero()));
          }
        }

        Ok(mat)
      };

    let A_scalar = bytes_to_scalar(A);
    if A_scalar.is_err() {
      return Err(A_scalar.err().unwrap());
    }

    let B_scalar = bytes_to_scalar(B);
    if B_scalar.is_err() {
      return Err(B_scalar.err().unwrap());
    }

    let C_scalar = bytes_to_scalar(C);
    if C_scalar.is_err() {
      return Err(C_scalar.err().unwrap());
    }

    let inst = R1CSShape::new(
      num_cons_padded,
      num_vars_padded,
      num_inputs,
      &A_scalar.unwrap(),
&B_scalar.unwrap(),
      &C_scalar.unwrap(),
    );

    let digest = inst.get_digest();

    Ok(Instance { inst, digest })
  }

  /// Checks if a given R1CSShape is satisfiable with a given variables and inputs assignments
  pub fn is_sat(
    &self,
    vars: &VarsAssignment,
    inputs: &InputsAssignment,
  ) -> Result<bool, R1CSError> {
    if vars.assignment.len() > self.inst.get_num_vars() {
      // a wrong *variables* count is reported with the variant dedicated to it
      return Err(R1CSError::InvalidNumberOfVars);
    }

    if inputs.assignment.len() != self.inst.get_num_inputs() {
      return Err(R1CSError::InvalidNumberOfInputs);
    }

    // we might need to pad variables
    let padded_vars = {
      let num_padded_vars = self.inst.get_num_vars();
      let num_vars = vars.assignment.len();
      if num_padded_vars > num_vars {
        vars.pad(num_padded_vars)
      } else {
        vars.clone()
      }
    };

    Ok(
      self
        .inst
        .is_sat(&padded_vars.assignment, &inputs.assignment),
    )
  }

  /// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment
  pub fn produce_synthetic_r1cs(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
  ) -> (Instance, VarsAssignment, InputsAssignment) {
    let (inst, vars, inputs) = R1CSShape::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let digest = inst.get_digest();
    (
      Instance { inst, digest },
      VarsAssignment { assignment: vars },
      InputsAssignment { assignment: inputs },
    )
  }
}

/// `SNARKGens` holds public parameters for producing and verifying proofs with the Spartan SNARK
#[derive(Serialize, Deserialize)]
pub struct SNARKGens {
  gens_r1cs_sat: R1CSGens,
  gens_r1cs_eval: R1CSCommitmentGens,
}

impl SNARKGens {
  /// Constructs a new `SNARKGens` given the size of the R1CS statement
  /// `num_nz_entries` specifies the maximum number of non-zero entries in any of the three R1CS matrices
  pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz_entries: usize) -> Self {
    // pad the variable count the same way Instance::new does, so the
    // generators match the padded instance
    let num_vars_padded = {
      let mut num_vars_padded = max(num_vars, num_inputs + 1);
      if num_vars_padded != num_vars_padded.next_power_of_two() {
        num_vars_padded = num_vars_padded.next_power_of_two();
      }
      num_vars_padded
    };

    let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
    let gens_r1cs_eval = R1CSCommitmentGens::new(
      b"gens_r1cs_eval",
      num_cons,
      num_vars_padded,
      num_inputs,
      num_nz_entries,
    );
    SNARKGens {
      gens_r1cs_sat,
      gens_r1cs_eval,
    }
  }
}

/// `SNARK` holds a proof produced by Spartan SNARK
#[derive(Serialize, Deserialize, Debug)]
pub struct SNARK {
  r1cs_sat_proof: R1CSProof,
  inst_evals: (Scalar, Scalar, Scalar),
  r1cs_eval_proof: R1CSEvalProof,
}

impl SNARK {
  fn protocol_name() -> &'static [u8] {
    b"Spartan SNARK proof"
  }

  /// A public computation to create a commitment to an R1CS instance
  pub fn encode(
    inst: &Instance,
    gens: &SNARKGens,
  ) -> (ComputationCommitment, ComputationDecommitment) {
    let timer_encode = Timer::new("SNARK::encode");
    let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval);
    timer_encode.stop();
    (
      ComputationCommitment { comm },
      ComputationDecommitment { decomm },
    )
  }

  /// A method to produce a SNARK proof of the satisfiability of an R1CS instance
  pub fn prove(
    inst: &Instance,
    comm: &ComputationCommitment,
    decomm: &ComputationDecommitment,
    vars: VarsAssignment,
    inputs: &InputsAssignment,
    gens: &SNARKGens,
    transcript: &mut Transcript,
  ) -> Self {
    let timer_prove = Timer::new("SNARK::prove");

    // we create a Transcript object seeded with a random Scalar
    // to aid the prover produce its randomness
    let mut random_tape = RandomTape::new(b"proof");

    transcript.append_protocol_name(SNARK::protocol_name());
    comm.comm.append_to_transcript(b"comm", transcript);

    let (r1cs_sat_proof, rx, ry) = {
      let (proof, rx, ry) = {
        // we might need to pad variables
        let padded_vars = {
          let num_padded_vars = inst.inst.get_num_vars();
          let num_vars = vars.assignment.len();
          if num_padded_vars > num_vars {
            vars.pad(num_padded_vars)
          } else {
            vars
          }
        };

        R1CSProof::prove(
          &inst.inst,
          padded_vars.assignment,
          &inputs.assignment,
          &gens.gens_r1cs_sat,
          transcript,
          &mut random_tape,
        )
      };

      let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
      Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));

      (proof, rx, ry)
    };

    // We send evaluations of A, B, C at r = (rx, ry) as claims
    // to enable the verifier complete the first sum-check
    let timer_eval = Timer::new("eval_sparse_polys");
    let inst_evals = {
      let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry);
      Ar.append_to_transcript(b"Ar_claim", transcript);
      Br.append_to_transcript(b"Br_claim", transcript);
      Cr.append_to_transcript(b"Cr_claim", transcript);
      (Ar, Br, Cr)
    };
    timer_eval.stop();

    let r1cs_eval_proof = {
      let proof = R1CSEvalProof::prove(
        &decomm.decomm,
        &rx,
        &ry,
        &inst_evals,
        &gens.gens_r1cs_eval,
        transcript,
        &mut random_tape,
      );

      let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
      Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len()));
      proof
    };

    timer_prove.stop();
    SNARK {
      r1cs_sat_proof,
      inst_evals,
r1cs_eval_proof,
    }
  }

  /// A method to verify the SNARK proof of the satisfiability of an R1CS instance
  pub fn verify(
    &self,
    comm: &ComputationCommitment,
    input: &InputsAssignment,
    transcript: &mut Transcript,
    gens: &SNARKGens,
  ) -> Result<(), ProofVerifyError> {
    let timer_verify = Timer::new("SNARK::verify");
    transcript.append_protocol_name(SNARK::protocol_name());

    // append a commitment to the computation to the transcript
    comm.comm.append_to_transcript(b"comm", transcript);

    let timer_sat_proof = Timer::new("verify_sat_proof");
    // NOTE(review): panics (rather than returning Err) on a wrong input count
    assert_eq!(input.assignment.len(), comm.comm.get_num_inputs());
    let (rx, ry) = self.r1cs_sat_proof.verify(
      comm.comm.get_num_vars(),
      comm.comm.get_num_cons(),
      &input.assignment,
      &self.inst_evals,
      transcript,
      &gens.gens_r1cs_sat,
    )?;
    timer_sat_proof.stop();

    let timer_eval_proof = Timer::new("verify_eval_proof");
    let (Ar, Br, Cr) = &self.inst_evals;
    Ar.append_to_transcript(b"Ar_claim", transcript);
    Br.append_to_transcript(b"Br_claim", transcript);
    Cr.append_to_transcript(b"Cr_claim", transcript);
    self.r1cs_eval_proof.verify(
      &comm.comm,
      &rx,
      &ry,
      &self.inst_evals,
      &gens.gens_r1cs_eval,
      transcript,
    )?;
    timer_eval_proof.stop();
    timer_verify.stop();
    Ok(())
  }
}

/// `NIZKGens` holds public parameters for producing and verifying proofs with the Spartan NIZK
pub struct NIZKGens {
  gens_r1cs_sat: R1CSGens,
}

impl NIZKGens {
  /// Constructs a new `NIZKGens` given the size of the R1CS statement
  pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self {
    // pad the variable count the same way Instance::new does
    let num_vars_padded = {
      let mut num_vars_padded = max(num_vars, num_inputs + 1);
      if num_vars_padded != num_vars_padded.next_power_of_two() {
        num_vars_padded = num_vars_padded.next_power_of_two();
      }
      num_vars_padded
    };

    let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
    NIZKGens { gens_r1cs_sat }
  }
}

/// `NIZK` holds a proof produced by Spartan NIZK
#[derive(Serialize, Deserialize, Debug)]
pub struct NIZK {
  r1cs_sat_proof: R1CSProof,
  r: (Vec<Scalar>, Vec<Scalar>),
}

impl NIZK {
  fn protocol_name() -> &'static [u8] {
    b"Spartan NIZK proof"
  }

  /// A method to produce a NIZK proof of the satisfiability of an R1CS instance
  pub fn prove(
    inst: &Instance,
    vars: VarsAssignment,
    input: &InputsAssignment,
    gens: &NIZKGens,
    transcript: &mut Transcript,
  ) -> Self {
    let timer_prove = Timer::new("NIZK::prove");
    // we create a Transcript object seeded with a random Scalar
    // to aid the prover produce its randomness
    let mut random_tape = RandomTape::new(b"proof");

    transcript.append_protocol_name(NIZK::protocol_name());
    transcript.append_message(b"R1CSShapeDigest", &inst.digest);

    let (r1cs_sat_proof, rx, ry) = {
      // we might need to pad variables
      let padded_vars = {
        let num_padded_vars = inst.inst.get_num_vars();
        let num_vars = vars.assignment.len();
        if num_padded_vars > num_vars {
          vars.pad(num_padded_vars)
        } else {
          vars
        }
      };

      let (proof, rx, ry) = R1CSProof::prove(
        &inst.inst,
        padded_vars.assignment,
        &input.assignment,
        &gens.gens_r1cs_sat,
        transcript,
        &mut random_tape,
      );
      let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
      Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
      (proof, rx, ry)
    };

    timer_prove.stop();
    NIZK {
      r1cs_sat_proof,
      r: (rx, ry),
    }
  }

  /// A method to verify a NIZK proof of the satisfiability of an R1CS instance
  pub fn verify(
    &self,
    inst: &Instance,
    input: &InputsAssignment,
    transcript: &mut Transcript,
    gens: &NIZKGens,
  ) -> Result<(), ProofVerifyError> {
    let timer_verify = Timer::new("NIZK::verify");

    transcript.append_protocol_name(NIZK::protocol_name());
    transcript.append_message(b"R1CSShapeDigest", &inst.digest);

    // We send evaluations of A, B, C at r = (rx, ry) as claims
    // to enable the verifier complete the first sum-check
    let timer_eval = Timer::new("eval_sparse_polys");
    let (claimed_rx, claimed_ry) = &self.r;
    let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);
    timer_eval.stop();

    let timer_sat_proof = Timer::new("verify_sat_proof");
    // NOTE(review): panics (rather than returning Err) on a wrong input count
    assert_eq!(input.assignment.len(), inst.inst.get_num_inputs());
    let (rx, ry) = self.r1cs_sat_proof.verify(
      inst.inst.get_num_vars(),
      inst.inst.get_num_cons(),
      &input.assignment,
      &inst_evals,
      transcript,
      &gens.gens_r1cs_sat,
    )?;

    // verify if claimed rx and ry are correct
    assert_eq!(rx, *claimed_rx);
    assert_eq!(ry, *claimed_ry);
    timer_sat_proof.stop();
    timer_verify.stop();

    Ok(())
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  pub fn check_snark() {
    let num_vars = 256;
    let num_cons = num_vars;
    let num_inputs = 10;

    // produce public generators
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

    // produce a synthetic R1CSShape
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // create a commitment to R1CSShape
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    // produce a proof
    let mut prover_transcript = Transcript::new(b"example");
    let proof = SNARK::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
    );

    // verify the proof
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&comm, &inputs, &mut verifier_transcript, &gens)
      .is_ok());
  }

  #[test]
  pub fn check_r1cs_invalid_index() {
    let num_cons = 4;
    let num_vars = 8;
    let num_inputs = 1;

    let zero: [u8; 32] = [
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0,
    ];

    let A = vec![(0, 0, zero)];
    let B = vec![(100, 1, zero)]; // row 100 is out of range for num_cons = 4
    let C = vec![(1, 1, zero)];

    let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
    assert!(inst.is_err());
    assert_eq!(inst.err(), Some(R1CSError::InvalidIndex));
  }

  #[test]
  pub fn check_r1cs_invalid_scalar() {
    let num_cons = 4;
    let num_vars = 8;
    let num_inputs = 1;

    let zero: [u8; 32] = [
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0,
    ];

    // a byte string that exceeds the field modulus, so it must be rejected
    let larger_than_mod = [
      3, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216,
      57, 51, 72, 125, 157, 41, 83, 167, 237, 115,
    ];

    let A = vec![(0, 0, zero)];
    let B = vec![(1, 1, larger_than_mod)];
    let C = vec![(1, 1, zero)];

    let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
    assert!(inst.is_err());
    assert_eq!(inst.err(), Some(R1CSError::InvalidScalar));
  }

  #[test]
  fn test_padded_constraints() {
    // parameters of the R1CS instance
    let num_cons = 1;
    let num_vars = 0;
    let num_inputs = 3;
    let num_non_zero_entries = 3;

    // We will encode the above constraints into three matrices, where
    // the coefficients in the matrix are in the little-endian byte order
    let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
    let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
    let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();

    // Create a^2 + b + 13
    A.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
    B.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
    C.push((0, num_vars + 1, Scalar::one().to_bytes())); // 1*z
    C.push((0, num_vars, (-Scalar::from(13u64)).to_bytes())); // -13*1
    C.push((0, num_vars + 3, (-Scalar::one()).to_bytes())); // -1*b

    // Var Assignments (Z_0 = 16 is the only output)
    let vars = vec![Scalar::zero().to_bytes(); num_vars];

    // create an InputsAssignment (a = 1, b = 2)
    let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
    inputs[0] = Scalar::from(16u64).to_bytes();
    inputs[1] = Scalar::from(1u64).to_bytes();
    inputs[2] = Scalar::from(2u64).to_bytes();

    let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
    let assignment_vars = VarsAssignment::new(&vars).unwrap();

    // Check if instance is satisfiable
    let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
    let res = inst.is_sat(&assignment_vars, &assignment_inputs);
    assert!(res.unwrap(), "should be satisfied");

    // SNARK public params
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);

    // create a commitment to the R1CS instance
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    // produce a SNARK
    let mut prover_transcript = Transcript::new(b"snark_example");
    let proof = SNARK::prove(
      &inst,
      &comm,
      &decomm,
      assignment_vars.clone(),
      &assignment_inputs,
      &gens,
      &mut prover_transcript,
    );

    // verify
the SNARK 729 | let mut verifier_transcript = Transcript::new(b"snark_example"); 730 | assert!(proof 731 | .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens) 732 | .is_ok()); 733 | 734 | // NIZK public params 735 | let gens = NIZKGens::new(num_cons, num_vars, num_inputs); 736 | 737 | // produce a NIZK 738 | let mut prover_transcript = Transcript::new(b"nizk_example"); 739 | let proof = NIZK::prove( 740 | &inst, 741 | assignment_vars, 742 | &assignment_inputs, 743 | &gens, 744 | &mut prover_transcript, 745 | ); 746 | 747 | // verify the NIZK 748 | let mut verifier_transcript = Transcript::new(b"nizk_example"); 749 | assert!(proof 750 | .verify(&inst, &assignment_inputs, &mut verifier_transcript, &gens) 751 | .is_ok()); 752 | } 753 | } 754 | -------------------------------------------------------------------------------- /src/math.rs: -------------------------------------------------------------------------------- 1 | pub trait Math { 2 | fn pow2(self) -> usize; 3 | fn get_bits(self, num_bits: usize) -> Vec; 4 | fn log_2(self) -> usize; 5 | } 6 | 7 | impl Math for usize { 8 | #[inline] 9 | fn pow2(self) -> usize { 10 | let base: usize = 2; 11 | base.pow(self as u32) 12 | } 13 | 14 | /// Returns the num_bits from n in a canonical order 15 | fn get_bits(self, num_bits: usize) -> Vec { 16 | (0..num_bits) 17 | .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0)) 18 | .collect::>() 19 | } 20 | 21 | fn log_2(self) -> usize { 22 | assert_ne!(self, 0); 23 | 24 | if self.is_power_of_two() { 25 | (1usize.leading_zeros() - self.leading_zeros()) as usize 26 | } else { 27 | (0usize.leading_zeros() - self.leading_zeros()) as usize 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/nizk/bullet.rs: -------------------------------------------------------------------------------- 1 | //! This module is an adaptation of code from the bulletproofs crate. 2 | //! 
See NOTICE.md for more details 3 | #![allow(non_snake_case)] 4 | #![allow(clippy::type_complexity)] 5 | #![allow(clippy::too_many_arguments)] 6 | use super::super::errors::ProofVerifyError; 7 | use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; 8 | use super::super::math::Math; 9 | use super::super::scalar::Scalar; 10 | use super::super::transcript::ProofTranscript; 11 | use core::iter; 12 | use merlin::Transcript; 13 | use serde::{Deserialize, Serialize}; 14 | 15 | #[derive(Debug, Serialize, Deserialize)] 16 | pub struct BulletReductionProof { 17 | L_vec: Vec, 18 | R_vec: Vec, 19 | } 20 | 21 | impl BulletReductionProof { 22 | /// Create an inner-product proof. 23 | /// 24 | /// The proof is created with respect to the bases \\(G\\). 25 | /// 26 | /// The `transcript` is passed in as a parameter so that the 27 | /// challenges depend on the *entire* transcript (including parent 28 | /// protocols). 29 | /// 30 | /// The lengths of the vectors must all be the same, and must all be 31 | /// either 0 or a power of 2. 32 | pub fn prove( 33 | transcript: &mut Transcript, 34 | Q: &GroupElement, 35 | G_vec: &[GroupElement], 36 | H: &GroupElement, 37 | a_vec: &[Scalar], 38 | b_vec: &[Scalar], 39 | blind: &Scalar, 40 | blinds_vec: &[(Scalar, Scalar)], 41 | ) -> ( 42 | BulletReductionProof, 43 | GroupElement, 44 | Scalar, 45 | Scalar, 46 | GroupElement, 47 | Scalar, 48 | ) { 49 | // Create slices G, H, a, b backed by their respective 50 | // vectors. This lets us reslice as we compress the lengths 51 | // of the vectors in the main loop below. 52 | let mut G = &mut G_vec.to_owned()[..]; 53 | let mut a = &mut a_vec.to_owned()[..]; 54 | let mut b = &mut b_vec.to_owned()[..]; 55 | 56 | // All of the input vectors must have a length that is a power of two. 57 | let mut n = G.len(); 58 | assert!(n.is_power_of_two()); 59 | let lg_n = n.log_2(); 60 | 61 | // All of the input vectors must have the same length. 
62 | assert_eq!(G.len(), n); 63 | assert_eq!(a.len(), n); 64 | assert_eq!(b.len(), n); 65 | assert_eq!(blinds_vec.len(), 2 * lg_n); 66 | 67 | let mut L_vec = Vec::with_capacity(lg_n); 68 | let mut R_vec = Vec::with_capacity(lg_n); 69 | let mut blinds_iter = blinds_vec.iter(); 70 | let mut blind_final = *blind; 71 | 72 | while n != 1 { 73 | n /= 2; 74 | let (a_L, a_R) = a.split_at_mut(n); 75 | let (b_L, b_R) = b.split_at_mut(n); 76 | let (G_L, G_R) = G.split_at_mut(n); 77 | 78 | let c_L = inner_product(a_L, b_R); 79 | let c_R = inner_product(a_R, b_L); 80 | 81 | let (blind_L, blind_R) = blinds_iter.next().unwrap(); 82 | 83 | let L = GroupElement::vartime_multiscalar_mul( 84 | a_L 85 | .iter() 86 | .chain(iter::once(&c_L)) 87 | .chain(iter::once(blind_L)), 88 | G_R.iter().chain(iter::once(Q)).chain(iter::once(H)), 89 | ); 90 | 91 | let R = GroupElement::vartime_multiscalar_mul( 92 | a_R 93 | .iter() 94 | .chain(iter::once(&c_R)) 95 | .chain(iter::once(blind_R)), 96 | G_L.iter().chain(iter::once(Q)).chain(iter::once(H)), 97 | ); 98 | 99 | transcript.append_point(b"L", &L.compress()); 100 | transcript.append_point(b"R", &R.compress()); 101 | 102 | let u = transcript.challenge_scalar(b"u"); 103 | let u_inv = u.invert().unwrap(); 104 | 105 | for i in 0..n { 106 | a_L[i] = a_L[i] * u + u_inv * a_R[i]; 107 | b_L[i] = b_L[i] * u_inv + u * b_R[i]; 108 | G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]); 109 | } 110 | 111 | blind_final = blind_final + blind_L * u * u + blind_R * u_inv * u_inv; 112 | 113 | L_vec.push(L.compress()); 114 | R_vec.push(R.compress()); 115 | 116 | a = a_L; 117 | b = b_L; 118 | G = G_L; 119 | } 120 | 121 | let Gamma_hat = 122 | GroupElement::vartime_multiscalar_mul(&[a[0], a[0] * b[0], blind_final], &[G[0], *Q, *H]); 123 | 124 | ( 125 | BulletReductionProof { L_vec, R_vec }, 126 | Gamma_hat, 127 | a[0], 128 | b[0], 129 | G[0], 130 | blind_final, 131 | ) 132 | } 133 | 134 | /// Computes three vectors of verification 
scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication 135 | /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. 136 | /// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof. 137 | fn verification_scalars( 138 | &self, 139 | n: usize, 140 | transcript: &mut Transcript, 141 | ) -> Result<(Vec, Vec, Vec), ProofVerifyError> { 142 | let lg_n = self.L_vec.len(); 143 | if lg_n >= 32 { 144 | // 4 billion multiplications should be enough for anyone 145 | // and this check prevents overflow in 1< Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> { 199 | let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?; 200 | 201 | let Ls = self 202 | .L_vec 203 | .iter() 204 | .map(|p| p.decompress().ok_or(ProofVerifyError::InternalError)) 205 | .collect::, _>>()?; 206 | 207 | let Rs = self 208 | .R_vec 209 | .iter() 210 | .map(|p| p.decompress().ok_or(ProofVerifyError::InternalError)) 211 | .collect::, _>>()?; 212 | 213 | let G_hat = GroupElement::vartime_multiscalar_mul(s.iter(), G.iter()); 214 | let a_hat = inner_product(a, &s); 215 | 216 | let Gamma_hat = GroupElement::vartime_multiscalar_mul( 217 | u_sq 218 | .iter() 219 | .chain(u_inv_sq.iter()) 220 | .chain(iter::once(&Scalar::one())), 221 | Ls.iter().chain(Rs.iter()).chain(iter::once(Gamma)), 222 | ); 223 | 224 | Ok((G_hat, Gamma_hat, a_hat)) 225 | } 226 | } 227 | 228 | /// Computes an inner product of two vectors 229 | /// \\[ 230 | /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. 231 | /// \\] 232 | /// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. 
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
  assert!(
    a.len() == b.len(),
    "inner_product(a,b): lengths of vectors do not match"
  );
  // Zip the two slices and sum the element-wise products; this matches the
  // iterator style used by `compute_dotproduct` elsewhere in this crate and
  // avoids the per-element bounds checks of indexed access.
  a.iter().zip(b.iter()).map(|(a_i, b_i)| a_i * b_i).sum()
}
-------------------------------------------------------------------------------- /src/nizk/mod.rs: --------------------------------------------------------------------------------
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, CompressedGroupExt};
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use serde::{Deserialize, Serialize};

mod bullet;
use bullet::BulletReductionProof;

/// Sigma protocol proving knowledge of the opening (x, r) of a Pedersen
/// commitment C = x*G + r*H.
#[derive(Serialize, Deserialize, Debug)]
pub struct KnowledgeProof {
  alpha: CompressedGroup,
  z1: Scalar,
  z2: Scalar,
}

impl KnowledgeProof {
  fn protocol_name() -> &'static [u8] {
    b"knowledge proof"
  }

  /// Produces a proof of knowledge of the opening (x, r) of `x.commit(r, gens_n)`.
  /// Returns the proof together with the (compressed) commitment it opens.
  pub fn prove(
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    random_tape: &mut RandomTape,
    x: &Scalar,
    r: &Scalar,
  ) -> (KnowledgeProof, CompressedGroup) {
    transcript.append_protocol_name(KnowledgeProof::protocol_name());

    // produce two random Scalars
    let t1 = random_tape.random_scalar(b"t1");
    let t2 = random_tape.random_scalar(b"t2");

    let C = x.commit(r, gens_n).compress();
    C.append_to_transcript(b"C", transcript);

    // commitment to the prover's nonces
    let alpha = t1.commit(&t2, gens_n).compress();
    alpha.append_to_transcript(b"alpha", transcript);

    // Fiat-Shamir challenge derived from the transcript
    let c = transcript.challenge_scalar(b"c");

    let z1 = x * c + t1;
    let z2 = r * c + t2;

    (KnowledgeProof { alpha,
z1, z2 }, C) 52 | } 53 | 54 | pub fn verify( 55 | &self, 56 | gens_n: &MultiCommitGens, 57 | transcript: &mut Transcript, 58 | C: &CompressedGroup, 59 | ) -> Result<(), ProofVerifyError> { 60 | transcript.append_protocol_name(KnowledgeProof::protocol_name()); 61 | C.append_to_transcript(b"C", transcript); 62 | self.alpha.append_to_transcript(b"alpha", transcript); 63 | 64 | let c = transcript.challenge_scalar(b"c"); 65 | 66 | let lhs = self.z1.commit(&self.z2, gens_n).compress(); 67 | let rhs = (c * C.unpack()? + self.alpha.unpack()?).compress(); 68 | 69 | if lhs == rhs { 70 | Ok(()) 71 | } else { 72 | Err(ProofVerifyError::InternalError) 73 | } 74 | } 75 | } 76 | 77 | #[derive(Serialize, Deserialize, Debug)] 78 | pub struct EqualityProof { 79 | alpha: CompressedGroup, 80 | z: Scalar, 81 | } 82 | 83 | impl EqualityProof { 84 | fn protocol_name() -> &'static [u8] { 85 | b"equality proof" 86 | } 87 | 88 | pub fn prove( 89 | gens_n: &MultiCommitGens, 90 | transcript: &mut Transcript, 91 | random_tape: &mut RandomTape, 92 | v1: &Scalar, 93 | s1: &Scalar, 94 | v2: &Scalar, 95 | s2: &Scalar, 96 | ) -> (EqualityProof, CompressedGroup, CompressedGroup) { 97 | transcript.append_protocol_name(EqualityProof::protocol_name()); 98 | 99 | // produce a random Scalar 100 | let r = random_tape.random_scalar(b"r"); 101 | 102 | let C1 = v1.commit(s1, gens_n).compress(); 103 | C1.append_to_transcript(b"C1", transcript); 104 | 105 | let C2 = v2.commit(s2, gens_n).compress(); 106 | C2.append_to_transcript(b"C2", transcript); 107 | 108 | let alpha = (r * gens_n.h).compress(); 109 | alpha.append_to_transcript(b"alpha", transcript); 110 | 111 | let c = transcript.challenge_scalar(b"c"); 112 | 113 | let z = c * (s1 - s2) + r; 114 | 115 | (EqualityProof { alpha, z }, C1, C2) 116 | } 117 | 118 | pub fn verify( 119 | &self, 120 | gens_n: &MultiCommitGens, 121 | transcript: &mut Transcript, 122 | C1: &CompressedGroup, 123 | C2: &CompressedGroup, 124 | ) -> Result<(), ProofVerifyError> { 125 | 
transcript.append_protocol_name(EqualityProof::protocol_name()); 126 | C1.append_to_transcript(b"C1", transcript); 127 | C2.append_to_transcript(b"C2", transcript); 128 | self.alpha.append_to_transcript(b"alpha", transcript); 129 | 130 | let c = transcript.challenge_scalar(b"c"); 131 | let rhs = { 132 | let C = C1.unpack()? - C2.unpack()?; 133 | (c * C + self.alpha.unpack()?).compress() 134 | }; 135 | 136 | let lhs = (self.z * gens_n.h).compress(); 137 | 138 | if lhs == rhs { 139 | Ok(()) 140 | } else { 141 | Err(ProofVerifyError::InternalError) 142 | } 143 | } 144 | } 145 | 146 | #[derive(Serialize, Deserialize, Debug)] 147 | pub struct ProductProof { 148 | alpha: CompressedGroup, 149 | beta: CompressedGroup, 150 | delta: CompressedGroup, 151 | z: [Scalar; 5], 152 | } 153 | 154 | impl ProductProof { 155 | fn protocol_name() -> &'static [u8] { 156 | b"product proof" 157 | } 158 | 159 | pub fn prove( 160 | gens_n: &MultiCommitGens, 161 | transcript: &mut Transcript, 162 | random_tape: &mut RandomTape, 163 | x: &Scalar, 164 | rX: &Scalar, 165 | y: &Scalar, 166 | rY: &Scalar, 167 | z: &Scalar, 168 | rZ: &Scalar, 169 | ) -> ( 170 | ProductProof, 171 | CompressedGroup, 172 | CompressedGroup, 173 | CompressedGroup, 174 | ) { 175 | transcript.append_protocol_name(ProductProof::protocol_name()); 176 | 177 | // produce five random Scalar 178 | let b1 = random_tape.random_scalar(b"b1"); 179 | let b2 = random_tape.random_scalar(b"b2"); 180 | let b3 = random_tape.random_scalar(b"b3"); 181 | let b4 = random_tape.random_scalar(b"b4"); 182 | let b5 = random_tape.random_scalar(b"b5"); 183 | 184 | let X = x.commit(rX, gens_n).compress(); 185 | X.append_to_transcript(b"X", transcript); 186 | 187 | let Y = y.commit(rY, gens_n).compress(); 188 | Y.append_to_transcript(b"Y", transcript); 189 | 190 | let Z = z.commit(rZ, gens_n).compress(); 191 | Z.append_to_transcript(b"Z", transcript); 192 | 193 | let alpha = b1.commit(&b2, gens_n).compress(); 194 | 
alpha.append_to_transcript(b"alpha", transcript); 195 | 196 | let beta = b3.commit(&b4, gens_n).compress(); 197 | beta.append_to_transcript(b"beta", transcript); 198 | 199 | let delta = { 200 | let gens_X = &MultiCommitGens { 201 | n: 1, 202 | G: vec![X.decompress().unwrap()], 203 | h: gens_n.h, 204 | }; 205 | b3.commit(&b5, gens_X).compress() 206 | }; 207 | delta.append_to_transcript(b"delta", transcript); 208 | 209 | let c = transcript.challenge_scalar(b"c"); 210 | 211 | let z1 = b1 + c * x; 212 | let z2 = b2 + c * rX; 213 | let z3 = b3 + c * y; 214 | let z4 = b4 + c * rY; 215 | let z5 = b5 + c * (rZ - rX * y); 216 | let z = [z1, z2, z3, z4, z5]; 217 | 218 | ( 219 | ProductProof { 220 | alpha, 221 | beta, 222 | delta, 223 | z, 224 | }, 225 | X, 226 | Y, 227 | Z, 228 | ) 229 | } 230 | 231 | fn check_equality( 232 | P: &CompressedGroup, 233 | X: &CompressedGroup, 234 | c: &Scalar, 235 | gens_n: &MultiCommitGens, 236 | z1: &Scalar, 237 | z2: &Scalar, 238 | ) -> bool { 239 | let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress(); 240 | let rhs = z1.commit(z2, gens_n).compress(); 241 | 242 | lhs == rhs 243 | } 244 | 245 | pub fn verify( 246 | &self, 247 | gens_n: &MultiCommitGens, 248 | transcript: &mut Transcript, 249 | X: &CompressedGroup, 250 | Y: &CompressedGroup, 251 | Z: &CompressedGroup, 252 | ) -> Result<(), ProofVerifyError> { 253 | transcript.append_protocol_name(ProductProof::protocol_name()); 254 | 255 | X.append_to_transcript(b"X", transcript); 256 | Y.append_to_transcript(b"Y", transcript); 257 | Z.append_to_transcript(b"Z", transcript); 258 | self.alpha.append_to_transcript(b"alpha", transcript); 259 | self.beta.append_to_transcript(b"beta", transcript); 260 | self.delta.append_to_transcript(b"delta", transcript); 261 | 262 | let z1 = self.z[0]; 263 | let z2 = self.z[1]; 264 | let z3 = self.z[2]; 265 | let z4 = self.z[3]; 266 | let z5 = self.z[4]; 267 | 268 | let c = transcript.challenge_scalar(b"c"); 269 | 270 | if 
ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2) 271 | && ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4) 272 | && ProductProof::check_equality( 273 | &self.delta, 274 | Z, 275 | &c, 276 | &MultiCommitGens { 277 | n: 1, 278 | G: vec![X.unpack()?], 279 | h: gens_n.h, 280 | }, 281 | &z3, 282 | &z5, 283 | ) 284 | { 285 | Ok(()) 286 | } else { 287 | Err(ProofVerifyError::InternalError) 288 | } 289 | } 290 | } 291 | 292 | #[derive(Debug, Serialize, Deserialize)] 293 | pub struct DotProductProof { 294 | delta: CompressedGroup, 295 | beta: CompressedGroup, 296 | z: Vec, 297 | z_delta: Scalar, 298 | z_beta: Scalar, 299 | } 300 | 301 | impl DotProductProof { 302 | fn protocol_name() -> &'static [u8] { 303 | b"dot product proof" 304 | } 305 | 306 | pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar { 307 | assert_eq!(a.len(), b.len()); 308 | (0..a.len()).map(|i| a[i] * b[i]).sum() 309 | } 310 | 311 | pub fn prove( 312 | gens_1: &MultiCommitGens, 313 | gens_n: &MultiCommitGens, 314 | transcript: &mut Transcript, 315 | random_tape: &mut RandomTape, 316 | x_vec: &[Scalar], 317 | blind_x: &Scalar, 318 | a_vec: &[Scalar], 319 | y: &Scalar, 320 | blind_y: &Scalar, 321 | ) -> (DotProductProof, CompressedGroup, CompressedGroup) { 322 | transcript.append_protocol_name(DotProductProof::protocol_name()); 323 | 324 | let n = x_vec.len(); 325 | assert_eq!(x_vec.len(), a_vec.len()); 326 | assert_eq!(gens_n.n, a_vec.len()); 327 | assert_eq!(gens_1.n, 1); 328 | 329 | // produce randomness for the proofs 330 | let d_vec = random_tape.random_vector(b"d_vec", n); 331 | let r_delta = random_tape.random_scalar(b"r_delta"); 332 | let r_beta = random_tape.random_scalar(b"r_beta"); 333 | 334 | let Cx = x_vec.commit(blind_x, gens_n).compress(); 335 | Cx.append_to_transcript(b"Cx", transcript); 336 | 337 | let Cy = y.commit(blind_y, gens_1).compress(); 338 | Cy.append_to_transcript(b"Cy", transcript); 339 | 340 | a_vec.append_to_transcript(b"a", 
transcript); 341 | 342 | let delta = d_vec.commit(&r_delta, gens_n).compress(); 343 | delta.append_to_transcript(b"delta", transcript); 344 | 345 | let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec); 346 | 347 | let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress(); 348 | beta.append_to_transcript(b"beta", transcript); 349 | 350 | let c = transcript.challenge_scalar(b"c"); 351 | 352 | let z = (0..d_vec.len()) 353 | .map(|i| c * x_vec[i] + d_vec[i]) 354 | .collect::>(); 355 | 356 | let z_delta = c * blind_x + r_delta; 357 | let z_beta = c * blind_y + r_beta; 358 | 359 | ( 360 | DotProductProof { 361 | delta, 362 | beta, 363 | z, 364 | z_delta, 365 | z_beta, 366 | }, 367 | Cx, 368 | Cy, 369 | ) 370 | } 371 | 372 | pub fn verify( 373 | &self, 374 | gens_1: &MultiCommitGens, 375 | gens_n: &MultiCommitGens, 376 | transcript: &mut Transcript, 377 | a: &[Scalar], 378 | Cx: &CompressedGroup, 379 | Cy: &CompressedGroup, 380 | ) -> Result<(), ProofVerifyError> { 381 | assert_eq!(gens_n.n, a.len()); 382 | assert_eq!(gens_1.n, 1); 383 | 384 | transcript.append_protocol_name(DotProductProof::protocol_name()); 385 | Cx.append_to_transcript(b"Cx", transcript); 386 | Cy.append_to_transcript(b"Cy", transcript); 387 | a.append_to_transcript(b"a", transcript); 388 | self.delta.append_to_transcript(b"delta", transcript); 389 | self.beta.append_to_transcript(b"beta", transcript); 390 | 391 | let c = transcript.challenge_scalar(b"c"); 392 | 393 | let mut result = 394 | c * Cx.unpack()? + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n); 395 | 396 | let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a); 397 | result &= c * Cy.unpack()? + self.beta.unpack()? 
== dotproduct_z_a.commit(&self.z_beta, gens_1);

    if result {
      Ok(())
    } else {
      Err(ProofVerifyError::InternalError)
    }
  }
}

/// Generators for `DotProductProofLog`: gens_n commits to the n-vector,
/// gens_1 commits to the scalar inner product.
#[derive(Serialize, Deserialize)]
pub struct DotProductProofGens {
  n: usize,
  pub gens_n: MultiCommitGens,
  pub gens_1: MultiCommitGens,
}

impl DotProductProofGens {
  pub fn new(n: usize, label: &[u8]) -> Self {
    // Sample n + 1 generators once and split them into the vector
    // generators (first n) and the single scalar generator (last one).
    let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n);
    DotProductProofGens { n, gens_n, gens_1 }
  }
}

/// A log(n)-sized proof that a committed vector x and public vector a
/// satisfy <x, a> = y, built on the bullet reduction protocol.
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProofLog {
  bullet_reduction_proof: BulletReductionProof,
  delta: CompressedGroup,
  beta: CompressedGroup,
  z1: Scalar,
  z2: Scalar,
}

impl DotProductProofLog {
  fn protocol_name() -> &'static [u8] {
    b"dot product proof (log)"
  }

  /// Computes <a, b>; panics if the slices have different lengths.
  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
    assert_eq!(a.len(), b.len());
    (0..a.len()).map(|i| a[i] * b[i]).sum()
  }

  /// Proves that the committed x_vec and public a_vec satisfy <x_vec, a_vec> = y.
  /// Returns the proof together with commitments Cx (to x_vec) and Cy (to y).
  pub fn prove(
    gens: &DotProductProofGens,
    transcript: &mut Transcript,
    random_tape: &mut RandomTape,
    x_vec: &[Scalar],
    blind_x: &Scalar,
    a_vec: &[Scalar],
    y: &Scalar,
    blind_y: &Scalar,
  ) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
    transcript.append_protocol_name(DotProductProofLog::protocol_name());

    let n = x_vec.len();
    assert_eq!(x_vec.len(), a_vec.len());
    assert_eq!(gens.n, n);

    // produce randomness for generating a proof
    let d = random_tape.random_scalar(b"d");
    let r_delta = random_tape.random_scalar(b"r_delta");
    // fix: this label previously read b"r_delta" (copy-paste from the line
    // above), reusing r_delta's tape label for r_beta; use a distinct label,
    // consistent with DotProductProof::prove which uses b"r_beta".
    let r_beta = random_tape.random_scalar(b"r_beta");
    // one (blind_L, blind_R) pair per bullet-reduction round (2 * log2(n) scalars)
    let blinds_vec = {
      let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
      let v2 = random_tape.random_vector(b"blinds_vec_2", 2 *
n.log_2()); 463 | (0..v1.len()) 464 | .map(|i| (v1[i], v2[i])) 465 | .collect::>() 466 | }; 467 | 468 | let Cx = x_vec.commit(blind_x, &gens.gens_n).compress(); 469 | Cx.append_to_transcript(b"Cx", transcript); 470 | 471 | let Cy = y.commit(blind_y, &gens.gens_1).compress(); 472 | Cy.append_to_transcript(b"Cy", transcript); 473 | 474 | a_vec.append_to_transcript(b"a", transcript); 475 | 476 | // sample a random base and scale the generator used for 477 | // the output of the inner product 478 | let r = transcript.challenge_scalar(b"r"); 479 | let gens_1_scaled = gens.gens_1.scale(&r); 480 | 481 | let blind_Gamma = blind_x + r * blind_y; 482 | let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) = 483 | BulletReductionProof::prove( 484 | transcript, 485 | &gens_1_scaled.G[0], 486 | &gens.gens_n.G, 487 | &gens.gens_n.h, 488 | x_vec, 489 | a_vec, 490 | &blind_Gamma, 491 | &blinds_vec, 492 | ); 493 | let y_hat = x_hat * a_hat; 494 | 495 | let delta = { 496 | let gens_hat = MultiCommitGens { 497 | n: 1, 498 | G: vec![g_hat], 499 | h: gens.gens_1.h, 500 | }; 501 | d.commit(&r_delta, &gens_hat).compress() 502 | }; 503 | delta.append_to_transcript(b"delta", transcript); 504 | 505 | let beta = d.commit(&r_beta, &gens_1_scaled).compress(); 506 | beta.append_to_transcript(b"beta", transcript); 507 | 508 | let c = transcript.challenge_scalar(b"c"); 509 | 510 | let z1 = d + c * y_hat; 511 | let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta; 512 | 513 | ( 514 | DotProductProofLog { 515 | bullet_reduction_proof, 516 | delta, 517 | beta, 518 | z1, 519 | z2, 520 | }, 521 | Cx, 522 | Cy, 523 | ) 524 | } 525 | 526 | pub fn verify( 527 | &self, 528 | n: usize, 529 | gens: &DotProductProofGens, 530 | transcript: &mut Transcript, 531 | a: &[Scalar], 532 | Cx: &CompressedGroup, 533 | Cy: &CompressedGroup, 534 | ) -> Result<(), ProofVerifyError> { 535 | assert_eq!(gens.n, n); 536 | assert_eq!(a.len(), n); 537 | 538 | 
transcript.append_protocol_name(DotProductProofLog::protocol_name()); 539 | Cx.append_to_transcript(b"Cx", transcript); 540 | Cy.append_to_transcript(b"Cy", transcript); 541 | a.append_to_transcript(b"a", transcript); 542 | 543 | // sample a random base and scale the generator used for 544 | // the output of the inner product 545 | let r = transcript.challenge_scalar(b"r"); 546 | let gens_1_scaled = gens.gens_1.scale(&r); 547 | 548 | let Gamma = Cx.unpack()? + r * Cy.unpack()?; 549 | 550 | let (g_hat, Gamma_hat, a_hat) = 551 | self 552 | .bullet_reduction_proof 553 | .verify(n, a, transcript, &Gamma, &gens.gens_n.G)?; 554 | self.delta.append_to_transcript(b"delta", transcript); 555 | self.beta.append_to_transcript(b"beta", transcript); 556 | 557 | let c = transcript.challenge_scalar(b"c"); 558 | 559 | let c_s = &c; 560 | let beta_s = self.beta.unpack()?; 561 | let a_hat_s = &a_hat; 562 | let delta_s = self.delta.unpack()?; 563 | let z1_s = &self.z1; 564 | let z2_s = &self.z2; 565 | 566 | let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress(); 567 | let rhs = ((g_hat + gens_1_scaled.G[0] * a_hat_s) * z1_s + gens_1_scaled.h * z2_s).compress(); 568 | 569 | assert_eq!(lhs, rhs); 570 | 571 | if lhs == rhs { 572 | Ok(()) 573 | } else { 574 | Err(ProofVerifyError::InternalError) 575 | } 576 | } 577 | } 578 | 579 | #[cfg(test)] 580 | mod tests { 581 | use super::*; 582 | use rand::rngs::OsRng; 583 | #[test] 584 | fn check_knowledgeproof() { 585 | let mut csprng: OsRng = OsRng; 586 | 587 | let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof"); 588 | 589 | let x = Scalar::random(&mut csprng); 590 | let r = Scalar::random(&mut csprng); 591 | 592 | let mut random_tape = RandomTape::new(b"proof"); 593 | let mut prover_transcript = Transcript::new(b"example"); 594 | let (proof, committed_value) = 595 | KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r); 596 | 597 | let mut verifier_transcript = Transcript::new(b"example"); 598 
| assert!(proof 599 | .verify(&gens_1, &mut verifier_transcript, &committed_value) 600 | .is_ok()); 601 | } 602 | 603 | #[test] 604 | fn check_equalityproof() { 605 | let mut csprng: OsRng = OsRng; 606 | 607 | let gens_1 = MultiCommitGens::new(1, b"test-equalityproof"); 608 | let v1 = Scalar::random(&mut csprng); 609 | let v2 = v1; 610 | let s1 = Scalar::random(&mut csprng); 611 | let s2 = Scalar::random(&mut csprng); 612 | 613 | let mut random_tape = RandomTape::new(b"proof"); 614 | let mut prover_transcript = Transcript::new(b"example"); 615 | let (proof, C1, C2) = EqualityProof::prove( 616 | &gens_1, 617 | &mut prover_transcript, 618 | &mut random_tape, 619 | &v1, 620 | &s1, 621 | &v2, 622 | &s2, 623 | ); 624 | 625 | let mut verifier_transcript = Transcript::new(b"example"); 626 | assert!(proof 627 | .verify(&gens_1, &mut verifier_transcript, &C1, &C2) 628 | .is_ok()); 629 | } 630 | 631 | #[test] 632 | fn check_productproof() { 633 | let mut csprng: OsRng = OsRng; 634 | 635 | let gens_1 = MultiCommitGens::new(1, b"test-productproof"); 636 | let x = Scalar::random(&mut csprng); 637 | let rX = Scalar::random(&mut csprng); 638 | let y = Scalar::random(&mut csprng); 639 | let rY = Scalar::random(&mut csprng); 640 | let z = x * y; 641 | let rZ = Scalar::random(&mut csprng); 642 | 643 | let mut random_tape = RandomTape::new(b"proof"); 644 | let mut prover_transcript = Transcript::new(b"example"); 645 | let (proof, X, Y, Z) = ProductProof::prove( 646 | &gens_1, 647 | &mut prover_transcript, 648 | &mut random_tape, 649 | &x, 650 | &rX, 651 | &y, 652 | &rY, 653 | &z, 654 | &rZ, 655 | ); 656 | 657 | let mut verifier_transcript = Transcript::new(b"example"); 658 | assert!(proof 659 | .verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z) 660 | .is_ok()); 661 | } 662 | 663 | #[test] 664 | fn check_dotproductproof() { 665 | let mut csprng: OsRng = OsRng; 666 | 667 | let n = 1024; 668 | 669 | let gens_1 = MultiCommitGens::new(1, b"test-two"); 670 | let gens_1024 = 
MultiCommitGens::new(n, b"test-1024"); 671 | 672 | let mut x: Vec = Vec::new(); 673 | let mut a: Vec = Vec::new(); 674 | for _ in 0..n { 675 | x.push(Scalar::random(&mut csprng)); 676 | a.push(Scalar::random(&mut csprng)); 677 | } 678 | let y = DotProductProofLog::compute_dotproduct(&x, &a); 679 | let r_x = Scalar::random(&mut csprng); 680 | let r_y = Scalar::random(&mut csprng); 681 | 682 | let mut random_tape = RandomTape::new(b"proof"); 683 | let mut prover_transcript = Transcript::new(b"example"); 684 | let (proof, Cx, Cy) = DotProductProof::prove( 685 | &gens_1, 686 | &gens_1024, 687 | &mut prover_transcript, 688 | &mut random_tape, 689 | &x, 690 | &r_x, 691 | &a, 692 | &y, 693 | &r_y, 694 | ); 695 | 696 | let mut verifier_transcript = Transcript::new(b"example"); 697 | assert!(proof 698 | .verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy) 699 | .is_ok()); 700 | } 701 | 702 | #[test] 703 | fn check_dotproductproof_log() { 704 | let mut csprng: OsRng = OsRng; 705 | 706 | let n = 1024; 707 | 708 | let gens = DotProductProofGens::new(n, b"test-1024"); 709 | 710 | let x: Vec = (0..n).map(|_i| Scalar::random(&mut csprng)).collect(); 711 | let a: Vec = (0..n).map(|_i| Scalar::random(&mut csprng)).collect(); 712 | let y = DotProductProof::compute_dotproduct(&x, &a); 713 | 714 | let r_x = Scalar::random(&mut csprng); 715 | let r_y = Scalar::random(&mut csprng); 716 | 717 | let mut random_tape = RandomTape::new(b"proof"); 718 | let mut prover_transcript = Transcript::new(b"example"); 719 | let (proof, Cx, Cy) = DotProductProofLog::prove( 720 | &gens, 721 | &mut prover_transcript, 722 | &mut random_tape, 723 | &x, 724 | &r_x, 725 | &a, 726 | &y, 727 | &r_y, 728 | ); 729 | 730 | let mut verifier_transcript = Transcript::new(b"example"); 731 | assert!(proof 732 | .verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy) 733 | .is_ok()); 734 | } 735 | } 736 | -------------------------------------------------------------------------------- 
/src/product_tree.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | use super::dense_mlpoly::DensePolynomial; 3 | use super::dense_mlpoly::EqPolynomial; 4 | use super::math::Math; 5 | use super::scalar::Scalar; 6 | use super::sumcheck::SumcheckInstanceProof; 7 | use super::transcript::ProofTranscript; 8 | use merlin::Transcript; 9 | use serde::{Deserialize, Serialize}; 10 | 11 | #[derive(Debug)] 12 | pub struct ProductCircuit { 13 | left_vec: Vec, 14 | right_vec: Vec, 15 | } 16 | 17 | impl ProductCircuit { 18 | fn compute_layer( 19 | inp_left: &DensePolynomial, 20 | inp_right: &DensePolynomial, 21 | ) -> (DensePolynomial, DensePolynomial) { 22 | let len = inp_left.len() + inp_right.len(); 23 | let outp_left = (0..len / 4) 24 | .map(|i| inp_left[i] * inp_right[i]) 25 | .collect::>(); 26 | let outp_right = (len / 4..len / 2) 27 | .map(|i| inp_left[i] * inp_right[i]) 28 | .collect::>(); 29 | 30 | ( 31 | DensePolynomial::new(outp_left), 32 | DensePolynomial::new(outp_right), 33 | ) 34 | } 35 | 36 | pub fn new(poly: &DensePolynomial) -> Self { 37 | let mut left_vec: Vec = Vec::new(); 38 | let mut right_vec: Vec = Vec::new(); 39 | 40 | let num_layers = poly.len().log_2(); 41 | let (outp_left, outp_right) = poly.split(poly.len() / 2); 42 | 43 | left_vec.push(outp_left); 44 | right_vec.push(outp_right); 45 | 46 | for i in 0..num_layers - 1 { 47 | let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]); 48 | left_vec.push(outp_left); 49 | right_vec.push(outp_right); 50 | } 51 | 52 | ProductCircuit { 53 | left_vec, 54 | right_vec, 55 | } 56 | } 57 | 58 | pub fn evaluate(&self) -> Scalar { 59 | let len = self.left_vec.len(); 60 | assert_eq!(self.left_vec[len - 1].get_num_vars(), 0); 61 | assert_eq!(self.right_vec[len - 1].get_num_vars(), 0); 62 | self.left_vec[len - 1][0] * self.right_vec[len - 1][0] 63 | } 64 | } 65 | 66 | pub struct DotProductCircuit { 67 | left: 
DensePolynomial, 68 | right: DensePolynomial, 69 | weight: DensePolynomial, 70 | } 71 | 72 | impl DotProductCircuit { 73 | pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self { 74 | assert_eq!(left.len(), right.len()); 75 | assert_eq!(left.len(), weight.len()); 76 | DotProductCircuit { 77 | left, 78 | right, 79 | weight, 80 | } 81 | } 82 | 83 | pub fn evaluate(&self) -> Scalar { 84 | (0..self.left.len()) 85 | .map(|i| self.left[i] * self.right[i] * self.weight[i]) 86 | .sum() 87 | } 88 | 89 | pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) { 90 | let idx = self.left.len() / 2; 91 | assert_eq!(idx * 2, self.left.len()); 92 | let (l1, l2) = self.left.split(idx); 93 | let (r1, r2) = self.right.split(idx); 94 | let (w1, w2) = self.weight.split(idx); 95 | ( 96 | DotProductCircuit { 97 | left: l1, 98 | right: r1, 99 | weight: w1, 100 | }, 101 | DotProductCircuit { 102 | left: l2, 103 | right: r2, 104 | weight: w2, 105 | }, 106 | ) 107 | } 108 | } 109 | 110 | #[allow(dead_code)] 111 | #[derive(Debug, Serialize, Deserialize)] 112 | pub struct LayerProof { 113 | pub proof: SumcheckInstanceProof, 114 | pub claims: Vec, 115 | } 116 | 117 | #[allow(dead_code)] 118 | impl LayerProof { 119 | pub fn verify( 120 | &self, 121 | claim: Scalar, 122 | num_rounds: usize, 123 | degree_bound: usize, 124 | transcript: &mut Transcript, 125 | ) -> (Scalar, Vec) { 126 | self 127 | .proof 128 | .verify(claim, num_rounds, degree_bound, transcript) 129 | .unwrap() 130 | } 131 | } 132 | 133 | #[allow(dead_code)] 134 | #[derive(Debug, Serialize, Deserialize)] 135 | pub struct LayerProofBatched { 136 | pub proof: SumcheckInstanceProof, 137 | pub claims_prod_left: Vec, 138 | pub claims_prod_right: Vec, 139 | } 140 | 141 | #[allow(dead_code)] 142 | impl LayerProofBatched { 143 | pub fn verify( 144 | &self, 145 | claim: Scalar, 146 | num_rounds: usize, 147 | degree_bound: usize, 148 | transcript: &mut Transcript, 149 | ) -> (Scalar, Vec) { 150 
| self 151 | .proof 152 | .verify(claim, num_rounds, degree_bound, transcript) 153 | .unwrap() 154 | } 155 | } 156 | 157 | #[derive(Debug, Serialize, Deserialize)] 158 | pub struct ProductCircuitEvalProof { 159 | proof: Vec, 160 | } 161 | 162 | #[derive(Debug, Serialize, Deserialize)] 163 | pub struct ProductCircuitEvalProofBatched { 164 | proof: Vec, 165 | claims_dotp: (Vec, Vec, Vec), 166 | } 167 | 168 | impl ProductCircuitEvalProof { 169 | #![allow(dead_code)] 170 | pub fn prove( 171 | circuit: &mut ProductCircuit, 172 | transcript: &mut Transcript, 173 | ) -> (Self, Scalar, Vec) { 174 | let mut proof: Vec = Vec::new(); 175 | let num_layers = circuit.left_vec.len(); 176 | 177 | let mut claim = circuit.evaluate(); 178 | let mut rand = Vec::new(); 179 | for layer_id in (0..num_layers).rev() { 180 | let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len(); 181 | 182 | let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals()); 183 | assert_eq!(poly_C.len(), len / 2); 184 | 185 | let num_rounds_prod = poly_C.len().log_2(); 186 | let comb_func_prod = |poly_A_comp: &Scalar, 187 | poly_B_comp: &Scalar, 188 | poly_C_comp: &Scalar| 189 | -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp }; 190 | let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic( 191 | &claim, 192 | num_rounds_prod, 193 | &mut circuit.left_vec[layer_id], 194 | &mut circuit.right_vec[layer_id], 195 | &mut poly_C, 196 | comb_func_prod, 197 | transcript, 198 | ); 199 | 200 | transcript.append_scalar(b"claim_prod_left", &claims_prod[0]); 201 | transcript.append_scalar(b"claim_prod_right", &claims_prod[1]); 202 | 203 | // produce a random challenge 204 | let r_layer = transcript.challenge_scalar(b"challenge_r_layer"); 205 | claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]); 206 | 207 | let mut ext = vec![r_layer]; 208 | ext.extend(rand_prod); 209 | rand = ext; 210 | 211 | proof.push(LayerProof { 212 | proof: proof_prod, 
213 | claims: claims_prod[0..claims_prod.len() - 1].to_vec(), 214 | }); 215 | } 216 | 217 | (ProductCircuitEvalProof { proof }, claim, rand) 218 | } 219 | 220 | pub fn verify( 221 | &self, 222 | eval: Scalar, 223 | len: usize, 224 | transcript: &mut Transcript, 225 | ) -> (Scalar, Vec) { 226 | let num_layers = len.log_2(); 227 | let mut claim = eval; 228 | let mut rand: Vec = Vec::new(); 229 | //let mut num_rounds = 0; 230 | assert_eq!(self.proof.len(), num_layers); 231 | for (num_rounds, i) in (0..num_layers).enumerate() { 232 | let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript); 233 | 234 | let claims_prod = &self.proof[i].claims; 235 | transcript.append_scalar(b"claim_prod_left", &claims_prod[0]); 236 | transcript.append_scalar(b"claim_prod_right", &claims_prod[1]); 237 | 238 | assert_eq!(rand.len(), rand_prod.len()); 239 | let eq: Scalar = (0..rand.len()) 240 | .map(|i| { 241 | rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i]) 242 | }) 243 | .product(); 244 | assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last); 245 | 246 | // produce a random challenge 247 | let r_layer = transcript.challenge_scalar(b"challenge_r_layer"); 248 | claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1]; 249 | let mut ext = vec![r_layer]; 250 | ext.extend(rand_prod); 251 | rand = ext; 252 | } 253 | 254 | (claim, rand) 255 | } 256 | } 257 | 258 | impl ProductCircuitEvalProofBatched { 259 | pub fn prove( 260 | prod_circuit_vec: &mut [&mut ProductCircuit], 261 | dotp_circuit_vec: &mut [&mut DotProductCircuit], 262 | transcript: &mut Transcript, 263 | ) -> (Self, Vec) { 264 | assert!(!prod_circuit_vec.is_empty()); 265 | 266 | let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new()); 267 | 268 | let mut proof_layers: Vec = Vec::new(); 269 | let num_layers = prod_circuit_vec[0].left_vec.len(); 270 | let mut claims_to_verify = (0..prod_circuit_vec.len()) 271 | .map(|i| 
prod_circuit_vec[i].evaluate()) 272 | .collect::>(); 273 | let mut rand = Vec::new(); 274 | for layer_id in (0..num_layers).rev() { 275 | // prepare parallel instance that share poly_C first 276 | let len = prod_circuit_vec[0].left_vec[layer_id].len() 277 | + prod_circuit_vec[0].right_vec[layer_id].len(); 278 | 279 | let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals()); 280 | assert_eq!(poly_C_par.len(), len / 2); 281 | 282 | let num_rounds_prod = poly_C_par.len().log_2(); 283 | let comb_func_prod = |poly_A_comp: &Scalar, 284 | poly_B_comp: &Scalar, 285 | poly_C_comp: &Scalar| 286 | -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp }; 287 | 288 | let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new(); 289 | let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new(); 290 | for prod_circuit in prod_circuit_vec.iter_mut() { 291 | poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]); 292 | poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id]) 293 | } 294 | let poly_vec_par = ( 295 | &mut poly_A_batched_par, 296 | &mut poly_B_batched_par, 297 | &mut poly_C_par, 298 | ); 299 | 300 | // prepare sequential instances that don't share poly_C 301 | let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); 302 | let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); 303 | let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); 304 | if layer_id == 0 && !dotp_circuit_vec.is_empty() { 305 | // add additional claims 306 | for item in dotp_circuit_vec.iter() { 307 | claims_to_verify.push(item.evaluate()); 308 | assert_eq!(len / 2, item.left.len()); 309 | assert_eq!(len / 2, item.right.len()); 310 | assert_eq!(len / 2, item.weight.len()); 311 | } 312 | 313 | for dotp_circuit in dotp_circuit_vec.iter_mut() { 314 | poly_A_batched_seq.push(&mut dotp_circuit.left); 315 | poly_B_batched_seq.push(&mut dotp_circuit.right); 316 | poly_C_batched_seq.push(&mut dotp_circuit.weight); 317 | 
} 318 | } 319 | let poly_vec_seq = ( 320 | &mut poly_A_batched_seq, 321 | &mut poly_B_batched_seq, 322 | &mut poly_C_batched_seq, 323 | ); 324 | 325 | // produce a fresh set of coeffs and a joint claim 326 | let coeff_vec = 327 | transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len()); 328 | let claim = (0..claims_to_verify.len()) 329 | .map(|i| claims_to_verify[i] * coeff_vec[i]) 330 | .sum(); 331 | 332 | let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched( 333 | &claim, 334 | num_rounds_prod, 335 | poly_vec_par, 336 | poly_vec_seq, 337 | &coeff_vec, 338 | comb_func_prod, 339 | transcript, 340 | ); 341 | 342 | let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod; 343 | for i in 0..prod_circuit_vec.len() { 344 | transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]); 345 | transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]); 346 | } 347 | 348 | if layer_id == 0 && !dotp_circuit_vec.is_empty() { 349 | let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp; 350 | for i in 0..dotp_circuit_vec.len() { 351 | transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]); 352 | transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]); 353 | transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]); 354 | } 355 | claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight); 356 | } 357 | 358 | // produce a random challenge to condense two claims into a single claim 359 | let r_layer = transcript.challenge_scalar(b"challenge_r_layer"); 360 | 361 | claims_to_verify = (0..prod_circuit_vec.len()) 362 | .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])) 363 | .collect::>(); 364 | 365 | let mut ext = vec![r_layer]; 366 | ext.extend(rand_prod); 367 | rand = ext; 368 | 369 | proof_layers.push(LayerProofBatched { 370 | proof, 371 | claims_prod_left, 372 | 
claims_prod_right, 373 | }); 374 | } 375 | 376 | ( 377 | ProductCircuitEvalProofBatched { 378 | proof: proof_layers, 379 | claims_dotp: claims_dotp_final, 380 | }, 381 | rand, 382 | ) 383 | } 384 | 385 | pub fn verify( 386 | &self, 387 | claims_prod_vec: &[Scalar], 388 | claims_dotp_vec: &[Scalar], 389 | len: usize, 390 | transcript: &mut Transcript, 391 | ) -> (Vec, Vec, Vec) { 392 | let num_layers = len.log_2(); 393 | let mut rand: Vec = Vec::new(); 394 | //let mut num_rounds = 0; 395 | assert_eq!(self.proof.len(), num_layers); 396 | 397 | let mut claims_to_verify = claims_prod_vec.to_owned(); 398 | let mut claims_to_verify_dotp: Vec = Vec::new(); 399 | for (num_rounds, i) in (0..num_layers).enumerate() { 400 | if i == num_layers - 1 { 401 | claims_to_verify.extend(claims_dotp_vec); 402 | } 403 | 404 | // produce random coefficients, one for each instance 405 | let coeff_vec = 406 | transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len()); 407 | 408 | // produce a joint claim 409 | let claim = (0..claims_to_verify.len()) 410 | .map(|i| claims_to_verify[i] * coeff_vec[i]) 411 | .sum(); 412 | 413 | let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript); 414 | 415 | let claims_prod_left = &self.proof[i].claims_prod_left; 416 | let claims_prod_right = &self.proof[i].claims_prod_right; 417 | assert_eq!(claims_prod_left.len(), claims_prod_vec.len()); 418 | assert_eq!(claims_prod_right.len(), claims_prod_vec.len()); 419 | 420 | for i in 0..claims_prod_vec.len() { 421 | transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]); 422 | transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]); 423 | } 424 | 425 | assert_eq!(rand.len(), rand_prod.len()); 426 | let eq: Scalar = (0..rand.len()) 427 | .map(|i| { 428 | rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i]) 429 | }) 430 | .product(); 431 | let mut claim_expected: Scalar = (0..claims_prod_vec.len()) 432 | .map(|i| 
coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq)) 433 | .sum(); 434 | 435 | // add claims from the dotp instances 436 | if i == num_layers - 1 { 437 | let num_prod_instances = claims_prod_vec.len(); 438 | let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; 439 | for i in 0..claims_dotp_left.len() { 440 | transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]); 441 | transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]); 442 | transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]); 443 | 444 | claim_expected += coeff_vec[i + num_prod_instances] 445 | * claims_dotp_left[i] 446 | * claims_dotp_right[i] 447 | * claims_dotp_weight[i]; 448 | } 449 | } 450 | 451 | assert_eq!(claim_expected, claim_last); 452 | 453 | // produce a random challenge 454 | let r_layer = transcript.challenge_scalar(b"challenge_r_layer"); 455 | 456 | claims_to_verify = (0..claims_prod_left.len()) 457 | .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])) 458 | .collect::>(); 459 | 460 | // add claims to verify for dotp circuit 461 | if i == num_layers - 1 { 462 | let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; 463 | 464 | for i in 0..claims_dotp_vec.len() / 2 { 465 | // combine left claims 466 | let claim_left = claims_dotp_left[2 * i] 467 | + r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]); 468 | 469 | let claim_right = claims_dotp_right[2 * i] 470 | + r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]); 471 | 472 | let claim_weight = claims_dotp_weight[2 * i] 473 | + r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]); 474 | claims_to_verify_dotp.push(claim_left); 475 | claims_to_verify_dotp.push(claim_right); 476 | claims_to_verify_dotp.push(claim_weight); 477 | } 478 | } 479 | 480 | let mut ext = vec![r_layer]; 481 | ext.extend(rand_prod); 482 | rand = ext; 483 | } 484 | 
(claims_to_verify, claims_to_verify_dotp, rand) 485 | } 486 | } 487 | -------------------------------------------------------------------------------- /src/r1cs.rs: -------------------------------------------------------------------------------- 1 | use crate::transcript::AppendToTranscript; 2 | 3 | use super::dense_mlpoly::DensePolynomial; 4 | use super::errors::ProofVerifyError; 5 | use super::math::Math; 6 | use super::random::RandomTape; 7 | use super::scalar::Scalar; 8 | use super::sparse_mlpoly::{ 9 | MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment, 10 | SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial, 11 | }; 12 | use super::timer::Timer; 13 | use flate2::{write::ZlibEncoder, Compression}; 14 | use merlin::Transcript; 15 | use rand::rngs::OsRng; 16 | use serde::{Deserialize, Serialize}; 17 | 18 | #[derive(Debug, Serialize, Deserialize)] 19 | pub struct R1CSShape { 20 | num_cons: usize, 21 | num_vars: usize, 22 | num_inputs: usize, 23 | A: SparseMatPolynomial, 24 | B: SparseMatPolynomial, 25 | C: SparseMatPolynomial, 26 | } 27 | 28 | #[derive(Serialize, Deserialize)] 29 | pub struct R1CSCommitmentGens { 30 | gens: SparseMatPolyCommitmentGens, 31 | } 32 | 33 | impl R1CSCommitmentGens { 34 | pub fn new( 35 | label: &'static [u8], 36 | num_cons: usize, 37 | num_vars: usize, 38 | num_inputs: usize, 39 | num_nz_entries: usize, 40 | ) -> R1CSCommitmentGens { 41 | assert!(num_inputs < num_vars); 42 | let num_poly_vars_x = num_cons.log_2(); 43 | let num_poly_vars_y = (2 * num_vars).log_2(); 44 | let gens = 45 | SparseMatPolyCommitmentGens::new(label, num_poly_vars_x, num_poly_vars_y, num_nz_entries, 3); 46 | R1CSCommitmentGens { gens } 47 | } 48 | } 49 | 50 | #[derive(Debug, Serialize, Deserialize)] 51 | pub struct R1CSCommitment { 52 | num_cons: usize, 53 | num_vars: usize, 54 | num_inputs: usize, 55 | comm: SparseMatPolyCommitment, 56 | } 57 | 58 | impl AppendToTranscript for R1CSCommitment { 59 | fn 
append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) { 60 | transcript.append_u64(b"num_cons", self.num_cons as u64); 61 | transcript.append_u64(b"num_vars", self.num_vars as u64); 62 | transcript.append_u64(b"num_inputs", self.num_inputs as u64); 63 | self.comm.append_to_transcript(b"comm", transcript); 64 | } 65 | } 66 | 67 | #[derive(Serialize, Deserialize)] 68 | pub struct R1CSDecommitment { 69 | dense: MultiSparseMatPolynomialAsDense, 70 | } 71 | 72 | impl R1CSCommitment { 73 | pub fn get_num_cons(&self) -> usize { 74 | self.num_cons 75 | } 76 | 77 | pub fn get_num_vars(&self) -> usize { 78 | self.num_vars 79 | } 80 | 81 | pub fn get_num_inputs(&self) -> usize { 82 | self.num_inputs 83 | } 84 | } 85 | 86 | impl R1CSShape { 87 | pub fn new( 88 | num_cons: usize, 89 | num_vars: usize, 90 | num_inputs: usize, 91 | A: &[(usize, usize, Scalar)], 92 | B: &[(usize, usize, Scalar)], 93 | C: &[(usize, usize, Scalar)], 94 | ) -> R1CSShape { 95 | Timer::print(&format!("number_of_constraints {num_cons}")); 96 | Timer::print(&format!("number_of_variables {num_vars}")); 97 | Timer::print(&format!("number_of_inputs {num_inputs}")); 98 | Timer::print(&format!("number_non-zero_entries_A {}", A.len())); 99 | Timer::print(&format!("number_non-zero_entries_B {}", B.len())); 100 | Timer::print(&format!("number_non-zero_entries_C {}", C.len())); 101 | 102 | // check that num_cons is a power of 2 103 | assert_eq!(num_cons.next_power_of_two(), num_cons); 104 | 105 | // check that num_vars is a power of 2 106 | assert_eq!(num_vars.next_power_of_two(), num_vars); 107 | 108 | // check that number_inputs + 1 <= num_vars 109 | assert!(num_inputs < num_vars); 110 | 111 | // no errors, so create polynomials 112 | let num_poly_vars_x = num_cons.log_2(); 113 | let num_poly_vars_y = (2 * num_vars).log_2(); 114 | 115 | let mat_A = A 116 | .iter() 117 | .map(|(row, col, val)| SparseMatEntry::new(*row, *col, *val)) 118 | .collect::>(); 119 | let mat_B = B 120 | .iter() 
121 | .map(|(row, col, val)| SparseMatEntry::new(*row, *col, *val)) 122 | .collect::>(); 123 | let mat_C = C 124 | .iter() 125 | .map(|(row, col, val)| SparseMatEntry::new(*row, *col, *val)) 126 | .collect::>(); 127 | 128 | let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A); 129 | let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B); 130 | let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C); 131 | 132 | Self { 133 | num_cons, 134 | num_vars, 135 | num_inputs, 136 | A: poly_A, 137 | B: poly_B, 138 | C: poly_C, 139 | } 140 | } 141 | 142 | pub fn get_num_vars(&self) -> usize { 143 | self.num_vars 144 | } 145 | 146 | pub fn get_num_cons(&self) -> usize { 147 | self.num_cons 148 | } 149 | 150 | pub fn get_num_inputs(&self) -> usize { 151 | self.num_inputs 152 | } 153 | 154 | pub fn get_digest(&self) -> Vec { 155 | let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); 156 | bincode::serialize_into(&mut encoder, &self).unwrap(); 157 | encoder.finish().unwrap() 158 | } 159 | 160 | pub fn produce_synthetic_r1cs( 161 | num_cons: usize, 162 | num_vars: usize, 163 | num_inputs: usize, 164 | ) -> (R1CSShape, Vec, Vec) { 165 | Timer::print(&format!("number_of_constraints {num_cons}")); 166 | Timer::print(&format!("number_of_variables {num_vars}")); 167 | Timer::print(&format!("number_of_inputs {num_inputs}")); 168 | 169 | let mut csprng: OsRng = OsRng; 170 | 171 | // assert num_cons and num_vars are power of 2 172 | assert_eq!((num_cons.log_2()).pow2(), num_cons); 173 | assert_eq!((num_vars.log_2()).pow2(), num_vars); 174 | 175 | // num_inputs + 1 <= num_vars 176 | assert!(num_inputs < num_vars); 177 | 178 | // z is organized as [vars,1,io] 179 | let size_z = num_vars + num_inputs + 1; 180 | 181 | // produce a random satisfying assignment 182 | let Z = { 183 | let mut Z: Vec = (0..size_z) 184 | .map(|_i| Scalar::random(&mut csprng)) 185 | .collect::>(); 186 | Z[num_vars] = 
Scalar::one(); // set the constant term to 1 187 | Z 188 | }; 189 | 190 | // three sparse matrices 191 | let mut A: Vec = Vec::new(); 192 | let mut B: Vec = Vec::new(); 193 | let mut C: Vec = Vec::new(); 194 | let one = Scalar::one(); 195 | for i in 0..num_cons { 196 | let A_idx = i % size_z; 197 | let B_idx = (i + 2) % size_z; 198 | A.push(SparseMatEntry::new(i, A_idx, one)); 199 | B.push(SparseMatEntry::new(i, B_idx, one)); 200 | let AB_val = Z[A_idx] * Z[B_idx]; 201 | 202 | let C_idx = (i + 3) % size_z; 203 | let C_val = Z[C_idx]; 204 | 205 | if C_val == Scalar::zero() { 206 | C.push(SparseMatEntry::new(i, num_vars, AB_val)); 207 | } else { 208 | C.push(SparseMatEntry::new( 209 | i, 210 | C_idx, 211 | AB_val * C_val.invert().unwrap(), 212 | )); 213 | } 214 | } 215 | 216 | Timer::print(&format!("number_non-zero_entries_A {}", A.len())); 217 | Timer::print(&format!("number_non-zero_entries_B {}", B.len())); 218 | Timer::print(&format!("number_non-zero_entries_C {}", C.len())); 219 | 220 | let num_poly_vars_x = num_cons.log_2(); 221 | let num_poly_vars_y = (2 * num_vars).log_2(); 222 | let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A); 223 | let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B); 224 | let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C); 225 | 226 | let inst = R1CSShape { 227 | num_cons, 228 | num_vars, 229 | num_inputs, 230 | A: poly_A, 231 | B: poly_B, 232 | C: poly_C, 233 | }; 234 | 235 | assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..])); 236 | 237 | (inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec()) 238 | } 239 | 240 | pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool { 241 | assert_eq!(vars.len(), self.num_vars); 242 | assert_eq!(input.len(), self.num_inputs); 243 | 244 | let z = { 245 | let mut z = vars.to_vec(); 246 | z.extend(&vec![Scalar::one()]); 247 | z.extend(input); 248 | z 249 | }; 250 | 251 | // verify if Az * Bz - Cz = [0...] 
252 | let Az = self 253 | .A 254 | .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); 255 | let Bz = self 256 | .B 257 | .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); 258 | let Cz = self 259 | .C 260 | .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); 261 | 262 | assert_eq!(Az.len(), self.num_cons); 263 | assert_eq!(Bz.len(), self.num_cons); 264 | assert_eq!(Cz.len(), self.num_cons); 265 | (0..self.num_cons).all(|i| Az[i] * Bz[i] == Cz[i]) 266 | } 267 | 268 | pub fn multiply_vec( 269 | &self, 270 | num_rows: usize, 271 | num_cols: usize, 272 | z: &[Scalar], 273 | ) -> (DensePolynomial, DensePolynomial, DensePolynomial) { 274 | assert_eq!(num_rows, self.num_cons); 275 | assert_eq!(z.len(), num_cols); 276 | assert!(num_cols > self.num_vars); 277 | ( 278 | DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)), 279 | DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)), 280 | DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)), 281 | ) 282 | } 283 | 284 | pub fn compute_eval_table_sparse( 285 | &self, 286 | num_rows: usize, 287 | num_cols: usize, 288 | evals: &[Scalar], 289 | ) -> (Vec, Vec, Vec) { 290 | assert_eq!(num_rows, self.num_cons); 291 | assert!(num_cols > self.num_vars); 292 | 293 | let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols); 294 | let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols); 295 | let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols); 296 | 297 | (evals_A, evals_B, evals_C) 298 | } 299 | 300 | pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scalar, Scalar) { 301 | let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry); 302 | (evals[0], evals[1], evals[2]) 303 | } 304 | 305 | pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) { 306 | let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, 
&self.B, &self.C], &gens.gens); 307 | let r1cs_comm = R1CSCommitment { 308 | num_cons: self.num_cons, 309 | num_vars: self.num_vars, 310 | num_inputs: self.num_inputs, 311 | comm, 312 | }; 313 | 314 | let r1cs_decomm = R1CSDecommitment { dense }; 315 | 316 | (r1cs_comm, r1cs_decomm) 317 | } 318 | } 319 | 320 | #[derive(Debug, Serialize, Deserialize)] 321 | pub struct R1CSEvalProof { 322 | proof: SparseMatPolyEvalProof, 323 | } 324 | 325 | impl R1CSEvalProof { 326 | pub fn prove( 327 | decomm: &R1CSDecommitment, 328 | rx: &[Scalar], // point at which the polynomial is evaluated 329 | ry: &[Scalar], 330 | evals: &(Scalar, Scalar, Scalar), 331 | gens: &R1CSCommitmentGens, 332 | transcript: &mut Transcript, 333 | random_tape: &mut RandomTape, 334 | ) -> R1CSEvalProof { 335 | let timer = Timer::new("R1CSEvalProof::prove"); 336 | let proof = SparseMatPolyEvalProof::prove( 337 | &decomm.dense, 338 | rx, 339 | ry, 340 | &[evals.0, evals.1, evals.2], 341 | &gens.gens, 342 | transcript, 343 | random_tape, 344 | ); 345 | timer.stop(); 346 | 347 | R1CSEvalProof { proof } 348 | } 349 | 350 | pub fn verify( 351 | &self, 352 | comm: &R1CSCommitment, 353 | rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated 354 | ry: &[Scalar], 355 | evals: &(Scalar, Scalar, Scalar), 356 | gens: &R1CSCommitmentGens, 357 | transcript: &mut Transcript, 358 | ) -> Result<(), ProofVerifyError> { 359 | self.proof.verify( 360 | &comm.comm, 361 | rx, 362 | ry, 363 | &[evals.0, evals.1, evals.2], 364 | &gens.gens, 365 | transcript, 366 | ) 367 | } 368 | } 369 | -------------------------------------------------------------------------------- /src/r1csproof.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::too_many_arguments)] 2 | use super::commitments::{Commitments, MultiCommitGens}; 3 | use super::dense_mlpoly::{ 4 | DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof, 5 | }; 6 | use 
super::errors::ProofVerifyError; 7 | use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; 8 | use super::math::Math; 9 | use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; 10 | use super::r1cs::R1CSShape; 11 | use super::random::RandomTape; 12 | use super::scalar::Scalar; 13 | use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial}; 14 | use super::sumcheck::ZKSumcheckInstanceProof; 15 | use super::timer::Timer; 16 | use super::transcript::{AppendToTranscript, ProofTranscript}; 17 | use core::iter; 18 | use merlin::Transcript; 19 | use serde::{Deserialize, Serialize}; 20 | 21 | #[derive(Serialize, Deserialize, Debug)] 22 | pub struct R1CSProof { 23 | comm_vars: PolyCommitment, 24 | sc_proof_phase1: ZKSumcheckInstanceProof, 25 | claims_phase2: ( 26 | CompressedGroup, 27 | CompressedGroup, 28 | CompressedGroup, 29 | CompressedGroup, 30 | ), 31 | pok_claims_phase2: (KnowledgeProof, ProductProof), 32 | proof_eq_sc_phase1: EqualityProof, 33 | sc_proof_phase2: ZKSumcheckInstanceProof, 34 | comm_vars_at_ry: CompressedGroup, 35 | proof_eval_vars_at_ry: PolyEvalProof, 36 | proof_eq_sc_phase2: EqualityProof, 37 | } 38 | 39 | #[derive(Serialize, Deserialize)] 40 | pub struct R1CSSumcheckGens { 41 | gens_1: MultiCommitGens, 42 | gens_3: MultiCommitGens, 43 | gens_4: MultiCommitGens, 44 | } 45 | 46 | // TODO: fix passing gens_1_ref 47 | impl R1CSSumcheckGens { 48 | pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self { 49 | let gens_1 = gens_1_ref.clone(); 50 | let gens_3 = MultiCommitGens::new(3, label); 51 | let gens_4 = MultiCommitGens::new(4, label); 52 | 53 | R1CSSumcheckGens { 54 | gens_1, 55 | gens_3, 56 | gens_4, 57 | } 58 | } 59 | } 60 | 61 | #[derive(Serialize, Deserialize)] 62 | pub struct R1CSGens { 63 | gens_sc: R1CSSumcheckGens, 64 | gens_pc: PolyCommitmentGens, 65 | } 66 | 67 | impl R1CSGens { 68 | pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self { 69 | let num_poly_vars = 
num_vars.log_2(); 70 | let gens_pc = PolyCommitmentGens::new(num_poly_vars, label); 71 | let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1); 72 | R1CSGens { gens_sc, gens_pc } 73 | } 74 | } 75 | 76 | impl R1CSProof { 77 | fn prove_phase_one( 78 | num_rounds: usize, 79 | evals_tau: &mut DensePolynomial, 80 | evals_Az: &mut DensePolynomial, 81 | evals_Bz: &mut DensePolynomial, 82 | evals_Cz: &mut DensePolynomial, 83 | gens: &R1CSSumcheckGens, 84 | transcript: &mut Transcript, 85 | random_tape: &mut RandomTape, 86 | ) -> (ZKSumcheckInstanceProof, Vec, Vec, Scalar) { 87 | let comb_func = |poly_A_comp: &Scalar, 88 | poly_B_comp: &Scalar, 89 | poly_C_comp: &Scalar, 90 | poly_D_comp: &Scalar| 91 | -> Scalar { poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp) }; 92 | 93 | let (sc_proof_phase_one, r, claims, blind_claim_postsc) = 94 | ZKSumcheckInstanceProof::prove_cubic_with_additive_term( 95 | &Scalar::zero(), // claim is zero 96 | &Scalar::zero(), // blind for claim is also zero 97 | num_rounds, 98 | evals_tau, 99 | evals_Az, 100 | evals_Bz, 101 | evals_Cz, 102 | comb_func, 103 | &gens.gens_1, 104 | &gens.gens_4, 105 | transcript, 106 | random_tape, 107 | ); 108 | 109 | (sc_proof_phase_one, r, claims, blind_claim_postsc) 110 | } 111 | 112 | fn prove_phase_two( 113 | num_rounds: usize, 114 | claim: &Scalar, 115 | blind_claim: &Scalar, 116 | evals_z: &mut DensePolynomial, 117 | evals_ABC: &mut DensePolynomial, 118 | gens: &R1CSSumcheckGens, 119 | transcript: &mut Transcript, 120 | random_tape: &mut RandomTape, 121 | ) -> (ZKSumcheckInstanceProof, Vec, Vec, Scalar) { 122 | let comb_func = 123 | |poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp }; 124 | let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_quad( 125 | claim, 126 | blind_claim, 127 | num_rounds, 128 | evals_z, 129 | evals_ABC, 130 | comb_func, 131 | &gens.gens_1, 132 | &gens.gens_3, 133 | transcript, 134 | random_tape, 135 
| ); 136 | 137 | (sc_proof_phase_two, r, claims, blind_claim_postsc) 138 | } 139 | 140 | fn protocol_name() -> &'static [u8] { 141 | b"R1CS proof" 142 | } 143 | 144 | pub fn prove( 145 | inst: &R1CSShape, 146 | vars: Vec, 147 | input: &[Scalar], 148 | gens: &R1CSGens, 149 | transcript: &mut Transcript, 150 | random_tape: &mut RandomTape, 151 | ) -> (R1CSProof, Vec, Vec) { 152 | let timer_prove = Timer::new("R1CSProof::prove"); 153 | transcript.append_protocol_name(R1CSProof::protocol_name()); 154 | 155 | // we currently require the number of |inputs| + 1 to be at most number of vars 156 | assert!(input.len() < vars.len()); 157 | 158 | input.append_to_transcript(b"input", transcript); 159 | 160 | let timer_commit = Timer::new("polycommit"); 161 | let (poly_vars, comm_vars, blinds_vars) = { 162 | // create a multilinear polynomial using the supplied assignment for variables 163 | let poly_vars = DensePolynomial::new(vars.clone()); 164 | 165 | // produce a commitment to the satisfying assignment 166 | let (comm_vars, blinds_vars) = poly_vars.commit(&gens.gens_pc, Some(random_tape)); 167 | 168 | // add the commitment to the prover's transcript 169 | comm_vars.append_to_transcript(b"poly_commitment", transcript); 170 | (poly_vars, comm_vars, blinds_vars) 171 | }; 172 | timer_commit.stop(); 173 | 174 | let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one"); 175 | 176 | // append input to variables to create a single vector z 177 | let z = { 178 | let num_inputs = input.len(); 179 | let num_vars = vars.len(); 180 | let mut z = vars; 181 | z.extend(&vec![Scalar::one()]); // add constant term in z 182 | z.extend(input); 183 | z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros 184 | z 185 | }; 186 | 187 | // derive the verifier's challenge tau 188 | let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2()); 189 | let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x); 190 | // compute the initial 
evaluation table for R(\tau, x) 191 | let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals()); 192 | let (mut poly_Az, mut poly_Bz, mut poly_Cz) = 193 | inst.multiply_vec(inst.get_num_cons(), z.len(), &z); 194 | 195 | let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one( 196 | num_rounds_x, 197 | &mut poly_tau, 198 | &mut poly_Az, 199 | &mut poly_Bz, 200 | &mut poly_Cz, 201 | &gens.gens_sc, 202 | transcript, 203 | random_tape, 204 | ); 205 | assert_eq!(poly_tau.len(), 1); 206 | assert_eq!(poly_Az.len(), 1); 207 | assert_eq!(poly_Bz.len(), 1); 208 | assert_eq!(poly_Cz.len(), 1); 209 | timer_sc_proof_phase1.stop(); 210 | 211 | let (tau_claim, Az_claim, Bz_claim, Cz_claim) = 212 | (&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]); 213 | let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = ( 214 | random_tape.random_scalar(b"Az_blind"), 215 | random_tape.random_scalar(b"Bz_blind"), 216 | random_tape.random_scalar(b"Cz_blind"), 217 | random_tape.random_scalar(b"prod_Az_Bz_blind"), 218 | ); 219 | 220 | let (pok_Cz_claim, comm_Cz_claim) = { 221 | KnowledgeProof::prove( 222 | &gens.gens_sc.gens_1, 223 | transcript, 224 | random_tape, 225 | Cz_claim, 226 | &Cz_blind, 227 | ) 228 | }; 229 | 230 | let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = { 231 | let prod = Az_claim * Bz_claim; 232 | ProductProof::prove( 233 | &gens.gens_sc.gens_1, 234 | transcript, 235 | random_tape, 236 | Az_claim, 237 | &Az_blind, 238 | Bz_claim, 239 | &Bz_blind, 240 | &prod, 241 | &prod_Az_Bz_blind, 242 | ) 243 | }; 244 | 245 | comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript); 246 | comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript); 247 | comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript); 248 | comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript); 249 | 250 | // prove the final step of sum-check #1 251 | let taus_bound_rx = tau_claim; 252 | 
let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind); 253 | let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx; 254 | let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove( 255 | &gens.gens_sc.gens_1, 256 | transcript, 257 | random_tape, 258 | &claim_post_phase1, 259 | &blind_expected_claim_postsc1, 260 | &claim_post_phase1, 261 | &blind_claim_postsc1, 262 | ); 263 | 264 | let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two"); 265 | // combine the three claims into a single claim 266 | let r_A = transcript.challenge_scalar(b"challenge_Az"); 267 | let r_B = transcript.challenge_scalar(b"challenge_Bz"); 268 | let r_C = transcript.challenge_scalar(b"challenge_Cz"); 269 | let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim; 270 | let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind; 271 | 272 | let evals_ABC = { 273 | // compute the initial evaluation table for R(\tau, x) 274 | let evals_rx = EqPolynomial::new(rx.clone()).evals(); 275 | let (evals_A, evals_B, evals_C) = 276 | inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx); 277 | 278 | assert_eq!(evals_A.len(), evals_B.len()); 279 | assert_eq!(evals_A.len(), evals_C.len()); 280 | (0..evals_A.len()) 281 | .map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i]) 282 | .collect::>() 283 | }; 284 | 285 | // another instance of the sum-check protocol 286 | let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two( 287 | num_rounds_y, 288 | &claim_phase2, 289 | &blind_claim_phase2, 290 | &mut DensePolynomial::new(z), 291 | &mut DensePolynomial::new(evals_ABC), 292 | &gens.gens_sc, 293 | transcript, 294 | random_tape, 295 | ); 296 | timer_sc_proof_phase2.stop(); 297 | 298 | let timer_polyeval = Timer::new("polyeval"); 299 | let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]); 300 | let blind_eval = random_tape.random_scalar(b"blind_eval"); 301 | let 
(proof_eval_vars_at_ry, comm_vars_at_ry) = PolyEvalProof::prove( 302 | &poly_vars, 303 | Some(&blinds_vars), 304 | &ry[1..], 305 | &eval_vars_at_ry, 306 | Some(&blind_eval), 307 | &gens.gens_pc, 308 | transcript, 309 | random_tape, 310 | ); 311 | timer_polyeval.stop(); 312 | 313 | // prove the final step of sum-check #2 314 | let blind_eval_Z_at_ry = (Scalar::one() - ry[0]) * blind_eval; 315 | let blind_expected_claim_postsc2 = claims_phase2[1] * blind_eval_Z_at_ry; 316 | let claim_post_phase2 = claims_phase2[0] * claims_phase2[1]; 317 | let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove( 318 | &gens.gens_pc.gens.gens_1, 319 | transcript, 320 | random_tape, 321 | &claim_post_phase2, 322 | &blind_expected_claim_postsc2, 323 | &claim_post_phase2, 324 | &blind_claim_postsc2, 325 | ); 326 | 327 | timer_prove.stop(); 328 | 329 | ( 330 | R1CSProof { 331 | comm_vars, 332 | sc_proof_phase1, 333 | claims_phase2: ( 334 | comm_Az_claim, 335 | comm_Bz_claim, 336 | comm_Cz_claim, 337 | comm_prod_Az_Bz_claims, 338 | ), 339 | pok_claims_phase2: (pok_Cz_claim, proof_prod), 340 | proof_eq_sc_phase1, 341 | sc_proof_phase2, 342 | comm_vars_at_ry, 343 | proof_eval_vars_at_ry, 344 | proof_eq_sc_phase2, 345 | }, 346 | rx, 347 | ry, 348 | ) 349 | } 350 | 351 | pub fn verify( 352 | &self, 353 | num_vars: usize, 354 | num_cons: usize, 355 | input: &[Scalar], 356 | evals: &(Scalar, Scalar, Scalar), 357 | transcript: &mut Transcript, 358 | gens: &R1CSGens, 359 | ) -> Result<(Vec, Vec), ProofVerifyError> { 360 | transcript.append_protocol_name(R1CSProof::protocol_name()); 361 | 362 | input.append_to_transcript(b"input", transcript); 363 | 364 | let n = num_vars; 365 | // add the commitment to the verifier's transcript 366 | self 367 | .comm_vars 368 | .append_to_transcript(b"poly_commitment", transcript); 369 | 370 | let (num_rounds_x, num_rounds_y) = (num_cons.log_2(), (2 * num_vars).log_2()); 371 | 372 | // derive the verifier's challenge tau 373 | let tau = 
transcript.challenge_vector(b"challenge_tau", num_rounds_x); 374 | 375 | // verify the first sum-check instance 376 | let claim_phase1 = Scalar::zero() 377 | .commit(&Scalar::zero(), &gens.gens_sc.gens_1) 378 | .compress(); 379 | let (comm_claim_post_phase1, rx) = self.sc_proof_phase1.verify( 380 | &claim_phase1, 381 | num_rounds_x, 382 | 3, 383 | &gens.gens_sc.gens_1, 384 | &gens.gens_sc.gens_4, 385 | transcript, 386 | )?; 387 | // perform the intermediate sum-check test with claimed Az, Bz, and Cz 388 | let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) = &self.claims_phase2; 389 | let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2; 390 | 391 | pok_Cz_claim.verify(&gens.gens_sc.gens_1, transcript, comm_Cz_claim)?; 392 | proof_prod.verify( 393 | &gens.gens_sc.gens_1, 394 | transcript, 395 | comm_Az_claim, 396 | comm_Bz_claim, 397 | comm_prod_Az_Bz_claims, 398 | )?; 399 | 400 | comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript); 401 | comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript); 402 | comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript); 403 | comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript); 404 | 405 | let taus_bound_rx: Scalar = (0..rx.len()) 406 | .map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i])) 407 | .product(); 408 | let expected_claim_post_phase1 = (taus_bound_rx 409 | * (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap())) 410 | .compress(); 411 | 412 | // verify proof that expected_claim_post_phase1 == claim_post_phase1 413 | self.proof_eq_sc_phase1.verify( 414 | &gens.gens_sc.gens_1, 415 | transcript, 416 | &expected_claim_post_phase1, 417 | &comm_claim_post_phase1, 418 | )?; 419 | 420 | // derive three public challenges and then derive a joint claim 421 | let r_A = transcript.challenge_scalar(b"challenge_Az"); 422 | let r_B = transcript.challenge_scalar(b"challenge_Bz"); 423 | let r_C = 
transcript.challenge_scalar(b"challenge_Cz"); 424 | 425 | // r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim; 426 | let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul( 427 | iter::once(&r_A) 428 | .chain(iter::once(&r_B)) 429 | .chain(iter::once(&r_C)), 430 | iter::once(&comm_Az_claim) 431 | .chain(iter::once(&comm_Bz_claim)) 432 | .chain(iter::once(&comm_Cz_claim)) 433 | .map(|pt| pt.decompress().unwrap()) 434 | .collect::>(), 435 | ) 436 | .compress(); 437 | 438 | // verify the joint claim with a sum-check protocol 439 | let (comm_claim_post_phase2, ry) = self.sc_proof_phase2.verify( 440 | &comm_claim_phase2, 441 | num_rounds_y, 442 | 2, 443 | &gens.gens_sc.gens_1, 444 | &gens.gens_sc.gens_3, 445 | transcript, 446 | )?; 447 | 448 | // verify Z(ry) proof against the initial commitment 449 | self.proof_eval_vars_at_ry.verify( 450 | &gens.gens_pc, 451 | transcript, 452 | &ry[1..], 453 | &self.comm_vars_at_ry, 454 | &self.comm_vars, 455 | )?; 456 | 457 | let poly_input_eval = { 458 | // constant term 459 | let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())]; 460 | //remaining inputs 461 | input_as_sparse_poly_entries.extend( 462 | (0..input.len()) 463 | .map(|i| SparsePolyEntry::new(i + 1, input[i])) 464 | .collect::>(), 465 | ); 466 | SparsePolynomial::new(n.log_2(), input_as_sparse_poly_entries).evaluate(&ry[1..]) 467 | }; 468 | 469 | // compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval 470 | let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul( 471 | iter::once(Scalar::one() - ry[0]).chain(iter::once(ry[0])), 472 | iter::once(&self.comm_vars_at_ry.decompress().unwrap()).chain(iter::once( 473 | &poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1), 474 | )), 475 | ); 476 | 477 | // perform the final check in the second sum-check protocol 478 | let (eval_A_r, eval_B_r, eval_C_r) = evals; 479 | let expected_claim_post_phase2 = 
480 | ((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress(); 481 | // verify proof that expected_claim_post_phase1 == claim_post_phase1 482 | self.proof_eq_sc_phase2.verify( 483 | &gens.gens_sc.gens_1, 484 | transcript, 485 | &expected_claim_post_phase2, 486 | &comm_claim_post_phase2, 487 | )?; 488 | 489 | Ok((rx, ry)) 490 | } 491 | } 492 | 493 | #[cfg(test)] 494 | mod tests { 495 | use super::*; 496 | use rand::rngs::OsRng; 497 | 498 | fn produce_tiny_r1cs() -> (R1CSShape, Vec, Vec) { 499 | // three constraints over five variables Z1, Z2, Z3, Z4, and Z5 500 | // rounded to the nearest power of two 501 | let num_cons = 128; 502 | let num_vars = 256; 503 | let num_inputs = 2; 504 | 505 | // encode the above constraints into three matrices 506 | let mut A: Vec<(usize, usize, Scalar)> = Vec::new(); 507 | let mut B: Vec<(usize, usize, Scalar)> = Vec::new(); 508 | let mut C: Vec<(usize, usize, Scalar)> = Vec::new(); 509 | 510 | let one = Scalar::one(); 511 | // constraint 0 entries 512 | // (Z1 + Z2) * I0 - Z3 = 0; 513 | A.push((0, 0, one)); 514 | A.push((0, 1, one)); 515 | B.push((0, num_vars + 1, one)); 516 | C.push((0, 2, one)); 517 | 518 | // constraint 1 entries 519 | // (Z1 + I1) * (Z3) - Z4 = 0 520 | A.push((1, 0, one)); 521 | A.push((1, num_vars + 2, one)); 522 | B.push((1, 2, one)); 523 | C.push((1, 3, one)); 524 | // constraint 3 entries 525 | // Z5 * 1 - 0 = 0 526 | A.push((2, 4, one)); 527 | B.push((2, num_vars, one)); 528 | 529 | let inst = R1CSShape::new(num_cons, num_vars, num_inputs, &A, &B, &C); 530 | 531 | // compute a satisfying assignment 532 | let mut csprng: OsRng = OsRng; 533 | let i0 = Scalar::random(&mut csprng); 534 | let i1 = Scalar::random(&mut csprng); 535 | let z1 = Scalar::random(&mut csprng); 536 | let z2 = Scalar::random(&mut csprng); 537 | let z3 = (z1 + z2) * i0; // constraint 1: (Z1 + Z2) * I0 - Z3 = 0; 538 | let z4 = (z1 + i1) * z3; // constraint 2: (Z1 + I1) * (Z3) - Z4 = 0 539 | let z5 = 
Scalar::zero(); //constraint 3 540 | 541 | let mut vars = vec![Scalar::zero(); num_vars]; 542 | vars[0] = z1; 543 | vars[1] = z2; 544 | vars[2] = z3; 545 | vars[3] = z4; 546 | vars[4] = z5; 547 | 548 | let mut input = vec![Scalar::zero(); num_inputs]; 549 | input[0] = i0; 550 | input[1] = i1; 551 | 552 | (inst, vars, input) 553 | } 554 | 555 | #[test] 556 | fn test_tiny_r1cs() { 557 | let (inst, vars, input) = tests::produce_tiny_r1cs(); 558 | let is_sat = inst.is_sat(&vars, &input); 559 | assert!(is_sat); 560 | } 561 | 562 | #[test] 563 | fn test_synthetic_r1cs() { 564 | let (inst, vars, input) = R1CSShape::produce_synthetic_r1cs(1024, 1024, 10); 565 | let is_sat = inst.is_sat(&vars, &input); 566 | assert!(is_sat); 567 | } 568 | 569 | #[test] 570 | pub fn check_r1cs_proof() { 571 | let num_vars = 1024; 572 | let num_cons = num_vars; 573 | let num_inputs = 10; 574 | let (inst, vars, input) = R1CSShape::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); 575 | 576 | let gens = R1CSGens::new(b"test-m", num_cons, num_vars); 577 | 578 | let mut random_tape = RandomTape::new(b"proof"); 579 | let mut prover_transcript = Transcript::new(b"example"); 580 | let (proof, rx, ry) = R1CSProof::prove( 581 | &inst, 582 | vars, 583 | &input, 584 | &gens, 585 | &mut prover_transcript, 586 | &mut random_tape, 587 | ); 588 | 589 | let inst_evals = inst.evaluate(&rx, &ry); 590 | 591 | let mut verifier_transcript = Transcript::new(b"example"); 592 | assert!(proof 593 | .verify( 594 | inst.get_num_vars(), 595 | inst.get_num_cons(), 596 | &input, 597 | &inst_evals, 598 | &mut verifier_transcript, 599 | &gens, 600 | ) 601 | .is_ok()); 602 | } 603 | } 604 | -------------------------------------------------------------------------------- /src/random.rs: -------------------------------------------------------------------------------- 1 | use super::scalar::Scalar; 2 | use super::transcript::ProofTranscript; 3 | use merlin::Transcript; 4 | use rand::rngs::OsRng; 5 | 6 | pub struct 
RandomTape { 7 | tape: Transcript, 8 | } 9 | 10 | impl RandomTape { 11 | pub fn new(name: &'static [u8]) -> Self { 12 | let tape = { 13 | let mut csprng: OsRng = OsRng; 14 | let mut tape = Transcript::new(name); 15 | tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng)); 16 | tape 17 | }; 18 | Self { tape } 19 | } 20 | 21 | pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar { 22 | self.tape.challenge_scalar(label) 23 | } 24 | 25 | pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec { 26 | self.tape.challenge_vector(label, len) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/scalar/mod.rs: -------------------------------------------------------------------------------- 1 | mod ristretto255; 2 | 3 | pub type Scalar = ristretto255::Scalar; 4 | pub type ScalarBytes = curve25519_dalek::scalar::Scalar; 5 | 6 | pub trait ScalarFromPrimitives { 7 | fn to_scalar(self) -> Scalar; 8 | } 9 | 10 | impl ScalarFromPrimitives for usize { 11 | #[inline] 12 | fn to_scalar(self) -> Scalar { 13 | (0..self).map(|_i| Scalar::one()).sum() 14 | } 15 | } 16 | 17 | impl ScalarFromPrimitives for bool { 18 | #[inline] 19 | fn to_scalar(self) -> Scalar { 20 | if self { 21 | Scalar::one() 22 | } else { 23 | Scalar::zero() 24 | } 25 | } 26 | } 27 | 28 | pub trait ScalarBytesFromScalar { 29 | fn decompress_scalar(s: &Scalar) -> ScalarBytes; 30 | } 31 | 32 | impl ScalarBytesFromScalar for Scalar { 33 | fn decompress_scalar(s: &Scalar) -> ScalarBytes { 34 | ScalarBytes::from_bytes_mod_order(s.to_bytes()) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/sumcheck.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::too_many_arguments)] 2 | #![allow(clippy::type_complexity)] 3 | use super::commitments::{Commitments, MultiCommitGens}; 4 | use super::dense_mlpoly::DensePolynomial; 5 | use 
super::errors::ProofVerifyError; 6 | use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; 7 | use super::nizk::DotProductProof; 8 | use super::random::RandomTape; 9 | use super::scalar::Scalar; 10 | use super::transcript::{AppendToTranscript, ProofTranscript}; 11 | use super::unipoly::{CompressedUniPoly, UniPoly}; 12 | use core::iter; 13 | use itertools::izip; 14 | use merlin::Transcript; 15 | use serde::{Deserialize, Serialize}; 16 | 17 | #[derive(Serialize, Deserialize, Debug)] 18 | pub struct SumcheckInstanceProof { 19 | compressed_polys: Vec, 20 | } 21 | 22 | impl SumcheckInstanceProof { 23 | pub fn new(compressed_polys: Vec) -> SumcheckInstanceProof { 24 | SumcheckInstanceProof { compressed_polys } 25 | } 26 | 27 | pub fn verify( 28 | &self, 29 | claim: Scalar, 30 | num_rounds: usize, 31 | degree_bound: usize, 32 | transcript: &mut Transcript, 33 | ) -> Result<(Scalar, Vec), ProofVerifyError> { 34 | let mut e = claim; 35 | let mut r: Vec = Vec::new(); 36 | 37 | // verify that there is a univariate polynomial for each round 38 | assert_eq!(self.compressed_polys.len(), num_rounds); 39 | for i in 0..self.compressed_polys.len() { 40 | let poly = self.compressed_polys[i].decompress(&e); 41 | 42 | // verify degree bound 43 | assert_eq!(poly.degree(), degree_bound); 44 | 45 | // check if G_k(0) + G_k(1) = e 46 | assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); 47 | 48 | // append the prover's message to the transcript 49 | poly.append_to_transcript(b"poly", transcript); 50 | 51 | //derive the verifier's challenge for the next round 52 | let r_i = transcript.challenge_scalar(b"challenge_nextround"); 53 | 54 | r.push(r_i); 55 | 56 | // evaluate the claimed degree-ell polynomial at r_i 57 | e = poly.evaluate(&r_i); 58 | } 59 | 60 | Ok((e, r)) 61 | } 62 | } 63 | 64 | #[derive(Serialize, Deserialize, Debug)] 65 | pub struct ZKSumcheckInstanceProof { 66 | comm_polys: Vec, 67 | comm_evals: Vec, 68 | proofs: Vec, 69 | } 70 | 71 | impl 
ZKSumcheckInstanceProof {
  pub fn new(
    comm_polys: Vec<CompressedGroup>,
    comm_evals: Vec<CompressedGroup>,
    proofs: Vec<DotProductProof>,
  ) -> Self {
    ZKSumcheckInstanceProof {
      comm_polys,
      comm_evals,
      proofs,
    }
  }

  /// Verifies a zero-knowledge sum-check proof against the commitment
  /// `comm_claim`, returning the commitment to the final evaluation and
  /// the per-round challenges.
  ///
  /// Proofs are untrusted input: structural mismatches and invalid
  /// compressed group elements yield
  /// `Err(ProofVerifyError::InternalError)` rather than panicking
  /// (the original used `assert_eq!` and `.decompress().unwrap()`).
  pub fn verify(
    &self,
    comm_claim: &CompressedGroup,
    num_rounds: usize,
    degree_bound: usize,
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
  ) -> Result<(CompressedGroup, Vec<Scalar>), ProofVerifyError> {
    // the n-generators must cover degree_bound + 1 coefficients
    if gens_n.n != degree_bound + 1 {
      return Err(ProofVerifyError::InternalError);
    }

    // one committed polynomial and one committed evaluation per round
    if self.comm_polys.len() != num_rounds || self.comm_evals.len() != num_rounds {
      return Err(ProofVerifyError::InternalError);
    }

    let mut r: Vec<Scalar> = Vec::new();
    for i in 0..self.comm_polys.len() {
      let comm_poly = &self.comm_polys[i];

      // append the prover's committed polynomial to the transcript
      comm_poly.append_to_transcript(b"comm_poly", transcript);

      // derive the verifier's challenge for the next round
      let r_i = transcript.challenge_scalar(b"challenge_nextround");

      // verify the proof of sum-check and evaluations
      let res = {
        // round 0 uses the input claim; later rounds use the previous
        // round's committed evaluation as the claim
        let comm_claim_per_round = if i == 0 {
          comm_claim
        } else {
          &self.comm_evals[i - 1]
        };
        let comm_eval = &self.comm_evals[i];

        // add the two claims to the transcript
        comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
        comm_eval.append_to_transcript(b"comm_eval", transcript);

        // produce two weights to fold the two claims into one
        let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

        // decompress the two commitments; reject invalid encodings
        // instead of unwrapping — a malformed proof must not panic
        let claim_pt = comm_claim_per_round
          .decompress()
          .ok_or(ProofVerifyError::InternalError)?;
        let eval_pt = comm_eval
          .decompress()
          .ok_or(ProofVerifyError::InternalError)?;

        // compute the w-weighted sum of the RHS
        let comm_target =
          GroupElement::vartime_multiscalar_mul(w.iter(), vec![claim_pt, eval_pt]).compress();

        let a = {
          // the vector to decommit for the sum-check test:
          // <a_sc, poly> = poly(0) + poly(1) (constant term counted twice)
          let a_sc = {
            let mut a = vec![Scalar::one(); degree_bound + 1];
            a[0] += Scalar::one();
            a
          };

          // the vector to decommit for the evaluation:
          // powers of r_i, so <a_eval, poly> = poly(r_i)
          let a_eval = {
            let mut a = vec![Scalar::one(); degree_bound + 1];
            for j in 1..a.len() {
              a[j] = a[j - 1] * r_i;
            }
            a
          };

          // take the w-weighted sum of the two decommitment vectors
          assert_eq!(a_sc.len(), a_eval.len());
          (0..a_sc.len())
            .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
            .collect::<Vec<Scalar>>()
        };

        self.proofs[i]
          .verify(
            gens_1,
            gens_n,
            transcript,
            &a,
            &self.comm_polys[i],
            &comm_target,
          )
          .is_ok()
      };
      if !res {
        return Err(ProofVerifyError::InternalError);
      }

      r.push(r_i);
    }

    // the final committed evaluation plus the full challenge vector
    Ok((self.comm_evals[self.comm_evals.len() - 1], r))
  }
}

impl SumcheckInstanceProof {
  /// Proves a sum-check instance for a cubic combination of three
  /// multilinear polynomials (no zero-knowledge).
  ///
  /// Returns the proof, the challenge vector r, and the final bound
  /// values [A(r), B(r), C(r)].
  pub fn prove_cubic<F>(
    claim: &Scalar,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    poly_C: &mut DensePolynomial,
    comb_func: F,
    transcript: &mut Transcript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>)
  where
    F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
  {
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();
    for _j in 0..num_rounds {
      let mut eval_point_0 = Scalar::zero();
      let mut eval_point_2 = Scalar::zero();
      let mut eval_point_3 = Scalar::zero();

      let len = poly_A.len() / 2;
      for i in 0..len {
        // eval 0: bound_func is A(low)
        eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);

        // eval 2: bound_func is -A(low) + 2*A(high)
        let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
        let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
        let
poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; 212 | eval_point_2 += comb_func( 213 | &poly_A_bound_point, 214 | &poly_B_bound_point, 215 | &poly_C_bound_point, 216 | ); 217 | 218 | // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) 219 | let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; 220 | let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; 221 | let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; 222 | 223 | eval_point_3 += comb_func( 224 | &poly_A_bound_point, 225 | &poly_B_bound_point, 226 | &poly_C_bound_point, 227 | ); 228 | } 229 | 230 | let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3]; 231 | let poly = UniPoly::from_evals(&evals); 232 | 233 | // append the prover's message to the transcript 234 | poly.append_to_transcript(b"poly", transcript); 235 | 236 | //derive the verifier's challenge for the next round 237 | let r_j = transcript.challenge_scalar(b"challenge_nextround"); 238 | r.push(r_j); 239 | // bound all tables to the verifier's challenge 240 | poly_A.bound_poly_var_top(&r_j); 241 | poly_B.bound_poly_var_top(&r_j); 242 | poly_C.bound_poly_var_top(&r_j); 243 | e = poly.evaluate(&r_j); 244 | cubic_polys.push(poly.compress()); 245 | } 246 | 247 | ( 248 | SumcheckInstanceProof::new(cubic_polys), 249 | r, 250 | vec![poly_A[0], poly_B[0], poly_C[0]], 251 | ) 252 | } 253 | 254 | pub fn prove_cubic_batched( 255 | claim: &Scalar, 256 | num_rounds: usize, 257 | poly_vec_par: ( 258 | &mut Vec<&mut DensePolynomial>, 259 | &mut Vec<&mut DensePolynomial>, 260 | &mut DensePolynomial, 261 | ), 262 | poly_vec_seq: ( 263 | &mut Vec<&mut DensePolynomial>, 264 | &mut Vec<&mut DensePolynomial>, 265 | &mut Vec<&mut DensePolynomial>, 266 | ), 267 | coeffs: &[Scalar], 268 | comb_func: F, 269 | transcript: &mut Transcript, 270 | ) -> ( 271 | Self, 272 | Vec, 273 | (Vec, Vec, Scalar), 274 | (Vec, Vec, 
Vec), 275 | ) 276 | where 277 | F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar, 278 | { 279 | let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par; 280 | let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; 281 | 282 | //let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; 283 | let mut e = *claim; 284 | let mut r: Vec = Vec::new(); 285 | let mut cubic_polys: Vec = Vec::new(); 286 | 287 | for _j in 0..num_rounds { 288 | let mut evals: Vec<(Scalar, Scalar, Scalar)> = Vec::new(); 289 | 290 | for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) { 291 | let mut eval_point_0 = Scalar::zero(); 292 | let mut eval_point_2 = Scalar::zero(); 293 | let mut eval_point_3 = Scalar::zero(); 294 | 295 | let len = poly_A.len() / 2; 296 | for i in 0..len { 297 | // eval 0: bound_func is A(low) 298 | eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]); 299 | 300 | // eval 2: bound_func is -A(low) + 2*A(high) 301 | let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; 302 | let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; 303 | let poly_C_bound_point = poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i]; 304 | eval_point_2 += comb_func( 305 | &poly_A_bound_point, 306 | &poly_B_bound_point, 307 | &poly_C_bound_point, 308 | ); 309 | 310 | // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) 311 | let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; 312 | let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; 313 | let poly_C_bound_point = poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i]; 314 | 315 | eval_point_3 += comb_func( 316 | &poly_A_bound_point, 317 | &poly_B_bound_point, 318 | &poly_C_bound_point, 319 | ); 320 | } 321 | 322 | evals.push((eval_point_0, eval_point_2, eval_point_3)); 323 | } 324 | 325 | for (poly_A, poly_B, poly_C) in izip!( 326 | 
poly_A_vec_seq.iter(), 327 | poly_B_vec_seq.iter(), 328 | poly_C_vec_seq.iter() 329 | ) { 330 | let mut eval_point_0 = Scalar::zero(); 331 | let mut eval_point_2 = Scalar::zero(); 332 | let mut eval_point_3 = Scalar::zero(); 333 | let len = poly_A.len() / 2; 334 | for i in 0..len { 335 | // eval 0: bound_func is A(low) 336 | eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); 337 | // eval 2: bound_func is -A(low) + 2*A(high) 338 | let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; 339 | let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; 340 | let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; 341 | eval_point_2 += comb_func( 342 | &poly_A_bound_point, 343 | &poly_B_bound_point, 344 | &poly_C_bound_point, 345 | ); 346 | // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) 347 | let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; 348 | let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; 349 | let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; 350 | eval_point_3 += comb_func( 351 | &poly_A_bound_point, 352 | &poly_B_bound_point, 353 | &poly_C_bound_point, 354 | ); 355 | } 356 | evals.push((eval_point_0, eval_point_2, eval_point_3)); 357 | } 358 | 359 | let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); 360 | let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); 361 | let evals_combined_3 = (0..evals.len()).map(|i| evals[i].2 * coeffs[i]).sum(); 362 | 363 | let evals = vec![ 364 | evals_combined_0, 365 | e - evals_combined_0, 366 | evals_combined_2, 367 | evals_combined_3, 368 | ]; 369 | let poly = UniPoly::from_evals(&evals); 370 | 371 | // append the prover's message to the transcript 372 | poly.append_to_transcript(b"poly", transcript); 373 | 374 | //derive the verifier's challenge for the next round 375 | let r_j = 
transcript.challenge_scalar(b"challenge_nextround"); 376 | r.push(r_j); 377 | 378 | // bound all tables to the verifier's challenge 379 | for (poly_A, poly_B) in poly_A_vec_par.iter_mut().zip(poly_B_vec_par.iter_mut()) { 380 | poly_A.bound_poly_var_top(&r_j); 381 | poly_B.bound_poly_var_top(&r_j); 382 | } 383 | poly_C_par.bound_poly_var_top(&r_j); 384 | 385 | for (poly_A, poly_B, poly_C) in izip!( 386 | poly_A_vec_seq.iter_mut(), 387 | poly_B_vec_seq.iter_mut(), 388 | poly_C_vec_seq.iter_mut() 389 | ) { 390 | poly_A.bound_poly_var_top(&r_j); 391 | poly_B.bound_poly_var_top(&r_j); 392 | poly_C.bound_poly_var_top(&r_j); 393 | } 394 | 395 | e = poly.evaluate(&r_j); 396 | cubic_polys.push(poly.compress()); 397 | } 398 | 399 | let poly_A_par_final = (0..poly_A_vec_par.len()) 400 | .map(|i| poly_A_vec_par[i][0]) 401 | .collect(); 402 | let poly_B_par_final = (0..poly_B_vec_par.len()) 403 | .map(|i| poly_B_vec_par[i][0]) 404 | .collect(); 405 | let claims_prod = (poly_A_par_final, poly_B_par_final, poly_C_par[0]); 406 | 407 | let poly_A_seq_final = (0..poly_A_vec_seq.len()) 408 | .map(|i| poly_A_vec_seq[i][0]) 409 | .collect(); 410 | let poly_B_seq_final = (0..poly_B_vec_seq.len()) 411 | .map(|i| poly_B_vec_seq[i][0]) 412 | .collect(); 413 | let poly_C_seq_final = (0..poly_C_vec_seq.len()) 414 | .map(|i| poly_C_vec_seq[i][0]) 415 | .collect(); 416 | let claims_dotp = (poly_A_seq_final, poly_B_seq_final, poly_C_seq_final); 417 | 418 | ( 419 | SumcheckInstanceProof::new(cubic_polys), 420 | r, 421 | claims_prod, 422 | claims_dotp, 423 | ) 424 | } 425 | } 426 | 427 | impl ZKSumcheckInstanceProof { 428 | pub fn prove_quad( 429 | claim: &Scalar, 430 | blind_claim: &Scalar, 431 | num_rounds: usize, 432 | poly_A: &mut DensePolynomial, 433 | poly_B: &mut DensePolynomial, 434 | comb_func: F, 435 | gens_1: &MultiCommitGens, 436 | gens_n: &MultiCommitGens, 437 | transcript: &mut Transcript, 438 | random_tape: &mut RandomTape, 439 | ) -> (Self, Vec, Vec, Scalar) 440 | where 441 | F: 
Fn(&Scalar, &Scalar) -> Scalar, 442 | { 443 | let (blinds_poly, blinds_evals) = ( 444 | random_tape.random_vector(b"blinds_poly", num_rounds), 445 | random_tape.random_vector(b"blinds_evals", num_rounds), 446 | ); 447 | let mut claim_per_round = *claim; 448 | let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress(); 449 | 450 | let mut r: Vec = Vec::new(); 451 | let mut comm_polys: Vec = Vec::new(); 452 | let mut comm_evals: Vec = Vec::new(); 453 | let mut proofs: Vec = Vec::new(); 454 | 455 | for j in 0..num_rounds { 456 | let (poly, comm_poly) = { 457 | let mut eval_point_0 = Scalar::zero(); 458 | let mut eval_point_2 = Scalar::zero(); 459 | 460 | let len = poly_A.len() / 2; 461 | for i in 0..len { 462 | // eval 0: bound_func is A(low) 463 | eval_point_0 += comb_func(&poly_A[i], &poly_B[i]); 464 | 465 | // eval 2: bound_func is -A(low) + 2*A(high) 466 | let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; 467 | let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; 468 | eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point); 469 | } 470 | 471 | let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2]; 472 | let poly = UniPoly::from_evals(&evals); 473 | let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress(); 474 | (poly, comm_poly) 475 | }; 476 | 477 | // append the prover's message to the transcript 478 | comm_poly.append_to_transcript(b"comm_poly", transcript); 479 | comm_polys.push(comm_poly); 480 | 481 | //derive the verifier's challenge for the next round 482 | let r_j = transcript.challenge_scalar(b"challenge_nextround"); 483 | 484 | // bound all tables to the verifier's challenge 485 | poly_A.bound_poly_var_top(&r_j); 486 | poly_B.bound_poly_var_top(&r_j); 487 | 488 | // produce a proof of sum-check and of evaluation 489 | let (proof, claim_next_round, comm_claim_next_round) = { 490 | let eval = poly.evaluate(&r_j); 491 | let comm_eval = 
eval.commit(&blinds_evals[j], gens_1).compress(); 492 | 493 | // we need to prove the following under homomorphic commitments: 494 | // (1) poly(0) + poly(1) = claim_per_round 495 | // (2) poly(r_j) = eval 496 | 497 | // Our technique is to leverage dot product proofs: 498 | // (1) we can prove: = claim_per_round 499 | // (2) we can prove: >(), 517 | ) 518 | .compress(); 519 | 520 | let blind = { 521 | let blind_sc = if j == 0 { 522 | blind_claim 523 | } else { 524 | &blinds_evals[j - 1] 525 | }; 526 | 527 | let blind_eval = &blinds_evals[j]; 528 | 529 | w[0] * blind_sc + w[1] * blind_eval 530 | }; 531 | assert_eq!(target.commit(&blind, gens_1).compress(), comm_target); 532 | 533 | let a = { 534 | // the vector to use to decommit for sum-check test 535 | let a_sc = { 536 | let mut a = vec![Scalar::one(); poly.degree() + 1]; 537 | a[0] += Scalar::one(); 538 | a 539 | }; 540 | 541 | // the vector to use to decommit for evaluation 542 | let a_eval = { 543 | let mut a = vec![Scalar::one(); poly.degree() + 1]; 544 | for j in 1..a.len() { 545 | a[j] = a[j - 1] * r_j; 546 | } 547 | a 548 | }; 549 | 550 | // take weighted sum of the two vectors using w 551 | assert_eq!(a_sc.len(), a_eval.len()); 552 | (0..a_sc.len()) 553 | .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) 554 | .collect::>() 555 | }; 556 | 557 | let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove( 558 | gens_1, 559 | gens_n, 560 | transcript, 561 | random_tape, 562 | &poly.as_vec(), 563 | &blinds_poly[j], 564 | &a, 565 | &target, 566 | &blind, 567 | ); 568 | 569 | (proof, eval, comm_eval) 570 | }; 571 | 572 | claim_per_round = claim_next_round; 573 | comm_claim_per_round = comm_claim_next_round; 574 | 575 | proofs.push(proof); 576 | r.push(r_j); 577 | comm_evals.push(comm_claim_per_round); 578 | } 579 | 580 | ( 581 | ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs), 582 | r, 583 | vec![poly_A[0], poly_B[0]], 584 | blinds_evals[num_rounds - 1], 585 | ) 586 | } 587 | 588 | pub fn 
prove_cubic_with_additive_term( 589 | claim: &Scalar, 590 | blind_claim: &Scalar, 591 | num_rounds: usize, 592 | poly_A: &mut DensePolynomial, 593 | poly_B: &mut DensePolynomial, 594 | poly_C: &mut DensePolynomial, 595 | poly_D: &mut DensePolynomial, 596 | comb_func: F, 597 | gens_1: &MultiCommitGens, 598 | gens_n: &MultiCommitGens, 599 | transcript: &mut Transcript, 600 | random_tape: &mut RandomTape, 601 | ) -> (Self, Vec, Vec, Scalar) 602 | where 603 | F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar, 604 | { 605 | let (blinds_poly, blinds_evals) = ( 606 | random_tape.random_vector(b"blinds_poly", num_rounds), 607 | random_tape.random_vector(b"blinds_evals", num_rounds), 608 | ); 609 | 610 | let mut claim_per_round = *claim; 611 | let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress(); 612 | 613 | let mut r: Vec = Vec::new(); 614 | let mut comm_polys: Vec = Vec::new(); 615 | let mut comm_evals: Vec = Vec::new(); 616 | let mut proofs: Vec = Vec::new(); 617 | 618 | for j in 0..num_rounds { 619 | let (poly, comm_poly) = { 620 | let mut eval_point_0 = Scalar::zero(); 621 | let mut eval_point_2 = Scalar::zero(); 622 | let mut eval_point_3 = Scalar::zero(); 623 | 624 | let len = poly_A.len() / 2; 625 | for i in 0..len { 626 | // eval 0: bound_func is A(low) 627 | eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); 628 | 629 | // eval 2: bound_func is -A(low) + 2*A(high) 630 | let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; 631 | let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; 632 | let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; 633 | let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; 634 | eval_point_2 += comb_func( 635 | &poly_A_bound_point, 636 | &poly_B_bound_point, 637 | &poly_C_bound_point, 638 | &poly_D_bound_point, 639 | ); 640 | 641 | // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with 
bound_func applied to eval(2) 642 | let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; 643 | let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; 644 | let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; 645 | let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; 646 | eval_point_3 += comb_func( 647 | &poly_A_bound_point, 648 | &poly_B_bound_point, 649 | &poly_C_bound_point, 650 | &poly_D_bound_point, 651 | ); 652 | } 653 | 654 | let evals = vec![ 655 | eval_point_0, 656 | claim_per_round - eval_point_0, 657 | eval_point_2, 658 | eval_point_3, 659 | ]; 660 | let poly = UniPoly::from_evals(&evals); 661 | let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress(); 662 | (poly, comm_poly) 663 | }; 664 | 665 | // append the prover's message to the transcript 666 | comm_poly.append_to_transcript(b"comm_poly", transcript); 667 | comm_polys.push(comm_poly); 668 | 669 | //derive the verifier's challenge for the next round 670 | let r_j = transcript.challenge_scalar(b"challenge_nextround"); 671 | 672 | // bound all tables to the verifier's challenge 673 | poly_A.bound_poly_var_top(&r_j); 674 | poly_B.bound_poly_var_top(&r_j); 675 | poly_C.bound_poly_var_top(&r_j); 676 | poly_D.bound_poly_var_top(&r_j); 677 | 678 | // produce a proof of sum-check and of evaluation 679 | let (proof, claim_next_round, comm_claim_next_round) = { 680 | let eval = poly.evaluate(&r_j); 681 | let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress(); 682 | 683 | // we need to prove the following under homomorphic commitments: 684 | // (1) poly(0) + poly(1) = claim_per_round 685 | // (2) poly(r_j) = eval 686 | 687 | // Our technique is to leverage dot product proofs: 688 | // (1) we can prove: = claim_per_round 689 | // (2) we can prove: >(), 707 | ) 708 | .compress(); 709 | 710 | let blind = { 711 | let blind_sc = if j == 0 { 712 | blind_claim 713 | } else { 714 | &blinds_evals[j - 1] 715 | }; 716 
| 717 | let blind_eval = &blinds_evals[j]; 718 | 719 | w[0] * blind_sc + w[1] * blind_eval 720 | }; 721 | 722 | assert_eq!(target.commit(&blind, gens_1).compress(), comm_target); 723 | 724 | let a = { 725 | // the vector to use to decommit for sum-check test 726 | let a_sc = { 727 | let mut a = vec![Scalar::one(); poly.degree() + 1]; 728 | a[0] += Scalar::one(); 729 | a 730 | }; 731 | 732 | // the vector to use to decommit for evaluation 733 | let a_eval = { 734 | let mut a = vec![Scalar::one(); poly.degree() + 1]; 735 | for j in 1..a.len() { 736 | a[j] = a[j - 1] * r_j; 737 | } 738 | a 739 | }; 740 | 741 | // take weighted sum of the two vectors using w 742 | assert_eq!(a_sc.len(), a_eval.len()); 743 | (0..a_sc.len()) 744 | .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) 745 | .collect::>() 746 | }; 747 | 748 | let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove( 749 | gens_1, 750 | gens_n, 751 | transcript, 752 | random_tape, 753 | &poly.as_vec(), 754 | &blinds_poly[j], 755 | &a, 756 | &target, 757 | &blind, 758 | ); 759 | 760 | (proof, eval, comm_eval) 761 | }; 762 | 763 | proofs.push(proof); 764 | claim_per_round = claim_next_round; 765 | comm_claim_per_round = comm_claim_next_round; 766 | r.push(r_j); 767 | comm_evals.push(comm_claim_per_round); 768 | } 769 | 770 | ( 771 | ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs), 772 | r, 773 | vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]], 774 | blinds_evals[num_rounds - 1], 775 | ) 776 | } 777 | } 778 | -------------------------------------------------------------------------------- /src/timer.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "profile")] 2 | use colored::Colorize; 3 | #[cfg(feature = "profile")] 4 | use core::sync::atomic::AtomicUsize; 5 | #[cfg(feature = "profile")] 6 | use core::sync::atomic::Ordering; 7 | #[cfg(feature = "profile")] 8 | use std::time::Instant; 9 | 10 | #[cfg(feature = "profile")] 11 | pub static 
CALL_DEPTH: AtomicUsize = AtomicUsize::new(0); /* nesting depth of live timers; each level indents profiler output by two spaces */ 12 | 13 | #[cfg(feature = "profile")] 14 | pub struct Timer { /* wall-clock timer: prints an indented, labeled line on new() and the elapsed Duration on stop() */ 15 | label: String, 16 | timer: Instant, 17 | } 18 | 19 | #[cfg(feature = "profile")] 20 | impl Timer { 21 | #[inline(always)] 22 | pub fn new(label: &str) -> Self { 23 | let timer = Instant::now(); 24 | CALL_DEPTH.fetch_add(1, Ordering::Relaxed); 25 | let star = "* "; 26 | println!( 27 | "{:indent$}{}{}", 28 | "", 29 | star, 30 | label.yellow().bold(), 31 | indent = 2 * CALL_DEPTH.load(Ordering::Relaxed) /* plain load; fetch_add(0) was a read disguised as an RMW */ 32 | ); 33 | Self { 34 | label: label.to_string(), 35 | timer, 36 | } 37 | } 38 | 39 | #[inline(always)] 40 | pub fn stop(&self) { 41 | let duration = self.timer.elapsed(); 42 | let star = "* "; 43 | println!( 44 | "{:indent$}{}{} {:?}", 45 | "", 46 | star, 47 | self.label.blue().bold(), 48 | duration, 49 | indent = 2 * CALL_DEPTH.load(Ordering::Relaxed) 50 | ); 51 | CALL_DEPTH.fetch_sub(1, Ordering::Relaxed); 52 | } 53 | 54 | #[inline(always)] 55 | pub fn print(msg: &str) { /* one-off message printed one level deeper than the current scope */ 56 | CALL_DEPTH.fetch_add(1, Ordering::Relaxed); 57 | let star = "* "; 58 | println!( 59 | "{:indent$}{}{}", 60 | "", 61 | star, 62 | msg.to_string().green().bold(), 63 | indent = 2 * CALL_DEPTH.load(Ordering::Relaxed) 64 | ); 65 | CALL_DEPTH.fetch_sub(1, Ordering::Relaxed); 66 | } 67 | } 68 | 69 | #[cfg(not(feature = "profile"))] 70 | pub struct Timer { /* zero-cost stand-in compiled when the "profile" feature is off */ 71 | _label: String, 72 | } 73 | 74 | #[cfg(not(feature = "profile"))] 75 | impl Timer { 76 | #[inline(always)] 77 | pub fn new(label: &str) -> Self { 78 | Self { 79 | _label: label.to_string(), 80 | } 81 | } 82 | 83 | #[inline(always)] 84 | pub fn stop(&self) {} 85 | 86 | #[inline(always)] 87 | pub fn print(_msg: &str) {} 88 | } 89 | -------------------------------------------------------------------------------- /src/transcript.rs: -------------------------------------------------------------------------------- 1 | use super::group::CompressedGroup; 2 | use super::scalar::Scalar; 3 | use merlin::Transcript; 4 | 
5 | pub trait ProofTranscript { /* Fiat-Shamir interface over merlin::Transcript: absorb protocol messages, then derive scalar challenges */ 6 | fn append_protocol_name(&mut self, protocol_name: &'static [u8]); 7 | fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar); 8 | fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup); 9 | fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar; 10 | fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec; /* NOTE(review): generic args appear stripped by extraction ("Vec" is presumably Vec<Scalar>) - confirm against the repo */ 11 | } 12 | 13 | impl ProofTranscript for Transcript { 14 | fn append_protocol_name(&mut self, protocol_name: &'static [u8]) { 15 | self.append_message(b"protocol-name", protocol_name); 16 | } 17 | 18 | fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) { 19 | self.append_message(label, &scalar.to_bytes()); 20 | } 21 | 22 | fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) { 23 | self.append_message(label, point.as_bytes()); 24 | } 25 | 26 | fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar { 27 | let mut buf = [0u8; 64]; /* 64 transcript bytes, wide-reduced mod the group order so the challenge is near-uniform */ 28 | self.challenge_bytes(label, &mut buf); 29 | Scalar::from_bytes_wide(&buf) 30 | } 31 | 32 | fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec { 33 | (0..len) 34 | .map(|_i| self.challenge_scalar(label)) 35 | .collect::>() 36 | } 37 | } 38 | 39 | pub trait AppendToTranscript { /* anything that knows how to serialize itself into the transcript */ 40 | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript); 41 | } 42 | 43 | impl AppendToTranscript for Scalar { 44 | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { 45 | transcript.append_scalar(label, self); 46 | } 47 | } 48 | 49 | impl AppendToTranscript for [Scalar] { 50 | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { 51 | transcript.append_message(label, b"begin_append_vector"); /* begin/end framing keeps adjacent vector encodings unambiguous */ 52 | for item in self { 53 | transcript.append_scalar(label, item); 54 | } 55 | transcript.append_message(label, b"end_append_vector"); 56 | } 57 | } 58 | 59 | impl AppendToTranscript for CompressedGroup { 60 | fn 
append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { 61 | transcript.append_point(label, self); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/unipoly.rs: -------------------------------------------------------------------------------- 1 | use super::commitments::{Commitments, MultiCommitGens}; 2 | use super::group::GroupElement; 3 | use super::scalar::{Scalar, ScalarFromPrimitives}; 4 | use super::transcript::{AppendToTranscript, ProofTranscript}; 5 | use merlin::Transcript; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | // ax^2 + bx + c stored as vec![c,b,a] 9 | // ax^3 + bx^2 + cx + d stored as vec![d,c,b,a] 10 | #[derive(Debug)] 11 | pub struct UniPoly { 12 | coeffs: Vec, /* NOTE(review): element type stripped by extraction; presumably Vec<Scalar> - confirm */ 13 | } 14 | 15 | // ax^2 + bx + c stored as vec![c,a] 16 | // ax^3 + bx^2 + cx + d stored as vec![d,b,a] 17 | #[derive(Serialize, Deserialize, Debug)] 18 | pub struct CompressedUniPoly { /* coefficient form with the linear term omitted; recoverable via decompress() */ 19 | coeffs_except_linear_term: Vec, 20 | } 21 | 22 | impl UniPoly { 23 | pub fn from_evals(evals: &[Scalar]) -> Self { /* interpolates monomial coefficients from evaluations at x = 0,1,2(,3) via closed-form finite differences */ 24 | // we only support degree-2 or degree-3 univariate polynomials 25 | assert!(evals.len() == 3 || evals.len() == 4); 26 | let coeffs = if evals.len() == 3 { 27 | // ax^2 + bx + c 28 | let two_inv = (2_usize).to_scalar().invert().unwrap(); 29 | 30 | let c = evals[0]; 31 | let a = two_inv * (evals[2] - evals[1] - evals[1] + c); /* a = (e2 - 2*e1 + e0) / 2 */ 32 | let b = evals[1] - c - a; /* from e1 = a + b + c */ 33 | vec![c, b, a] 34 | } else { 35 | // ax^3 + bx^2 + cx + d 36 | let two_inv = (2_usize).to_scalar().invert().unwrap(); 37 | let six_inv = (6_usize).to_scalar().invert().unwrap(); 38 | 39 | let d = evals[0]; 40 | let a = six_inv 41 | * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]); /* a = (e3 - 3*e2 + 3*e1 - e0) / 6 */ 42 | let b = two_inv 43 | * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1] 44 | + evals[2] 45 | + evals[2] 46 | + evals[2] 47 | + evals[2] 48 | - evals[3]); /* b = (2*e0 - 5*e1 + 4*e2 - e3) / 2 */ 49 | let c = evals[1] - d - a - b; /* from e1 = a + b + c + d */ 50 | 
vec![d, c, b, a] 51 | }; 52 | 53 | UniPoly { coeffs } 54 | } 55 | 56 | pub fn degree(&self) -> usize { 57 | self.coeffs.len() - 1 58 | } 59 | 60 | pub fn as_vec(&self) -> Vec { 61 | self.coeffs.clone() 62 | } 63 | 64 | pub fn eval_at_zero(&self) -> Scalar { /* constant term */ 65 | self.coeffs[0] 66 | } 67 | 68 | pub fn eval_at_one(&self) -> Scalar { /* sum of all coefficients */ 69 | (0..self.coeffs.len()).map(|i| self.coeffs[i]).sum() 70 | } 71 | 72 | pub fn evaluate(&self, r: &Scalar) -> Scalar { /* sum_i coeffs[i] * r^i, with a running power of r */ 73 | let mut eval = self.coeffs[0]; 74 | let mut power = *r; 75 | for i in 1..self.coeffs.len() { 76 | eval += power * self.coeffs[i]; 77 | power *= r; 78 | } 79 | eval 80 | } 81 | 82 | pub fn compress(&self) -> CompressedUniPoly { /* drop the linear coefficient; the verifier re-derives it from eval(0)+eval(1) = hint */ 83 | let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat(); 84 | assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len()); 85 | CompressedUniPoly { 86 | coeffs_except_linear_term, 87 | } 88 | } 89 | 90 | pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement { /* Pedersen-style multi-commitment to the coefficient vector under blind */ 91 | self.coeffs.commit(blind, gens) 92 | } 93 | } 94 | 95 | impl CompressedUniPoly { 96 | // we require eval(0) + eval(1) = hint, so we can solve for the linear term as: 97 | // linear_term = hint - 2 * constant_term - deg2 term - deg3 term 98 | pub fn decompress(&self, hint: &Scalar) -> UniPoly { 99 | let mut linear_term = 100 | hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0]; 101 | for i in 1..self.coeffs_except_linear_term.len() { 102 | linear_term -= self.coeffs_except_linear_term[i]; 103 | } 104 | 105 | let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term]; /* rebuild [constant, linear, higher...] ordering */ 106 | coeffs.extend(&self.coeffs_except_linear_term[1..]); 107 | assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len()); 108 | UniPoly { coeffs } 109 | } 110 | } 111 | 112 | impl AppendToTranscript for UniPoly { /* frames the coefficients between begin/end markers for Fiat-Shamir */ 113 | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { 114 | transcript.append_message(label, b"UniPoly_begin"); 115 | 
for i in 0..self.coeffs.len() { 116 | transcript.append_scalar(b"coeff", &self.coeffs[i]); 117 | } 118 | transcript.append_message(label, b"UniPoly_end"); 119 | } 120 | } 121 | 122 | #[cfg(test)] 123 | mod tests { /* exercises interpolation (from_evals), compress/decompress round-trip, and evaluation on known polynomials */ 124 | 125 | use super::*; 126 | 127 | #[test] 128 | fn test_from_evals_quad() { 129 | // polynomial is 2x^2 + 3x + 1 130 | let e0 = Scalar::one(); 131 | let e1 = (6_usize).to_scalar(); 132 | let e2 = (15_usize).to_scalar(); 133 | let evals = vec![e0, e1, e2]; 134 | let poly = UniPoly::from_evals(&evals); 135 | 136 | assert_eq!(poly.eval_at_zero(), e0); 137 | assert_eq!(poly.eval_at_one(), e1); 138 | assert_eq!(poly.coeffs.len(), 3); 139 | assert_eq!(poly.coeffs[0], Scalar::one()); 140 | assert_eq!(poly.coeffs[1], (3_usize).to_scalar()); 141 | assert_eq!(poly.coeffs[2], (2_usize).to_scalar()); 142 | 143 | let hint = e0 + e1; /* decompress() contract: hint = eval(0) + eval(1) */ 144 | let compressed_poly = poly.compress(); 145 | let decompressed_poly = compressed_poly.decompress(&hint); 146 | for i in 0..decompressed_poly.coeffs.len() { 147 | assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); 148 | } 149 | 150 | let e3 = (28_usize).to_scalar(); /* 2*3^2 + 3*3 + 1 */ 151 | assert_eq!(poly.evaluate(&(3_usize).to_scalar()), e3); 152 | } 153 | 154 | #[test] 155 | fn test_from_evals_cubic() { 156 | // polynomial is x^3 + 2x^2 + 3x + 1 157 | let e0 = Scalar::one(); 158 | let e1 = (7_usize).to_scalar(); 159 | let e2 = (23_usize).to_scalar(); 160 | let e3 = (55_usize).to_scalar(); 161 | let evals = vec![e0, e1, e2, e3]; 162 | let poly = UniPoly::from_evals(&evals); 163 | 164 | assert_eq!(poly.eval_at_zero(), e0); 165 | assert_eq!(poly.eval_at_one(), e1); 166 | assert_eq!(poly.coeffs.len(), 4); 167 | assert_eq!(poly.coeffs[0], Scalar::one()); 168 | assert_eq!(poly.coeffs[1], (3_usize).to_scalar()); 169 | assert_eq!(poly.coeffs[2], (2_usize).to_scalar()); 170 | assert_eq!(poly.coeffs[3], (1_usize).to_scalar()); 171 | 172 | let hint = e0 + e1; 173 | let compressed_poly = poly.compress(); 174 | let decompressed_poly = 
compressed_poly.decompress(&hint); 175 | for i in 0..decompressed_poly.coeffs.len() { 176 | assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]); 177 | } 178 | 179 | let e4 = (109_usize).to_scalar(); /* 4^3 + 2*4^2 + 3*4 + 1 */ 180 | assert_eq!(poly.evaluate(&(4_usize).to_scalar()), e4); 181 | } 182 | } 183 | --------------------------------------------------------------------------------