├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── ci.yml │ └── linkify_changelog.yml ├── .gitignore ├── AUTHORS ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── bench-templates ├── Cargo.toml └── src │ └── lib.rs ├── poly-commit ├── Cargo.toml ├── README.md ├── benches │ ├── brakedown_ml_times.rs │ ├── hyrax_times.rs │ ├── ipa_times.rs │ ├── ligero_ml_times.rs │ └── size.rs └── src │ ├── constraints.rs │ ├── data_structures.rs │ ├── error.rs │ ├── hyrax │ ├── data_structures.rs │ ├── mod.rs │ ├── tests.rs │ └── utils.rs │ ├── ipa_pc │ ├── data_structures.rs │ └── mod.rs │ ├── kzg10 │ ├── data_structures.rs │ └── mod.rs │ ├── lib.rs │ ├── linear_codes │ ├── brakedown.rs │ ├── data_structures.rs │ ├── ligero.rs │ ├── mod.rs │ ├── multilinear_brakedown │ │ ├── mod.rs │ │ └── tests.rs │ ├── multilinear_ligero │ │ ├── mod.rs │ │ └── tests.rs │ ├── univariate_ligero │ │ ├── mod.rs │ │ └── tests.rs │ └── utils.rs │ ├── marlin │ ├── marlin_pc │ │ ├── data_structures.rs │ │ └── mod.rs │ ├── marlin_pst13_pc │ │ ├── combinations.rs │ │ ├── data_structures.rs │ │ └── mod.rs │ └── mod.rs │ ├── multilinear_pc │ ├── data_structures.rs │ └── mod.rs │ ├── optional_rng.rs │ ├── sonic_pc │ ├── data_structures.rs │ └── mod.rs │ ├── streaming_kzg │ ├── data_structures.rs │ ├── mod.rs │ ├── space.rs │ ├── tests.rs │ └── time.rs │ └── utils.rs └── scripts ├── install-hook.sh └── linkify_changelog.py
/.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @arkworks-rs/maintainers
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Create a report to help us squash bugs! 4 | 5 | --- 6 | 7 | 12 | 13 | ## Summary of Bug 14 | 15 | 16 | 17 | ## Version 18 | 19 | 20 | 21 | ## Steps to Reproduce 22 | 23 | 24 | 25 | 26 |
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Create a proposal to request a feature 4 | 5 | --- 6 | 7 | 13 | 14 | ## Summary 15 | 16 | 17 | 18 | ## Problem Definition 19 | 20 | 23 | 24 | ## Proposal 25 | 26 | 27 | 28 | ____ 29 | 30 | #### For Admin Use 31 | 32 | - [ ] Not duplicate issue 33 | - [ ] Appropriate labels applied 34 | - [ ] Appropriate contributors tagged 35 | - [ ] Contributor assigned/self-assigned 36 |
-------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | ## Description 8 | 9 | 12 | 13 | closes: #XXXX 14 | 15 | --- 16 | 17 | Before we can merge this PR, please make sure that all the following items have been 18 | checked off. If any of the checklist items are not applicable, please leave them but 19 | write a little note why. 20 | 21 | - [ ] Targeted PR against correct branch (master) 22 | - [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work.
23 | - [ ] Wrote unit tests 24 | - [ ] Updated relevant documentation in the code 25 | - [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` 26 | - [ ] Re-reviewed `Files changed` in the Github PR explorer 27 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | merge_group: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | env: 9 | RUST_BACKTRACE: 1 10 | 11 | jobs: 12 | style: 13 | name: Check Style 14 | runs-on: ubuntu-latest 15 | steps: 16 | 17 | - name: Checkout 18 | uses: actions/checkout@v1 19 | - name: Install Rust 20 | uses: actions-rs/toolchain@v1 21 | with: 22 | profile: minimal 23 | toolchain: stable 24 | override: true 25 | components: rustfmt 26 | 27 | - name: cargo fmt --check 28 | uses: actions-rs/cargo@v1 29 | with: 30 | command: fmt 31 | args: --all -- --check 32 | 33 | test: 34 | name: Test 35 | runs-on: ubuntu-latest 36 | env: 37 | RUSTFLAGS: -Dwarnings 38 | strategy: 39 | matrix: 40 | rust: 41 | - stable 42 | - nightly 43 | steps: 44 | - name: Checkout 45 | uses: actions/checkout@v2 46 | 47 | - name: Install Rust (${{ matrix.rust }}) 48 | uses: actions-rs/toolchain@v1 49 | with: 50 | profile: minimal 51 | toolchain: ${{ matrix.rust }} 52 | override: true 53 | 54 | - name: Check examples 55 | uses: actions-rs/cargo@v1 56 | with: 57 | command: check 58 | args: --examples --all 59 | 60 | - name: Check examples with all features on stable 61 | uses: actions-rs/cargo@v1 62 | with: 63 | command: check 64 | args: --examples --all-features --all 65 | if: matrix.rust == 'stable' 66 | 67 | - name: Check benchmarks on nightly 68 | uses: actions-rs/cargo@v1 69 | with: 70 | command: check 71 | args: --all-features --examples --all --benches 72 | if: matrix.rust == 'nightly' 73 | 74 | - name: Test 75 | uses: actions-rs/cargo@v1 76 | with: 77 | command: test 78 | args: --release 79 | 80 | check_no_std: 81 | name: Check no_std 82 | runs-on: ubuntu-latest 83 | steps: 84 | - name: Checkout 85 | uses: actions/checkout@v2 86 | 87 | - name: Install Rust (${{ matrix.rust }}) 88 | uses: actions-rs/toolchain@v1 89 | with: 90 | toolchain: stable 91 | target: aarch64-unknown-none 92 | override: true 93 | 94 | - name: Build 95 | uses: actions-rs/cargo@v1 96 | with: 97 | use-cross: true 98 | command: build 99 | args: --workspace --no-default-features --target aarch64-unknown-none --exclude ark-pcs-bench-templates 100 | 101 | - name: Check 102 | uses: actions-rs/cargo@v1 103 | with: 104 | use-cross: true 105 | command: check 106 | args: --workspace --examples --no-default-features --target aarch64-unknown-none --exclude ark-pcs-bench-templates 107 | -------------------------------------------------------------------------------- /.github/workflows/linkify_changelog.yml: -------------------------------------------------------------------------------- 1 | name: Linkify Changelog 2 | 3 | on: 4 | workflow_dispatch 5 | 6 | jobs: 7 | linkify: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout 11 | uses: actions/checkout@v2 12 | - name: Add links 13 | run: python3 scripts/linkify_changelog.py CHANGELOG.md 14 | - name: Commit 15 | run: | 16 | git config user.name github-actions 17 | git config user.email github-actions@github.com 18 | git add . 
19 | git commit -m "Linkify Changelog" 20 | git push
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | .DS_Store 4 | .idea 5 | *.iml 6 | *.ipynb_checkpoints 7 | *.pyc 8 | *.sage.py 9 | params 10 |
-------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Alessandro Chiesa 2 | Yuncong Hu 3 | William Lin 4 | Mary Maller 5 | Pratyush Mishra 6 | Noah Vesely 7 | Nicholas Ward 8 |
-------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## Pending 4 | 5 | ### Breaking changes 6 | 7 | - [\#112](https://github.com/arkworks-rs/poly-commit/pull/112) Upgrade all dependencies to `0.4`. 8 | - [\#82](https://github.com/arkworks-rs/poly-commit/pull/82) The argument `opening_challenge: F` of `open` and 9 | `check` has been changed to `opening_challenges: &mut ChallengeGenerator` (a sketch of the change appears at the end of this file). 10 | 11 | ### Features 12 | 13 | - [\#82](https://github.com/arkworks-rs/poly-commit/pull/82) Add a multivariate opening challenge strategy. Integrate with the sponge API. 14 | 15 | ### Improvements 16 | - [\#152](https://github.com/arkworks-rs/poly-commit/issues/152) Expose `kzg10::open_with_witness_polynomial` and `open` downstream. 17 | 18 | ### Bug fixes 19 | 20 | ## v0.3.0 21 | 22 | ### Breaking changes 23 | 24 | - [\#78](https://github.com/arkworks-rs/poly-commit/pull/78) Fix `MarlinPC`'s `CommitterKey` to return the correct `supported_degree`. 25 | 26 | ### Features 27 | 28 | ### Improvements 29 | 30 | ### Bug fixes 31 | 32 | ## v0.2.0 33 | 34 | - Initial release of `ark-poly-commit`.
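_A sketch of the \#82 signature change above (illustrative only)._

Before \#82, `open`/`check` received a single `opening_challenge: F` by value; afterwards they receive `opening_challenges: &mut ChallengeGenerator`, from which any number of challenges can be squeezed during one opening. The type below is a hypothetical stand-in for such a sponge-backed generator, not the crate's actual `ChallengeGenerator`; only the calling convention is the point:

```rust
use ark_crypto_primitives::sponge::CryptographicSponge;
use ark_ff::PrimeField;
use core::marker::PhantomData;

/// Hypothetical stand-in illustrating what a sponge-backed challenge
/// generator does: callers squeeze fresh challenges on demand instead of
/// receiving one fixed challenge by value.
pub struct IllustrativeChallengeGen<F: PrimeField, S: CryptographicSponge> {
    sponge: S,
    _field: PhantomData<F>,
}

impl<F: PrimeField, S: CryptographicSponge> IllustrativeChallengeGen<F, S> {
    pub fn new(sponge: S) -> Self {
        Self { sponge, _field: PhantomData }
    }

    /// Squeeze the next opening challenge from the underlying sponge.
    pub fn next_challenge(&mut self) -> F {
        self.sponge.squeeze_field_elements::<F>(1)[0]
    }
}
```

Under this convention, a scheme's `open` takes `&mut` access to the generator and calls `next_challenge()` once per challenge it needs, so its signature no longer changes when the number of challenges does.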
35 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["poly-commit", "bench-templates"] 3 | resolver = "2" 4 | 5 | 6 | [workspace.package] 7 | version = "0.5.0" 8 | authors = ["arkworks contributors"] 9 | description = "A library for constructing polynomial commitment schemes for use in zkSNARKs" 10 | repository = "https://github.com/arkworks-rs/poly-commit" 11 | documentation = "https://docs.rs/ark-poly-commit/" 12 | keywords = ["cryptography", "commitments", "elliptic-curves", "pairing"] 13 | include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] 14 | categories = ["cryptography"] 15 | license = "MIT/Apache-2.0" 16 | edition = "2018" 17 | 18 | [workspace.dependencies] 19 | ark-serialize = { version = "0.5.0", default-features = false } 20 | ark-ff = { version = "0.5.0", default-features = false } 21 | ark-ec = { version = "0.5.0", default-features = false } 22 | ark-poly = { version = "0.5.0", default-features = false } 23 | ark-crypto-primitives = { version = "0.5.0", default-features = false } 24 | ark-std = { version = "0.5.0", default-features = false } 25 | ark-relations = { version = "0.5.0", default-features = false } 26 | ark-r1cs-std = { version = "0.5.0", default-features = false } 27 | rand_chacha = { version = "0.3.0", default-features = false } 28 | 29 | [profile.release] 30 | opt-level = 3 31 | lto = "thin" 32 | incremental = true 33 | debug = true 34 | 35 | [profile.test] 36 | opt-level = 3 37 | debug-assertions = true 38 | incremental = true 39 | debug = true 40 | 41 | # [patch.crates-io] 42 | # ark-std = { git = "https://github.com/arkworks-rs/std/" } 43 | # ark-ff = { git = "https://github.com/arkworks-rs/algebra/" } 44 | # ark-ec = { git = "https://github.com/arkworks-rs/algebra/" } 45 | # ark-serialize = { git = "https://github.com/arkworks-rs/algebra/" } 46 | # ark-poly = { git = "https://github.com/arkworks-rs/algebra/" } 47 | 48 | # ark-crypto-primitives = { git = "https://github.com/arkworks-rs/crypto-primitives/" } 49 | # ark-r1cs-std = { git = "https://github.com/arkworks-rs/r1cs-std/" } 50 | 51 | # ark-bls12-377 = { git = "https://github.com/arkworks-rs/algebra/" } 52 | # ark-bls12-381 = { git = "https://github.com/arkworks-rs/algebra/" } 53 | # ark-bn254 = { git = "https://github.com/arkworks-rs/algebra/" } 54 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /bench-templates/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ark-pcs-bench-templates" 3 | version.workspace = true 4 | authors.workspace = true 5 | repository.workspace = true 6 | categories.workspace = true 7 | include.workspace = true 8 | license.workspace = true 9 | edition.workspace = true 10 | publish = false 11 | 12 | [dependencies] 13 | ark-poly-commit = { path = "../poly-commit", default-features = false } 14 | ark-serialize = { workspace = true, features = [ "derive" ] } 15 | ark-ff.workspace = true 16 | ark-ec.workspace = true 17 | ark-poly.workspace = true 18 | ark-crypto-primitives = { workspace = true, features = ["sponge", "merkle_tree"] } 19 | ark-std.workspace = true 20 | rand_chacha.workspace = true 21 | 22 | criterion = { version = "0.5", default-features = false } 23 | paste = "1.0" 24 | -------------------------------------------------------------------------------- /bench-templates/src/lib.rs: -------------------------------------------------------------------------------- 1 | use ark_crypto_primitives::{ 2 | crh::{sha256::digest::Digest, CRHScheme}, 3 | sponge::{ 4 | poseidon::{PoseidonConfig, PoseidonSponge}, 5 | CryptographicSponge, 6 | }, 7 | }; 8 | use ark_ff::PrimeField; 9 | use ark_poly::Polynomial; 10 | use ark_serialize::{CanonicalSerialize, Compress}; 11 | use ark_std::{test_rng, UniformRand}; 12 | use rand_chacha::{ 13 | rand_core::{RngCore, SeedableRng}, 14 | ChaCha20Rng, 15 | }; 16 | 17 | use core::time::Duration; 18 | use std::{borrow::Borrow, marker::PhantomData, time::Instant}; 19 | 20 | use ark_poly_commit::{to_bytes, LabeledPolynomial, PolynomialCommitment}; 21 | 22 | pub 
use criterion::*; 23 | pub use paste::paste; 24 | 25 | /// Measure the time cost of `method` (i.e., commit/open/verify) of a 26 | /// multilinear PCS for all `num_vars` specified in `nv_list`. 27 | /// `rand_poly` is a function that outputs a random multilinear polynomial. 28 | /// `rand_point` is a function that outputs a random point in the domain of polynomial. 29 | pub fn bench_pcs_method, PCS: PolynomialCommitment>( 30 | c: &mut Criterion, 31 | nv_list: Vec, 32 | msg: &str, 33 | method: impl Fn( 34 | &PCS::CommitterKey, 35 | &PCS::VerifierKey, 36 | usize, 37 | fn(usize, &mut ChaCha20Rng) -> P, 38 | fn(usize, &mut ChaCha20Rng) -> P::Point, 39 | ) -> Duration, 40 | rand_poly: fn(usize, &mut ChaCha20Rng) -> P, 41 | rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, 42 | ) { 43 | let mut group = c.benchmark_group(msg); 44 | let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 45 | 46 | for num_vars in nv_list { 47 | let pp = PCS::setup(num_vars, Some(num_vars), rng).unwrap(); 48 | let (ck, vk) = PCS::trim(&pp, num_vars, num_vars, None).unwrap(); 49 | 50 | group.bench_with_input( 51 | BenchmarkId::from_parameter(num_vars), 52 | &num_vars, 53 | |b, num_vars| { 54 | b.iter_custom(|i| { 55 | let mut time = Duration::from_nanos(0); 56 | for _ in 0..i { 57 | time += method(&ck, &vk, *num_vars, rand_poly, rand_point); 58 | } 59 | time 60 | }); 61 | }, 62 | ); 63 | } 64 | 65 | group.finish(); 66 | } 67 | 68 | /// Report the time cost of a commitment 69 | pub fn commit, PCS: PolynomialCommitment>( 70 | ck: &PCS::CommitterKey, 71 | _vk: &PCS::VerifierKey, 72 | num_vars: usize, 73 | rand_poly: fn(usize, &mut ChaCha20Rng) -> P, 74 | _rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, 75 | ) -> Duration { 76 | let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 77 | 78 | let labeled_poly = 79 | LabeledPolynomial::new("test".to_string(), rand_poly(num_vars, rng), None, None); 80 | 81 | let start = Instant::now(); 82 | let (_, _) = PCS::commit(&ck, [&labeled_poly], Some(rng)).unwrap(); 83 | start.elapsed() 84 | } 85 | 86 | /// Report the size of a commitment 87 | pub fn commitment_size, PCS: PolynomialCommitment>( 88 | num_vars: usize, 89 | rand_poly: fn(usize, &mut ChaCha20Rng) -> P, 90 | ) -> usize { 91 | let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 92 | 93 | let pp = PCS::setup(num_vars, Some(num_vars), rng).unwrap(); 94 | 95 | let (ck, _) = PCS::trim(&pp, num_vars, num_vars, None).unwrap(); 96 | 97 | let labeled_poly = 98 | LabeledPolynomial::new("test".to_string(), rand_poly(num_vars, rng), None, None); 99 | 100 | let (coms, _) = PCS::commit(&ck, [&labeled_poly], Some(rng)).unwrap(); 101 | 102 | coms[0].commitment().serialized_size(Compress::No) 103 | } 104 | 105 | /// Report the time cost of an opening 106 | pub fn open( 107 | ck: &PCS::CommitterKey, 108 | _vk: &PCS::VerifierKey, 109 | num_vars: usize, 110 | rand_poly: fn(usize, &mut ChaCha20Rng) -> P, 111 | rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, 112 | ) -> Duration 113 | where 114 | F: PrimeField, 115 | P: Polynomial, 116 | PCS: PolynomialCommitment, 117 | { 118 | let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 119 | 120 | let labeled_poly = 121 | LabeledPolynomial::new("test".to_string(), rand_poly(num_vars, rng), None, None); 122 | 123 | let (coms, states) = PCS::commit(&ck, [&labeled_poly], Some(rng)).unwrap(); 124 | let point = rand_point(num_vars, rng); 125 | 126 | let start = Instant::now(); 127 | let _ = PCS::open( 128 | &ck, 129 | [&labeled_poly], 130 | &coms, 131 | &point, 132 | 
&mut test_sponge::(), 133 | &states, 134 | Some(rng), 135 | ) 136 | .unwrap(); 137 | start.elapsed() 138 | } 139 | 140 | /// Report the size of a proof 141 | pub fn proof_size(num_vars: usize, rand_poly: fn(usize, &mut ChaCha20Rng) -> P) -> usize 142 | where 143 | F: PrimeField, 144 | P: Polynomial, 145 | PCS: PolynomialCommitment, 146 | P::Point: UniformRand, 147 | { 148 | let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 149 | 150 | let pp = PCS::setup(num_vars, Some(num_vars), rng).unwrap(); 151 | 152 | let (ck, _) = PCS::trim(&pp, num_vars, num_vars, None).unwrap(); 153 | let labeled_poly = 154 | LabeledPolynomial::new("test".to_string(), rand_poly(num_vars, rng), None, None); 155 | 156 | let (coms, states) = PCS::commit(&ck, [&labeled_poly], Some(rng)).unwrap(); 157 | let point = P::Point::rand(rng); 158 | 159 | let proofs = PCS::open( 160 | &ck, 161 | [&labeled_poly], 162 | &coms, 163 | &point, 164 | &mut test_sponge::(), 165 | &states, 166 | Some(rng), 167 | ) 168 | .unwrap(); 169 | 170 | let bproof: PCS::BatchProof = vec![proofs].into(); 171 | 172 | bproof.serialized_size(Compress::No) 173 | } 174 | 175 | /// Report the time cost of a verification 176 | pub fn verify( 177 | ck: &PCS::CommitterKey, 178 | vk: &PCS::VerifierKey, 179 | num_vars: usize, 180 | rand_poly: fn(usize, &mut ChaCha20Rng) -> P, 181 | rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, 182 | ) -> Duration 183 | where 184 | F: PrimeField, 185 | P: Polynomial, 186 | PCS: PolynomialCommitment, 187 | { 188 | let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 189 | 190 | let labeled_poly = 191 | LabeledPolynomial::new("test".to_string(), rand_poly(num_vars, rng), None, None); 192 | 193 | let (coms, states) = PCS::commit(&ck, [&labeled_poly], Some(rng)).unwrap(); 194 | let point = rand_point(num_vars, rng); 195 | let claimed_eval = labeled_poly.evaluate(&point); 196 | let proof = PCS::open( 197 | &ck, 198 | [&labeled_poly], 199 | &coms, 200 | &point, 201 | &mut test_sponge::(), 202 | &states, 203 | Some(rng), 204 | ) 205 | .unwrap(); 206 | 207 | let start = Instant::now(); 208 | PCS::check( 209 | &vk, 210 | &coms, 211 | &point, 212 | [claimed_eval], 213 | &proof, 214 | &mut test_sponge::(), 215 | None, 216 | ) 217 | .unwrap(); 218 | start.elapsed() 219 | } 220 | 221 | /*************** Auxiliary functions ***************/ 222 | 223 | fn test_sponge() -> PoseidonSponge { 224 | let full_rounds = 8; 225 | let partial_rounds = 31; 226 | let alpha = 17; 227 | 228 | let mds = vec![ 229 | vec![F::one(), F::zero(), F::one()], 230 | vec![F::one(), F::one(), F::zero()], 231 | vec![F::zero(), F::one(), F::one()], 232 | ]; 233 | 234 | let mut v = Vec::new(); 235 | let mut ark_rng = test_rng(); 236 | 237 | for _ in 0..(full_rounds + partial_rounds) { 238 | let mut res = Vec::new(); 239 | 240 | for _ in 0..3 { 241 | res.push(F::rand(&mut ark_rng)); 242 | } 243 | v.push(res); 244 | } 245 | let config = PoseidonConfig::new(full_rounds, partial_rounds, alpha, mds, v, 2, 1); 246 | PoseidonSponge::new(&config) 247 | } 248 | 249 | #[macro_export] 250 | macro_rules! 
bench_method { 251 | ($c:expr, $method:ident, $scheme_type:ty, $rand_poly:ident, $rand_point:ident) => { 252 | let scheme_type_str = stringify!($scheme_type); 253 | let bench_name = format!("{} {}", stringify!($method), scheme_type_str); 254 | bench_pcs_method::<_, _, $scheme_type>( 255 | $c, 256 | (MIN_NUM_VARS..MAX_NUM_VARS).step_by(2).collect(), 257 | &bench_name, 258 | $method::<_, _, $scheme_type>, 259 | $rand_poly::<_>, 260 | $rand_point::<_>, 261 | ); 262 | }; 263 | } 264 | 265 | #[macro_export] 266 | macro_rules! bench { 267 | ( 268 | $scheme_type:ty, $rand_poly:ident, $rand_point:ident 269 | ) => { 270 | fn bench_pcs(c: &mut Criterion) { 271 | bench_method!(c, commit, $scheme_type, $rand_poly, $rand_point); 272 | bench_method!(c, open, $scheme_type, $rand_poly, $rand_point); 273 | bench_method!(c, verify, $scheme_type, $rand_poly, $rand_point); 274 | } 275 | 276 | criterion_group!(benches, bench_pcs); 277 | 278 | paste! { 279 | criterion_main!( 280 | benches 281 | ); 282 | } 283 | }; 284 | } 285 | 286 | /**** Auxiliary methods for linear-code-based PCSs ****/ 287 | 288 | /// Needed for benches and tests. 289 | pub struct LeafIdentityHasher; 290 | 291 | impl CRHScheme for LeafIdentityHasher { 292 | type Input = Vec; 293 | type Output = Vec; 294 | type Parameters = (); 295 | 296 | fn setup(_: &mut R) -> Result { 297 | Ok(()) 298 | } 299 | 300 | fn evaluate>( 301 | _: &Self::Parameters, 302 | input: T, 303 | ) -> Result { 304 | Ok(input.borrow().to_vec().into()) 305 | } 306 | } 307 | 308 | /// Needed for benches and tests. 309 | pub struct FieldToBytesColHasher 310 | where 311 | F: PrimeField + CanonicalSerialize, 312 | D: Digest, 313 | { 314 | _phantom: PhantomData<(F, D)>, 315 | } 316 | 317 | impl CRHScheme for FieldToBytesColHasher 318 | where 319 | F: PrimeField + CanonicalSerialize, 320 | D: Digest, 321 | { 322 | type Input = Vec; 323 | type Output = Vec; 324 | type Parameters = (); 325 | 326 | fn setup(_rng: &mut R) -> Result { 327 | Ok(()) 328 | } 329 | 330 | fn evaluate>( 331 | _parameters: &Self::Parameters, 332 | input: T, 333 | ) -> Result { 334 | let mut dig = D::new(); 335 | dig.update(to_bytes!(input.borrow()).unwrap()); 336 | Ok(dig.finalize().to_vec()) 337 | } 338 | } 339 | -------------------------------------------------------------------------------- /poly-commit/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ark-poly-commit" 3 | version.workspace = true 4 | authors.workspace = true 5 | repository.workspace = true 6 | categories.workspace = true 7 | include.workspace = true 8 | license.workspace = true 9 | edition.workspace = true 10 | description.workspace = true 11 | 12 | [dependencies] 13 | ark-serialize = { workspace = true, features = [ "derive" ] } 14 | ark-ff.workspace = true 15 | ark-ec.workspace = true 16 | ark-poly.workspace = true 17 | ark-crypto-primitives = { workspace = true, features = ["sponge", "merkle_tree"] } 18 | ark-std.workspace = true 19 | ark-relations = { workspace = true, optional = true } 20 | ark-r1cs-std = { workspace = true, optional = true } 21 | 22 | blake2 = { version = "0.10", default-features = false } 23 | derivative = { version = "2", features = [ "use_core" ] } 24 | digest = "0.10" 25 | hashbrown = { version = "0.15", default-features = false, features = ["inline-more", "allocator-api2"], optional = true } 26 | rand = { version = "0.8.0", optional = true } 27 | rayon = { version = "1", optional = true } 28 | merlin = { version = "3.0.0", default-features = 
false } 29 | 30 | [[bench]] 31 | name = "ipa_times" 32 | path = "benches/ipa_times.rs" 33 | harness = false 34 | 35 | [[bench]] 36 | name = "brakedown_times" 37 | path = "benches/brakedown_ml_times.rs" 38 | harness = false 39 | 40 | [[bench]] 41 | name = "ligero_ml_times" 42 | path = "benches/ligero_ml_times.rs" 43 | harness = false 44 | 45 | [[bench]] 46 | name = "hyrax_times" 47 | path = "benches/hyrax_times.rs" 48 | harness = false 49 | 50 | [[bench]] 51 | name = "size" 52 | path = "benches/size.rs" 53 | harness = false 54 | 55 | [target.'cfg(all(target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", target_has_atomic = "ptr"))'.dependencies] 56 | ahash = { version = "0.8", default-features = false} 57 | 58 | [target.'cfg(not(all(target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", target_has_atomic = "ptr")))'.dependencies] 59 | fnv = { version = "1.0", default-features = false } 60 | 61 | [dev-dependencies] 62 | ark-ed-on-bls12-381 = { version = "0.5.0", default-features = false } 63 | ark-bls12-381 = { version = "0.5.0", default-features = false, features = [ "curve" ] } 64 | ark-bls12-377 = { version = "0.5.0", default-features = false, features = [ "curve" ] } 65 | ark-bn254 = { version = "0.5.0", default-features = false, features = [ "curve" ] } 66 | rand_chacha = { version = "0.3.0", default-features = false } 67 | ark-pcs-bench-templates = { path = "../bench-templates" } 68 | 69 | [target.'cfg(target_arch = "aarch64")'.dependencies] 70 | num-traits = { version = "0.2", default-features = false, features = ["libm"] } 71 | 72 | [features] 73 | default = [ "std", "parallel" ] 74 | std = [ "ark-ff/std", "ark-ec/std", "ark-poly/std", "ark-std/std", "ark-relations/std", "ark-serialize/std", "ark-crypto-primitives/std"] 75 | r1cs = [ "ark-relations", "ark-r1cs-std", "hashbrown", "ark-crypto-primitives/r1cs"] 76 | print-trace = [ "ark-std/print-trace" ] 77 | parallel = [ "std", "ark-ff/parallel", "ark-ec/parallel", "ark-poly/parallel", "ark-std/parallel", "rayon", "rand" ] 78 | -------------------------------------------------------------------------------- /poly-commit/README.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /poly-commit/benches/brakedown_ml_times.rs: -------------------------------------------------------------------------------- 1 | use ark_crypto_primitives::{ 2 | crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, 3 | merkle_tree::{ByteDigestConverter, Config}, 4 | }; 5 | use ark_pcs_bench_templates::*; 6 | use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; 7 | 8 | use ark_bn254::Fr; 9 | use ark_ff::PrimeField; 10 | 11 | use ark_poly_commit::linear_codes::{LinearCodePCS, MultilinearBrakedown}; 12 | use blake2::Blake2s256; 13 | use rand_chacha::ChaCha20Rng; 14 | 15 | // Brakedown PCS over BN254 16 | struct MerkleTreeParams; 17 | type LeafH = LeafIdentityHasher; 18 | type CompressH = Sha256; 19 | impl Config for MerkleTreeParams { 20 | type Leaf = Vec; 21 | 22 | type LeafDigest = ::Output; 23 | type LeafInnerDigestConverter = ByteDigestConverter; 24 | type InnerDigest = ::Output; 25 | 26 | type LeafHash = LeafH; 27 | type TwoToOneHash = CompressH; 28 | } 29 | 30 | pub type MLE = DenseMultilinearExtension; 31 | type MTConfig = MerkleTreeParams; 32 | type ColHasher = FieldToBytesColHasher; 33 | type Brakedown = LinearCodePCS< 
34 | MultilinearBrakedown, ColHasher>, 35 | F, 36 | MLE, 37 | MTConfig, 38 | ColHasher, 39 | >; 40 | 41 | fn rand_poly_brakedown_ml( 42 | num_vars: usize, 43 | rng: &mut ChaCha20Rng, 44 | ) -> DenseMultilinearExtension { 45 | DenseMultilinearExtension::rand(num_vars, rng) 46 | } 47 | 48 | fn rand_point_brakedown_ml(num_vars: usize, rng: &mut ChaCha20Rng) -> Vec { 49 | (0..num_vars).map(|_| F::rand(rng)).collect() 50 | } 51 | 52 | const MIN_NUM_VARS: usize = 12; 53 | const MAX_NUM_VARS: usize = 22; 54 | 55 | bench!( 56 | Brakedown, 57 | rand_poly_brakedown_ml, 58 | rand_point_brakedown_ml 59 | ); 60 | -------------------------------------------------------------------------------- /poly-commit/benches/hyrax_times.rs: -------------------------------------------------------------------------------- 1 | use ark_pcs_bench_templates::*; 2 | use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; 3 | 4 | use ark_bn254::{Fr, G1Affine}; 5 | use ark_ff::PrimeField; 6 | use ark_poly_commit::hyrax::HyraxPC; 7 | 8 | use rand_chacha::ChaCha20Rng; 9 | 10 | // Hyrax PCS over BN254 11 | type Hyrax254 = HyraxPC>; 12 | 13 | fn rand_poly_hyrax( 14 | num_vars: usize, 15 | rng: &mut ChaCha20Rng, 16 | ) -> DenseMultilinearExtension { 17 | DenseMultilinearExtension::rand(num_vars, rng) 18 | } 19 | 20 | fn rand_point_hyrax(num_vars: usize, rng: &mut ChaCha20Rng) -> Vec { 21 | (0..num_vars).map(|_| F::rand(rng)).collect() 22 | } 23 | 24 | const MIN_NUM_VARS: usize = 12; 25 | const MAX_NUM_VARS: usize = 22; 26 | 27 | bench!(Hyrax254, rand_poly_hyrax, rand_point_hyrax); 28 | -------------------------------------------------------------------------------- /poly-commit/benches/ipa_times.rs: -------------------------------------------------------------------------------- 1 | use ark_pcs_bench_templates::*; 2 | use ark_poly::DenseUVPolynomial; 3 | use blake2::Blake2s256; 4 | 5 | use ark_ed_on_bls12_381::{EdwardsAffine, Fr}; 6 | use ark_ff::PrimeField; 7 | use ark_poly::univariate::DensePolynomial as DenseUnivariatePoly; 8 | use ark_poly_commit::ipa_pc::InnerProductArgPC; 9 | 10 | use rand_chacha::ChaCha20Rng; 11 | 12 | type UniPoly = DenseUnivariatePoly; 13 | 14 | // IPA_PC over the JubJub curve with Blake2s as the hash function 15 | #[allow(non_camel_case_types)] 16 | type IPA_JubJub = InnerProductArgPC; 17 | 18 | fn rand_poly_ipa_pc(degree: usize, rng: &mut ChaCha20Rng) -> DenseUnivariatePoly { 19 | DenseUnivariatePoly::rand(degree, rng) 20 | } 21 | 22 | fn rand_point_ipa_pc(_: usize, rng: &mut ChaCha20Rng) -> F { 23 | F::rand(rng) 24 | } 25 | 26 | const MIN_NUM_VARS: usize = 10; 27 | const MAX_NUM_VARS: usize = 20; 28 | 29 | bench!(IPA_JubJub, rand_poly_ipa_pc, rand_point_ipa_pc); 30 | -------------------------------------------------------------------------------- /poly-commit/benches/ligero_ml_times.rs: -------------------------------------------------------------------------------- 1 | use ark_crypto_primitives::{ 2 | crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, 3 | merkle_tree::{ByteDigestConverter, Config}, 4 | }; 5 | use ark_pcs_bench_templates::*; 6 | use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; 7 | 8 | use ark_bn254::Fr; 9 | use ark_ff::PrimeField; 10 | 11 | use ark_poly_commit::linear_codes::{LinearCodePCS, MultilinearLigero}; 12 | use blake2::Blake2s256; 13 | use rand_chacha::ChaCha20Rng; 14 | 15 | // Ligero PCS over BN254 16 | struct MerkleTreeParams; 17 | type LeafH = LeafIdentityHasher; 18 | type CompressH = Sha256; 19 | impl Config for MerkleTreeParams { 20 | type 
Leaf = Vec; 21 | 22 | type LeafDigest = ::Output; 23 | type LeafInnerDigestConverter = ByteDigestConverter; 24 | type InnerDigest = ::Output; 25 | 26 | type LeafHash = LeafH; 27 | type TwoToOneHash = CompressH; 28 | } 29 | 30 | pub type MLE = DenseMultilinearExtension; 31 | type MTConfig = MerkleTreeParams; 32 | type ColHasher = FieldToBytesColHasher; 33 | type Ligero = LinearCodePCS< 34 | MultilinearLigero, ColHasher>, 35 | F, 36 | MLE, 37 | MTConfig, 38 | ColHasher, 39 | >; 40 | 41 | fn rand_poly_ligero_ml( 42 | num_vars: usize, 43 | rng: &mut ChaCha20Rng, 44 | ) -> DenseMultilinearExtension { 45 | DenseMultilinearExtension::rand(num_vars, rng) 46 | } 47 | 48 | fn rand_point_ligero_ml(num_vars: usize, rng: &mut ChaCha20Rng) -> Vec { 49 | (0..num_vars).map(|_| F::rand(rng)).collect() 50 | } 51 | 52 | const MIN_NUM_VARS: usize = 12; 53 | const MAX_NUM_VARS: usize = 22; 54 | 55 | bench!(Ligero, rand_poly_ligero_ml, rand_point_ligero_ml); 56 | -------------------------------------------------------------------------------- /poly-commit/benches/size.rs: -------------------------------------------------------------------------------- 1 | use ark_pcs_bench_templates::*; 2 | use ark_poly::DenseUVPolynomial; 3 | use blake2::Blake2s256; 4 | 5 | use ark_ed_on_bls12_381::{EdwardsAffine, Fr}; 6 | use ark_ff::PrimeField; 7 | use ark_poly::univariate::DensePolynomial as DenseUnivariatePoly; 8 | use ark_poly_commit::ipa_pc::InnerProductArgPC; 9 | 10 | use rand_chacha::ChaCha20Rng; 11 | 12 | type UniPoly = DenseUnivariatePoly; 13 | type PC = InnerProductArgPC; 14 | 15 | // IPA_PC over the JubJub curve with Blake2s as the hash function 16 | #[allow(non_camel_case_types)] 17 | type IPA_JubJub = PC; 18 | 19 | fn rand_poly_ipa_pc(degree: usize, rng: &mut ChaCha20Rng) -> DenseUnivariatePoly { 20 | DenseUnivariatePoly::rand(degree, rng) 21 | } 22 | 23 | const MIN_NUM_VARS: usize = 10; 24 | const MAX_NUM_VARS: usize = 20; 25 | 26 | fn main() { 27 | println!("\nIPA on JubJub: Commitment size"); 28 | for num_vars in (MIN_NUM_VARS..MAX_NUM_VARS).step_by(2) { 29 | println!( 30 | "\tnum_vars: {}, size: {} B", 31 | num_vars, 32 | commitment_size::<_, _, IPA_JubJub>(num_vars, rand_poly_ipa_pc) 33 | ); 34 | } 35 | 36 | println!("\nIPA on JubJub: Proof size"); 37 | for num_vars in (MIN_NUM_VARS..MAX_NUM_VARS).step_by(2) { 38 | println!( 39 | "\tnum_vars: {}, size: {} B", 40 | num_vars, 41 | proof_size::<_, _, IPA_JubJub>(num_vars, rand_poly_ipa_pc) 42 | ); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /poly-commit/src/constraints.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | data_structures::LabeledCommitment, BatchLCProof, LCTerm, LinearCombination, 3 | PCPreparedCommitment, PCPreparedVerifierKey, PolynomialCommitment, 4 | }; 5 | use ark_ff::PrimeField; 6 | use ark_poly::Polynomial; 7 | use ark_r1cs_std::{ 8 | fields::{emulated_fp::EmulatedFpVar, fp::FpVar}, 9 | prelude::*, 10 | }; 11 | use ark_relations::r1cs::{ConstraintSystemRef, Namespace, Result as R1CSResult, SynthesisError}; 12 | use ark_std::{ 13 | borrow::Borrow, 14 | cmp::{Eq, PartialEq}, 15 | hash::{BuildHasherDefault, Hash}, 16 | }; 17 | #[cfg(not(feature = "std"))] 18 | use ark_std::{string::String, vec::Vec}; 19 | use hashbrown::{HashMap, HashSet}; 20 | 21 | #[cfg(all( 22 | target_has_atomic = "8", 23 | target_has_atomic = "16", 24 | target_has_atomic = "32", 25 | target_has_atomic = "64", 26 | target_has_atomic = "ptr" 27 | ))] 28 | type 
DefaultHasher = ahash::AHasher; 29 | 30 | #[cfg(not(all( 31 | target_has_atomic = "8", 32 | target_has_atomic = "16", 33 | target_has_atomic = "32", 34 | target_has_atomic = "64", 35 | target_has_atomic = "ptr" 36 | )))] 37 | type DefaultHasher = fnv::FnvHasher; 38 | 39 | /// Define the minimal interface of prepared allocated structures. 40 | pub trait PrepareGadget: Sized { 41 | /// Prepare from an unprepared element. 42 | fn prepare(unprepared: &Unprepared) -> R1CSResult; 43 | } 44 | 45 | /// A coefficient of `LinearCombination`. 46 | #[derive(Clone)] 47 | pub enum LinearCombinationCoeffVar { 48 | /// Coefficient 1. 49 | One, 50 | /// Coefficient -1. 51 | MinusOne, 52 | /// Other coefficient, represented as a "emulated" field element. 53 | Var(EmulatedFpVar), 54 | } 55 | 56 | /// An allocated version of `LinearCombination`. 57 | #[derive(Clone)] 58 | pub struct LinearCombinationVar { 59 | /// The label. 60 | pub label: String, 61 | /// The linear combination of `(coeff, poly_label)` pairs. 62 | pub terms: Vec<(LinearCombinationCoeffVar, LCTerm)>, 63 | } 64 | 65 | impl 66 | AllocVar, BaseField> 67 | for LinearCombinationVar 68 | { 69 | fn new_variable( 70 | cs: impl Into>, 71 | val: impl FnOnce() -> Result, 72 | mode: AllocationMode, 73 | ) -> R1CSResult 74 | where 75 | T: Borrow>, 76 | { 77 | let LinearCombination { label, terms } = val()?.borrow().clone(); 78 | 79 | let ns = cs.into(); 80 | let cs = ns.cs(); 81 | 82 | let new_terms: Vec<(LinearCombinationCoeffVar, LCTerm)> = terms 83 | .iter() 84 | .map(|term| { 85 | let (f, lc_term) = term; 86 | 87 | let fg = 88 | EmulatedFpVar::new_variable(ark_relations::ns!(cs, "term"), || Ok(f), mode) 89 | .unwrap(); 90 | 91 | (LinearCombinationCoeffVar::Var(fg), lc_term.clone()) 92 | }) 93 | .collect(); 94 | 95 | Ok(Self { 96 | label, 97 | terms: new_terms, 98 | }) 99 | } 100 | } 101 | 102 | #[derive(Clone, Debug)] 103 | /// A collection of random data used in the polynomial commitment checking. 104 | pub struct PCCheckRandomDataVar { 105 | /// Opening challenges. 106 | /// The prover and the verifier MUST use the same opening challenges. 107 | pub opening_challenges: Vec>, 108 | /// Bit representations of the opening challenges. 109 | pub opening_challenges_bits: Vec>>, 110 | /// Batching random numbers. 111 | /// The verifier can choose these numbers freely, as long as they are random. 112 | pub batching_rands: Vec>, 113 | /// Bit representations of the batching random numbers. 114 | pub batching_rands_bits: Vec>>, 115 | } 116 | 117 | /// Describes the interface for a gadget for a `PolynomialCommitment` 118 | /// verifier. 119 | pub trait PCCheckVar< 120 | PCF: PrimeField, 121 | P: Polynomial, 122 | PC: PolynomialCommitment, 123 | ConstraintF: PrimeField, 124 | >: Clone 125 | { 126 | /// The prepared verifier key for the scheme; used to check an evaluation proof. 127 | type PreparedVerifierKey: PCPreparedVerifierKey + Clone; 128 | /// The prepared commitment to a polynomial. 129 | type PreparedCommitment: PCPreparedCommitment; 130 | /// An allocated version of `PC::VerifierKey`. 131 | type VerifierKeyVar: AllocVar + Clone; 132 | /// An allocated version of `PC::PreparedVerifierKey`. 133 | type PreparedVerifierKeyVar: AllocVar 134 | + Clone 135 | + PrepareGadget; 136 | /// An allocated version of `PC::Commitment`. 137 | type CommitmentVar: AllocVar + Clone; 138 | /// An allocated version of `PC::PreparedCommitment`. 
139 | type PreparedCommitmentVar: AllocVar 140 | + PrepareGadget 141 | + Clone; 142 | /// An allocated version of `LabeledCommitment`. 143 | type LabeledCommitmentVar: AllocVar, ConstraintF> + Clone; 144 | /// A prepared, allocated version of `LabeledCommitment`. 145 | type PreparedLabeledCommitmentVar: Clone; 146 | /// An allocated version of `PC::Proof`. 147 | type ProofVar: AllocVar + Clone; 148 | 149 | /// An allocated version of `PC::BatchLCProof`. 150 | type BatchLCProofVar: AllocVar, ConstraintF> + Clone; 151 | 152 | /// Add to `ConstraintSystemRef` new constraints that check that `proof_i` is a valid evaluation 153 | /// proof at `point_i` for the polynomial in `commitment_i`. 154 | fn batch_check_evaluations( 155 | cs: ConstraintSystemRef, 156 | verification_key: &Self::VerifierKeyVar, 157 | commitments: &[Self::LabeledCommitmentVar], 158 | query_set: &QuerySetVar, 159 | evaluations: &EvaluationsVar, 160 | proofs: &[Self::ProofVar], 161 | rand_data: &PCCheckRandomDataVar, 162 | ) -> R1CSResult>; 163 | 164 | /// Add to `ConstraintSystemRef` new constraints that conditionally check that `proof` is a valid evaluation 165 | /// proof at the points in `query_set` for the combinations `linear_combinations`. 166 | fn prepared_check_combinations( 167 | cs: ConstraintSystemRef, 168 | prepared_verification_key: &Self::PreparedVerifierKeyVar, 169 | linear_combinations: &[LinearCombinationVar], 170 | prepared_commitments: &[Self::PreparedLabeledCommitmentVar], 171 | query_set: &QuerySetVar, 172 | evaluations: &EvaluationsVar, 173 | proof: &Self::BatchLCProofVar, 174 | rand_data: &PCCheckRandomDataVar, 175 | ) -> R1CSResult>; 176 | 177 | /// Create the labeled commitment gadget from the commitment gadget 178 | fn create_labeled_commitment( 179 | label: String, 180 | commitment: Self::CommitmentVar, 181 | degree_bound: Option>, 182 | ) -> Self::LabeledCommitmentVar; 183 | 184 | /// Create the prepared labeled commitment gadget from the commitment gadget 185 | fn create_prepared_labeled_commitment( 186 | label: String, 187 | commitment: Self::PreparedCommitmentVar, 188 | degree_bound: Option>, 189 | ) -> Self::PreparedLabeledCommitmentVar; 190 | } 191 | 192 | #[derive(Clone, Hash, PartialEq, Eq)] 193 | /// A labeled point variable, for queries to a polynomial commitment. 194 | pub struct LabeledPointVar { 195 | /// The label of the point. 196 | /// MUST be a unique identifier in a query set. 197 | pub name: String, 198 | /// The point value. 199 | pub value: EmulatedFpVar, 200 | } 201 | 202 | /// An allocated version of `QuerySet`. 203 | #[derive(Clone)] 204 | pub struct QuerySetVar( 205 | pub HashSet< 206 | (String, LabeledPointVar), 207 | BuildHasherDefault, 208 | >, 209 | ); 210 | 211 | /// An allocated version of `Evaluations`. 
212 | #[derive(Clone)]
213 | pub struct EvaluationsVar<TargetField: PrimeField, BaseField: PrimeField>(
214 | pub HashMap<
215 | LabeledPointVar<TargetField, BaseField>,
216 | EmulatedFpVar<TargetField, BaseField>,
217 | BuildHasherDefault<DefaultHasher>,
218 | >,
219 | );
220 |
221 | impl<TargetField: PrimeField, BaseField: PrimeField> EvaluationsVar<TargetField, BaseField> {
222 | /// Find the evaluation result.
223 | pub fn get_lc_eval(
224 | &self,
225 | lc_string: &str,
226 | point: &EmulatedFpVar<TargetField, BaseField>,
227 | ) -> Result<EmulatedFpVar<TargetField, BaseField>, SynthesisError> {
228 | let key = LabeledPointVar::<TargetField, BaseField> {
229 | name: String::from(lc_string),
230 | value: point.clone(),
231 | };
232 | Ok(self.0.get(&key).map(|v| (*v).clone()).unwrap())
233 | }
234 | }
235 |
-------------------------------------------------------------------------------- /poly-commit/src/error.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = "std"))] 2 | use ark_std::string::String; 3 | 4 | /// The error type for `PolynomialCommitment`. 5 | #[derive(Debug)] 6 | pub enum Error { 7 | /// The query set contains a label for a polynomial that was not provided as 8 | /// input to `PC::open`. 9 | MissingPolynomial { 10 | /// The label of the missing polynomial. 11 | label: String, 12 | }, 13 | 14 | /// `Evaluations` does not contain an evaluation for the polynomial labelled 15 | /// `label` at a particular query. 16 | MissingEvaluation { 17 | /// The label of the missing polynomial. 18 | label: String, 19 | }, 20 | 21 | /// The LHS of the equation is empty. 22 | MissingLHS { 23 | /// The label of the equation. 24 | label: String, 25 | }, 26 | 27 | /// The provided polynomial was meant to be hiding, but `rng` was `None`. 28 | MissingRng, 29 | 30 | /// The degree provided in setup was too small; degree 0 polynomials 31 | /// are not supported. 32 | DegreeIsZero, 33 | 34 | /// The degree of the polynomial passed to `commit` or `open` 35 | /// was too large. 36 | TooManyCoefficients { 37 | /// The number of coefficients in the polynomial. 38 | num_coefficients: usize, 39 | /// The maximum number of powers provided in `Powers`. 40 | num_powers: usize, 41 | }, 42 | 43 | /// The hiding bound was not `None`, but it was zero. 44 | HidingBoundIsZero, 45 | 46 | /// The hiding bound was too large for the given `Powers`. 47 | HidingBoundToolarge { 48 | /// The hiding bound 49 | hiding_poly_degree: usize, 50 | /// The number of powers. 51 | num_powers: usize, 52 | }, 53 | 54 | /// The degree provided to `trim` was too large. 55 | TrimmingDegreeTooLarge, 56 | 57 | /// The provided `enforced_degree_bounds` was `Some<&[]>`. 58 | EmptyDegreeBounds, 59 | 60 | /// The provided equation contained multiple polynomials, of which at least one 61 | /// had a strict degree bound. 62 | EquationHasDegreeBounds(String), 63 | 64 | /// The required degree bound is not supported by the `ck`/`vk`. 65 | UnsupportedDegreeBound(usize), 66 | 67 | /// The degree bound for the `index`-th polynomial passed to `commit`, `open` 68 | /// or `check` was incorrect, that is, `degree_bound >= poly_degree` or 69 | /// `degree_bound <= max_degree`. 70 | IncorrectDegreeBound { 71 | /// Degree of the polynomial. 72 | poly_degree: usize, 73 | /// Degree bound. 74 | degree_bound: usize, 75 | /// Maximum supported degree. 76 | supported_degree: usize, 77 | /// Index of the offending polynomial. 78 | label: String, 79 | }, 80 | 81 | /// The inputs to `commit`, `open` or `verify` had incorrect lengths.
82 | IncorrectInputLength(String), 83 | 84 | /// An invalid number of variables was provided to `setup`. 85 | InvalidNumberOfVariables, 86 | 87 | /// The degree of the `index`-th polynomial passed to `commit`, `open` 88 | /// or `check` was incorrect, that is, `supported_degree <= poly_degree`. 89 | PolynomialDegreeTooLarge { 90 | /// Degree of the polynomial. 91 | poly_degree: usize, 92 | /// Maximum supported degree. 93 | supported_degree: usize, 94 | /// Index of the offending polynomial. 95 | label: String, 96 | }, 97 | 98 | /// This means a failure in verifying the commitment or the opening. 99 | InvalidCommitment, 100 | 101 | /// This means that during opening or verification, a commitment of incorrect 102 | /// size (for example, with an insufficient number of entries) was 103 | /// encountered. 104 | IncorrectCommitmentSize { 105 | /// Encountered commitment size 106 | encountered: usize, 107 | /// Expected commitment size 108 | expected: usize, 109 | }, 110 | 111 | /// For PCS which rely on Fiat-Shamir to be rendered non-interactive, 112 | /// these are errors that result from incorrect transcript manipulation. 113 | TranscriptError, 114 | 115 | /// This means the required soundness error bound is inherently impossible. 116 | /// E.g., the field is not big enough. 117 | InvalidParameters(String), 118 | 119 | /// Error resulting from hashing in linear-code-based PCS. 120 | HashingError, 121 | 122 | /// Shows that encoding is not feasible. 123 | EncodingError, 124 | 125 | /// This means a commitment with a certain label was matched with a 126 | /// polynomial that has a different label, which should not happen. 127 | MismatchedLabels { 128 | /// The label of the commitment 129 | commitment_label: String, 130 | /// The label of the polynomial 131 | polynomial_label: String, 132 | }, 133 | 134 | /// This means a multivariate polynomial with a certain number of variables 135 | /// was matched (for instance, during commitment, opening or verification) 136 | /// to a point with a different number of variables.
137 | MismatchedNumVars { 138 | /// The number of variables of the polynomial 139 | poly_nv: usize, 140 | /// The number of variables of the point 141 | point_nv: usize, 142 | }, 143 | } 144 | 145 | impl core::fmt::Display for Error { 146 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 147 | match self { 148 | Error::MissingPolynomial { label } => write!( 149 | f, 150 | "`QuerySet` refers to polynomial \"{}\", but it was not provided.", 151 | label 152 | ), 153 | Error::MissingEvaluation { label } => write!( 154 | f, 155 | "`QuerySet` refers to polynomial \"{}\", but `Evaluations` does not contain an evaluation for it.", 156 | label 157 | ), 158 | Error::MissingLHS { label } => { 159 | write!(f, "Equation \"{}\" does not have an LHS.", label) 160 | }, 161 | Error::MissingRng => write!(f, "hiding commitments require `Some(rng)`"), 162 | Error::DegreeIsZero => write!( 163 | f, 164 | "this scheme does not support committing to degree 0 polynomials" 165 | ), 166 | Error::TooManyCoefficients { 167 | num_coefficients, 168 | num_powers, 169 | } => write!( 170 | f, 171 | "the number of coefficients in the polynomial ({:?}) is greater than \ 172 | the maximum number of powers in `Powers` ({:?})", 173 | num_coefficients, num_powers 174 | ), 175 | Error::HidingBoundIsZero => write!( 176 | f, 177 | "this scheme does not support non-`None` hiding bounds that are 0" 178 | ), 179 | Error::HidingBoundToolarge { 180 | hiding_poly_degree, 181 | num_powers, 182 | } => write!( 183 | f, 184 | "the degree of the hiding poly ({:?}) is not less than the maximum number of powers in `Powers` ({:?})", 185 | hiding_poly_degree, num_powers 186 | ), 187 | Error::TrimmingDegreeTooLarge => { 188 | write!(f, "the degree provided to `trim` was too large") 189 | } 190 | Error::EmptyDegreeBounds => { 191 | write!(f, "provided `enforced_degree_bounds` was `Some<&[]>`") 192 | } 193 | Error::EquationHasDegreeBounds(e) => write!( 194 | f, 195 | "the equation \"{}\" contained degree-bounded polynomials", 196 | e 197 | ), 198 | Error::UnsupportedDegreeBound(bound) => write!( 199 | f, 200 | "the degree bound ({:?}) is not supported by the parameters", 201 | bound, 202 | ), 203 | Error::IncorrectDegreeBound { 204 | poly_degree, 205 | degree_bound, 206 | supported_degree, 207 | label, 208 | } => write!( 209 | f, 210 | "the degree bound ({:?}) for the polynomial {} \ 211 | (having degree {:?}) is greater than the maximum \ 212 | supported degree ({:?})", 213 | degree_bound, label, poly_degree, supported_degree 214 | ), 215 | Error::InvalidNumberOfVariables => write!( 216 | f, 217 | "An invalid number of variables was provided to `setup`" 218 | ), 219 | Error::PolynomialDegreeTooLarge { 220 | poly_degree, 221 | supported_degree, 222 | label, 223 | } => write!( 224 | f, 225 | "the polynomial {} has degree {:?}, but parameters only \ 226 | support up to degree ({:?})", label, poly_degree, supported_degree 227 | ), 228 | Error::IncorrectInputLength(err) => write!(f, "{}", err), 229 | Error::InvalidCommitment => write!(f, "Failed to verify the commitment"), 230 | Error::IncorrectCommitmentSize { 231 | encountered, 232 | expected, 233 | } => write!( 234 | f, 235 | "the commitment has size {}, but size {} was expected", 236 | encountered, expected 237 | ), 238 | Error::TranscriptError => write!(f, "Incorrect transcript manipulation"), 239 | Error::InvalidParameters(err) => write!(f, "{}", err), 240 | Error::HashingError => write!(f, "Error resulting from hashing"), 241 | Error::EncodingError => write!(f, "Encoding
failed"), 242 | Error::MismatchedLabels { commitment_label, polynomial_label } => 243 | write!(f, "Mismatched labels: commitment label: {}, polynomial label: {}", 244 | commitment_label, 245 | polynomial_label 246 | ), 247 | Error::MismatchedNumVars { poly_nv, point_nv } => 248 | write!(f, "Mismatched number of variables: polynomial has {}, point has {}", 249 | poly_nv, 250 | point_nv, 251 | ), 252 | } 253 | } 254 | } 255 | 256 | impl ark_std::error::Error for Error {} 257 | -------------------------------------------------------------------------------- /poly-commit/src/hyrax/data_structures.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | utils::Matrix, PCCommitment, PCCommitmentState, PCCommitterKey, PCUniversalParams, 3 | PCVerifierKey, 4 | }; 5 | use ark_ec::AffineRepr; 6 | use ark_ff::PrimeField; 7 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 8 | use ark_std::{rand::RngCore, vec::Vec}; 9 | 10 | /// `UniversalParams` amounts to a Pederson commitment key of sufficient length 11 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 12 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 13 | pub struct HyraxUniversalParams { 14 | /// A list of generators of the group. 15 | pub com_key: Vec, 16 | /// A generator of the group. 17 | pub h: G, 18 | } 19 | 20 | impl PCUniversalParams for HyraxUniversalParams { 21 | fn max_degree(&self) -> usize { 22 | // Only MLEs are supported 23 | 1 24 | } 25 | } 26 | 27 | /// The committer key, which coincides with the universal parameters 28 | pub type HyraxCommitterKey = HyraxUniversalParams; 29 | 30 | /// The verifier key, which coincides with the committer key 31 | pub type HyraxVerifierKey = HyraxCommitterKey; 32 | 33 | impl PCCommitterKey for HyraxCommitterKey { 34 | fn max_degree(&self) -> usize { 35 | // Only MLEs are supported 36 | 1 37 | } 38 | fn supported_degree(&self) -> usize { 39 | // Only MLEs are supported 40 | 1 41 | } 42 | } 43 | 44 | impl PCVerifierKey for HyraxVerifierKey { 45 | // Only MLEs are supported 46 | fn max_degree(&self) -> usize { 47 | 1 48 | } 49 | // Only MLEs are supported 50 | fn supported_degree(&self) -> usize { 51 | 1 52 | } 53 | } 54 | 55 | /// Hyrax commitment to a polynomial consisting of one multi-commit per row of 56 | /// the coefficient matrix 57 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 58 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 59 | pub struct HyraxCommitment { 60 | /// A list of multi-commits to each row of the matrix representing the 61 | /// polynomial. 
62 | pub row_coms: Vec, 63 | } 64 | 65 | impl PCCommitment for HyraxCommitment { 66 | #[inline] 67 | fn empty() -> Self { 68 | HyraxCommitment { 69 | row_coms: Vec::new(), 70 | } 71 | } 72 | 73 | // The degree bound is always 1, since only multilinear polynomials are 74 | // supported 75 | fn has_degree_bound(&self) -> bool { 76 | true 77 | } 78 | } 79 | 80 | pub(crate) type HyraxRandomness = Vec; 81 | 82 | /// Hyrax Commitment state: matrix of polynomial coefficients and list of random 83 | /// scalars used in each of the row-wise Pedersen commitments 84 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 85 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 86 | pub struct HyraxCommitmentState 87 | where 88 | F: PrimeField, 89 | { 90 | pub(crate) randomness: HyraxRandomness, 91 | pub(crate) mat: Matrix, 92 | } 93 | 94 | /// A vector of scalars, each of which multiplies the distinguished group 95 | /// element in the Pedersen commitment key for a different commitment 96 | impl PCCommitmentState for HyraxCommitmentState { 97 | type Randomness = HyraxRandomness; 98 | fn empty() -> Self { 99 | unimplemented!() 100 | } 101 | 102 | fn rand( 103 | num_queries: usize, 104 | _has_degree_bound: bool, 105 | _num_vars: Option, 106 | rng: &mut R, 107 | ) -> Self::Randomness { 108 | (0..num_queries).map(|_| F::rand(rng)).collect() 109 | } 110 | } 111 | 112 | /// Proof of a Hyrax opening, containing various commitments 113 | /// and auxiliary values generated randomly during the opening 114 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 115 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 116 | pub struct HyraxProof { 117 | /// Commitment to the evaluation of the polynomial at the requested point 118 | pub com_eval: G, 119 | /// Commitment to auxiliary random vector `d` 120 | pub com_d: G, 121 | /// Commitment to auxiliary random scalar `b` 122 | pub com_b: G, 123 | /// Auxiliary random vector 124 | pub z: Vec, 125 | /// Auxiliary random scalar 126 | pub z_d: G::ScalarField, 127 | /// Auxiliary random scalar 128 | pub z_b: G::ScalarField, 129 | } 130 | -------------------------------------------------------------------------------- /poly-commit/src/hyrax/tests.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | hyrax::HyraxPC, tests::*, utils::test_sponge, LabeledPolynomial, PolynomialCommitment, 3 | }; 4 | use ark_bls12_377::G1Affine; 5 | use ark_ec::AffineRepr; 6 | use ark_ed_on_bls12_381::EdwardsAffine; 7 | use ark_ff::PrimeField; 8 | use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; 9 | use ark_std::test_rng; 10 | use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; 11 | 12 | // The test structure is largely taken from the multilinear_ligero module 13 | // inside this crate 14 | 15 | // ****************** types ****************** 16 | 17 | type Fq = ::ScalarField; 18 | type Hyrax377 = HyraxPC>; 19 | 20 | type Fr = ::ScalarField; 21 | type Hyrax381 = HyraxPC>; 22 | 23 | // ******** auxiliary test functions ******** 24 | 25 | fn rand_poly( 26 | _: usize, // degree: unused 27 | num_vars: Option, 28 | rng: &mut ChaCha20Rng, 29 | ) -> DenseMultilinearExtension { 30 | match num_vars { 31 | Some(n) => DenseMultilinearExtension::rand(n, rng), 32 | None => panic!("Must specify the number of variables"), 33 | } 34 | } 35 | 36 | fn constant_poly( 37 | _: usize, // degree: unused 38 | num_vars: Option, 39 | rng: &mut ChaCha20Rng, 40 | ) ->
DenseMultilinearExtension { 41 | match num_vars { 42 | Some(0) => DenseMultilinearExtension::rand(0, rng), 43 | _ => panic!("Must specify the number of variables: 0"), 44 | } 45 | } 46 | 47 | fn rand_point(num_vars: Option, rng: &mut ChaCha20Rng) -> Vec { 48 | match num_vars { 49 | Some(n) => (0..n).map(|_| F::rand(rng)).collect(), 50 | None => panic!("Must specify the number of variables"), 51 | } 52 | } 53 | 54 | // ****************** tests ****************** 55 | 56 | #[test] 57 | fn test_hyrax_construction() { 58 | // Desired number of variables (must be even!) 59 | let n = 8; 60 | 61 | let chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 62 | 63 | let pp = Hyrax381::setup(1, Some(n), chacha).unwrap(); 64 | 65 | let (ck, vk) = Hyrax381::trim(&pp, 1, 1, None).unwrap(); 66 | 67 | let l_poly = LabeledPolynomial::new( 68 | "test_poly".to_string(), 69 | rand_poly::(0, Some(n), chacha), 70 | None, 71 | None, 72 | ); 73 | 74 | let (c, rands) = Hyrax381::commit(&ck, &[l_poly.clone()], Some(chacha)).unwrap(); 75 | 76 | let point: Vec = rand_point(Some(n), chacha); 77 | let value = l_poly.evaluate(&point); 78 | 79 | // Dummy argument 80 | let mut test_sponge = test_sponge::(); 81 | 82 | let proof = Hyrax381::open( 83 | &ck, 84 | &[l_poly], 85 | &c, 86 | &point, 87 | &mut (test_sponge.clone()), 88 | &rands, 89 | Some(chacha), 90 | ) 91 | .unwrap(); 92 | 93 | assert!(Hyrax381::check( 94 | &vk, 95 | &c, 96 | &point, 97 | [value], 98 | &proof, 99 | &mut test_sponge, 100 | Some(chacha), 101 | ) 102 | .unwrap()); 103 | } 104 | 105 | #[test] 106 | fn hyrax_single_poly_test() { 107 | single_poly_test::<_, _, Hyrax377, _>( 108 | Some(10), 109 | rand_poly, 110 | rand_point, 111 | poseidon_sponge_for_test::, 112 | ) 113 | .expect("test failed for bls12-377"); 114 | single_poly_test::<_, _, Hyrax381, _>( 115 | Some(10), 116 | rand_poly, 117 | rand_point, 118 | poseidon_sponge_for_test::, 119 | ) 120 | .expect("test failed for bls12-381"); 121 | } 122 | 123 | #[test] 124 | fn hyrax_constant_poly_test() { 125 | single_poly_test::<_, _, Hyrax377, _>( 126 | Some(0), 127 | constant_poly, 128 | rand_point, 129 | poseidon_sponge_for_test::, 130 | ) 131 | .expect("test failed for bls12-377"); 132 | single_poly_test::<_, _, Hyrax381, _>( 133 | Some(0), 134 | constant_poly, 135 | rand_point, 136 | poseidon_sponge_for_test::, 137 | ) 138 | .expect("test failed for bls12-381"); 139 | } 140 | 141 | #[test] 142 | fn hyrax_full_end_to_end_test() { 143 | full_end_to_end_test::<_, _, Hyrax377, _>( 144 | Some(8), 145 | rand_poly, 146 | rand_point, 147 | poseidon_sponge_for_test::, 148 | ) 149 | .expect("test failed for bls12-377"); 150 | full_end_to_end_test::<_, _, Hyrax381, _>( 151 | Some(10), 152 | rand_poly, 153 | rand_point, 154 | poseidon_sponge_for_test::, 155 | ) 156 | .expect("test failed for bls12-381"); 157 | } 158 | 159 | #[test] 160 | fn hyrax_single_equation_test() { 161 | single_equation_test::<_, _, Hyrax377, _>( 162 | Some(6), 163 | rand_poly, 164 | rand_point, 165 | poseidon_sponge_for_test::, 166 | ) 167 | .expect("test failed for bls12-377"); 168 | single_equation_test::<_, _, Hyrax381, _>( 169 | Some(6), 170 | rand_poly, 171 | rand_point, 172 | poseidon_sponge_for_test::, 173 | ) 174 | .expect("test failed for bls12-381"); 175 | } 176 | 177 | #[test] 178 | fn hyrax_two_equation_test() { 179 | two_equation_test::<_, _, Hyrax377, _>( 180 | Some(10), 181 | rand_poly, 182 | rand_point, 183 | poseidon_sponge_for_test::, 184 | ) 185 | .expect("test failed for bls12-377"); 186 | two_equation_test::<_, 
_, Hyrax381, _>( 187 | Some(10), 188 | rand_poly, 189 | rand_point, 190 | poseidon_sponge_for_test::, 191 | ) 192 | .expect("test failed for bls12-381"); 193 | } 194 | 195 | #[test] 196 | fn hyrax_full_end_to_end_equation_test() { 197 | full_end_to_end_equation_test::<_, _, Hyrax377, _>( 198 | Some(8), 199 | rand_poly, 200 | rand_point, 201 | poseidon_sponge_for_test::, 202 | ) 203 | .expect("test failed for bls12-377"); 204 | full_end_to_end_equation_test::<_, _, Hyrax381, _>( 205 | Some(8), 206 | rand_poly, 207 | rand_point, 208 | poseidon_sponge_for_test::, 209 | ) 210 | .expect("test failed for bls12-381"); 211 | } 212 | -------------------------------------------------------------------------------- /poly-commit/src/hyrax/utils.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::Field; 2 | #[cfg(not(feature = "std"))] 3 | use ark_std::vec::Vec; 4 | 5 | #[cfg(feature = "parallel")] 6 | use rayon::prelude::*; 7 | 8 | /// Transforms a flat vector into an n*m matrix in column-major order. The 9 | /// latter is given as a list of rows. 10 | /// 11 | /// For example, if flat = [1, 2, 3, 4, 5, 6] and n = 2, m = 3, then 12 | /// the output is [[1, 3, 5], [2, 4, 6]]. 13 | pub(crate) fn flat_to_matrix_column_major(flat: &[T], n: usize, m: usize) -> Vec> { 14 | assert_eq!(flat.len(), n * m, "n * m should coincide with flat.len()"); 15 | let mut res = Vec::new(); 16 | 17 | for row in 0..n { 18 | res.push((0..m).map(|col| flat[col * n + row]).collect()) 19 | } 20 | res 21 | } 22 | 23 | // This function computes all evaluations of the MLE EQ(i, values) for i 24 | // between 0...0 and 1...1 (n-bit strings). This is essentially 25 | // the same as the tensor_vec function in `linear_codes/utils.rs`, 26 | // the difference being the endianness of the order of the output. 27 | pub(crate) fn tensor_prime(values: &[F]) -> Vec { 28 | if values.is_empty() { 29 | return vec![F::one()]; 30 | } 31 | 32 | let tail = tensor_prime(&values[1..]); 33 | let val = values[0]; 34 | 35 | cfg_iter!(tail) 36 | .map(|v| *v * (F::one() - val)) 37 | .chain(cfg_iter!(tail).map(|v| *v * val)) 38 | .collect() 39 | } 40 | -------------------------------------------------------------------------------- /poly-commit/src/ipa_pc/data_structures.rs: -------------------------------------------------------------------------------- 1 | use crate::*; 2 | use ark_ec::AffineRepr; 3 | use ark_ff::{UniformRand, Zero}; 4 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 5 | 6 | /// `UniversalParams` are the universal parameters for the inner product argument scheme. 7 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 8 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 9 | pub struct UniversalParams { 10 | /// The key used to commit to polynomials. 11 | pub comm_key: Vec, 12 | 13 | /// Some group generator. 14 | pub h: G, 15 | 16 | /// Some group generator specifically used for hiding. 17 | pub s: G, 18 | } 19 | 20 | impl PCUniversalParams for UniversalParams { 21 | fn max_degree(&self) -> usize { 22 | self.comm_key.len() - 1 23 | } 24 | } 25 | 26 | /// `CommitterKey` is used to commit to, and create evaluation proofs for, a given 27 | /// polynomial. 28 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 29 | #[derivative( 30 | Default(bound = ""), 31 | Hash(bound = ""), 32 | Clone(bound = ""), 33 | Debug(bound = "") 34 | )] 35 | pub struct CommitterKey { 36 | /// The key used to commit to polynomials.
37 | pub comm_key: Vec, 38 | 39 | /// A random group generator. 40 | pub h: G, 41 | 42 | /// A random group generator that is to be used to make 43 | /// a commitment hiding. 44 | pub s: G, 45 | 46 | /// The maximum degree supported by the parameters 47 | /// this key was derived from. 48 | pub max_degree: usize, 49 | } 50 | 51 | impl PCCommitterKey for CommitterKey { 52 | fn max_degree(&self) -> usize { 53 | self.max_degree 54 | } 55 | fn supported_degree(&self) -> usize { 56 | self.comm_key.len() - 1 57 | } 58 | } 59 | 60 | /// `VerifierKey` is used to check evaluation proofs for a given commitment. 61 | pub type VerifierKey = CommitterKey; 62 | 63 | impl PCVerifierKey for VerifierKey { 64 | fn max_degree(&self) -> usize { 65 | self.max_degree 66 | } 67 | 68 | fn supported_degree(&self) -> usize { 69 | self.comm_key.len() - 1 70 | } 71 | } 72 | 73 | /// Nothing to do to prepare this verifier key (for now). 74 | pub type PreparedVerifierKey = VerifierKey; 75 | 76 | impl PCPreparedVerifierKey> for PreparedVerifierKey { 77 | /// Prepare `PreparedVerifierKey` from `VerifierKey` 78 | fn prepare(vk: &VerifierKey) -> Self { 79 | vk.clone() 80 | } 81 | } 82 | 83 | /// Commitment to a polynomial that optionally enforces a degree bound. 84 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 85 | #[derivative( 86 | Default(bound = ""), 87 | Hash(bound = ""), 88 | Clone(bound = ""), 89 | Copy(bound = ""), 90 | Debug(bound = ""), 91 | PartialEq(bound = ""), 92 | Eq(bound = "") 93 | )] 94 | pub struct Commitment { 95 | /// A Pedersen commitment to the polynomial. 96 | pub comm: G, 97 | 98 | /// A Pedersen commitment to the shifted polynomial. 99 | /// This is `None` if the committed polynomial does not 100 | /// enforce a strict degree bound. 101 | pub shifted_comm: Option, 102 | } 103 | 104 | impl PCCommitment for Commitment { 105 | #[inline] 106 | fn empty() -> Self { 107 | Commitment { 108 | comm: G::zero(), 109 | shifted_comm: None, 110 | } 111 | } 112 | 113 | fn has_degree_bound(&self) -> bool { 114 | false 115 | } 116 | } 117 | 118 | /// Nothing to do to prepare this commitment (for now). 119 | pub type PreparedCommitment = Commitment; 120 | 121 | impl PCPreparedCommitment> for PreparedCommitment { 122 | /// Prepare `PreparedCommitment` from `Commitment` 123 | fn prepare(vk: &Commitment) -> Self { 124 | vk.clone() 125 | } 126 | } 127 | 128 | /// `Randomness` hides the polynomial inside a commitment and is output by `InnerProductArg::commit`. 129 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 130 | #[derivative( 131 | Default(bound = ""), 132 | Hash(bound = ""), 133 | Clone(bound = ""), 134 | Debug(bound = ""), 135 | PartialEq(bound = ""), 136 | Eq(bound = "") 137 | )] 138 | pub struct Randomness { 139 | /// Randomness is some scalar field element. 140 | pub rand: G::ScalarField, 141 | 142 | /// Randomness applied to the shifted commitment is some scalar field element.
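The `shifted_rand` field below pairs with the `shifted_comm` field above: enforcing a strict degree bound involves additionally committing to a shifted version of the polynomial, and that second Pedersen commitment needs its own randomness. Shifting itself is just zero-padding the coefficient vector; a sketch under the assumption that the shift is `max_degree - bound` (the exact shift used is decided inside `open`/`check`):

```rust
use ark_ff::Field;

// Illustrative only: the coefficients of x^(max_degree - bound) * p(x)
// are p's coefficients prefixed with zeros.
fn shift_coeffs<F: Field>(coeffs: &[F], max_degree: usize, bound: usize) -> Vec<F> {
    let mut shifted = vec![F::zero(); max_degree - bound];
    shifted.extend_from_slice(coeffs);
    shifted
}
```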
143 | pub shifted_rand: Option, 144 | } 145 | 146 | impl PCCommitmentState for Randomness { 147 | type Randomness = Self; 148 | fn empty() -> Self { 149 | Self { 150 | rand: G::ScalarField::zero(), 151 | shifted_rand: None, 152 | } 153 | } 154 | 155 | fn rand(_: usize, has_degree_bound: bool, _: Option, rng: &mut R) -> Self { 156 | let rand = G::ScalarField::rand(rng); 157 | let shifted_rand = if has_degree_bound { 158 | Some(G::ScalarField::rand(rng)) 159 | } else { 160 | None 161 | }; 162 | 163 | Self { rand, shifted_rand } 164 | } 165 | } 166 | 167 | /// `Proof` is an evaluation proof that is output by `InnerProductArg::open`. 168 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 169 | #[derivative( 170 | Default(bound = ""), 171 | Hash(bound = ""), 172 | Clone(bound = ""), 173 | Debug(bound = "") 174 | )] 175 | pub struct Proof { 176 | /// Vector of left elements for each of the log_d iterations in `open` 177 | pub l_vec: Vec, 178 | 179 | /// Vector of right elements for each of the log_d iterations within `open` 180 | pub r_vec: Vec, 181 | 182 | /// Committer key from the last iteration within `open` 183 | pub final_comm_key: G, 184 | 185 | /// Coefficient from the last iteration within `open` 186 | pub c: G::ScalarField, 187 | 188 | /// Commitment to the blinding polynomial. 189 | pub hiding_comm: Option, 190 | 191 | /// Linear combination of all the randomness used for commitments 192 | /// to the opened polynomials, along with the randomness used for the 193 | /// commitment to the hiding polynomial. 194 | pub rand: Option, 195 | } 196 | 197 | /// `SuccinctCheckPolynomial` is a succinctly-represented polynomial 198 | /// generated from the `log_d` random oracle challenges generated in `open`. 199 | /// It has the special property that it can be evaluated in `O(log_d)` time. 200 | pub struct SuccinctCheckPolynomial(pub Vec); 201 | 202 | impl SuccinctCheckPolynomial { 203 | /// Computes the coefficients of the underlying degree `d` polynomial. 204 | pub fn compute_coeffs(&self) -> Vec { 205 | let challenges = &self.0; 206 | let log_d = challenges.len(); 207 | 208 | let mut coeffs = vec![F::one(); 1 << log_d]; 209 | for (i, challenge) in challenges.iter().enumerate() { 210 | let i = i + 1; 211 | let elem_degree = 1 << (log_d - i); 212 | for start in (elem_degree..coeffs.len()).step_by(elem_degree * 2) { 213 | for offset in 0..elem_degree { 214 | coeffs[start + offset] *= challenge; 215 | } 216 | } 217 | } 218 | 219 | coeffs 220 | } 221 | 222 | /// Evaluate `self` at `point` in time `O(log_d)`.
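The `O(log_d)` claim follows from the product form h(X) = prod_{i=1}^{log_d} (1 + xi_i * X^{2^{log_d - i}}), whose expansion is exactly what `compute_coeffs` above builds; the `evaluate` method below multiplies the `log_d` factors directly. A standalone cross-check of that identity over a toy modulus (101 here; the crate of course works over any `Field`):

```rust
const P: u128 = 101; // toy prime modulus, for illustration only

fn pow_mod(mut b: u128, mut e: u64) -> u128 {
    let mut acc = 1;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % P;
        }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

// O(log d): h(x) = prod_i (1 + c_i * x^(2^(log_d - i))), as in `evaluate`.
fn eval_succinct(challenges: &[u128], x: u128) -> u128 {
    let log_d = challenges.len();
    challenges.iter().enumerate().fold(1, |acc, (i, c)| {
        let elem = pow_mod(x, 1u64 << (log_d - i - 1));
        acc * ((1 + elem * c % P) % P) % P
    })
}

// O(d): expand the coefficients as `compute_coeffs` does, then use Horner.
fn eval_naive(challenges: &[u128], x: u128) -> u128 {
    let log_d = challenges.len();
    let mut coeffs = vec![1u128; 1 << log_d];
    for (i, c) in challenges.iter().enumerate() {
        let elem_degree = 1usize << (log_d - i - 1);
        for start in (elem_degree..coeffs.len()).step_by(elem_degree * 2) {
            for offset in 0..elem_degree {
                coeffs[start + offset] = coeffs[start + offset] * c % P;
            }
        }
    }
    coeffs.iter().rev().fold(0, |acc, co| (acc * x + co) % P)
}

fn main() {
    let challenges = [3u128, 7, 11]; // log_d = 3, so degree 7
    assert_eq!(eval_succinct(&challenges, 5), eval_naive(&challenges, 5));
}
```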
223 | pub fn evaluate(&self, point: F) -> F { 224 | let challenges = &self.0; 225 | let log_d = challenges.len(); 226 | 227 | let mut product = F::one(); 228 | for (i, challenge) in challenges.iter().enumerate() { 229 | let i = i + 1; 230 | let elem_degree: u64 = (1 << (log_d - i)) as u64; 231 | let elem = point.pow([elem_degree]); 232 | product *= &(F::one() + &(elem * challenge)); 233 | } 234 | 235 | product 236 | } 237 | } 238 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/brakedown.rs: -------------------------------------------------------------------------------- 1 | use super::utils::SprsMat; 2 | use super::BrakedownPCParams; 3 | use super::LinCodeParametersInfo; 4 | use crate::linear_codes::utils::calculate_t; 5 | use crate::utils::ceil_div; 6 | use crate::utils::{ceil_mul, ent}; 7 | use crate::{PCCommitterKey, PCUniversalParams, PCVerifierKey}; 8 | 9 | use ark_crypto_primitives::crh::{CRHScheme, TwoToOneCRHScheme}; 10 | use ark_crypto_primitives::merkle_tree::{Config, LeafParam, TwoToOneParam}; 11 | use ark_ff::PrimeField; 12 | use ark_std::log2; 13 | use ark_std::rand::RngCore; 14 | use ark_std::vec::Vec; 15 | #[cfg(all(not(feature = "std"), target_arch = "aarch64"))] 16 | use num_traits::Float; 17 | 18 | impl PCUniversalParams for BrakedownPCParams 19 | where 20 | F: PrimeField, 21 | C: Config, 22 | H: CRHScheme, 23 | { 24 | fn max_degree(&self) -> usize { 25 | usize::MAX 26 | } 27 | } 28 | 29 | impl PCCommitterKey for BrakedownPCParams 30 | where 31 | F: PrimeField, 32 | C: Config, 33 | H: CRHScheme, 34 | { 35 | fn max_degree(&self) -> usize { 36 | usize::MAX 37 | } 38 | 39 | fn supported_degree(&self) -> usize { 40 | as PCCommitterKey>::max_degree(self) 41 | } 42 | } 43 | 44 | impl PCVerifierKey for BrakedownPCParams 45 | where 46 | F: PrimeField, 47 | C: Config, 48 | H: CRHScheme, 49 | { 50 | fn max_degree(&self) -> usize { 51 | usize::MAX 52 | } 53 | 54 | fn supported_degree(&self) -> usize { 55 | as PCVerifierKey>::max_degree(self) 56 | } 57 | } 58 | 59 | impl LinCodeParametersInfo for BrakedownPCParams 60 | where 61 | F: PrimeField, 62 | C: Config, 63 | H: CRHScheme, 64 | { 65 | fn check_well_formedness(&self) -> bool { 66 | self.check_well_formedness 67 | } 68 | 69 | fn distance(&self) -> (usize, usize) { 70 | (self.rho_inv.1 * self.beta.0, self.rho_inv.0 * self.beta.1) 71 | } 72 | 73 | fn sec_param(&self) -> usize { 74 | self.sec_param 75 | } 76 | 77 | fn compute_dimensions(&self, _n: usize) -> (usize, usize) { 78 | (self.n, self.m) 79 | } 80 | 81 | fn leaf_hash_param(&self) -> &<::LeafHash as CRHScheme>::Parameters { 82 | &self.leaf_hash_param 83 | } 84 | 85 | fn two_to_one_hash_param( 86 | &self, 87 | ) -> &<::TwoToOneHash as TwoToOneCRHScheme>::Parameters { 88 | &self.two_to_one_hash_param 89 | } 90 | 91 | fn col_hash_params(&self) -> &::Parameters { 92 | &self.col_hash_params 93 | } 94 | } 95 | 96 | impl BrakedownPCParams 97 | where 98 | F: PrimeField, 99 | C: Config, 100 | H: CRHScheme, 101 | { 102 | /// Create a default UniversalParams, with the values from Fig. 2 from the paper. 
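A usage sketch for the constructor that follows, condensed from the `multilinear_brakedown/tests.rs` module later in this crate; the aliases `LeafH`, `CompressH`, `ColHasher`, `MTConfig` and the field `Fr` are the ones that test module defines:

```rust
// Condensed from `multilinear_brakedown/tests.rs`; see that module for
// the Merkle-tree config and hasher aliases assumed here.
let mut rng = ark_std::test_rng();
let leaf_hash_param = <LeafH as CRHScheme>::setup(&mut rng).unwrap();
let two_to_one_hash_param = <CompressH as TwoToOneCRHScheme>::setup(&mut rng).unwrap();
let col_hash_params = <ColHasher<Fr, Blake2s256> as CRHScheme>::setup(&mut rng).unwrap();

// Parameters for a multilinear polynomial with 2^11 evaluations, with the
// well-formedness check enabled.
let pp = BrakedownPCParams::<Fr, MTConfig, ColHasher<Fr, Blake2s256>>::default(
    &mut rng,
    1 << 11,
    true,
    leaf_hash_param,
    two_to_one_hash_param,
    col_hash_params,
);
```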
103 | pub fn default( 104 | rng: &mut R, 105 | poly_len: usize, 106 | check_well_formedness: bool, 107 | leaf_hash_param: LeafParam, 108 | two_to_one_hash_param: TwoToOneParam, 109 | col_hash_params: H::Parameters, 110 | ) -> Self { 111 | let sec_param = 128; 112 | let a = (178, 1000); 113 | let b = (61, 1000); 114 | let r = (1521, 1000); 115 | let base_len = 30; 116 | let t = calculate_t::(sec_param, (b.0 * r.1, b.1 * r.0), poly_len).unwrap(); // we want to get a rough idea what t is 117 | let n = 1 << log2((ceil_div(2 * poly_len, t) as f64).sqrt().ceil() as usize); 118 | let m = ceil_div(poly_len, n); 119 | let c = Self::cn_const(a, b); 120 | let d = Self::dn_const(a, b, r); 121 | let ct = Constants { a, b, r, c, d }; 122 | let (a_dims, b_dims) = Self::mat_size(m, base_len, &ct); 123 | let a_mats = Self::make_all(rng, &a_dims); 124 | let b_mats = Self::make_all(rng, &b_dims); 125 | 126 | Self::new( 127 | sec_param, 128 | a, 129 | b, 130 | r, 131 | base_len, 132 | n, 133 | m, 134 | a_dims, 135 | b_dims, 136 | a_mats, 137 | b_mats, 138 | check_well_formedness, 139 | leaf_hash_param, 140 | two_to_one_hash_param, 141 | col_hash_params, 142 | ) 143 | } 144 | 145 | /// This function creates a UniversalParams. It does not check if the parameters are consistent/correct. 146 | pub fn new( 147 | sec_param: usize, 148 | a: (usize, usize), 149 | b: (usize, usize), 150 | r: (usize, usize), 151 | base_len: usize, 152 | n: usize, 153 | m: usize, 154 | a_dims: Vec<(usize, usize, usize)>, 155 | b_dims: Vec<(usize, usize, usize)>, 156 | a_mats: Vec>, 157 | b_mats: Vec>, 158 | check_well_formedness: bool, 159 | leaf_hash_param: LeafParam, 160 | two_to_one_hash_param: TwoToOneParam, 161 | col_hash_params: H::Parameters, 162 | ) -> Self { 163 | let m_ext = if a_dims.is_empty() { 164 | ceil_mul(m, r) 165 | } else { 166 | Self::codeword_len(&a_dims, &b_dims) 167 | }; 168 | let start = a_dims 169 | .iter() 170 | .scan(0, |acc, &(row, _, _)| { 171 | *acc += row; 172 | Some(*acc) 173 | }) 174 | .collect::>(); 175 | let end = b_dims 176 | .iter() 177 | .scan(m_ext, |acc, &(_, col, _)| { 178 | *acc -= col; 179 | Some(*acc) 180 | }) 181 | .collect::>(); 182 | 183 | Self { 184 | sec_param, 185 | alpha: a, 186 | beta: b, 187 | rho_inv: r, 188 | base_len, 189 | n, 190 | m, 191 | m_ext, 192 | a_dims, 193 | b_dims, 194 | start, 195 | end, 196 | a_mats, 197 | b_mats, 198 | check_well_formedness, 199 | leaf_hash_param, 200 | two_to_one_hash_param, 201 | col_hash_params, 202 | } 203 | } 204 | /// mu = rho_inv - 1 - rho_inv * alpha 205 | fn mu(a: (usize, usize), r: (usize, usize)) -> f64 { 206 | let nom = r.0 * (a.1 - a.0) - r.1 * a.1; 207 | let den = r.1 * a.1; 208 | nom as f64 / den as f64 209 | } 210 | /// nu = beta + alpha * beta + 0.03 211 | fn nu(a: (usize, usize), b: (usize, usize)) -> f64 { 212 | let c = (3usize, 100usize); 213 | let nom = b.0 * (a.1 + a.0) * c.1 + c.0 * b.1 * a.1; 214 | let den = b.1 * a.1 * c.1; 215 | nom as f64 / den as f64 216 | } 217 | /// cn_const 218 | fn cn_const(a: (usize, usize), b: (usize, usize)) -> (f64, f64) { 219 | let a = div(a); 220 | let b = div(b); 221 | let arg = 1.28 * b / a; 222 | let nom = ent(b) + a * ent(arg); 223 | let den = -b * arg.log2(); 224 | (nom, den) 225 | } 226 | /// cn 227 | fn cn(n: usize, ct: &Constants) -> usize { 228 | use ark_std::cmp::{max, min}; 229 | let b = ct.b; 230 | let c = ct.c; 231 | min( 232 | max(ceil_mul(n, (32 * b.0, 25 * b.1)), 4 + ceil_mul(n, b)), 233 | ((110f64 / (n as f64) + c.0) / c.1).ceil() as usize, 234 | ) 235 | } 236 | /// dn_const 237 |
fn dn_const(a: (usize, usize), b: (usize, usize), r: (usize, usize)) -> (f64, f64) { 238 | let m = Self::mu(a, r); 239 | let n = Self::nu(a, b); 240 | let a = div(a); 241 | let b = div(b); 242 | let r = div(r); 243 | let nm = n / m; 244 | let nom = r * a * ent(b / r) + m * ent(nm); 245 | let den = -a * b * nm.log2(); 246 | (nom, den) 247 | } 248 | /// dn 249 | fn dn(n: usize, ct: &Constants) -> usize { 250 | use ark_std::cmp::min; 251 | let b = ct.b; 252 | let r = ct.r; 253 | let d = ct.d; 254 | min( 255 | ceil_mul(n, (2 * b.0, b.1)) 256 | + ((ceil_mul(n, r) - n + 110) as f64 / F::MODULUS_BIT_SIZE as f64).ceil() as usize, // 2 * beta * n + n * (r - 1 + 110/n) 257 | ((110f64 / (n as f64) + d.0) / d.1).ceil() as usize, 258 | ) 259 | } 260 | fn mat_size( 261 | mut n: usize, 262 | base_len: usize, 263 | ct: &Constants, 264 | ) -> (Vec<(usize, usize, usize)>, Vec<(usize, usize, usize)>) { 265 | let mut a_dims: Vec<(usize, usize, usize)> = Vec::default(); 266 | let a = ct.a; 267 | let r = ct.r; 268 | 269 | while n >= base_len { 270 | let m = ceil_mul(n, a); 271 | let cn = Self::cn(n, ct); 272 | let cn = if cn < m { cn } else { m }; // can't generate more nonzero entries than there are columns 273 | a_dims.push((n, m, cn)); 274 | n = m; 275 | } 276 | 277 | let b_dims = a_dims 278 | .iter() 279 | .map(|&(an, am, _)| { 280 | let n = ceil_mul(am, r); 281 | let m = ceil_mul(an, r) - an - n; 282 | let dn = Self::dn(n, ct); 283 | let dn = if dn < m { dn } else { m }; // can't generate more nonzero entries than there are columns 284 | (n, m, dn) 285 | }) 286 | .collect::>(); 287 | (a_dims, b_dims) 288 | } 289 | 290 | /// This function computes the codeword length. 291 | /// Notice that it assumes the input length is at least `base_len` (i.e., `a_dims` is not empty). 292 | pub(crate) fn codeword_len( 293 | a_dims: &[(usize, usize, usize)], 294 | b_dims: &[(usize, usize, usize)], 295 | ) -> usize { 296 | b_dims.iter().map(|(_, col, _)| col).sum::() + // Output v of the recursive encoding 297 | a_dims.iter().map(|(row, _, _)| row).sum::() + // Input x to the recursive encoding 298 | b_dims.last().unwrap().0 // Output z of the last step of recursion 299 | } 300 | 301 | /// Create a matrix with `n` rows and `m` columns and `d` non-zero entries in each row. 302 | /// This function creates a list of entries for each column and calls the constructor 303 | /// from `SprsMat`. It leverages the Fisher–Yates shuffle for choosing `d` indices in each 304 | /// row.
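The index-sampling inside `make_mat` below can be read in isolation: each row needs `d` distinct column indices out of `0..m`, and `d` steps of a Fisher–Yates pass over a scratch permutation produce them in O(d) per row. A standalone sketch in which a toy LCG stands in for the `RngCore` the real code takes; note that, as in `make_mat`, the scratch vector stays a permutation of `0..m` across calls, so distinctness within a row holds without re-initialization:

```rust
// Standalone sketch of the partial Fisher–Yates pass: pick `d` distinct
// indices out of `0..m`, fixing each pick at the back of `tmp`.
fn pick_d_indices(tmp: &mut [usize], m: usize, d: usize, seed: &mut u64) -> Vec<usize> {
    let next = |s: &mut u64| {
        // Toy LCG, illustration only; the crate draws from `RngCore`.
        *s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
        (*s >> 33) as usize
    };
    (0..d)
        .map(|j| {
            let r = next(seed) % (m - j); // draw from the not-yet-fixed prefix
            tmp.swap(r, m - 1 - j);       // move the pick into the fixed suffix
            tmp[m - 1 - j]
        })
        .collect()
}

fn main() {
    let m = 10;
    let mut tmp: Vec<usize> = (0..m).collect();
    let mut seed = 42u64;
    let idxs = pick_d_indices(&mut tmp, m, 3, &mut seed);
    assert_eq!(idxs.len(), 3); // three distinct column indices
}
```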
305 | fn make_mat(n: usize, m: usize, d: usize, rng: &mut R) -> SprsMat { 306 | let mut tmp: Vec = (0..m).collect(); 307 | let mut mat: Vec> = vec![vec![]; m]; 308 | for i in 0..n { 309 | // Fisher–Yates shuffle algorithm 310 | let idxs = { 311 | (0..d) 312 | .map(|j| { 313 | let r = rng.next_u64() as usize % (m - j); 314 | tmp.swap(r, m - 1 - j); 315 | tmp[m - 1 - j] 316 | }) 317 | .collect::>() 318 | }; 319 | // Sampling values for each non-zero entry 320 | for j in idxs { 321 | mat[j].push(( 322 | i, 323 | loop { 324 | let r = F::rand(rng); 325 | if r != F::zero() { 326 | break r; 327 | } 328 | }, 329 | )) 330 | } 331 | } 332 | SprsMat::::new_from_columns(n, m, d, &mat) 333 | } 334 | 335 | fn make_all(rng: &mut R, dims: &[(usize, usize, usize)]) -> Vec> { 336 | dims.iter() 337 | .map(|(n, m, d)| Self::make_mat(*n, *m, *d, rng)) 338 | .collect::>() 339 | } 340 | } 341 | 342 | #[inline] 343 | fn div(a: (usize, usize)) -> f64 { 344 | a.0 as f64 / a.1 as f64 345 | } 346 | 347 | struct Constants { 348 | a: (usize, usize), 349 | b: (usize, usize), 350 | r: (usize, usize), 351 | c: (f64, f64), 352 | d: (f64, f64), 353 | } 354 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/data_structures.rs: -------------------------------------------------------------------------------- 1 | use crate::{linear_codes::utils::SprsMat, utils::Matrix, PCCommitment, PCCommitmentState}; 2 | use ark_crypto_primitives::{ 3 | crh::CRHScheme, 4 | merkle_tree::{Config, LeafParam, Path, TwoToOneParam}, 5 | }; 6 | use ark_ff::PrimeField; 7 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 8 | #[cfg(not(feature = "std"))] 9 | use ark_std::vec::Vec; 10 | use ark_std::{marker::PhantomData, rand::RngCore}; 11 | 12 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 13 | #[derivative(Clone(bound = ""), Debug(bound = ""))] 14 | /// The public parameters for Brakedown PCS. 15 | pub struct BrakedownPCParams { 16 | /// The security parameter 17 | pub(crate) sec_param: usize, 18 | /// alpha in the paper 19 | pub(crate) alpha: (usize, usize), 20 | /// beta in the paper 21 | pub(crate) beta: (usize, usize), 22 | /// The inverse of the code rate. 23 | pub(crate) rho_inv: (usize, usize), 24 | /// Threshold of the base case to encode with RS 25 | pub(crate) base_len: usize, 26 | /// Length of each column in the matrix that represents the polynomials 27 | pub(crate) n: usize, 28 | /// Length of each row in the matrix that represents the polynomials 29 | pub(crate) m: usize, 30 | /// Length of each row in the matrix that represents the polynomials, **after encoding** 31 | pub(crate) m_ext: usize, 32 | /// Constraints on A matrices. `a_dims[i]` is `(n, m, c)`, where `n` is 33 | /// the number of rows, `m` is the number of columns, `c` is the number of 34 | /// non-zero elements in each row, for the matrix A in the `i`th step of 35 | /// the encoding. 36 | pub(crate) a_dims: Vec<(usize, usize, usize)>, 37 | /// Same as `a_dims`, but for B matrices. 38 | pub(crate) b_dims: Vec<(usize, usize, usize)>, 39 | /// By having `a_dims` and `b_dims`, we compute a vector of indices that 40 | /// specifies the beginning of the sub-chunk that we need to 41 | /// encode during the recursive encoding. Notice that we do not recurse 42 | /// in this implementation; instead, we do it iteratively. 43 | pub(crate) start: Vec, 44 | /// Same as `start`, but stores the end index of those chunks.
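The `a_mats`/`b_mats` fields a few lines below hold these sparse matrices, and the encoder only ever needs row-vector-times-matrix products. A dense reference sketch of what `SprsMat::row_mul` computes (x * M), under the assumption of the column-major `(row_index, value)` layout that `make_mat` above constructs; the real `SprsMat` lives in `linear_codes/utils.rs`:

```rust
use ark_ff::Field;

// Reference semantics only: x * M for a row vector x, with M given as one
// (row_index, value) list per column.
fn row_mul_reference<F: Field>(columns: &[Vec<(usize, F)>], x: &[F]) -> Vec<F> {
    columns
        .iter()
        .map(|col| col.iter().map(|&(i, v)| x[i] * v).sum::<F>())
        .collect()
}
```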
45 | pub(crate) end: Vec, 46 | /// A vector of all A matrices we need for encoding. 47 | pub(crate) a_mats: Vec>, 48 | /// A vector of all B matrices we need for encoding. 49 | pub(crate) b_mats: Vec>, 50 | /// This is a flag which determines if the random linear combination is done. 51 | pub(crate) check_well_formedness: bool, 52 | /// Parameters for hash function of Merkle tree leaves 53 | #[derivative(Debug = "ignore")] 54 | pub(crate) leaf_hash_param: LeafParam, 55 | /// Parameters for hash function of Merkle tree combining two nodes into one 56 | #[derivative(Debug = "ignore")] 57 | pub(crate) two_to_one_hash_param: TwoToOneParam, 58 | // Parameters for obtaining leaf digest from leaf value. 59 | #[derivative(Debug = "ignore")] 60 | pub(crate) col_hash_params: H::Parameters, 61 | } 62 | 63 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 64 | #[derivative(Clone(bound = ""), Debug(bound = ""))] 65 | /// The public parameters for Ligero PCS. 66 | pub struct LigeroPCParams { 67 | pub(crate) _field: PhantomData, 68 | /// The security parameter 69 | pub(crate) sec_param: usize, 70 | /// The inverse of the code rate. 71 | pub(crate) rho_inv: usize, 72 | /// This is a flag which determines if the random linear combination is done. 73 | pub(crate) check_well_formedness: bool, 74 | /// Parameters for hash function of Merkle tree leaves 75 | #[derivative(Debug = "ignore")] 76 | pub(crate) leaf_hash_param: LeafParam, 77 | /// Parameters for hash function of Merkle tree combining two nodes into one 78 | #[derivative(Debug = "ignore")] 79 | pub(crate) two_to_one_hash_param: TwoToOneParam, 80 | // Parameters for obtaining leaf digest from leaf value. 81 | #[derivative(Debug = "ignore")] 82 | pub(crate) col_hash_params: H::Parameters, 83 | } 84 | 85 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 86 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 87 | pub(crate) struct Metadata { 88 | pub(crate) n_rows: usize, 89 | pub(crate) n_cols: usize, 90 | pub(crate) n_ext_cols: usize, 91 | } 92 | 93 | /// The commitment to a polynomial is a root of the Merkle tree, 94 | /// where each node is a hash of the column of the encoded coefficient matrix U. 95 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 96 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 97 | pub struct LinCodePCCommitment { 98 | // number of rows resp.
columns of the square matrix containing the coefficients of the polynomial 99 | pub(crate) metadata: Metadata, 100 | pub(crate) root: C::InnerDigest, 101 | } 102 | 103 | impl PCCommitment for LinCodePCCommitment { 104 | fn empty() -> Self { 105 | LinCodePCCommitment::default() 106 | } 107 | 108 | fn has_degree_bound(&self) -> bool { 109 | false 110 | } 111 | } 112 | 113 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 114 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 115 | pub struct LinCodePCCommitmentState 116 | where 117 | F: PrimeField, 118 | H: CRHScheme, 119 | { 120 | pub(crate) mat: Matrix, 121 | pub(crate) ext_mat: Matrix, 122 | pub(crate) leaves: Vec, 123 | } 124 | 125 | impl PCCommitmentState for LinCodePCCommitmentState 126 | where 127 | F: PrimeField, 128 | H: CRHScheme, 129 | { 130 | type Randomness = (); 131 | fn empty() -> Self { 132 | unimplemented!() 133 | } 134 | 135 | fn rand( 136 | _num_queries: usize, 137 | _has_degree_bound: bool, 138 | _num_vars: Option, 139 | _rng: &mut R, 140 | ) -> Self::Randomness { 141 | unimplemented!() 142 | } 143 | } 144 | 145 | /// Proof of an individual linear code well-formedness check or opening 146 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 147 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 148 | pub(crate) struct LinCodePCProofSingle 149 | where 150 | F: PrimeField, 151 | C: Config, 152 | { 153 | /// For each of the indices in q, `paths` contains the path from the root of the merkle tree to the leaf 154 | pub(crate) paths: Vec>, 155 | 156 | /// v, s.t. E(v) = w 157 | pub(crate) v: Vec, 158 | 159 | pub(crate) columns: Vec>, 160 | } 161 | 162 | /// The Proof type for linear code PCS, which amounts to an array of individual proofs 163 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 164 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 165 | pub struct LinCodePCProof 166 | where 167 | F: PrimeField, 168 | C: Config, 169 | { 170 | pub(crate) opening: LinCodePCProofSingle, 171 | pub(crate) well_formedness: Option>, 172 | } 173 | 174 | // Multiple poly at one point 175 | pub(crate) type LPCPArray = Vec>; 176 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/ligero.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | linear_codes::{utils::calculate_t, LigeroPCParams, LinCodeParametersInfo}, 3 | utils::ceil_div, 4 | PCCommitterKey, PCUniversalParams, PCVerifierKey, 5 | }; 6 | use ark_crypto_primitives::{ 7 | crh::{CRHScheme, TwoToOneCRHScheme}, 8 | merkle_tree::{Config, LeafParam, TwoToOneParam}, 9 | }; 10 | use ark_ff::PrimeField; 11 | use ark_std::{log2, marker::PhantomData}; 12 | #[cfg(all(not(feature = "std"), target_arch = "aarch64"))] 13 | use num_traits::Float; 14 | 15 | impl LigeroPCParams 16 | where 17 | F: PrimeField, 18 | C: Config, 19 | H: CRHScheme, 20 | { 21 | /// Create new UniversalParams 22 | pub fn new( 23 | sec_param: usize, 24 | rho_inv: usize, 25 | check_well_formedness: bool, 26 | leaf_hash_param: LeafParam, 27 | two_to_one_hash_param: TwoToOneParam, 28 | col_hash_params: H::Parameters, 29 | ) -> Self { 30 | Self { 31 | _field: PhantomData, 32 | sec_param, 33 | rho_inv, 34 | check_well_formedness, 35 | leaf_hash_param, 36 | two_to_one_hash_param, 37 | col_hash_params, 38 | } 39 | } 40 | } 41 | 42 | impl PCUniversalParams for LigeroPCParams 43 | where 44 | F: PrimeField, 45 
| C: Config, 46 | H: CRHScheme, 47 | { 48 | fn max_degree(&self) -> usize { 49 | if F::TWO_ADICITY < self.rho_inv as u32 { 50 | 0 51 | } else if (F::TWO_ADICITY - self.rho_inv as u32) * 2 < 64 { 52 | 2_usize.pow((F::TWO_ADICITY - self.rho_inv as u32) * 2) 53 | } else { 54 | usize::MAX 55 | } 56 | } 57 | } 58 | 59 | impl PCCommitterKey for LigeroPCParams 60 | where 61 | F: PrimeField, 62 | C: Config, 63 | H: CRHScheme, 64 | { 65 | fn max_degree(&self) -> usize { 66 | if (F::TWO_ADICITY - self.rho_inv as u32) * 2 < 64 { 67 | 2_usize.pow((F::TWO_ADICITY - self.rho_inv as u32) * 2) 68 | } else { 69 | usize::MAX 70 | } 71 | } 72 | 73 | fn supported_degree(&self) -> usize { 74 | as PCCommitterKey>::max_degree(self) 75 | } 76 | } 77 | 78 | impl PCVerifierKey for LigeroPCParams 79 | where 80 | F: PrimeField, 81 | C: Config, 82 | H: CRHScheme, 83 | { 84 | fn max_degree(&self) -> usize { 85 | if (F::TWO_ADICITY - self.rho_inv as u32) * 2 < 64 { 86 | 2_usize.pow((F::TWO_ADICITY - self.rho_inv as u32) * 2) 87 | } else { 88 | usize::MAX 89 | } 90 | } 91 | 92 | fn supported_degree(&self) -> usize { 93 | as PCVerifierKey>::max_degree(self) 94 | } 95 | } 96 | 97 | impl LinCodeParametersInfo for LigeroPCParams 98 | where 99 | F: PrimeField, 100 | C: Config, 101 | H: CRHScheme, 102 | { 103 | fn check_well_formedness(&self) -> bool { 104 | self.check_well_formedness 105 | } 106 | 107 | fn distance(&self) -> (usize, usize) { 108 | (self.rho_inv - 1, self.rho_inv) 109 | } 110 | 111 | fn sec_param(&self) -> usize { 112 | self.sec_param 113 | } 114 | 115 | /// Compute a suitable (for instance, FFT-friendly over F) matrix with at least poly_len entries. 116 | /// The returned pair (n, m) corresponds to the dimensions n x m. 117 | /// FIXME: Maybe, there should be some checks for making sure the extended row can have an FFT. 118 | fn compute_dimensions(&self, poly_len: usize) -> (usize, usize) { 119 | assert_eq!( 120 | (poly_len as f64) as usize, 121 | poly_len, 122 | "n cannot be converted to f64: aborting" 123 | ); 124 | let t = calculate_t::(self.sec_param(), self.distance(), poly_len).unwrap(); 125 | let n = 1 << log2((ceil_div(2 * poly_len, t) as f64).sqrt().ceil() as usize); 126 | let m = ceil_div(poly_len, n); 127 | (n, m) 128 | } 129 | 130 | fn leaf_hash_param(&self) -> &<::LeafHash as CRHScheme>::Parameters { 131 | &self.leaf_hash_param 132 | } 133 | 134 | fn two_to_one_hash_param( 135 | &self, 136 | ) -> &<::TwoToOneHash as TwoToOneCRHScheme>::Parameters { 137 | &self.two_to_one_hash_param 138 | } 139 | 140 | fn col_hash_params(&self) -> &::Parameters { 141 | &self.col_hash_params 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/multilinear_brakedown/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::Error; 2 | 3 | use super::utils::tensor_vec; 4 | use super::{BrakedownPCParams, LinearEncode}; 5 | use ark_crypto_primitives::{ 6 | crh::{CRHScheme, TwoToOneCRHScheme}, 7 | merkle_tree::Config, 8 | }; 9 | use ark_ff::{Field, PrimeField}; 10 | use ark_poly::{MultilinearExtension, Polynomial}; 11 | #[cfg(not(feature = "std"))] 12 | use ark_std::vec::Vec; 13 | use ark_std::{log2, marker::PhantomData, rand::RngCore}; 14 | 15 | mod tests; 16 | 17 | /// The multilinear Brakedown polynomial commitment scheme based on [[Brakedown]][bd]. 18 | /// The scheme defaults to the naive batching strategy. 19 | /// 20 | /// Note: The scheme currently does not support hiding.
21 | /// 22 | /// [bd]: https://eprint.iacr.org/2021/1043.pdf 23 | pub struct MultilinearBrakedown, H: CRHScheme> 24 | { 25 | _phantom: PhantomData<(F, C, P, H)>, 26 | } 27 | 28 | impl LinearEncode for MultilinearBrakedown 29 | where 30 | F: PrimeField, 31 | C: Config, 32 | P: MultilinearExtension, 33 |
<P as Polynomial<F>>::Point: Into<Vec<F>>, 34 | H: CRHScheme, 35 | { 36 | type LinCodePCParams = BrakedownPCParams; 37 | 38 | fn setup( 39 | _max_degree: usize, 40 | num_vars: Option, 41 | rng: &mut R, 42 | leaf_hash_param: <::LeafHash as CRHScheme>::Parameters, 43 | two_to_one_hash_param: <::TwoToOneHash as TwoToOneCRHScheme>::Parameters, 44 | col_hash_params: H::Parameters, 45 | ) -> Self::LinCodePCParams { 46 | Self::LinCodePCParams::default( 47 | rng, 48 | 1 << num_vars.unwrap(), 49 | true, 50 | leaf_hash_param, 51 | two_to_one_hash_param, 52 | col_hash_params, 53 | ) 54 | } 55 | 56 | fn encode(msg: &[F], pp: &Self::LinCodePCParams) -> Result, Error> { 57 | if msg.len() != pp.m { 58 | return Err(Error::EncodingError); 59 | } 60 | let cw_len = pp.m_ext; 61 | let mut cw = Vec::with_capacity(cw_len); 62 | cw.extend_from_slice(msg); 63 | 64 | // Multiply by matrices A 65 | for (i, &s) in pp.start.iter().enumerate() { 66 | let mut src = pp.a_mats[i].row_mul(&cw[s - pp.a_dims[i].0..s]); 67 | cw.append(&mut src); 68 | } 69 | 70 | // later we don't necessarily mutate in order, so we need the full vec now. 71 | cw.resize(cw_len, F::zero()); 72 | // RS encode the last one 73 | let rss = *pp.start.last().unwrap_or(&0); 74 | let rsie = rss + pp.a_dims.last().unwrap_or(&(0, pp.m, 0)).1; 75 | let rsoe = *pp.end.last().unwrap_or(&cw_len); 76 | naive_reed_solomon(&mut cw, rss, rsie, rsoe); 77 | 78 | // Come back 79 | for (i, (&s, &e)) in pp.start.iter().zip(&pp.end).enumerate() { 80 | let src = &pp.b_mats[i].row_mul(&cw[s..e]); 81 | cw[e..e + pp.b_dims[i].1].copy_from_slice(src); 82 | } 83 | Ok(cw.to_vec()) 84 | } 85 | 86 | fn poly_to_vec(polynomial: &P) -> Vec { 87 | polynomial.to_evaluations() 88 | } 89 | 90 | fn point_to_vec(point:
<P as Polynomial<F>>::Point) -> Vec<F> { 91 | point 92 | } 93 | 94 | /// For a multilinear polynomial in n+m variables it returns a tuple for k={n,m}: 95 | /// ((1-z_1)*(1-z_2)*...*(1-z_k), z_1*(1-z_2)*...*(1-z_k), ..., z_1*z_2*...*z_k) 96 | fn tensor( 97 | point: &
<P as Polynomial<F>
>::Point, 98 | left_len: usize, 99 | _right_len: usize, 100 | ) -> (Vec, Vec) { 101 | let point: Vec = Self::point_to_vec(point.clone()); 102 | 103 | let split = log2(left_len) as usize; 104 | let left = &point[..split]; 105 | let right = &point[split..]; 106 | (tensor_vec(left), tensor_vec(right)) 107 | } 108 | } 109 | 110 | // This RS encoding is on points 1, ..., oe - s without relying on FFTs 111 | fn naive_reed_solomon(cw: &mut [F], s: usize, ie: usize, oe: usize) { 112 | let mut res = vec![F::zero(); oe - s]; 113 | let mut x = F::one(); 114 | for r in res.iter_mut() { 115 | for j in (s..ie).rev() { 116 | *r *= x; 117 | *r += cw[j]; 118 | } 119 | x += F::one(); 120 | } 121 | cw[s..oe].copy_from_slice(&res); 122 | } 123 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/multilinear_brakedown/tests.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | use crate::linear_codes::LinearCodePCS; 5 | use crate::utils::test_sponge; 6 | use crate::{ 7 | linear_codes::{utils::*, BrakedownPCParams, MultilinearBrakedown, PolynomialCommitment}, 8 | LabeledPolynomial, 9 | }; 10 | use ark_bls12_377::Fr; 11 | use ark_bls12_381::Fr as Fr381; 12 | use ark_crypto_primitives::{ 13 | crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, 14 | merkle_tree::{ByteDigestConverter, Config}, 15 | }; 16 | use ark_ff::{Field, PrimeField}; 17 | use ark_poly::evaluations::multivariate::{MultilinearExtension, SparseMultilinearExtension}; 18 | use ark_std::test_rng; 19 | use blake2::Blake2s256; 20 | use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; 21 | 22 | type LeafH = LeafIdentityHasher; 23 | type CompressH = Sha256; 24 | type ColHasher = FieldToBytesColHasher; 25 | 26 | struct MerkleTreeParams; 27 | 28 | impl Config for MerkleTreeParams { 29 | type Leaf = Vec; 30 | 31 | type LeafDigest = ::Output; 32 | type LeafInnerDigestConverter = ByteDigestConverter; 33 | type InnerDigest = ::Output; 34 | 35 | type LeafHash = LeafH; 36 | type TwoToOneHash = CompressH; 37 | } 38 | 39 | type MTConfig = MerkleTreeParams; 40 | 41 | type BrakedownPCS = LinearCodePCS< 42 | MultilinearBrakedown, ColHasher>, 43 | F, 44 | SparseMultilinearExtension, 45 | MTConfig, 46 | ColHasher, 47 | >; 48 | 49 | fn rand_poly( 50 | _: usize, 51 | num_vars: Option, 52 | rng: &mut ChaCha20Rng, 53 | ) -> SparseMultilinearExtension { 54 | match num_vars { 55 | Some(n) => SparseMultilinearExtension::rand(n, rng), 56 | None => unimplemented!(), // should not happen in ML case! 57 | } 58 | } 59 | 60 | fn constant_poly( 61 | _: usize, 62 | num_vars: Option, 63 | rng: &mut ChaCha20Rng, 64 | ) -> SparseMultilinearExtension { 65 | match num_vars { 66 | Some(n) => { 67 | let points = vec![(1, Fr::rand(rng))]; 68 | SparseMultilinearExtension::from_evaluations(n, &points) 69 | } 70 | None => unimplemented!(), // should not happen in ML case! 
71 | } 72 | } 73 | 74 | #[test] 75 | fn test_construction() { 76 | let mut rng = &mut test_rng(); 77 | let num_vars = 11; 78 | // just to make sure we have the right degree given the FFT domain for our field 79 | let leaf_hash_param = ::setup(&mut rng).unwrap(); 80 | let two_to_one_hash_param = ::setup(&mut rng) 81 | .unwrap() 82 | .clone(); 83 | let col_hash_params = as CRHScheme>::setup(&mut rng).unwrap(); 84 | let check_well_formedness = true; 85 | 86 | let pp: BrakedownPCParams> = 87 | BrakedownPCParams::default( 88 | rng, 89 | 1 << num_vars, 90 | check_well_formedness, 91 | leaf_hash_param, 92 | two_to_one_hash_param, 93 | col_hash_params, 94 | ); 95 | 96 | let (ck, vk) = BrakedownPCS::::trim(&pp, 0, 0, None).unwrap(); 97 | 98 | let rand_chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 99 | let labeled_poly = LabeledPolynomial::new( 100 | "test".to_string(), 101 | rand_poly(1, Some(num_vars), rand_chacha), 102 | Some(num_vars), 103 | Some(num_vars), 104 | ); 105 | 106 | let mut test_sponge = test_sponge::(); 107 | let (c, states) = BrakedownPCS::::commit(&ck, &[labeled_poly.clone()], None).unwrap(); 108 | 109 | let point = rand_point(Some(num_vars), rand_chacha); 110 | 111 | let value = labeled_poly.evaluate(&point); 112 | 113 | let proof = BrakedownPCS::::open( 114 | &ck, 115 | &[labeled_poly], 116 | &c, 117 | &point, 118 | &mut (test_sponge.clone()), 119 | &states, 120 | None, 121 | ) 122 | .unwrap(); 123 | assert!(BrakedownPCS::::check( 124 | &vk, 125 | &c, 126 | &point, 127 | [value], 128 | &proof, 129 | &mut test_sponge, 130 | None 131 | ) 132 | .unwrap()); 133 | } 134 | 135 | fn rand_point(num_vars: Option, rng: &mut ChaCha20Rng) -> Vec { 136 | match num_vars { 137 | Some(n) => (0..n).map(|_| F::rand(rng)).collect(), 138 | None => unimplemented!(), // should not happen! 
139 | } 140 | } 141 | 142 | #[test] 143 | fn single_poly_test() { 144 | use crate::tests::*; 145 | single_poly_test::<_, _, BrakedownPCS, _>( 146 | Some(5), 147 | rand_poly::, 148 | rand_point::, 149 | poseidon_sponge_for_test::, 150 | ) 151 | .expect("test failed for bls12-377"); 152 | single_poly_test::<_, _, BrakedownPCS, _>( 153 | Some(10), 154 | rand_poly::, 155 | rand_point::, 156 | poseidon_sponge_for_test::, 157 | ) 158 | .expect("test failed for bls12-381"); 159 | } 160 | 161 | #[test] 162 | fn constant_poly_test() { 163 | use crate::tests::*; 164 | single_poly_test::<_, _, BrakedownPCS, _>( 165 | Some(10), 166 | constant_poly::, 167 | rand_point::, 168 | poseidon_sponge_for_test::, 169 | ) 170 | .expect("test failed for bls12-377"); 171 | single_poly_test::<_, _, BrakedownPCS, _>( 172 | Some(5), 173 | constant_poly::, 174 | rand_point::, 175 | poseidon_sponge_for_test::, 176 | ) 177 | .expect("test failed for bls12-381"); 178 | } 179 | 180 | #[test] 181 | fn full_end_to_end_test() { 182 | use crate::tests::*; 183 | full_end_to_end_test::<_, _, BrakedownPCS, _>( 184 | Some(8), 185 | rand_poly::, 186 | rand_point::, 187 | poseidon_sponge_for_test::, 188 | ) 189 | .expect("test failed for bls12-377"); 190 | println!("Finished bls12-377"); 191 | full_end_to_end_test::<_, _, BrakedownPCS, _>( 192 | Some(9), 193 | rand_poly::, 194 | rand_point::, 195 | poseidon_sponge_for_test::, 196 | ) 197 | .expect("test failed for bls12-381"); 198 | println!("Finished bls12-381"); 199 | } 200 | 201 | #[test] 202 | fn single_equation_test() { 203 | use crate::tests::*; 204 | single_equation_test::<_, _, BrakedownPCS, _>( 205 | Some(10), 206 | rand_poly::, 207 | rand_point::, 208 | poseidon_sponge_for_test::, 209 | ) 210 | .expect("test failed for bls12-377"); 211 | println!("Finished bls12-377"); 212 | single_equation_test::<_, _, BrakedownPCS, _>( 213 | Some(5), 214 | rand_poly::, 215 | rand_point::, 216 | poseidon_sponge_for_test::, 217 | ) 218 | .expect("test failed for bls12-381"); 219 | println!("Finished bls12-381"); 220 | } 221 | 222 | #[test] 223 | fn two_equation_test() { 224 | use crate::tests::*; 225 | two_equation_test::<_, _, BrakedownPCS, _>( 226 | Some(5), 227 | rand_poly::, 228 | rand_point::, 229 | poseidon_sponge_for_test::, 230 | ) 231 | .expect("test failed for bls12-377"); 232 | println!("Finished bls12-377"); 233 | two_equation_test::<_, _, BrakedownPCS, _>( 234 | Some(10), 235 | rand_poly::, 236 | rand_point::, 237 | poseidon_sponge_for_test::, 238 | ) 239 | .expect("test failed for bls12-381"); 240 | println!("Finished bls12-381"); 241 | } 242 | 243 | #[test] 244 | fn full_end_to_end_equation_test() { 245 | use crate::tests::*; 246 | full_end_to_end_equation_test::<_, _, BrakedownPCS, _>( 247 | Some(5), 248 | rand_poly::, 249 | rand_point::, 250 | poseidon_sponge_for_test::, 251 | ) 252 | .expect("test failed for bls12-377"); 253 | println!("Finished bls12-377"); 254 | full_end_to_end_equation_test::<_, _, BrakedownPCS, _>( 255 | Some(8), 256 | rand_poly::, 257 | rand_point::, 258 | poseidon_sponge_for_test::, 259 | ) 260 | .expect("test failed for bls12-381"); 261 | println!("Finished bls12-381"); 262 | } 263 | } 264 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/multilinear_ligero/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | linear_codes::{ 3 | utils::{reed_solomon, tensor_vec}, 4 | LigeroPCParams, LinearEncode, 5 | }, 6 | Error, 7 | }; 8 | use 
ark_crypto_primitives::{ 9 | crh::{CRHScheme, TwoToOneCRHScheme}, 10 | merkle_tree::Config, 11 | }; 12 | use ark_ff::{FftField, PrimeField}; 13 | use ark_poly::{MultilinearExtension, Polynomial}; 14 | #[cfg(not(feature = "std"))] 15 | use ark_std::vec::Vec; 16 | use ark_std::{log2, marker::PhantomData}; 17 | 18 | mod tests; 19 | 20 | /// The multilinear Ligero polynomial commitment scheme based on [[Ligero]][ligero]. 21 | /// The scheme defaults to the naive batching strategy. 22 | /// 23 | /// Note: The scheme currently does not support hiding. 24 | /// 25 | /// [ligero]: https://eprint.iacr.org/2022/1608.pdf 26 | pub struct MultilinearLigero, H: CRHScheme> { 27 | _phantom: PhantomData<(F, C, P, H)>, 28 | } 29 | 30 | impl LinearEncode for MultilinearLigero 31 | where 32 | F: PrimeField + FftField, 33 | C: Config, 34 | P: MultilinearExtension, 35 |
<P as Polynomial<F>>::Point: Into<Vec<F>>, 36 | H: CRHScheme, 37 | { 38 | type LinCodePCParams = LigeroPCParams; 39 | 40 | fn setup( 41 | _max_degree: usize, 42 | _num_vars: Option, 43 | _rng: &mut R, 44 | leaf_hash_param: <::LeafHash as CRHScheme>::Parameters, 45 | two_to_one_hash_param: <::TwoToOneHash as TwoToOneCRHScheme>::Parameters, 46 | col_hash_params: H::Parameters, 47 | ) -> Self::LinCodePCParams { 48 | Self::LinCodePCParams::new( 49 | 128, 50 | 2, 51 | true, 52 | leaf_hash_param, 53 | two_to_one_hash_param, 54 | col_hash_params, 55 | ) 56 | } 57 | 58 | fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Result, Error> { 59 | Ok(reed_solomon(msg, param.rho_inv)) 60 | } 61 | 62 | fn poly_to_vec(polynomial: &P) -> Vec { 63 | polynomial.to_evaluations() 64 | } 65 | 66 | fn point_to_vec(point:
<P as Polynomial<F>>::Point) -> Vec<F> { 67 | point 68 | } 69 | 70 | /// For a multilinear polynomial in n+m variables it returns a tuple for k={n,m}: 71 | /// ((1-z_1)*(1-z_2)*...*(1-z_k), z_1*(1-z_2)*...*(1-z_k), ..., z_1*z_2*...*z_k) 72 | fn tensor( 73 | point: &
<P as Polynomial<F>
>::Point, 74 | left_len: usize, 75 | _right_len: usize, 76 | ) -> (Vec, Vec) { 77 | let point: Vec = Self::point_to_vec(point.clone()); 78 | 79 | let split = log2(left_len) as usize; 80 | let left = &point[..split]; 81 | let right = &point[split..]; 82 | (tensor_vec(left), tensor_vec(right)) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/multilinear_ligero/tests.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use crate::{ 4 | linear_codes::{LigeroPCParams, LinearCodePCS, MultilinearLigero, PolynomialCommitment}, 5 | utils::test_sponge, 6 | LabeledPolynomial, 7 | }; 8 | use ark_bls12_377::Fr; 9 | use ark_bls12_381::Fr as Fr381; 10 | use ark_crypto_primitives::{ 11 | crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, 12 | merkle_tree::{ByteDigestConverter, Config}, 13 | }; 14 | use ark_ff::{Field, PrimeField}; 15 | use ark_poly::evaluations::multivariate::{MultilinearExtension, SparseMultilinearExtension}; 16 | use ark_std::test_rng; 17 | use blake2::Blake2s256; 18 | use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; 19 | 20 | use ark_pcs_bench_templates::{FieldToBytesColHasher, LeafIdentityHasher}; 21 | 22 | type LeafH = LeafIdentityHasher; 23 | type CompressH = Sha256; 24 | type ColHasher = FieldToBytesColHasher; 25 | 26 | struct MerkleTreeParams; 27 | 28 | impl Config for MerkleTreeParams { 29 | type Leaf = Vec; 30 | 31 | type LeafDigest = ::Output; 32 | type LeafInnerDigestConverter = ByteDigestConverter; 33 | type InnerDigest = ::Output; 34 | 35 | type LeafHash = LeafH; 36 | type TwoToOneHash = CompressH; 37 | } 38 | 39 | type MTConfig = MerkleTreeParams; 40 | type LigeroPCS = LinearCodePCS< 41 | MultilinearLigero, ColHasher>, 42 | F, 43 | SparseMultilinearExtension, 44 | MTConfig, 45 | ColHasher, 46 | >; 47 | 48 | fn rand_poly( 49 | _: usize, 50 | num_vars: Option, 51 | rng: &mut ChaCha20Rng, 52 | ) -> SparseMultilinearExtension { 53 | match num_vars { 54 | Some(n) => SparseMultilinearExtension::rand(n, rng), 55 | None => unimplemented!(), // should not happen in ML case! 56 | } 57 | } 58 | 59 | fn constant_poly( 60 | _: usize, 61 | num_vars: Option, 62 | rng: &mut ChaCha20Rng, 63 | ) -> SparseMultilinearExtension { 64 | // f1 = (1-x1)(1-x2)(1-x3)(1-x5)[(1-x6)*x4 + 2(1-x4)*x6] 65 | match num_vars { 66 | Some(n) => { 67 | let points = vec![(1, Fr::rand(rng))]; 68 | SparseMultilinearExtension::from_evaluations(n, &points) 69 | } 70 | None => unimplemented!(), // should not happen in ML case! 
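// Editor's aside: the `tensor` method of `MultilinearLigero` above splits the
// evaluation point into two halves and expands each half into its multilinear
// Lagrange weights, so evaluating the MLE reduces to left^T * M * right over
// the committed matrix M. A standalone sketch of that expansion (our own
// helper, not necessarily identical to the crate's `tensor_vec` in utils.rs):
//
//     use ark_ff::Field;
//
//     fn lagrange_weights<F: Field>(z: &[F]) -> Vec<F> {
//         // ((1-z_1)...(1-z_k), z_1(1-z_2)...(1-z_k), ..., z_1...z_k)
//         let mut out = vec![F::one()];
//         for zi in z {
//             let mut next = Vec::with_capacity(out.len() * 2);
//             for w in &out {
//                 next.push(*w * (F::one() - zi)); // this variable's bit is 0
//                 next.push(*w * *zi);             // this variable's bit is 1
//             }
//             out = next;
//         }
//         out
//     }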
71 | } 72 | } 73 | 74 | #[test] 75 | fn test_construction() { 76 | let mut rng = &mut test_rng(); 77 | let num_vars = 10; 78 | // just to make sure we have the right degree given the FFT domain for our field 79 | let leaf_hash_param = ::setup(&mut rng).unwrap(); 80 | let two_to_one_hash_param = ::setup(&mut rng) 81 | .unwrap() 82 | .clone(); 83 | let col_hash_params = as CRHScheme>::setup(&mut rng).unwrap(); 84 | let check_well_formedness = true; 85 | 86 | let pp: LigeroPCParams> = LigeroPCParams::new( 87 | 128, 88 | 4, 89 | check_well_formedness, 90 | leaf_hash_param, 91 | two_to_one_hash_param, 92 | col_hash_params, 93 | ); 94 | 95 | let (ck, vk) = LigeroPCS::::trim(&pp, 0, 0, None).unwrap(); 96 | 97 | let rand_chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); 98 | let labeled_poly = LabeledPolynomial::new( 99 | "test".to_string(), 100 | rand_poly(1, Some(num_vars), rand_chacha), 101 | Some(num_vars), 102 | Some(num_vars), 103 | ); 104 | 105 | let mut test_sponge = test_sponge::(); 106 | let (c, rands) = LigeroPCS::::commit(&ck, &[labeled_poly.clone()], None).unwrap(); 107 | 108 | let point = rand_point(Some(num_vars), rand_chacha); 109 | 110 | let value = labeled_poly.evaluate(&point); 111 | 112 | let proof = LigeroPCS::::open( 113 | &ck, 114 | &[labeled_poly], 115 | &c, 116 | &point, 117 | &mut (test_sponge.clone()), 118 | &rands, 119 | None, 120 | ) 121 | .unwrap(); 122 | assert!( 123 | LigeroPCS::::check(&vk, &c, &point, [value], &proof, &mut test_sponge, None) 124 | .unwrap() 125 | ); 126 | } 127 | 128 | fn rand_point(num_vars: Option, rng: &mut ChaCha20Rng) -> Vec { 129 | match num_vars { 130 | Some(n) => (0..n).map(|_| F::rand(rng)).collect(), 131 | None => unimplemented!(), // should not happen! 132 | } 133 | } 134 | 135 | #[test] 136 | fn single_poly_test() { 137 | use crate::tests::*; 138 | single_poly_test::<_, _, LigeroPCS, _>( 139 | Some(5), 140 | rand_poly::, 141 | rand_point::, 142 | poseidon_sponge_for_test::, 143 | ) 144 | .expect("test failed for bls12-377"); 145 | single_poly_test::<_, _, LigeroPCS, _>( 146 | Some(10), 147 | rand_poly::, 148 | rand_point::, 149 | poseidon_sponge_for_test::, 150 | ) 151 | .expect("test failed for bls12-381"); 152 | } 153 | 154 | #[test] 155 | fn constant_poly_test() { 156 | use crate::tests::*; 157 | single_poly_test::<_, _, LigeroPCS, _>( 158 | Some(10), 159 | constant_poly::, 160 | rand_point::, 161 | poseidon_sponge_for_test::, 162 | ) 163 | .expect("test failed for bls12-377"); 164 | single_poly_test::<_, _, LigeroPCS, _>( 165 | Some(5), 166 | constant_poly::, 167 | rand_point::, 168 | poseidon_sponge_for_test::, 169 | ) 170 | .expect("test failed for bls12-381"); 171 | } 172 | 173 | #[test] 174 | fn full_end_to_end_test() { 175 | use crate::tests::*; 176 | full_end_to_end_test::<_, _, LigeroPCS, _>( 177 | Some(8), 178 | rand_poly::, 179 | rand_point::, 180 | poseidon_sponge_for_test::, 181 | ) 182 | .expect("test failed for bls12-377"); 183 | println!("Finished bls12-377"); 184 | full_end_to_end_test::<_, _, LigeroPCS, _>( 185 | Some(3), 186 | rand_poly::, 187 | rand_point::, 188 | poseidon_sponge_for_test::, 189 | ) 190 | .expect("test failed for bls12-381"); 191 | println!("Finished bls12-381"); 192 | } 193 | 194 | #[test] 195 | fn single_equation_test() { 196 | use crate::tests::*; 197 | single_equation_test::<_, _, LigeroPCS, _>( 198 | Some(10), 199 | rand_poly::, 200 | rand_point::, 201 | poseidon_sponge_for_test::, 202 | ) 203 | .expect("test failed for bls12-377"); 204 | println!("Finished bls12-377"); 205 | 
single_equation_test::<_, _, LigeroPCS, _>( 206 | Some(5), 207 | rand_poly::, 208 | rand_point::, 209 | poseidon_sponge_for_test::, 210 | ) 211 | .expect("test failed for bls12-381"); 212 | println!("Finished bls12-381"); 213 | } 214 | 215 | #[test] 216 | fn two_equation_test() { 217 | use crate::tests::*; 218 | two_equation_test::<_, _, LigeroPCS, _>( 219 | Some(5), 220 | rand_poly::, 221 | rand_point::, 222 | poseidon_sponge_for_test::, 223 | ) 224 | .expect("test failed for bls12-377"); 225 | println!("Finished bls12-377"); 226 | two_equation_test::<_, _, LigeroPCS, _>( 227 | Some(10), 228 | rand_poly::, 229 | rand_point::, 230 | poseidon_sponge_for_test::, 231 | ) 232 | .expect("test failed for bls12-381"); 233 | println!("Finished bls12-381"); 234 | } 235 | 236 | #[test] 237 | fn full_end_to_end_equation_test() { 238 | use crate::tests::*; 239 | full_end_to_end_equation_test::<_, _, LigeroPCS, _>( 240 | Some(5), 241 | rand_poly::, 242 | rand_point::, 243 | poseidon_sponge_for_test::, 244 | ) 245 | .expect("test failed for bls12-377"); 246 | println!("Finished bls12-377"); 247 | full_end_to_end_equation_test::<_, _, LigeroPCS, _>( 248 | Some(8), 249 | rand_poly::, 250 | rand_point::, 251 | poseidon_sponge_for_test::, 252 | ) 253 | .expect("test failed for bls12-381"); 254 | println!("Finished bls12-381"); 255 | } 256 | } 257 | -------------------------------------------------------------------------------- /poly-commit/src/linear_codes/univariate_ligero/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | linear_codes::{utils::reed_solomon, LigeroPCParams, LinearEncode}, 3 | Error, 4 | }; 5 | use ark_crypto_primitives::{ 6 | crh::{CRHScheme, TwoToOneCRHScheme}, 7 | merkle_tree::Config, 8 | }; 9 | use ark_ff::PrimeField; 10 | use ark_poly::DenseUVPolynomial; 11 | use ark_std::marker::PhantomData; 12 | #[cfg(not(feature = "std"))] 13 | use ark_std::vec::Vec; 14 | 15 | mod tests; 16 | 17 | /// The univariate Ligero polynomial commitment scheme based on [[Ligero]][ligero]. 18 | /// The scheme defaults to the naive batching strategy. 19 | /// 20 | /// Note: The scheme currently does not support hiding. 21 | /// 22 | /// [ligero]: https://eprint.iacr.org/2022/1608.pdf 23 | pub struct UnivariateLigero, H: CRHScheme> { 24 | _phantom: PhantomData<(F, C, P, H)>, 25 | } 26 | 27 | impl LinearEncode for UnivariateLigero 28 | where 29 | F: PrimeField, 30 | C: Config, 31 | P: DenseUVPolynomial, 32 | P::Point: Into, 33 | H: CRHScheme, 34 | { 35 | type LinCodePCParams = LigeroPCParams; 36 | 37 | fn setup( 38 | _max_degree: usize, 39 | _num_vars: Option, 40 | _rng: &mut R, 41 | leaf_hash_param: <::LeafHash as CRHScheme>::Parameters, 42 | two_to_one_hash_param: <::TwoToOneHash as TwoToOneCRHScheme>::Parameters, 43 | col_hash_params: H::Parameters, 44 | ) -> Self::LinCodePCParams { 45 | Self::LinCodePCParams::new( 46 | 128, 47 | 4, 48 | true, 49 | leaf_hash_param, 50 | two_to_one_hash_param, 51 | col_hash_params, 52 | ) 53 | } 54 | 55 | fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Result, Error> { 56 | Ok(reed_solomon(msg, param.rho_inv)) 57 | } 58 | 59 | /// For a univariate polynomial, we simply return the list of coefficients. 
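// Editor's aside: the `tensor` method further down arranges these m * n
// coefficients as an m x n matrix M, so that p(z) = left^T * M * right with
// left = (1, z, ..., z^{n-1}) and right = (1, z^n, ..., z^{(m-1)n}).
// A self-contained check of that identity (illustrative, not crate API):
//
//     fn check_univariate_tensor_split() {
//         use ark_bls12_381::Fr as F;
//         use ark_ff::{One, UniformRand, Zero};
//         use ark_std::test_rng;
//
//         let (m, n) = (4usize, 8usize);
//         let rng = &mut test_rng();
//         let coeffs: Vec<F> = (0..m * n).map(|_| F::rand(rng)).collect(); // little-endian
//         let z = F::rand(rng);
//
//         let mut left = Vec::with_capacity(n); // (1, z, ..., z^{n-1})
//         let mut pow = F::one();
//         for _ in 0..n {
//             left.push(pow);
//             pow *= z; // pow ends at z^n
//         }
//         let mut right = Vec::with_capacity(m); // (1, z^n, ..., z^{(m-1)n})
//         let mut acc = F::one();
//         for _ in 0..m {
//             right.push(acc);
//             acc *= pow;
//         }
//
//         // Row i of M is the i-th chunk of n coefficients, so the double sum
//         // sum_{i,j} M[i][j] * left[j] * right[i] telescopes back to p(z).
//         let horner = coeffs.iter().rev().fold(F::zero(), |r, c| r * z + c);
//         let tensor: F = coeffs
//             .chunks(n)
//             .zip(&right)
//             .map(|(row, r)| row.iter().zip(&left).map(|(c, l)| *c * l).sum::<F>() * *r)
//             .sum();
//         assert_eq!(horner, tensor);
//     }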
60 | fn poly_to_vec(polynomial: &P) -> Vec { 61 | polynomial.coeffs().to_vec() 62 | } 63 | 64 | fn point_to_vec(point: P::Point) -> Vec { 65 | vec![point] 66 | } 67 | 68 | /// For a univariate polynomial it returns a tuple: 69 | /// ((1, z, z^2, ..., z^n), (1, z^n, z^(2n), ..., z^((m-1)n))) 70 | fn tensor(z: &F, left: usize, right: usize) -> (Vec, Vec) { 71 | let mut left_out = Vec::with_capacity(left); 72 | let mut pow_a = F::one(); 73 | for _ in 0..left { 74 | left_out.push(pow_a); 75 | pow_a *= z; 76 | } 77 | 78 | let mut right_out = Vec::with_capacity(right); 79 | let mut pow_b = F::one(); 80 | for _ in 0..right { 81 | right_out.push(pow_b); 82 | pow_b *= pow_a; 83 | } 84 | 85 | (left_out, right_out) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /poly-commit/src/marlin/marlin_pst13_pc/combinations.rs: -------------------------------------------------------------------------------- 1 | //! Compute all combinations of values in a given list 2 | //! Credit: https://github.com/meltinglava/uniquecombinations/ 3 | #[cfg(not(feature = "std"))] 4 | use ark_std::vec::Vec; 5 | /// Compute all combinations of values in a given list. 6 | pub(crate) struct Combinations 7 | where 8 | T: Ord + Clone, 9 | { 10 | original: Vec, 11 | possition: Vec, 12 | len: usize, 13 | started: bool, 14 | } 15 | 16 | impl Combinations 17 | where 18 | T: Ord + Clone, 19 | { 20 | /// Initialize the permutations. 21 | pub(crate) fn new(mut original: Vec, len: usize) -> Self { 22 | if original.len() > len && len >= 1 { 23 | original.sort_unstable(); 24 | Self { 25 | original, 26 | possition: (0..len).collect(), 27 | len, 28 | started: false, 29 | } 30 | } else { 31 | panic!("the length has to be smaller then the datasets len"); 32 | } 33 | } 34 | 35 | #[inline] 36 | fn insert(&self, col: &mut Vec) { 37 | col.clear(); 38 | self.possition 39 | .iter() 40 | .enumerate() 41 | .for_each(|(p, n)| col.insert(p, self.original[*n].clone())) 42 | } 43 | 44 | /// Clear the contents of the comb vector and insert the next combination. 
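// Editor's aside: usage sketch (mirrors the `t_123` unit test below).
// Combinations are emitted in lexicographic order over the sorted input, and
// duplicate input values do not produce duplicate subsets.
//
//     let pairs: Vec<Vec<u32>> = Combinations::new(vec![1, 2, 3], 2).collect();
//     assert_eq!(pairs, vec![vec![1, 2], vec![1, 3], vec![2, 3]]);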
45 | fn next_combination(&mut self, mut comb: &mut Vec) -> bool { 46 | if !self.started { 47 | // first pass throught 48 | self.started = true; 49 | self.insert(&mut comb); 50 | true 51 | } else { 52 | let org_len = self.original.len(); 53 | // check if we cant bump the back number 54 | if self.original[self.possition[self.len - 1]] == self.original[org_len - 1] { 55 | // locate the number closest behind that needs to be bumped 56 | for i in 2..=self.len { 57 | if self.original[self.possition[self.len - i]] < self.original[org_len - i] { 58 | //find the value of the 59 | let lastpos = self.possition[self.len - i]; 60 | let val = &self.original[lastpos]; 61 | for j in lastpos + 1..org_len { 62 | if *val < self.original[j] { 63 | for k in 0..i { 64 | self.possition[self.len - i + k] = j + k; 65 | } 66 | self.insert(&mut comb); 67 | return true; 68 | } 69 | } 70 | } 71 | } 72 | false 73 | } else { 74 | let mut i = self.possition[self.len - 1]; 75 | let current = &self.original[i]; 76 | let mut next = current; 77 | while current == next { 78 | i += 1; 79 | next = &self.original[i]; 80 | } 81 | self.possition[self.len - 1] = i; 82 | self.insert(&mut comb); 83 | true 84 | } 85 | } 86 | } 87 | } 88 | 89 | impl Iterator for Combinations 90 | where 91 | T: Ord + Clone, 92 | { 93 | type Item = Vec; 94 | 95 | fn next(&mut self) -> Option { 96 | let mut vals = Vec::with_capacity(self.len); 97 | if self.next_combination(&mut vals) { 98 | Some(vals) 99 | } else { 100 | None 101 | } 102 | } 103 | } 104 | 105 | #[cfg(test)] 106 | mod tests { 107 | use super::*; 108 | 109 | #[test] 110 | fn equals() { 111 | assert!(Combinations::new(vec![2, 2, 2], 2).next().unwrap() == vec![2, 2]) 112 | } 113 | 114 | #[test] 115 | fn t_123() { 116 | assert!( 117 | dbg!(Combinations::new(vec![1, 2, 3], 2) 118 | .take(10) 119 | .collect::>()) 120 | == vec![vec![1, 2], vec![1, 3], vec![2, 3]] 121 | ) 122 | } 123 | 124 | #[test] 125 | fn complicated() { 126 | let actual: Vec<_> = Combinations::new(vec![1, 2, 2, 3, 4], 3).collect(); 127 | let expected = vec![ 128 | vec![1, 2, 2], 129 | vec![1, 2, 3], 130 | vec![1, 2, 4], 131 | vec![1, 3, 4], 132 | vec![2, 2, 3], 133 | vec![2, 2, 4], 134 | vec![2, 3, 4], 135 | ]; 136 | assert!(actual == expected) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /poly-commit/src/multilinear_pc/data_structures.rs: -------------------------------------------------------------------------------- 1 | use ark_ec::pairing::Pairing; 2 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 3 | #[cfg(not(feature = "std"))] 4 | use ark_std::vec::Vec; 5 | #[allow(type_alias_bounds)] 6 | /// Evaluations over {0,1}^n for G1 7 | pub type EvaluationHyperCubeOnG1 = Vec; 8 | #[allow(type_alias_bounds)] 9 | /// Evaluations over {0,1}^n for G2 10 | pub type EvaluationHyperCubeOnG2 = Vec; 11 | 12 | /// Public Parameter used by prover 13 | #[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] 14 | pub struct UniversalParams { 15 | /// number of variables 16 | pub num_vars: usize, 17 | /// `pp_{num_vars}`, `pp_{num_vars - 1}`, `pp_{num_vars - 2}`, ..., defined by XZZPD19 18 | pub powers_of_g: Vec>, 19 | /// `pp_{num_vars}`, `pp_{num_vars - 1}`, `pp_{num_vars - 2}`, ..., defined by XZZPD19 20 | pub powers_of_h: Vec>, 21 | /// generator for G1 22 | pub g: E::G1Affine, 23 | /// generator for G2 24 | pub h: E::G2Affine, 25 | /// g^randomness 26 | pub g_mask: Vec, 27 | } 28 | 29 | /// Public Parameter used by prover 30 | 
#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] 31 | pub struct CommitterKey { 32 | /// number of variables 33 | pub nv: usize, 34 | /// pp_k defined by libra 35 | pub powers_of_g: Vec>, 36 | /// pp_h defined by libra 37 | pub powers_of_h: Vec>, 38 | /// generator for G1 39 | pub g: E::G1Affine, 40 | /// generator for G2 41 | pub h: E::G2Affine, 42 | } 43 | 44 | /// Public Parameter used by prover 45 | #[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] 46 | pub struct VerifierKey { 47 | /// number of variables 48 | pub nv: usize, 49 | /// generator of G1 50 | pub g: E::G1Affine, 51 | /// generator of G2 52 | pub h: E::G2Affine, 53 | /// g^t1, g^t2, ... 54 | pub g_mask_random: Vec, 55 | } 56 | 57 | #[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] 58 | /// commitment 59 | pub struct Commitment { 60 | /// number of variables 61 | pub nv: usize, 62 | /// product of g as described by the vRAM paper 63 | pub g_product: E::G1Affine, 64 | } 65 | 66 | #[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] 67 | /// proof of opening 68 | pub struct Proof { 69 | /// Evaluation of quotients 70 | pub proofs: Vec, 71 | } 72 | -------------------------------------------------------------------------------- /poly-commit/src/multilinear_pc/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::multilinear_pc::data_structures::{ 2 | Commitment, CommitterKey, Proof, UniversalParams, VerifierKey, 3 | }; 4 | use ark_ec::{ 5 | pairing::Pairing, 6 | scalar_mul::{BatchMulPreprocessing, ScalarMul}, 7 | AffineRepr, CurveGroup, VariableBaseMSM, 8 | }; 9 | use ark_ff::{Field, One, PrimeField, Zero}; 10 | use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; 11 | #[cfg(not(feature = "std"))] 12 | use ark_std::vec::Vec; 13 | use ark_std::{ 14 | collections::LinkedList, iter::FromIterator, marker::PhantomData, ops::Mul, rand::RngCore, 15 | UniformRand, 16 | }; 17 | 18 | /// data structures used by multilinear extension commitment scheme 19 | pub mod data_structures; 20 | 21 | /// Polynomial Commitment Scheme on multilinear extensions. 
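// Editor's aside: end-to-end usage sketch, mirroring `test_polynomial` in the
// tests below; it assumes the crate is in scope as `ark_poly_commit`:
//
//     use ark_bls12_381::Bls12_381;
//     use ark_ec::pairing::Pairing;
//     use ark_poly::{DenseMultilinearExtension, MultilinearExtension, Polynomial};
//     use ark_poly_commit::multilinear_pc::MultilinearPC;
//     use ark_std::{test_rng, UniformRand};
//
//     type E = Bls12_381;
//     type Fr = <E as Pairing>::ScalarField;
//
//     let mut rng = test_rng();
//     let nv = 8;
//     let params = MultilinearPC::<E>::setup(nv, &mut rng);
//     let (ck, vk) = MultilinearPC::<E>::trim(&params, nv);
//     let poly = DenseMultilinearExtension::rand(nv, &mut rng);
//     let point: Vec<_> = (0..nv).map(|_| Fr::rand(&mut rng)).collect();
//     let com = MultilinearPC::commit(&ck, &poly);
//     let proof = MultilinearPC::open(&ck, &poly, &point);
//     assert!(MultilinearPC::check(&vk, &com, &point, poly.evaluate(&point), &proof));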
22 | pub struct MultilinearPC { 23 | _engine: PhantomData, 24 | } 25 | 26 | impl MultilinearPC { 27 | /// setup 28 | pub fn setup(num_vars: usize, rng: &mut R) -> UniversalParams { 29 | assert!(num_vars > 0, "constant polynomial not supported"); 30 | let g = E::G1::rand(rng); 31 | let h = E::G2::rand(rng); 32 | let mut powers_of_g = Vec::new(); 33 | let mut powers_of_h = Vec::new(); 34 | let t: Vec<_> = (0..num_vars).map(|_| E::ScalarField::rand(rng)).collect(); 35 | 36 | let mut eq: LinkedList> = 37 | LinkedList::from_iter(eq_extension(&t).into_iter()); 38 | let mut eq_arr = LinkedList::new(); 39 | let mut base = eq.pop_back().unwrap().evaluations; 40 | 41 | for i in (0..num_vars).rev() { 42 | eq_arr.push_front(remove_dummy_variable(&base, i)); 43 | if i != 0 { 44 | let mul = eq.pop_back().unwrap().evaluations; 45 | base = base 46 | .into_iter() 47 | .zip(mul.into_iter()) 48 | .map(|(a, b)| a * &b) 49 | .collect(); 50 | } 51 | } 52 | 53 | let mut pp_powers = Vec::new(); 54 | for i in 0..num_vars { 55 | let eq = eq_arr.pop_front().unwrap(); 56 | let pp_k_powers = (0..(1 << (num_vars - i))).map(|x| eq[x]); 57 | pp_powers.extend(pp_k_powers); 58 | } 59 | 60 | let g_table = BatchMulPreprocessing::new(g, num_vars); 61 | let pp_g = g_table.batch_mul(&pp_powers); 62 | let pp_h = h.batch_mul(&pp_powers); 63 | let mut start = 0; 64 | for i in 0..num_vars { 65 | let size = 1 << (num_vars - i); 66 | let pp_k_g = (&pp_g[start..(start + size)]).to_vec(); 67 | let pp_k_h = (&pp_h[start..(start + size)]).to_vec(); 68 | powers_of_g.push(pp_k_g); 69 | powers_of_h.push(pp_k_h); 70 | start += size; 71 | } 72 | 73 | // uncomment to measure the time for calculating vp 74 | // let vp_generation_timer = start_timer!(|| "VP generation"); 75 | let g_mask = g_table.batch_mul(&t); 76 | // end_timer!(vp_generation_timer); 77 | 78 | UniversalParams { 79 | num_vars, 80 | g: g.into_affine(), 81 | g_mask, 82 | h: h.into_affine(), 83 | powers_of_g, 84 | powers_of_h, 85 | } 86 | } 87 | 88 | /// Trim the universal parameters to specialize the public parameters 89 | /// for multilinear polynomials to the given `supported_num_vars`, and returns committer key and verifier key. 90 | /// `supported_num_vars` should be in range `1..=params.num_vars` 91 | pub fn trim( 92 | params: &UniversalParams, 93 | supported_num_vars: usize, 94 | ) -> (CommitterKey, VerifierKey) { 95 | assert!(supported_num_vars <= params.num_vars); 96 | let to_reduce = params.num_vars - supported_num_vars; 97 | let ck = CommitterKey { 98 | powers_of_h: (¶ms.powers_of_h[to_reduce..]).to_vec(), 99 | powers_of_g: (¶ms.powers_of_g[to_reduce..]).to_vec(), 100 | g: params.g, 101 | h: params.h, 102 | nv: supported_num_vars, 103 | }; 104 | let vk = VerifierKey { 105 | nv: supported_num_vars, 106 | g: params.g, 107 | h: params.h, 108 | g_mask_random: (¶ms.g_mask[to_reduce..]).to_vec(), 109 | }; 110 | (ck, vk) 111 | } 112 | 113 | /// commit 114 | pub fn commit( 115 | ck: &CommitterKey, 116 | polynomial: &impl MultilinearExtension, 117 | ) -> Commitment { 118 | let nv = polynomial.num_vars(); 119 | let scalars: Vec<_> = polynomial 120 | .to_evaluations() 121 | .into_iter() 122 | .map(|x| x.into_bigint()) 123 | .collect(); 124 | let g_product = 125 | ::msm_bigint(&ck.powers_of_g[0], scalars.as_slice()) 126 | .into_affine(); 127 | Commitment { nv, g_product } 128 | } 129 | 130 | /// On input a polynomial `p` and a point `point`, outputs a proof for the same. 
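// Editor's aside: each round of the loop below fixes one variable. Writing
// a = point[i], the evaluation table is folded as
//     r_next[b] = r[2b] * (1 - a) + r[2b + 1] * a
// (the low index bit is the variable being fixed), while the quotient slice
//     q[b] = r[2b + 1] - r[2b]
// is what gets committed via MSM for that round. Worked example
// (illustrative): evals [f(0,0), f(1,0), f(0,1), f(1,1)] = [3, 5, 7, 11]
// yield q = [2, 4] and r_next = [3 + 2a, 7 + 4a], i.e. the table of f(a, x2).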
131 | pub fn open( 132 | ck: &CommitterKey, 133 | polynomial: &impl MultilinearExtension, 134 | point: &[E::ScalarField], 135 | ) -> Proof { 136 | assert_eq!(polynomial.num_vars(), ck.nv, "Invalid size of polynomial"); 137 | let nv = polynomial.num_vars(); 138 | let mut r: Vec> = (0..nv + 1).map(|_| Vec::new()).collect(); 139 | let mut q: Vec> = (0..nv + 1).map(|_| Vec::new()).collect(); 140 | 141 | r[nv] = polynomial.to_evaluations(); 142 | 143 | let mut proofs = Vec::new(); 144 | for i in 0..nv { 145 | let k = nv - i; 146 | let point_at_k = point[i]; 147 | q[k] = (0..(1 << (k - 1))) 148 | .map(|_| E::ScalarField::zero()) 149 | .collect(); 150 | r[k - 1] = (0..(1 << (k - 1))) 151 | .map(|_| E::ScalarField::zero()) 152 | .collect(); 153 | for b in 0..(1 << (k - 1)) { 154 | q[k][b] = r[k][(b << 1) + 1] - &r[k][b << 1]; 155 | r[k - 1][b] = r[k][b << 1] * &(E::ScalarField::one() - &point_at_k) 156 | + &(r[k][(b << 1) + 1] * &point_at_k); 157 | } 158 | let scalars: Vec<_> = (0..(1 << k)) 159 | .map(|x| q[k][x >> 1].into_bigint()) // fine 160 | .collect(); 161 | 162 | let pi_h = 163 | ::msm_bigint(&ck.powers_of_h[i], &scalars).into_affine(); // no need to move outside and partition 164 | proofs.push(pi_h); 165 | } 166 | 167 | Proof { proofs } 168 | } 169 | 170 | /// Verifies that `value` is the evaluation at `x` of the polynomial 171 | /// committed inside `comm`. 172 | pub fn check<'a>( 173 | vk: &VerifierKey, 174 | commitment: &Commitment, 175 | point: &[E::ScalarField], 176 | value: E::ScalarField, 177 | proof: &Proof, 178 | ) -> bool { 179 | let left = E::pairing(commitment.g_product.into_group() - &vk.g.mul(value), vk.h); 180 | 181 | let g_mul = vk.g.into_group().batch_mul(point); 182 | 183 | let pairing_lefts: Vec<_> = (0..vk.nv) 184 | .map(|i| vk.g_mask_random[i].into_group() - &g_mul[i]) 185 | .collect(); 186 | let pairing_lefts: Vec = E::G1::normalize_batch(&pairing_lefts); 187 | let pairing_lefts: Vec = pairing_lefts 188 | .into_iter() 189 | .map(|x| E::G1Prepared::from(x)) 190 | .collect(); 191 | 192 | let pairing_rights: Vec = proof 193 | .proofs 194 | .iter() 195 | .map(|x| E::G2Prepared::from(*x)) 196 | .collect(); 197 | 198 | let right = E::multi_pairing(pairing_lefts, pairing_rights); 199 | left == right 200 | } 201 | } 202 | 203 | /// fix first `pad` variables of `poly` represented in evaluation form to zero 204 | fn remove_dummy_variable(poly: &[F], pad: usize) -> Vec { 205 | if pad == 0 { 206 | return poly.to_vec(); 207 | } 208 | if !poly.len().is_power_of_two() { 209 | panic!("Size of polynomial should be power of two. ") 210 | } 211 | let nv = ark_std::log2(poly.len()) as usize - pad; 212 | let table: Vec<_> = (0..(1 << nv)).map(|x| poly[x << pad]).collect(); 213 | table 214 | } 215 | 216 | /// generate eq(t,x), a product of multilinear polynomials with fixed t. 217 | /// eq(a,b) is takes extensions of a,b in {0,1}^num_vars such that if a and b in {0,1}^num_vars are equal 218 | /// then this polynomial evaluates to 1. 
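// Editor's aside: each factor produced below is t_i * x_i + (1 - t_i)(1 - x_i),
// which is 1 exactly when x_i = t_i on the Boolean cube; the product over all
// i is eq(t, x). The expression `ti_xi + ti_xi - xi - ti + 1` is its expansion
// 2*t*x - t - x + 1, saving one multiplication. Spot check (illustrative):
// t = x = 1 gives 2 - 1 - 1 + 1 = 1; t = 0, x = 1 gives 0 - 1 + 1 = 0.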
219 | fn eq_extension(t: &[F]) -> Vec> { 220 | let dim = t.len(); 221 | let mut result = Vec::new(); 222 | for i in 0..dim { 223 | let mut poly = Vec::with_capacity(1 << dim); 224 | for x in 0..(1 << dim) { 225 | let xi = if x >> i & 1 == 1 { F::one() } else { F::zero() }; 226 | let ti = t[i]; 227 | let ti_xi = ti * xi; 228 | poly.push(ti_xi + ti_xi - xi - ti + F::one()); 229 | } 230 | result.push(DenseMultilinearExtension::from_evaluations_vec(dim, poly)); 231 | } 232 | 233 | result 234 | } 235 | 236 | #[cfg(test)] 237 | mod tests { 238 | use crate::multilinear_pc::{data_structures::UniversalParams, MultilinearPC}; 239 | use ark_bls12_381::Bls12_381; 240 | use ark_ec::pairing::Pairing; 241 | use ark_poly::{ 242 | DenseMultilinearExtension, MultilinearExtension, Polynomial, SparseMultilinearExtension, 243 | }; 244 | #[cfg(not(feature = "std"))] 245 | use ark_std::vec::Vec; 246 | use ark_std::{rand::RngCore, test_rng, UniformRand}; 247 | type E = Bls12_381; 248 | type Fr = ::ScalarField; 249 | 250 | fn test_polynomial( 251 | uni_params: &UniversalParams, 252 | poly: &impl MultilinearExtension, 253 | rng: &mut R, 254 | ) { 255 | let nv = poly.num_vars(); 256 | assert_ne!(nv, 0); 257 | let (ck, vk) = MultilinearPC::::trim(&uni_params, nv); 258 | let point: Vec<_> = (0..nv).map(|_| Fr::rand(rng)).collect(); 259 | let com = MultilinearPC::commit(&ck, poly); 260 | let proof = MultilinearPC::open(&ck, poly, &point); 261 | 262 | let value = poly.evaluate(&point); 263 | let result = MultilinearPC::check(&vk, &com, &point, value, &proof); 264 | assert!(result); 265 | } 266 | 267 | #[test] 268 | fn setup_commit_verify_correct_polynomials() { 269 | let mut rng = test_rng(); 270 | 271 | // normal polynomials 272 | let uni_params = MultilinearPC::setup(10, &mut rng); 273 | 274 | let poly1 = DenseMultilinearExtension::rand(8, &mut rng); 275 | test_polynomial(&uni_params, &poly1, &mut rng); 276 | 277 | let poly2 = SparseMultilinearExtension::rand_with_config(9, 1 << 5, &mut rng); 278 | test_polynomial(&uni_params, &poly2, &mut rng); 279 | 280 | // single-variate polynomials 281 | 282 | let poly3 = DenseMultilinearExtension::rand(1, &mut rng); 283 | test_polynomial(&uni_params, &poly3, &mut rng); 284 | 285 | let poly4 = SparseMultilinearExtension::rand_with_config(1, 1 << 1, &mut rng); 286 | test_polynomial(&uni_params, &poly4, &mut rng); 287 | } 288 | 289 | #[test] 290 | #[should_panic] 291 | fn setup_commit_verify_constant_polynomial() { 292 | let mut rng = test_rng(); 293 | 294 | // normal polynomials 295 | MultilinearPC::::setup(0, &mut rng); 296 | } 297 | 298 | #[test] 299 | fn setup_commit_verify_incorrect_polynomial_should_return_false() { 300 | let mut rng = test_rng(); 301 | let nv = 8; 302 | let uni_params = MultilinearPC::setup(nv, &mut rng); 303 | let poly = DenseMultilinearExtension::rand(nv, &mut rng); 304 | let nv = uni_params.num_vars; 305 | let (ck, vk) = MultilinearPC::::trim(&uni_params, nv); 306 | let point: Vec<_> = (0..nv).map(|_| Fr::rand(&mut rng)).collect(); 307 | let com = MultilinearPC::commit(&ck, &poly); 308 | let proof = MultilinearPC::open(&ck, &poly, &point); 309 | 310 | let value = poly.evaluate(&point); 311 | let result = MultilinearPC::check(&vk, &com, &point, value + &(1u16.into()), &proof); 312 | assert!(!result); 313 | } 314 | } 315 | -------------------------------------------------------------------------------- /poly-commit/src/optional_rng.rs: -------------------------------------------------------------------------------- 1 | use ark_std::rand::{Error, RngCore}; 2 
| use core::num::NonZeroU32; 3 | 4 | /// `OptionalRng` is a hack that is necessary because `Option<&mut R>` is not implicitly reborrowed 5 | /// like `&mut R` is. This causes problems when a variable of type `Option<&mut R>` 6 | /// is moved (eg, in a loop). 7 | /// 8 | /// To overcome this, we define the wrapper `OptionalRng` here that can be borrowed 9 | /// mutably, without fear of being moved. 10 | pub struct OptionalRng(pub Option); 11 | 12 | impl RngCore for OptionalRng { 13 | #[inline] 14 | fn next_u32(&mut self) -> u32 { 15 | (&mut self.0) 16 | .as_mut() 17 | .map(|r| r.next_u32()) 18 | .expect("Rng was invoked in a non-hiding context") 19 | } 20 | 21 | #[inline] 22 | fn next_u64(&mut self) -> u64 { 23 | (&mut self.0) 24 | .as_mut() 25 | .map(|r| r.next_u64()) 26 | .expect("Rng was invoked in a non-hiding context") 27 | } 28 | 29 | #[inline] 30 | fn fill_bytes(&mut self, dest: &mut [u8]) { 31 | (&mut self.0) 32 | .as_mut() 33 | .map(|r| r.fill_bytes(dest)) 34 | .expect("Rng was invoked in a non-hiding context") 35 | } 36 | 37 | #[inline] 38 | fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { 39 | match &mut self.0 { 40 | Some(r) => r.try_fill_bytes(dest), 41 | None => Err(NonZeroU32::new(Error::CUSTOM_START).unwrap().into()), 42 | } 43 | } 44 | } 45 | 46 | impl From for OptionalRng { 47 | fn from(other: R) -> Self { 48 | Self(Some(other)) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /poly-commit/src/sonic_pc/data_structures.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | kzg10, BTreeMap, PCCommitterKey, PCPreparedCommitment, PCPreparedVerifierKey, PCVerifierKey, 3 | }; 4 | use ark_ec::{pairing::Pairing, AdditiveGroup}; 5 | use ark_serialize::{ 6 | CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError, Valid, Validate, 7 | }; 8 | use ark_std::io::{Read, Write}; 9 | #[cfg(not(feature = "std"))] 10 | use ark_std::vec::Vec; 11 | 12 | /// `UniversalParams` are the universal parameters for the KZG10 scheme. 13 | pub type UniversalParams = kzg10::UniversalParams; 14 | 15 | /// `Randomness` is the randomness for the KZG10 scheme. 16 | pub type Randomness = kzg10::Randomness; 17 | 18 | /// `Commitment` is the commitment for the KZG10 scheme. 19 | pub type Commitment = kzg10::Commitment; 20 | 21 | /// `PreparedCommitment` is the prepared commitment for the KZG10 scheme. 22 | pub type PreparedCommitment = kzg10::PreparedCommitment; 23 | 24 | impl PCPreparedCommitment> for PreparedCommitment { 25 | /// prepare `PreparedCommitment` from `Commitment` 26 | fn prepare(comm: &Commitment) -> Self { 27 | let mut prepared_comm = Vec::::new(); 28 | let mut cur = E::G1::from(comm.0.clone()); 29 | for _ in 0..128 { 30 | prepared_comm.push(cur.clone().into()); 31 | cur.double_in_place(); 32 | } 33 | 34 | Self { 0: prepared_comm } 35 | } 36 | } 37 | 38 | /// `ComitterKey` is used to commit to, and create evaluation proofs for, a given 39 | /// polynomial. 40 | #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] 41 | #[derivative( 42 | Default(bound = ""), 43 | Hash(bound = ""), 44 | Clone(bound = ""), 45 | Debug(bound = "") 46 | )] 47 | pub struct CommitterKey { 48 | /// The key used to commit to polynomials. 49 | pub powers_of_g: Vec, 50 | 51 | /// The key used to commit to hiding polynomials. 52 | pub powers_of_gamma_g: Vec, 53 | 54 | /// The powers used to commit to shifted polynomials. 
55 | /// This is `None` if `self` does not support enforcing any degree bounds. 56 | pub shifted_powers_of_g: Option>, 57 | 58 | /// The powers used to commit to shifted hiding polynomials. 59 | /// This is `None` if `self` does not support enforcing any degree bounds. 60 | pub shifted_powers_of_gamma_g: Option>>, 61 | 62 | /// The degree bounds that are supported by `self`. 63 | /// Sorted in ascending order from smallest bound to largest bound. 64 | /// This is `None` if `self` does not support enforcing any degree bounds. 65 | pub enforced_degree_bounds: Option>, 66 | 67 | /// The maximum degree supported by the `UniversalParams` from which `self` was derived 68 | pub max_degree: usize, 69 | } 70 | 71 | impl CommitterKey { 72 | /// Obtain powers for the underlying KZG10 construction 73 | pub fn powers(&self) -> kzg10::Powers { 74 | kzg10::Powers { 75 | powers_of_g: self.powers_of_g.as_slice().into(), 76 | powers_of_gamma_g: self.powers_of_gamma_g.as_slice().into(), 77 | } 78 | } 79 | 80 | /// Obtain powers for committing to shifted polynomials. 81 | pub fn shifted_powers( 82 | &self, 83 | degree_bound: impl Into>, 84 | ) -> Option> { 85 | match (&self.shifted_powers_of_g, &self.shifted_powers_of_gamma_g) { 86 | (Some(shifted_powers_of_g), Some(shifted_powers_of_gamma_g)) => { 87 | let max_bound = self 88 | .enforced_degree_bounds 89 | .as_ref() 90 | .unwrap() 91 | .last() 92 | .unwrap(); 93 | let (bound, powers_range) = if let Some(degree_bound) = degree_bound.into() { 94 | assert!(self 95 | .enforced_degree_bounds 96 | .as_ref() 97 | .unwrap() 98 | .contains(°ree_bound)); 99 | (degree_bound, (max_bound - degree_bound)..) 100 | } else { 101 | (*max_bound, 0..) 102 | }; 103 | 104 | let ck = kzg10::Powers { 105 | powers_of_g: shifted_powers_of_g[powers_range.clone()].into(), 106 | powers_of_gamma_g: shifted_powers_of_gamma_g[&bound].clone().into(), 107 | }; 108 | 109 | Some(ck) 110 | } 111 | 112 | (_, _) => None, 113 | } 114 | } 115 | } 116 | 117 | impl PCCommitterKey for CommitterKey { 118 | fn max_degree(&self) -> usize { 119 | self.max_degree 120 | } 121 | 122 | fn supported_degree(&self) -> usize { 123 | self.powers_of_g.len() - 1 124 | } 125 | } 126 | 127 | /// `VerifierKey` is used to check evaluation proofs for a given commitment. 128 | #[derive(Derivative)] 129 | #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] 130 | pub struct VerifierKey { 131 | /// The generator of G1. 132 | pub g: E::G1Affine, 133 | 134 | /// The generator of G1 that is used for making a commitment hiding. 135 | pub gamma_g: E::G1Affine, 136 | 137 | /// The generator of G2. 138 | pub h: E::G2Affine, 139 | 140 | /// \beta times the generator of G2. 141 | pub beta_h: E::G2Affine, 142 | 143 | /// The generator of G2, prepared for use in pairings. 144 | pub prepared_h: E::G2Prepared, 145 | 146 | /// The \beta times the generator of G2, prepared for use in pairings. 147 | pub prepared_beta_h: E::G2Prepared, 148 | 149 | /// Pairs a degree_bound with its corresponding G2 element, which has been prepared for use in pairings. 150 | /// Each pair is in the form `(degree_bound, \beta^{degree_bound - max_degree} h),` where `h` is the generator of G2 above 151 | pub degree_bounds_and_neg_powers_of_h: Option>, 152 | 153 | /// The maximum degree supported by the trimmed parameters that `self` is 154 | /// a part of. 155 | pub supported_degree: usize, 156 | 157 | /// The maximum degree supported by the `UniversalParams` `self` was derived 158 | /// from. 
159 | pub max_degree: usize, 160 | } 161 | 162 | impl VerifierKey { 163 | /// Find the appropriate shift for the degree bound. 164 | pub fn get_shift_power(&self, degree_bound: usize) -> Option { 165 | self.degree_bounds_and_neg_powers_of_h 166 | .as_ref() 167 | .and_then(|v| { 168 | v.binary_search_by(|(d, _)| d.cmp(°ree_bound)) 169 | .ok() 170 | .map(|i| v[i].1.clone().into()) 171 | }) 172 | } 173 | } 174 | 175 | impl Valid for VerifierKey { 176 | fn check(&self) -> Result<(), SerializationError> { 177 | self.g.check()?; 178 | self.gamma_g.check()?; 179 | self.h.check()?; 180 | self.beta_h.check()?; 181 | self.degree_bounds_and_neg_powers_of_h.check()?; 182 | if self.supported_degree > self.max_degree { 183 | return Err(SerializationError::InvalidData); 184 | } 185 | Ok(()) 186 | } 187 | } 188 | 189 | impl CanonicalSerialize for VerifierKey { 190 | fn serialize_with_mode( 191 | &self, 192 | mut writer: W, 193 | compress: Compress, 194 | ) -> Result<(), SerializationError> { 195 | self.g.serialize_with_mode(&mut writer, compress)?; 196 | self.gamma_g.serialize_with_mode(&mut writer, compress)?; 197 | self.h.serialize_with_mode(&mut writer, compress)?; 198 | self.beta_h.serialize_with_mode(&mut writer, compress)?; 199 | self.degree_bounds_and_neg_powers_of_h 200 | .serialize_with_mode(&mut writer, compress)?; 201 | self.supported_degree 202 | .serialize_with_mode(&mut writer, compress)?; 203 | self.max_degree.serialize_with_mode(&mut writer, compress) 204 | } 205 | 206 | fn serialized_size(&self, compress: Compress) -> usize { 207 | self.g.serialized_size(compress) 208 | + self.gamma_g.serialized_size(compress) 209 | + self.h.serialized_size(compress) 210 | + self.beta_h.serialized_size(compress) 211 | + self 212 | .degree_bounds_and_neg_powers_of_h 213 | .serialized_size(compress) 214 | + self.supported_degree.serialized_size(compress) 215 | + self.max_degree.serialized_size(compress) 216 | } 217 | } 218 | 219 | impl CanonicalDeserialize for VerifierKey { 220 | fn deserialize_with_mode( 221 | mut reader: R, 222 | compress: Compress, 223 | validate: Validate, 224 | ) -> Result { 225 | let g = E::G1Affine::deserialize_with_mode(&mut reader, compress, Validate::No)?; 226 | let gamma_g = E::G1Affine::deserialize_with_mode(&mut reader, compress, Validate::No)?; 227 | let h = E::G2Affine::deserialize_with_mode(&mut reader, compress, Validate::No)?; 228 | let beta_h = E::G2Affine::deserialize_with_mode(&mut reader, compress, Validate::No)?; 229 | let degree_bounds_and_neg_powers_of_h = 230 | Option::>::deserialize_with_mode( 231 | &mut reader, 232 | compress, 233 | Validate::No, 234 | )?; 235 | let supported_degree = usize::deserialize_with_mode(&mut reader, compress, Validate::No)?; 236 | let max_degree = usize::deserialize_with_mode(&mut reader, compress, Validate::No)?; 237 | 238 | let prepared_h = E::G2Prepared::from(h.clone()); 239 | let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); 240 | 241 | let result = Self { 242 | g, 243 | gamma_g, 244 | h, 245 | beta_h, 246 | prepared_h, 247 | prepared_beta_h, 248 | degree_bounds_and_neg_powers_of_h, 249 | supported_degree, 250 | max_degree, 251 | }; 252 | 253 | if let Validate::Yes = validate { 254 | result.check()?; 255 | } 256 | 257 | Ok(result) 258 | } 259 | } 260 | 261 | impl PCVerifierKey for VerifierKey { 262 | fn max_degree(&self) -> usize { 263 | self.max_degree 264 | } 265 | 266 | fn supported_degree(&self) -> usize { 267 | self.supported_degree 268 | } 269 | } 270 | 271 | /// Nothing to do to prepare this verifier key (for now). 
272 | pub type PreparedVerifierKey = VerifierKey; 273 | 274 | impl PCPreparedVerifierKey> for PreparedVerifierKey { 275 | /// prepare `PreparedVerifierKey` from `VerifierKey` 276 | fn prepare(vk: &VerifierKey) -> Self { 277 | vk.clone() 278 | } 279 | } 280 | 281 | /// Evaluation proof at a query set. 282 | #[derive(Derivative)] 283 | #[derivative( 284 | Default(bound = ""), 285 | Hash(bound = ""), 286 | Clone(bound = ""), 287 | Debug(bound = ""), 288 | PartialEq(bound = ""), 289 | Eq(bound = "") 290 | )] 291 | pub struct BatchProof(pub(crate) Vec>); 292 | -------------------------------------------------------------------------------- /poly-commit/src/streaming_kzg/data_structures.rs: -------------------------------------------------------------------------------- 1 | use crate::utils::ceil_div; 2 | use ark_ff::Field; 3 | #[cfg(not(feature = "std"))] 4 | use ark_std::vec::Vec; 5 | use ark_std::{borrow::Borrow, iterable::Iterable}; 6 | 7 | /// A `Streamer` folding a vector of coefficients 8 | /// with the given challenges, and producing a stream of items 9 | /// `(i, v)` where `i` indicates the depth, and `v` is the next coefficient. 10 | /// The stream can produce all foldings in the tree with a single pass over the initial stream. 11 | #[derive(Clone, Copy)] 12 | pub struct FoldedPolynomialTree<'a, F, S> { 13 | challenges: &'a [F], 14 | coefficients: &'a S, 15 | } 16 | 17 | impl<'a, F, S> FoldedPolynomialTree<'a, F, S> 18 | where 19 | S: Iterable, 20 | F: Field, 21 | S::Item: Borrow, 22 | { 23 | /// Initialize a new polynomial tree. 24 | pub fn new(coefficients: &'a S, challenges: &'a [F]) -> Self { 25 | Self { 26 | coefficients, 27 | challenges, 28 | } 29 | } 30 | 31 | /// Outputs the depth of the polynomial tree. 32 | #[inline] 33 | pub fn depth(&self) -> usize { 34 | self.challenges.len() 35 | } 36 | } 37 | 38 | impl<'a, F, S> Iterable for FoldedPolynomialTree<'a, F, S> 39 | where 40 | S: Iterable, 41 | F: Field, 42 | S::Item: Borrow, 43 | { 44 | type Item = (usize, F); 45 | 46 | type Iter = FoldedPolynomialTreeIter<'a, F, S::Iter>; 47 | 48 | fn iter(&self) -> Self::Iter { 49 | FoldedPolynomialTreeIter::new( 50 | self.coefficients.iter(), 51 | self.coefficients.len(), 52 | self.challenges, 53 | ) 54 | } 55 | 56 | fn len(&self) -> usize { 57 | self.coefficients.len() 58 | } 59 | } 60 | 61 | /// Iterator of the polynomial tree. 62 | pub struct FoldedPolynomialTreeIter<'a, F, I> { 63 | challenges: &'a [F], 64 | iterator: I, 65 | stack: Vec<(usize, F)>, 66 | } 67 | 68 | fn init_stack(n: usize, challenges_len: usize) -> Vec<(usize, F)> { 69 | let mut stack = Vec::with_capacity(challenges_len); 70 | 71 | // generally we expect the size to be a power of two. 72 | // If not, we are going to fill the stack as if the array was padded to zero up to the expected size. 
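// Worked example (illustrative): n = 6 with challenges_len = 2 gives
// chunk_size = 4 and delta = 4 - (6 % 4) = 2, so the loop pre-pushes one
// (1, F::zero()) entry -- two virtual zero coefficients already folded to
// level 1 -- and the stream behaves as if it were zero-padded to length 8.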
73 | let chunk_size = 1 << challenges_len; 74 | if n % chunk_size != 0 { 75 | let mut delta = chunk_size - n % chunk_size; 76 | for i in (0..challenges_len).rev() { 77 | if delta >= 1 << i { 78 | stack.push((i, F::zero())); 79 | delta -= 1 << i 80 | } 81 | } 82 | } 83 | stack 84 | } 85 | 86 | impl<'a, F, I> FoldedPolynomialTreeIter<'a, F, I> 87 | where 88 | F: Field, 89 | I: Iterator, 90 | I::Item: Borrow, 91 | { 92 | fn new(iterator: I, n: usize, challenges: &'a [F]) -> Self { 93 | let stack = init_stack(n, challenges.len()); 94 | 95 | Self { 96 | challenges, 97 | iterator, 98 | stack, 99 | } 100 | } 101 | } 102 | 103 | impl<'a, F, I> Iterator for FoldedPolynomialTreeIter<'a, F, I> 104 | where 105 | F: Field, 106 | I: Iterator, 107 | I::Item: Borrow, 108 | { 109 | type Item = (usize, F); 110 | 111 | fn next(&mut self) -> Option<::Item> { 112 | let len = self.stack.len(); 113 | let item = if len > 1 && self.stack[len - 1].0 == self.stack[len - 2].0 { 114 | // pop the last two elements from the stack. 115 | // we could also use .pop() twice but truncate is slightly faster. 116 | let (_level, lhs) = self.stack[len - 1]; 117 | let (level, rhs) = self.stack[len - 2]; 118 | self.stack.truncate(len - 2); 119 | // fold them producing the coefficient and the level `level+1` 120 | let folded_coefficient = rhs * self.challenges[level] + lhs; 121 | (level + 1, folded_coefficient) 122 | } else { 123 | (0, *self.iterator.next()?.borrow()) 124 | }; 125 | 126 | // do not add to the stack the coefficient of the max-depth folded polynomial. 127 | if item.0 != self.challenges.len() { 128 | self.stack.push(item) 129 | } 130 | 131 | // Skip the base polynomial, recursively calling itself to access the next level 132 | if item.0 == 0 { 133 | self.next() 134 | } else { 135 | Some(item) 136 | } 137 | } 138 | } 139 | 140 | /// Stream implementation of foleded polynomial. 141 | #[derive(Clone, Copy)] 142 | pub struct FoldedPolynomialStream<'a, F, S>(FoldedPolynomialTree<'a, F, S>); 143 | /// Iterator implementation of foleded polynomial. 144 | pub struct FoldedPolynomialStreamIter<'a, F, I> { 145 | challenges: &'a [F], 146 | iterator: I, 147 | stack: Vec<(usize, F)>, 148 | } 149 | 150 | impl<'a, F, S> FoldedPolynomialStream<'a, F, S> 151 | where 152 | S: Iterable, 153 | F: Field, 154 | S::Item: Borrow, 155 | { 156 | /// Initialize a new folded polynomial stream. 
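// Editor's aside: the stream yields the coefficients of the fully folded
// polynomial. At each level, f(x) = f_e(x^2) + x * f_o(x^2) is replaced by
// f_e + ch * f_o. Note that streams in this module are big-endian (leading
// coefficient first); the sketch below works on little-endian slices for
// clarity and is our own helper, not part of this crate:
//
//     use ark_ff::{Field, Zero};
//
//     fn fold_once<F: Field>(coeffs_le: &[F], ch: F) -> Vec<F> {
//         coeffs_le
//             .chunks(2)
//             .map(|p| p[0] + ch * p.get(1).copied().unwrap_or_else(F::zero))
//             .collect()
//     }
//
// Folding [1, 1, 2, 1] (i.e. x^3 + 2x^2 + x + 1, streamed big-endian as
// [1, 2, 1, 1]) with challenges [1, 2] gives [2, 3] and then [8], matching
// `test_folded_polynomial` below.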
157 | pub fn new(coefficients: &'a S, challenges: &'a [F]) -> Self { 158 | let tree = FoldedPolynomialTree::new(coefficients, challenges); 159 | Self(tree) 160 | } 161 | } 162 | 163 | impl<'a, F, S> Iterable for FoldedPolynomialStream<'a, F, S> 164 | where 165 | S: Iterable, 166 | F: Field, 167 | S::Item: Borrow, 168 | { 169 | type Item = F; 170 | type Iter = FoldedPolynomialStreamIter<'a, F, S::Iter>; 171 | 172 | fn iter(&self) -> Self::Iter { 173 | let iterator = self.0.coefficients.iter(); 174 | let challenges = self.0.challenges; 175 | let stack = init_stack(self.0.coefficients.len(), challenges.len()); 176 | FoldedPolynomialStreamIter { 177 | iterator, 178 | challenges, 179 | stack, 180 | } 181 | } 182 | 183 | fn len(&self) -> usize { 184 | ceil_div(self.0.len(), 1 << self.0.challenges.len()) 185 | } 186 | } 187 | 188 | impl<'a, F, I> Iterator for FoldedPolynomialStreamIter<'a, F, I> 189 | where 190 | F: Field, 191 | I: Iterator, 192 | I::Item: Borrow, 193 | { 194 | type Item = F; 195 | 196 | fn next(&mut self) -> Option { 197 | let target_level = self.challenges.len(); 198 | loop { 199 | let len = self.stack.len(); 200 | let (level, element) = if len > 1 && self.stack[len - 1].0 == self.stack[len - 2].0 { 201 | let (_level, lhs) = self.stack[len - 1]; 202 | let (level, rhs) = self.stack[len - 2]; 203 | self.stack.truncate(len - 2); 204 | 205 | let folded_coefficient = rhs * self.challenges[level] + lhs; 206 | (level + 1, folded_coefficient) 207 | } else if target_level > 0 && (len == 0 || (len > 0 && self.stack[len - 1].0 != 0)) { 208 | // If the target level is strictly positive, there's no need to put elements of level zero in the stream. 209 | // We can immediately read 2 elements from the stream and push an element of the form (1, folded_coefficient). 210 | // Nota bene: this branch is not needed, but brings in a decent speed-up for the resulting implementation. 211 | let rhs = self.iterator.next()?; 212 | let lhs = self.iterator.next()?; 213 | 214 | let folded_coefficient = self.challenges[0] * rhs.borrow() + lhs.borrow(); 215 | (1, folded_coefficient) 216 | } else { 217 | (0, *self.iterator.next()?.borrow()) 218 | }; 219 | 220 | // do not add to the stack the coefficient of the folded polynomial, but instead return it. 
221 | if level != target_level { 222 | self.stack.push((level, element)) 223 | } else { 224 | return Some(element); 225 | } 226 | } 227 | } 228 | } 229 | 230 | #[test] 231 | fn test_folded_polynomial() { 232 | use ark_bls12_381::Fr as F; 233 | use ark_ff::One; 234 | 235 | let two = F::one() + F::one(); 236 | 237 | let coefficients = vec![F::one(), two, F::one(), F::one()]; 238 | let challenges = vec![F::one(), two]; 239 | let coefficients_stream = coefficients.as_slice(); 240 | let foldstream = FoldedPolynomialTree::new(&coefficients_stream, challenges.as_slice()); 241 | let fold_stream = FoldedPolynomialStream(foldstream); 242 | assert_eq!(fold_stream.len(), 1); 243 | assert_eq!( 244 | fold_stream.iter().next(), 245 | Some(two + two * (F::one() + two)) 246 | ); 247 | 248 | let one = F::one(); 249 | let coefficients = vec![one; 12]; 250 | let challenges = vec![F::one(); 4]; 251 | let coefficients_stream = coefficients.as_slice(); 252 | let foldstream = FoldedPolynomialTree::new(&coefficients_stream, challenges.as_slice()); 253 | let fold_stream = FoldedPolynomialStream(foldstream).iter(); 254 | assert_eq!(fold_stream.last(), Some(coefficients.iter().sum())); 255 | } 256 | 257 | #[test] 258 | fn test_folded_polynomial_tree() { 259 | use ark_bls12_381::Fr as F; 260 | use ark_ff::One; 261 | 262 | let two = F::one() + F::one(); 263 | 264 | let coefficients = vec![F::one(), two, F::one(), F::one()]; 265 | let challenges = vec![F::one(), two]; 266 | let coefficients_stream = coefficients.as_slice(); 267 | let fold_streamer = FoldedPolynomialTree::new(&coefficients_stream, challenges.as_slice()); 268 | let mut fold_iter = fold_streamer.iter(); 269 | // assert_eq!(fold_stream.next(), Some((0, F::one()))); 270 | // assert_eq!(fold_stream.next(), Some((0, two))); 271 | assert_eq!(fold_iter.next(), Some((1, F::one() + two))); 272 | // assert_eq!(fold_stream.next(), Some((0, F::one()))); 273 | // assert_eq!(fold_stream.next(), Some((0, F::one()))); 274 | assert_eq!(fold_iter.next(), Some((1, F::one() + F::one()))); 275 | assert_eq!(fold_iter.next(), Some((2, two + two * (F::one() + two)))); 276 | 277 | let one = F::one(); 278 | let coefficients = vec![one; 12]; 279 | let challenges = vec![F::one(); 4]; 280 | let coefficients_stream = coefficients.as_slice(); 281 | let fold_streamer = FoldedPolynomialTree::new(&coefficients_stream, challenges.as_slice()); 282 | let fold_init = fold_streamer.iter(); 283 | let mut fold_iter = fold_init.skip(5); 284 | assert_eq!(fold_iter.next(), Some((1, two))); 285 | assert_eq!(fold_iter.last(), Some((4, coefficients.iter().sum()))); 286 | } 287 | -------------------------------------------------------------------------------- /poly-commit/src/streaming_kzg/space.rs: -------------------------------------------------------------------------------- 1 | //! Space-efficient implementation of the polynomial commitment of Kate et al. 
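// Editor's aside: the streaming `open` below relies on synthetic division.
// Streaming the coefficients of p big-endian (leading coefficient first), the
// running Horner accumulator produces, in order, the big-endian coefficients
// of the quotient q(x) = (p(x) - p(alpha)) / (x - alpha), and ends at
// p(alpha); each accumulator value can therefore be paired with an SRS
// element on the fly. Standalone check (illustrative, not crate API):
//
//     use ark_bls12_381::Fr as F;
//     use ark_ff::Zero;
//
//     let p = [F::from(2u64), F::from(3u64), F::from(5u64)]; // 2x^2 + 3x + 5
//     let alpha = F::from(7u64);
//     let mut quotient = Vec::new();
//     let mut acc = F::zero();
//     for c in p {
//         quotient.push(acc); // the leading zero is padding, as in `open`
//         acc = acc * alpha + c;
//     }
//     assert_eq!(acc, F::from(124u64)); // p(7) = 98 + 21 + 5
//     assert_eq!(&quotient[1..], &[F::from(2u64), F::from(17u64)]); // q = 2x + 17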
2 | use crate::{ 3 | streaming_kzg::{ 4 | time::CommitterKey, vanishing_polynomial, Commitment, EvaluationProof, 5 | FoldedPolynomialTree, VerifierKey, 6 | }, 7 | utils::ceil_div, 8 | }; 9 | use ark_ec::{ 10 | pairing::Pairing, 11 | scalar_mul::variable_base::{ChunkedPippenger, HashMapPippenger, VariableBaseMSM}, 12 | CurveGroup, 13 | }; 14 | use ark_ff::{PrimeField, Zero}; 15 | use ark_poly::Polynomial; 16 | #[cfg(not(feature = "std"))] 17 | use ark_std::vec::Vec; 18 | use ark_std::{ 19 | borrow::Borrow, 20 | collections::VecDeque, 21 | iterable::{Iterable, Reverse}, 22 | }; 23 | 24 | const LENGTH_MISMATCH_MSG: &str = "Expecting at least one element in the committer key."; 25 | 26 | /// The streaming SRS for the polynomial commitment scheme consists of a stream of consecutive powers of g. 27 | /// It also implements functions for `setup`, `commit` and `open`. 28 | #[derive(Clone)] 29 | pub struct CommitterKeyStream 30 | where 31 | E: Pairing, 32 | SG: Iterable, 33 | SG::Item: Borrow, 34 | { 35 | /// Stream of G1 elements. 36 | pub powers_of_g: SG, 37 | /// Two G2 elements needed for the committer. 38 | pub powers_of_g2: Vec, 39 | } 40 | 41 | impl CommitterKeyStream 42 | where 43 | E: Pairing, 44 | SG: Iterable, 45 | SG::Item: Borrow, 46 | { 47 | /// Turn a streaming SRS into a normal SRS. 48 | pub fn as_committer_key(&self, max_degree: usize) -> CommitterKey { 49 | let offset = self.powers_of_g.len() - max_degree; 50 | let mut powers_of_g = self 51 | .powers_of_g 52 | .iter() 53 | .skip(offset) 54 | .map(|x| *x.borrow()) 55 | .collect::>(); 56 | powers_of_g.reverse(); 57 | let powers_of_g2 = self.powers_of_g2.clone().to_vec(); 58 | CommitterKey { 59 | powers_of_g, 60 | powers_of_g2, 61 | } 62 | } 63 | 64 | /// Evaluate a single polynomial at the point `alpha`, and provide an evaluation proof along with the evaluation. 65 | pub fn open( 66 | &self, 67 | polynomial: &SF, 68 | alpha: &E::ScalarField, 69 | max_msm_buffer: usize, 70 | ) -> (E::ScalarField, EvaluationProof) 71 | where 72 | SF: Iterable, 73 | SF::Item: Borrow, 74 | { 75 | let mut quotient: ChunkedPippenger = ChunkedPippenger::new(max_msm_buffer); 76 | 77 | let bases_init = self.powers_of_g.iter(); 78 | let scalars = polynomial.iter(); 79 | 80 | // align the streams and remove one degree 81 | // TODO: change `skip` to `advance_by` once rust-lang/rust#7774 is fixed. 82 | // See 83 | let bases = bases_init.skip(self.powers_of_g.len() - polynomial.len()); 84 | 85 | let mut previous = E::ScalarField::zero(); 86 | for (scalar, base) in scalars.zip(bases) { 87 | quotient.add(base, previous.into_bigint()); 88 | let coefficient = previous * alpha + scalar.borrow(); 89 | previous = coefficient; 90 | } 91 | 92 | let evaluation = previous; 93 | let evaluation_proof = quotient.finalize().into_affine(); 94 | (evaluation, EvaluationProof(evaluation_proof)) 95 | } 96 | 97 | /// Evaluate a single polynomial at a set of points `points`, and provide an evaluation proof along with evaluations. 98 | pub fn open_multi_points( 99 | &self, 100 | polynomial: &SF, 101 | points: &[E::ScalarField], 102 | max_msm_buffer: usize, 103 | ) -> (Vec, EvaluationProof) 104 | where 105 | SF: Iterable, 106 | SF::Item: Borrow, 107 | { 108 | let zeros = vanishing_polynomial(points); 109 | let mut quotient: ChunkedPippenger = ChunkedPippenger::new(max_msm_buffer); 110 | let bases_init = self.powers_of_g.iter(); 111 | // TODO: change `skip` to `advance_by` once rust-lang/rust#7774 is fixed. 
112 | // See 113 | let mut bases = bases_init.skip(self.powers_of_g.len() - polynomial.len() + zeros.degree()); 114 | 115 | let mut state = VecDeque::::with_capacity(points.len()); 116 | 117 | let mut polynomial_iterator = polynomial.iter(); 118 | 119 | (0..points.len()).for_each(|_| { 120 | state.push_back(*polynomial_iterator.next().unwrap().borrow()); 121 | }); 122 | 123 | for coefficient in polynomial_iterator { 124 | let coefficient = coefficient.borrow(); 125 | let quotient_coefficient = state.pop_front().unwrap(); 126 | state.push_back(*coefficient); 127 | (0..points.len()).for_each(|i| { 128 | state[i] -= zeros.coeffs[zeros.degree() - i - 1] * quotient_coefficient; 129 | }); 130 | let base = bases.next().unwrap(); 131 | quotient.add(base, quotient_coefficient.into_bigint()); 132 | } 133 | let remainder = state.make_contiguous().to_vec(); 134 | let commitment = EvaluationProof(quotient.finalize().into_affine()); 135 | (remainder, commitment) 136 | } 137 | 138 | /// The commitment procedures, that takes as input a committer key and the streaming coefficients of polynomial, and produces the desired commitment. 139 | pub fn commit(&self, polynomial: &SF) -> Commitment 140 | where 141 | SF: Iterable, 142 | SF::Item: Borrow, 143 | { 144 | assert!(self.powers_of_g.len() >= polynomial.len()); 145 | 146 | Commitment( 147 | ::msm_chunks(&self.powers_of_g, polynomial).into_affine(), 148 | ) 149 | } 150 | 151 | /// The batch commitment procedures, that takes as input a committer key and the streaming coefficients of a list of polynomials, and produces the desired commitments. 152 | pub fn batch_commit<'a, F>( 153 | &self, 154 | polynomials: &[&'a dyn Iterable>], 155 | ) -> Vec> 156 | where 157 | F: Borrow, 158 | { 159 | polynomials.iter().map(|&p| self.commit(p)).collect() 160 | } 161 | 162 | /// The commitment procedures for our tensor check protocol. 163 | /// The algorithm takes advantage of the tree structure of folding polynomials in our protocol. Please refer to our paper for more details. 164 | /// The function takes as input a committer key and the tree structure of all the folding polynomials, and produces the desired commitment for each polynomial. 165 | pub fn commit_folding( 166 | &self, 167 | polynomials: &FoldedPolynomialTree, 168 | max_msm_buffer: usize, 169 | ) -> Vec> 170 | where 171 | SF: Iterable, 172 | SF::Item: Borrow, 173 | { 174 | let n = polynomials.depth(); 175 | let mut pippengers: Vec> = Vec::new(); 176 | let mut folded_bases = Vec::new(); 177 | for i in 1..n + 1 { 178 | let pippenger: ChunkedPippenger<::G1> = 179 | ChunkedPippenger::with_size(max_msm_buffer / n); 180 | let bases_init = self.powers_of_g.iter(); 181 | 182 | let delta = self.powers_of_g.len() - ceil_div(polynomials.len(), 1 << i); 183 | // TODO: change `skip` to `advance_by` once rust-lang/rust#7774 is fixed. 184 | // See 185 | let bases = bases_init.skip(delta); 186 | folded_bases.push(bases); 187 | pippengers.push(pippenger); 188 | } 189 | 190 | for (i, coefficient) in polynomials.iter() { 191 | let base = folded_bases[i - 1].next().unwrap(); 192 | pippengers[i - 1].add(base.borrow(), coefficient.into_bigint()); 193 | } 194 | 195 | pippengers 196 | .into_iter() 197 | .map(|p| Commitment(p.finalize().into_affine())) 198 | .collect::>() 199 | } 200 | 201 | /// The commitment procedures for our tensor check protocol. 202 | /// The algorithm takes advantage of the tree structure of folding polynomials in our protocol. Please refer to our paper for more details. 
203 | /// The function evaluates all the folding polynomials at a set of evaluation points `points` and produces a single batched evaluation proof. 204 | /// `eta` is the random challenge for batching folding polynomials. 205 | pub fn open_folding<'a, SF>( 206 | &self, 207 | polynomials: FoldedPolynomialTree<'a, E::ScalarField, SF>, 208 | points: &[E::ScalarField], 209 | etas: &[E::ScalarField], 210 | max_msm_buffer: usize, 211 | ) -> (Vec>, EvaluationProof) 212 | where 213 | SG: Iterable, 214 | SF: Iterable, 215 | E: Pairing, 216 | SG::Item: Borrow, 217 | SF::Item: Borrow + Copy, 218 | { 219 | let n = polynomials.depth(); 220 | let mut pippenger = HashMapPippenger::::new(max_msm_buffer); 221 | let mut folded_bases = Vec::new(); 222 | let zeros = vanishing_polynomial(points); 223 | let mut remainders = vec![VecDeque::new(); n]; 224 | 225 | for i in 1..n + 1 { 226 | let bases_init = self.powers_of_g.iter(); 227 | let delta = self.powers_of_g.len() - ceil_div(polynomials.len(), 1 << i); 228 | // TODO: change `skip` to `advance_by` once rust-lang/rust#7774 is fixed. 229 | // See 230 | let bases = bases_init.skip(delta); 231 | 232 | (0..points.len()).for_each(|_| { 233 | remainders[i - 1].push_back(E::ScalarField::zero()); 234 | }); 235 | 236 | folded_bases.push(bases); 237 | } 238 | 239 | for (i, coefficient) in polynomials.iter() { 240 | if i == 0 { 241 | continue; 242 | } // XXX. skip the 0th elements automatically 243 | 244 | let base = folded_bases[i - 1].next().unwrap(); 245 | let quotient_coefficient = remainders[i - 1].pop_front().unwrap(); 246 | remainders[i - 1].push_back(coefficient); 247 | (0..points.len()).for_each(|j| { 248 | remainders[i - 1][j] -= zeros.coeffs[zeros.degree() - j - 1] * quotient_coefficient; 249 | }); 250 | 251 | let scalar = etas[i - 1] * quotient_coefficient; 252 | pippenger.add(base, scalar); 253 | } 254 | 255 | let evaluation_proof = pippenger.finalize().into_affine(); 256 | let remainders = remainders 257 | .iter_mut() 258 | .map(|x| x.make_contiguous().to_vec()) 259 | .collect::>(); 260 | 261 | (remainders, EvaluationProof(evaluation_proof)) 262 | } 263 | } 264 | 265 | impl<'a, E: Pairing> From<&'a CommitterKey> 266 | for CommitterKeyStream> 267 | { 268 | fn from(ck: &'a CommitterKey) -> Self { 269 | CommitterKeyStream { 270 | powers_of_g: Reverse(ck.powers_of_g.as_slice()), 271 | powers_of_g2: ck.powers_of_g2.clone(), 272 | } 273 | } 274 | } 275 | 276 | impl From<&CommitterKeyStream> for VerifierKey 277 | where 278 | E: Pairing, 279 | SG: Iterable, 280 | SG::Item: Borrow, 281 | { 282 | fn from(ck: &CommitterKeyStream) -> Self { 283 | let powers_of_g2 = ck.powers_of_g2.to_vec(); 284 | // take the first element from the stream 285 | let g = *ck 286 | .powers_of_g 287 | .iter() 288 | .last() 289 | .expect(LENGTH_MISMATCH_MSG) 290 | .borrow(); 291 | Self { 292 | powers_of_g2, 293 | powers_of_g: vec![g], 294 | } 295 | } 296 | } 297 | -------------------------------------------------------------------------------- /poly-commit/src/streaming_kzg/tests.rs: -------------------------------------------------------------------------------- 1 | use crate::streaming_kzg::{ 2 | space::CommitterKeyStream, time::CommitterKey, vanishing_polynomial, VerifierKey, 3 | }; 4 | use ark_bls12_381::{Bls12_381, Fr}; 5 | use ark_ff::Field; 6 | use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; 7 | #[cfg(not(feature = "std"))] 8 | use ark_std::vec::Vec; 9 | use ark_std::{ 10 | borrow::Borrow, 11 | iterable::{Iterable, Reverse}, 12 | UniformRand, Zero, 13 | }; 14 | 15 | 
/// Polynomial evaluation, assuming that the
/// coefficients are in little-endian.
#[inline]
fn evaluate_le<F>(polynomial: &[F], x: &F) -> F
where
    F: Field,
{
    evaluate_be(polynomial.iter().rev(), x)
}

/// Polynomial evaluation, assuming that the
/// coefficients are in big-endian.
#[inline]
fn evaluate_be<I, F>(polynomial: I, x: &F) -> F
where
    F: Field,
    I: IntoIterator,
    I::Item: Borrow<F>,
{
    polynomial
        .into_iter()
        .fold(F::zero(), |previous, c| previous * x + c.borrow())
}

#[test]
fn test_commitment_consistency() {
    let rng = &mut ark_std::test_rng();
    let d = 15;
    let polynomial = DensePolynomial::<Fr>::rand(d, rng);
    let polynomial_stream = Reverse(polynomial.coeffs());
    let time_ck = CommitterKey::<Bls12_381>::new(d + 1, 3, rng);
    let space_ck = CommitterKeyStream::from(&time_ck);

    // compute the time commitment
    let time_commitment = time_ck.commit(&polynomial);
    // compute the space commitment
    let space_commitment = space_ck.commit(&polynomial_stream);

    assert_eq!(space_commitment, time_commitment);
}

#[test]
fn test_ck_consistency() {
    use ark_bls12_381::Bls12_381;

    let rng = &mut ark_std::test_rng();
    let time_ck = CommitterKey::<Bls12_381>::new(10, 3, rng);
    let space_ck = CommitterKeyStream::from(&time_ck);
    // Make sure that there are enough elements for the entire array.
    assert_eq!(time_ck.powers_of_g.len(), space_ck.powers_of_g.len());
}

#[test]
fn test_open_consistency() {
    let rng = &mut ark_std::test_rng();
    let d = 15;
    let max_msm_buffer = 1 << 20;
    let polynomial = DensePolynomial::<Fr>::rand(d, rng);
    let polynomial_stream = Reverse(polynomial.coeffs());
    let time_ck = CommitterKey::<Bls12_381>::new(d + 1, 3, rng);
    let space_ck = CommitterKeyStream::from(&time_ck);
    let alpha = Fr::rand(rng);

    // compute the time opening
    let (time_evaluation, time_open) = time_ck.open(&polynomial, &alpha);
    // compute the space opening, and check that the two coincide
    let (space_evaluation, space_open) = space_ck.open(&polynomial_stream, &alpha, max_msm_buffer);
    assert_eq!(time_evaluation, space_evaluation);
    assert_eq!(time_open, space_open);
}

#[test]
fn test_open_multipoints_correctness() {
    let mut rng = &mut ark_std::test_rng();
    let d = 100;

    let eval_points = (0..5).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let polynomials = (0..15)
        .map(|_| DensePolynomial::<Fr>::rand(d, rng).coeffs)
        .collect::<Vec<_>>();
    let evals = polynomials
        .iter()
        .map(|p| {
            eval_points
                .iter()
                .map(|e| evaluate_le(p, e))
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();

    let time_ck = CommitterKey::<Bls12_381>::new(d + 1, eval_points.len(), rng);
    let time_vk = VerifierKey::from(&time_ck);

    let time_batched_commitments = time_ck.batch_commit(&polynomials);

    let eta: Fr = u128::rand(&mut rng).into();

    let proof = time_ck.batch_open_multi_points(
        &polynomials.iter().collect::<Vec<_>>()[..],
        &eval_points,
        &eta,
    );

    let verification_result = time_vk.verify_multi_points(
        &time_batched_commitments,
        &eval_points,
        &evals,
        &proof,
        &eta,
    );

    assert!(verification_result.is_ok());
}

#[test]
fn test_vanishing_polynomial() {
    use ark_bls12_381::Fr as F;
    use ark_ff::Zero;

    let points = [
        F::from(10u64),
        F::from(5u64),
        F::from(13u64),
    ];
    let zeros = vanishing_polynomial(&points);
    assert_eq!(evaluate_le(&zeros, &points[0]), F::zero());
    assert_eq!(evaluate_le(&zeros, &points[1]), F::zero());
    assert_eq!(evaluate_le(&zeros, &points[2]), F::zero());
}

#[test]
fn test_srs() {
    use ark_bls12_381::Bls12_381;

    let rng = &mut ark_std::test_rng();
    let ck = CommitterKey::<Bls12_381>::new(10, 3, rng);
    let vk = VerifierKey::from(&ck);
    // Make sure that there are enough elements for the entire array.
    assert_eq!(ck.powers_of_g.len(), 11);
    assert_eq!(ck.powers_of_g2, &vk.powers_of_g2[..]);
}

#[test]
fn test_trivial_commitment() {
    use ark_bls12_381::Bls12_381;
    use ark_bls12_381::Fr;
    use ark_poly::univariate::DensePolynomial;
    use ark_poly::DenseUVPolynomial;
    use ark_std::One;

    let rng = &mut ark_std::test_rng();
    let ck = CommitterKey::<Bls12_381>::new(10, 3, rng);
    let vk = VerifierKey::from(&ck);
    let polynomial = DensePolynomial::from_coefficients_slice(&[Fr::zero(), Fr::one(), Fr::one()]);
    let alpha = Fr::zero();

    let commitment = ck.commit(&polynomial);
    let (evaluation, proof) = ck.open(&polynomial, &alpha);
    assert_eq!(evaluation, Fr::zero());
    assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
}

#[test]
fn test_commitment() {
    use ark_bls12_381::Bls12_381;
    use ark_bls12_381::Fr;
    use ark_poly::univariate::DensePolynomial;
    use ark_poly::DenseUVPolynomial;
    use ark_poly::Polynomial;

    let rng = &mut ark_std::test_rng();
    let ck = CommitterKey::<Bls12_381>::new(100, 3, rng);
    let vk = VerifierKey::from(&ck);
    let polynomial = DensePolynomial::rand(100, rng);
    let alpha = Fr::zero();

    let commitment = ck.commit(&polynomial);
    let (evaluation, proof) = ck.open(&polynomial, &alpha);
    let expected_evaluation = polynomial.evaluate(&alpha);
    assert_eq!(evaluation, expected_evaluation);
    assert!(vk.verify(&commitment, &alpha, &evaluation, &proof).is_ok())
}

#[test]
fn test_open_multi_points() {
    use crate::ark_std::UniformRand;
    use ark_bls12_381::{Bls12_381, Fr};
    use ark_ff::Field;
    use ark_poly::univariate::DensePolynomial;
    use ark_poly::DenseUVPolynomial;
    use ark_std::test_rng;

    let max_msm_buffer = 1 << 20;
    let rng = &mut test_rng();
    // f = 80*x^6 + 80*x^5 + 88*x^4 + 3*x^3 + 73*x^2 + 7*x + 24
    let polynomial = [
        Fr::from(80u64),
        Fr::from(80u64),
        Fr::from(88u64),
        Fr::from(3u64),
        Fr::from(73u64),
        Fr::from(7u64),
        Fr::from(24u64),
    ];
    let polynomial_stream = &polynomial[..];
    let beta = Fr::from(53u64);

    let time_ck = CommitterKey::<Bls12_381>::new(200, 3, rng);
    let space_ck = CommitterKeyStream::from(&time_ck);

    let (remainder, _commitment) = space_ck.open_multi_points(
        &polynomial_stream,
        &[beta.square(), beta, -beta],
        max_msm_buffer,
    );
    let evaluation_remainder = evaluate_be(&remainder, &beta);
    assert_eq!(evaluation_remainder, Fr::from(1807299544171u64));

    let (remainder, _commitment) =
        space_ck.open_multi_points(&polynomial_stream, &[beta], max_msm_buffer);
    assert_eq!(remainder.len(), 1);

    // get a random polynomial with random coefficients
    let polynomial =
        DensePolynomial::rand(100, rng).coeffs().to_vec();
    let polynomial_stream = &polynomial[..];
    let beta = Fr::rand(rng);
    let (_, evaluation_proof_batch) =
        space_ck.open_multi_points(&polynomial_stream, &[beta], max_msm_buffer);
    let (_, evaluation_proof_single) = space_ck.open(&polynomial_stream, &beta, max_msm_buffer);
    assert_eq!(evaluation_proof_batch, evaluation_proof_single);

    let (remainder, _evaluation_proof) = space_ck.open_multi_points(
        &polynomial_stream,
        &[beta, -beta, beta.square()],
        max_msm_buffer,
    );
    let expected_evaluation = evaluate_be(&remainder, &beta);
    let obtained_evaluation = evaluate_be(&polynomial, &beta);
    assert_eq!(expected_evaluation, obtained_evaluation);
    let expected_evaluation = evaluate_be(&remainder, &beta.square());
    let obtained_evaluation = evaluate_be(&polynomial, &beta.square());
    assert_eq!(expected_evaluation, obtained_evaluation);
}
--------------------------------------------------------------------------------
/poly-commit/src/streaming_kzg/time.rs:
--------------------------------------------------------------------------------
//! An implementation of a time-efficient version of Kate et al.'s polynomial commitment,
//! with optimizations from [\[BDFG20\]](https://eprint.iacr.org/2020/081.pdf).
use crate::streaming_kzg::{
    linear_combination, msm, powers, vanishing_polynomial, Commitment, EvaluationProof, VerifierKey,
};
use ark_ec::{pairing::Pairing, scalar_mul::ScalarMul, CurveGroup};
use ark_ff::Zero;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
#[cfg(not(feature = "std"))]
use ark_std::vec::Vec;
use ark_std::{borrow::Borrow, ops::Div, ops::Mul, rand::RngCore, UniformRand};

/// The SRS for the polynomial commitment scheme.
///
/// The SRS consists of the `max_degree` powers of \\(\tau\\) in \\(\GG_1\\)
/// plus the `max_eval_points` powers over \\(\GG_2\\),
/// where `max_degree` is the max polynomial degree to commit to,
/// and `max_eval_points` is the max number of different points to open simultaneously.
pub struct CommitterKey<E: Pairing> {
    pub(crate) powers_of_g: Vec<E::G1Affine>,
    pub(crate) powers_of_g2: Vec<E::G2Affine>,
}

impl<E: Pairing> From<&CommitterKey<E>> for VerifierKey<E> {
    fn from(ck: &CommitterKey<E>) -> VerifierKey<E> {
        let max_eval_points = ck.max_eval_points();
        let powers_of_g2 = ck.powers_of_g2[..max_eval_points + 1].to_vec();
        let powers_of_g = ck.powers_of_g[..max_eval_points].to_vec();

        VerifierKey {
            powers_of_g,
            powers_of_g2,
        }
    }
}

impl<E: Pairing> CommitterKey<E> {
    /// The setup algorithm for the commitment scheme.
    ///
    /// Given a degree bound `max_degree`,
    /// an evaluation point bound `max_eval_points`,
    /// and a cryptographically-secure random number generator `rng`,
    /// construct the committer key.
    pub fn new(max_degree: usize, max_eval_points: usize, rng: &mut impl RngCore) -> Self {
        // Compute the consecutive powers of an element.
        let tau = E::ScalarField::rand(rng);
        let powers_of_tau = powers(tau, max_degree + 1);

        let g = E::G1::rand(rng);
        let powers_of_g = g.batch_mul(&powers_of_tau);

        let g2 = E::G2::rand(rng).into_affine();
        let powers_of_g2 = powers_of_tau
            .iter()
            .take(max_eval_points + 1)
            .map(|t| g2.mul(t).into_affine())
            .collect::<Vec<_>>();

        CommitterKey {
            powers_of_g,
            powers_of_g2,
        }
    }

    /// Return the bound on evaluation points.
    #[inline]
    pub fn max_eval_points(&self) -> usize {
        self.powers_of_g2.len() - 1
    }

    /// Given a polynomial `polynomial` of degree less than `max_degree`, return a commitment to `polynomial`.
    pub fn commit(&self, polynomial: &[E::ScalarField]) -> Commitment<E> {
        Commitment(msm::<E>(&self.powers_of_g, polynomial))
    }

    /// Obtain a new preprocessed committer key defined by the indices `indices`.
    pub fn index_by(&self, indices: &[usize]) -> Self {
        let mut indexed_powers_of_g = vec![E::G1::zero(); self.powers_of_g.len()];
        indices
            .iter()
            .zip(self.powers_of_g.iter())
            .for_each(|(&i, &g)| indexed_powers_of_g[i] = indexed_powers_of_g[i] + g);
        Self {
            powers_of_g2: self.powers_of_g2.clone(),
            powers_of_g: E::G1::normalize_batch(indexed_powers_of_g.as_slice()),
        }
    }

    /// Given an iterator over `polynomials`, expressed as vectors of coefficients, return a vector of commitments to all of them.
    pub fn batch_commit<J>(&self, polynomials: J) -> Vec<Commitment<E>>
    where
        J: IntoIterator,
        J::Item: Borrow<Vec<E::ScalarField>>,
    {
        polynomials
            .into_iter()
            .map(|p| self.commit(p.borrow()))
            .collect::<Vec<_>>()
    }

    /// Given a polynomial `polynomial` and an evaluation point `evaluation_point`,
    /// return the evaluation of `polynomial` at `evaluation_point`,
    /// together with an evaluation proof.
    pub fn open(
        &self,
        polynomial: &[E::ScalarField],
        evaluation_point: &E::ScalarField,
    ) -> (E::ScalarField, EvaluationProof<E>) {
        let mut quotient = Vec::new();

        // Synthetic division by (x - evaluation_point), scanning the
        // coefficients from the highest-degree one downwards.
        let mut previous = E::ScalarField::zero();
        for &c in polynomial.iter().rev() {
            let coefficient = c + previous * evaluation_point;
            quotient.insert(0, coefficient);
            previous = coefficient;
        }

        // The constant term of the accumulator is the evaluation; the remaining
        // coefficients form the quotient.
        let (&evaluation, quotient) = quotient
            .split_first()
            .unwrap_or((&E::ScalarField::zero(), &[]));
        let evaluation_proof = msm::<E>(&self.powers_of_g, quotient);
        (evaluation, EvaluationProof(evaluation_proof))
    }

    /// Evaluate a single polynomial at a set of points `eval_points`, and provide a single evaluation proof.
    pub fn open_multi_points(
        &self,
        polynomial: &[E::ScalarField],
        eval_points: &[E::ScalarField],
    ) -> EvaluationProof<E> {
        // Compute the vanishing polynomial over `eval_points`.
        let z_poly = vanishing_polynomial(eval_points);

        let f_poly = DensePolynomial::from_coefficients_slice(polynomial);
        let q_poly = f_poly.div(&z_poly);
        EvaluationProof(self.commit(&q_poly.coeffs).0)
    }

    /// Evaluate a set of polynomials at a set of points `eval_points`, and provide a single batched evaluation proof.
    /// `eval_chal` is the random challenge for batching evaluation proofs across different polynomials.
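    //
    // Illustrative note (added, not part of the original docs): writing
    // eta = `eval_chal`, the function below batches the polynomials as
    //     f(x) = sum_i eta^i * f_i(x)
    // via `linear_combination`, and then delegates to `open_multi_points`,
    // so the returned proof is a commitment to the quotient q(x) = f(x) / Z(x),
    // where Z is the vanishing polynomial of `eval_points`.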
    pub fn batch_open_multi_points(
        &self,
        polynomials: &[&Vec<E::ScalarField>],
        eval_points: &[E::ScalarField],
        eval_chal: &E::ScalarField,
    ) -> EvaluationProof<E> {
        assert!(eval_points.len() < self.powers_of_g2.len());
        let etas = powers(*eval_chal, polynomials.len());
        let batched_polynomial =
            linear_combination(polynomials, &etas).unwrap_or_else(|| vec![E::ScalarField::zero()]);
        self.open_multi_points(&batched_polynomial, eval_points)
    }
}
--------------------------------------------------------------------------------
/poly-commit/src/utils.rs:
--------------------------------------------------------------------------------
use ark_ff::Field;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
#[cfg(not(feature = "std"))]
use ark_std::vec::Vec;
#[cfg(all(not(feature = "std"), target_arch = "aarch64"))]
use num_traits::Float;
#[cfg(feature = "parallel")]
use rayon::{
    iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
    prelude::IndexedParallelIterator,
};

/// Takes as input a struct and converts it to a series of bytes. Any type
/// that implements `CanonicalSerialize` can be automatically converted to
/// bytes in this manner.
/// From the jellyfish lib.
#[macro_export]
macro_rules! to_bytes {
    ($x:expr) => {{
        let mut buf = ark_std::vec![];
        ark_serialize::CanonicalSerialize::serialize_compressed($x, &mut buf).map(|_| buf)
    }};
}

/// Binary entropy function.
pub(crate) fn ent(x: f64) -> f64 {
    assert!(0f64 <= x && x <= 1f64);
    if x == 0f64 || x == 1f64 {
        0f64
    } else {
        -x * x.log2() - (1.0 - x) * (1.0 - x).log2()
    }
}

/// Ceiling of a * b, where a is an integer and b is a rational number
/// given as a (numerator, denominator) pair.
#[inline]
pub(crate) fn ceil_mul(a: usize, b: (usize, usize)) -> usize {
    (a * b.0 + b.1 - 1) / b.1
}

/// Return ceil(x / y).
pub(crate) fn ceil_div(x: usize, y: usize) -> usize {
    // XXX. warning: this expression can overflow.
    (x + y - 1) / y
}

/// A dense matrix over a field, stored as a list of rows.
#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)]
#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))]
pub struct Matrix<F: Field> {
    pub(crate) n: usize,
    pub(crate) m: usize,
    entries: Vec<Vec<F>>,
}

impl<F: Field> Matrix<F> {
    /// Returns a Matrix of dimensions n x m given a list of n * m field elements.
    /// The list should be ordered row-first, i.e. [a11, ..., a1m, a21, ..., a2m, ...].
    ///
    /// # Panics
    /// Panics if the dimensions do not match the length of the list
    pub(crate) fn new_from_flat(n: usize, m: usize, entry_list: &[F]) -> Self {
        assert_eq!(
            entry_list.len(),
            n * m,
            "Invalid matrix construction: dimensions are {} x {} but entry vector has {} entries",
            n,
            m,
            entry_list.len()
        );

        // TODO more efficient to run linearly?
        let entries: Vec<Vec<F>> = (0..n)
            .map(|row| (0..m).map(|col| entry_list[m * row + col]).collect())
            .collect();

        Self { n, m, entries }
    }

    /// Returns a Matrix given a list of its rows, each in turn represented as a list of field elements.
    ///
    /// # Panics
    /// Panics if the sub-lists do not all have the same length.
    pub(crate) fn new_from_rows(row_list: Vec<Vec<F>>) -> Self {
        let m = row_list[0].len();

        for row in row_list.iter().skip(1) {
            assert_eq!(
                row.len(),
                m,
                "Invalid matrix construction: not all rows have the same length"
            );
        }

        Self {
            n: row_list.len(),
            m,
            entries: row_list,
        }
    }

    /// Returns the entry in position (i, j). **Indexing starts at 0 in both coordinates**,
    /// i.e. the first element is in position (0, 0) and the last one in (n - 1, m - 1),
    /// where n and m are the number of rows and columns, respectively.
    ///
    /// Index bound checks are waived for efficiency, and behaviour under invalid indexing is undefined.
    #[cfg(test)]
    pub(crate) fn entry(&self, i: usize, j: usize) -> F {
        self.entries[i][j]
    }

    /// Returns self as a list of rows
    pub(crate) fn rows(&self) -> Vec<Vec<F>> {
        self.entries.clone()
    }

    /// Returns self as a list of columns
    pub(crate) fn cols(&self) -> Vec<Vec<F>> {
        (0..self.m)
            .map(|col| (0..self.n).map(|row| self.entries[row][col]).collect())
            .collect()
    }

    /// Returns the product v * self, where v is interpreted as a row vector. In other words,
    /// it returns a linear combination of the rows of self with coefficients given by v.
    ///
    /// Panics if the length of v is different from the number of rows of self.
    pub(crate) fn row_mul(&self, v: &[F]) -> Vec<F> {
        assert_eq!(
            v.len(),
            self.n,
            "Invalid row multiplication: vector has {} elements whereas each matrix column has {}",
            v.len(),
            self.n
        );

        cfg_into_iter!(0..self.m)
            .map(|col| {
                inner_product(
                    v,
                    &cfg_into_iter!(0..self.n)
                        .map(|row| self.entries[row][col])
                        .collect::<Vec<F>>(),
                )
            })
            .collect()
    }
}

#[inline]
pub(crate) fn inner_product<F: Field>(v1: &[F], v2: &[F]) -> F {
    ark_std::cfg_iter!(v1)
        .zip(v2)
        .map(|(li, ri)| *li * ri)
        .sum()
}

#[inline]
pub(crate) fn scalar_by_vector<F: Field>(s: F, v: &[F]) -> Vec<F> {
    ark_std::cfg_iter!(v).map(|x| *x * s).collect()
}

#[inline]
pub(crate) fn vector_sum<F: Field>(v1: &[F], v2: &[F]) -> Vec<F> {
    ark_std::cfg_iter!(v1)
        .zip(v2)
        .map(|(li, ri)| *li + ri)
        .collect()
}

#[inline]
#[cfg(test)]
pub(crate) fn to_field<F: Field>(v: Vec<u64>) -> Vec<F> {
    v.iter().map(|x| F::from(*x)).collect::<Vec<F>>()
}

// TODO: replace by https://github.com/arkworks-rs/crypto-primitives/issues/112.
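// Illustrative usage sketch for the `to_bytes!` macro above (added example;
// it assumes `ark_bls12_377` is available as a dev-dependency, as in the
// tests module below).
#[cfg(test)]
mod to_bytes_example {
    #[test]
    fn to_bytes_roundtrip() {
        use ark_bls12_377::Fr;
        use ark_serialize::CanonicalDeserialize;
        use ark_std::UniformRand;

        let x = Fr::rand(&mut ark_std::test_rng());
        // Serialize with the macro, then deserialize and check we get `x` back.
        let bytes = crate::to_bytes!(&x).unwrap();
        let y = Fr::deserialize_compressed(&bytes[..]).unwrap();
        assert_eq!(x, y);
    }
}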
#[cfg(test)]
use ark_crypto_primitives::sponge::poseidon::PoseidonSponge;
#[cfg(test)]
use ark_ff::PrimeField;

#[cfg(test)]
pub(crate) fn test_sponge<F: PrimeField>() -> PoseidonSponge<F> {
    use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, CryptographicSponge};
    use ark_std::test_rng;

    let full_rounds = 8;
    let partial_rounds = 31;
    let alpha = 17;

    let mds = vec![
        vec![F::one(), F::zero(), F::one()],
        vec![F::one(), F::one(), F::zero()],
        vec![F::zero(), F::one(), F::one()],
    ];

    let mut v = Vec::new();
    let mut ark_rng = test_rng();

    for _ in 0..(full_rounds + partial_rounds) {
        let mut res = Vec::new();

        for _ in 0..3 {
            res.push(F::rand(&mut ark_rng));
        }
        v.push(res);
    }
    let config = PoseidonConfig::new(full_rounds, partial_rounds, alpha, mds, v, 2, 1);
    PoseidonSponge::new(&config)
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use ark_bls12_377::Fr;

    #[test]
    fn test_matrix_constructor_flat() {
        let entries: Vec<Fr> = to_field(vec![10, 100, 4, 67, 44, 50]);
        let mat = Matrix::new_from_flat(2, 3, &entries);
        assert_eq!(mat.entry(1, 2), Fr::from(50));
    }

    #[test]
    fn test_matrix_constructor_flat_square() {
        let entries: Vec<Fr> = to_field(vec![10, 100, 4, 67]);
        let mat = Matrix::new_from_flat(2, 2, &entries);
        assert_eq!(mat.entry(1, 1), Fr::from(67));
    }

    #[test]
    #[should_panic(expected = "dimensions are 2 x 3 but entry vector has 5 entries")]
    fn test_matrix_constructor_flat_panic() {
        let entries: Vec<Fr> = to_field(vec![10, 100, 4, 67, 44]);
        Matrix::new_from_flat(2, 3, &entries);
    }

    #[test]
    fn test_matrix_constructor_rows() {
        let rows: Vec<Vec<Fr>> = vec![
            to_field(vec![10, 100, 4]),
            to_field(vec![23, 1, 0]),
            to_field(vec![55, 58, 9]),
        ];
        let mat = Matrix::new_from_rows(rows);
        assert_eq!(mat.entry(2, 0), Fr::from(55));
    }

    #[test]
    #[should_panic(expected = "not all rows have the same length")]
    fn test_matrix_constructor_rows_panic() {
        let rows: Vec<Vec<Fr>> = vec![
            to_field(vec![10, 100, 4]),
            to_field(vec![23, 1, 0]),
            to_field(vec![55, 58]),
        ];
        Matrix::new_from_rows(rows);
    }

    #[test]
    fn test_cols() {
        let rows: Vec<Vec<Fr>> = vec![
            to_field(vec![4, 76]),
            to_field(vec![14, 92]),
            to_field(vec![17, 89]),
        ];

        let mat = Matrix::new_from_rows(rows);

        assert_eq!(mat.cols()[1], to_field(vec![76, 92, 89]));
    }

    #[test]
    fn test_row_mul() {
        let rows: Vec<Vec<Fr>> = vec![
            to_field(vec![10, 100, 4]),
            to_field(vec![23, 1, 0]),
            to_field(vec![55, 58, 9]),
        ];

        let mat = Matrix::new_from_rows(rows);
        let v: Vec<Fr> = to_field(vec![12, 41, 55]);
        // by giving the result in the integers and then converting to Fr
        // we ensure the test will still pass even if Fr changes
        assert_eq!(mat.row_mul(&v), to_field::<Fr>(vec![4088, 4431, 543]));
    }
}
--------------------------------------------------------------------------------
/scripts/install-hook.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# This script will install the provided directory ../.hooks as the hook
# directory for
# the present repo. See there for hooks, including a pre-commit
# hook that runs rustfmt on files before a commit.

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
HOOKS_DIR="${DIR}/../.hooks"

git config core.hooksPath "$HOOKS_DIR"
--------------------------------------------------------------------------------
/scripts/linkify_changelog.py:
--------------------------------------------------------------------------------
import re
import sys
import fileinput
import os

# Set this to the name of the repo, if you don't want it to be read from the filesystem.
# It assumes the changelog file is in the root of the repo.
repo_name = ""

# This script goes through the provided file, and replaces any " \#<number>"
# with the valid markdown-formatted link to it, e.g.
# " [\#<number>](https://github.com/arkworks-rs/template/pull/<number>)".
# Note that if the number is for an issue, GitHub will auto-redirect you when you click the link.
# It is safe to run the script multiple times in succession.
#
# Example usage: $ python3 linkify_changelog.py ../CHANGELOG.md
if len(sys.argv) < 2:
    print("Must include path to changelog as the first argument to the script")
    print("Example Usage: python3 linkify_changelog.py ../CHANGELOG.md")
    sys.exit(1)

changelog_path = sys.argv[1]
if repo_name == "":
    path = os.path.abspath(changelog_path)
    components = path.split(os.path.sep)
    repo_name = components[-2]

for line in fileinput.input(inplace=True):
    line = re.sub(r"\- #([0-9]*)", r"- [\\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", line.rstrip())
    # `inplace=True` redirects stdout into the file, so this edits it in place.
    print(line)
--------------------------------------------------------------------------------